text stringlengths 957 885k |
|---|
"""
Code derived and rehashed from: https://www.github.com/kyubyong/transformer
"""
from __future__ import print_function
import numpy as np
import codecs
import regex
import random
import torch
def load_de_vocab(min_cnt):
    """Load the German vocabulary from preprocessed/de.vocab.tsv.

    Keeps only words whose count (column 2) is >= min_cnt.

    Args:
        min_cnt: minimum occurrence count for a word to be kept.

    Returns:
        (word2idx, idx2word) dicts; indices follow file order, so the
        special tokens at the top of the file keep their reserved ids.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with codecs.open('preprocessed/de.vocab.tsv', 'r', 'utf-8') as f:
        vocab = [line.split()[0] for line in f.read().splitlines()
                 if int(line.split()[1]) >= min_cnt]
    word2idx = {word: idx for idx, word in enumerate(vocab)}
    idx2word = {idx: word for idx, word in enumerate(vocab)}
    return word2idx, idx2word
def load_en_vocab(min_cnt):
    """Load the English vocabulary from preprocessed/en.vocab.tsv.

    Keeps only words whose count (column 2) is >= min_cnt.

    Args:
        min_cnt: minimum occurrence count for a word to be kept.

    Returns:
        (word2idx, idx2word) dicts; indices follow file order.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with codecs.open('preprocessed/en.vocab.tsv', 'r', 'utf-8') as f:
        vocab = [line.split()[0] for line in f.read().splitlines()
                 if int(line.split()[1]) >= min_cnt]
    word2idx = {word: idx for idx, word in enumerate(vocab)}
    idx2word = {idx: word for idx, word in enumerate(vocab)}
    return word2idx, idx2word
def create_data(src_sents, tgt_sents, min_cnt, max_len):
    """Index parallel sentences and right-pad them to a fixed length.

    Each sentence gets a trailing " </S>" end-of-text marker before lookup;
    out-of-vocabulary words map to id 1. Pairs where either side exceeds
    max_len tokens are dropped entirely.

    Args:
        src_sents, tgt_sents: parallel lists of raw sentence strings.
        min_cnt: vocabulary frequency cutoff (passed to the vocab loaders).
        max_len: maximum sentence length in tokens.

    Returns:
        X, Y: int32 arrays of shape (num_kept, max_len), zero-padded on the
            right (0 is the padding id).
        Sources, Targets: the kept raw source/target sentences.
    """
    de2idx, idx2de = load_de_vocab(min_cnt)
    en2idx, idx2en = load_en_vocab(min_cnt)
    # Index
    x_list, y_list, Sources, Targets = [], [], [], []
    for src_sent, tgt_sent in zip(src_sents, tgt_sents):
        x = [de2idx.get(word, 1) for word in (src_sent + u" </S>").split()]  # 1: OOV, </S>: End of Text
        y = [en2idx.get(word, 1) for word in (tgt_sent + u" </S>").split()]
        if max(len(x), len(y)) <= max_len:
            x_list.append(np.array(x))
            y_list.append(np.array(y))
            Sources.append(src_sent)
            Targets.append(tgt_sent)
    # Pad (np.pad replaces the long-deprecated np.lib.pad alias)
    X = np.zeros([len(x_list), max_len], np.int32)
    Y = np.zeros([len(y_list), max_len], np.int32)
    for i, (x, y) in enumerate(zip(x_list, y_list)):
        X[i] = np.pad(x, (0, max_len - len(x)), 'constant', constant_values=0)
        Y[i] = np.pad(y, (0, max_len - len(y)), 'constant', constant_values=0)
    return X, Y, Sources, Targets
def load_train_data(train_src, train_tgt, min_cnt, max_len):
    """Read training corpora, strip non-Latin characters, and index/pad them.

    Lines starting with "<" (markup) are skipped. Raw strings are used for
    the regex patterns: the original non-raw "[^\\s\\p{Latin}']" relied on
    unrecognized escapes passing through, which is a SyntaxWarning on
    modern Python.

    Returns:
        X, Y: padded int32 index arrays (see create_data).
    """
    de_sents = [regex.sub(r"[^\s\p{Latin}']", "", line)
                for line in codecs.open(train_src, 'r', 'utf-8').read().split("\n")
                if line and line[0] != "<"]
    en_sents = [regex.sub(r"[^\s\p{Latin}']", "", line)
                for line in codecs.open(train_tgt, 'r', 'utf-8').read().split("\n")
                if line and line[0] != "<"]
    X, Y, Sources, Targets = create_data(de_sents, en_sents, min_cnt, max_len)
    return X, Y
def load_test_data(test_src, test_tgt, min_cnt, max_len):
    """Read IWSLT-style test files (only "<seg" lines) and index/pad them.

    Returns:
        X, Y, Sources, Targets — padded index arrays plus the raw kept
        sentences (needed for BLEU evaluation).
    """
    def _refine(line):
        # Drop XML tags, then everything but whitespace/Latin letters/apostrophes.
        line = regex.sub(r"<[^>]+>", "", line)
        line = regex.sub(r"[^\s\p{Latin}']", "", line)
        return line.strip()
    de_sents = [_refine(line) for line in codecs.open(test_src, 'r', 'utf-8').read().split("\n")
                if line and line[:4] == "<seg"]
    en_sents = [_refine(line) for line in codecs.open(test_tgt, 'r', 'utf-8').read().split("\n")
                if line and line[:4] == "<seg"]
    X, Y, Sources, Targets = create_data(de_sents, en_sents, min_cnt, max_len)
    return X, Y, Sources, Targets  # (1064, 150)
def get_batch_indices(total_length, batch_size):
    """Yield (indices, start_offset) for shuffled full batches.

    The index list is shuffled once, then sliced into consecutive batches of
    exactly batch_size; a trailing partial batch is dropped.

    BUG FIX: the original advanced current_index BEFORE the first yield, so
    the first batch_size shuffled indices were never emitted and the final
    yield could be a short batch past the intended boundary.
    """
    indices = list(range(total_length))
    random.shuffle(indices)
    current_index = 0
    while current_index + batch_size <= total_length:
        yield indices[current_index: current_index + batch_size], current_index
        current_index += batch_size
|
<gh_stars>1-10
"""
Linkedlist
:file: server.py
:author: <NAME>
:date: March, 2016
:description:
The implementation of our server which runs as a python script,
using the schema.sql file in this directory to initialize the db
"""
from contextlib import closing
import sqlite3
from flask import Flask, Response, request, session, g, url_for, abort
#from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
import json
import hashlib
import time
# config
DATABASE = 'linkedlist.db'
DEBUG = True
# NOTE(review): hard-coded secret key and admin credentials are acceptable
# for a class project only; load them from the environment in any real
# deployment.
SECRET_KEY = 'secret'
USERNAME = 'admin'
PASSWORD = 'password'
app = Flask(__name__)
#app.config['SQLALCHEMY_DATABASE_URL'] = 'sqlite:////tmp/linkedlist.db'
#db = SQLAlchemy(app)
# Pull the upper-case module globals above (DATABASE, DEBUG, ...) into Flask's config.
app.config.from_object(__name__)
# app.config.from_envvar('LINKEDLIST_SETTINGS', silent=True) # our config file
"""~~~~SQLAlchemy class definitions~~~~"""
# for when I have time to migrate our app to this schema...
"""
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(25))
email = db.Column(db.String(120), unique=True)
auth_token = db.Column(db.Text)
def __init__(self, username, email, auth_token)
self.username = username
self.email = email
auth_token = auth_token
class Session(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer)
session_api_key = db.Column(db.String())
"""
@app.before_request
def before_request():
    # Open a fresh SQLite connection for every request and stash it on
    # Flask's per-request context object `g`; closed in teardown_request.
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database connection, if one was opened."""
    connection = getattr(g, 'db', None)
    if connection is not None:
        connection.close()
# initialize our database, reading from the file 'schema.sql' in this directory
def init_db():
    """Create the database tables by executing schema.sql.

    Run once before first use. `closing` guarantees the connection is
    closed even if executescript raises.
    """
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def connect_db():
    # One new connection per call; callers (before_request/init_db) own closing it.
    return sqlite3.connect(app.config['DATABASE'])
def check_auth(api_key):
    """Return True when exactly one session row matches *api_key*."""
    matching_sessions = g.db.execute(
        'select * from session where session_api_key=?', [api_key]
    ).fetchall()
    return len(matching_sessions) == 1
def requires_auth(f):
    """Decorator: reject requests whose JSON body lacks a valid session_api_key.

    Aborts with 400 on missing/invalid key; otherwise calls the wrapped view.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # silent=True makes malformed or non-JSON bodies yield None instead
        # of raising, so bad input becomes a clean 400 rather than a 500
        # (the original crashed with AttributeError on a None body).
        message_json = request.get_json(silent=True)
        if message_json is None:
            abort(400, 'No API key')
        session_api_key = message_json.get('session_api_key')
        if session_api_key is None:
            abort(400, 'No API key')
        if not check_auth(session_api_key):
            abort(400, 'Invalid API key')
        return f(*args, **kwargs)
    return decorated
# return the user id associated with the passed in api key
def get_auth_user(session_api_key):
    """Map a session API key to its user id; None when the key is unknown."""
    if session_api_key is None:
        return None
    matches = g.db.execute(
        'select user_id from session where session_api_key=?',
        [session_api_key],
    ).fetchall()
    return matches[0][0] if len(matches) == 1 else None
"""~~~~ Routes ~~~~"""
def json_ok_response(data=None):
    """Build a 200 application/json response; *data* becomes the JSON body when given."""
    if data is None:
        return Response(status=200, mimetype='application/json')
    return Response(json.dumps(data), status=200, mimetype='application/json')
"""~~~~ User Routes ~~~~"""
def gen_api_key(auth_token):
    """Derive a fresh session API key by hashing the auth token plus current time.

    Returns a 64-character hex SHA-256 digest. The concatenation is encoded
    explicitly: hashlib.update requires bytes, so the original plain-str
    call raised TypeError on Python 3.
    """
    sha = hashlib.sha256()
    sha.update((str(auth_token) + str(time.time())).encode('utf-8'))
    return sha.hexdigest()
@app.route('/user/login', methods=['POST'])
def login():
    """Authenticate by email/password and mint a new session API key.

    Expects JSON {email_address, password}. On success, replaces any
    existing session row for the user (one active session per user) and
    returns {session_api_key, username}; invalid credentials abort with 400.
    """
    content = request.get_json()
    if not content:
        abort(400) # invalid request
    email = content.get('email_address')
    password = content.get('password')
    if email is None or password is None:
        abort(400, 'Invalid fields in request json')
    cur = g.db.execute('select id, auth_token, username from user where email==?', [email])
    rows = cur.fetchall()
    if (len(rows) == 0):
        abort(400, 'Invalid login credentials')
    elif (len(rows) == 1):
        user = rows[0]
        user_id = user[0]
        # auth_token is the werkzeug password hash stored at account creation
        db_auth_token = user[1]
        username = user[2]
        session_api_key = gen_api_key(db_auth_token)
        if check_password_hash(db_auth_token, password):
            # the password is good, return a session token
            g.db.execute('delete from session where user_id=?', [user_id])
            g.db.execute('insert into session (user_id, session_api_key) values (?, ?)', [user_id, session_api_key])
            g.db.commit()
            return json_ok_response(dict(session_api_key=session_api_key, username=username))
        else:
            # the password is invalid, return a failure
            abort(400, 'Invalid login credentials')
    else:
        # this shouldn't be possible because emails are unique but we should still handle the case, and print an error to the server log.
        abort(500, 'Something bad happened, please contact a server administrator')
def email_taken(email):
    """Return True when a user row already exists for *email*."""
    existing = g.db.execute('select * from user where email = ?', [email]).fetchall()
    return len(existing) != 0
@app.route('/user/createaccount', methods=['POST'])
def create_user():
    """Register a new user and immediately create a session for them.

    Expects JSON {username, email_address, password, password_conf}.
    Returns {session_api_key} on success.
    """
    content = request.get_json() # force=True will try and get json data even if the header doesn't say the content type is application/json
    if not content:
        abort(400) # invalid request
    username = content.get('username')
    # verify that the email is unique to the database
    # NOTE(review): uniqueness is checked before the None-field validation
    # below, so email_taken(None) runs on a missing email — harmless but
    # worth confirming.
    email = content.get('email_address')
    if email_taken(email):
        abort(400, 'Email is in use')
    password = content.get('password')
    password_conf = content.get('password_conf')
    if username is None or email is None or password is None or password_conf is None:
        abort(400, 'Invalid fields in request json')
    if password != password_conf: # passwords don't match
        abort(400, 'Passwords don\'t match')
    # hash the password into an auth token
    auth_token = generate_password_hash(password)
    # insert the data
    g.db.execute('insert into user (username, email, auth_token) values (?, ?, ?)',
                 [username, email, auth_token])
    g.db.commit()
    # read back the generated row id and stored hash to seed the session
    cur = g.db.execute('select id, auth_token from user where email = ?', [email])
    row = cur.fetchone()
    user_id = row[0]
    auth_token = row[1]
    session_api_key = gen_api_key(auth_token)
    g.db.execute('insert into session (user_id, session_api_key) values (?, ?)', [user_id, session_api_key])
    g.db.commit()
    return json_ok_response(dict(session_api_key=session_api_key))
@app.route('/user', methods=['POST'])
@requires_auth
def get_user_info():
    """Return {username, email} for the user identified by email_address."""
    content = request.get_json()
    if not content:
        abort(400)  # invalid request
    email = content.get('email_address')
    if email is None:
        abort(400)
    rows = g.db.execute('select username, email from user where email = ?', [email])
    row = rows.fetchone()
    # fetchone() returns None (not an empty row) when nothing matches; the
    # original `len(row) == 0` raised TypeError in that case.
    if row is None:
        abort(500, 'Invalid username')
    username = row[0]
    email = row[1]
    return json_ok_response(dict(username=username, email=email))
"""~~~~ List Routes ~~~~"""
def list_exists(owner_id, list_name):
    """True when *owner_id* already owns a list named *list_name*."""
    matching = g.db.execute(
        'select * from list where (owner_id==?) and (name==?)',
        [owner_id, list_name],
    ).fetchall()
    return len(matching) != 0
def list_exists_and_user_is_owner(owner_id, list_id):
    """True when list *list_id* exists and is owned by *owner_id*."""
    owned = g.db.execute(
        'select * from list where owner_id==? and id==?',
        [owner_id, list_id],
    ).fetchall()
    return len(owned) == 1
def get_user_id_from_email(email):
    """Look up a user id by email; returns None when no user matches.

    fetchone() returns None on no match, so guard before indexing — the
    original called len(None) and raised TypeError for unknown emails.
    """
    user = g.db.execute('select id from user where email==?', [email]).fetchone()
    if user is None:
        return None
    return user[0]
def list_exists_and_user_is_member(list_id, user_id):
    """True iff the list exists AND user_id is registered as one of its members."""
    if list_id is None or user_id is None:
        return False
    found_list = g.db.execute(
        'select id from list where id==?', [list_id]
    ).fetchall()
    if not found_list:
        return False
    membership = g.db.execute(
        'select id from list_member where list_id==? and user_id==?',
        [list_id, user_id],
    ).fetchall()
    return bool(membership)
@app.route('/list/create', methods=['POST'])
@requires_auth
def create_list():
    """Create a new list named 'my new list' owned by the authenticated user.

    Fails with 400 if the user already has a list with that (fixed) name.
    Returns {list_id}.
    """
    content = request.get_json()
    if not content:
        app.logger.debug('create_list: no message content')
        abort(400) # invalid request
    session_api_key = request.get_json()['session_api_key']
    # every new list starts with this placeholder name; renamed via /list/update
    name = 'my new list'
    user_id = get_auth_user(session_api_key=session_api_key)
    if user_id is None:
        app.logger.debug('User tried to create a list, authorized but could not get user_id with session key %s' % session_api_key)
        abort(400)
    # a user can't create more than one list with the same name
    if list_exists(owner_id=user_id, list_name=name):
        app.logger.debug('User %s tried to create the list %s but it already exists' % (user_id, name))
        abort(400)
    g.db.execute('insert into list (owner_id, name) values (?, ?)', [user_id, name])
    g.db.commit()
    # read back the auto-generated list id
    row = g.db.execute('select id from list where (owner_id==?) and (name==?)', [user_id, name]).fetchone()
    list_id = row[0]
    # add the user to the list_member table, even though this user is already listed as the list owner
    g.db.execute('insert into list_member (list_id, user_id) values (?, ?)', [list_id, user_id])
    g.db.commit()
    return json_ok_response(dict(list_id=list_id))
@app.route('/list/delete', methods=['POST'])
@requires_auth
def delete_list():
    """Delete a list; only its owner may do so. Returns an empty 200 on success."""
    content = request.get_json()
    if not content:
        abort(400)  # invalid request
    session_api_key = content.get('session_api_key')
    list_id = content.get('list_id')
    user_id = get_auth_user(session_api_key=session_api_key)
    if user_id is None:
        app.logger.debug('User tried to delete a list, authorized but could not get user_id with session key %s' % session_api_key)
        abort(400)
    # BUG FIX: the original called the undefined name
    # list_exists_and_current_user_is_owner (NameError at runtime); the
    # helper actually defined in this file is list_exists_and_user_is_owner.
    if list_exists_and_user_is_owner(owner_id=user_id, list_id=list_id):
        g.db.execute('delete from list where id==?', [list_id])
        g.db.commit()
        # a Flask view must return a response; the original fell through
        # returning None, which Flask turns into a 500
        return json_ok_response()
    else:
        abort(400)
@app.route('/list/update', methods=['POST'])
@requires_auth
def update_list():
    """Rename a list; only its owner may do so. Returns an empty 200 on success."""
    content = request.get_json()
    if not content:
        abort(400)  # invalid request
    session_api_key = content.get('session_api_key')
    list_id = content.get('list_id')
    name = content.get('name')
    if name is None:
        abort(400)
    user_id = get_auth_user(session_api_key=session_api_key)
    if user_id is None:
        # log message fixed: this is the update path, not delete
        app.logger.debug('User tried to update a list, authorized but could not get user_id with session key %s' % session_api_key)
        abort(400)
    # BUG FIX: the original called the undefined name
    # list_exists_and_current_user_is_owner (NameError at runtime).
    if list_exists_and_user_is_owner(owner_id=user_id, list_id=list_id):
        g.db.execute('update list set name=? where id=?', [name, list_id])
        g.db.commit()
        # a Flask view must return a response; the original returned None
        return json_ok_response()
    else:
        abort(400)
@app.route('/lists', methods=['POST'])
@requires_auth
def get_lists():
    """Return every list the authenticated user is a member of, as {lists: [...]}."""
    session_api_key = request.get_json()['session_api_key']
    user_id = get_auth_user(session_api_key=session_api_key)
    membership_rows = g.db.execute(
        'select list_id from list_member where (user_id==?)', [user_id]
    ).fetchall()
    lists = []
    for membership in membership_rows:
        list_row = g.db.execute(
            'select id, name from list where (id==?)', [membership[0]]
        ).fetchone()
        lists.append(dict(list_name=list_row[1], list_id=list_row[0]))
    return json_ok_response(dict(lists=lists))
@app.route('/list', methods=['POST'])
@requires_auth
def get_list():
    """Return full detail for one list: name, owner, members, and items.

    The caller must be a member of the list (not necessarily its owner).
    Response shape: {list_id, list_name, owner_id, list_members, list_items}.
    """
    content = request.get_json()
    if not content:
        abort(400) # invalid request
    list_id = content.get('list_id')
    if list_id is None:
        abort(400)
    # check that the user has authorization to access this list
    session_api_key = content['session_api_key']
    user_id = get_auth_user(session_api_key=session_api_key)
    # make sure the user is a member of this list
    if not list_exists_and_user_is_member(list_id=list_id, user_id=user_id):
        abort(400, 'User does not belong to requested list, or list does not exist')
    # get the name of the list
    list_info = g.db.execute('select name, owner_id from list where id==?', [list_id]).fetchone()
    list_name = list_info[0]
    list_owner_id = list_info[1]
    # compile the members of the list (one extra query per member)
    list_members = []
    list_member_rows = g.db.execute('select user_id from list_member where list_id==?', [list_id]).fetchall()
    for member_row in list_member_rows:
        member_id = member_row[0]
        list_member = g.db.execute('select username, email from user where id==?', [member_id]).fetchone()
        member_username = list_member[0]
        member_email = list_member[1]
        member = dict(id=member_id, username=member_username, email=member_email)
        list_members.append(member)
    # compile the items in the list
    list_items = []
    list_item_rows = g.db.execute('select id, value, checked from list_item where list_id==?', [list_id])
    for item_row in list_item_rows:
        item_id = item_row[0]
        item_value = item_row[1]
        item_checked = item_row[2]
        item = dict(id=item_id, value=item_value, checked=item_checked)
        list_items.append(item)
    data = dict(list_id=list_id, list_name=list_name, owner_id=list_owner_id, list_members=list_members, list_items=list_items)
    return json_ok_response(data)
@app.route('/list/adduser', methods=['POST'])
@requires_auth
def list_add_user():
    """Add another user (by email) to a list; only the list owner may do this."""
    session_api_key = request.get_json()['session_api_key']
    current_user_id = get_auth_user(session_api_key=session_api_key)
    content = request.get_json()
    if not content:
        abort(400) # invalid request
    list_id = content.get('list_id')
    if list_id is None:
        abort(400)
    user_to_add_email = content.get('user_email')
    if user_to_add_email is None:
        abort(400)
    # truthy row count: list exists and is owned by the caller
    list_exists_and_current_user_is_owner = len(g.db.execute('select * from list where owner_id==? and id==?', [current_user_id, list_id]).fetchall())
    if not list_exists_and_current_user_is_owner:
        abort(400)
    user_to_add_id = get_user_id_from_email(email=user_to_add_email)
    if not user_to_add_id:
        abort(400)
    # check to see if the user is already a member of this list
    already_member = len(g.db.execute('select * from list_member where list_id==? and user_id==?', [list_id,user_to_add_id]).fetchall())
    if already_member:
        abort(400)
    g.db.execute('insert into list_member (list_id, user_id) values (?, ?)', [list_id, user_to_add_id])
    g.db.commit()
    return json_ok_response()
@app.route('/list/removeuser', methods=['POST'])
@requires_auth
def list_remove_user():
    """Remove a user from a list (owner only); the owner removing themself deletes the list."""
    session_api_key = request.get_json()['session_api_key']
    current_user_id = get_auth_user(session_api_key=session_api_key)
    content = request.get_json()
    if not content:
        abort(400)  # invalid request
    # get the list id and check if it's valid
    list_id = content.get('list_id')
    if list_id is None:
        abort(400)
    # get the user to remove and check if it's valid and they actually belong to this list
    user_to_remove_id = content.get('user_id')
    if user_to_remove_id == current_user_id:
        # if you remove yourself from a list you own, the whole list is deleted, until we support ownership change
        g.db.execute('delete from list where id==?', [list_id])
        # BUG FIX: the original said `d.db.commit()` (NameError) — commit on
        # the per-request connection g.db.
        g.db.commit()
        return json_ok_response()
    if user_to_remove_id is None:
        abort(400)
    list_exists_and_current_user_is_owner = len(g.db.execute('select * from list where owner_id==? and id==?', [current_user_id, list_id]).fetchall())
    if not list_exists_and_current_user_is_owner:
        abort(400)
    user_belongs_to_list = len(g.db.execute('select * from list_member where list_id==? and user_id==?', [list_id, user_to_remove_id]).fetchall())
    if not user_belongs_to_list:
        abort(400)  # that user doesn't belong to this list, you can't remove them
    # we have made all of the necessary checks, delete the row from the table where this user is a member of this list!
    g.db.execute('delete from list_member where list_id==? and user_id==?', [list_id, user_to_remove_id])
    g.db.commit()
    return json_ok_response()
"""~~~~ List Item Routes ~~~~"""
def item_with_value_exists_in_list(list_id, value):
    """True when some item in *list_id* already carries exactly *value*."""
    rows = g.db.execute(
        'select value from list_item where list_id==?', [list_id]
    ).fetchall()
    return any(row[0] == value for row in rows)
def item_exists_in_list(list_id, item_id):
    """Truthy (matching row count) when item *item_id* belongs to list *list_id*."""
    matching = g.db.execute(
        'select * from list_item where id==? and list_id==?',
        [item_id, list_id],
    ).fetchall()
    return len(matching)
@app.route('/list/additemtolist', methods=['POST'])
@requires_auth
def add_list_item():
    """Add a new {value, checked} item to a list the caller is a member of."""
    session_api_key = request.get_json()['session_api_key']
    current_user_id = get_auth_user(session_api_key=session_api_key)
    content = request.get_json()
    if not content:
        abort(400, 'No message content')  # invalid request
    # get the list id and check if it's valid
    list_id = content.get('list_id')
    if list_id is None:
        abort(400, 'No list_id')
    if not list_exists_and_user_is_member(list_id=list_id, user_id=current_user_id):
        abort(400, 'Not member of list')
    list_item = content.get('item')
    if list_item is None:
        abort(400, 'No list item')
    list_item_value = list_item.get('value')
    # BUG FIX: validate presence BEFORE coercing to bool — the original did
    # bool(list_item.get('checked')) first, so the None-check below was dead
    # and a missing 'checked' field silently became False.
    list_item_checked = list_item.get('checked')
    if list_item_value is None or list_item_checked is None:
        abort(400, 'List item has no data in the \'value\' or \'checked\' fields')
    list_item_checked = bool(list_item_checked)
    # check if there is already an entry in this list with this value, if there is, abort
    if item_with_value_exists_in_list(list_id=list_id, value=list_item_value):
        abort(400, 'An item with this value already exists in this list')
    g.db.execute('insert into list_item (list_id, value, checked) values (?,?,?)', [list_id, list_item_value, list_item_checked])
    g.db.commit()
    return json_ok_response()
@app.route('/list/updatelistitem', methods=['POST'])
@requires_auth
def update_list_item():
    """Update an item's value/checked state; caller must be a member of the list."""
    session_api_key = request.get_json()['session_api_key']
    current_user_id = get_auth_user(session_api_key=session_api_key)
    content = request.get_json()
    if not content:
        abort(400)  # invalid request
    # get the list id and check if it's valid
    list_id = content.get('list_id')
    if list_id is None:
        abort(400)
    if not list_exists_and_user_is_member(list_id=list_id, user_id=current_user_id):
        abort(400)
    list_item = content.get('item')
    if list_item is None:
        abort(400)
    list_item_to_update_id = list_item.get('id')
    if list_item_to_update_id is None:
        abort(400)
    list_item_value = list_item.get('value')
    # BUG FIX: validate presence before bool() — the original coerced first,
    # making the None-check dead (missing 'checked' silently became False).
    list_item_checked = list_item.get('checked')
    if list_item_value is None or list_item_checked is None:
        abort(400)
    list_item_checked = bool(list_item_checked)
    # check if there is already an entry in this list with this value that does not have this id, if there is, abort
    # BUG FIX: the uniqueness query must be scoped to THIS list; the original
    # omitted list_id and rejected values merely used in other users' lists.
    if (len(g.db.execute('select id from list_item where list_id==? and value==? and id!=?', [list_id, list_item_value, list_item_to_update_id]).fetchall()) != 0):
        abort(400)
    g.db.execute('update list_item set value=?, checked=? where id=?', [list_item_value, list_item_checked, list_item_to_update_id])
    g.db.commit()
    return json_ok_response()
@app.route('/list/removeitemfromlist', methods=['POST'])
@requires_auth
def remove_list_item():
    """Delete one item from a list; caller must be a member and the item must belong to the list."""
    session_api_key = request.get_json()['session_api_key']
    current_user_id = get_auth_user(session_api_key=session_api_key)
    content = request.get_json()
    if not content:
        abort(400) # invalid request
    # get the list id and check if it's valid
    list_id = content.get('list_id')
    if list_id is None:
        abort(400)
    if not list_exists_and_user_is_member(list_id=list_id, user_id=current_user_id):
        abort(400)
    list_item_to_remove_id = content.get('item_id')
    if list_item_to_remove_id is None:
        abort(400)
    if not item_exists_in_list(list_id=list_id, item_id=list_item_to_remove_id):
        abort(400)
    # the user making the request is a member of the list and the item to remove belongs to the list, so remove it
    g.db.execute('delete from list_item where id==?', [list_item_to_remove_id])
    g.db.commit()
    return json_ok_response()
if __name__ == '__main__':
    # Run Flask's built-in development server (DEBUG is enabled via config).
    app.run()
|
# Demonstration script: k-means behavior on blobs, stretched blobs, and
# two_moons — including its failure modes (wrong k, anisotropic clusters,
# non-convex shapes) and k-means as a feature transform.
import numpy as np
from sklearn.datasets import make_blobs, make_moons
from sklearn.cluster import KMeans
import mglearn
import matplotlib.pyplot as plt
# generate synthetic two-dimensional data
X, y = make_blobs(random_state=1)
# build the clustering model
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
plt.figure(figsize=(11, 4.5))
plt.subplots_adjust(left=0.32, right=0.68)
mglearn.discrete_scatter(X[:, 0], X[:, 1], kmeans.labels_, markers='o')
# triangles mark the three learned cluster centers
mglearn.discrete_scatter(
    kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], [0, 1, 2],
    markers='^', markeredgewidth=2)
fig, axes = plt.subplots(1, 2, figsize=(11, 4.5))
# using two cluster centers:
kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
assignments = kmeans.labels_
mglearn.discrete_scatter(X[:, 0], X[:, 1], assignments, ax=axes[0])
# using five cluster centers:
kmeans = KMeans(n_clusters=5)
kmeans.fit(X)
assignments = kmeans.labels_
mglearn.discrete_scatter(X[:, 0], X[:, 1], assignments, ax=axes[1])
plt.figure(figsize=(11, 4.5))
plt.subplots_adjust(left=0.32, right=0.68)
# clusters with very different standard deviations confuse k-means
X_varied, y_varied = make_blobs(n_samples=200,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=170)
y_pred = KMeans(n_clusters=3, random_state=0).fit_predict(X_varied)
mglearn.discrete_scatter(X_varied[:, 0], X_varied[:, 1], y_pred)
plt.legend(["cluster 0", "cluster 1", "cluster 2"], loc='best')
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.figure(figsize=(11, 4.5))
plt.subplots_adjust(left=0.32, right=0.68)
# generate some random grouped data
X, y = make_blobs(random_state=170, n_samples=600)
rng = np.random.RandomState(74)
# transform the data so the clusters are stretched (anisotropic)
transformation = rng.normal(size=(2, 2))
X = np.dot(X, transformation)
# cluster the data into 3 clusters
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
y_pred = kmeans.predict(X)
# plot the cluster assignments and cluster centers
mglearn.discrete_scatter(X[:, 0], X[:, 1], kmeans.labels_, markers='o')
mglearn.discrete_scatter(
    kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], [0, 1, 2],
    markers='^', markeredgewidth=2)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.figure(figsize=(11, 4.5))
plt.subplots_adjust(left=0.32, right=0.68)
# generate the synthetic two_moons dataset (with little noise this time)
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
# cluster the data into 2 clusters
kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
y_pred = kmeans.predict(X)
# plot the cluster assignments and cluster centers
plt.scatter(X[:, 0], X[:, 1], c=y_pred, cmap=mglearn.cm2, s=60, edgecolor='k')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            marker='^', c=[mglearn.cm2(0), mglearn.cm2(1)], s=100, linewidth=2,
            edgecolor='k')
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
# many centers on two_moons: k-means as vector quantization
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
kmeans = KMeans(n_clusters=10, random_state=0)
kmeans.fit(X)
y_pred = kmeans.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_pred, s=60, cmap='Paired')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=60,
            marker='^', c=range(kmeans.n_clusters), linewidth=2, cmap='Paired')
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
print("Cluster memberships:\n{}".format(y_pred))
# plt.show()
# distances to the 10 centers serve as a new 10-dimensional feature space
distance_features = kmeans.transform(X)
print("Distance feature shape: {}".format(distance_features.shape))
print("Distance features:\n{}".format(distance_features))
|
"""Adapted from:
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import argparse
import os, os.path as osp
import time
import numpy as np
import cv2
import torch
from torch.autograd import Variable
from lib.utils.config import cfg, merge_cfg_from_file
from lib.datasets import dataset_factory
from lib.models import model_factory
from lib.utils import eval_solver_factory
from lib.utils.utils import setup_cuda, setup_folder
from lib.layers import DetectOut
import xml.etree.ElementTree as ET
# Backward-compat shim: checkpoints saved by newer PyTorch reference
# torch._utils._rebuild_tensor_v2, which older installs lack. If it is
# missing, synthesize it from the legacy _rebuild_tensor helper.
try:
    torch._utils._rebuild_tensor_v2
except AttributeError:
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        # Rebuild with the legacy helper, then restore the extra attributes
        # carried by the v2 serialization format.
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
def str2bool(v):
    """Interpret common truthy strings ('yes', 'true', 't', '1'), case-insensitively."""
    truthy_values = ("yes", "true", "t", "1")
    return v.lower() in truthy_values
def save_list2txt(img_gt_list, file_path):
    """Write a list of strings to *file_path*; no-op for None or empty input.

    Uses a context manager so the handle is closed even if writing raises
    (the original leaked the handle on error). Note: writelines does not
    append newlines — entries must already include them.
    """
    if img_gt_list is None or len(img_gt_list) == 0:
        return
    with open(file_path, 'w') as fw:
        fw.writelines(img_gt_list)
# Command-line interface for the SSD evaluation script.
parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--cfg_name', default='ssd_vgg16_voc_re', type=str,
                    help='Trained state_dict file path to open')
parser.add_argument('--job_group', default='rfbnet', type=str,
                    help='Directory for saving checkpoint models')
parser.add_argument('--trained_model', default=None, type=str,
                    help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='./results/debug', type=str,
                    help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
                    help='Detection confidence threshold')
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--num_workers', default=4, type=int,
                    help='cpu workers for datasets processing')
parser.add_argument('--top_k', default=5, type=int,
                    help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to train model')
parser.add_argument('--devices', default="0", type=str,
                    help='GPU to use')
parser.add_argument('--net_gpus', default=[0,], type=list,
                    help='GPU to use for net forward')
parser.add_argument('--loss_gpu', default=0, type=list,
                    help='GPU to use for loss calculation')
parser.add_argument('--cleanup', default=True, type=str2bool,
                    help='Cleanup and remove results files following eval')
# parser.add_argument('--show_test_image', default=False, type=str2bool,
#                     help='Cleanup and remove results files following eval')
parser.add_argument('--save_log', default=False, type=str2bool,
                    help='save log or not')
parser.add_argument('--test_path', default=None, type=str,
                    help='test path')
parser.add_argument('--vis', default=False, type=str2bool,
                    help='vis')
parser.add_argument('--crop', default=False, type=str2bool,
                    help='save crop')
args = parser.parse_args()
#python test.py --cfg_name res10_face_t --job_group face --trained_model ./weights/face/res10_face_t/res10_face_t_20000dark86.9.pth --test_path ./test_imgs --vis 1
# Class/color lookup tables; index 0 is background for class_ind, and
# label_name is indexed by (detection class index - 1) in the main loop.
class_ind = ['bg','car','person','bicycle','tricycle']
color_ind = [(255,0,0), (0,255,0),(0,0,255),(255,0,0),(255,255,0)]
label_name = ['drink','phone','hand','face']
classes_name = ['part_cover','all_cover','lp', 'nolp']
def save_xml(max_conf_bbx, img_name):
    """Write a Pascal-VOC-style annotation for the highest-confidence box.

    Uses an existing XML file as a template: any object whose name contains
    'part_cover' is renamed to 'gg' and its bndbox is overwritten with
    *max_conf_bbx*; the result is written to ./Annotations/<img_name>.xml.

    NOTE(review): both the template path below and the output directory are
    hard-coded to one machine — parameterize before reuse.
    """
    xmin, ymin, xmax, ymax, _s, label = max_conf_bbx
    new_xml_path = './Annotations/{}.xml'.format(img_name[:-4])
    tree = ET.parse('/home/maolei/data/coverLP_det/coverlp_det_20181208/Annotations/193957909_1.xml')
    target = tree.getroot()
    name = None
    for obj in target.iter('object'):
        name = obj.find('name').text.lower().strip()
        if 'part_cover' in name:
            new_name = 'gg'
            obj.find('name').text = new_name
            bbox = obj.find('bndbox')
            bbox.find('xmin').text = str(int(xmin))
            bbox.find('ymin').text = str(int(ymin))
            bbox.find('xmax').text = str(int(xmax))
            bbox.find('ymax').text = str(int(ymax))
        else:
            #print('gg', name, img_path)
            pass
    tree.write(new_xml_path)
if __name__ == '__main__':
    # --- configuration & model loading -----------------------------------
    tb_writer, cfg_path, snapshot_dir, log_dir = setup_folder(args, cfg, phase='eval')
    cfg.DATASET.NUM_EVAL_PICS = 0
    cfg.EVAL.ONLY_SAVE_RESULTS = True
    cfg.DATASET.EVAL_BATCH_SIZE = 8
    cfg.DATASET.NUM_WORKERS = 2
    # cfg.DATASET.VAL_DATASET_DIR = '/home/maolei/data/coverLP_det/'
    # cfg.DATASET.TEST_SETS = (('test_data', 'small_test.txt'), )
    if tb_writer is not None:
        tb_writer.cfg['show_test_image'] = args.save_log
    model_dir = args.trained_model
    np.set_printoptions(precision=3, suppress=True, edgeitems=4)
    #loader = dataset_factory(phase='eval', cfg=cfg)
    # load net
    net, priors, _ = model_factory(phase='eval', cfg=cfg)
    # checkpoint stores the weights under 'state_dict'
    # net.load_state_dict(torch.load(model_dir))
    net.load_state_dict(torch.load(model_dir)['state_dict'])
    if args.cuda:
        net = torch.nn.DataParallel(net)
        net = net.cuda()
        # volatile=True: old-style (pre-0.4) inference mode, no autograd history
        priors = Variable(priors.cuda(), volatile=True)
    else:
        priors = Variable(priors)
    net.eval()
    detector = DetectOut(cfg)
    print('test_type:', cfg.DATASET.TEST_SETS, 'test_model:', args.trained_model,
          'device_id:', args.devices, 'test_dir:', args.test_path)
    # --- resolve the input: either a directory of images or a list file ---
    #files = os.listdir(args.test_dir)
    file_list = args.test_path
    if file_list[-1] == '/': file_list = file_list[:-1]
    data_name = os.path.splitext(os.path.basename(file_list))[0]
    parent_dir = os.path.dirname(os.path.abspath(file_list))
    save_path = parent_dir + '/{}_results/'.format(data_name)
    print('save_path:', save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    frame = 0
    save_list = []
    fw_rs = open(save_path + '/test_tmp.txt', 'w')
    if os.path.isdir(file_list):
        data_root = file_list + '/'
        lines = os.listdir(file_list)
    else:
        data_root = parent_dir + '/'
        lines = open(file_list).readlines()
    # --- per-image detection loop -----------------------------------------
    for idx, f in enumerate(lines):
        #if idx > 10: break
        f = f.strip().split()[0]
        img_name = osp.basename(f)
        if frame % 100 == 0: print("processing ", frame)
        if f[-3:] not in ['jpg', 'png', 'bmp']:
            print(f, 'is not image')
            continue
        img_root = os.path.join(data_root, f)
        img = cv2.imread(img_root)
        if img is None:
            print(img_root)
            continue
        im_copy = img.copy()
        h,w,c = img.shape
        # preprocess: resize to network input, subtract BGR means, BGR->RGB, HWC->CHW
        x = cv2.resize(img, (cfg.DATASET.IMAGE_SIZE[1], cfg.DATASET.IMAGE_SIZE[0])).astype(np.float32)
        x -= (104., 117., 123.)
        x = x[:, :, (2, 1, 0)]
        x = torch.from_numpy(x).permute(2,0,1)
        x = Variable(x.unsqueeze(0)).cuda()
        # net = net.cuda()
        loc, conf = net(x, phase='eval')
        detections = detector(loc, conf, priors).data
        cnt = 0
        # best detection across all classes for this image:
        #xmin, ymin, xmax, ymax, score, cls
        max_conf_bbx = [-1., -1., -1., -1., -1., -1.] #conf idx
        for j in range(1, detections.size(1)):
            #print(j)
            dets = detections[0, j, :]
            label = label_name[j-1]
            # keep rows with positive confidence; each row is [score, x1, y1, x2, y2]
            mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
            dets = torch.masked_select(dets, mask).view(-1, 5)
            if dets.dim() == 0:
                continue
            # scale normalized box coordinates back to pixel space
            boxes = dets[:, 1:]
            boxes[:, 0] *= w
            boxes[:, 2] *= w
            boxes[:, 1] *= h
            boxes[:, 3] *= h
            scores = dets[:, 0].cpu().numpy()
            for t in range(scores.shape[0]):
                if(scores[t] > 0.5):
                    # clamp to image bounds and round to one decimal
                    x1 = min(boxes[t][0], w)
                    x1 = round(max(x1, 0), 1)
                    x2 = min(boxes[t][2], w)
                    x2 = round(max(x2, 0), 1)
                    y1 = min(boxes[t][1], h)
                    y1 = round(max(y1, 0), 1)
                    y2 = min(boxes[t][3], h)
                    y2 = round(max(y2, 0), 1)
                    # track the single highest-scoring box over all classes
                    if max_conf_bbx[4] < scores[t]:
                        max_conf_bbx[0] = x1
                        max_conf_bbx[1] = y1
                        max_conf_bbx[2] = x2
                        max_conf_bbx[3] = y2
                        max_conf_bbx[4] = scores[t]
                        max_conf_bbx[5] = j - 1
                    if args.vis:
                        # size filter disabled by the `True or`; draw every box
                        if True or (x2 - x1 > 20 and y2 - y1 > 20):
                            fw_rs.write(' '.join([f, str(scores[t]), str(j), str(x1), str(y1), str(x2), str(y2)]) + '\n')
                            cv2.rectangle(img,(int(x1),int(y1)),(int(x2),int(y2)),color_ind[j],2)
                            cv2.putText(img,label+'_'+str(scores[t]),(int(x1),int(y1)),cv2.FONT_HERSHEY_PLAIN,2,(255,0,255),3)
                    if args.crop:
                        # crop the detection expanded by 50% of its size on each side
                        scale = 0.5
                        if x2-x1>30 and y2-y1>30:
                            fw_rs.write(' '.join([f, str(scores[t]), str(j), str(x1), str(y1), str(x2), str(y2)]) + '\n')
                            roih, roiw = y2-y1, x2-x1
                            xmin = max(0, x1 - roiw*scale)
                            ymin = max(0, y1 - roih*scale)
                            xmax = min(w, x2 + roiw*scale)
                            ymax = min(h, y2 + roih*scale)
                            roi = im_copy[int(ymin):int(ymax), int(xmin):int(xmax)]
                            name=os.path.join(save_path, str(cnt)+'_'+f)
                            cv2.imwrite(name, roi)
                            cnt+=1
        if args.vis:
            cv2.imwrite(os.path.join(save_path, f.split('/')[-1]), img)
        frame += 1
        # save_list2txt(save_list, '{}_result.txt'.format(data_name))
    fw_rs.close()
|
<reponame>milescsmith/cDNA_Cupcake<gh_stars>0
#!/usr/bin/env python
import re
import sys
from collections import defaultdict
from csv import DictReader, DictWriter
from pathlib import Path
from typing import Optional, Tuple
import typer
from Bio import SeqIO
from cupcake import version_callback
from cupcake import cupcake_logger as logger
from cupcake.sequence.GFF import collapseGFFReader
# Matches fusion isoform IDs of the form "PBfusion.<gene>.<isoform>".
# Dots are escaped: the original pattern used bare `.` which matches any
# character, so strings like "PBfusionX12Y3" would incorrectly match.
fusion_pbid = re.compile(r"PBfusion\.(\d+)\.(\d+)")

"""
Run after fusion_finder.py + SQANTI3 classification
"""

# Column order for the annotated fusion CSV written by collate_info().
FIELDS = [
    "UniqueID",
    "FusionName",
    "LeftGeneName",
    "LeftGeneID",
    "LeftBreakpoint",
    "LeftFlankingSequence",
    "RightGeneName",
    "RightGeneID",
    "RightBreakpoint",
    "RightFlankingSequence",
    "JunctionSupport",
    "SpanningReads",
    "ReadCountScore",
    "Sequence",
    "LeftORF",
    "RightORF",
    "LeftExonCount",
    "RightExonCount",
    "LeftCDSExonCount",
    "RightCDSExonCount",
    "Comments",
]
# Typer sub-application exposing this module's CLI (see `main` below).
app = typer.Typer(name="cupcake.tofu.fusion_collate_into")
def get_breakpoint_n_seq(
    r1, r2, genome_dict: Optional[dict] = None, flanking_size: int = 50
) -> Tuple[str, str, str, str]:
    """Return breakpoints and flanking sequences for a fusion pair.

    `r1`/`r2` are GFF records (need `.strand`, `.chr`, `.start`, `.end`).
    `genome_dict` maps chromosome name to a Biopython-style sequence record
    supporting slicing, `.seq` and `.reverse_complement()`; the original
    annotation said `Optional[str]`, which was wrong. When `genome_dict` is
    None, flanking sequences are reported as "NA".

    Returns `(left_breakpoint, left_seq, right_breakpoint, right_seq)` with
    breakpoints formatted as "chr:pos:strand" (1-based on the minus strand
    left side, hence the `start + 1`).
    """
    # Left side: end of r1 on '+', start of r1 (reverse-complemented) on '-'.
    if r1.strand == "+":
        left_breakpoint = f"{r1.chr}:{r1.end}:+"
        if genome_dict is not None:
            left_seq = str(genome_dict[r1.chr][r1.end - flanking_size : r1.end].seq)
        else:
            left_seq = "NA"
    else:
        left_breakpoint = f"{r1.chr}:{r1.start+1}:-"
        if genome_dict is not None:
            left_seq = str(
                genome_dict[r1.chr][r1.start : r1.start + flanking_size]
                .reverse_complement()
                .seq
            )
        else:
            left_seq = "NA"
    # Right side mirrors the left: start of r2 on '+', end of r2 on '-'.
    if r2.strand == "+":
        right_breakpoint = f"{r2.chr}:{r2.start}:+"
        if genome_dict is not None:
            right_seq = str(
                genome_dict[r2.chr][r2.start : r2.start + flanking_size].seq
            )
        else:
            right_seq = "NA"
    else:
        right_breakpoint = f"{r2.chr}:{r2.end}:-"
        if genome_dict is not None:
            right_seq = str(
                genome_dict[r2.chr][r2.end - flanking_size : r2.end]
                .reverse_complement()
                .seq
            )
        else:
            right_seq = "NA"
    return left_breakpoint, left_seq, right_breakpoint, right_seq
def collate_info(
    fusion_prefix: str,
    class_filename: str,
    genepred_filename: str,
    total_fl_count: Optional[int] = None,
    config_filename: Optional[str] = None,
    genome_dict: Optional[dict] = None,
    cds_gff_filename: Optional[str] = None,
    min_fl_count: int = 2,
    min_breakpoint_dist_kb: int = 10,
    include_Mt_genes: bool = False,
) -> None:
    """Collate per-fusion information into annotated CSV reports.

    Combines the SQANTI3 classification (`class_filename`), the GenePred
    gene-name map (`genepred_filename`), rep sequences
    (`<fusion_prefix>.rep.fa`), optional abundance counts
    (`<fusion_prefix>.abundance.txt`) and GFF breakpoints into two CSV
    files: `<fusion_prefix>.annotated.txt` for fusions whose `Comments`
    column is "PASS" and `<fusion_prefix>.annotated_ignored.txt` for
    fusions failing a filter (novel gene, same gene, too few FL reads,
    Mt genes, or breakpoints closer than `min_breakpoint_dist_kb`).
    """
    global_info = {}  # holding information for general information
    # Optional key=value config; its keys become extra leading CSV columns
    # and its values are copied into every output row.
    if config_filename is not None:
        logger.info(f"Reading config file {config_filename}...")
        for line in open(config_filename):
            k, v = line.strip().split("=")
            global_info[k] = v
    gene_to_id = {}  # gene name --> ensembl ID
    # GenePred: column 0 is the ID, column 11 the gene symbol.
    for line in open(genepred_filename):
        raw = line.strip().split()
        gene_to_id[raw[11]] = raw[0]
    d = defaultdict(lambda: {})  # PBfusion.X --> isoform index -> sqanti3 record
    orf_dict = {}
    # read SQANTI3 classification file
    for r in DictReader(open(class_filename), delimiter="\t"):
        m = fusion_pbid.match(r["isoform"])
        if m is None:
            logger.error("ERROR: fusion pbid must follow format `PBfusion.X.Y`. Abort!")
            sys.exit(-1)
        # NOTE(review): indexes are kept as *strings* here but as *ints* in
        # gff_d below, so the later sort of iso_dict is lexicographic
        # ("10" < "2") while gff_info sorts numerically — confirm intended
        # for fusions with >= 10 isoforms.
        gene_index, isoform_index = m.group(1), m.group(2)
        d[gene_index][isoform_index] = r
        orf_dict[r["isoform"]] = r["ORF_seq"]
    # get sequences
    seq_dict = {
        r.id.split("|")[0]: r.seq
        for r in SeqIO.parse(open(f"{fusion_prefix}.rep.fa"), "fasta")
    }
    # get count information
    count_d = defaultdict(lambda: "NA")
    count_filename = f"{fusion_prefix}.abundance.txt"
    if Path(count_filename).exists():
        for r in DictReader(open(count_filename), delimiter="\t"):
            count_d[r["pbid"]] = int(r["count_fl"])
        if total_fl_count is None:
            logger.info(
                "Total FL count not given --- using the sum FL count from fusions only instead."
            )
            total_fl_count = sum(count_d.values())
    # get breakpoint information
    gff_d = defaultdict(lambda: {})  # PBfusion.X --> isoform index -> sqanti3 record
    # Prefer the CDS GFF when provided (also enables CDS exon counts below).
    if cds_gff_filename is None:
        gff_filename = f"{fusion_prefix}.gff"
    else:
        gff_filename = cds_gff_filename
    for r in collapseGFFReader(gff_filename):
        m = fusion_pbid.match(r.seqid)
        if m is None:
            logger.error(
                f"ERROR: fusion pbid in {gff_filename} must follow format `PBfusion.X.Y`. Abort!"
            )
            sys.exit(-1)
        gene_index, isoform_index = m.group(1), int(m.group(2))
        gff_d[gene_index][isoform_index] = r
        if r.strand not in ("+", "-"):
            logger.error(
                f"ERROR: fusion {r.seqid} did not specify strand in {gff_filename}! Abort!"
            )
            sys.exit(-1)
    fields2 = list(global_info.keys()) + FIELDS
    with open(f"{fusion_prefix}.annotated.txt", "w") as f, open(
        f"{fusion_prefix}.annotated_ignored.txt", "w"
    ) as f_bad:
        writer = DictWriter(f, fields2, delimiter=",")
        writer.writeheader()
        writer_bad = DictWriter(f_bad, fields2, delimiter=",")
        writer_bad.writeheader()
        for gene_index, iso_dict in d.items():
            iso_dict = list(iso_dict.items())  # (isoform index, classification record)
            iso_dict.sort(key=lambda x: x[0])
            has_novel = any(
                r["associated_gene"].startswith("novelGene")
                or r["associated_gene"] == ""
                for junk, r in iso_dict
            )
            pbid = f"PBfusion.{str(gene_index)}"
            # First and last records are treated as the left/right halves of
            # the fusion.
            gff_info = list(gff_d[gene_index].items())
            gff_info.sort(key=lambda x: x[0])
            rec1 = gff_info[0][1]
            rec2 = gff_info[-1][1]
            (
                left_breakpoint,
                left_seq,
                right_breakpoint,
                right_seq,
            ) = get_breakpoint_n_seq(rec1, rec2, genome_dict)
            left_exon_count = len(rec1.ref_exons)
            right_exon_count = len(rec2.ref_exons)
            gene1 = iso_dict[0][1]["associated_gene"]
            gene2 = iso_dict[-1][1]["associated_gene"]
            # CDS exon counts are only available when a CDS GFF was supplied.
            if cds_gff_filename is not None:
                left_cds_exon_count = len(rec1.cds_exons)
                right_cds_exon_count = len(rec2.cds_exons)
            else:
                left_cds_exon_count = "NA"
                right_cds_exon_count = "NA"
            left_orf, right_orf = "NA", "NA"
            if orf_dict is not None:
                seqid1 = gff_info[0][1].seqid
                seqid2 = gff_info[-1][1].seqid
                left_orf = orf_dict[seqid1]
                right_orf = orf_dict[seqid2]
            info = {
                "UniqueID": pbid,
                "FusionName": "--".join(
                    [_r["associated_gene"] for (_index, _r) in iso_dict]
                ),
                "LeftGeneName": gene1,
                "LeftGeneID": gene_to_id[gene1] if gene1 in gene_to_id else "NA",
                "LeftBreakpoint": left_breakpoint,
                "LeftFlankingSequence": left_seq,
                "RightGeneName": gene2,
                "RightGeneID": gene_to_id[gene2] if gene2 in gene_to_id else "NA",
                "RightBreakpoint": right_breakpoint,
                "RightFlankingSequence": right_seq,
                "JunctionSupport": "NA",
                "SpanningReads": count_d[pbid],
                # Counts-per-million style normalization against the total
                # FL read count.
                "ReadCountScore": (count_d[pbid] * (10 ** 6) / total_fl_count)
                if count_d[pbid] != "NA"
                else "NA",
                "Sequence": seq_dict[pbid],
                "LeftORF": left_orf,
                "RightORF": right_orf,
                "LeftExonCount": left_exon_count,
                "RightExonCount": right_exon_count,
                "LeftCDSExonCount": left_cds_exon_count,
                "RightCDSExonCount": right_cds_exon_count,
                "Comments": "PASS",
            }
            info.update(global_info)
            left_chr, left_break, left_strand = left_breakpoint.split(":")
            right_chr, right_break, right_strand = right_breakpoint.split(":")
            # Filters applied in priority order; first match wins.
            if has_novel:
                info["Comments"] = "FAIL:NovelGene"
            elif gene1 == gene2:
                info["Comments"] = "FAIL:SameGene"
            elif info["SpanningReads"] != "NA" and info["SpanningReads"] < min_fl_count:
                info["Comments"] = "FAIL:TooFewFLReads"
            elif not include_Mt_genes and (
                gene1.startswith("MT-") or gene2.startswith("MT-")
            ):
                info["Comments"] = "FAIL:MtGenes"
            elif (
                left_chr == right_chr
                and abs(int(left_break) - int(right_break)) / 1000
                <= min_breakpoint_dist_kb
            ):
                info["Comments"] = "FAIL:BreakpointTooClose"
            if info["Comments"].startswith("FAIL:"):
                writer_bad.writerow(info)
            else:
                writer.writerow(info)
@app.command(name="")
def main(
    fusion_prefix: str = typer.Argument(
        ..., help="Prefix for fusion files (ex: my.fusion)"
    ),
    class_filename: str = typer.Argument(..., help="SQANTI3 classification filename"),
    genepred_filename: str = typer.Argument(
        ..., help="GenePred filename used by SQANTI3 classification"
    ),
    cds_gff: Optional[str] = typer.Option(None, help="CDS GFF filename"),
    total_fl_count: Optional[int] = typer.Option(
        None, help="Total FL count used to normalize fusion counts"
    ),
    config: Optional[str] = typer.Option(
        None, help="(optional) Additional information to include in the output"
    ),
    genome: Optional[str] = typer.Option(None, help="Reference genome"),
    min_fl_count: int = typer.Option(2, help="Minimum FL count"),
    min_breakpoint_dist_kb: int = typer.Option(
        10, help="Minimum breakpoint distance, in kb"
    ),
    include_Mt_genes: bool = typer.Option(False, help="Include Mt genes"),
    # NOTE(review): help text says "SQANTI3 package" but this module lives in
    # cupcake — confirm the intended wording.
    version: bool = typer.Option(
        None,
        "--version",
        callback=version_callback,
        is_eager=True,
        help="Prints the version of the SQANTI3 package.",
    ),
) -> None:
    """CLI entry point: load the optional reference genome, then delegate to
    collate_info() to write the annotated fusion reports."""
    if genome is not None:
        # Loads the whole genome into memory; can be large.
        genome_dict = SeqIO.to_dict(SeqIO.parse(open(genome), "fasta"))
        print(f"Finished reading reference genome {genome}.")
    else:
        genome_dict = None
    collate_info(
        fusion_prefix,
        class_filename,
        genepred_filename,
        total_fl_count=total_fl_count,
        config_filename=config,
        genome_dict=genome_dict,
        cds_gff_filename=cds_gff,
        min_fl_count=min_fl_count,
        min_breakpoint_dist_kb=min_breakpoint_dist_kb,
        include_Mt_genes=include_Mt_genes,
    )
if __name__ == "__main__":
    typer.run(main)
|
# coding: utf-8
import random
def partition(lst, start, end):
    """Lomuto partition of lst[start:end+1] using lst[end] as the pivot.

    Moves elements <= pivot to the left of the final pivot position and
    larger elements to the right, in place, and returns the pivot's new
    index.
    """
    # pivot value is the last element of the range
    pivot_value = lst[end]
    # boundary of the "<= pivot" region
    store_index = start
    for i in range(start, end):
        if lst[i] <= pivot_value:
            # swap only when the positions actually differ
            if i != store_index:
                lst[i], lst[store_index] = lst[store_index], lst[i]
            store_index += 1
    # Place the pivot into its final position. The original used the leaked
    # loop variable (`lst[i + 1]`), which is fragile and raises IndexError
    # for a single-element range; `end` is the pivot's index by definition.
    lst[store_index], lst[end] = lst[end], lst[store_index]
    return store_index
def quick_sort(lst, start, end):
    """In-place quicksort of lst[start:end+1]; partition() always takes the
    last element of the range as the pivot."""
    if start >= end:
        return lst
    # partition the range and recurse on both sides of the pivot
    split = partition(lst, start, end)
    quick_sort(lst, start, split - 1)
    quick_sort(lst, split + 1, end)
def quick_sort_median(lst, start, end):
    """In-place quicksort using a median-of-three random pivot for large
    ranges (>= 30 elements) and a random pivot otherwise.

    Fixes vs. the original:
    - the chosen pivot was swapped with ``lst[-1]`` (last element of the
      *whole* list) instead of ``lst[end]`` (last element of the current
      range), corrupting data outside the sub-range during recursion;
    - the "median" was ``list(dict.keys())[1]``, i.e. the second *inserted*
      key, not the median value;
    - the sampling loop never terminated when the range held fewer than
      three distinct values (duplicate keys collapse in a dict).
    """
    if start >= end:
        return lst
    if end - start >= 30:
        # sample three distinct positions; the median-valued one is the pivot
        candidates = random.sample(range(start, end + 1), 3)
        candidates.sort(key=lambda idx: lst[idx])
        pivot = candidates[1]
    else:
        pivot = random.randint(start, end)
    # move the pivot element to the end of the *range* for partition()
    lst[end], lst[pivot] = lst[pivot], lst[end]
    new_pivot = partition(lst, start, end)
    # recurse on both halves around the pivot's final position
    quick_sort_median(lst, start, new_pivot - 1)
    quick_sort_median(lst, new_pivot + 1, end)
def quick_sort_random(lst, start, end):
    """In-place quicksort with a uniformly random pivot.

    Fix vs. the original: the pivot was swapped with ``lst[-1]`` (end of the
    *whole* list) rather than ``lst[end]`` (end of the current range), which
    corrupted elements outside the sub-range during recursive calls.
    """
    if start >= end:
        return lst
    # pick a random pivot position within the range
    pivot = random.randint(start, end)
    # move the pivot element to the end of the range for partition()
    lst[end], lst[pivot] = lst[pivot], lst[end]
    new_pivot = partition(lst, start, end)
    quick_sort_random(lst, start, new_pivot - 1)
    quick_sort_random(lst, new_pivot + 1, end)
def quicksort(lst):
    """Sort `lst` in place using fixed-pivot quicksort and return it."""
    last = len(lst) - 1
    quick_sort(lst, 0, last)
    return lst
def quicksort_random(lst):
    """Sort `lst` in place using random-pivot quicksort and return it."""
    last = len(lst) - 1
    quick_sort_random(lst, 0, last)
    return lst
def quicksort_median(lst):
    """Implementation of quicksort algorithm with median-of-three pivot."""
    quick_sort_median(lst, 0, len(lst) - 1)
    return lst
# Fix: the original used `__name__ in "__main__"`, a substring test that is
# also true for __name__ == "" or "main"; equality is the intended check.
if __name__ == "__main__":
    a = [2, 8, 7, 1, 3, 5, 6, 4]
    print('list :', a)
    # each call re-sorts the (already sorted) list in place
    print('quicksort :', quicksort(a), quicksort(a) == sorted(a))
    print('quicksort random:', quicksort_random(a), quicksort_random(a) == sorted(a))
    print('quicksort median:', quicksort_median(a), quicksort_median(a) == sorted(a))
|
<reponame>xgess/btctxstore
# coding: utf-8
# Copyright (c) 2015 <NAME> <<EMAIL>>
# License: MIT (see LICENSE file)
from __future__ import print_function
from __future__ import unicode_literals
import binascii
from btctxstore import serialize
from btctxstore import deserialize
from btctxstore import control
from btctxstore import exceptions
from btctxstore import common
from btctxstore import services
from btctxstore import validate
class BtcTxStore():  # TODO use apigen when ported to python 3
    """Bitcoin nulldata output io library."""

    def __init__(self, testnet=False, dryrun=False, service="automatic"):
        """Create a store.

        @param: testnet Use the bitcoin testnet instead of mainnet.
        @param: dryrun If true, transactions are never actually published.
        @param: service Blockchain service backend name.
        """
        self.testnet = deserialize.flag(testnet)
        self.dryrun = deserialize.flag(dryrun)
        self.service = services.select(service, testnet=testnet,
                                       dryrun=dryrun)

    ###########
    # wallets #
    ###########

    def create_wallet(self, master_secret=b""):
        """Create a BIP0032-style hierarchical wallet.
        @param: master_secret Create from master secret, otherwise random.
        """
        master_secret = deserialize.bytes_str(master_secret)
        bip32node = control.create_wallet(self.testnet,
                                          master_secret=master_secret)
        return bip32node.hwif(as_private=True)

    def validate_wallet(self, hwif):
        """Return True if <hwif> is a valid wallet for this network."""
        return validate.wallet_network(hwif, self.testnet)

    ########
    # keys #
    ########

    def get_key(self, hwif):  # TODO add optional path for sub keys
        """Return the wif-format private key of wallet <hwif>."""
        bip32node = deserialize.wallet(self.testnet, hwif)
        return bip32node.wif()

    def create_key(self, master_secret=b""):
        """Create new private key and return in wif format.
        @param: master_secret Create from master secret, otherwise random.
        """
        master_secret = deserialize.bytes_str(master_secret)
        bip32node = control.create_wallet(self.testnet,
                                          master_secret=master_secret)
        return bip32node.wif()

    def validate_key(self, wif):  # TODO test
        """Return True if <wif> is a valid key for this network."""
        return validate.key_network(wif, self.testnet)

    #############
    # addresses #
    #############

    def get_address(self, wif):
        """Return bitcoin address for given wallet. """
        return deserialize.key(self.testnet, wif).address()

    def validate_address(self, address):  # TODO test
        """Return True if <address> is valid for this network."""
        return validate.address_network(address, self.testnet)

    ###############
    # transaction #
    ###############

    def create_tx(self, txins=None, txouts=None, lock_time=0):
        """Create unsigned rawtx with given txins/txouts as json data.
        <txins>: '[{"txid" : hexdata, "index" : integer}, ...]'
        <txouts>: '[{"address" : hexdata, "value" : satoshis}, ...]'
        """
        txins = [] if txins is None else txins
        txouts = [] if txouts is None else txouts
        lock_time = deserialize.positive_integer(lock_time)
        txins = deserialize.txins(txins)
        txouts = deserialize.txouts(self.testnet, txouts)
        tx = control.create_tx(self.service, self.testnet, txins, txouts,
                               lock_time=lock_time)
        return serialize.tx(tx)

    def send(self, wifs, txouts, change_address=None, lock_time=0, fee=10000):
        """Create, fund, sign and publish a transaction paying <txouts>.
        Inputs are taken from <wifs>; change goes to <change_address>
        (or the first wif if not given). Returns the published txid.
        """
        # FIXME test!!
        rawtx = self.create_tx(txouts=txouts, lock_time=lock_time)
        rawtx = self.add_inputs(rawtx, wifs, change_address=change_address,
                                fee=fee)
        return self.publish(rawtx)

    def add_inputs(self, rawtx, wifs, change_address=None, fee=10000,
                   dont_sign=False):
        """Add sufficient inputs from given <wifs> to cover <rawtx> outputs
        and <fee>. If no <change_address> is given, change will be sent to
        first wif.
        """
        tx = deserialize.tx(rawtx)
        keys = deserialize.keys(self.testnet, wifs)
        fee = deserialize.positive_integer(fee)
        if change_address is not None:
            change_address = deserialize.address(self.testnet, change_address)
        tx = control.add_inputs(self.service, self.testnet, tx, keys,
                                change_address=change_address, fee=fee)
        if not dont_sign:
            tx = control.sign_tx(self.service, self.testnet, tx, keys)
        return serialize.tx(tx)

    def sign_tx(self, rawtx, wifs):
        """Sign <rawtx> with given <wifs> as json data.
        <wifs>: '["privatekey_in_wif_format", ...]'
        """
        tx = deserialize.tx(rawtx)
        keys = deserialize.keys(self.testnet, wifs)
        tx = control.sign_tx(self.service, self.testnet, tx, keys)
        return serialize.tx(tx)

    #################
    # blockchain io #
    #################

    def retrieve_tx(self, txid):
        """Returns rawtx for <txid>."""
        txid = deserialize.txid(txid)
        tx = self.service.get_tx(txid)
        return serialize.tx(tx)

    def retrieve_utxos(self, addresses):
        """Get current utxos for <address>."""
        addresses = deserialize.addresses(self.testnet, addresses)
        spendables = control.retrieve_utxos(self.service, addresses)
        return serialize.utxos(spendables)

    def publish(self, rawtx):
        """Publish signed <rawtx> to bitcoin network."""
        tx = deserialize.signedtx(rawtx)
        if not self.dryrun:
            self.service.send_tx(tx)
        return serialize.txid(tx.hash())

    ###########
    # signing #
    ###########

    def sign_data(self, wif, hexdata):
        """Signing <hexdata> with <wif> private key."""
        data = deserialize.binary(hexdata)
        key = deserialize.key(self.testnet, wif)
        sigdata = control.sign_data(self.testnet, data, key)
        return serialize.signature(sigdata)

    def verify_signature(self, address, signature, hexdata):
        """Verify <signature> of <hexdata> by <address>."""
        try:
            address = deserialize.address(self.testnet, address)
            data = deserialize.binary(hexdata)
            signature = deserialize.signature(signature)
            return control.verify_signature(self.testnet, address,
                                            signature, data)
        except exceptions.InvalidAddress:
            return False

    def sign_unicode(self, wif, message):
        """Signing <unicode> with <wif> private key."""
        hexdata = binascii.hexlify(message.encode("utf-8"))
        return self.sign_data(wif, hexdata)

    def verify_signature_unicode(self, address, signature, message):
        """Verify <signature> of <unicode> by <address>."""
        hexdata = binascii.hexlify(message.encode("utf-8"))
        return self.verify_signature(address, signature, hexdata)

    ###############
    # hash160data #
    ###############

    def add_hash160data(self, rawtx, hexdata, dust_limit=common.DUST_LIMIT):
        """Writes <hexdata> as new Pay-to-PubkeyHash output to <rawtx>."""
        tx = deserialize.unsignedtx(rawtx)
        dust_limit = deserialize.positive_integer(dust_limit)
        hash160data_txout = deserialize.hash160data_txout(hexdata, dust_limit)
        tx = control.add_hash160data_output(tx, hash160data_txout)
        return serialize.tx(tx)

    def get_hash160data(self, rawtx, output_index):
        """Return the hash160 data embedded in <rawtx> output
        <output_index> as hexdata."""
        tx = deserialize.unsignedtx(rawtx)
        output_index = deserialize.positive_integer(output_index)
        data = control.get_hash160_data(tx, output_index)
        return serialize.data(data)

    def store_hash160data(self, hexdata, wifs, change_address=None,
                          txouts=None, fee=10000, lock_time=0,
                          dust_limit=common.DUST_LIMIT):
        """Store <hexdata> as a hash160 output in the blockchain and return
        the published txid. Inputs are taken from <wifs>."""
        rawtx = self.create_tx(txouts=txouts, lock_time=lock_time)
        rawtx = self.add_hash160data(rawtx, hexdata, dust_limit=dust_limit)
        rawtx = self.add_inputs(rawtx, wifs, change_address=change_address,
                                fee=fee)
        return self.publish(rawtx)

    def retrieve_hash160data(self, txid, output_index):
        """Return hash160 data stored in output <output_index> of <txid>."""
        rawtx = self.retrieve_tx(txid)
        # fixed: was self.get_hash160_data, a method that does not exist
        # (the accessor above is named get_hash160data), so this call
        # always raised AttributeError.
        return self.get_hash160data(rawtx, output_index)

    ############
    # nulldata #
    ############

    def add_nulldata(self, rawtx, hexdata):
        """Writes <hexdata> as new nulldata output to <rawtx>."""
        tx = deserialize.unsignedtx(rawtx)
        nulldata_txout = deserialize.nulldata_txout(hexdata)
        tx = control.add_nulldata_output(tx, nulldata_txout)
        return serialize.tx(tx)

    def get_nulldata(self, rawtx):
        """Returns nulldata from <rawtx> as hexdata."""
        tx = deserialize.tx(rawtx)
        index, data = control.get_nulldata(tx)
        return serialize.data(data)

    def store_nulldata(self, hexdata, wifs, change_address=None, txouts=None,
                       fee=10000, lock_time=0):
        """Store <hexdata> in blockchain and return new txid.
        Utxos taken from <wifs> and change sent to <change_address>.
        <wifs>: '["privatekey_in_wif_format", ...]'
        """
        rawtx = self.create_tx(txouts=txouts, lock_time=lock_time)
        rawtx = self.add_nulldata(rawtx, hexdata)
        rawtx = self.add_inputs(rawtx, wifs, change_address=change_address,
                                fee=fee)
        return self.publish(rawtx)

    def retrieve_nulldata(self, txid):
        """Returns nulldata stored in blockchain <txid> as hexdata."""
        rawtx = self.retrieve_tx(txid)
        return self.get_nulldata(rawtx)

    #############
    # data blob #
    #############

    def get_data_blob(self, rawtx):
        """Return the data blob embedded in <rawtx> as hexdata."""
        tx = deserialize.tx(rawtx)
        data = control.get_data_blob(tx)
        return serialize.data(data)

    def add_data_blob(self, rawtx, hexdata, dust_limit=common.DUST_LIMIT):
        """Write <hexdata> as a data blob to <rawtx> and return the
        updated rawtx."""
        tx = deserialize.tx(rawtx)
        data = deserialize.binary(hexdata)
        tx = control.add_data_blob(tx, data, dust_limit=dust_limit)
        return serialize.tx(tx)

    def store_data_blob(self, hexdata, wifs, change_address=None,
                        txouts=None, fee=10000, lock_time=0,
                        dust_limit=common.DUST_LIMIT):
        """Store <hexdata> as a data blob in the blockchain and return the
        published txid. Inputs are taken from <wifs>."""
        rawtx = self.create_tx(txouts=txouts, lock_time=lock_time)
        rawtx = self.add_data_blob(rawtx, hexdata, dust_limit=dust_limit)
        rawtx = self.add_inputs(rawtx, wifs, change_address=change_address,
                                fee=fee)
        return self.publish(rawtx)

    def retrieve_data_blob(self, txid):
        """Return the data blob stored in blockchain <txid> as hexdata."""
        rawtx = self.retrieve_tx(txid)
        return self.get_data_blob(rawtx)

    #####################
    # broadcast message #
    #####################

    def add_broadcast_message(self, rawtx, message, sender_wif,
                              dust_limit=common.DUST_LIMIT):
        """Add <message>, signed by <sender_wif>, as a broadcast message
        output to <rawtx> and return the updated rawtx."""
        tx = deserialize.tx(rawtx)
        message = deserialize.unicode_str(message)
        sender_key = deserialize.key(self.testnet, sender_wif)
        tx = control.add_broadcast_message(self.testnet, tx, message,
                                           sender_key, dust_limit=dust_limit)
        return serialize.tx(tx)

    def get_broadcast_message(self, rawtx):
        """Return the broadcast message in <rawtx>, with its signature
        serialized."""
        tx = deserialize.tx(rawtx)
        result = control.get_broadcast_message(self.testnet, tx)
        result["signature"] = serialize.signature(result["signature"])
        return result

    def store_broadcast_message(self, message, sender_wif, wifs,
                                change_address=None, txouts=None, fee=10000,
                                lock_time=0, dust_limit=common.DUST_LIMIT):
        """Store a signed broadcast <message> in the blockchain and return
        the published txid. Inputs are taken from <wifs>."""
        rawtx = self.create_tx(txouts=txouts, lock_time=lock_time)
        rawtx = self.add_broadcast_message(rawtx, message, sender_wif,
                                           dust_limit=dust_limit)
        rawtx = self.add_inputs(rawtx, wifs, change_address=change_address,
                                fee=fee)
        return self.publish(rawtx)

    def retrieve_broadcast_message(self, txid):
        """Return the broadcast message stored in blockchain <txid>."""
        rawtx = self.retrieve_tx(txid)
        return self.get_broadcast_message(rawtx)

    ########
    # misc #
    ########

    def confirms(self, txid):
        """Returns number of confirms or None if unpublished."""
        txid = deserialize.txid(txid)
        return self.service.confirms(txid)

    def get_transactions(self, address):
        """Return transactions for <address> from the service backend."""
        address = deserialize.address(self.testnet, address)
        return self.service.transactions_for_address(address)

    def split_utxos(self, wif, limit, fee=10000, max_outputs=100):
        """Split utxos of <wif> unitil <limit> or <max_outputs> reached."""
        key = deserialize.key(self.testnet, wif)
        limit = deserialize.positive_integer(limit)
        fee = deserialize.positive_integer(fee)
        max_outputs = deserialize.positive_integer(max_outputs)
        spendables = control.retrieve_utxos(self.service, [key.address()])
        txids = control.split_utxos(self.service, self.testnet, key,
                                    spendables, limit, fee=fee,
                                    max_outputs=max_outputs,
                                    publish=(not self.dryrun))
        return serialize.txids(txids)
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiments for kernel vs feature map in SQuAD.
`feature` model does not allow any interaction between question and context
except at the end, where the dot product (or L1/L2 distance) is used to get the
answer.
`kernel` model allows any interaction between question and context
(e.g. cross attention).
This script is for establishing baseline for both feature and kernel models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import json
import os
import numpy as np
from IPython import embed
import tensorflow as tf
from tqdm import tqdm
import tensorflow.contrib.learn as learn
# This is required for importing google specific flags:
# `output_dir`, `schedule`
# (`learn` above is not sufficient). Will need to add these flags when
# removing this import for open-sourcing.
from tensorflow.contrib.learn import learn_runner
import squad_data
from common_model import get_loss
from common_model import get_pred_ops
from common_model import get_train_op
from model import model as kernel_model
from color import Color
# Command-line flag definitions for the SQuAD kernel/feature experiments.
# Grouped below: core model/data flags, restore/EMA flags, third-party
# compatibility flags, and Skim-RNN flags.
tf.flags.DEFINE_string('data', 'squad', 'data')
tf.flags.DEFINE_integer('emb_size', 200, 'embedding size')
tf.flags.DEFINE_integer('glove_size', 200, 'GloVe size')
tf.flags.DEFINE_integer('hidden_size', 100, 'hidden state size')
tf.flags.DEFINE_integer('embed_hidden_size', 0, 'hidden state size for embedding. same as hidden_size if 0')
tf.flags.DEFINE_integer('num_train_steps', 15000, 'num train steps')
tf.flags.DEFINE_integer('num_eval_steps', 50, 'num eval steps')
tf.flags.DEFINE_boolean('draft', False, 'draft?')
tf.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.flags.DEFINE_float('dropout_rate', 0.2,
                      'dropout rate, applied to the input of LSTMs.')
tf.flags.DEFINE_string('root_data_dir', 'prepro', 'root data dir')
tf.flags.DEFINE_integer('save_checkpoints_steps', 500, '')
tf.flags.DEFINE_integer('num_eval_delay_secs', 1, 'eval delay secs')
tf.flags.DEFINE_boolean('shuffle_examples', False, 'Use shuffle example queue?')
tf.flags.DEFINE_boolean('shuffle_files', True, 'Use shuffle file queue?')
tf.flags.DEFINE_string('model', 'kernel', '`feature` or `kernel`.')
tf.flags.DEFINE_boolean('oom_test', False, 'Performs out-of-memory test')
tf.flags.DEFINE_string(
    'dist', 'dot', 'Distance function for feature model. `dot`, `l1` or `l2`.')
tf.flags.DEFINE_string('opt', 'Adam', 'optimizer')
tf.flags.DEFINE_float('learning_rate', 0.001,
                      '(Initial) learning rate for optimizer')
tf.flags.DEFINE_boolean(
    'infer', False,
    'If `True`, obtains and saves predictions for the test dataset '
    'at `answers_path`.')
tf.flags.DEFINE_string('answers_path', '',
                       'The path for saving predictions on test dataset. '
                       'If not specified, saves in `restore_dir` directory.')
tf.flags.DEFINE_float('clip_norm', 0, 'Clip norm threshold, 0 for no clip.')
tf.flags.DEFINE_integer(
    'restore_step', 0,
    'The global step for which the model is restored in the beginning. '
    '`0` for the most recent save file.')
tf.flags.DEFINE_float(
    'restore_decay', 1.0,
    'The decay rate for exponential moving average of variables that '
    'will be restored upon eval or infer. '
    '`1.0` for restoring variables without decay.')
tf.flags.DEFINE_string(
    'ema_decays', '',
    'List of exponential moving average (EMA) decay rates (float) '
    'to track for variables during training. Values are separated by commas.')
tf.flags.DEFINE_string(
    'restore_dir', '',
    'Directory from which variables are restored. If not specfied, `output_dir`'
    'will be used instead. For inference mode, this needs to be specified.')
tf.flags.DEFINE_string('model_id', 'm00', 'Model id.')
tf.flags.DEFINE_string('glove_dir', '/home/kyungissac/data/glove',
                       'GloVe dir.')
tf.flags.DEFINE_boolean('merge', False, 'If `True`, merges answers from same '
                        'paragraph that were split in preprocessing step.')
tf.flags.DEFINE_integer('queue_capacity', 5000, 'Input queue capacity.')
tf.flags.DEFINE_integer('min_after_dequeue', 1000, 'Minimum number of examples '
                        'after queue dequeue.')
tf.flags.DEFINE_integer('max_answer_size', 15, 'Max number of answer words.')
tf.flags.DEFINE_string('restore_scopes', '', 'Restore scopes, separated by ,.')
tf.flags.DEFINE_boolean('reg_gen', True, 'Whether to regularize training '
                        'with question generation (reconstruction) loss.')
tf.flags.DEFINE_float('reg_cf', 3.0, 'Regularization initial coefficient.')
tf.flags.DEFINE_float('reg_half_life', 6000, 'Regularization decay half life. '
                      'Set it to very high value to effectively disable decay.')
tf.flags.DEFINE_integer('max_gen_length', 32, 'During inference, maximum '
                        'length of generated question.')
# Below are added for third party.
tf.flags.DEFINE_string('schedule', 'train_and_evaluate',
                       'schedule for learn_runner.')
tf.flags.DEFINE_string('output_dir', '/tmp/squad_ckpts',
                       'Output directory for saving model.')
# Below are added for Skim-RNN.
tf.flags.DEFINE_boolean('skim_embed', False, 'If `True`, use Skim-RNN instead of plain RNN')
tf.flags.DEFINE_boolean('skim_1', False, 'If `True`, use Skim-RNN instead of plain RNN')
tf.flags.DEFINE_boolean('skim_2', False, 'If `True`, use Skim-RNN instead of plain RNN')
# For multi-level skim cell
tf.flags.DEFINE_string('small_hidden_sizes', '[20, 10, 5, 0]', 'small hidden sizes of Skim-RNN')
tf.flags.DEFINE_integer('num_cells', 5, 'Number of lstm cells in a skim cell')
# NOTE(review): `temp_period` has an empty help string and `temp_decay`'s
# help says "temperature in gumbel-softmax" though the name suggests a decay
# rate — confirm which flag carries which meaning.
tf.flags.DEFINE_float('temp_period', 100, '')
tf.flags.DEFINE_float('temp_decay', 1e-3, 'temperature in gumbel-softmax')
tf.flags.DEFINE_float('p_decay', 0.01, 'decay rate for preferred lstm choice loss.')
tf.flags.DEFINE_float('embed_p_decay', 0.01, 'decay rate for preferred lstm choice loss. (for embed)')
tf.flags.DEFINE_integer('p_pref', 1, 'preferred lstm cell, to be used for choice loss.')
# tf.flags.DEFINE_float('threshold', 0.5, 'threshold for skimming')
# tf.flags.DEFINE_integer('threshold_period', 0, 'threshold period')
tf.flags.DEFINE_boolean('emb_word_feat', True, 'use word feature vector (one-hot)')
tf.flags.DEFINE_boolean('word_feat', True, 'use word feature vector (one-hot)')
tf.flags.DEFINE_boolean('out_word_feat', True, 'use word feature vector (one-hot)')
tf.flags.DEFINE_boolean('big2nested', False, '')
tf.flags.DEFINE_boolean('small2nested', False, '')
tf.flags.DEFINE_boolean('only_train_small', False, '')
tf.flags.DEFINE_boolean('only_train_big', False, '')
tf.flags.DEFINE_boolean('vcgru', False, '')
tf.flags.DEFINE_float('sparsity_decay', 0, 'hyperparem for sparsity')
tf.flags.DEFINE_float('sparsity_th', 0.01, 'threshold for sparsity')
# Module-level handle to all parsed flags; used throughout this script.
FLAGS = tf.flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def model_fn(features, targets, mode, params):
    """Model function to be used for `Experiment` object.

    Builds the QA graph (feature or kernel model), prediction ops, loss,
    EMA trackers, optional checkpoint warm-start with variable renaming,
    and the train op. Should not access `flags.FLAGS`.

    Args:
        features: a dictionary of feature tensors.
        targets: a dictionary of target tensors.
        mode: `learn.ModeKeys.TRAIN` or `learn.ModeKeys.EVAL`.
        params: `HParams` object.
    Returns:
        `ModelFnOps` object.
    Raises:
        ValueError: raised if `params.model` is not an appropriate value.
    """
    with tf.variable_scope('model'):
        data = _get_data(params.data)
        # Select the model graph; both return start/end logits plus auxiliary tensors.
        if params.model == 'feature':
            logits_start, logits_end, tensors = feature_model(
                features, mode, params)
        elif params.model == 'kernel':
            logits_start, logits_end, tensors = kernel_model(
                features, mode, params)
        else:
            raise ValueError(
                '`%s` is an invalid argument for `model` parameter.' % params.model)
        # Learned scalar bias for the "no answer" option, tiled to [batch, 1].
        no_answer_bias = tf.get_variable('no_answer_bias', shape=[], dtype='float')
        no_answer_bias = tf.tile(
            tf.reshape(no_answer_bias, [1, 1]),
            [tf.shape(features['context_words'])[0], 1])
        predictions = get_pred_ops(features, params, logits_start, logits_end,
                                   no_answer_bias)
        # Expose intermediate tensors and input features alongside predictions.
        predictions.update(tensors)
        predictions.update(features)
        if mode == learn.ModeKeys.INFER:
            eval_metric_ops, loss = None, None
        else:
            eval_metric_ops = data.get_eval_metric_ops(targets, predictions, tensors)
            loss = get_loss(targets['word_answer_starts'], targets['word_answer_ends'],
                            logits_start, logits_end, no_answer_bias, tensors, params)
        # One exponential-moving-average tracker per requested decay rate.
        emas = {
            decay: tf.train.ExponentialMovingAverage(
                decay=decay, name='EMA_%f' % decay)
            for decay in params.ema_decays
        }
        ema_ops = [ema.apply() for ema in emas.values()]
        # Collect trainable variables under each scope eligible for restore.
        restore_vars = []
        for restore_scope in params.restore_scopes:
            restore_vars.extend(
                tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, restore_scope))
        # FIXME: indentation
        # Warm-start from `restore_dir` only when this is a fresh run
        # (no output directory yet).
        if params.restore_dir and not tf.gfile.Exists(params.output_dir):
            assert params.restore_scopes
            checkpoint_dir = params.restore_dir
            print("*" * 20)
            print(params.restore_dir, params.restore_step)
            if params.restore_step:
                checkpoint_dir = os.path.join(params.restore_dir,
                                              'model.ckpt-%d' % params.restore_step)

            def _rename(name):
                # Map checkpoint variable names between the plain LSTM layout
                # and the nested/skim cell layout; returns None to skip a var.
                if not (params.big2nested or params.small2nested):
                    return name
                for rnn in ['x_bi_rnn_0', 'x1_bi_rnn', 'x2_bi_rnn', 'x3_bi_rnn']:
                    plain_name = 'model/kernel_model/%s/bidirectional_rnn/' % rnn
                    if name.startswith(plain_name):
                        if name[len(plain_name) + 2:].startswith('/nested_rnn_cell/') and not params.small2nested:
                            # Direction char right after the prefix: 'f' or 'b'.
                            di = name[len(plain_name)]
                            assert di in ['f', 'b'], (name, di)
                            ty = name[len(plain_name) + 2 + len('/nested_rnn_cell/'):]
                            # For multi skim cell.
                            if ty.startswith('dense') or ty.startswith('cell_1') or ty.startswith('cell_2')\
                                    or ty.startswith('cell_3') or ty.startswith('cell_4'):
                                return None
                            assert ty.startswith('cell_0/basic_lstm_cell/'), (name, ty)
                            ty_ = ty[len('cell_0/basic_lstm_cell/'):]
                            assert ty_ in ['kernel', 'bias'], (name, ty_)
                            print(name)
                            return plain_name + '%sw/basic_lstm_cell/%s' % (di, ty_)
                        elif params.small2nested and name.startswith('model/kernel_model/%s/dense' % rnn):
                            print(name)
                            return None
                if params.small2nested and name.startswith('model/kernel_model/logits'):
                    return None
                return name
            assignment_map = {_rename(var.op.name): var for var in restore_vars if _rename(var.op.name) is not None}
            tf.contrib.framework.init_from_checkpoint(checkpoint_dir, assignment_map)
        if mode == learn.ModeKeys.TRAIN:
            var_list = restore_vars
            if params.only_train_small or params.only_train_big:
                no_train_list = []

                def get_var_in_lstm(i):
                    # Appends the LSTM variable-name prefixes of cell `i` to
                    # the (closed-over) no_train_list.
                    for di in ['fw', 'bw']:
                        # model layer - number of layers
                        for no in ['1', '2']:
                            for ty in ['kernel', 'bias']:
                                no_train_list.append(
                                    'model/kernel_model/x%s_bi_rnn/bidirectional_rnn/%s/nested_rnn_cell/cell_%d/basic_lstm_cell/%s' % (
                                        no, di, i, ty))

                def _filter(var, i):
                    # NOTE(review): re-appends to no_train_list on every call,
                    # so the list grows with duplicates -- harmless but O(n^2).
                    get_var_in_lstm(i)
                    for v in no_train_list:
                        if var.op.name.startswith(v):
                            return True
                    return False
                if params.only_train_small:
                    print("only train small RNN")
                    var_list = [v for v in var_list if _filter(v, 1)]
                else:
                    print("only train big RNN")
                    var_list = [v for v in var_list if _filter(v, 0)]
                print([var.op.name[len('model/kernel_model/'):] for var in var_list])
            else:
                print("Train all variables")
            train_op = get_train_op(
                loss,
                var_list=var_list,
                opt=params.opt,
                learning_rate=params.learning_rate,
                clip_norm=params.clip_norm,
                post_ops=ema_ops)
        else:
            # For eval/infer, optionally swap variables for their EMA values.
            if params.restore_decay < 1.0:
                ema = emas[params.restore_decay]
                assign_ops = []
                for var in tf.trainable_variables():
                    assign_op = tf.assign(var, ema.average(var))
                    assign_ops.append(assign_op)
                with tf.control_dependencies(assign_ops):
                    for key, val in predictions.items():
                        predictions[key] = tf.identity(val)
            train_op = None
    return learn.ModelFnOps(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
def _experiment_fn(run_config, hparams):
    """Build an `Experiment` from a run configuration and hyperparameters.

    Args:
        run_config: `EstimatorConfig` object for run configuration.
        hparams: `HParams` object that contains hyperparameters.

    Returns:
        `Experiment` object.
    """
    data = _get_data(hparams.data)
    # In OOM-test mode run a single step of each phase just to probe memory.
    if FLAGS.oom_test:
        train_steps, eval_steps = 1, 1
    else:
        train_steps, eval_steps = FLAGS.num_train_steps, FLAGS.num_eval_steps
    estimator = learn.Estimator(
        model_fn=model_fn, config=run_config, params=hparams)
    return learn.Experiment(
        estimator=estimator,
        train_input_fn=_get_train_input_fn(data),
        eval_input_fn=_get_eval_input_fn(data),
        train_steps=train_steps,
        eval_steps=eval_steps,
        eval_delay_secs=FLAGS.num_eval_delay_secs)
def _get_data(data_name):
    # `data_name` is currently ignored; SQuAD is the only supported dataset.
    return squad_data
def _get_train_input_fn(data):
    """Return the input function for the training ('train') split."""
    options = dict(
        shuffle_files=FLAGS.shuffle_files,
        shuffle_examples=FLAGS.shuffle_examples,
        queue_capacity=FLAGS.queue_capacity,
        min_after_dequeue=FLAGS.min_after_dequeue,
        oom_test=FLAGS.oom_test)
    return data.get_input_fn(
        FLAGS.root_data_dir, FLAGS.glove_dir, 'train',
        FLAGS.batch_size, FLAGS.glove_size, **options)
def _get_eval_input_fn(data):
    """Return the input function for the evaluation ('dev') split."""
    # NOTE(review): unlike the train/test input fns, shuffling is hard-wired
    # on here rather than taken from flags.
    options = dict(
        shuffle_files=True,
        shuffle_examples=True,
        queue_capacity=FLAGS.queue_capacity,
        min_after_dequeue=FLAGS.min_after_dequeue,
        num_epochs=1,
        oom_test=FLAGS.oom_test)
    return data.get_input_fn(
        FLAGS.root_data_dir, FLAGS.glove_dir, 'dev',
        FLAGS.batch_size, FLAGS.glove_size, **options)
def _get_test_input_fn(data):
    """Return the input function for testing."""
    # TODO(seominjoon) For now, test input is same as eval input (dev).
    options = dict(
        shuffle_files=FLAGS.shuffle_files,
        shuffle_examples=FLAGS.shuffle_examples,
        queue_capacity=FLAGS.queue_capacity,
        min_after_dequeue=FLAGS.min_after_dequeue,
        num_epochs=1,
        oom_test=FLAGS.oom_test)
    return data.get_input_fn(
        FLAGS.root_data_dir, FLAGS.glove_dir, 'dev',
        FLAGS.batch_size, FLAGS.glove_size, **options)
def _get_config():
    """Get configuration object for `Estimator` object.

    For open-soucing, `EstimatorConfig` has been replaced with `RunConfig`.
    Depends on `flags.FLAGS`, and should not be used outside of this main script.

    Returns:
        `EstimatorConfig` object.
    """
    # During inference, checkpoints are read from the restore directory.
    model_dir = FLAGS.restore_dir if FLAGS.infer else FLAGS.output_dir
    return learn.RunConfig(
        model_dir=model_dir,
        keep_checkpoint_max=0,  # Keep all checkpoints.
        save_checkpoints_steps=FLAGS.save_checkpoints_steps)
def _get_hparams():
    """Model-specific hyperparameters go here.

    All model parameters go here, since `model_fn()` should not access
    `flags.FLAGS`.
    Depends on `flags.FLAGS`, and should not be used outside of this main script.

    Returns:
        `HParams` object.
    """
    hparams = tf.contrib.training.HParams()
    hparams.data = FLAGS.data
    data = _get_data(hparams.data)
    data_hparams = data.get_params(FLAGS.root_data_dir)
    hparams.infer = FLAGS.infer
    # Vocabulary sizes come from the preprocessed data, not flags.
    hparams.vocab_size = data_hparams['vocab_size']
    hparams.char_vocab_size = data_hparams['char_vocab_size']
    hparams.batch_size = FLAGS.batch_size
    hparams.hidden_size = FLAGS.hidden_size
    # 0 means "same as hidden_size".
    hparams.embed_hidden_size = FLAGS.hidden_size if FLAGS.embed_hidden_size == 0 else FLAGS.embed_hidden_size
    hparams.emb_size = FLAGS.emb_size
    hparams.dropout_rate = FLAGS.dropout_rate
    hparams.dist = FLAGS.dist
    hparams.learning_rate = FLAGS.learning_rate
    hparams.model = FLAGS.model
    hparams.restore_dir = FLAGS.restore_dir
    hparams.output_dir = FLAGS.output_dir
    hparams.clip_norm = FLAGS.clip_norm
    hparams.opt = FLAGS.opt
    hparams.restore_decay = FLAGS.restore_decay
    # Comma-separated decay list, e.g. "0.999,0.9999"; empty means no EMA.
    if FLAGS.ema_decays:
        hparams.ema_decays = list(map(float, FLAGS.ema_decays.split(',')))
    else:
        hparams.ema_decays = []
    hparams.restore_step = FLAGS.restore_step
    hparams.model_id = FLAGS.model_id
    hparams.max_answer_size = FLAGS.max_answer_size
    hparams.restore_scopes = FLAGS.restore_scopes.split(',')
    hparams.glove_size = FLAGS.glove_size
    # Regularization by Query Generation (reconstruction) parameters.
    hparams.reg_gen = FLAGS.reg_gen
    hparams.reg_cf = FLAGS.reg_cf
    hparams.reg_half_life = FLAGS.reg_half_life
    # For Skim-RNN
    hparams.skim_embed = FLAGS.skim_embed
    hparams.skim_1 = FLAGS.skim_1
    hparams.skim_2 = FLAGS.skim_2
    # For multi-level skim cells
    hparams.small_hidden_sizes = json.loads(FLAGS.small_hidden_sizes)
    hparams.num_cells = FLAGS.num_cells
    # NOTE(review): `threshold` is commented out below but `infer()` still
    # reads `params.threshold` -- likely AttributeError at inference; confirm.
    # hparams.threshold = FLAGS.threshold
    # hparams.threshold_period = FLAGS.threshold_period
    hparams.temp_period = FLAGS.temp_period
    hparams.temp_decay = FLAGS.temp_decay
    hparams.p_decay = FLAGS.p_decay
    hparams.embed_p_decay = FLAGS.embed_p_decay
    hparams.p_pref = FLAGS.p_pref
    hparams.emb_word_feat = FLAGS.emb_word_feat
    hparams.word_feat = FLAGS.word_feat
    hparams.out_word_feat = FLAGS.out_word_feat
    hparams.big2nested = FLAGS.big2nested
    hparams.small2nested = FLAGS.small2nested
    hparams.only_train_small = FLAGS.only_train_small
    hparams.only_train_big = FLAGS.only_train_big
    hparams.vcgru = FLAGS.vcgru
    hparams.sparsity_decay = FLAGS.sparsity_decay
    hparams.sparsity_th = FLAGS.sparsity_th
    return hparams
def train_and_eval():
    """Train and eval routine."""
    # Delegates scheduling to learn_runner; all configuration comes from flags.
    learn_runner.run(
        experiment_fn=_experiment_fn,
        schedule=FLAGS.schedule,
        run_config=_get_config(),
        hparams=_get_hparams())
def _set_ckpt():
    """Point the `checkpoint` index file at `FLAGS.restore_step`, if set."""
    # TODO(seominjoon): This is adhoc. Need better ckpt loading during inf.
    if not FLAGS.restore_step:
        return
    checkpoint_file = os.path.join(FLAGS.restore_dir, 'checkpoint')
    content = 'model_checkpoint_path: "model.ckpt-%d"\n' % FLAGS.restore_step
    with tf.gfile.GFile(checkpoint_file, 'w') as fp:
        fp.write(content)
def infer():
    """Inference routine, outputting answers to `FLAGS.answers_path`.

    Runs prediction over the test input, accumulates per-example answer
    records (and skim choices when a skim variant is active), optionally
    merges multi-candidate answers by probability, and dumps JSON files.
    """
    _set_ckpt()
    params = _get_hparams()
    estimator = learn.Estimator(
        model_fn=model_fn, config=_get_config(), params=params)
    # estimator.evaluate(
    #     input_fn=_get_test_input_fn(_get_data(params.data)))
    # return
    predictions = estimator.predict(
        input_fn=_get_test_input_fn(_get_data(params.data)))
    global_step = estimator.get_variable_value('global_step')
    # NOTE(review): `params.threshold` is commented out in `_get_hparams()`;
    # these two path expressions would raise AttributeError -- confirm.
    answer_path = FLAGS.answers_path or os.path.join(FLAGS.restore_dir,
                                                     'answers-%d-%.2f.json' % (global_step, params.threshold))
    choice_path = os.path.join(FLAGS.restore_dir,
                               'choices-%d-%.2f.json' % (global_step, params.threshold))
    # NOTE(review): these three placeholder keys sit alongside example ids and
    # are never filled; in the merge loop below, `dic['answer']` would KeyError
    # on them -- presumably they should be removed or handled; confirm.
    answer_dict = {'no_answer_prob': {}, 'answer_prob': {}, 'choice': {}}
    skim = params.skim_embed or params.skim_1 or params.skim_2
    choice_dict = {}
    for prediction in tqdm(predictions):
        id_ = prediction['id'].decode('utf-8')
        context_words = [str(word, encoding="utf-8")
                         for word in prediction['context_words'].tolist()]
        question = prediction['question'].decode('utf-8')
        gt_answers = [a.decode('utf-8') for a in prediction['answers'] if len(a) > 0]
        answer = prediction['a'].decode('utf-8')
        answer_dict[id_] = {
            'context_words': context_words,
            'question': question,
            'gt_answers': gt_answers,
            'answer': answer,
            'answer_prob': prediction['answer_prob'].tolist(),
            'no_answer_prob': prediction['no_answer_prob'].tolist()
        }
        if skim:
            # Collect every per-layer skim decision tensor for this example.
            choices = {}
            for key in prediction:
                if key.startswith('choice'):
                    choices[key] = prediction[key].tolist()
            choice_dict[id_] = {
                'context_words': context_words,
                'question': question,
                'answer': answer,
                'gt_answers': gt_answers,
                'choice': choices}
        if FLAGS.oom_test:
            break
    if FLAGS.merge:
        # Collapse multiple candidates per true id, keeping the most probable.
        new_answer_dict = defaultdict(list)
        context_dict, question_dict, gt_dict = {}, {}, {}
        for id_, dic in answer_dict.items():
            answer = dic['answer']
            answer_prob = dic['answer_prob']
            id_ = id_.split(' ')[0]  # retrieve true id
            new_answer_dict[id_].append([answer_prob, answer])
            context_dict[id_] = dic['context_words']
            question_dict[id_] = dic['question']
            gt_dict[id_] = dic['gt_answers']
        answer_dict = {
            id_: {
                'context': context_dict[id_],
                'question': question_dict[id_],
                # NOTE(review): 'gt_answrs' looks like a typo for 'gt_answers';
                # it is a runtime key, so left as-is -- confirm consumers.
                'gt_answrs': gt_dict[id_],
                'answer': max(each, key=lambda pair: pair[0])[1]
            }
            for id_, each in new_answer_dict.items()
        }
    with tf.gfile.GFile(answer_path, 'w') as fp:
        print(answer_path)
        json.dump(answer_dict, fp)
    if skim:
        with tf.gfile.GFile(choice_path, 'w') as fp:
            print(choice_path)
            json.dump(choice_dict, fp)
def main(_):
    """Entry point: dispatch to inference or train/eval based on flags."""
    if FLAGS.infer:
        infer()
    else:
        train_and_eval()


if __name__ == '__main__':
    tf.app.run()
|
<filename>average/gather_data.py<gh_stars>0
"""
Arthur: <NAME>
Purpose: This module is used for gathering data. That is 30 data plots to be later used for
boxplots.
Date: 29/03/21
"""
import default
# Titles printed before each parameter sweep.
TITLE_LR = "Learning Rate"
TITLE_BATCH_SIZE = "Batch Size"
TITLE_NUM_EPOCHES = "Number Of Epoches"
TITLE_TIME_STEP = "Time Step"
TITLE_CUSTOME = "Custome items"  # NOTE(review): spelling kept; it is a runtime string.

# Candidate hidden-layer width lists for the layer-size experiments.
HIDDEN_LAYERS_0 = [50, 75, 100, 125, 150, 175, 200, 225]
HIDDEN_LAYERS_1 = [50, 250, 350, 450, 550, 650, 750, 850]
# NOTE(review): the 100 below breaks the ascending pattern -- possibly a typo
# for 900/1000; confirm before relying on it.
HIDDEN_LAYERS_2 = [600, 800, 100, 1000]
HIDDEN_LAYERS_3 = [600, 800, 1000]
# NOTE(review): defined but not included in HIDDEN_LAYERS below -- confirm.
HIDDEN_LAYERS_4 = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
HIDDEN_LAYERS = [HIDDEN_LAYERS_0, HIDDEN_LAYERS_1, HIDDEN_LAYERS_2, HIDDEN_LAYERS_3]
# Default fully-connected architecture: 2 -> 50 -> 100 -> 200 -> 400 -> 1.
DEFAULT_LAYER = [{"in_features" :2, "out_features" : 50}, {"in_features" : 50, "out_features" : 100},
                 {"in_features" : 100, "out_features" : 200},
                 {"in_features" : 200, "out_features" : 400},
                 {"in_features" : 400, "out_features" : 1}]
def thiry_data_items(title, model, filename, **para):
    """Gather the 30-plot loss data for the given parameter sweep and save it.

    Args:
        title (str): Title printed before the data is gathered.
        model: Loss-utilities model providing a ``loss(n, **para)`` method.
        filename (str): CSV path the resulting DataFrame is written to.
        **para: Parameter name -> list of values to sweep over.
    """
    print("--- %s ---" % (title))
    sweeps = list(para.values())
    if not sweeps:
        # Nothing to sweep over; do not touch the model or the output file.
        return
    _, frame = model.loss(len(sweeps[0]), **para)
    frame.to_csv(filename, index=False)
def append_inputs_outputs_layers(num_inputs, num_outputs, hidden_layers=None):
    """Prepend the input size and append the output size to each layer spec.

    Bug fix: the original did ``test = layer``, which aliases the list, so the
    ``insert``/``append`` calls mutated the module-level ``HIDDEN_LAYERS_*``
    constants in place and repeated calls (e.g. ``newton`` then ``van``)
    accumulated extra sizes. Fresh lists are built instead.

    Args:
        num_inputs (int): The number of inputs the neural network has.
        num_outputs (int): The number of outputs the neural network has.
        hidden_layers (list[list[int]], optional): Hidden-layer specs to
            extend. Defaults to the module-level ``HIDDEN_LAYERS``.

    Returns:
        (class 'list'): A new list of layer specs; inputs are left untouched.
    """
    if hidden_layers is None:
        hidden_layers = HIDDEN_LAYERS
    # Copy each spec so callers' lists (and the module constants) stay intact.
    return [[num_inputs] + list(layer) + [num_outputs] for layer in hidden_layers]
# This is the methods will perform the collection of data plots ...
def newton(lr=False, batch_size=False, num_epoches=False, custome=False, layer=False):
    """Gather Newton's-method loss data for each enabled learning parameter."""
    base = "../data/boxplots/newton/"
    if lr:
        thiry_data_items(TITLE_LR, default.NewtonsLoss(), base + "lr.csv",
                         lr=[0.1, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001])
    if batch_size:
        thiry_data_items(TITLE_BATCH_SIZE, default.NewtonsLoss(), base + "batch_size.csv",
                         batch_size=[50, 100, 150, 200, 250, 300])
    if num_epoches:
        thiry_data_items(TITLE_NUM_EPOCHES, default.NewtonsLoss(), base + "num_epoches.csv",
                         num_epoches=[50, 100, 150, 200, 250, 300])
    if custome:
        thiry_data_items(TITLE_CUSTOME, default.NewtonsLoss(), base + "custome.csv",
                         lr=[0.000001, 0.000001, 0.001, 0.001],
                         batch_size=[32, 150, 32, 150],
                         num_epoches=[200, 200, 200, 200])
    if layer:
        test_layers = append_inputs_outputs_layers(num_inputs=2, num_outputs=1)
        _, frame = default.NewtonsLoss().loss_modeling(test_layers)
        # NOTE(review): prints the "custome" title here; looks copy-pasted.
        print(TITLE_CUSTOME)
        frame.to_csv(base + "layers.csv", index=False)
def van(lr=False, batch_size=False, num_epoches=False, time_step=False, custome=False, layer=False):
    """Gather Van der Pol loss data for each enabled learning parameter."""
    base = "../data/boxplots/van/"
    if num_epoches:
        thiry_data_items(TITLE_NUM_EPOCHES, default.VanDerPol(), base + "num_epoches.csv",
                         num_epoches=[10, 20, 30])
    if lr:
        thiry_data_items(TITLE_LR, default.VanDerPol(), base + "lr.csv",
                         lr=[0.0000005])
    if batch_size:
        thiry_data_items(TITLE_BATCH_SIZE, default.VanDerPol(), base + "batch_size.csv",
                         batch_size=[15, 20, 25, 30, 35, 40])
    if time_step:
        thiry_data_items(TITLE_TIME_STEP, default.VanDerPol(), base + "time_step.csv",
                         time_step=[0.1, 0.01, 0.001])
    if custome:
        # NOTE(review): the key here is "bs" rather than "batch_size"; only the
        # value lists are consumed, so behavior is unchanged.
        thiry_data_items(TITLE_CUSTOME, default.VanDerPol(), base + "custome.csv",
                         lr=[0.0001, 0.000001], bs=[35, 15], num_epoches=[50, 100])
    if layer:
        test_layers = append_inputs_outputs_layers(num_inputs=3, num_outputs=2)
        _, frame = default.VanDerPol().loss_modeling(test_layers)
        # NOTE(review): prints the "custome" title here; looks copy-pasted.
        print(TITLE_CUSTOME)
        frame.to_csv(base + "layers.csv", index=False)
def laub(lr=False, batch_size=False, num_epoches=False, time_step=False, custome=False, layer=False):
    """Gather Laub loss data for each enabled learning parameter."""
    base = "../data/boxplots/laub/"
    if lr:
        thiry_data_items(TITLE_LR, default.Laub(), base + "lr.csv",
                         lr=[0.0001, 0.00005, 0.000005, 0.0000005, 0.00000005, 0.000000005])
    if batch_size:
        thiry_data_items(TITLE_BATCH_SIZE, default.Laub(), base + "batch_size.csv",
                         batch_size=[500, 550, 600, 650, 700, 750])
    if num_epoches:
        thiry_data_items(TITLE_NUM_EPOCHES, default.Laub(), base + "num_epoches.csv",
                         num_epoches=[10, 20, 30, 40, 50, 60])
    if time_step:
        thiry_data_items(TITLE_TIME_STEP, default.Laub(), base + "time_step.csv",
                         time_step=[0.1, 0.01])
    if custome:
        thiry_data_items(TITLE_CUSTOME, default.Laub(), base + "custome.csv",
                         lr=[5e-8, 5e-5, 5e-8, 5e-5],
                         batch_size=[30, 500, 500, 30],
                         num_epoches=[180, 60, 180, 60])
    if layer:
        test_layers = append_inputs_outputs_layers(num_inputs=8, num_outputs=7)
        _, frame = default.Laub().loss_modeling(test_layers)
        # NOTE(review): prints the "custome" title here; looks copy-pasted.
        print(TITLE_CUSTOME)
        frame.to_csv(base + "layers.csv", index=False)
if __name__== "__main__":
    # Add data here for plots you want to collect, e.g. newton(layer=True).
    print("..Collecting data..")
    #newton(layer=True)
import datetime
import pytest
import numpy as np
from htcanalyze.htcanalyze import HTCAnalyze, gen_time_dict, sort_dict_by_col
def test_gen_time_dict():
    """gen_time_dict builds parallel 'Dates and times'/'Values' lists for
    every combination of submission/execution/termination dates.

    NOTE(review): the waiting-time/runtime checks for partially-given dates
    compare against deltas built from `datetime.now()` taken here, while
    gen_time_dict presumably calls `now()` itself -- equality can flake across
    a second boundary; confirm and consider freezing time.
    """
    strp_format = "%Y-%m-%dT%H:%M:%S"
    strf_format = "%m/%d %H:%M:%S"
    today = datetime.datetime.now()
    today = today.replace(microsecond=0)
    submission = "2019-6-23T22:25:25"
    execution = "2019-6-24T06:32:25"
    termination = "2020-01-13T6:5:5"
    sub_date = datetime.datetime.strptime(submission, strp_format)
    exec_date = datetime.datetime.strptime(execution, strp_format)
    term_date = datetime.datetime.strptime(termination, strp_format)
    # test all None
    time_dict = gen_time_dict()
    assert time_dict['Dates and times'] == []
    assert time_dict['Values'] == []
    # test only submission date given
    waiting_time = today - sub_date
    time_dict = gen_time_dict(sub_date)
    assert time_dict['Dates and times'] == ['Submission date', 'Waiting time']
    assert time_dict['Values'][0] == sub_date.strftime(strf_format)
    assert time_dict['Values'][1] == waiting_time
    # test only execution date given
    time_dict = gen_time_dict(execution_date=exec_date)
    assert time_dict['Dates and times'] == ['Execution date']
    assert time_dict['Values'][0] == exec_date.strftime(strf_format)
    # test only termination date given
    time_dict = gen_time_dict(termination_date=term_date)
    assert time_dict['Dates and times'] == ['Termination date']
    assert time_dict['Values'][0] == term_date.strftime(strf_format)
    # test only submission and execution date given
    time_dict = gen_time_dict(sub_date, exec_date)
    assert time_dict['Dates and times'] == ['Submission date',
                                            'Execution date',
                                            'Waiting time',
                                            'Execution runtime']
    waiting_time = exec_date - sub_date
    execution_runtime = today - exec_date
    assert time_dict['Values'][0] == sub_date.strftime(strf_format)
    assert time_dict['Values'][1] == exec_date.strftime(strf_format)
    assert time_dict['Values'][2] == waiting_time
    assert time_dict['Values'][3] == execution_runtime
    # test only submission and termination date given
    time_dict = gen_time_dict(submission_date=sub_date,
                              termination_date=term_date)
    assert time_dict['Dates and times'] == ['Submission date',
                                            'Termination date',
                                            'Total runtime']
    total_runtime = term_date - sub_date
    assert time_dict['Values'][0] == sub_date.strftime(strf_format)
    assert time_dict['Values'][1] == term_date.strftime(strf_format)
    assert time_dict['Values'][2] == total_runtime
    # test only execution and termination date given
    time_dict = gen_time_dict(submission_date=None,
                              execution_date=exec_date,
                              termination_date=term_date)
    execution_runtime = term_date - exec_date
    assert time_dict['Dates and times'] == ['Execution date',
                                            'Termination date',
                                            'Execution runtime']
    assert time_dict['Values'][0] == exec_date.strftime(strf_format)
    assert time_dict['Values'][1] == term_date.strftime(strf_format)
    assert time_dict['Values'][2] == execution_runtime
    # test all given
    time_dict = gen_time_dict(submission_date=sub_date,
                              execution_date=exec_date,
                              termination_date=term_date)
    assert time_dict['Dates and times'] == ['Submission date',
                                            'Execution date',
                                            'Termination date',
                                            'Waiting time',
                                            'Execution runtime',
                                            'Total runtime']
    waiting_time = exec_date - sub_date
    execution_runtime = term_date - exec_date
    total_runtime = term_date - sub_date
    assert time_dict['Values'][0] == sub_date.strftime(strf_format)
    assert time_dict['Values'][1] == exec_date.strftime(strf_format)
    assert time_dict['Values'][2] == term_date.strftime(strf_format)
    assert time_dict['Values'][3] == waiting_time
    assert time_dict['Values'][4] == execution_runtime
    assert time_dict['Values'][5] == total_runtime
def test_sort_dict_by_column():
    """sort_dict_by_col reorders every column by one key, forward and reversed."""
    test_dict = {
        "Key1": ["Hello", "It's", "Me", "I", "Was", "Wondering"],
        "Key2": [1, 3, 6, 5, 2, 4],
        "Key3": ["a", "b", "d", "c", "e", "f"]
    }
    forward_expectations = {
        "Key1": {'Key1': ['Hello', 'I', "It's", 'Me', 'Was', 'Wondering'],
                 'Key2': [1, 5, 3, 6, 2, 4],
                 'Key3': ['a', 'c', 'b', 'd', 'e', 'f']},
        "Key2": {'Key1': ['Hello', 'Was', "It's", 'Wondering', 'I', 'Me'],
                 'Key2': [1, 2, 3, 4, 5, 6],
                 'Key3': ['a', 'e', 'b', 'f', 'c', 'd']},
        "Key3": {'Key1': ['Hello', "It's", 'I', 'Me', 'Was', 'Wondering'],
                 'Key2': [1, 3, 5, 6, 2, 4],
                 'Key3': ['a', 'b', 'c', 'd', 'e', 'f']},
    }
    for key, expected in forward_expectations.items():
        assert sort_dict_by_col(test_dict, key, reverse=False) == expected
    # The default is reverse=True.
    reversed_expectations = {
        "Key1": {'Key1': ['Wondering', 'Was', 'Me', "It's", 'I', 'Hello'],
                 'Key2': [4, 2, 6, 3, 5, 1],
                 'Key3': ['f', 'e', 'd', 'b', 'c', 'a']},
        "Key2": {'Key1': ['Me', 'I', 'Wondering', "It's", 'Was', 'Hello'],
                 'Key2': [6, 5, 4, 3, 2, 1],
                 'Key3': ['d', 'c', 'f', 'b', 'e', 'a']},
        "Key3": {'Key1': ['Wondering', 'Was', 'Me', 'I', "It's", 'Hello'],
                 'Key2': [4, 2, 6, 5, 3, 1],
                 'Key3': ['f', 'e', 'd', 'c', 'b', 'a']},
    }
    for key, expected in reversed_expectations.items():
        assert sort_dict_by_col(test_dict, key) == expected
@pytest.fixture(scope="module")
def htcan():
    # Shared, module-scoped HTCAnalyze instance with default settings.
    htcan = HTCAnalyze()
    return htcan
def test_HTCAnalyze_init(htcan):
    """A freshly constructed HTCAnalyze carries the documented defaults."""
    expected_defaults = {
        "ext_log": "",
        "ext_err": ".err",
        "ext_out": ".out",
        "show_list": [],
        "rdns_cache": {},
        "rdns_lookup": False,
        "tolerated_usage": 0.1,
        "bad_usage": 0.25,
    }
    for attr, expected in expected_defaults.items():
        assert getattr(htcan, attr) == expected
def test_manage_thresholds(htcan):
    """Usage cells get colour-tagged relative to the requested resources."""
    htcan.tolerated_usage = 0.1
    htcan.bad_usage = 0.25
    resources = {
        "Resources": ["Cpu", "Disk", "Memory"],
        "Usage": [0.23, 3000, 4051],
        "Requested": [1, 2700, 4500],
        "Allocated": [1, 6000, 6000]
    }
    managed = htcan.manage_thresholds(resources)
    # Only the Usage column is rewritten; the rest pass through untouched.
    assert managed == {'Resources': ['Cpu', 'Disk', 'Memory'],
                       'Usage': ['[red]0.23[/red]',
                                 '[yellow]3000[/yellow]',
                                 '[green]4051[/green]'],
                       'Requested': [1, 2700, 4500],
                       'Allocated': [1, 6000, 6000]}
def test_log_to_dict(htcan):
    """Tests the log_to_dict function of HTCAnalyze class.

    Only the following files are tested:
    - tests/test_logs/valid_logs/normal_log.log
    - tests/test_logs/valid_logs/aborted_with_errors.log

    Requires the fixture log files to exist relative to the working directory.

    :param htcan: module-scoped HTCAnalyze fixture
    :return:
    """
    # Case 1: a normally terminated job.
    file = "tests/test_logs/valid_logs/normal_log.log"
    job_events_dict, res_dict, time_dict, \
        ram_history_dict, error_dict = htcan.log_to_dict(file)
    assert job_events_dict == {
        'Execution details': ['Termination State',
                              'Submitted from',
                              'Executing on',
                              'Return Value'],
        'Values': ['[green]Normal[/green]',
                   '10.0.8.10',
                   '10.0.9.201',
                   1]}
    assert list(res_dict.keys()) == [
        "Resources", "Usage", "Requested", "Allocated"]
    assert res_dict['Resources'] == ["Cpu", "Disk", "Memory"]
    # Numeric columns are array-like, hence the np.array_equal comparisons.
    assert np.array_equal(res_dict['Usage'],
                          [1.10e-01, 4.00e+00, 9.22e+02]) is True
    assert np.array_equal(res_dict['Requested'],
                          [1.000000e+00, 2.020096e+07, 2.048000e+04]) is True
    assert np.array_equal(res_dict['Allocated'],
                          [1.0000e+00, 2.2312484e+07, 2.0480000e+04]) is True
    assert time_dict == {
        'Dates and times': ['Submission date',
                            'Execution date',
                            'Termination date',
                            'Waiting time',
                            'Execution runtime',
                            'Total runtime'],
        'Values': ['07/11 20:39:51',
                   '07/11 20:39:54',
                   '07/11 20:45:50',
                   datetime.timedelta(seconds=3),
                   datetime.timedelta(seconds=356),
                   datetime.timedelta(seconds=359)]}
    assert ram_history_dict == {
        'Dates': [datetime.datetime(2020, 7, 11, 20, 40, 3),
                  datetime.datetime(2020, 7, 11, 20, 45, 4)],
        'Image size updates': [448, 1052936],
        'Memory usages': [1, 922],
        'Resident Set Sizes': [448, 943244]}
    # A clean run produces no error entries.
    assert error_dict == {}
    # Case 2: a job aborted with errors -- no resource usage is reported.
    file = "tests/test_logs/valid_logs/aborted_with_errors.log"
    job_events_dict, res_dict, time_dict, \
        ram_history_dict, error_dict = htcan.log_to_dict(file)
    assert job_events_dict == {
        'Execution details': ['Process was', 'Submitted from', 'Executing on'],
        'Values': ['[red]Aborted[/red]', '10.0.8.10', '10.0.9.1']}
    assert res_dict == {}
    assert time_dict == {
        'Dates and times': ['Submission date',
                            'Execution date',
                            'Termination date',
                            'Waiting time',
                            'Execution runtime',
                            'Total runtime'],
        'Values': ['02/11 09:45:05',
                   '02/11 12:29:18',
                   '02/25 09:29:26',
                   datetime.timedelta(seconds=9853),
                   datetime.timedelta(days=13, seconds=75608),
                   datetime.timedelta(days=13, seconds=85461)]}
    assert ram_history_dict == {
        'Dates': [datetime.datetime(2020, 2, 11, 12, 29, 26)],
        'Image size updates': [28644],
        'Memory usages': [28],
        'Resident Set Sizes': [28644]}
    assert error_dict == {
        'Event Number': [7, 12, 9],
        'Time': ['02/11 12:31:27', '02/11 12:31:27', '02/25 09:29:26'],
        'Error': ['SHADOW_EXCEPTION', 'JOB_HELD', 'Aborted'],
        'Reason': ['Error from <EMAIL>: '
                   'Job has encountered an out-of-memory event.',
                   'Error from <EMAIL>: '
                   'Job has encountered an out-of-memory event.',
                   'via condor_rm (by user tkadelka)']}
def test_rdns_lookup(htcan):
    """gethostbyaddrcached caches reverse-DNS results; bad input maps to itself.

    NOTE(review): the first assertion performs a real reverse-DNS lookup and
    pins the returned hostname -- network-dependent and potentially flaky.
    """
    htcan.gethostbyaddrcached("172.16.58.3")
    assert htcan.rdns_cache["172.16.58.3"] == "ord38s04-in-f0.1e100.net"
    # Unresolvable input is cached verbatim.
    htcan.gethostbyaddrcached("NoIP")
    assert htcan.rdns_cache["NoIP"] == "NoIP"
|
#!/usr/bin/env python3
import logging
import os
import sys
import time
import faust
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from paho.mqtt import client as mqtt_client
# Root logger setup; WARN keeps the analyzer quiet unless something is off.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.WARN)
# MQTT connection settings; actuator commands are published to mqtt_topic.
mqtt_broker = os.environ['MQTT_BROKER']
mqtt_port = int(os.environ['MQTT_BROKER_PORT'])
mqtt_topic = os.environ['MQTT_ACTUATOR_TOPIC']
mqtt_client_id = f'mqtt-faust-analyzer'
# Kafka source stream; faust expects the "kafka://" scheme prefix.
kafka_broker = 'kafka://' + os.environ['KAFKA_BROKER']
kafka_topic = os.environ['KAFKA_TOPIC']
kafka_key = "server-room"
# 'integer' or 'float' -- selects the Temperature record type below.
value_type = os.environ['VALUE_TYPE']
# Persistence backend selector: 'cassandra' or 'file'.
save_data = os.environ['SAVE_DATA']
data_file_normal = "/analyzer/temperature-data-normal.csv"
data_file_anomalous = "/analyzer/temperature-data-anomalous.csv"
# Identity and command vocabulary of the single controlled actuator.
actuator_id = 'actuator-0'
actuator_actions = ['power-on', 'pause', 'shutdown']
def connect_to_cassandra():
    """Create a Cassandra session bound to a freshly (re)created `iiot` keyspace.

    WARNING: this drops any existing `iiot` keyspace -- and all stored
    readings -- on every start-up. Presumably intentional for the experiment
    setup; confirm before reusing elsewhere.

    Returns:
        A session connected to the `iiot` keyspace with the `temperature`
        and `temperature_invalid` tables present.

    Raises:
        Exception: whatever `cluster.connect()` raises when the cluster is
            unreachable (re-raised after logging).
    """
    auth_provider = PlainTextAuthProvider(username='cassandra', password='<PASSWORD>')
    cluster = Cluster(['cassandra-0.cassandra-headless.uc0.svc.cluster.local'],
                      auth_provider=auth_provider)
    try:
        session = cluster.connect()
    except Exception:
        # Bug fix: the original swallowed this and then hit a NameError on the
        # unbound `session` below, masking the real connection failure.
        logging.error('Problem while connecting to Cassandra.', exc_info=True)
        raise
    try:
        session.execute('DROP keyspace IF EXISTS iiot;')
        logging.info("Creating keyspace...")
        session.execute(
            "create keyspace iiot with replication={'class': 'SimpleStrategy', 'replication_factor' : 1};")
        logging.info('Created keyspace iiot.')
    except Exception:
        # Best-effort: the keyspace may already be usable; keep going.
        logging.error('Problem while dropping or creating iiot keyspace.', exc_info=True)
    try:
        session = cluster.connect('iiot')
    except Exception:
        logging.error('Problem while connecting to Cassandra.', exc_info=True)
        raise
    query_temperature_valid_table = '''
        create table temperature (
        readingTS timestamp,
        processTS timestamp,
        sensorID text,
        readingValue float,
        primary key(readingTS)
        );'''
    query_temperature_invalid_table = '''
        create table temperature_invalid (
        readingTS timestamp,
        processTS timestamp,
        sensorID text,
        readingValue float,
        primary key(readingTS)
        );'''
    try:
        session.execute(query_temperature_valid_table)
    except Exception:
        logging.info('Table already exists. Not creating.')
    try:
        session.execute(query_temperature_invalid_table)
    except Exception:
        logging.info('Table already exists. Not creating.')
    return session
# Cast the threshold/invalid-sentinel values to the configured numeric type.
# NOTE(review): if VALUE_TYPE is neither 'integer' nor 'float', these names
# stay unbound; the process only exits later at the Temperature class
# definition -- confirm that ordering is intended.
if value_type == 'integer':
    min_threshold_value = int(os.environ['MIN_THRESHOLD_VALUE'])
    max_threshold_value = int(os.environ['MAX_THRESHOLD_VALUE'])
    invalid_value = int(os.environ['INVALID_VALUE'])
elif value_type == 'float':
    min_threshold_value = float(os.environ['MIN_THRESHOLD_VALUE'])
    max_threshold_value = float(os.environ['MAX_THRESHOLD_VALUE'])
    invalid_value = float(os.environ['INVALID_VALUE'])
# Remove old data files from the persistent volume.
if save_data == 'file':
    if os.path.exists(data_file_normal):
        os.remove(data_file_normal)
        logging.info('Removed old file from the PV.')
    else:
        logging.info('The file does not exist.')
    if os.path.exists(data_file_anomalous):
        os.remove(data_file_anomalous)
        logging.info('Removed old file from the PV.')
    else:
        logging.info('The file does not exist.')
# Open the data files for appending.
# NOTE(review): these open unconditionally, even when save_data != 'file'.
# Bug fix: the original error messages interpolated the *file object* being
# assigned, which is unbound when open() fails, raising NameError inside the
# handler; log the path instead.
try:
    temperature_file_normal = open(data_file_normal, "a")
except Exception:
    logging.error(f'Exception while opening file {data_file_normal}.', exc_info=True)
try:
    temperature_file_anomalous = open(data_file_anomalous, "a")
except Exception:
    logging.error(f'Exception while opening file {data_file_anomalous}.', exc_info=True)
# Connect to MQTT broker
def connect_to_mqtt():
    """Create an MQTT client and attempt to connect it to the broker.

    Returns:
        The `mqtt_client.Client` instance. On connection failure the error is
        logged and the (unconnected) client is still returned, preserving the
        original best-effort behavior.
    """
    def on_connect(client, userdata, flags, rc):
        # rc == 0 signals a successful CONNACK from the broker.
        if rc == 0:
            logging.info('Connected to MQTT Broker!')
        else:
            logging.critical(f'Failed to connect, return code {rc}.')
    # Bug fix: instantiate outside the try so a failure cannot leave the
    # returned name unbound (the original returned an undefined `client`
    # when Client() itself raised, causing a NameError at the call site).
    client = mqtt_client.Client(mqtt_client_id)
    client.on_connect = on_connect
    try:
        client.connect(mqtt_broker, mqtt_port)
    except Exception:
        logging.critical('Exception while connecting MQTT.', exc_info=True)
    return client
# Publish message to MQTT topic
def mqtt_publish_message(mqtt_publisher, message):
    """Prefix *message* with a processing timestamp and publish it."""
    time_ms = round(time.time() * 1000)
    stamped = f'processed_ts:{time_ms} {message}'
    status = mqtt_publisher.publish(mqtt_topic, stamped)[0]
    # paho returns (rc, mid); rc == 0 means the publish was queued OK.
    if status == 0:
        logging.info(f"Send {stamped} to topic `{mqtt_topic}`")
    else:
        logging.error(f"Failed to send message to topic {mqtt_topic}")
# Module-level MQTT client shared by all publish calls below.
client = connect_to_mqtt()
# Parse message for MQTT
def parse_message_for_actuator(reading_ts, actuator, action):
    """Format an actuator command for *actuator* and publish it over MQTT."""
    logging.info(f'{action} heating system action is generated.')
    payload = f"reading_ts:{reading_ts} actuator_id:{actuator} action:{action}"
    mqtt_publish_message(client, payload)
# Create a class to parse message from Kafka. The record field type follows
# the configured VALUE_TYPE; anything else is a fatal configuration error.
if value_type == 'integer':
    class Temperature(faust.Record, serializer='json'):
        reading_ts: int
        sensor: str
        value: int
elif value_type == 'float':
    class Temperature(faust.Record, serializer='json'):
        reading_ts: int
        sensor: str
        value: float
else:
    logging.critical(f'Invalid value type {value_type} is provided. Exiting.')
    sys.exit()
# Only open a Cassandra session when that backend is selected.
if save_data == 'cassandra':
    session = connect_to_cassandra()
# Faust application and the source topic it consumes Temperature records from.
app = faust.App('temp-analyzer', broker=kafka_broker, )
topic = app.topic(kafka_topic, value_type=Temperature)
# Create worker to process incoming streaming data
@app.agent(topic)
async def check(temperatures):
    """Faust agent: validate each reading, emit actuator actions, persist it.

    Per message: invalid (sentinel) readings are stored separately and
    discarded; otherwise readings outside [min_threshold_value,
    max_threshold_value] trigger an actuator message, and the reading is
    written to Cassandra or a file depending on save_data.
    """
    async for temperature in temperatures:
        start_time = time.perf_counter()
        logging.info(f'Reading: {temperature.value} Timestamp: {temperature.reading_ts} Sensor: {temperature.sensor}')
        # Write data to a file
        #temperature_file.write(temperature.reading_ts + "," + temperature.sensor + "," + temperature.value + "\n")
        # ts = int(temperature.reading_ts[:-3])
        processts = int(time.time())
        # NOTE(review): reading_ts is declared int on the Temperature record
        # but is sliced like a string here (dropping the last 3 chars, i.e.
        # ms -> s) -- confirm the producer actually sends it as a string.
        readingts = int(temperature.reading_ts[:-3])
        # readingts = datetime.datetime.fromtimestamp(ts)
        # Create some checks on incoming data to create actuator actions
        if value_type == 'integer':
            if int(temperature.value) == invalid_value:
                # Sentinel/invalid reading: persist separately, skip analysis.
                if save_data == 'cassandra':
                    session.execute(
                        """
                        INSERT INTO temperature_invalid (readingTS, ProcessTS, sensorID, readingValue) VALUES(%s, %s, %s, %s)
                        """,
                        (readingts, processts, temperature.sensor, float(temperature.value))
                    )
                elif save_data == 'file':
                    temperature_file_anomalous.write(
                        str(readingts) + "," + str(processts) + "," + temperature.sensor + "," + temperature.value + "\n")
                logging.warning('Anomalous value found. It is discarded from further analysis.')
            else:
                # Out-of-range readings generate an actuator action
                # (actuator_actions[0] below minimum, [2] above maximum).
                if int(temperature.value) < min_threshold_value:
                    parse_message_for_actuator(temperature.reading_ts, actuator_id, actuator_actions[0])
                elif int(temperature.value) > max_threshold_value:
                    parse_message_for_actuator(temperature.reading_ts, actuator_id, actuator_actions[2])
                else:
                    logging.info('No action required.')
                if save_data == 'cassandra':
                    session.execute(
                        """
                        INSERT INTO temperature (readingTS, ProcessTS, sensorID, readingValue) VALUES(%s, %s, %s, %s)
                        """,
                        (readingts, processts, temperature.sensor, int(temperature.value))
                    )
                elif save_data == 'file':
                    temperature_file_normal.write(
                        str(readingts) + "," + str(processts) + "," + temperature.sensor + "," + temperature.value + "\n")
        elif value_type == 'float':
            # Same pipeline as the integer branch, with float parsing.
            if float(temperature.value) == invalid_value:
                if save_data == 'cassandra':
                    session.execute(
                        """
                        INSERT INTO temperature_invalid (readingTS, ProcessTS, sensorID, readingValue) VALUES(%s, %s, %s, %s)
                        """,
                        (readingts, processts, temperature.sensor, float(temperature.value))
                    )
                elif save_data == 'file':
                    temperature_file_anomalous.write(
                        str(readingts) + "," + str(processts) + "," + temperature.sensor + "," + temperature.value + "\n")
                logging.warning('Anomalous value found. It is discarded from further analysis.')
            else:
                if float(temperature.value) < min_threshold_value:
                    parse_message_for_actuator(temperature.reading_ts, actuator_id, actuator_actions[0])
                elif float(temperature.value) > max_threshold_value:
                    parse_message_for_actuator(temperature.reading_ts, actuator_id, actuator_actions[2])
                else:
                    logging.info('No action required.')
                if save_data == 'cassandra':
                    session.execute(
                        """
                        INSERT INTO temperature (readingTS, ProcessTS, sensorID, readingValue) VALUES(%s, %s, %s, %s)
                        """,
                        (readingts, processts, temperature.sensor, float(temperature.value))
                    )
                elif save_data == 'file':
                    temperature_file_normal.write(
                        str(readingts) + "," + str(processts) + "," + temperature.sensor + "," + temperature.value + "\n")
        end_time = time.perf_counter()
        time_ms = (end_time - start_time) * 1000
        logging.info(f'Message processing took {time_ms} ms.')
if __name__ == '__main__':
    # Hand control to the Faust worker CLI.
    app.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" jsontool - Perform some actions with json using CLI
http://msztolcman.github.io/jsontool
Author: <NAME> (<EMAIL>)
Get help with: jsontool --help
Information about version: jsontool --version
"""
from __future__ import print_function, unicode_literals
__version__ = '0.2.1'
import argparse
import json
import os.path
import sys
import textwrap
def show_version():
    """
    Print the program name and version, then terminate with status 0.
    :return:
    """
    prog = os.path.basename(sys.argv[0])
    print('{0}: version {1}'.format(prog, __version__))
    sys.exit(0)
def build_filters(filter_definitions):
    """
    Build function to filter jsons.
    Filter definitions is a list of strings in format: key:value[:modifier]
    :param filter_definitions: list of strings
    :return: function taking a parsed JSON item and returning a truthy value
             when the item matches every filter
    """
    if not filter_definitions:
        return lambda item: bool(item)
    modifiers = {
        'i': int,
        'f': float,
        's': str,
        'b': bool,
    }
    filters = []
    for definition in filter_definitions:
        try:
            key, value, modifier = definition.split(':', 2)
            modifier = modifiers.get(modifier, None)
        except ValueError:
            key, value = definition.split(':', 1)
            modifier = str
        if not modifier:
            modifier = lambda item: item
        # Bind key/value/modifier as default arguments: a plain closure is
        # late-binding, so previously every filter tested only the *last*
        # definition in the list.
        filters.append(
            lambda data, key=key, value=value, modifier=modifier:
                key in data and data[key] == modifier(value))
    def _filter(item):
        return item and all(flt(item) for flt in filters)
    return _filter
def get_printer(mode='auto'):
    """
    Generate printer function.
    :param mode: string: always, never or auto
    :return: function that prints a JSON string, colorized when possible
    """
    def printer(data):
        print(data)
    if mode not in ('auto', 'always'):
        return printer
    try:
        from pygments import highlight
        from pygments.lexers import get_lexer_by_name
        from pygments.formatters import get_formatter_by_name
        if mode == 'always' or sys.stdout.isatty():
            lexer = get_lexer_by_name('json')
            formatter = get_formatter_by_name('terminal256')
            def printer(data):
                print(highlight(data, lexer, formatter), end='', file=sys.stdout)
    except ImportError as e:
        if mode == 'always':
            import warnings
            warnings.warn('No pygments module available, cannot colorize output')
    return printer
def json_loads(data):
    """
    Safely convert data to json (do not throw an exception on fail)
    :param data:
    :return: parsed json, or None when *data* is not valid JSON
    """
    try:
        parsed = json.loads(data)
    except ValueError:
        return None
    return parsed
def wrap_text(txt):
    """
    Make custom wrapper for passed text.
    Splits given text for lines, and for every line apply custom
    textwrap.TextWrapper settings, then return reformatted string.
    """
    wrapper = textwrap.TextWrapper(
        width = 72,
        expand_tabs = True,
        replace_whitespace = False,
        drop_whitespace = True,
        subsequent_indent = '    ',
    )
    wrapped_lines = (wrapper.fill(line) for line in txt.splitlines())
    return "\n".join(wrapped_lines)
def main():
    """
    Run everything

    Reads one JSON document per stdin line, optionally filters (--grep)
    and sorts (--sort-by) them, and prints them back out.
    """
    epilog = "Argument to --grep option should be in format:\n" \
             "    field:value:modifier\n" \
             "Where: \n" \
             "- \"field\" must be in all JSONs. \n" \
             "- \"value\" is value to search \n" \
             "- \"modifier\" is optional, and say how to treat \"value\": allowed \n" \
             "  options are: s (string - default), b (boolean), i (integer), f (float)"
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog
    )
    p.add_argument('-f', '--sort-by', type=str,
        help='sort given list of JSONs by this field')
    p.add_argument('-r', '--sort-reversed', action='store_true',
        help='sort in reverse order')
    p.add_argument('-g', '--grep', action='append',
        help='filter list of JSONs using this rules (can be added more then once)')
    p.add_argument('-v', '--version', action='store_true',
        help='show version and exit')
    p.add_argument('--sort-keys', action='store_true',
        help='sort keys in printed JSONs (default: not sorted)')
    p.add_argument('--indent', type=int,
        help='indent JSONs with INDENT spaces')
    p.add_argument('--color', type=str, choices=('auto', 'always', 'never'), default='auto',
        help='manipulate colorizing of JSONs (default: auto)')
    args = p.parse_args()
    if args.version:
        show_version()
    filters = build_filters(args.grep)
    printer = get_printer(args.color)
    data = map(json_loads, sys.stdin)
    data = filter(filters, data)
    if args.sort_by:
        # On Python 3 filter() returns an iterator, which has no .sort();
        # sorted() materializes and sorts it and also works on Python 2.
        data = sorted(data, key=lambda item: item[args.sort_by],
                      reverse=args.sort_reversed)
    for line in data:
        line = json.dumps(line, sort_keys=args.sort_keys, indent=args.indent)
        printer(line)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import read_input as rin
def out_rslt(rslt_data):
    """Write optimization results, once by ID/generation and once by energy."""
    # ---------- asc in Struc_ID or Gen
    if rin.algo in ('RS', 'LAQA'):
        id_sort_keys = ['Struc_ID']
    elif rin.algo == 'BO':
        id_sort_keys = ['Gen', 'Struc_ID']
    else:
        id_sort_keys = None
    with open('./data/cryspy_rslt', 'w') as frslt:
        if id_sort_keys is not None:
            frslt.write(rslt_data.sort_values(by=id_sort_keys, ascending=True).to_string(index=False))
    # ---------- asc in energy
    with open('./data/cryspy_rslt_energy_asc', 'w') as fasc:
        fasc.write(rslt_data.sort_values(by=['Energy'], ascending=True).to_string(index=False))
# ---------- LAQA
def out_kpts(kpt_data):
    """Write the k-point mesh for each structure, ascending by ID."""
    # ------ asc in ID
    with open('./data/kpts_rslt', 'w') as frslt:
        frslt.write('{0:>10} {1:>10}\n'.format('Struc_ID', 'k-points'))
        for struc_id in sorted(kpt_data):
            frslt.write('{0:10d} {1}\n'.format(struc_id, kpt_data[struc_id]))
def out_LAQA_status(LAQA_step, LAQA_score, LAQA_energy, LAQA_bias):
    """Write the current LAQA status table, one row per structure,
    sorted by the latest score in descending order."""
    # ------ desc in score
    with open('./data/LAQA_status', 'w') as frslt:
        frslt.write('{0:>10} {1:>14} {2:>14} {3:>14} {4:>12} {5:>12}\n'.format(
            'Struc_ID', 'Score', 'E(eV/atom)', 'Bias', 'Selection', 'Step'))
        # Negate the latest score so the highest score comes first.
        for key, value in sorted(LAQA_score.items(), key=lambda x: -x[1][-1]):
            if LAQA_energy[key]:    # skip the float formatting when no energy was recorded yet
                frslt.write('{0:10d} {1: 14.8f} {2: 14.8f} {3: 14.8f} {4:12d} {5:12d}\n'.format(
                    key, value[-1], LAQA_energy[key][-1], LAQA_bias[key][-1],
                    len(LAQA_step[key]), sum(LAQA_step[key])))
            else:
                # Empty history: print the (empty) lists verbatim instead of floats.
                frslt.write('{0:10d} {1: 14.8f} {2:>14} {3:>14} {4:12d} {5:12d}\n'.format(
                    key, value[-1], LAQA_energy[key], LAQA_bias[key], len(LAQA_step[key]), sum(LAQA_step[key])))
def out_LAQA_step(LAQA_step):
    """Write the LAQA step counts per structure, ascending by ID."""
    # ------ asc in ID
    with open('./data/LAQA_step', 'w') as frslt:
        frslt.write('{0:>10} {1:>4}\n'.format('Struc_ID', 'Step'))
        for struc_id in sorted(LAQA_step):
            row = ''.join(' {:4d}'.format(s) for s in LAQA_step[struc_id])
            frslt.write('{0:10d}{1}\n'.format(struc_id, row))
def out_LAQA_score(LAQA_score):
    """Write the LAQA score history per structure, ascending by ID."""
    # ------ asc in ID
    with open('./data/LAQA_score', 'w') as frslt:
        frslt.write('{0:>10} {1:>14}\n'.format('Struc_ID', 'Score'))
        for struc_id in sorted(LAQA_score):
            row = ''.join(' {: 14.8f}'.format(s) for s in LAQA_score[struc_id])
            frslt.write('{0:10d}{1}\n'.format(struc_id, row))
def out_LAQA_energy(LAQA_energy):
    """Write the LAQA energy history (eV/atom) per structure, ascending by ID."""
    # ------ asc in ID
    with open('./data/LAQA_energy', 'w') as frslt:
        frslt.write('{0:>10} {1:>12}\n'.format('Struc_ID', 'E(eV/atom)'))
        for struc_id in sorted(LAQA_energy):
            row = ''.join(' {: 12.8f}'.format(e) for e in LAQA_energy[struc_id])
            frslt.write('{0:10d}{1}\n'.format(struc_id, row))
def out_LAQA_bias(LAQA_bias):
    """Write the LAQA bias history per structure, ascending by ID."""
    # ------ asc in ID
    with open('./data/LAQA_bias', 'w') as frslt:
        frslt.write('{0:>10} {1:>14}\n'.format('Struc_ID', 'Bias'))
        for struc_id in sorted(LAQA_bias):
            row = ''.join(' {: 14.8f}'.format(b) for b in LAQA_bias[struc_id])
            frslt.write('{0:10d}{1}\n'.format(struc_id, row))
def out_LAQA_id_hist(id_select_hist):
    """Write the structure IDs chosen at each LAQA selection step."""
    with open('./data/LAQA_select_id', 'w') as frslt:
        frslt.write('{0:>10} {1:>5}\n'.format('Selection', 'ID'))
        for selection, ids in enumerate(id_select_hist, start=1):
            row = ''.join(' {:5d}'.format(i) for i in ids)
            frslt.write('{0:10d}{1}\n'.format(selection, row))
|
<filename>polls/tests.py
import datetime
from django.test import TestCase
from django.utils import timezone
from django.core.urlresolvers import reverse
from .models import Question
# Create your tests here.
class QuestionMethodTests(TestCase):
    """Unit tests for Question.was_published_recently()."""
    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently should return false for questions whose pub_date is in the future
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date=time)
        self.assertEqual(future_question.was_published_recently(), False)
    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently should return false for questions whose pub_date is older than 1 day
        """
        time = timezone.now() - datetime.timedelta(days=30)
        old_question = Question(pub_date=time)
        self.assertEqual(old_question.was_published_recently(), False)
    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently should return true for questions whose pub_date is within 1 day
        """
        time = timezone.now() - datetime.timedelta(hours=1)
        recent_question = Question(pub_date=time)
        self.assertEqual(recent_question.was_published_recently(), True)
def create_question(question_text, days):
    """
    Creates a question at any days from now
    :param question_text: content of the question to be published
    :param days: number of days offset to now, negative for the past and positive for the future
    :return: the Question instance
    """
    pub_date = timezone.now() + datetime.timedelta(days)
    return Question.objects.create(question_text=question_text,
                                   pub_date=pub_date)
class QuestionViewTests(TestCase):
    """Integration tests for the polls index view."""
    def test_index_view_with_no_questions(self):
        """
        If no questions exist, display a message
        :return:
        """
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available currently.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])
    def test_index_view_with_a_past_question(self):
        """
        Questions with a pub_date in the past should be displayed on the index page
        :return:
        """
        create_question(question_text="Past question", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question>']
        )
    def test_index_view_with_a_future_question(self):
        """
        Questions with a pub_date in the future should not be displayed in the index page
        :return:
        """
        create_question(question_text="Future question", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, "No polls are available currently.",
                            status_code=200)
        self.assertQuerysetEqual(response.context['latest_question_list'],
                                 [])
    # NOTE(review): "indext" in the next two method names is a typo; left
    # unchanged because test names are discovered/reported by the runner.
    def test_indext_view_with_future_and_past_questions(self):
        """
        For the cases where both past and future questions exist, only the past questions will be displayed
        :return:
        """
        create_question(question_text="Past question", days=-30)
        create_question(question_text="Future question", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question>']
        )
    def test_indext_view_with_two_past_questions(self):
        """
        If more than one past questions exist, all of them should be displayed
        :return:
        """
        create_question(question_text="Past question 1", days=-30)
        create_question(question_text="Past question 2", days=-5)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question 2>', '<Question: Past question 1>']
        )
class QuestionIndexDetailTests(TestCase):
    """Integration tests for the polls detail view."""
    def test_detail_view_with_a_future_question(self):
        """
        the detail view for a question in the future should return a 404 not found error
        :return:
        """
        future_question = create_question(question_text='Future question.', days=5)
        response = self.client.get(reverse('polls:detail',
                                           args=(future_question.id,)))
        self.assertEqual(response.status_code, 404)
    def test_detail_view_with_a_past_question(self):
        """
        the detail view for a published question should display its content
        :return:
        """
        past_question = create_question(question_text='Past question.',
                                        days=-5)
        response = self.client.get(reverse('polls:detail',
                                           args=(past_question.id,)))
        self.assertContains(response, past_question.question_text, status_code=200)
|
import requests
from requests.auth import AuthBase
# ANSI escape sequences used for terminal coloring.
RED_COLOR = "\033[91m"
GREEN_COLOR = "\033[92m"
WARN_BG_COLOR = "\033[43m"
WARN_COLOR = "\033[93m"
BLUE_COLOR = "\033[94m"
ENDTERM = "\033[0m"
class TokenAuth(AuthBase):
    """Implements a custom authentication scheme."""
    def __init__(self, token):
        # token: pre-encoded credential string, sent verbatim after "Basic ".
        self.token = token
    def __call__(self, r):
        # Attach the Authorization header to the outgoing request.
        r.headers["Authorization"] = "Basic " + self.token
        return r
# ==============================================================================
class JiraJSONParser:
    """Collecting & parsing Jira tasks via REST API.

    Fetch an issue with getAndParse(), then read the parsed state from the
    instance attributes (issueProgress, issueAggregateProgress, ...).
    """
    # --- per-issue parse state (reset at the start of every getAndParse) ---
    issueHasSubtasks = False          # True when a non-subtask issue lists subtasks
    issueJson = {}                    # raw issue JSON from the REST API
    subtasksCount = 0                 # number of subtasks listed on the issue
    subtasksWOEstimationCount = 0     # subtasks lacking an estimate and not Done
    subtasksWOEstimation = []         # keys of subtasks lacking an estimate
    subtasksOriginalEstimation = 0    # summed subtask estimates (hours string after getAndParseSubtasks)
    issueTypeName = "Issue"           # display name; "Story" when subtasks exist
    issueProgress = {}                # the issue's own time-tracking figures
    issueAggregateProgress = {}       # figures aggregated over subtasks
    issueOriginalTypeName = ""        # issue type name as reported by Jira
    issueStatus = ""                  # status name as reported by Jira
    # --- request configuration ---
    requestParameters = {"Content-Type": "application/json"}
    authToken = ""
    jiraBaseAPIURL = ""
    def __init__(self, authToken: str = "", jiraBaseAPIURL: str = ""):
        # jiraBaseAPIURL is expected to end so that url + issueKey is valid.
        self.authToken = authToken
        self.jiraBaseAPIURL = jiraBaseAPIURL
    def getAndParse(self, issueKey: str):
        """Fetch issue *issueKey* via REST and parse its JSON.

        Raises Exception when the response status is not 200.
        """
        # Reset all per-issue state so stale values never leak between calls.
        self.issueHasSubtasks = False
        self.issueJson = {}
        self.subtasksCount = 0
        self.subtasksWOEstimationCount = 0
        self.subtasksWOEstimation = []
        self.subtasksOriginalEstimation = 0
        self.issueTypeName = "Issue"
        self.issueProgress = {}
        self.issueAggregateProgress = {}
        self.issueOriginalTypeName = ""
        self.issueStatus = ""
        resp = requests.get(
            self.jiraBaseAPIURL + issueKey,
            auth=TokenAuth(self.authToken),
            params=self.requestParameters,
        )
        if resp.status_code != 200:
            raise Exception(
                "Issue {} details response code: {}".format(issueKey, resp.status_code)
            )
        self.parseIssueJson(resp.json())
    def parseIssueJson(self, issueExternalJson: dict):
        """Populate the instance attributes from a raw issue JSON dict."""
        self.issueJson = issueExternalJson
        self.subtasksCount = len(self.issueJson["fields"]["subtasks"])
        # An issue "has subtasks" only when it is not itself a subtask.
        self.issueHasSubtasks = (
            not bool(self.issueJson["fields"]["issuetype"]["subtask"])
            and self.subtasksCount > 0
        )
        if self.subtasksCount > 0:
            self.issueTypeName = "Story"
        self.issueOriginalTypeName = self.issueJson["fields"]["issuetype"]["name"]
        self.issueStatus = self.issueJson["fields"]["status"]["name"]
        # --- own-issue progress (fields are optional in Jira's payload) ---
        self.issueProgress["originalEstimate"] = 0
        if (
            "timetracking" in self.issueJson["fields"]
            and "originalEstimateSeconds" in self.issueJson["fields"]["timetracking"]
        ):
            self.issueProgress["originalEstimate"] = self.issueJson["fields"][
                "timetracking"
            ]["originalEstimateSeconds"]
        self.issueProgress["total"] = self.issueJson["fields"]["progress"]["total"]
        self.issueProgress["progress"] = self.issueJson["fields"]["progress"][
            "progress"
        ]
        self.issueProgress["progressPercent"] = 0
        if "percent" in self.issueJson["fields"]["progress"]:
            self.issueProgress["progressPercent"] = self.issueJson["fields"][
                "progress"
            ]["percent"]
        self.issueProgress["timeLeft"] = 0
        if self.issueJson["fields"]["timeestimate"] != None:
            self.issueProgress["timeLeft"] = self.issueJson["fields"]["timeestimate"]
        # Time left measured against the original estimate (may go negative
        # when more time was logged than estimated).
        self.issueProgress["timeLeftOriginal"] = 0
        if self.issueProgress["originalEstimate"] > 0:
            self.issueProgress["timeLeftOriginal"] = (
                self.issueProgress["originalEstimate"] - self.issueProgress["progress"]
            )
        # --- aggregated (issue + subtasks) progress ---
        self.issueAggregateProgress["originalEstimate"] = 0
        self.issueAggregateProgress["total"] = 0
        self.issueAggregateProgress["progress"] = 0
        self.issueAggregateProgress["progressPercent"] = 0
        self.issueAggregateProgress["timeLeft"] = 0
        if (
            self.issueHasSubtasks
            and self.issueJson["fields"]
            and self.issueJson["fields"]["aggregateprogress"]
        ):
            if (
                self.issueJson["fields"]
                and self.issueJson["fields"]["aggregatetimeoriginalestimate"]
            ):
                self.issueAggregateProgress["originalEstimate"] = self.issueJson[
                    "fields"
                ]["aggregatetimeoriginalestimate"]
            self.issueAggregateProgress["total"] = self.issueJson["fields"][
                "aggregateprogress"
            ]["total"]
            self.issueAggregateProgress["progress"] = self.issueJson["fields"][
                "aggregateprogress"
            ]["progress"]
            if "percent" in self.issueJson["fields"]["aggregateprogress"]:
                self.issueAggregateProgress["progressPercent"] = self.issueJson[
                    "fields"
                ]["aggregateprogress"]["percent"]
            if self.issueJson["fields"]["aggregatetimeestimate"] != None:
                self.issueAggregateProgress["timeLeft"] = self.issueJson["fields"][
                    "aggregatetimeestimate"
                ]
        self.issueAggregateProgress["timeLeftOriginal"] = 0
        if self.issueAggregateProgress["originalEstimate"] > 0:
            self.issueAggregateProgress["timeLeftOriginal"] = (
                self.issueAggregateProgress["originalEstimate"]
                - self.issueAggregateProgress["progress"]
            )
    def getAndParseSubtasks(self, logProgress: bool = True):
        """Fetch every subtask, summing estimates and tracking unestimated ones.

        Side effects: updates subtasksWOEstimation(Count) and
        subtasksOriginalEstimation (converted to an hours string at the end).
        Raises Exception on any non-200 subtask response.
        """
        self.subtasksWOEstimationCount = 0
        self.subtasksOriginalEstimation = 0
        self.subtasksWOEstimation = []
        i = 0  # toggles the /-\ spinner character
        if self.issueHasSubtasks:
            printLine = "Subtasks count: " + str(self.subtasksCount) + " "
            if logProgress:
                print("")
                print(printLine)
            for subtask in self.issueJson["fields"]["subtasks"]:
                if logProgress:
                    # sys.stdout.write('\\')
                    loader = "/"
                    if i == 1:
                        loader = "\\"
                        i = 0
                    else:
                        i = 1
                    # "\033[F" moves the cursor up one line to redraw in place.
                    print("\033[F" + printLine + loader)
                subtaskURL = self.jiraBaseAPIURL + subtask["key"]
                resp = requests.get(
                    subtaskURL,
                    auth=TokenAuth(self.authToken),
                    params=self.requestParameters,
                )
                if resp.status_code != 200:
                    raise Exception(
                        "Issue {} details response code: {}".format(
                            subtask["key"], resp.status_code
                        )
                    )
                subtaskJson = resp.json()
                if "originalEstimate" not in subtaskJson["fields"]["timetracking"]:
                    self.subtasksWOEstimation.append(subtask["key"])
                    # Done subtasks without an estimate are not counted as missing.
                    if subtask["fields"]["status"]["name"] != "Done":
                        self.subtasksWOEstimationCount += 1
                    # print(', ESTIMATION needed!')
                else:
                    if (
                        "originalEstimateSeconds"
                        in subtaskJson["fields"]["timetracking"]
                    ):
                        self.subtasksOriginalEstimation += subtaskJson["fields"][
                            "timetracking"
                        ]["originalEstimateSeconds"]
                        # print(', originalEstimate = ', self.convertMsToHours(subtaskJson['fields']['timetracking']['originalEstimateSeconds']))
            self.subtasksOriginalEstimation = self.convertMsToHours(
                self.subtasksOriginalEstimation
            )
    def convertMsToHours(self, valueMs: int, showUnit: bool = True) -> str:
        """Convert a duration to an hours string, optionally suffixed with 'h'.

        NOTE(review): despite the name, this divides by 3600 -- callers pass
        values from Jira's *Seconds fields, so the input is seconds, not ms.
        """
        result = str(valueMs / 3600)
        if showUnit:
            result += "h"
        return result
    @staticmethod
    def formJQLQuery(
        projectId: str,
        excludeDone: bool = True,
        excludeOpen: bool = True,
        filter: int = 0,
        taskTypes=["Task", "Story", "Bug"],
    ) -> str:
        """Build a JQL query string for the given project and options.

        :param filter: optional saved-filter id (appended when > 0)
        """
        jSQLString = (
            'project = "' + projectId + '" and type in (' + ",".join(taskTypes) + ")"
        )
        if filter > 0:
            jSQLString += " AND filter = " + str(filter)
        if excludeDone:
            jSQLString += " AND status != Done"
        if excludeOpen:
            jSQLString += " AND status != Open"
        jSQLString += " ORDER BY created DESC"
        return jSQLString
    # --- output related ------------------------------------------------
    def printGeneralInfo(self):
        """Print the issue type, subtask flag and colorized status."""
        print("Issue type:", self.issueOriginalTypeName)
        print("Are there subtasks?:", self.issueHasSubtasks)
        termColor = WARN_BG_COLOR
        if self.issueStatus == "Done":
            termColor = GREEN_COLOR
        if self.issueStatus == "To Do" or self.issueStatus == "Open":
            termColor = BLUE_COLOR
        print(self.issueTypeName + " status:", termColor + self.issueStatus + ENDTERM)
    def printProgressInfo(self):
        """Print the own-issue progress block and, when subtasks exist,
        the aggregated progress block (time-left values colorized)."""
        if (
            self.issueProgress["total"] > 0
            or self.issueProgress["originalEstimate"] > 0
        ):
            print("")
            print("Exact " + self.issueTypeName.lower() + " progress:")
            print(
                "  Original estimation = ",
                self.convertMsToHours(self.issueProgress["originalEstimate"]),
            )
            print("  Total:", self.convertMsToHours(self.issueProgress["total"]))
            print("  Progress:", self.convertMsToHours(self.issueProgress["progress"]))
            print("   ", str(self.issueProgress["progressPercent"]) + "%")
            # Red when no time (or negative time) is left.
            timeLeftColor = GREEN_COLOR
            if self.issueProgress["timeLeft"] <= 0:
                timeLeftColor = RED_COLOR
            print(
                "  Time left: ",
                timeLeftColor
                + self.convertMsToHours(self.issueProgress["timeLeft"])
                + ENDTERM,
            )
            timeLeftColor = GREEN_COLOR
            if self.issueProgress["timeLeftOriginal"] <= 0:
                timeLeftColor = RED_COLOR
            print(
                "  Time left (original): ",
                timeLeftColor
                + self.convertMsToHours(self.issueProgress["timeLeftOriginal"])
                + ENDTERM,
            )
        if self.issueAggregateProgress["total"] > 0 and self.issueHasSubtasks:
            print("")
            print("Aggregated progress:")
            print(
                "  Original estimation = ",
                self.convertMsToHours(self.issueAggregateProgress["originalEstimate"]),
            )
            print(
                "  Total:", self.convertMsToHours(self.issueAggregateProgress["total"])
            )
            print(
                "  Progress:",
                self.convertMsToHours(self.issueAggregateProgress["progress"]),
            )
            print("   ", str(self.issueAggregateProgress["progressPercent"]) + "%")
            timeLeftColor = GREEN_COLOR
            if self.issueAggregateProgress["timeLeft"] <= 0:
                timeLeftColor = RED_COLOR
            print(
                "  Time left: ",
                timeLeftColor
                + self.convertMsToHours(self.issueAggregateProgress["timeLeft"])
                + ENDTERM,
            )
            timeLeftColor = GREEN_COLOR
            if self.issueAggregateProgress["timeLeftOriginal"] <= 0:
                timeLeftColor = RED_COLOR
            print(
                "  Time left (original): ",
                timeLeftColor
                + self.convertMsToHours(self.issueAggregateProgress["timeLeftOriginal"])
                + ENDTERM,
            )
    def getCompactProgressInfo(self) -> str:
        """Return a one/two-line colorized summary of both progress blocks.

        Legend: e = estimate, p = progress/total, l = time left,
        lo = time left vs original estimate.
        """
        originalInfoLine = ""
        if (
            self.issueProgress["total"] > 0
            or self.issueProgress["originalEstimate"] > 0
        ):
            originalInfoLine += (
                "Original: e"
                + self.convertMsToHours(self.issueProgress["originalEstimate"])
                + ", p"
            )
            # Highlight the total when it exceeds the original estimate.
            totalColor = ""
            totalEndColor = ""
            if self.issueProgress["total"] > self.issueProgress["originalEstimate"]:
                totalColor = WARN_COLOR
                totalEndColor = ENDTERM
            originalInfoLine += (
                self.convertMsToHours(self.issueProgress["progress"], False)
                + "/"
                + totalColor
                + self.convertMsToHours(self.issueProgress["total"])
                + totalEndColor
            )
            originalInfoLine += ", " + str(self.issueProgress["progressPercent"]) + "%"
            timeLeftColor = GREEN_COLOR
            if self.issueProgress["timeLeft"] <= 0:
                timeLeftColor = RED_COLOR
            originalInfoLine += (
                ", l"
                + timeLeftColor
                + self.convertMsToHours(self.issueProgress["timeLeft"], False)
                + ENDTERM
            )
            timeLeftColor = GREEN_COLOR
            if self.issueProgress["timeLeftOriginal"] <= 0:
                timeLeftColor = RED_COLOR
            originalInfoLine += (
                ", lo"
                + timeLeftColor
                + self.convertMsToHours(self.issueProgress["timeLeftOriginal"])
                + ENDTERM
            )
        if self.issueAggregateProgress["total"] > 0 and self.issueHasSubtasks:
            if len(originalInfoLine) > 0:
                originalInfoLine += "\r\n"
            originalInfoLine += (
                "Aggregated: e"
                + self.convertMsToHours(self.issueAggregateProgress["originalEstimate"])
                + ", p"
            )
            totalColor = ""
            totalEndColor = ""
            if (
                self.issueAggregateProgress["total"]
                > self.issueAggregateProgress["originalEstimate"]
            ):
                totalColor = WARN_COLOR
                totalEndColor = ENDTERM
            originalInfoLine += (
                self.convertMsToHours(self.issueAggregateProgress["progress"], False)
                + "/"
                + totalColor
                + self.convertMsToHours(self.issueAggregateProgress["total"])
                + totalEndColor
            )
            originalInfoLine += (
                ", " + str(self.issueAggregateProgress["progressPercent"]) + "%"
            )
            timeLeftColor = GREEN_COLOR
            if self.issueAggregateProgress["timeLeft"] <= 0:
                timeLeftColor = RED_COLOR
            originalInfoLine += (
                ", l"
                + timeLeftColor
                + self.convertMsToHours(self.issueAggregateProgress["timeLeft"], False)
                + ENDTERM
            )
            timeLeftColor = GREEN_COLOR
            if self.issueAggregateProgress["timeLeftOriginal"] <= 0:
                timeLeftColor = RED_COLOR
            originalInfoLine += (
                ", lo"
                + timeLeftColor
                + self.convertMsToHours(self.issueAggregateProgress["timeLeftOriginal"])
                + ENDTERM
            )
        return originalInfoLine
    def printSubtasksStats(self):
        """Print subtask estimation statistics gathered by getAndParseSubtasks()."""
        if self.issueHasSubtasks:
            print("")
            print("Sub-tasks initial estimation: ", self.subtasksOriginalEstimation)
            print("Sub-tasks with NO estimation: ", self.subtasksWOEstimationCount)
|
<filename>ScrappingTool/utils/get_etym.py<gh_stars>1-10
'''
Date: 2021-02-20 22:05:36
LastEditors: Jecosine
LastEditTime: 2021-02-21 15:38:24
'''
from bs4 import BeautifulSoup as bs
import requests
import sqlite3
import time
import random
import re
import sys
# Retry failed HTTP requests up to 5 times and avoid keep-alive sockets.
requests.adapters.DEFAULT_RETRIES = 5
session = requests.session()
session.keep_alive = False
# Entry page URL template for a single word.
url = 'https://www.etymonline.com/word/{}?utm_source=extension_searchhint'
# Local SQLite DB holding the word list; etymologies are written back here.
con = sqlite3.connect('./test.db')
cursor = con.cursor()
update_sql = 'update word set en_etym=? where spell=?'
# Browser-like request headers (copied from a real Chrome session).
header = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.9',
    'Connection': 'close',
    'Cache-Control': 'max-age=0',
    # 'Cookie': '_ga=GA1.2.3862693.1612697354; __gads=ID=86ff14de8e3d4bd3-221f66e9f3c50037:T=1612697388:RT=1612697388:S=ALNI_MYm0kKIZ7MXKnc7vAdlm3M1DvDHoA; _gid=GA1.2.636938744.1613830066; _gat_gtag_UA_26425972_1=1',
    'DNT': '1',
    'Host': 'www.etymonline.com',
    'If-None-Match': 'W/"7c44-ZDH96gGi02N0bMrNWxh06pAb3DM"',
    'sec-ch-ua': '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'same-origin',
    'Sec-Fetch-Site': 'same-origin',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
}
# Local proxy for outbound requests -- assumes a proxy listens on port 1081.
proxies = {
    'http': '127.0.0.1:1081',
    'https': '127.0.0.1:1081'
}
def fetch_data(word):
    """Fetch and clean the etymology HTML snippet for *word* from etymonline.

    Returns '404' when the word has no entry, '' when the scraped block is
    unusable, otherwise the entry HTML with all tag attributes stripped.
    """
    global session
    resp = session.get(url.format(word), headers=header, proxies=proxies)
    if resp and resp.status_code == 404:
        return '404'
    html = resp.text
    bsobj = bs(html, 'html.parser')
    res = bsobj.findAll('div', {'class': 'word--C9UPa'})
    if not res:
        return '404'
    else:
        # process: str() of a tag list renders '[<div ...>...</div>]';
        # drop the surrounding brackets.
        res = str(res)
        if res and len(res) >= 2:
            res = res[1:-1]
        else:
            return ''
        # Strip all attributes, then drop the now-empty wrapper tags.
        # FIX: str.replace returns a new string -- the original discarded
        # every replacement result, so the empty tags were never removed.
        pattern = re.compile(r'[\w-]+="[^"]+"')
        res = re.sub(pattern, '', res, 0)
        for empty_tag in ('<div></div>', '<div ></div>', '<p ></p>', '<p></p>'):
            res = res.replace(empty_tag, '')
        return res
if __name__ == '__main__':
    # Fetch etymology for every word still lacking one, committing in
    # batches of 50 updates.
    word_list = cursor.execute(
        "select spell from word where en_etym is null").fetchall()
    word_list = [i[0] for i in word_list]
    data_patch = []
    str_pattern = 'current fetching No.{} ...'
    pre = ''
    for i in range(len(word_list)):
        # Redraw the progress counter in place.
        print('\b' * len(pre), end='')
        pre = str_pattern.format(i + 1)
        print(pre, end='')
        sys.stdout.flush()
        if i >= 50 and i % 50 == 0:
            cursor.executemany(update_sql, tuple(data_patch))
            con.commit()
            print('...saving {} to db...current {}'.format(
                len(data_patch), i + 1))
            data_patch.clear()
        # Randomized delay to avoid hammering the site.
        time.sleep(random.random() * 2 + 1)
        res = fetch_data(word_list[i])
        if res != '':
            if res == '404':
                print(" No etym for {}".format(word_list[i]))
                data_patch.append(('no', word_list[i]))
            else:
                # FIX: this append previously ran for 404 words too, queuing
                # a second update that overwrote the 'no' marker with '404'.
                data_patch.append((str(res), word_list[i]))
        else:
            print(' ERROR fetching {}'.format(word_list[i]))
    cursor.executemany(update_sql, tuple(data_patch))
    con.commit()
    con.close()
    session.close()
|
<gh_stars>0
# md5 : a0cd017919ae710459270dbdf15d2ab5
# sha1 : c1d7117f1fe991fc7e5d17ce0a437bbd1c32aa11
# sha256 : 9d18d8a88a7b5dfdd44e5e371e96a3fac90df9a901aa22ddf55d6774a9a3b811
# Mapping of export ordinal -> export name for the DLL identified by the
# hashes above. NOTE(review): the names (CoInternet*, URLDownload*, ...)
# match urlmon.dll's export table -- confirm against the actual binary.
ord_names = {
    109: b'FileBearsMarkOfTheWeb',
    110: b'GetPortFromUrlScheme',
    118: b'GetPropertyFromName',
    119: b'GetPropertyName',
    120: b'IsDWORDProperty',
    121: b'IsStringProperty',
    122: b'AsyncGetClassBits',
    123: b'AsyncInstallDistributionUnit',
    124: b'BindAsyncMoniker',
    125: b'CAuthenticateHostUI_CreateInstance',
    126: b'CDLGetLongPathNameA',
    127: b'CDLGetLongPathNameW',
    128: b'CORPolicyProvider',
    129: b'CoGetClassObjectFromURL',
    130: b'CoInstall',
    131: b'CoInternetCanonicalizeIUri',
    132: b'CoInternetCombineIUri',
    133: b'CoInternetCombineUrl',
    134: b'CoInternetCombineUrlEx',
    135: b'CoInternetCompareUrl',
    136: b'CoInternetCreateSecurityManager',
    137: b'CoInternetCreateZoneManager',
    138: b'CoInternetFeatureSettingsChanged',
    139: b'CoInternetGetMobileBrowserAppCompatMode',
    140: b'CoInternetGetMobileBrowserForceDesktopMode',
    141: b'CoInternetGetProtocolFlags',
    142: b'CoInternetGetSecurityUrl',
    143: b'CoInternetGetSecurityUrlEx',
    144: b'CoInternetGetSession',
    145: b'CoInternetIsFeatureEnabled',
    146: b'CoInternetIsFeatureEnabledForIUri',
    147: b'CoInternetIsFeatureEnabledForUrl',
    148: b'CoInternetIsFeatureZoneElevationEnabled',
    149: b'CoInternetParseIUri',
    150: b'CoInternetParseUrl',
    151: b'CoInternetQueryInfo',
    152: b'CoInternetSetFeatureEnabled',
    153: b'CoInternetSetMobileBrowserAppCompatMode',
    154: b'CoInternetSetMobileBrowserForceDesktopMode',
    155: b'CompareSecurityIds',
    156: b'CompatFlagsFromClsid',
    157: b'CopyBindInfo',
    158: b'CopyStgMedium',
    159: b'CreateAsyncBindCtx',
    160: b'CreateAsyncBindCtxEx',
    161: b'CreateFormatEnumerator',
    162: b'CreateIUriBuilder',
    163: b'CreateURLMoniker',
    164: b'CreateURLMonikerEx2',
    165: b'CreateURLMonikerEx',
    166: b'CreateUri',
    167: b'CreateUriFromMultiByteString',
    168: b'CreateUriPriv',
    169: b'CreateUriWithFragment',
    170: b'DllCanUnloadNow',
    171: b'DllGetClassObject',
    172: b'DllInstall',
    173: b'DllRegisterServer',
    174: b'DllRegisterServerEx',
    175: b'DllUnregisterServer',
    176: b'Extract',
    177: b'FaultInIEFeature',
    178: b'FindMediaType',
    179: b'FindMediaTypeClass',
    180: b'FindMimeFromData',
    181: b'GetAddSitesFileUrl',
    182: b'GetClassFileOrMime',
    183: b'GetClassURL',
    184: b'GetComponentIDFromCLSSPEC',
    185: b'GetIDNFlagsForUri',
    186: b'GetIUriPriv2',
    187: b'GetIUriPriv',
    188: b'GetLabelsFromNamedHost',
    189: b'GetMarkOfTheWeb',
    190: b'GetSoftwareUpdateInfo',
    191: b'GetUrlmonThreadNotificationHwnd',
    192: b'GetZoneFromAlternateDataStreamEx',
    193: b'HlinkGoBack',
    194: b'HlinkGoForward',
    195: b'HlinkNavigateMoniker',
    196: b'HlinkNavigateString',
    197: b'HlinkSimpleNavigateToMoniker',
    198: b'HlinkSimpleNavigateToString',
    199: b'IEDllLoader',
    200: b'IEGetUserPrivateNamespaceName',
    201: b'IEInstallScope',
    202: b'IntlPercentEncodeNormalize',
    203: b'IsAsyncMoniker',
    204: b'IsIntranetAvailable',
    205: b'IsJITInProgress',
    206: b'IsLoggingEnabledA',
    207: b'IsLoggingEnabledW',
    208: b'IsValidURL',
    209: b'LaunchEdgeForDebug',
    210: b'MkParseDisplayNameEx',
    211: b'ObtainUserAgentString',
    212: b'PrivateCoInstall',
    213: b'QueryAssociations',
    214: b'QueryClsidAssociation',
    215: b'RegisterBindStatusCallback',
    216: b'RegisterFormatEnumerator',
    217: b'RegisterMediaTypeClass',
    218: b'RegisterMediaTypes',
    219: b'RegisterWebPlatformPermanentSecurityManager',
    220: b'ReleaseBindInfo',
    221: b'RestrictHTTP2',
    222: b'RevokeBindStatusCallback',
    223: b'RevokeFormatEnumerator',
    224: b'SetAccessForIEAppContainer',
    225: b'SetSoftwareUpdateAdvertisementState',
    226: b'ShouldDisplayPunycodeForUri',
    227: b'ShouldShowIntranetWarningSecband',
    228: b'ShowTrustAlertDialog',
    229: b'URLDownloadA',
    230: b'URLDownloadToCacheFileA',
    231: b'URLDownloadToCacheFileW',
    232: b'URLDownloadToFileA',
    233: b'URLDownloadToFileW',
    234: b'URLDownloadW',
    235: b'URLOpenBlockingStreamA',
    236: b'URLOpenBlockingStreamW',
    237: b'URLOpenPullStreamA',
    238: b'URLOpenPullStreamW',
    239: b'URLOpenStreamA',
    240: b'URLOpenStreamW',
    241: b'UnregisterWebPlatformPermanentSecurityManager',
    242: b'UrlMkBuildVersion',
    243: b'UrlMkGetSessionOption',
    244: b'UrlMkSetSessionOption',
    245: b'UrlmonCleanupCurrentThread',
    246: b'WriteHitLogging',
    247: b'ZonesReInit',
    322: b'IECompatLogCSSFix',
}
<filename>BackBones/utils.py<gh_stars>0
import torch
from torch import nn
from torch.nn.init import kaiming_normal_
import os
import json
from termcolor import colored
from datetime import datetime as dt
def init_weights(model):
    """Re-initialize the weights of Conv2d/Linear layers in ``model.features``
    and ``model.classifier`` with Kaiming-normal initialization.

    The model is modified in place; it is also returned so calls can be
    chained (e.g. ``model = init_weights(MyNet())``).
    """
    for layer in list(model.features) + list(model.classifier):
        # isinstance (rather than an exact type comparison) also covers
        # subclasses of Conv2d/Linear, which behave the same for init purposes.
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            kaiming_normal_(layer.weight)
    return model
def get_n_classes(min_class, root = 'home/alex/datasets/imagenet/'):
    """Count class directories under ``root`` that contain at least
    ``min_class`` images.

    Entries whose name contains a dot (regular files, hidden entries) are
    skipped.

    NOTE(review): the default ``root`` looks like it is missing a leading
    '/' -- confirm against the training environment.
    """
    count = 0
    for entry in os.listdir(root):
        if '.' in entry:  # not a class directory
            continue
        n_imgs = len(os.listdir('{}/{}'.format(root, entry)))
        if n_imgs >= min_class:
            count += 1
    return count
def get_accuracy(loader, model, device, dtype, loss_func = nn.CrossEntropyLoss(), n_tops = [1, 5], max_images = 32500):
    """Evaluate top-k accuracy and mean loss of ``model`` over ``loader``.

    loader     -- iterable of (images, labels) batches.
    model      -- the network to evaluate (switched to eval mode).
    device     -- target device for the batches (e.g. 'cuda', 'cpu').
    dtype      -- dtype to cast images to.
    loss_func  -- criterion applied to (scores, labels) per batch.
    n_tops     -- list of k values; returns one accuracy per k.
    max_images -- stop once this many samples have been scored, to cap cost.

    Returns (acc, loss): accuracies in percent (rounded to 2 decimals,
    aligned with n_tops) and the mean per-batch loss.
    """
    num_correct = [0] * len(n_tops)
    num_samples = [0] * len(n_tops)
    model.eval()  # evaluation mode: disables dropout, uses running BN stats
    losses = []
    with torch.no_grad():
        for (imgs, labels) in loader:
            imgs = imgs.to(device = device, dtype = dtype)  # move to device, e.g. GPU
            labels = labels.to(device = device, dtype = torch.long)
            # loss
            scores = model(imgs)
            loss = loss_func(scores, labels)
            losses.append(float(loss))
            # accuracy: rank classes per sample; a sample counts as correct
            # for top-k if its true label is among the k best-scoring classes.
            # (Removed a leftover no-op `preds.shape[0]` statement here.)
            _, preds = scores.sort(dim = 1, descending = True)
            correct = preds == labels.view(-1, 1)
            for i, n_top in enumerate(n_tops):
                num_correct[i] += correct[:, :n_top].sum()
                num_samples[i] += preds.size(0)
            if num_samples[0] >= max_images:
                break  # cap evaluation cost on large datasets
    acc = list(map(lambda x, y: round(100 * float(x) / y, 2), num_correct, num_samples))
    loss = sum(losses) / len(losses)
    return acc, loss
def save_checkpoint(model, cfg, epoch, loss):
    """Save a checkpoint, keeping at most cfg['SAVE_MODEL_N'] files on disk.

    Checkpoints live in '{SAVE_MODEL_DIR}/{MODEL}{N_LAYERS}' and are named
    'epochNNN_<int>_<frac>.pth.tar', where <int>_<frac> encodes the loss
    (e.g. loss 1.234 -> 'epoch007_1_234.pth.tar'). When the directory is
    full, the checkpoint with the worst (highest) encoded loss is deleted
    first, but only if the new loss beats it.

    NOTE(review): assumes every file in PATH follows the naming scheme above
    (the loss is parsed back out of the filename); also, the directory is
    only pruned when it holds exactly N files -- confirm it can never exceed
    N by external means.
    """
    N, PATH = cfg['SAVE_MODEL_N'], '{}/{}{}'.format(cfg['SAVE_MODEL_DIR'], cfg['MODEL'], cfg['N_LAYERS'])
    # Delete the worst checkpoint
    if len(os.listdir(PATH)) == N:
        max_loss = 0
        file_del = None
        for filename in os.listdir(PATH):
            # Recover the loss from the filename: drop the extension, then the
            # 'epochNNN' prefix, and rejoin the integer/fractional parts.
            f = filename.replace('.pth.tar','').split('_')[1:]
            f = float('{}.{}'.format(*f))
            if f > max_loss:
                max_loss = f
                file_del = filename
        if loss < max_loss and os.path.exists('{}/{}'.format(PATH, file_del)):
            os.remove('{}/{}'.format(PATH, file_del))
    # Only save if a slot is free (either there was room, or we just pruned).
    if len(os.listdir(PATH)) < N:
        state = {
            'model': model.state_dict(),
            'cfg': cfg,
            'epoch': epoch
        }
        # Zero-pad the epoch to 3 digits and encode the loss into the name.
        file_save = 'epoch{}{}_{}_{}.pth.tar'.format(
            '0' * (3 - len(str(epoch))), epoch, *str(loss).split('.')
        )
        torch.save(state, '{}/{}'.format(PATH, file_save))
def load_checkpoint(file_name, model):
    """Restore ``model`` weights from a checkpoint file.

    Returns the (cfg, epoch) pair that was stored alongside the weights by
    save_checkpoint.
    """
    print('Loading checkpoint...')
    state = torch.load(file_name)
    model.load_state_dict(state['model'])
    return state['cfg'], state['epoch']
def print_report(part, epoch = None, t = None, metrics = None):
    """Print a formatted progress report for one training epoch.

    part    -- 'start' (epoch header), 'end' (elapsed-time footer), or any
               other value to print train/validation statistics.
    epoch   -- epoch number, used when part == 'start'.
    t       -- elapsed time in whole seconds, used when part == 'end'.
    metrics -- (train_loss, val_loss, train_acc, val_acc, n_tops) where the
               accuracy lists are aligned with n_tops.
    """
    if part == 'start':
        print('{} Epoch {}{} {}'.format(' ' * 60, ' ' * (3 - len(str(epoch))), epoch, ' ' * 61))
        print(' ' * 132)
    elif part == 'end':
        t_min, t_sec = str(t // 60), str(t % 60)
        txt = 'It took {}{} min. {}{} sec.'.format(' ' * (2 - len(t_min)), t_min, ' ' * (2 - len(t_sec)), t_sec)
        print(txt)
        print()
        print(colored('-' * 132, 'cyan'))
        print()
    else: # report statistics
        train_loss, val_loss, train_acc, val_acc, n_tops = metrics
        t_loss, v_loss = round(train_loss, 3), round(val_loss, 3)
        # Pad losses to a fixed width so the two rows line up.
        t_loss = '{}{}'.format(t_loss, ' ' * (5 - len(str(t_loss))))
        v_loss = '{}{}'.format(v_loss, ' ' * (5 - len(str(v_loss))))
        t_print = 'TRAIN : loss = {}'.format(t_loss)
        v_print = 'VALIDATE: loss = {}'.format(v_loss)
        for i, n_top in enumerate(n_tops):
            t_acc, v_acc = train_acc[i], val_acc[i]
            # Render accuracies with two decimals (7 -> '7.00', 60.5 -> '60.50').
            if '.' not in str(t_acc):
                t_acc = '{}.00'.format(t_acc)
            else:
                t_acc = '{}{}'.format(t_acc, '0' * (5 - len(str(t_acc))))
            if '.' not in str(v_acc):
                # BUG FIX: this branch previously assigned to t_acc, which
                # clobbered the train accuracy and left v_acc unformatted.
                v_acc = '{}.00'.format(v_acc)
            else:
                v_acc = '{}{}'.format(v_acc, '0' * (5 - len(str(v_acc))))
            t_print += ' | TOP{} acc. = {}%'.format(n_top, t_acc)
            v_print += ' | TOP{} acc. = {}%'.format(n_top, v_acc)
        print(t_print)
        print(v_print)
|
<filename>infra/tools/dockerbuild/source.py
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Manages the raw sources needed to build wheels.
A source has a remote (public) address. That file is then downloaded and cached
locally as a CIPD package in the "infra/third_party/source" CIPD tree.
Systems that want to operate on a source reference it by a source constant, or
by a constructor (e.g., Pip).
"""
import collections
import contextlib
import hashlib
import os
import shutil
import subprocess
import tarfile
import tempfile
import zipfile
from . import cipd
from . import concurrency
from . import util
class Source(
    collections.namedtuple(
        'Source',
        (
            'name', # The name of the source.
            'version', # The version of the source.
            'download_type', # Type of download function to use.
            'download_meta', # Arbitrary metadata to pass to download function.
            'patches', # Short patch names to apply to the source tree.
            'patch_base', # Base name of patches, defaults to 'name'.
        ))):
  """Immutable description of one remote source plus its local patches.

  Instances are interned: constructing two Sources with the same
  (name, version) but different field values raises ValueError (see __new__).
  """

  # A registry of all created Source instances.
  _REGISTRY = {}

  def __new__(cls, *args, **kwargs):
    # Default 'patches'/'patch_base'. NOTE(review): the defaults only apply
    # when fields are passed as keyword arguments -- all factory helpers in
    # this module do so; confirm for any positional callers.
    kwargs.setdefault('patches', ())
    if not kwargs.get('patch_base'):
      kwargs['patch_base'] = kwargs['name']
    src = super(Source, cls).__new__(cls, *args, **kwargs)
    # Lazily-computed cache for the patches_hash property (namedtuple
    # subclasses without __slots__ still allow instance attributes).
    src._patches_hash = None

    # Register source with "_REGISTRY" and enforce that any source with the same
    # (name, version) is defined exactly the same.
    #
    # NOTE: If this expectation is ever violated, we will need to update CIPD
    # source package naming to incorporate the difference.
    key = (src.name, src.version)
    current = cls._REGISTRY.get(key)
    if not current:
      cls._REGISTRY[key] = src
    elif current != src:
      raise ValueError('Incompatible source definitions (%r != %r)' % (
          current, src))

    return src

  @classmethod
  def all(cls):
    """Returns all Source instances registered so far."""
    return cls._REGISTRY.values()

  @property
  def tag(self):
    """A tag that identifies back to the upstream package."""
    return '%s-%s' % (self.name, self.version)

  @property
  def buildid(self):
    """Uniquely identifies this package & local patches."""
    ret = self.version
    srchash = self.patches_hash
    if srchash:
      ret += '-' + srchash
    return ret

  @property
  def patches_hash(self):
    """Return a hash of all the patches applied to this source."""
    if self._patches_hash is None:
      self._patches_hash = ''
      patches = self.get_patches()
      if patches:
        hash_obj = hashlib.md5()
        # Sort so the digest is independent of patch declaration order.
        for patch in sorted(patches):
          with open(patch, 'rb') as f:
            hash_obj.update(f.read())
        self._patches_hash = hash_obj.hexdigest().lower()
    return self._patches_hash

  def get_patches(self):
    """Return list of patches (full paths) to be applied."""
    return [
        os.path.join(util.PATCHES_DIR,
                     '%s-%s-%s.patch' % (self.patch_base, self.version, x))
        for x in self.patches
    ]
class Repository(object):
  """Fetches, caches, and installs Source data.

  Each Source is cached as a CIPD package; `workdir` acts as an on-disk
  cache of fetched packages that may be shared between wheel builds.
  """

  # Map of "download_type" to download function. Mapping will be made
  # later as functions are defined.
  _DOWNLOAD_MAP = {}

  def __init__(self, system, workdir, upload=False, force_download=False):
    # `system` supplies CIPD access, temp dirs, and the SubcommandError type.
    self._system = system
    # Root of the shared on-disk source-package cache.
    self._root = workdir
    # Whether locally-built source packages should be registered with CIPD.
    self._upload = upload
    # When True, rebuild packages from the upstream download even if cached.
    self._force_download = force_download

    # Will be set to True if a source was encountered without a corresponding
    # CIPD package, but not uploaded to CIPD.
    self._missing_sources = False

    # Build our archive suffixes.
    self._archive_suffixes = collections.OrderedDict()
    for suffix in ('.tar.gz', '.tgz', '.tar.bz2'):
      self._archive_suffixes[suffix] = self._unpack_tar_generic
    for suffix in ('.zip',):
      self._archive_suffixes[suffix] = self._unpack_zip_generic

    self.lock = concurrency.KeyedLock()

  @property
  def missing_sources(self):
    # True if some source package was created locally but never uploaded.
    return self._missing_sources

  def ensure(self, src, dest_dir, unpack=True, unpack_file_filter=None):
    """Materializes `src` into `dest_dir` and returns the resulting path.

    The source's CIPD package is fetched (or created, and uploaded when
    this Repository was constructed with upload=True), deployed into the
    shared cache, and its single file is then either unpacked (for known
    archive suffixes) or copied into `dest_dir`.
    """
    util.LOGGER.debug('Ensuring source %r', src.tag)

    # Check if the CIPD package exists.
    package = cipd.Package(
        name=cipd.normalize_package_name(
            'infra/third_party/source/%s' % (src.name,)),
        tags=(
            'version:%s' % (src.version,),
        ),
        install_mode=cipd.INSTALL_SYMLINK,
        compress_level=cipd.COMPRESS_NONE,
    )
    package_path = os.path.join(self._root, '%s.pkg' % (src.tag,))

    # Lock around accesses to the shared self._root directory, which is a
    # central cache for source packages which may be used between different
    # wheel builds.
    with self.lock.get(src.tag):
      package_dest = util.ensure_directory(self._root, src.tag)

      # If the package doesn't exist, or if we are forcing a download, create a
      # local package.
      have_package = False
      if os.path.isfile(package_path):
        # Package file is on disk, reuse unless we're forcing a download.
        if not self._force_download:
          have_package = True

      # By default, assume the cached source package exists and try to download
      # it. If this produces an error we infer that it doesn't exist and go
      # create and upload it. This saves a call to CIPD compared to making an
      # explicit check up-front.
      cipd_exists = True
      if not have_package:
        # Don't even try downloading it if force_download is set.
        if not self._force_download:
          try:
            self._system.cipd.fetch_package(package.name, package.tags[0],
                                            package_path)
          except self._system.SubcommandError as e:
            # The CIPD command line tool returns 1 for all errors, so we're
            # forced to just check its stdout.
            if e.returncode == 1 and ('no such tag' in e.output or
                                      'no such package' in e.output):
              cipd_exists = False
            else:
              raise
        if not cipd_exists or self._force_download:
          self._create_cipd_package(package, src, package_path)
        have_package = True

      # We must have acquired the package at "package_path" by now.
      assert have_package

      # If we built a CIPD package, upload it. This will be fatal if we could
      # not perform the upload; if a user wants to not care about this, set
      # "upload" to False.
      if not cipd_exists:
        if self._upload:
          self._system.cipd.register_package(package_path, package.tags)
          util.LOGGER.info('Uploaded CIPD source package')
        else:
          self._missing_sources = True
          util.LOGGER.warning('Missing CIPD source package, but not uploaded.')

      # Install the CIPD package into our source directory. This is a no-op if
      # it is already installed.
      self._system.cipd.deploy_package(package_path, package_dest)

      # The package directory should contain exactly one file.
      package_files = [
          f for f in os.listdir(package_dest) if not f.startswith('.')
      ]
      if len(package_files) != 1:
        raise ValueError('Package contains %d (!= 1) files: %s' %
                         (len(package_files), package_dest))
      package_file = package_files[0]
      package_file_path = os.path.join(package_dest, package_file)

    # The same destination path must not be accessed concurrently, so we can
    # safely release the lock before copying.

    # Unpack or copy the source file into the destination path.
    if unpack:
      for suffix, unpack_func in self._archive_suffixes.items():
        if package_file.endswith(suffix):
          # unpack_file_filter is a workaround for python < 3.6 on Windows.
          # Windows only allow path less than 260, which means we need to
          # filter some files from the package if they are not required in the
          # build to avoid triggering the limitation. This can be removed after
          # we migrate to Python 3.6 or later.
          # https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation
          return self._unpack_archive(package_file_path, dest_dir, unpack_func,
                                      unpack_file_filter)

    # Single file.
    dest = os.path.join(dest_dir, os.path.basename(package_file))
    util.LOGGER.debug('Installing source from [%s] => [%s]', package_file, dest)
    with concurrency.PROCESS_SPAWN_LOCK.shared():
      shutil.copyfile(package_file_path, dest)
    return dest

  def _create_cipd_package(self, package, src, package_path):
    """Builds the CIPD source package for `src` at `package_path`."""
    # Download to a temporary file.
    with self._system.temp_subdir(src.tag) as tdir:
      download_dir = util.ensure_directory(tdir, 'download')
      package_dir = util.ensure_directory(tdir, 'package')

      path = os.path.join(download_dir, 'file')
      util.LOGGER.debug('Downloading source to: [%s]', path)
      with concurrency.PROCESS_SPAWN_LOCK.shared():
        with open(path, 'wb') as fd:
          # Dispatch to the registered download handler; it returns the
          # canonical filename for the downloaded content.
          filename = self._DOWNLOAD_MAP[src.download_type](fd,
                                                          src.download_meta)

      # Move the downloaded "file" into the package under its download name
      # and package it.
      os.rename(path, os.path.join(package_dir, filename))
      self._system.cipd.create_package(package, package_dir, package_path)

  def _unpack_archive(self, path, dest_dir, unpack_func, file_filter):
    """Unpacks the archive at `path` into `dest_dir`.

    The archive must contain exactly one top-level entry; it is moved into
    `dest_dir` and its new path is returned.
    """
    with self._system.temp_subdir(os.path.basename(
        path)) as tdir, concurrency.PROCESS_SPAWN_LOCK.shared():
      unpack_func(path, tdir, file_filter)

      contents = os.listdir(tdir)
      if len(contents) != 1:
        raise ValueError('Archive contained %d (!= 1) file(s)' % (
            len(contents),))
      archive_base = os.path.join(tdir, contents[0])

      dest = os.path.join(dest_dir, os.path.basename(archive_base))
      os.rename(archive_base, dest)
      return dest

  @staticmethod
  def _unpack_tar_generic(path, dest_dir, file_filter):
    # NOTE(review): extractall trusts member paths (no path-traversal check);
    # archives come from our own CIPD cache, so they are presumed trusted --
    # confirm before extracting archives from other origins.
    with tarfile.open(path, 'r') as tf:
      tf.extractall(
          dest_dir,
          members=filter(
              (lambda m: file_filter(m.name)) if file_filter else None,
              tf.getmembers(),
          ))

  @staticmethod
  def _unpack_zip_generic(path, dest_dir, file_filter):
    with zipfile.ZipFile(path, 'r') as zf:
      zf.extractall(dest_dir, members=filter(file_filter, zf.namelist()))
def remote_file(name, version, url):
  """Defines a Source that is a single file fetched verbatim from `url`."""
  return Source(
      name=name, version=version, download_type='url', download_meta=url)
def remote_archive(name, version, url):
  """Defines a Source that is an archive fetched from `url`.

  Repository.ensure unpacks it based on the file suffix.
  """
  return Source(
      name=name, version=version, download_type='url', download_meta=url)
def _download_url(fd, meta):
  """Download handler for 'url' sources: `meta` is the URL itself."""
  return util.download_to(meta, fd)


Repository._DOWNLOAD_MAP['url'] = _download_url
def _download_pypi_archive(fd, meta):
  """Downloads a PyPi source distribution ("sdist") into `fd`.

  `meta` is a (package name, version) tuple. The sdist entry is located via
  PyPi's JSON API; when an MD5 digest is published for it, the download is
  verified against it.

  Returns the downloaded filename. Raises ValueError if no matching release
  or sdist exists, or if the hash check fails.
  """
  name, version = meta
  url = 'https://pypi.org/pypi/%s/%s/json' % (name, version)
  content = util.download_json(url)

  release = content.get('releases', {}).get(version)
  if not release:
    raise ValueError('No PyPi release for package %r at version %r' % (
        name, version))

  entry = None
  for entry in release:
    if entry.get('packagetype') == 'sdist':
      break
  else:
    raise ValueError('No PyPi source distribution for package %r at version '
                     '%r' % (name, version))

  hash_obj = None
  # BUG FIX: PyPi release entries publish hashes under 'digests' (with the
  # legacy 'md5_digest' fallback), not under an 'md5' key -- the previous
  # entry.get('md5') always returned None, silently skipping verification.
  expected_hash = (entry.get('digests') or {}).get('md5') or entry.get(
      'md5_digest')
  if expected_hash:
    hash_obj = hashlib.md5()

  url = entry['url']
  util.LOGGER.debug('Downloading package %r @ %r from PyPi: %s',
                    name, version, url)
  filename = util.download_to(url, fd, hash_obj=hash_obj)

  if hash_obj:
    download_hash = hash_obj.hexdigest().lower()
    if download_hash != expected_hash:
      raise ValueError("Download hash %r doesn't match expected hash %r." % (
          download_hash, expected_hash))
  return filename


Repository._DOWNLOAD_MAP['pypi_archive'] = _download_pypi_archive
def _download_local(fd, meta):
basename = os.path.basename(meta)
with tarfile.open(mode='w:bz2', fileobj=fd) as tf:
tf.add(meta, arcname=basename)
return '%s.tar.bz2' % (basename,)
def local_directory(name, version, path):
  """Defines a Source backed by a directory on the local filesystem."""
  return Source(
      name=name, version=version,
      download_type='local_directory', download_meta=path)


Repository._DOWNLOAD_MAP['local_directory'] = _download_local
def pypi_sdist(name, version, patches=(), patch_base=None):
  """Defines a Source whose remote data is a PyPi source distribution.

  Optional `patches`/`patch_base` are forwarded so local patches can be
  applied on top of the upstream sdist.
  """
  return Source(
      name=name, version=version,
      download_type='pypi_archive', download_meta=(name, version),
      patches=patches, patch_base=patch_base)
|
import torch
from torch import nn
from core.config import config
import models.frame_modules as frame_modules
import models.prop_modules as prop_modules
import models.map_modules as map_modules
import models.fusion_modules as fusion_modules
import models.textual_modules as textual_modules
class CDN(nn.Module):
    """Temporal-grounding network producing a 2D proposal score map.

    Every submodule is resolved dynamically from `config.TAN.*`, so the
    architecture is entirely configuration-driven. Optional components
    (textual encoder, channel attention, semantic enhancers) are None when
    their config entries are unset.
    """

    def __init__(self):
        super(CDN, self).__init__()
        # Core pipeline: frame encoding -> proposal map -> fusion with the
        # query encoding -> map convolution -> 1x1 score prediction.
        self.frame_layer = getattr(frame_modules, config.TAN.FRAME_MODULE.NAME)(config.TAN.FRAME_MODULE.PARAMS)
        self.prop_layer = getattr(prop_modules, config.TAN.PROP_MODULE.NAME)(config.TAN.PROP_MODULE.PARAMS)
        self.fusion_layer = getattr(fusion_modules, config.TAN.FUSION_MODULE.NAME)(config.TAN.FUSION_MODULE.PARAMS)
        self.map_layer = getattr(map_modules, config.TAN.MAP_MODULE.NAME)(config.TAN.MAP_MODULE.PARAMS)
        # self.pred_layer = nn.Conv2d(config.TAN.PRED_INPUT_SIZE, 1, 16, 1, padding=15)
        self.pred_layer = nn.Conv2d(config.TAN.PRED_INPUT_SIZE, 1, 1, 1)
        if not config.TAN.TEXTUAL_MODULE.NAME:
            self.textual_encoding = None
        else:
            self.textual_encoding = getattr(textual_modules, config.TAN.TEXTUAL_MODULE.NAME)(
                config.TAN.TEXTUAL_MODULE.PARAMS)
        if not config.TAN.FRAME_MODULE.PARAMS.ATTENTION:
            self.channel_attention = None
        else:
            self.channel_attention = getattr(frame_modules, config.TAN.FRAME_MODULE.PARAMS.ATTENTION)(config)
        if not config.TAN.FRAME_MODULE.PARAMS.SEMANTIC_ENHANCE:
            self.semantic_frame_enhance = None
        else:
            self.semantic_frame_enhance = nn.ModuleList(
                [getattr(fusion_modules, config.TAN.FRAME_MODULE.PARAMS.SEMANTIC_ENHANCE)(config)
                 for _ in range(config.TAN.FRAME_MODULE.PARAMS.SEMANTIC_ENHANCE_NUM)])
        if not config.TAN.PROP_MODULE.PARAMS.SEMANTIC_ENHANCE:
            self.semantic_map_enhance = None
        else:
            # NOTE(review): the enhancer class is read from FRAME_MODULE.PARAMS
            # while the gate/count come from PROP_MODULE.PARAMS -- confirm this
            # mix of config keys is intentional.
            self.semantic_map_enhance = nn.ModuleList(
                [getattr(fusion_modules, config.TAN.FRAME_MODULE.PARAMS.SEMANTIC_ENHANCE)(config)
                 for _ in range(config.TAN.PROP_MODULE.PARAMS.SEMANTIC_ENHANCE_NUM)])

    def forward(self, textual_input, textual_mask, visual_input):
        """Score all temporal proposals for one (video, query) batch.

        Returns (prediction, map_mask, att).

        NOTE(review): `att` is only bound in the non-WordAttentionPool branch
        below but is returned unconditionally, so this would raise
        UnboundLocalError when FRAME_MODULE.NAME == 'WordAttentionPool';
        likewise `self.textual_encoding` may be None yet is always called --
        confirm the supported config combinations.
        """
        if config.TAN.TEXTUAL_MODULE.NAME == 'BiTextualEncoding':
            tex_encode, word_encode = self.textual_encoding(textual_input, textual_mask)
        else:
            tex_encode = self.textual_encoding(textual_input, textual_mask)
        if config.TAN.FRAME_MODULE.NAME == 'WordAttentionPool':
            vis_h = self.frame_layer(visual_input.transpose(1, 2), tex_encode)
        else:
            vis_h, att = self.frame_layer(visual_input.transpose(1, 2)) # batchsize * 512 * 16
        if self.channel_attention is not None:
            vis_h = self.channel_attention(vis_h, tex_encode)
        if self.semantic_frame_enhance is not None:
            # Enhancers operate on (batch, time, channel), so transpose around
            # the loop. NOTE(review): `word_encode` only exists when the
            # textual module is BiTextualEncoding -- confirm the pairing.
            vis_h = vis_h.transpose(1, 2) # batchsize * 16 * 512
            for enhance_module in self.semantic_frame_enhance:
                vis_h = enhance_module(vis_h, word_encode, textual_mask)
            vis_h = vis_h.transpose(1, 2) # batchsize * 512 * 16
        map_h, map_mask = self.prop_layer(vis_h) # batchsize * 512 * 16 * 16
        if self.semantic_map_enhance is not None:
            # Flatten the 2D map to a sequence for the enhancers, then restore.
            batch, hidden_size, map_l, _ = map_h.size()
            map_h = map_h.view(batch, hidden_size, -1).transpose(1, 2) # batchsize * 256 * 512
            for enhance_module in self.semantic_map_enhance:
                map_h = enhance_module(map_h, word_encode, textual_mask)
            map_h = map_h.transpose(1, 2).view(batch, hidden_size, map_l, map_l) # batchsize * 512 * 16 * 16
        fused_h = self.fusion_layer(tex_encode, textual_mask, map_h, map_mask)
        fused_h = self.map_layer(fused_h, map_mask, tex_encode)
        # prediction = self.pred_layer(fused_h)[:, :, 15:31, 0:16] * map_mask
        prediction = self.pred_layer(fused_h) * map_mask # batchsize * 1 * 16 * 16
        return prediction, map_mask, att

    def extract_features(self, textual_input, textual_mask, visual_input):
        """Return intermediate fused features alongside the prediction.

        NOTE(review): this method drifts from forward(): textual_encoding is
        called with a single argument, frame_layer's return is not unpacked,
        map_layer is called without tex_encode, and `attention` is unbound
        when channel_attention is None -- it looks stale; confirm before use.
        """
        tex_encode = self.textual_encoding(textual_input)
        vis_h = self.frame_layer(visual_input.transpose(1, 2)) # batchsize * 512 * 16
        if self.channel_attention is not None:
            vis_h, attention = self.channel_attention.get_attention(vis_h, tex_encode)
        map_h, map_mask = self.prop_layer(vis_h) # batchsize * 512 * 16 * 16
        fused_h = self.fusion_layer(tex_encode, textual_mask, map_h, map_mask)
        fused_h = self.map_layer(fused_h, map_mask)
        # prediction = self.pred_layer(fused_h)[:, :, 15:31, 0:16] * map_mask
        prediction = self.pred_layer(fused_h) * map_mask
        return fused_h, prediction, attention
import cv2
from PIL import Image
import numpy as np
from osr2mp4.ImageProcess.Curves.curves import getclass
from osr2mp4.ImageProcess import imageproc
from itertools import chain
def convertlist(longlist):
	"""Flatten a rectangular nested list into an int32 numpy array that keeps
	the original (rows, cols) shape."""
	flat = [value for row in longlist for value in row]
	return np.array(flat, dtype=np.int32).reshape((len(longlist), len(longlist[0])))
class GenerateSlider:
	"""Renders osu! slider bodies into an RGBA image using cv2 polylines."""

	def __init__(self, settings, sliderborder, slideroverride, radius, scale):
		"""
		:param settings: provides settings["Slider quality"] (render supersampling factor)
		:param sliderborder: list, color of the slider's border
		:param slideroverride: list, color of the slider's body
		:param radius: float, size of slider
		:param scale: float, current resolution with 512x384
		"""
		self.sliderborder = tuple(sliderborder)
		self.slideroverride = tuple(slideroverride)
		self.to_color = np.array([50, 50, 50]) # slider gradually become this color, the closer to the center the closer the color
		self.npslideroverride = np.array(self.slideroverride)
		self.radius = radius
		self.coef = settings.settings["Slider quality"]
		self.scale = scale * self.coef
		# Margin (in px) added around the slider when cropping the canvas.
		self.extended = self.radius * self.scale + 2
		# Oversized canvas: sliders are drawn with a (600, 300) offset so
		# curves reaching outside the 512x384 playfield still fit.
		self.img = np.zeros((int(768 * self.scale), int(1376 * self.scale), 4), dtype=np.uint8)
		# NOTE(review): pbuffer appears to wrap self.img's memory (readonly is
		# cleared below), so cv2 draws into self.img become visible through
		# this PIL image without copying -- confirm with Pillow's frombuffer
		# semantics for RGBA/raw.
		self.pbuffer = Image.frombuffer("RGBA", (self.img.shape[1], self.img.shape[0]), self.img, 'raw', "RGBA", 0, 1)
		self.pbuffer.readonly = False

	def convert(self, ps_unscale):
		# Map osu!-pixel control points onto the oversized canvas (offset by
		# (600, 300), then scaled).
		ps = []
		for pos in ps_unscale:
			ps.append([(pos[0] + 600) * self.scale, (pos[1] + 300) * self.scale])
		return ps

	@staticmethod
	def get_min_max(curve_pos):
		# get pos where slider start drawing and end drawing, basically reduce image size without touching the slider
		min_x = min(curve_pos, key=lambda i: i[0])[0]
		min_y = min(curve_pos, key=lambda i: i[1])[1]
		max_x = max(curve_pos, key=lambda i: i[0])[0]
		max_y = max(curve_pos, key=lambda i: i[1])[1]
		return min_x, min_y, max_x, max_y

	def draw(self, curve_pos):
		# Border first, then the slightly thinner body on top of it.
		cv2.polylines(self.img, [curve_pos], False, (*self.sliderborder, 200), int(self.radius*2*self.scale), cv2.LINE_AA)
		cv2.polylines(self.img, [curve_pos], False, (*self.slideroverride, 200), int((self.radius*0.875)*2*self.scale), cv2.LINE_AA)
		# make shadow color effect
		for c in range(4, int(self.radius), 1):
			# Progressively thinner, lighter strokes create a radial gradient.
			coefficient = max(0, (c-6)) / (self.radius * 0.7)
			cur_slider = self.to_color * coefficient + self.npslideroverride
			cur_slider[cur_slider > 255] = 255
			cur_slider = tuple(cur_slider)
			cv2.polylines(self.img, [curve_pos], False, (*cur_slider, 200), int((self.radius*2 - c*2) * self.scale), cv2.LINE_AA)

	def get_slider_img(self, slidertype, ps, pixel_length):
		"""Render one slider; returns (cropped image, head x offset, head y offset).

		Offsets locate the slider's first curve point inside the cropped image,
		in final (quality-divided) pixels.
		"""
		ps = self.convert(ps)
		slider_c = getclass(slidertype, ps, pixel_length * self.scale)
		curve_pos = np.int32(slider_c.pos)
		min_x, min_y, max_x, max_y = self.get_min_max(curve_pos)  # start y end y start x end x
		self.draw(curve_pos)
		# crop useless part of image
		up_y_corner = max(0, int(min_y - self.extended))
		left_x_corner = max(0, int(min_x - self.extended))
		down_y_corner = min(self.img.shape[0], int(max_y + self.extended))
		right_x_corner = min(self.img.shape[1], int(max_x + self.extended))
		img = self.pbuffer.crop((left_x_corner, up_y_corner, right_x_corner, down_y_corner))
		if self.coef != 1:
			# Downsample back from the supersampled canvas to output size.
			img = imageproc.change_size(img, 1/self.coef, 1/self.coef)
		self.img[up_y_corner:down_y_corner, left_x_corner:right_x_corner] = 0  # reset canvas for the next slider
		x_offset = int((curve_pos[0][0] - left_x_corner)/self.coef)
		y_offset = int((curve_pos[0][1] - up_y_corner)/self.coef)
		return img, x_offset, y_offset
if __name__ == "__main__":
	# Manual smoke test: render a single slider and mark the computed head
	# offset with a small white square.
	# slidercode = "96,180,0,2,0,B|286:44|286:44|416:201|416:201|251:340|251:340|95:179|95:179,1,875"
	# slidercode = "130,77,0,2,0,B|414:155|98:307,1,375"
	slidercode = "105,194,0,2,0,L|407:190,1,300"
	#slidercode = "226,81,0,2,0,B|427:107|272:303|85:222|226:81,1,400"
	# slidercode = "142,314,0,2,0,B|267:54|267:54|387:330|387:330|95:128|95:128|417:124|417:124|141:314,1,1600"
	# slidercode = "182,326,3923,2,0,P|99:174|394:243,1,700"
	# slidercode = "485,360,99863,2,0,P|483:342|470:225,1,135.8307"
	# slidercode = "446,22,11863,2,0,L|442:57,1,36.0000013732911"
	WIDTH = 1920
	HEIGHT = 1080
	playfield_width, playfield_height = WIDTH * 0.8 * 3 / 4, HEIGHT * 0.8
	scale = playfield_width/512
	# NOTE(review): GenerateSlider.__init__ takes `settings` as its first
	# argument (omitted here) and no `convert_string` method is defined in
	# this file -- this block appears stale and would raise at runtime;
	# confirm before relying on it.
	gs = GenerateSlider([255, 69, 0], [0, 60, 120], 36.48, scale)
	stype, ps, length = gs.convert_string(slidercode)
	img, x, y = gs.get_slider_img(stype, ps, length)
	square = np.full((2, 2, 4), 255)
	img[y-1:y+1, x-1:x+1] = square
	cv2.imwrite("test.png", img)
|
from random import randint, random
# Implement a 2D Lattice with a hexagonal unit cell
class Lattice:
    """2-D lattice holding two particle species that drift and react.

    A-particles start on the left half (i < 0), B-particles on the right
    (i > 0). Each particle is stored as [position, id] with position = [i, j]
    and id 100 or 200 (two sub-populations per site). Reactions annihilate
    an A/B pair and are tallied per column in reaction_histogram.

    Python 2 code (print statements, xrange).
    """

    def __init__(self, _size, _prob_boundaries, _prob_reaction):
        # size = (N,M)
        self.size = _size
        # A[i] = [(i,j),id]
        self.color_A = []
        self.color_B = []
        # One bin per column i in [-N, N].
        self.reaction_histogram = [0] * (_size[0]*2 + 1)
        self.prob_boundaries = _prob_boundaries # (p,q)
        self.prob_reaction = _prob_reaction
        self.number_of_reactions = 0
        self.time_of_the_last_reaction = 0
        # Flag raised by update_collision; consumers reset it explicitly.
        self.just_react = False

    def get_size(self):
        return self.size

    def get_area(self):
        # to implement
        return -1

    def get_A_particles(self):
        return self.color_A

    def get_B_particles(self):
        return self.color_B

    def get_number_of_reactios(self):
        # NOTE(review): name keeps the original typo ("reactios") because
        # external callers may depend on it.
        return self.number_of_reactions

    def get_number_of_colored_particles(self):
        return [len(self.color_A), len(self.color_B)]

    def get_time_last_reaction(self):
        return self.time_of_the_last_reaction

    def get_reaction_histogram(self):
        return self.reaction_histogram

    def has_just_react(self):
        return self.just_react

    def reset_just_react(self):
        self.just_react = False

    def set_last_time_reaction(self, current_time):
        self.time_of_the_last_reaction = current_time

    def add_particle(self, color, position, _id):
        # A color = 1
        # B color = -1
        if color == 1:
            self.color_A.append([position, _id])
        if color == -1:
            self.color_B.append([position, _id])

    def set_initial_conditions(self, Na, Nb):
        """Scatter Na A-particles on the left half and Nb B-particles on the
        right half, choosing one of the two sub-lattices (_id 100/200) at
        random for each placement."""
        N = self.size[0]
        M = self.size[1]
        # Capacity check: 2 sub-lattices x (2N+1) x (2M+1) sites.
        if Na + Nb > 2 * (2*N + 1) * (2*M + 1):
            print "error, too many colored particles"
            exit()
        count_a = 0
        count_b = 0
        while count_a < Na:
            if random() <= 0.5:
                _id = 100
            else:
                _id = 200
            i = randint(-N, -1)
            j = randint(-M, M)
            if [[i, j], _id] not in self.color_A:
                self.color_A.append([[i, j], _id])
                count_a += 1
        while count_b < Nb:
            if random() <= 0.5:
                _id = 100
            else:
                _id = 200
            i = randint(1, N)
            j = randint(-M, M)
            # NOTE(review): the color_A check can never match here (A sites
            # have i < 0, B candidates i > 0) -- presumably defensive only.
            if [[i, j], _id] not in self.color_A:
                if [[i, j], _id] not in self.color_B:
                    self.color_B.append([[i, j], _id])
                    count_b += 1

    def update_move(self, _id, lattice_vector):
        """Shift every particle of sub-lattice `_id` by `lattice_vector`.

        Particles stepping past |i| = N leave the system; boundary influx is
        then simulated with probabilities prob_boundaries = (p, q).
        NOTE(review): only x-moves (lattice_vector[1] == 0) are handled --
        confirm this is intentional for the 1-D front setup.
        """
        N = self.size[0]
        M = self.size[1]
        # Iterate backwards so `del` by index stays safe.
        for i in xrange(len(self.color_A) - 1, -1, -1):
            if self.color_A[i][1] == _id:
                if lattice_vector[1] == 0:
                    if lattice_vector[0] == 1:
                        if self.color_A[i][0][0] == self.size[0]:
                            del self.color_A[i]
                        else:
                            self.color_A[i][0][0] = self.color_A[i][0][0] +1
                    elif lattice_vector[0] == -1:
                        if self.color_A[i][0][0] == -self.size[0]:
                            del self.color_A[i]
                        else:
                            self.color_A[i][0][0] = self.color_A[i][0][0] -1
        for i in xrange(len(self.color_B) - 1, -1, -1):
            if self.color_B[i][1] == _id:
                if lattice_vector[1] == 0:
                    if lattice_vector[0] == 1:
                        if self.color_B[i][0][0] == self.size[0]:
                            del self.color_B[i]
                        else:
                            self.color_B[i][0][0] = self.color_B[i][0][0] + 1
                    elif lattice_vector[0] == -1:
                        if self.color_B[i][0][0] == -self.size[0]:
                            del self.color_B[i]
                        else:
                            self.color_B[i][0][0] = self.color_B[i][0][0] - 1
        # add the particles if it was a deplacement with a probability p
        if lattice_vector[0] == 1 and lattice_vector[1] == 0:
            if random() <= self.prob_boundaries[0]:
                # add a left particle (unidimensional j = 0)
                self.add_particle(1, [-self.size[0], 0], _id)
        # add the particles if it was a deplacement
        if lattice_vector[0] == -1 and lattice_vector[1] == 0:
            if random() <= self.prob_boundaries[1]:
                # add a right particle
                self.add_particle(-1, [self.size[0], 0], _id)
    #

    def update_collision(self, lattice_vector, energy_exch, current_time):
        """Annihilate A/B pairs that would swap/meet along `lattice_vector`.

        NOTE(review): a reaction fires when random() >= prob_reaction, i.e. a
        LARGER prob_reaction means FEWER reactions -- confirm the intended
        semantics. Also note color_A/color_B are mutated while being
        iterated; the break limits but does not eliminate that hazard.
        """
        N = self.size[0]
        M = self.size[1]
        for a in self.color_A:
            for b in self.color_B:
                # check if they are different particles
                # in the unit cell
                if a[1] != b[1]:
                    new_index_a = [0, 0]
                    new_index_a[0] = a[0][0]
                    new_index_a[1] = a[0][1]
                    new_index_b = [0, 0]
                    new_index_b[0] = b[0][0]
                    new_index_b[1] = b[0][1]
                    if lattice_vector[1] == 0:
                        new_index_a[0] += lattice_vector[0]
                        new_index_a[1] += lattice_vector[1]
                        new_index_b[0] += lattice_vector[0]
                        new_index_b[1] += lattice_vector[1]
                    # Collision: a would step onto b, or b-side equivalent.
                    if new_index_a == b[0] or new_index_b == a[0]:
                        # start collide colors ------------------------------
                        condition = False
                        if energy_exch < 0:
                            print "ERROR energy"
                            exit()
                        if random() >= self.prob_reaction:
                            condition = True
                        if condition is True:
                            # erase a,b
                            self.number_of_reactions += 1
                            self.just_react = True
                            # Histogram bin for column i (shifted to >= 0).
                            index = self.size[0] + a[0][0]
                            self.reaction_histogram[index] += 1
                            self.color_A.remove(a)
                            self.color_B.remove(b)
                            break
                        # NOTE(review): this sanity check appears unreachable
                        # (the True branch above always breaks first).
                        if condition is True:
                            print "condition error"
                            exit()
                        # end collide colors --------------------------------
    #

    def get_density_distribution(self, _string):
        """Dump particle positions/ids to '<_string>A.dat' and '<_string>B.dat'."""
        outfile_A = open(_string + "A.dat", "w")
        outfile_B = open(_string + "B.dat", "w")
        for a in self.color_A:
            outfile_A.write(" %f %f %f \n" % (a[0][0], a[0][1], a[1]))
        for b in self.color_B:
            outfile_B.write(" %f %f %f \n" % (b[0][0], b[0][1], b[1]))
        outfile_A.close()
        outfile_B.close()

    def get_min_max(self):
        # return the rigthmost A particle and the leftmost B particle' position
        max_pos = -self.size[0]
        min_pos = self.size[0]
        for a in self.color_A:
            if max_pos < a[0][0]:
                max_pos = a[0][0]
        for b in self.color_B:
            if min_pos > b[0][0]:
                min_pos = b[0][0]
        return (max_pos, min_pos)

    def print_histo_pos(self, str):
        """Write the reaction histogram as (column, count) rows to file `str`."""
        outfile = open(str, "w")
        for i in range(len(self.reaction_histogram)):
            outfile.write("%f %f \n" % (-self.size[0] + i, self.reaction_histogram[i]))
        outfile.close()
|
from django.core.exceptions import ValidationError
from django.db.models import Case, When
from django_directed.models.abstract_base_graph_models import base_edge, base_graph, base_node
def cyclic_graph_factory(config):
    """Return an abstract Graph model class for cyclic graphs.

    Type: Subclassed Abstract Model
    """

    class CyclicGraph(base_graph(config)):
        class Meta:
            abstract = True

    return CyclicGraph
def cyclic_edge_factory(config):
    """Return an abstract Edge model class for cyclic graphs.

    Type: Subclassed Abstract Model. Saving an edge enforces the configured
    self-link policy.
    """

    class CyclicEdge(base_edge(config)):
        class Meta:
            abstract = True

        def save(self, *args, **kwargs):
            # Self-links are rejected unless the config explicitly allows them.
            if not config.allow_self_links:
                self.parent.__class__.self_link_check(self.parent, self.child)
            super().save(*args, **kwargs)

    return CyclicEdge
def cyclic_node_factory(config):
    """Return an abstract Node model class for cyclic graphs.

    Type: Subclassed Abstract Model
    """

    class CyclicNode(base_node(config)):
        class Meta:
            abstract = True

    return CyclicNode
def dag_graph_factory(config):
    """Return an abstract Graph model class for directed acyclic graphs.

    Type: Subclassed Abstract Model
    """

    class DAGGraph(base_graph(config)):
        class Meta:
            abstract = True

    return DAGGraph
def dag_edge_factory(config):
    """Return an abstract Edge model class for directed acyclic graphs.

    Type: Subclassed Abstract Model. Saving an edge first verifies it would
    not introduce a cycle.
    """

    class DAGEdge(base_edge(config)):
        class Meta:
            abstract = True

        def save(self, *args, **kwargs):
            # Reject edges that would create a cycle before persisting.
            self.parent.__class__.circular_check(self.parent, self.child)
            super().save(*args, **kwargs)

    return DAGEdge
def dag_node_factory(config):
    """Return an abstract Node model class for directed acyclic graphs.

    Type: Subclassed Abstract Model
    """

    class DAGNode(base_node(config)):
        class Meta:
            abstract = True

    return DAGNode
def polytree_graph_factory(config):
    """Return an abstract Graph model class for polytrees.

    Type: Subclassed Abstract Model
    """

    class PolytreeGraph(base_graph(config)):
        class Meta:
            abstract = True

    return PolytreeGraph
def polytree_edge_factory(config):
    """Return an abstract Edge model class for polytrees.

    Type: Subclassed Abstract Model. Saving an edge first verifies it would
    not introduce a cycle.
    """

    class PolytreeEdge(base_edge(config)):
        class Meta:
            abstract = True

        def save(self, *args, **kwargs):
            # Reject edges that would create a cycle before persisting.
            self.parent.__class__.circular_check(self.parent, self.child)
            super().save(*args, **kwargs)

    return PolytreeEdge
def polytree_node_factory(config):
    """Return an abstract Node model class for polytrees.

    Type: Subclassed Abstract Model
    """

    class PolytreeNode(base_node(config)):
        class Meta:
            abstract = True

    return PolytreeNode
def arborescence_graph_factory(config):
    """Return an abstract Graph model class for arborescences.

    Type: Subclassed Abstract Model
    """

    class ArborescenceGraph(base_graph(config)):
        class Meta:
            abstract = True

    return ArborescenceGraph
def arborescence_edge_factory(config):
    """Return an abstract Edge model class for arborescences.

    Type: Subclassed Abstract Model. Saving an edge first verifies it would
    not introduce a cycle.
    """

    class ArborescenceEdge(base_edge(config)):
        class Meta:
            abstract = True

        def save(self, *args, **kwargs):
            # Reject edges that would create a cycle before persisting.
            self.parent.__class__.circular_check(self.parent, self.child)
            super().save(*args, **kwargs)

    return ArborescenceEdge
def arborescence_node_factory(config):
    """
    Type: Subclassed Abstract Model
    Abstract methods of the Node base model are implemented.
    """
    parent_model = base_node(config)

    class ArborescenceNode(parent_model):
        class Meta:
            abstract = True

    return ArborescenceNode
|
import random
#import tools
from deap import tools
def varAnd(population, toolbox, cxpb, mutpb):
    r"""Part of an evolutionary algorithm applying only the variation part
    (crossover **and** mutation). The modified individuals have their
    fitness invalidated. The individuals are cloned so returned population is
    independent of the input population.

    :param population: A list of individuals to vary.
    :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                    operators.
    :param cxpb: The probability of mating two individuals.
    :param mutpb: The probability of mutating an individual.
    :returns: A list of varied individuals that are independent of their
              parents.

    The variation goes as follows. First, the parental population
    :math:`P_\mathrm{p}` is duplicated using the :meth:`toolbox.clone` method
    and the result is put into the offspring population :math:`P_\mathrm{o}`. A
    first loop over :math:`P_\mathrm{o}` is executed to mate pairs of
    consecutive individuals. According to the crossover probability *cxpb*, the
    individuals :math:`\mathbf{x}_i` and :math:`\mathbf{x}_{i+1}` are mated
    using the :meth:`toolbox.mate` method. The resulting children
    :math:`\mathbf{y}_i` and :math:`\mathbf{y}_{i+1}` replace their respective
    parents in :math:`P_\mathrm{o}`. A second loop over the resulting
    :math:`P_\mathrm{o}` is executed to mutate every individual with a
    probability *mutpb*. When an individual is mutated it replaces its not
    mutated version in :math:`P_\mathrm{o}`. The resulting :math:`P_\mathrm{o}`
    is returned.

    This variation is named *And* because of its propensity to apply both
    crossover and mutation on the individuals. Note that both operators are
    not applied systematically; the resulting individuals can be generated from
    crossover only, mutation only, crossover and mutation, and reproduction
    according to the given probabilities. Both probabilities should be in
    :math:`[0, 1]`.
    """
    # Clone first so the caller's population is never mutated.
    offspring = [toolbox.clone(ind) for ind in population]

    # Mate consecutive pairs (1,0), (3,2), ... with probability cxpb.
    for i in range(1, len(offspring), 2):
        if random.random() < cxpb:
            offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1],
                                                          offspring[i])
            # Children replace their parents; their fitnesses are now stale.
            del offspring[i - 1].fitness.values, offspring[i].fitness.values

    # Mutate each individual independently with probability mutpb.
    for i in range(len(offspring)):
        if random.random() < mutpb:
            offspring[i], = toolbox.mutate(offspring[i])
            del offspring[i].fitness.values

    return offspring
def eaSimple(population, toolbox, cxpb, mutpb, ngen, stats=None,
             halloffame=None, verbose=__debug__):
    """Run a plain generational evolutionary algorithm.

    Evaluates stale individuals, then for each of *ngen* generations selects,
    varies (via :func:`varAnd`), re-evaluates, updates *halloffame*, and
    replaces the population in place. Returns ``(population, logbook)``.
    """
    logbook = tools.Logbook()
    logbook.header = ['gen'] + (stats.fields if stats else [])

    def _evaluate(pool):
        # Only individuals with an invalidated fitness need re-evaluation.
        stale = [ind for ind in pool if not ind.fitness.valid]
        for ind, fit in zip(stale, toolbox.map(toolbox.evaluate, stale)):
            ind.fitness.values = fit

    def _record(gen):
        # Snapshot statistics for this generation.
        logbook.record(gen=gen, **(stats.compile(population) if stats else {}))
        if verbose:
            print(logbook.stream)

    _evaluate(population)
    if halloffame is not None:
        halloffame.update(population)
    _record(0)

    # Generational loop.
    for gen in range(1, ngen + 1):
        offspring = toolbox.select(population, len(population))
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)
        _evaluate(offspring)
        if halloffame is not None:
            halloffame.update(offspring)
        # In-place replacement keeps external references to `population` valid.
        population[:] = offspring
        _record(gen)

    return population, logbook
|
'''Some utilities to manipulate strings.'''
import re
__all__ = ['straighten', 'text_filename']
def straighten(s, length, align_left=True, delimiter=' '):
    '''Straighten a Unicode string to have a fixed length.

    Parameters
    ----------
    s : str
        string to be trimmed
    length : int
        number of characters of the output string
    align_left : bool
        whether to align the string to the left (True) or to the right (False)
    delimiter : char
        the whitespace character to fill in if the length input string is shorter than the output length

    Returns
    -------
    retval : str
        straightened string

    Raises
    ------
    ValueError
        if `s` is not a string, `length` is negative, or `delimiter` is not a
        single character

    Examples
    --------
    >>> from mt.base.str import straighten
    >>> straighten('Hello world', 7)
    'Hello …'
    >>> straighten('Hi there', 3, align_left=False)
    '…re'
    >>> straighten("Oh Carol!", 20, align_left=False)
    '           Oh Carol!'
    '''
    if not isinstance(s, str):
        raise ValueError("Input is not a string: {}".format(s))
    if length < 0:
        raise ValueError("Output length must be non-negative, received {}".format(length))
    if not isinstance(delimiter, str) or len(delimiter) != 1:
        raise ValueError("Expecting delimiter to be a single character, but '{}' received.".format(delimiter))
    in_len = len(s)
    if in_len == length: # nothing to do
        return s
    # need to fill in delimiter
    if in_len < length:
        if align_left:
            return s + delimiter*(length-in_len)
        return delimiter*(length-in_len) + s
    # need to truncate with '\u2026' (horizontal lower dots)
    if length == 0:
        # BUG FIX: previously fell through to the slicing branch below, which
        # returned a string of length len(s) instead of the requested 0.
        return ''
    if length == 1:
        return '\u2026' # single-char case
    if align_left:
        return s[:length-1] + '\u2026'
    return '\u2026' + s[in_len-length+1:]
def _make_t2f():
    '''Build the translation tables and patterns used by :func:`text_filename`.

    Filename-safe bytes (ASCII alphanumerics) are left untouched; ``_``
    escapes to ``__``; every other byte value escapes to ``_<hex>_``.
    Returns ``(t2f, t2f_pattern, f2t, f2t_pattern)`` where the dicts are
    keyed by regex-escaped byte sequences so pattern matches can be looked
    up directly.
    '''
    prefixes = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_'
    t2f = {b'_': b'__'}
    for i in range(256):
        if i in prefixes:  # safe byte: no escaping needed
            continue
        x = bytes((i,))
        y = b'_' + hex(i)[2:].encode() + b'_'
        t2f[x] = y
    f2t = {v:k for k,v in t2f.items()}
    t2f = {re.escape(k): v for k,v in t2f.items()}
    f2t = {re.escape(k): v for k,v in f2t.items()}
    t2f_pattern = re.compile(b"|".join(t2f.keys()))
    f2t_pattern = re.compile(b"|".join(f2t.keys()))
    return t2f, t2f_pattern, f2t, f2t_pattern
# Module-level tables, built once at import time.
_t2f, _t2f_pattern, _f2t, _f2t_pattern = _make_t2f()
def text_filename(s, forward=True):
    '''Converts a text to a filename or vice versa, replacing special characters with subtexts.

    Parameters
    ----------
    s : str or bytes
        input text
    forward : bool
        whether text to filename (True) or filename to text (False)

    Returns
    -------
    str or bytes
        filename-friendly output text, same type as input
    '''
    if isinstance(s, bytes):
        s1 = s
        out_str = False
    else:
        s1 = s.encode()  # work on UTF-8 bytes; non-ASCII text is escaped per byte
        out_str = True
    if forward:
        s2 = _t2f_pattern.sub(lambda m: _t2f[re.escape(m.group(0))], s1)
    else:
        s2 = _f2t_pattern.sub(lambda m: _f2t[re.escape(m.group(0))], s1)
    return s2.decode() if out_str else s2
|
import ast
import builtins
import cinder
from compiler.readonly import (
readonly_compile,
ReadonlyCodeGenerator,
ReadonlyTypeBinder,
)
from compiler.static import StaticCodeGenerator
from contextlib import contextmanager
from typing import Any, List, NewType, Optional, Tuple
from ..test_static.common import StaticTestBase as TestBase, TestErrors
@contextmanager
def with_detection(detection_func):
    """Temporarily install *detection_func* as cinder's immutable-warn handler.

    The previous handler is restored when the context exits — including when
    the body raises (the original version skipped restoration on exceptions,
    leaking the test handler into subsequent tests).
    """
    old_handler = cinder.get_immutable_warn_handler()
    cinder.set_immutable_warn_handler(detection_func)
    try:
        yield
    finally:
        cinder.set_immutable_warn_handler(old_handler)
Readonly = NewType("Readonly", object)
class ReadonlyTestBase(TestBase):
def setUp(self):
cinder.flush_immutable_warnings() # make sure no existing warnings interfere with tests
def tearDown(self):
cinder.flush_immutable_warnings()
def compile(
self,
code,
generator=ReadonlyCodeGenerator,
modname="<module>",
optimize=0,
peephole_enabled=True,
ast_optimizer_enabled=True,
enable_patching=False,
):
if generator is ReadonlyCodeGenerator:
tree = ast.parse(self.clean_code(code))
return readonly_compile(modname, f"{modname}.py", tree, 0, optimize)
else:
return super().compile(
code,
generator,
modname,
optimize,
peephole_enabled,
ast_optimizer_enabled,
)
def compile_and_run(self, code, future_annotations=True):
# find out indent
index = 0
while code[index].isspace():
index += 1
if future_annotations:
code = code[:index] + "from __future__ import annotations\n" + code
errors = self.lint(code)
self.assertEqual(errors.errors, [])
compiled = self.compile(code)
builts = builtins.__dict__
builts["Readonly"] = Readonly
d = {"<builtins>": builts}
exec(compiled, d)
return d
def static_compile(
self,
code,
generator=StaticCodeGenerator,
modname="<module>",
optimize=0,
peephole_enabled=True,
ast_optimizer_enabled=True,
enable_patching=False,
):
return super().compile(
code,
generator,
modname,
optimize,
peephole_enabled,
ast_optimizer_enabled,
enable_patching,
)
def lint(self, code: str) -> TestErrors:
code = self.clean_code(code)
tree = ast.parse(code)
s = ReadonlyCodeGenerator._SymbolVisitor()
s.visit(tree)
type_binder = ReadonlyTypeBinder(tree, "<module>.py", s)
type_binder.get_types()
return TestErrors(self, code, type_binder.error_sink.errors)
def static_lint(self, code: str) -> TestErrors:
return super().lint(code)
def get_detection_func(self, errors):
def detection_func(arg):
errors.extend(arg)
return detection_func
@contextmanager
def assertNoImmutableErrors(self):
errors = []
with with_detection(self.get_detection_func(errors)):
yield
cinder.flush_immutable_warnings()
msg = (
""
if len(errors) == 0
else f"expected no errors but see error {errors[0][1]}"
)
self.assertFalse(errors, msg)
@contextmanager
def assertImmutableErrors(
self, expected_errors: List[Tuple[int, str, Optional[object]]]
):
errors = []
with with_detection(self.get_detection_func(errors)):
yield
cinder.flush_immutable_warnings()
self.assertTrue(len(errors) > 0, "expected errors but no errors found")
self.assertEqual(errors, expected_errors)
|
# Repository: MarquesThiago/sumarize-text (0 stars)
import re
import string
import nltk
import pandas as pd
import numpy as np
from common import (simple_clear, stop_word, wiegth_sentency, normalize_text, init)
def check_index(phrase, word):
    '''
    Return the position of *word* within *phrase*, or None when absent.

    :param list phrase: sequence of tokens to search
    :param str word: token to look for
    :return: int phrase.index(word), or None if the word is not present
    '''
    try:
        return phrase.index(word)
    except ValueError:
        # .index raises ValueError for a missing word; the original bare
        # except/pass silently returned None for *any* error.
        return None
def sepeared_words_sent(sentences):
    '''Tokenize each sentence into a list of lower-cased words.

    :param sentences: list of sentence strings
    :return: list of word lists, one per input sentence
    '''
    # NOTE(review): this function is defined twice in this module with
    # identical bodies; the later definition silently shadows this one.
    return [nltk.word_tokenize(sentence.lower()) for sentence in sentences ]
def sepeared_words_sent(sentences):
    '''Tokenize each sentence into a list of lower-cased words.

    :param sentences: list of sentence strings
    :return: list of word lists, one per input sentence
    '''
    # NOTE(review): duplicate definition — an identical function appears just
    # above; one of the two should be deleted.
    return [nltk.word_tokenize(sentence.lower()) for sentence in sentences ]
def define_index(sentences, import_words):
    '''
    For each sentence, collect the positions of the important words it contains.

    :param sentences: list of sentence strings
    :param import_words: list of important words to look up
    :return: list with one sorted list of word positions per sentence
    '''
    indexes = []
    for sentence_words in sepeared_words_sent(sentences):
        # check_index returns None for words absent from this sentence.
        found = [check_index(sentence_words, word) for word in import_words]
        # Fixed: the original comprehension shadowed the accumulator name
        # `index` and used the `== None` anti-idiom.
        indexes.append(sorted(pos for pos in found if pos is not None))
    return indexes
def define_group(index, distance):
    '''
    Split each row of the index matrix into subgroups of nearby positions.

    Two consecutive positions stay in the same subgroup while their gap is
    strictly smaller than *distance*.

    :param index: list -> matrix of sorted word positions, one row per sentence
    :param distance: int -> gap threshold between consecutive positions
    :return: list with one list of subgroups per sentence
    '''
    listed = []
    for positions in index:
        if not positions:
            listed.append([])
            continue
        groups = []
        current = [positions[0]]
        for previous, value in zip(positions, positions[1:]):
            if value - previous < distance:
                current.append(value)
            else:
                groups.append(current)
                current = [value]
        groups.append(current)
        listed.append(groups)
    return listed
def calc(listed):
    '''
    Apply Luhn's significance formula to each sentence's index groups.

    A sentence's grade is the maximum over its subgroups of
    (number of important words)**2 / (span of the group in words).

    :param listed: list -> matrix with subgroups of word positions per sentence
    :return: list of (sentence_position, grade) tuples
    '''
    grades = []
    for position in range(len(listed)):
        groups = listed[position]
        if len(groups) == 0:
            grades.append((position, 0))
            continue
        # Renamed from the misleading `minimum`: this tracks the best (max) grade.
        best = 0
        for group in groups:
            import_word = len(group)
            total_words = group[-1] - group[0] + 1
            grade = 1.0 * import_word**2 / total_words
            if best < grade:
                best = grade
        grades.append((position, best))
    return grades
def calc_note(sentences, import_words, distance):
    '''
    Grade every sentence with Luhn's algorithm.

    :param sentences: list of sentence strings
    :param import_words: list -> important words of the text
    :param distance: int -> gap threshold used when clustering word positions
    :return: list of (sentence_position, grade) tuples from calc()
    '''
    positions = define_index(sentences, import_words)
    groups = define_group(positions, distance)
    return calc(groups)
def variables(text, rank, lemma=False):
    '''
    Prepare the working variables for the Luhn summarizer.

    Builds: "tokens" (the text split into sentences), "sentence_clear"
    (each sentence normalized/cleaned), "all_words" (every word of the
    cleaned text) and "rank" (the *rank* most frequent words).

    :param text: str
    :param rank: int -> how many of the most common words to keep
    :param lemma: bool -> forwarded to normalize_text
    :return: dict with keys "tokens", "sentence_clear", "all_words", "rank"
    '''
    tokens = nltk.sent_tokenize(text.lower(), language="Portuguese")
    cleaned = [" ".join(normalize_text(sentence, lemma)) for sentence in tokens]
    all_words = nltk.word_tokenize(" ".join(cleaned), language="Portuguese")
    frequency = nltk.FreqDist(all_words)
    top_words = [word for word, _count in frequency.most_common(rank)]
    return {"tokens": tokens, "sentence_clear": cleaned, "all_words": all_words, "rank": top_words}
def sumarize(tokens, notes, limit, order=False):
    '''
    Pick the highest-graded sentences.

    :param tokens: list -> the sentences of the text
    :param notes: list -> (sentence_position, grade) pairs
    :param limit: number -> maximum number of sentences returned
    :param order: bool -> keep the selected sentences in original text order
    :return: list of the selected sentences
    '''
    best = sorted(notes, key=lambda pair: pair[1], reverse=True)[:limit]
    if order:
        best = sorted(best, key=lambda pair: pair[0])
    return [tokens[pair[0]] for pair in best]
def sumarize_by_luhn(text, n=5, distanc=3, order=False, lemma=False):
    """Summarize *text* with Luhn's algorithm: grade every sentence and join
    the top *n* into a single string."""
    prepared = variables(text, n, lemma)
    notes = calc_note(prepared["sentence_clear"], prepared["rank"], distanc)
    return "".join(sumarize(prepared["tokens"], notes, n, order))
|
# Repository: dangerousbeak/tpc
from game import Zone, State, Exit
from random import randrange
from buttons import Button
# State names for the Racing zone's state machine (see Racing.enter_state/idle).
ATTRACT ="ATTRACT"                         # idle: lure passers-by to the start button
PRESTAGE = "PRESTAGE"                      # someone pressed the big button
WAITING_FOR_STAGE = "WAITING_FOR_STAGE"    # waiting for a racer on the track
STAGE = "STAGE"                            # racer staged, ready to arm the start
BLINK = "BLINK"                            # start button blinking, waiting for press
FAULT = "FAULT"                            # crossed the beam before GO (false start)
GO = "GO"                                  # countdown lights
RUNNING = "RUNNING"                        # race in progress
WAITING_TO_CROSS = "WAITING TO CROSS"      # green light on, waiting for start-beam break
GAVE_UP = "GAVE UP"                        # racer timed out / abandoned
FINISHED = "FINISHED"                      # finish beam broken; victory flashing
END_OF_RACE = "END OF RACE"                # wrap-up before returning to attract
class Racing(Zone):
    """Drag-race game zone: a timer-driven state machine.

    ``enter_state`` fires once per state transition and returns the next
    State (possibly itself with an incremented sub_state, acting as a
    timer tick); ``idle`` polls buttons/optos between transitions.
    """
    def enter(self):
        """Zone entry: reset the attract-mode chatter timer."""
        self.shut_up_until = 0
        return State(ATTRACT)
    def exit(self):
        """Zone exit: silence background audio and reset the race clock."""
        self.game.sounds.stop_background()
        self.game.clock.reset()
    def enter_state(self, state):
        """Handle one state (re-)entry; returns the next State."""
        g = self.game
        sub_state = state.sub_state
        if state == ATTRACT:
            if state.sub_state == 0:
                g.clock.reset()
                g.lights.turn_off_all()
                g.sounds.play_background("0 - race-track-sounds.mp3", loop=True)
                self.random_time = self.random_sound_time(state)
            if not sub_state % 2:
                g.lights.turn_on(7) #turn on Button light
                g.outlets.turn_on_all()
            else:
                g.lights.turn_off(7) #turn off Button light
                g.outlets.turn_off_all()
            return State(ATTRACT, sub_state+1, delay=1)
        if state == PRESTAGE:
            g.outlets.turn_on_all() #turn ON outlet lights in case they ended in off state
            g.sounds.play_random([
                "racers at prestage-1",
                "racers at prestage-2",
                "racers at prestage-3",
                "racers at prestage-5",
            ])
            g.sounds.play_random([
                "1 - arriving",
                "1 - arriving-2",
                "1 - arriving-3",
                "1 - arriving-4",
                "1 - arriving-5",
            ])
            g.lights.turn_on_only(0)
            return State(WAITING_FOR_STAGE, delay=3)
        if state == WAITING_FOR_STAGE:
            if sub_state == 3:
                return State(ATTRACT, delay=30)
            if sub_state > 2:
                g.sounds.play_random([
                    "what are you doing-1",
                    "what are you doing-2",
                    "what are you doing-4",
                    "what are you doing-7",
                    "don't just stand there I know youre there-1",
                    "don't just stand there I know youre there-2",
                ])
            return State(WAITING_FOR_STAGE, sub_state+1, delay=10)
        if state == STAGE:
            g.lights.turn_off(6) #turn off Red Light (if prior fault)
            g.sounds.play_random([
                "racers ready-1",
                "racers ready-2",
                "racers ready-3",
                "racers ready-4",
            ])
            g.lights.turn_on(1)
            return State(BLINK, delay=2)
        if state == BLINK:
            self.game.sounds.stop_background()
            if sub_state % 15 == 0:
                g.sounds.play_random([
                    "revving",
                    "revving 1",
                    "revving 2",
                    "revving 3",
                    "revving 4",
                    "revving 5",
                ])
            if not sub_state % 2:
                g.lights.turn_on(7) #turn on Button light
                g.outlets.turn_on_all()
            else:
                g.lights.turn_off(7) #turn off Button light
                g.outlets.turn_off_all()
            # Periodic reminder to press the start button
            if sub_state % 20 == 0:
                g.sounds.play_random([
                    "press yellow button to start race-1",
                    "press yellow button to start race-4",
                    "press yellow button to start race-5",
                ])
            # Eventually, time out
            if sub_state == 50:
                g.sounds.play_random([
                    "cant you follow instructions-1",
                    "cant you follow instructions-3",
                    "cant you follow instructions-4",
                    "cant you follow instructions-5",
                ])
                return State(GAVE_UP)
            return State(BLINK, sub_state+1, delay=0.5) #delay is between button blinks
        if state == FAULT:
            g.lights.turn_on(6) #turn on Red Light
            g.sounds.play_random([
                "racer disqualified-1",
                "racer disqualified-2",
                "racer disqualified-3",
                "racer disqualified-4",
                "racer disqualified-5",
            ])
            return State(STAGE, delay=5)
        if state == GO:
            g.sounds.play("short beep")
            g.lights.turn_on(2 + sub_state)
            if sub_state == 0:
                g.outlets.turn_on_all() #turn ON outlet lights in case they ended in off state
            if sub_state == 2:
                return State(WAITING_TO_CROSS, delay=1)
            return State(GO, sub_state+1, delay=1)
        if state == WAITING_TO_CROSS:
            if sub_state == 0:
                g.outlets.turn_on_all() #turn ON outlet lights in case they ended in off state
                g.lights.turn_on(5) #turn on Green Light
                g.sounds.play("long beep")
                g.clock.start()
                g.sounds.play_background("0 - race-track-sounds.mp3", loop=True)
                g.sounds.play_random([
                    "3 - pulling away",
                    "3 - pulling away-2",
                    "3 - pulling away-3",
                ])
            if sub_state > 200:
                return State(GAVE_UP)
            if sub_state % 2: #Alternate flashing rope lights until they cross - 200*.05 = 10 seconds
                g.outlets.turn_on(0)
                g.outlets.turn_off(1)
            else:
                g.outlets.turn_on(1)
                g.outlets.turn_off(0)
            return State(WAITING_TO_CROSS, sub_state+1, delay=0.05)
        if state == GAVE_UP:
            g.lights.turn_off(5) #turn off Green Light
            g.lights.turn_on(6) #turn on Red Light
            g.clock.stop()
            g.sounds.play("racer disqualified-4")
            return State(ATTRACT, delay=5)
        if state == RUNNING:
            if not sub_state:
                return State(RUNNING, 1, delay=5)
            return State(GAVE_UP, delay=180)
        if state == FINISHED: #Do a quick alternating flash of outlets
            if sub_state == 0:
                g.clock.stop()
            if not sub_state % 2:
                g.outlets.turn_on(0)
                g.outlets.turn_off(1)
            else:
                g.outlets.turn_on(1)
                g.outlets.turn_off(0)
            # Finish alternating lights
            if sub_state == 50:
                g.outlets.turn_on_all() #turn ON outlet lights in case they ended in off state
                return State(END_OF_RACE)
            return State(FINISHED, sub_state+1, delay=0.05)
        if state == END_OF_RACE:
            g.sounds.play_random([
                "1 - arriving",
                "1 - arriving-2",
                "1 - arriving-3",
                "1 - arriving-4",
                "1 - arriving-5",
            ])
            #stop the clock
            return State(ATTRACT, delay=15)
        raise ValueError("Unknown state {}".format(state))
    def exit_state(self, state):
        """Cleanup fired when leaving a state."""
        g = self.game
        if state == BLINK:
            g.outlets.turn_on_all()
            return
        if state == GO:
            g.lights.turn_off_all()
            return
        pass
    def idle(self, state):
        """Poll buttons and opto sensors between state transitions."""
        g = self.game
        sub_state = state.sub_state
        # This is just an example to change mode, but better to do in game.py
        #if g.buttons.blue:
        #    return Exit("quiet")
        # Multi button check: inner parens are important
        #if g.buttons.check( (Button.BLUE, Button.BLACK) ):
        #    g.sounds.play("crash")
        #    return State(ATTRACT)
        if g.buttons.red:
            if state.state in (GO, WAITING_TO_CROSS, GAVE_UP, RUNNING, END_OF_RACE):
                g.sounds.play_random([
                    "4 - trouble starting",
                    "4 - trouble starting-2",
                    "4 - trouble starting-3",
                    "racer down-1",
                    "racer down please use caution-1",
                    "racer down please use caution-2",
                    "racer down please use caution-3",
                    "racer down please use caution-4",
                    "racer down please use caution-5",
                    "racer down please use caution-6",
                    "racer down please use caution-7",
                ])
        if g.buttons.yellow:
            # BUG FIX: "(RUNNING)" is just the string "RUNNING", so the test was a
            # substring check; a one-element tuple is needed for membership.
            if state.state in (RUNNING,):
                g.sounds.play_random([
                    "all racers must enter pit area-1",
                    "all racers must enter pit area-2",
                    "all racers must enter pit area-3",
                    "please enter pit area-1",
                    "please enter pit area-2",
                    "please enter pit area-3",
                    "please enter pit area-4",
                    "please enter pit area-5",
                    "please enter pit area-6",
                    "please enter pit area-7",
                    "please enter pit area-8",
                    "please enter pit area-9",
                    "please enter pit area-all racers-4",
                    "please enter pit area-all racers-5",
                ])
        if g.buttons.green:
            # BUG FIX: same one-element-tuple fix as above.
            if state.state in (RUNNING,):
                g.sounds.play_random([
                    "2 - fixing-1",
                    "2 - fixing-2",
                    "2 - fixing-3",
                    "2 - fixing-4",
                    "2 - fixing-5",
                    "2 - fixing-6",
                    "2 - fixing-7",
                    "2 - fixing-8",
                    "2 - fixing-9",
                    "2 - fixing-10",
                ])
        if g.buttons.blue:
            if state.state in (GO, WAITING_TO_CROSS, GAVE_UP, RUNNING, END_OF_RACE):
                g.sounds.play_random([
                    "cheering-1",
                    "cheering-2",
                    "cheering-3",
                    "cheering-4",
                ])
        if g.buttons.black:
            if state.state in (GO, WAITING_TO_CROSS, GAVE_UP, RUNNING, END_OF_RACE):
                g.sounds.play_random([
                    "booing-1",
                    "booing-2",
                    "booing-3",
                ])
        if state == ATTRACT:
            if state.timer > self.random_time:
                g.sounds.play_random([
                    "hey stoner press this button-1",
                    "hey stoner press this button-2",
                    "hey stoner press this button-3",
                    "hey stoner press this button-4",
                    "hey stoner press this button-5",
                ])
                self.random_time = self.random_sound_time(state)
            if g.buttons.red or g.buttons.yellow or g.buttons.green or g.buttons.blue or g.buttons.black:
                g.sounds.play_random([
                    "don't touch that-1",
                    "don't touch that-2",
                    "don't touch that-3",
                    "don't touch that-5",
                    "don't touch that-6",
                    "don't touch that-7",
                ])
            if g.optos.outer: #see if someone is nearby in attract mode
                if state.timer > self.shut_up_until:
                    g.sounds.play_random([
                        "don't just stand there I know youre there-1",
                        "don't just stand there I know youre there-2",
                        "hey stoner press this button-1",
                        "hey stoner press this button-2",
                        "hey stoner press this button-3",
                    ])
                    self.shut_up_until = state.timer + randrange(45, 150) # seconds between talking
            if g.buttons.big:
                return State(PRESTAGE)
            return
        if state == WAITING_FOR_STAGE:
            if g.optos.inner: #make sure someone is one the track
                return State(STAGE) #go from pre-stage to stage
            return
        if state == BLINK:
            if g.buttons.big:
                return State(GO)
            return
        if state == GO:
            if g.optos.beam:
                return State(FAULT)
            return
        if state == WAITING_TO_CROSS:
            if g.optos.beam:
                g.sounds.play("beep tone")
                g.outlets.turn_on_all() #turn ON outlet lights in case they ended in off state
                return State(RUNNING)
            return
        if state == RUNNING:
            if sub_state == 1 and g.optos.beam:
                g.sounds.play("beep tone")
                g.sounds.play_random([
                    "cheering-1",
                    "cheering-2",
                    "cheering-3",
                    "cheering-4",
                ])
                return State(FINISHED)
            return
    def random_sound_time(self, state):
        """Next timer value at which an attract-mode callout should play."""
        if state == ATTRACT:
            return state.timer + randrange(300, 1000) #seconds. Used to play a random sound when nothing happening
        return state.timer + 10
|
# File: pisti.py
import random
def desteYap():
    """Build a standard 52-card deck as 'rank+suit' strings and return it shuffled."""
    renkL = ["♥️","♦️","♣️","♠️"]  # suits
    # Ranks A, 2..10, J, Q, K — built in one expression instead of the
    # original insert/append sequence.
    renk = ["A"] + [str(i) for i in range(2, 11)] + ["J", "Q", "K"]
    deste = [j + i for i in renkL for j in renk]
    random.shuffle(deste)
    return deste
def kartVer(deste):
    """Deal 4 random cards: remove them from *deste* and return them as a list."""
    verilecek = random.sample(deste, 4)  # 4 distinct random cards
    for kart in verilecek:
        # random.sample guarantees membership, so the original
        # "if kart in deste" guard was redundant.
        deste.remove(kart)
    return verilecek
def at(el):
    """Prompt for a 1-based card position, re-prompting until it is a valid
    digit within the hand, then pop and return that card."""
    index = input('Atılcak kart:')
    # Keep asking until the input is a number inside the hand's range.
    while not index.isdigit() or not 1 <= int(index) <= len(el):
        index = input('Lütfen geçerli sayı girin')
    # Convert to a 0-based position before removing the card.
    return el.pop(int(index) - 1)
def bastır(liste):
    """Print every element of *liste* consecutively, with no separator and no
    trailing newline."""
    print(*liste, sep='', end='')
def tur(el,yer,Al,isim):
    '''Play one turn: show the table and the player's hand, prompt for a card,
    and capture the table pile when the thrown card matches the previous top
    card's rank or is a jack (pişti rules).

    :param el: list -> the player's hand (mutated: the played card is removed)
    :param yer: list -> cards on the table (mutated: cleared on capture)
    :param Al: list -> the player's captured cards (mutated: extended on capture)
    :param isim: str -> the player's display name
    '''
    print("Yer:\t\t\t\t", yer)
    print(isim," Oyuncusunun")
    print("\t\tAldığı kartlar:\t\t", Al)
    print("\t\tElindeki :\t\t\t", el)
    # print("\t\t\t\t\t\t\t",[str(i)+"." for i in range(1,len(el)+1)]) #Prints the card indexes.
    check = at(el) #Ask which card to play; it is removed from the hand.
    print("\t\tAtılan kart: ",check)
    yer.append(check)
    # NOTE(review): when the table was empty, len(yer)-2 == -1 wraps to the
    # card just thrown, so a throw onto an empty table always "captures"
    # itself — looks unintended; confirm against the game rules.
    if (check[0] == yer[len(yer)-2][0]) or check[0]=="J": #Rank matches the previous top card, or a jack.
        Al.extend(yer)
        print("\t\tAldığı kartlar: ", [i for i in Al])
        # .clear() empties the caller's shared table list; the rebinds below
        # only touch the local name and have no effect outside this function.
        yer = yer.clear()
        yer = []
def puanHesap(deste):
    """Score a pile of captured cards, pişti-style.

    Club 2 is worth 2 points, diamond 10 is worth 3, and every ace or jack
    is worth 1. Returns (card_count, points).
    """
    puan = 0
    for kart in deste:
        if kart[0] == '2' and kart[1] == '♣':
            puan += 2
        elif kart[:2] == '10' and kart[2] == '♦':
            puan += 3
        elif kart[0] in ('A', 'J'):
            puan += 1
    # Count the captured cards (placeholder blanks excluded).
    kartSayisi = sum(1 for kart in deste if kart != ' ')
    return (kartSayisi, puan)
# --- interactive two-player pişti game ---
# (The original dead assignments oyuncu1="Mehmet" / oyuncu2="Emrullah" were
# removed: they were overwritten by input() immediately.)
oyuncu1=input('İsminizi girin:')  # player 1 name
oyuncu2=input('İsminizi girin:')  # player 2 name
o1Al=[]  # cards captured by player 1
o2Al=[]  # cards captured by player 2
deste = desteYap()
yer = kartVer(deste)  # four cards start face-up on the table
while(len(deste) != 0):
    print("Kalan kart sayısı: ", len(deste))
    # Deal a fresh hand of four cards to each player.
    D1 = kartVer(deste)
    D2 = kartVer(deste)
    while(len(D1) > 0 or len(D2) > 0):
        tur(D1,yer,o1Al,oyuncu1)
        tur(D2,yer,o2Al,oyuncu2)
        print("\n\n\n")
# Final scores as (card_count, points) per player.
print(puanHesap(o1Al))
print(puanHesap(o2Al))
|
"""
Command-line interface for interacting with Luigi scheduler.
"""
import json
import requests
import datetime
import click
from fnmatch import fnmatch
import sys
from os.path import join
from collections import Counter
from babel.numbers import format_number
class TooManyTasksError(Exception):
    """Raised when a scheduler query would return more tasks than the limit."""
    def __init__(self, num_tasks):
        self.num_tasks = num_tasks

    def __str__(self):
        count = format_number(self.num_tasks)
        return ('That request would return {} tasks; try filtering by status, '
                'glob query or set the --no-limit flag.'.format(count))
def rpc(scheduler_url, method, **kwargs):
    """Call the Luigi scheduler HTTP API *method* and return its 'response' payload.

    Raises TooManyTasksError when the scheduler reports the result was
    truncated (it returns a 'num_tasks' count instead of the tasks).
    """
    endpoint = join(scheduler_url, 'api', method)
    params = {'data': json.dumps(kwargs)} if kwargs else None
    res = requests.get(endpoint, params=params)
    res.raise_for_status()
    data = res.json()['response']
    if 'num_tasks' in data:
        raise TooManyTasksError(data['num_tasks'])
    return data
def task_sort_key(task):
    """Produce a key to sort tasks by relevance (most recently active first)."""
    reference = task['time_running'] if task['status'] == 'RUNNING' else task['last_updated']
    return datetime.datetime.now() - reference
def task_matches(task, task_glob):
    """Match a task against a glob pattern; None matches every task."""
    if task_glob is None:
        return True
    return fnmatch(task['name'], task_glob) or fnmatch(task['display_name'], task_glob)
class TaskFormatter(object):
    """Format a task for textual display."""
    @staticmethod
    def format_status(status):
        # Status -> terminal color; unknown statuses pass through unstyled.
        colors = {'DONE': 'green',
                  'PENDING': 'yellow',
                  'RUNNING': 'blue',
                  'FAILED': 'red',
                  'DISABLED': 'white',
                  'UNKNOWN': 'white'}
        if status not in colors:
            return status
        return click.style(status, fg=colors[status])

    def format(self, task):
        raise NotImplementedError
class InlineTaskFormatter(TaskFormatter):
    """Format a task as one tab-separated line: id, status, and a time column
    (elapsed running time for RUNNING tasks, last update otherwise)."""
    def __init__(self, task_id_width, status_width=17):
        self.task_id_width = task_id_width
        self.status_width = status_width

    def format(self, task):
        if task['status'] == 'RUNNING':
            when = datetime.datetime.now() - task['time_running']
        else:
            when = task['last_updated']
        return '{id:{id_width}}\t{status:{status_width}}\t{time}\n'.format(
            id=click.style(task['id'], bold=True),
            id_width=self.task_id_width,
            status=self.format_status(task['status']),
            status_width=self.status_width,
            time=when)
class DetailedTaskFormatter(TaskFormatter):
    """Format a task for detailed multi-line display."""
    @staticmethod
    def format_dl(dl):
        """Render a dict as an aligned, tab-indented definition list."""
        # Align values after the longest (styled) key.
        key_fill = max(len(click.style(k)) for k in dl)
        return '\n\t'.join('{:{key_fill}}\t{}'.format(click.style(k, bold=True) + ':', v, key_fill=key_fill) for k, v in dl.items())
    def format(self, task):
        """Render every known field of *task* as a multi-line report."""
        return '''{id}
Status:      \t{status}
Priority:    \t{priority}
Name:        \t{display_name}
Start time:  \t{start_time}
Last updated:\t{last_updated}
Time running:\t{time_running}
Status message:\n\t{status_message}
Parameters:\n\t{params}
Resources:\n\t{resources}
Workers:\n\t{workers}\n\n'''.format(
            id=click.style(task['id'], bold=True),
            display_name=task['display_name'],
            status=self.format_status(task['status']),
            priority=task['priority'],
            status_message=task['status_message'] if task['status_message'] else 'No status message were received for this task.',
            start_time=task['start_time'],
            last_updated=task['last_updated'],
            # Elapsed time only makes sense while the task is RUNNING.
            time_running=(datetime.datetime.now() - task['time_running']) if task['status'] == 'RUNNING' else '',
            params=self.format_dl(task['params']) if task['params'] else 'No parameters were set.',
            resources=self.format_dl(task['resources']) if task['resources'] else 'No resources were requested for the execution of this task.',
            workers='\n\t'.join(task['workers']) if task['workers'] else 'No workers are assigned to this task.')
class TasksSummaryFormatter(object):
    """Render a per-status count summary, one status per line."""
    def format(self, tasks):
        counts = Counter(task['status'] for task in tasks)
        lines = ('{}\t{}'.format(TaskFormatter.format_status(status), total)
                 for status, total in counts.items())
        return '\n'.join(lines)
def fix_tasks_dict(tasks):
    """Normalize a scheduler task dict in place: copy each key into the task's
    'id' field and convert truthy epoch timestamps to datetime objects
    (falsy values such as None are left untouched)."""
    for task_id, task in tasks.items():
        task['id'] = task_id
        for field in ('start_time', 'time_running', 'last_updated'):
            if task[field]:
                task[field] = datetime.datetime.fromtimestamp(task[field])
@click.group()
@click.option('--scheduler-url', default='http://localhost:8082/')
@click.pass_context
def main(ctx, scheduler_url):
    # Root command group: stash the scheduler URL on the click context so
    # every subcommand can reach it via ctx.obj.
    ctx.obj = {'SCHEDULER_URL': scheduler_url}
@main.command()
@click.argument('task_glob', required=False)
@click.option('--status', multiple=True)
@click.option('--user', multiple=True)
@click.option('--summary', is_flag=True)
@click.option('--detailed', is_flag=True)
@click.option('--no-limit', is_flag=True)
@click.pass_context
def list(ctx, task_glob, status, user, summary, detailed, no_limit):
    """
    List all tasks that match the given pattern and filters.
    """
    # NOTE: the function name shadows the builtin `list`, but click derives
    # the CLI command name from it, so renaming would change the interface.
    scheduler_url = ctx.obj['SCHEDULER_URL']
    # The scheduler's search is a substring match: strip glob wildcards here
    # and apply the full glob locally via task_matches() below.
    search = task_glob.replace('*', '') if task_glob else None
    limit = None if no_limit else 100000
    tasks = {}
    if status:
        # One scheduler query per requested status.
        for s in status:
            try:
                tasks.update(rpc(scheduler_url, 'task_list', search=search, status=s, limit=limit))
            except TooManyTasksError as e:
                click.echo(e)
                return
    else:
        try:
            tasks.update(rpc(scheduler_url, 'task_list', search=search, limit=limit))
        except TooManyTasksError as e:
            click.echo(e)
            return
    fix_tasks_dict(tasks)
    filtered_tasks = tasks.values()
    # filter by user
    if user:
        filtered_tasks = [task for task in filtered_tasks
                          if any(u in worker for worker in task['workers'] for u in user)]
    # Apply the exact glob the scheduler query could not express.
    filtered_tasks = [task for task in filtered_tasks
                      if task_matches(task, task_glob)]
    if not filtered_tasks:
        click.echo('No task match the provided query.')
        return
    # Width of the widest (bold-styled) id, used to align inline output.
    task_id_width = max(len(click.style(task['id'], bold=True)) for task in filtered_tasks)
    if summary:
        formatter = TasksSummaryFormatter()
        click.echo(formatter.format(filtered_tasks))
    else:
        if detailed:
            formatter = DetailedTaskFormatter()
        else:
            formatter = InlineTaskFormatter(task_id_width=task_id_width)
        click.echo_via_pager(formatter.format(t) for t in sorted(filtered_tasks, key=task_sort_key))
@main.command()
def submit(*args):
    """
    Schedule a given task for execution.
    """
    # NOTE(review): `luigi` is never imported in this module, so invoking this
    # command raises NameError. Also, click passes no positional arguments to
    # a bare command, so *args is always empty — presumably this was meant to
    # forward extra CLI arguments. Confirm and add `import luigi.cmdline`
    # (plus argument plumbing) before relying on this command.
    luigi.cmdline.luigi_run(args)
@main.command()
@click.argument('task_id')
@click.pass_context
def show(ctx, task_id):
    """
    Show the details of a specific task given its identifier.
    TASK_ID Task identifier
    """
    scheduler_url = ctx.obj['SCHEDULER_URL']
    tasks = {}
    # task_search groups results by status; merge them into one dict.
    for _status, found in rpc(scheduler_url, 'task_search', task_str=task_id).items():
        tasks.update(found)
    fix_tasks_dict(tasks)
    formatter = DetailedTaskFormatter()
    try:
        details = formatter.format(tasks[task_id])
    except KeyError:
        click.echo('No such task %s.' % task_id)
        sys.exit(1)
    click.echo(details)
@main.command()
@click.argument('task_id')
@click.option('--recursive', is_flag=True)
@click.pass_context
def reenable(ctx, task_id, recursive):
    """
    Reenable a disabled task.
    """
    scheduler_url = ctx.obj['SCHEDULER_URL']
    targets = [task_id]
    if recursive:
        # Queue every disabled task from the dependency graph as well.
        deps = rpc(scheduler_url, 'dep_graph', task_id=task_id)
        targets.extend(tid for tid, info in deps.items() if info['status'] == 'DISABLED')
    for target in targets:
        try:
            rpc(scheduler_url, 're_enable_task', task_id=target)
        except requests.exceptions.HTTPError as e:
            # Report and keep going with the remaining tasks.
            click.echo('Failed to re-enable {}: {}'.format(target, e))
        else:
            click.echo('%s has been re-enabled.' % target)
@main.command()
@click.argument('task_id')
@click.option('--recursive', is_flag=True)
@click.pass_context
def forgive(ctx, task_id, recursive):
    """
    Forgive a failed task.
    """
    scheduler_url = ctx.obj['SCHEDULER_URL']
    # Bug fix: the task itself must always be forgiven. The list previously
    # started empty, so a non-recursive invocation forgave nothing (compare
    # the symmetric `reenable` command, which seeds its list with task_id).
    toforgive = [task_id]
    if recursive:
        deps = rpc(scheduler_url, 'dep_graph', task_id=task_id)
        # Exclude the root to avoid forgiving it twice if it is FAILED.
        toforgive.extend(k for k in deps
                         if deps[k]['status'] == 'FAILED' and k != task_id)
    for task_id in toforgive:
        try:
            rpc(scheduler_url, 'forgive_failures', task_id=task_id)
            click.echo('%s has been forgiven.' % task_id)
        except requests.exceptions.HTTPError as e:
            # Report the failure and continue with the remaining tasks.
            click.echo('Failed to forgive {}: {}'.format(task_id, e))
            continue
|
from skillmap.main import generate
from skillmap.nodes.common import SECTION_SEPARATOR
from skillmap.nodes.skillmap_node import create_skillmap_graph
from skillmap.nodes.group_node import create_group_subgraph
from skillmap.nodes.skill_node import create_skill_node
def test_generate():
    """Render the bundled url_shortener map end to end."""
    map_graph = generate("tests/url_shortener.toml")
    assert map_graph
    for fragment in (
        "flowchart TD",
        "url_shortener",
        "url_shortener-->groups.backend",
        "class groups.webui",
    ):
        assert fragment in map_graph
def test_skillmap_node_with_missing_name():
    """A skillmap without a name falls back to the default title."""
    map_graph = create_skillmap_graph({"skillmap": {}})
    assert map_graph
    for fragment in ("flowchart TD", "unamed_skill_map"):
        assert fragment in map_graph
def test_skillmap_node_with_missing_theme():
    """An unknown theme name must not prevent rendering."""
    config = {
        "skillmap": {
            "name": "url shortener",
            "icon": "anchor",
            "theme": "not_found",
        }
    }
    map_graph = create_skillmap_graph(config)
    assert map_graph
    for fragment in ("flowchart TD", "url shortener"):
        assert fragment in map_graph
def test_skillmap_node_with_orientation():
    """An explicit orientation overrides the default TD flowchart layout."""
    config = {
        "skillmap": {
            "name": "url shortener",
            "icon": "anchor",
            "orientation": "LR",
        }
    }
    map_graph = create_skillmap_graph(config)
    assert map_graph
    for fragment in ("flowchart LR", "url shortener"):
        assert fragment in map_graph
def test_skillmap_node_with_auto_required_groups():
    """Only groups without explicit requirements hang off the root node."""
    config = {
        "skillmap": {"name": "url shortener"},
        "groups": {
            "g1": {"name": "g1"},
            "g2": {"name": "g2", "requires": ["g1"]},
        },
    }
    map_graph = create_skillmap_graph(config)
    assert map_graph
    assert "flowchart TD" in map_graph
    # g1 has no requirements, so the root links to it directly...
    assert "url_shortener-->groups.g1" in map_graph
    # ...while g2 depends on g1 and must not be linked from the root.
    assert "url_shortener-->groups.g2" not in map_graph
def test_visit_group_without_skill():
    """A group with no skills renders an empty subgraph body."""
    group_graph = create_group_subgraph(
        "g1",
        {"name": "web ui", "icon": "anchor"},
    )
    expected = "\n".join(
        [
            SECTION_SEPARATOR,
            'subgraph groups.g1["fa:fa-anchor web ui"]',
            "",  # skill list is skipped
            "end",
            "class groups.g1 newSkillGroup;",
            "",
        ]
    )
    assert group_graph == expected
def test_visit_group():
    """A group with skills renders one node and one class line per skill."""
    group_graph = create_group_subgraph(
        "g1",
        {
            "name": "web ui",
            "icon": "anchor",
            "skills": {
                "s1": {"name": "url validator", "icon": "globe"},
                "s2": {"name": "React", "icon": "html5"},
            },
        },
    )
    expected = "\n".join(
        [
            SECTION_SEPARATOR,
            'subgraph groups.g1["fa:fa-anchor web ui"]',
            'groups.g1.skills.s1("fa:fa-globe<br/>url validator")',
            "class groups.g1.skills.s1 newSkill;",
            "",
            'groups.g1.skills.s2("fa:fa-html5<br/>React")',
            "class groups.g1.skills.s2 newSkill;",
            "",
            "end",
            "class groups.g1 newSkillGroup;",
            "",
        ]
    )
    assert group_graph == expected
def test_visit_group_with_requires():
    """A group's `requires` entries become incoming edges after the subgraph."""
    group_graph = create_group_subgraph(
        "g1",
        {
            "name": "web ui",
            "icon": "anchor",
            "requires": ["groups.g2.skills.s1"],
        },
    )
    expected = "\n".join(
        [
            SECTION_SEPARATOR,
            'subgraph groups.g1["fa:fa-anchor web ui"]',
            "",  # skill list is skipped
            "end",
            "class groups.g1 newSkillGroup;",
            "groups.g2.skills.s1-->groups.g1",
        ]
    )
    assert group_graph == expected
|
# Aliases for commands. The keys of the given dictionary are the
# aliases, while the values are the commands they map to.
# Type: Dict
c.aliases = {'w': 'session-save', 'wq': 'quit --save', 'mpv': 'spawn -d mpv --force-window=immediate {url}', 'nicehash': 'spawn --userscript nicehash', 'pass': 'spawn -d pass -c'}
# Require a confirmation before quitting the application.
# Type: ConfirmQuit
# Valid values:
# - always: Always show a confirmation.
# - multiple-tabs: Show a confirmation if multiple tabs are opened.
# - downloads: Show a confirmation if downloads are running
# - never: Never show a confirmation.
c.confirm_quit = ['always']
# Always restore open sites when qutebrowser is reopened.
# Type: Bool
c.auto_save.session = True
# Value to send in the `Accept-Language` header. Note that the value
# read from JavaScript is always the global value.
# Type: String
c.content.headers.accept_language = 'en-US,en;q=0.8,fi;q=0.6'
# Send the DNT (Do-Not-Track) header with every request.
# Type: Bool
c.content.headers.do_not_track = True
# User agent to send. Unset to send the default. Note that the value
# read from JavaScript is always the global value.
# Type: String
c.content.headers.user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
# List of domains that should always be loaded, despite being ad-
# blocked. Domains may contain * and ? wildcards and are otherwise
# required to exactly match the requested domain. Local domains are
# always exempt from hostblocking.
# Type: List of String
c.content.host_blocking.whitelist = ['*.flurry.com',
'*.firebase.google.com',
'*.google.com',
'*.googleapis.com',
'*.newrelic.com',
'*.googleadservices.com',
'*.gstatic.com',
'*.googletagmanager.com',
'*.githubapp.com',
'*.nr-data.net',
'*.qq.com',
'*.googlecode.com',
'*.google-analytics.com']
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'file://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome://*/*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'qute://*/*')
# Proxy to use. In addition to the listed values, you can use a
# `socks://...` or `http://...` URL.
# Type: Proxy
# Valid values:
# - system: Use the system wide proxy.
# - none: Don't use any proxy
c.content.proxy = 'none'
# Validate SSL handshakes.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.ssl_strict = True
# List of user stylesheet filenames to use.
# Type: List of File, or File
c.content.user_stylesheets = 'user.css'
# Height (in pixels or as percentage of the window) of the completion.
# Type: PercOrInt
c.completion.height = '25%'
# Move on to the next part when there's only one possible completion
# left.
# Type: Bool
c.completion.quick = False
# When to show the autocompletion window.
# Type: String
# Valid values:
# - always: Whenever a completion is available.
# - auto: Whenever a completion is requested.
# - never: Never.
c.completion.show = 'auto'
# Directory to save downloads to. If unset, a sensible OS-specific
# default is used.
# Type: Directory
c.downloads.location.directory = '~/Downloads'
# Prompt the user for the download location. If set to false,
# `downloads.location.directory` will be used.
# Type: Bool
c.downloads.location.prompt = False
# Editor (and arguments) to use for the `open-editor` command. The
# following placeholders are defined: * `{file}`: Filename of the file
# to be edited. * `{line}`: Line in which the caret is found in the
# text. * `{column}`: Column in which the caret is found in the text. *
# `{line0}`: Same as `{line}`, but starting from index 0. * `{column0}`:
# Same as `{column}`, but starting from index 0.
# Type: ShellCommand
c.editor.command = ["emacsclient", "-c", "{}"]
# When a hint can be automatically followed without pressing Enter.
# Type: String
# Valid values:
# - always: Auto-follow whenever there is only a single hint on a page.
# - unique-match: Auto-follow whenever there is a unique non-empty match in either the hint string (word mode) or filter (number mode).
# - full-match: Follow the hint when the user typed the whole hint (letter, word or number mode) or the element's text (only in number mode).
# - never: The user will always need to press Enter to follow a hint.
c.hints.auto_follow = 'always'
# Characters used for hint strings.
# Type: UniqueCharString
c.hints.chars = 'hutenosadi'
# Automatically enter insert mode if an editable element is focused
# after loading the page.
# Type: Bool
c.input.insert_mode.auto_load = True
# Leave insert mode if a non-editable element is clicked.
# Type: Bool
c.input.insert_mode.auto_leave = True
# Show a scrollbar.
# Type: Bool
# c.scrolling.bar = False
# Enable smooth scrolling for web pages. Note smooth scrolling does not
# work with the `:scroll-px` command.
# Type: Bool
c.scrolling.smooth = False
# Open new tabs (middleclick/ctrl+click) in the background.
# Type: Bool
c.tabs.background = True
# How to behave when the last tab is closed.
# Type: String
# Valid values:
# - ignore: Don't do anything.
# - blank: Load a blank page.
# - startpage: Load the start page.
# - default-page: Load the default page.
# - close: Close the window.
c.tabs.last_close = 'close'
# Padding (in pixels) around text for tabs.
# Type: Padding
c.tabs.padding = {'left': 5, 'right': 5, 'top': 2, 'bottom': 2}
# Position of the tab bar.
# Type: Position
# Valid values:
# - top
# - bottom
# - left
# - right
c.tabs.position = 'top'
# Which tab to select when the focused tab is removed.
# Type: SelectOnRemove
# Valid values:
# - prev: Select the tab which came before the closed one (left in horizontal, above in vertical).
# - next: Select the tab which came after the closed one (right in horizontal, below in vertical).
# - last-used: Select the previously selected tab.
c.tabs.select_on_remove = 'prev'
# Page to open if :open -t/-b/-w is used without URL. Use `about:blank`
# for a blank page.
# Type: FuzzyUrl
c.url.default_page = 'about:blank'
# Search engines which can be used via the address bar. Maps a search
# engine name (such as `DEFAULT`, or `ddg`) to a URL with a `{}`
# placeholder. The placeholder will be replaced by the search term, use
# `{{` and `}}` for literal `{`/`}` signs. The search engine named
# `DEFAULT` is used when `url.auto_search` is turned on and something
# else than a URL was entered to be opened. Other search engines can be
# used by prepending the search engine name to the search term, e.g.
# `:open google qutebrowser`.
# Type: Dict
c.url.searchengines = {'DEFAULT': 'https://www.google.com/search?q={}',
'gh': 'https://github.com/search?q={}',
'so': 'https://stackoverflow.com/search?q={}',
'bd': 'https://www.baidu.com/s?wd={}'}
# Page(s) to open at the start.
# Type: List of FuzzyUrl, or FuzzyUrl
c.url.start_pages = 'https://github.com/'
# Hide the window decoration. This setting requires a restart on
# Wayland.
# Type: Bool
c.window.hide_decoration = False
# Format to use for the window title. The same placeholders like for
# `tabs.title.format` are defined.
# Type: FormatString
c.window.title_format = '{private}{perc}{current_title}{title_sep}qutebrowser'
# Font used in the completion widget.
# Type: Font
c.fonts.completion.entry = "16pt 'Fira code'"
# Font used in the completion categories.
# Type: Font
c.fonts.completion.category = "bold 16pt 'Fira code'"
# Font used for the debugging console.
# Type: QtFont
c.fonts.debug_console = "16pt 'Fira code'"
# Font used for the downloadbar.
# Type: Font
c.fonts.downloads = "16pt 'Fira code'"
# Font used for the hints.
# Type: Font
c.fonts.hints = "bold 16pt 'DejaVu Sans Mono'"
# Font used in the keyhint widget.
# Type: Font
c.fonts.keyhint = "16pt 'Fira code'"
# Font used for error messages.
# Type: Font
c.fonts.messages.error = "16pt 'Fira code'"
# Font used for info messages.
# Type: Font
c.fonts.messages.info = "16pt 'Fira code'"
# Font used for warning messages.
# Type: Font
c.fonts.messages.warning = "16pt 'Fira code'"
# Font used for prompts.
# Type: Font
c.fonts.prompts = "16pt 'Fira code'"
# Font used in the statusbar.
# Type: Font
c.fonts.statusbar = "16pt 'Fira code'"
# Font used in the tab bar.
# Type: QtFont
c.fonts.tabs = "16pt 'Fira code'"
# Default font family used wherever a specific font setting does not override it.
# NOTE(review): `fonts.default_family` normally expects just a family name
# (e.g. "Fira code"); the "16pt" size prefix here may be rejected — confirm.
c.fonts.default_family = "16pt 'Fira code'"

# Bindings for command mode
config.bind('<Ctrl+n>', 'completion-item-focus next', mode='command')
config.bind('<Ctrl+p>', 'completion-item-focus prev', mode='command')

# Pull in additional (pywal-generated) settings from qutewal.py.
config.source('qutewal.py')
|
<reponame>jadecastro/imitation
"""Configuration settings for train_rl, training a policy with RL."""
import sacred
import highway_env
from imitation.scripts.common import common, rl, train
# Sacred experiment wiring together the shared common/train/rl ingredients.
train_rl_ex = sacred.Experiment(
    "train_rl",
    ingredients=[common.common_ingredient, train.train_ingredient, rl.rl_ingredient],
)
@train_rl_ex.config
def train_rl_defaults():
    """Default hyperparameters for RL training, reward overrides and saving."""
    total_timesteps = int(1e6)  # Number of training timesteps in model.learn()
    normalize = True  # Use VecNormalize
    normalize_kwargs = dict()  # kwargs for `VecNormalize`

    # If specified, overrides the ground-truth environment reward
    reward_type = None  # override reward type
    reward_path = None  # override reward path

    rollout_save_final = True  # If True, save after training is finished.
    rollout_save_n_timesteps = None  # Min timesteps saved per file, optional.
    rollout_save_n_episodes = None  # Num episodes saved per file, optional.

    policy_save_interval = 10000  # Num timesteps between saves (<=0 disables)
    policy_save_final = True  # If True, save after training is finished.
@train_rl_ex.config
def default_end_cond(rollout_save_n_timesteps, rollout_save_n_episodes):
    """Fallback rollout-saving end condition.

    Only set a default if both end-condition options are None. This way the
    Sacred CLI caller can set `rollout_save_n_episodes` only, without getting
    an error that `rollout_save_n_timesteps is not None`.
    """
    if rollout_save_n_timesteps is None and rollout_save_n_episodes is None:
        rollout_save_n_timesteps = 2000  # Min timesteps saved per file, optional.
# Standard Gym env configs
@train_rl_ex.named_config
def highway_fast():
    """highway-env fast highway variant."""
    common = {"env_name": "highway-fast-v0"}


@train_rl_ex.named_config
def acrobot():
    """Classic-control Acrobot."""
    common = {"env_name": "Acrobot-v1"}


@train_rl_ex.named_config
def ant():
    """MuJoCo Ant with a large batch and longer training."""
    common = {"env_name": "Ant-v2"}
    rl = {"batch_size": 16384}
    total_timesteps = int(5e6)


@train_rl_ex.named_config
def cartpole():
    """Classic-control CartPole with a short training budget."""
    common = {"env_name": "CartPole-v1"}
    total_timesteps = int(1e5)


@train_rl_ex.named_config
def seals_cartpole():
    """seals variant of CartPole."""
    common = {"env_name": "seals/CartPole-v0"}
    total_timesteps = int(1e6)


@train_rl_ex.named_config
def half_cheetah():
    """MuJoCo HalfCheetah."""
    common = {"env_name": "HalfCheetah-v2"}
    total_timesteps = int(5e6)  # does OK after 1e6, but continues improving


@train_rl_ex.named_config
def seals_hopper():
    """seals variant of Hopper."""
    common = {"env_name": "seals/Hopper-v0"}


@train_rl_ex.named_config
def seals_humanoid():
    """seals variant of Humanoid; needs a long run to converge."""
    common = {"env_name": "seals/Humanoid-v0"}
    rl = {"batch_size": 16384}
    total_timesteps = int(10e6)  # fairly discontinuous, needs at least 5e6


@train_rl_ex.named_config
def mountain_car():
    """Classic-control MountainCar."""
    common = {"env_name": "MountainCar-v0"}


@train_rl_ex.named_config
def seals_mountain_car():
    """seals variant of MountainCar."""
    common = {"env_name": "seals/MountainCar-v0"}


@train_rl_ex.named_config
def pendulum():
    """Classic-control Pendulum."""
    common = {"env_name": "Pendulum-v0"}


@train_rl_ex.named_config
def reacher():
    """MuJoCo Reacher."""
    common = {"env_name": "Reacher-v2"}


@train_rl_ex.named_config
def seals_ant():
    """seals variant of Ant."""
    common = {"env_name": "seals/Ant-v0"}


@train_rl_ex.named_config
def seals_swimmer():
    """seals variant of Swimmer."""
    common = {"env_name": "seals/Swimmer-v0"}


@train_rl_ex.named_config
def seals_walker():
    """seals variant of Walker2d."""
    common = {"env_name": "seals/Walker2d-v0"}
# Debug configs
@train_rl_ex.named_config
def fast():
    """Intended for testing purposes: small # of updates, ends quickly."""
    total_timesteps = int(4)  # just enough steps to exercise the training loop
    policy_save_interval = 2  # force at least one intermediate checkpoint
|
<reponame>xmings/IdeaNote
#!/bin/python
# -*- coding: utf-8 -*-
# @File : service.py
# @Author: wangms
# @Date : 2019/8/3
import zlib, requests
from core.model import Catalog, Image, db
from datetime import datetime
from app import app
from flask import session
from sqlalchemy.sql import functions
from common import SyncStatusEnum, NoteStatusEnum, PasswordStatusEnum, status_text_mapping
class NoteService(object):
    """Service layer for notes (Catalog rows) and their attached images.

    Note bodies are stored zlib-compressed. Every mutation stamps the row
    with a NoteStatusEnum status and marks it as needing synchronization.
    """

    # Cached id of the catalog root; refreshed by fetch_catalog_tree().
    catalog_root_id = 0

    @classmethod
    def create_note(cls, title, parent_id, content, **kwargs):
        """Create a note under `parent_id` and return the new note's id."""
        # Fix: this is a @classmethod, so the first parameter is the class —
        # renamed from the misleading `self` to `cls`.
        content = zlib.compress(content.encode("utf8"))
        # NOTE(review): ordering ascending makes `last_child` the sibling with
        # the *smallest* seq_no; if new notes should be appended after the
        # last sibling, this probably wants .desc() — confirm intent.
        last_child = Catalog.query.filter(Catalog.parent_id == parent_id).order_by(Catalog.seq_no.asc()).first()
        seq_no = 1 if not last_child or not last_child.seq_no else last_child.seq_no + 1
        note = Catalog(title=title, parent_id=parent_id, content=content, seq_no=seq_no, **kwargs)
        note.status = NoteStatusEnum.create.value
        note.sync_status = SyncStatusEnum.need_sync.value
        db.session.add(note)
        db.session.commit()
        return note.id

    @classmethod
    def update_note_title(cls, note_id, title):
        """Rename a note and flag it for synchronization."""
        note = Catalog.query.filter_by(id=note_id).first()
        note.title = title
        note.sync_status = SyncStatusEnum.need_sync.value
        note.modification_time = datetime.now()
        note.status = NoteStatusEnum.update_title.value
        db.session.commit()
        return True

    @classmethod
    def update_note_position(cls, note_id, parent_id, index):
        """Move a note under `parent_id` at position `index` (0-based).

        Renumbers the surviving siblings sequentially, leaving a gap at
        `index` into which the moved note is placed.
        """
        note = Catalog.query.filter_by(id=note_id).first()
        node_seq = 0
        for child in Catalog.query.filter(
                Catalog.parent_id == parent_id,
                Catalog.status != NoteStatusEnum.delete.value,
                Catalog.id != note.id
        ).order_by(Catalog.seq_no.asc()).all():
            if node_seq == index:
                # Leave this slot free for the moved note.
                node_seq += 1
            if child.seq_no != node_seq:
                child.seq_no = node_seq
                child.sync_status = SyncStatusEnum.need_sync.value
                child.modification_time = datetime.now()
                child.status = NoteStatusEnum.update_position.value
            node_seq += 1
        # Finally drop the note into the reserved slot.
        note.parent_id = parent_id
        note.seq_no = index
        note.sync_status = SyncStatusEnum.need_sync.value
        note.modification_time = datetime.now()
        note.status = NoteStatusEnum.update_position.value
        db.session.commit()
        return True

    @classmethod
    def update_note_content(cls, note_id, content):
        """Replace a note's body; no-op if the content is unchanged."""
        note = Catalog.query.filter_by(id=note_id).first()
        if zlib.decompress(note.content).decode("utf8") == content:
            return True
        note.content = zlib.compress(content.encode("utf8"))
        note.sync_status = SyncStatusEnum.need_sync.value
        note.modification_time = datetime.now()
        note.status = NoteStatusEnum.update_content.value
        db.session.commit()
        return True

    @classmethod
    def update_note_lock_status(cls, note_id, toggle=True, lock=True):
        """Toggle or explicitly set a note's password protection.

        With toggle=True the current state is flipped and `lock` is ignored;
        otherwise `lock` selects the target state directly.
        """
        note = Catalog.query.filter_by(id=note_id).first()
        if toggle:
            note.with_passwd = PasswordStatusEnum.has_password.value \
                if note.with_passwd == PasswordStatusEnum.no_password.value else PasswordStatusEnum.no_password.value
        else:
            note.with_passwd = PasswordStatusEnum.has_password.value if lock else PasswordStatusEnum.no_password.value
        note.sync_status = SyncStatusEnum.need_sync.value
        note.modification_time = datetime.now()
        note.status = NoteStatusEnum.update_lock.value
        db.session.commit()
        return True

    @classmethod
    def delete_note(cls, id):
        """Soft-delete a note and all of its descendants (breadth-first)."""
        # `id` shadows the builtin, but is kept for keyword-caller compatibility.
        note = Catalog.query.filter_by(id=id).first()
        delete_notes = [note]
        while delete_notes:
            note = delete_notes.pop(0)
            children = Catalog.query.filter_by(parent_id=note.id).all()
            delete_notes += list(children)
            note.sync_status = SyncStatusEnum.need_sync.value
            note.status = NoteStatusEnum.delete.value
            note.modification_time = datetime.now()
        db.session.commit()
        return True

    @classmethod
    def fetch_catalog_tree(cls):
        """Build and return the nested catalog tree rooted at the root note."""
        # The root row marks itself via parent_id == "self".
        root = Catalog.query.filter(Catalog.status != NoteStatusEnum.delete.value, Catalog.parent_id == "self").first()
        notes = Catalog.query.filter(Catalog.status != NoteStatusEnum.delete.value).order_by(Catalog.parent_id,
                                                                                             Catalog.seq_no).all()
        notes_dict = {}
        for n in notes:
            notes_dict[n.id] = {
                "id": n.id,
                "text": n.title,
                "type": "file",
                "parent_id": n.parent_id
            }
        # Attach each node to its parent; any node with children becomes a folder.
        for id, v in notes_dict.items():
            pid = v.get("parent_id")
            if pid and notes_dict.get(pid):
                if not notes_dict[pid].get("children"):
                    notes_dict[pid]["children"] = []
                notes_dict[pid]["children"].append(v)
                notes_dict[pid]["type"] = "folder"
        cls.catalog_root_id = root.id
        root = notes_dict[root.id]
        root["opened"] = True
        return root

    @classmethod
    def need_auth_code(cls, id):
        """Return True if the note is locked and the session is not yet authorized."""
        note = Catalog.query.filter_by(id=id).first()
        with app.app_context():
            if note.with_passwd == 0:
                return False
            else:
                # An "auth-<id>" session key records a previously entered password.
                if f"auth-{id}" in session:
                    return False
                return True

    @classmethod
    def fetch_note(cls, id):
        """Return a note's decompressed body, pruning images it no longer references."""
        note = Catalog.query.filter_by(id=id).first()
        content = zlib.decompress(note.content).decode("utf8")
        images = Image.query.filter(Image.note_id == id, Image.status != NoteStatusEnum.delete.value).all()
        for img in images:
            # Soft-delete images whose id no longer appears in the note body.
            if img.id not in content:
                img.status = NoteStatusEnum.delete.value
                img.modification_time = datetime.now()
        db.session.commit()
        return content

    @classmethod
    def fetch_recently_change_note(cls):
        """Return the 30 most recently created/modified notes as display dicts."""
        notes = Catalog.query.order_by(
            functions.coalesce(Catalog.modification_time, Catalog.creation_time).desc()).limit(30).all()
        notes_list = []
        for n in notes:
            notes_list.append({
                "id": n.id,
                "title": n.title,
                "status": status_text_mapping[n.status],
                # UI labels: "synced" / "not synced" (Chinese strings kept as-is).
                "sync_status": "已同步" if n.sync_status == SyncStatusEnum.has_sync.value else "未同步",
                "change_time": str(n.modification_time if n.modification_time else n.creation_time)
            })
        return notes_list

    @classmethod
    def create_image(cls, note_id, image, image_id=None):
        """Store an uploaded image (zlib-compressed) and return its id."""
        image_content = zlib.compress(image.read())
        image = Image(id=image_id, note_id=note_id, image=image_content, mime_type=image.mimetype)
        # Fix: store the enum's .value, consistent with every other status
        # write in this class; previously the raw enum member was assigned.
        image.status = NoteStatusEnum.create.value
        db.session.add(image)
        db.session.commit()
        return image.id

    @classmethod
    def fetch_image(cls, image_id):
        """Return (decompressed bytes, mime type) for a single image."""
        img = Image.query.filter_by(id=image_id).first()
        return zlib.decompress(img.image), img.mime_type

    @classmethod
    def fetch_images(cls, note_id):
        """Return a generator of (decompressed bytes, mime type) for a note's images."""
        imgs = Image.query.filter_by(note_id=note_id).all()
        return ((zlib.decompress(i.image), i.mime_type) for i in imgs)

    @classmethod
    def sync_note(cls, note_id, with_children=0):
        """Flag a note (and, with with_children=1, its subtree) for manual sync."""
        note = Catalog.query.filter_by(id=note_id).first()
        sync_notes = [note]
        while sync_notes:
            note = sync_notes.pop(0)
            if with_children == 1:
                children = Catalog.query.filter(Catalog.parent_id == note.id,
                                                Catalog.status != NoteStatusEnum.delete.value).all()
                sync_notes += list(children)
            note.sync_status = SyncStatusEnum.need_sync.value
            note.status = NoteStatusEnum.manual_sync.value
            note.modification_time = datetime.now()
        db.session.commit()

    @classmethod
    def translate_text(cls, text):
        """Translate `text` via the iciba web API and return the parsed JSON.

        NOTE(review): performs a blocking network request with no timeout or
        error handling — confirm whether a timeout should be added.
        """
        r = requests.post("http://fy.iciba.com/ajax.php?a=fy", data={"f": "auto", "t": "auto", "w": text})
        return r.json()
|
from plotly.basedatatypes import BaseTraceType
import copy
class Scatterternary(BaseTraceType):
# a
# -
@property
def a(self):
"""
Sets the quantity of component `a` in each data point. If `a`,
`b`, and `c` are all provided, they need not be normalized,
only the relative values matter. If only two arrays are
provided they must be normalized to match `ternary<i>.sum`.
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['a']
@a.setter
def a(self, val):
self['a'] = val
# asrc
# ----
@property
def asrc(self):
"""
Sets the source reference on plot.ly for a .
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['asrc']
@asrc.setter
def asrc(self, val):
self['asrc'] = val
# b
# -
@property
def b(self):
    """
    Sets the quantity of component `b` in each data point. If `a`,
    `b`, and `c` are all provided, they need not be normalized,
    only the relative values matter. If only two arrays are
    provided they must be normalized to match `ternary<i>.sum`.

    The 'b' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self['b']

@b.setter
def b(self, val):
    self['b'] = val
# bsrc
# ----
@property
def bsrc(self):
"""
Sets the source reference on plot.ly for b .
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bsrc']
@bsrc.setter
def bsrc(self, val):
self['bsrc'] = val
# c
# -
@property
def c(self):
    """
    Sets the quantity of component `c` in each data point. If `a`,
    `b`, and `c` are all provided, they need not be normalized,
    only the relative values matter. If only two arrays are
    provided they must be normalized to match `ternary<i>.sum`.

    The 'c' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self['c']

@c.setter
def c(self, val):
    self['c'] = val
# cliponaxis
# ----------
@property
def cliponaxis(self):
"""
Determines whether or not markers and text nodes are clipped
about the subplot axes. To show markers and text nodes above
axis lines and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
The 'cliponaxis' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['cliponaxis']
@cliponaxis.setter
def cliponaxis(self, val):
self['cliponaxis'] = val
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['connectgaps']
@connectgaps.setter
def connectgaps(self, val):
self['connectgaps'] = val
# csrc
# ----
@property
def csrc(self):
"""
Sets the source reference on plot.ly for c .
The 'csrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['csrc']
@csrc.setter
def csrc(self, val):
self['csrc'] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['customdata']
@customdata.setter
def customdata(self, val):
self['customdata'] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on plot.ly for customdata .
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['customdatasrc']
@customdatasrc.setter
def customdatasrc(self, val):
self['customdatasrc'] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". scatterternary has a subset of the options
available to scatter. "toself" connects the endpoints of the
trace (or each segment of the trace if it has gaps) into a
closed shape. "tonext" fills the space between two traces if
one completely encloses the other (eg consecutive contour
lines), and behaves like "toself" if there is no trace before
it. "tonext" should not be used if one trace does not enclose
the other.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself', 'tonext']
Returns
-------
Any
"""
return self['fill']
@fill.setter
def fill(self, val):
self['fill'] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['fillcolor']
@fillcolor.setter
def fillcolor(self, val):
self['fillcolor'] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['a', 'b', 'c', 'text', 'name'] joined with '+' characters
(e.g. 'a+b')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['hoverinfo']
@hoverinfo.setter
def hoverinfo(self, val):
self['hoverinfo'] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on plot.ly for hoverinfo .
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hoverinfosrc']
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self['hoverinfosrc'] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of plotly.graph_objs.scatterternary.Hoverlabel
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the length (in number of characters) of
the trace name in the hover labels for this
trace. -1 shows the whole name regardless of
length. 0-3 shows the first 0-3 characters, and
an integer >3 will show the whole name if it is
less than that many characters, but if it is
longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
Returns
-------
plotly.graph_objs.scatterternary.Hoverlabel
"""
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
# hoveron
# -------
@property
def hoveron(self):
"""
Do the hover effects highlight individual points (markers or
line points) or do they highlight filled regions? If the fill
is "toself" or "tonext" and there are no markers or text, then
the default is "fills", otherwise it is "points".
The 'hoveron' property is a flaglist and may be specified
as a string containing:
- Any combination of ['points', 'fills'] joined with '+' characters
(e.g. 'points+fills')
Returns
-------
Any
"""
return self['hoveron']
@hoveron.setter
def hoveron(self, val):
self['hoveron'] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (a,b,c) point. If
a single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
to the the data points in (a,b,c). To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['hovertext']
@hovertext.setter
def hovertext(self, val):
self['hovertext'] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on plot.ly for hovertext .
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hovertextsrc']
@hovertextsrc.setter
def hovertextsrc(self, val):
self['hovertextsrc'] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['ids']
@ids.setter
def ids(self, val):
self['ids'] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on plot.ly for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['idssrc']
@idssrc.setter
def idssrc(self, val):
self['idssrc'] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['legendgroup']
@legendgroup.setter
def legendgroup(self, val):
self['legendgroup'] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.scatterternary.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the
lines are drawn using spline interpolation. The
other available values correspond to step-wise
line shapes.
smoothing
Has an effect only if `shape` is set to
"spline" Sets the amount of smoothing. 0
corresponds to no smoothing (equivalent to a
"linear" shape).
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scatterternary.Line
"""
return self['line']
@line.setter
def line(self, val):
self['line'] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scatterternary.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets themarkercolor. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
colorbar
plotly.graph_objs.scatterternary.marker.ColorBa
r instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use`marker.cmin` and `marker.cmax`.
Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R
ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri
c,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
gradient
plotly.graph_objs.scatterternary.marker.Gradien
t instance or dict with compatible properties
line
plotly.graph_objs.scatterternary.marker.Line
instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on
the graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for
opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size
.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for
symbol .
Returns
-------
plotly.graph_objs.scatterternary.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# mode
# ----
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace. If the
provided `mode` includes "text" then the `text` elements appear
at the coordinates. Otherwise, the `text` elements appear on
hover. If there are less than 20 points, then the default is
"lines+markers". Otherwise, "lines".
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self['mode']
@mode.setter
def mode(self, val):
self['mode'] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of plotly.graph_objs.scatterternary.Selected
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
plotly.graph_objs.scatterternary.selected.Marke
r instance or dict with compatible properties
textfont
plotly.graph_objs.scatterternary.selected.Textf
ont instance or dict with compatible properties
Returns
-------
plotly.graph_objs.scatterternary.Selected
"""
return self['selected']
@selected.setter
def selected(self, val):
self['selected'] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self['selectedpoints']
@selectedpoints.setter
def selectedpoints(self, val):
self['selectedpoints'] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of plotly.graph_objs.scatterternary.Stream
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
Returns
-------
plotly.graph_objs.scatterternary.Stream
"""
return self['stream']
@stream.setter
def stream(self, val):
self['stream'] = val
# subplot
# -------
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
ternary subplot. If "ternary" (the default value), the data
refer to `layout.ternary`. If "ternary2", the data refer to
`layout.ternary2`, and so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'ternary', that may be specified as the string 'ternary'
optionally followed by an integer >= 1
(e.g. 'ternary', 'ternary1', 'ternary2', 'ternary3', etc.)
Returns
-------
str
"""
return self['subplot']
@subplot.setter
def subplot(self, val):
self['subplot'] = val
# sum
# ---
@property
def sum(self):
"""
The number each triplet should sum to, if only two of `a`, `b`,
and `c` are provided. This overrides `ternary<i>.sum` to
normalize this specific trace, but does not affect the values
displayed on the axes. 0 (or missing) means to use
ternary<i>.sum
The 'sum' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['sum']
@sum.setter
def sum(self, val):
self['sum'] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (a,b,c) point. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
to the the data points in (a,b,c). If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set, these
elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['text']
@text.setter
def text(self, val):
self['text'] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the text font.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scatterternary.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.scatterternary.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# textposition
# ------------
@property
def textposition(self):
"""
Sets the positions of the `text` elements with respects to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['textposition']
@textposition.setter
def textposition(self, val):
self['textposition'] = val
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
"""
Sets the source reference on plot.ly for textposition .
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['textpositionsrc']
@textpositionsrc.setter
def textpositionsrc(self, val):
self['textpositionsrc'] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on plot.ly for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['textsrc']
@textsrc.setter
def textsrc(self, val):
self['textsrc'] = val
# uid
# ---
@property
def uid(self):
"""
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['uid']
@uid.setter
def uid(self, val):
self['uid'] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of plotly.graph_objs.scatterternary.Unselected
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
plotly.graph_objs.scatterternary.unselected.Mar
ker instance or dict with compatible properties
textfont
plotly.graph_objs.scatterternary.unselected.Tex
tfont instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scatterternary.Unselected
"""
return self['unselected']
@unselected.setter
def unselected(self, val):
self['unselected'] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
    # type
    # ----
    @property
    def type(self):
        # Read-only trace type string, taken directly from the
        # underlying property dict (not routed through item access
        # like the other properties).
        return self._props['type']
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        """Return the parent path string (always '' for this object)."""
        return ''
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        """Plain-text descriptions of this trace's properties (returned verbatim)."""
        return """\
        a
            Sets the quantity of component `a` in each data point.
            If `a`, `b`, and `c` are all provided, they need not be
            normalized, only the relative values matter. If only
            two arrays are provided they must be normalized to
            match `ternary<i>.sum`.
        asrc
            Sets the source reference on plot.ly for  a .
        b
            Sets the quantity of component `a` in each data point.
            If `a`, `b`, and `c` are all provided, they need not be
            normalized, only the relative values matter. If only
            two arrays are provided they must be normalized to
            match `ternary<i>.sum`.
        bsrc
            Sets the source reference on plot.ly for  b .
        c
            Sets the quantity of component `a` in each data point.
            If `a`, `b`, and `c` are all provided, they need not be
            normalized, only the relative values matter. If only
            two arrays are provided they must be normalized to
            match `ternary<i>.sum`.
        cliponaxis
            Determines whether or not markers and text nodes are
            clipped about the subplot axes. To show markers and
            text nodes above axis lines and tick labels, make sure
            to set `xaxis.layer` and `yaxis.layer` to *below
            traces*.
        connectgaps
            Determines whether or not gaps (i.e. {nan} or missing
            values) in the provided data arrays are connected.
        csrc
            Sets the source reference on plot.ly for  c .
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on plot.ly for  customdata .
        fill
            Sets the area to fill with a solid color. Use with
            `fillcolor` if not "none". scatterternary has a subset
            of the options available to scatter. "toself" connects
            the endpoints of the trace (or each segment of the
            trace if it has gaps) into a closed shape. "tonext"
            fills the space between two traces if one completely
            encloses the other (eg consecutive contour lines), and
            behaves like "toself" if there is no trace before it.
            "tonext" should not be used if one trace does not
            enclose the other.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on plot.ly for  hoverinfo .
        hoverlabel
            plotly.graph_objs.scatterternary.Hoverlabel instance or
            dict with compatible properties
        hoveron
            Do the hover effects highlight individual points
            (markers or line points) or do they highlight filled
            regions? If the fill is "toself" or "tonext" and there
            are no markers or text, then the default is "fills",
            otherwise it is "points".
        hovertext
            Sets hover text elements associated with each (a,b,c)
            point. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to the the data points in (a,b,c).
            To be seen, trace `hoverinfo` must contain a "text"
            flag.
        hovertextsrc
            Sets the source reference on plot.ly for  hovertext .
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on plot.ly for  ids .
        legendgroup
            Sets the legend group for this trace. Traces part of
            the same legend group hide/show at the same time when
            toggling legend items.
        line
            plotly.graph_objs.scatterternary.Line instance or dict
            with compatible properties
        marker
            plotly.graph_objs.scatterternary.Marker instance or
            dict with compatible properties
        mode
            Determines the drawing mode for this scatter trace. If
            the provided `mode` includes "text" then the `text`
            elements appear at the coordinates. Otherwise, the
            `text` elements appear on hover. If there are less than
            20 points, then the default is "lines+markers".
            Otherwise, "lines".
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        selected
            plotly.graph_objs.scatterternary.Selected instance or
            dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            plotly.graph_objs.scatterternary.Stream instance or
            dict with compatible properties
        subplot
            Sets a reference between this trace's data coordinates
            and a ternary subplot. If "ternary" (the default
            value), the data refer to `layout.ternary`. If
            "ternary2", the data refer to `layout.ternary2`, and so
            on.
        sum
            The number each triplet should sum to, if only two of
            `a`, `b`, and `c` are provided. This overrides
            `ternary<i>.sum` to normalize this specific trace, but
            does not affect the values displayed on the axes. 0 (or
            missing) means to use ternary<i>.sum
        text
            Sets text elements associated with each (a,b,c) point.
            If a single string, the same string appears over all
            the data points. If an array of strings, the items are
            mapped in order to the the data points in (a,b,c). If
            trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textfont
            Sets the text font.
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.
        textpositionsrc
            Sets the source reference on plot.ly for  textposition
            .
        textsrc
            Sets the source reference on plot.ly for  text .
        uid

        unselected
            plotly.graph_objs.scatterternary.Unselected instance or
            dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
self,
arg=None,
a=None,
asrc=None,
b=None,
bsrc=None,
c=None,
cliponaxis=None,
connectgaps=None,
csrc=None,
customdata=None,
customdatasrc=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hoveron=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
line=None,
marker=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
subplot=None,
sum=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
uid=None,
unselected=None,
visible=None,
**kwargs
):
"""
Construct a new Scatterternary object
Provides similar functionality to the "scatter" type but on a
ternary phase diagram. The data is provided by at least two
arrays out of `a`, `b`, `c` triplets.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.Scatterternary
a
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
asrc
Sets the source reference on plot.ly for a .
b
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
bsrc
Sets the source reference on plot.ly for b .
c
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
cliponaxis
Determines whether or not markers and text nodes are
clipped about the subplot axes. To show markers and
text nodes above axis lines and tick labels, make sure
to set `xaxis.layer` and `yaxis.layer` to *below
traces*.
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
csrc
Sets the source reference on plot.ly for c .
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on plot.ly for customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". scatterternary has a subset
of the options available to scatter. "toself" connects
the endpoints of the trace (or each segment of the
trace if it has gaps) into a closed shape. "tonext"
fills the space between two traces if one completely
encloses the other (eg consecutive contour lines), and
behaves like "toself" if there is no trace before it.
"tonext" should not be used if one trace does not
enclose the other.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.scatterternary.Hoverlabel instance or
dict with compatible properties
hoveron
Do the hover effects highlight individual points
(markers or line points) or do they highlight filled
regions? If the fill is "toself" or "tonext" and there
are no markers or text, then the default is "fills",
otherwise it is "points".
hovertext
Sets hover text elements associated with each (a,b,c)
point. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to the the data points in (a,b,c).
To be seen, trace `hoverinfo` must contain a "text"
flag.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
plotly.graph_objs.scatterternary.Line instance or dict
with compatible properties
marker
plotly.graph_objs.scatterternary.Marker instance or
dict with compatible properties
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points, then the default is "lines+markers".
Otherwise, "lines".
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.scatterternary.Selected instance or
dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
plotly.graph_objs.scatterternary.Stream instance or
dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a ternary subplot. If "ternary" (the default
value), the data refer to `layout.ternary`. If
"ternary2", the data refer to `layout.ternary2`, and so
on.
sum
The number each triplet should sum to, if only two of
`a`, `b`, and `c` are provided. This overrides
`ternary<i>.sum` to normalize this specific trace, but
does not affect the values displayed on the axes. 0 (or
missing) means to use ternary<i>.sum
text
Sets text elements associated with each (a,b,c) point.
If a single string, the same string appears over all
the data points. If an array of strings, the items are
mapped in order to the the data points in (a,b,c). If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textfont
Sets the text font.
textposition
Sets the positions of the `text` elements with respects
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on plot.ly for textposition
.
textsrc
Sets the source reference on plot.ly for text .
uid
unselected
plotly.graph_objs.scatterternary.Unselected instance or
dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Scatterternary
"""
super(Scatterternary, self).__init__('scatterternary')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Scatterternary
constructor must be a dict or
an instance of plotly.graph_objs.Scatterternary"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators import (scatterternary as v_scatterternary)
# Initialize validators
# ---------------------
self._validators['a'] = v_scatterternary.AValidator()
self._validators['asrc'] = v_scatterternary.AsrcValidator()
self._validators['b'] = v_scatterternary.BValidator()
self._validators['bsrc'] = v_scatterternary.BsrcValidator()
self._validators['c'] = v_scatterternary.CValidator()
self._validators['cliponaxis'] = v_scatterternary.CliponaxisValidator()
self._validators['connectgaps'
] = v_scatterternary.ConnectgapsValidator()
self._validators['csrc'] = v_scatterternary.CsrcValidator()
self._validators['customdata'] = v_scatterternary.CustomdataValidator()
self._validators['customdatasrc'
] = v_scatterternary.CustomdatasrcValidator()
self._validators['fill'] = v_scatterternary.FillValidator()
self._validators['fillcolor'] = v_scatterternary.FillcolorValidator()
self._validators['hoverinfo'] = v_scatterternary.HoverinfoValidator()
self._validators['hoverinfosrc'
] = v_scatterternary.HoverinfosrcValidator()
self._validators['hoverlabel'] = v_scatterternary.HoverlabelValidator()
self._validators['hoveron'] = v_scatterternary.HoveronValidator()
self._validators['hovertext'] = v_scatterternary.HovertextValidator()
self._validators['hovertextsrc'
] = v_scatterternary.HovertextsrcValidator()
self._validators['ids'] = v_scatterternary.IdsValidator()
self._validators['idssrc'] = v_scatterternary.IdssrcValidator()
self._validators['legendgroup'
] = v_scatterternary.LegendgroupValidator()
self._validators['line'] = v_scatterternary.LineValidator()
self._validators['marker'] = v_scatterternary.MarkerValidator()
self._validators['mode'] = v_scatterternary.ModeValidator()
self._validators['name'] = v_scatterternary.NameValidator()
self._validators['opacity'] = v_scatterternary.OpacityValidator()
self._validators['selected'] = v_scatterternary.SelectedValidator()
self._validators['selectedpoints'
] = v_scatterternary.SelectedpointsValidator()
self._validators['showlegend'] = v_scatterternary.ShowlegendValidator()
self._validators['stream'] = v_scatterternary.StreamValidator()
self._validators['subplot'] = v_scatterternary.SubplotValidator()
self._validators['sum'] = v_scatterternary.SumValidator()
self._validators['text'] = v_scatterternary.TextValidator()
self._validators['textfont'] = v_scatterternary.TextfontValidator()
self._validators['textposition'
] = v_scatterternary.TextpositionValidator()
self._validators['textpositionsrc'
] = v_scatterternary.TextpositionsrcValidator()
self._validators['textsrc'] = v_scatterternary.TextsrcValidator()
self._validators['uid'] = v_scatterternary.UidValidator()
self._validators['unselected'] = v_scatterternary.UnselectedValidator()
self._validators['visible'] = v_scatterternary.VisibleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('a', None)
self['a'] = a if a is not None else _v
_v = arg.pop('asrc', None)
self['asrc'] = asrc if asrc is not None else _v
_v = arg.pop('b', None)
self['b'] = b if b is not None else _v
_v = arg.pop('bsrc', None)
self['bsrc'] = bsrc if bsrc is not None else _v
_v = arg.pop('c', None)
self['c'] = c if c is not None else _v
_v = arg.pop('cliponaxis', None)
self['cliponaxis'] = cliponaxis if cliponaxis is not None else _v
_v = arg.pop('connectgaps', None)
self['connectgaps'] = connectgaps if connectgaps is not None else _v
_v = arg.pop('csrc', None)
self['csrc'] = csrc if csrc is not None else _v
_v = arg.pop('customdata', None)
self['customdata'] = customdata if customdata is not None else _v
_v = arg.pop('customdatasrc', None)
self['customdatasrc'
] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop('fill', None)
self['fill'] = fill if fill is not None else _v
_v = arg.pop('fillcolor', None)
self['fillcolor'] = fillcolor if fillcolor is not None else _v
_v = arg.pop('hoverinfo', None)
self['hoverinfo'] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop('hoverinfosrc', None)
self['hoverinfosrc'] = hoverinfosrc if hoverinfosrc is not None else _v
_v = arg.pop('hoverlabel', None)
self['hoverlabel'] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop('hoveron', None)
self['hoveron'] = hoveron if hoveron is not None else _v
_v = arg.pop('hovertext', None)
self['hovertext'] = hovertext if hovertext is not None else _v
_v = arg.pop('hovertextsrc', None)
self['hovertextsrc'] = hovertextsrc if hovertextsrc is not None else _v
_v = arg.pop('ids', None)
self['ids'] = ids if ids is not None else _v
_v = arg.pop('idssrc', None)
self['idssrc'] = idssrc if idssrc is not None else _v
_v = arg.pop('legendgroup', None)
self['legendgroup'] = legendgroup if legendgroup is not None else _v
_v = arg.pop('line', None)
self['line'] = line if line is not None else _v
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('mode', None)
self['mode'] = mode if mode is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('selected', None)
self['selected'] = selected if selected is not None else _v
_v = arg.pop('selectedpoints', None)
self['selectedpoints'
] = selectedpoints if selectedpoints is not None else _v
_v = arg.pop('showlegend', None)
self['showlegend'] = showlegend if showlegend is not None else _v
_v = arg.pop('stream', None)
self['stream'] = stream if stream is not None else _v
_v = arg.pop('subplot', None)
self['subplot'] = subplot if subplot is not None else _v
_v = arg.pop('sum', None)
self['sum'] = sum if sum is not None else _v
_v = arg.pop('text', None)
self['text'] = text if text is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
_v = arg.pop('textposition', None)
self['textposition'] = textposition if textposition is not None else _v
_v = arg.pop('textpositionsrc', None)
self['textpositionsrc'
] = textpositionsrc if textpositionsrc is not None else _v
_v = arg.pop('textsrc', None)
self['textsrc'] = textsrc if textsrc is not None else _v
_v = arg.pop('uid', None)
self['uid'] = uid if uid is not None else _v
_v = arg.pop('unselected', None)
self['unselected'] = unselected if unselected is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
# Read-only literals
# ------------------
from _plotly_utils.basevalidators import LiteralValidator
self._props['type'] = 'scatterternary'
self._validators['type'] = LiteralValidator(
plotly_name='type',
parent_name='scatterternary',
val='scatterternary'
)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
<filename>3D_CNN/MiniBatchGenerator.py
import numpy as np
import matplotlib.pyplot as plt
from random import randint
from DataAugmentation import DataAugmentation
class MiniBatchGenerator:
    """Create minibatches of a given size from a dataset.

    Preserves the original sample order unless shuffle() is used.

    Modes (set from `split` in __init__): 0 = train, 1 = val, 2 = test.
    Train/val batches carry (samples, labels); test batches carry samples only.

    NOTE: the instance attribute ``self.batchsize`` (an int, set in __init__)
    shadows the ``batchsize()`` method on instances; both are kept for
    backward compatibility with existing callers.

    FIX vs. original: the Python-2-only ``<>`` operator was replaced with
    ``!=`` (valid in both Python 2 and 3).
    """

    # Class-level defaults; overwritten per instance in __init__.
    nSeq = 0            # number of frames per sample sequence
    batchsize = 0       # samples per minibatch (last batch may be smaller)
    dataset = None      # wrapped dataset object
    tform = None        # optional sample transformation (unused here)
    stat = None         # unused placeholder
    nBatches = 0        # total number of minibatches
    b = []              # last batch returned
    dataGenerator = None  # DataAugmentation helper
    mode = 0            # 0=train, 1=val, 2=test
    shuffled_idx = []   # shuffled batch order used by BatchGenerator()

    def __init__(self, dataset, split, nSeq, bs):
        """Wrap `dataset` and precompute the number of minibatches.

        dataset: object exposing getNClasses()/getAllSamplesOfID()/... .
        split:   "train", "val" or "test" (selects self.mode; default train).
        nSeq:    target sequence length each sample is padded/clipped to.
        bs:      minibatch size.
        """
        self.dataset = dataset
        self.batchsize = bs
        self.nSeq = nSeq
        # Ceiling division: one extra (smaller) batch for the remainder.
        if (self.dataset.getNClasses() % self.batchsize) == 0:
            self.nBatches = int(self.dataset.getNClasses() / self.batchsize)
        else:
            self.nBatches = int(self.dataset.getNClasses() / self.batchsize) + 1
        print("nBatches: " + str(self.nBatches))
        print("nSequences: " + str(self.nSeq))
        self.dataGenerator = DataAugmentation()
        self.mode = {"train": 0, "val": 1, "test": 2}.get(split, 0)

    def batchsize(self):
        # Return the number of samples per minibatch.
        # The size of the last batch might be smaller.
        # NOTE: unreachable through instances (shadowed by the attribute).
        return self.batchsize

    def nbatches(self):
        """Return the number of minibatches."""
        return self.nBatches

    def shuffle(self):
        """Placeholder: per-epoch shuffling happens in BatchGenerator()."""
        return 0

    @staticmethod
    def _fit_length(arr, length):
        """Return `arr` zero-padded or clipped along axis 0 to exactly `length`."""
        out = np.zeros((length,) + arr.shape[1:])
        n = min(length, arr.shape[0])
        out[:n] = arr[:n]
        return out

    def batch(self, bid):
        """Return the bid-th minibatch.

        Batch IDs start with 0 and are consecutive; exits on an invalid id.
        Train/val mode applies data augmentation and returns (samples, labels);
        test mode returns samples only.
        """
        if not (0 <= bid < self.nBatches):
            print("ERROR: index out of range!")
            exit()
        samples = []
        labels = []
        offset = bid * self.batchsize
        i = 0
        vid = 1  # 1-based video id probed within this batch window
        if self.mode == 0 or self.mode == 1:
            while i < self.batchsize:
                if (offset + i) >= self.dataset.getNClasses():
                    break
                ids, _, s, l = self.dataset.getAllSamplesOfID(offset + vid)
                vid += 1
                if ids.shape[0] != 0:  # skip ids with no samples
                    s_n = self._fit_length(s, self.nSeq)
                    l_n = self._fit_length(l, self.nSeq)
                    aug = self.dataGenerator.runDataAugmentation([s_n, l_n])
                    samples.append(aug[0])
                    labels.append(l_n[0])  # one label vector per sequence
                    i += 1
            self.b = (np.array(samples), np.array(labels))
            return self.b
        elif self.mode == 2:
            while i < self.batchsize:
                if (offset + i) >= self.dataset.getNClasses():
                    break
                ids, _, s = self.dataset.getAllTestSamplesOfID(offset + vid)
                vid += 1
                if ids.shape[0] != 0:
                    samples.append(self._fit_length(s, self.nSeq))
                    i += 1
            self.b = np.array(samples)
            return self.b

    def batch_Random(self, bid, r_idx):
        """Return the bid-th minibatch using pre-drawn frame indices.

        r_idx: list of frame indices used to subsample sequences that have
        exactly 60 frames; shorter sequences are padded/clipped to 10 frames.
        Exits on an invalid batch id.
        """
        if not (0 <= bid < self.nBatches):
            print("ERROR: index out of range!")
            exit()
        samples = []
        labels = []
        offset = bid * self.batchsize
        if self.mode == 0 or self.mode == 1:
            i = 0
            vid = 1
            while i < self.batchsize:
                if (offset + i) >= self.dataset.getNClasses():
                    break
                ids, _, s, l = self.dataset.getAllSamplesOfID(offset + vid)
                vid += 1
                if ids.shape[0] != 0:
                    if ids.shape[0] == 60:
                        # Full-length sequence: take the pre-drawn frames.
                        tmp_b = [s[r_idx], l[r_idx]]
                    else:
                        # Short sequence: pad/clip to a fixed 10 frames.
                        tmp_b = [self._fit_length(s, 10),
                                 self._fit_length(l, 10)]
                    samples.append(tmp_b[0])
                    labels.append(tmp_b[1][0])
                    i += 1
            self.b = (np.array(samples), np.array(labels))
            return self.b
        elif self.mode == 2:
            for i in range(0, self.batchsize, 1):
                if (offset + i) >= self.dataset.size():
                    break
                samples.append(self.dataset.sample(offset + i))
            self.b = np.array(samples)
            return self.b

    def batchNew(self, bid):
        """Return the bid-th minibatch directly via dataset.sample().

        Exits on an invalid batch id.
        """
        if not (0 <= bid < self.nBatches):
            print("ERROR: index out of range!")
            exit()
        samples = []
        labels = []
        offset = bid * self.batchsize
        for i in range(0, self.batchsize):
            if (offset + i) >= self.dataset.size():
                break
            s, l = self.dataset.sample(offset + i)
            samples.append(s)
            labels.append(l)
        self.b = (np.array(samples), np.array(labels))
        return self.b

    def BatchGenerator(self):
        """Infinite generator yielding batches for Keras-style fit loops.

        Train mode shuffles the batch order and redraws random frame
        indices each epoch; val/test modes iterate batches in order.
        """
        if self.mode == 0:
            while True:
                self.shuffled_idx = np.arange(self.nbatches())
                np.random.shuffle(self.shuffled_idx)
                # One random frame per 6-frame segment (10 segments -> 10 frames).
                r_idx = [randint(a * 6, a * 6 + 5) for a in range(10)]
                for a in range(len(self.shuffled_idx)):
                    batch = self.batch_Random(self.shuffled_idx[a], r_idx)
                    yield batch[0], batch[1]
        elif self.mode == 1:
            while True:
                for a in range(self.nbatches()):
                    batch = self.batch(a)
                    yield batch[0], batch[1]
        elif self.mode == 2:
            while True:
                for a in range(self.nbatches()):
                    yield self.batch(a)

    def printBatchImages(self, b):
        """Display every image in batch `b`, pausing 0.6s between frames."""
        for i in range(int(b.shape[0])):
            im = plt.imshow(b[i])
            plt.pause(0.6)
        plt.show()
|
"""Examples for spanned() decorator with peer-to-peer based tracing."""
import logging
import os
import random
import sys
import time
import coloredlogs # type: ignore[import]
import pika # type: ignore[import]
# Guard: the example resolves its package import relative to the repo root.
if "examples" not in os.listdir():
    raise RuntimeError("Script needs to be ran from root of repository.")
sys.path.append(".")
import wipac_telemetry.tracing_tools as wtt  # noqa: E402 # pylint: disable=C0413,E0401
ADDRESS = "localhost"  # "127.0.0.1"
PORT = 2000
# Random 4-digit logger name so concurrent sender/receiver runs are distinguishable.
LOGGER = logging.getLogger(f"{random.randint(0, 1000):04d}")
########################################################################################
@wtt.spanned(all_args=True, kind=wtt.SpanKind.PRODUCER)
def go_publish(
    another_span: wtt.Span,
    friend: str,
    myself: str,
    channel: pika.adapters.blocking_connection.BlockingChannel,
) -> None:
    """Publish a greeting to the friend's queue with span links in the headers."""
    greeting = f"Hey {friend}, I'm {myself}"
    # Linking `another_span` is redundant (it is already the producer span's
    # parent) but demonstrates attaching extra links to a carrier.
    extra_link = wtt.span_to_link(
        another_span,
        {
            "name": "another_span",
            "NOTE": "explicitly linking `another_span` isn't necessary, it's `producer-span`'s parent",
            "REASONING": "`another_span` is already automatically accessible via the `producer-span`'s `parent_id` pointer",
            "FURTHERMORE": "this is just an example of linking multiple spans :D",
        },
    )
    span_headers = wtt.inject_links_carrier(
        attrs={"name": "producer-span", "from": myself, "to": friend},
        addl_links=[extra_link],
    )
    channel.basic_publish(
        exchange="",
        routing_key=friend,
        body=greeting,
        properties=pika.BasicProperties(headers=span_headers),
    )
    LOGGER.debug(f" [x] Sent '{greeting}'")
@wtt.spanned(all_args=True)
def send(friend: str, myself: str) -> None:
    """Connect to the broker, declare the friend's queue, and publish to it."""
    conn = pika.BlockingConnection(pika.ConnectionParameters(host=ADDRESS))
    chan = conn.channel()
    chan.queue_declare(queue=friend)
    # Current span becomes `another_span` inside go_publish().
    go_publish(wtt.get_current_span(), friend, myself, chan)
    conn.close()
########################################################################################
@wtt.spanned(
    kind=wtt.SpanKind.CONSUMER,
    these=["properties.headers.just-a-key"],
    carrier="properties.headers",
    carrier_relation=wtt.CarrierRelation.LINK,
)
def receive_callback(
    channel: pika.adapters.blocking_connection.BlockingChannel,
    method: pika.spec.Basic.Deliver,
    properties: pika.spec.BasicProperties,
    body: bytes,
) -> None:
    """Log the delivered message, then stop consuming after this first one."""
    for debug_item in (channel, method, properties, properties.headers):
        LOGGER.debug(debug_item)
    LOGGER.debug(f" [x] Received '{str(body)}'")
    channel.stop_consuming()
@wtt.spanned(all_args=True)
def receive(myself: str) -> None:
    """Consume from my own queue; blocks until the callback stops consuming."""
    conn = pika.BlockingConnection(pika.ConnectionParameters(host=ADDRESS))
    chan = conn.channel()
    chan.queue_declare(queue=myself)
    chan.basic_consume(
        queue=myself, on_message_callback=receive_callback, auto_ack=True
    )
    chan.start_consuming()
########################################################################################
@wtt.spanned(all_args=True)
def main() -> None:
    """Send a message to a friend, then receive one, pausing between steps."""
    try:
        myself, friend = sys.argv[1], sys.argv[2]
    except (IndexError, KeyError):
        LOGGER.debug("enter name of 'friend' and 'me'")
        sys.exit(1)
    # Same sequence as before: sleep / send / sleep / receive / sleep / done.
    for step in (lambda: send(friend, myself), lambda: receive(myself)):
        time.sleep(1)
        step()
    time.sleep(1)
    print("Done.")
if __name__ == "__main__":
    coloredlogs.install(level="DEBUG")
    # pika is very chatty at DEBUG; surface only its warnings.
    logging.getLogger("pika").setLevel(logging.WARNING)
    main()
|
from pyowm import OWM
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore, QtGui, QtWidgets
from datetime import datetime
import threading
import openpyxl
import random
import webbrowser
# OpenWeatherMap client used by MyWindow to look up current weather.
API_key = "<KEY>"  # openweathermap API
owm = OWM(API_key)
# NOTE: six dead `datetime.today().<field>` expression statements were removed
# here -- their results were discarded, so they had no effect.
# Workbook of food recommendations: columns per weather, row bands per meal.
wb = openpyxl.load_workbook('음식.xlsx')
ws = wb.active  # first (active) sheet; same as wb.get_sheet_by_name("Sheet1")
class MyWindow(QMainWindow):
    """Weather-based food recommender window.

    Looks up the current weather for the city typed into the line edit
    (via pyowm), recommends a random food for that weather and time of day
    from the '음식.xlsx' sheet, and can open a Google Maps search for
    restaurants serving the recommended food.

    FIXES vs. original:
    - The 'Clear' dinner branch indexed the sheet with the whole cell tuple
      (missing random.choice), so clear evenings always hit the error path.
    - Hours exactly 12 and 18 matched no time window (strict < and > on both
      sides) and also fell into the error path; 12 now counts as lunch and
      18 as dinner.
    """

    # Spreadsheet column holding the food list for each weather status.
    _WEATHER_COLUMNS = {'Rain': 'B', 'Snow': 'E', 'Clouds': 'H', 'Clear': 'K'}
    # Fog-type statuses draw from a mix of all four weather columns.
    _FOGGY = ('Mist', 'Haze', 'Fog')
    # Icon file shown by lineEditPressed() per weather status.
    _WEATHER_ICONS = {
        'Snow': 'Snow.png',
        'Rain': 'Rain.png',
        'Clear': 'Clear.png',
        'Clouds': 'Clouds.png',
        'Mist': 'Mist.png',
        'Haze': 'Mist.png',
        'Fog': 'Mist.png',
    }
    # Search keyword -> foods mapped to it (used by _food_to_search()).
    _SEARCH_GROUPS = {
        "베이커리": ("도넛", "바게뜨", "토스트", "베이글", "모닝빵", "샌드위치", "머핀", "파이"),
        "죽": ("동지죽", "호박죽"),
        "초밥": ("유부초밥", "롤", "초밥"),
        "분식": ("떡튀순", "어묵"),
        "백반": ("돼지주물럭", "제육볶음", "오삼불고기"),
        "비빔밥": ("돌솥비빔밥",),
        "치킨": ("닭강정", "치킨&맥주"),
        "중국집": ("짬뽕", "짜장", "칠리새우", "깐풍기", "쟁반짜장", "탕수육"),
        "고기": ("갈매기살", "삼겹살"),
        "쭈꾸미": ("쭈꾸미철판볶음",),
        "낚지": ("낚지볶음",),
        "족발": ("족발&보쌈",),
        "레스토랑": ("고르곤졸라",),
        "호프": ("튀김&맥주",),
        "찌개": ("김치찌개", "된장찌개"),
        "마트": ("삶은달걀", "오꼬노미야끼", "과일", "붕어빵", "호떡", "타코야끼", "와플",
                 "핫도그", "고구마", "호빵", "미역국", "떡국", "스프", "약밥", "씨리얼",
                 "에그스크램블&베이컨", "주먹밥", "핫케익", "견과류", "과일주스", "오믈렛",
                 "간장계란밥", "샐러드", "볶음밥"),
        "홍합": ("홍합탕",),
        "막걸리": ("막걸리&전",),
    }

    def __init__(self):
        super().__init__()
        self.setupUI()

    def setupUI(self):
        """Build the static widgets: city input, labels, buttons and clock."""
        palette = QtGui.QPalette()  # white window background
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
        self.setPalette(palette)
        font = QtGui.QFont()  # default font for the whole window
        font.setFamily("휴먼옛체")
        font.setPointSize(10)
        self.setFont(font)
        self.setGeometry(800, 400, 300, 350)  # window position and size
        self.setWindowTitle('W-F')
        self.setWindowIcon(QIcon('아이콘.png'))
        textLabel = QLabel("지역 : ", self)  # "city:" prompt
        textLabel.move(40, 30)
        self.lineEdit = QLineEdit("", self)  # city-name input
        self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit.move(100, 30)
        # Pressing Enter shows the weather summary.
        self.lineEdit.returnPressed.connect(self.lineEditPressed)
        self.label = QLabel("", self)   # weather text / food icon area
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.image = QLabel(self)       # weather icon / food text area
        self.image.setAlignment(QtCore.Qt.AlignCenter)
        btn1 = QPushButton("추천 메뉴", self)  # "recommend a menu"
        btn1.setStyleSheet('QPushButton {background-color: white}')
        btn1.move(40, 280)
        btn1.clicked.connect(self.btn1_clicked)
        self.clock = QLabel("", self)  # live clock
        self.clock.move(140, 320)
        self.clock.resize(200, 20)
        self.Time()
        btn2 = QPushButton("음식점 검색", self)  # "search restaurants"
        btn2.setStyleSheet('QPushButton {background-color: white}')
        btn2.move(160, 280)
        btn2.clicked.connect(self.btn2_clicked)

    def Time(self):
        """Refresh the clock label, re-arming itself every second."""
        self.clock.setText(datetime.today().strftime("%Y/%m/%d %H:%M:%S"))
        threading.Timer(1, self.Time).start()

    def _pick_food(self, status, hour):
        """Return a random food name from the sheet for `status` at `hour`.

        Sheet layout: rows 3-11 breakfast, 12-21 lunch, 22-30 dinner; one
        column per weather status. Fog-type weather draws from the breakfast
        rows of all four columns (preserved from the original spreadsheet
        convention). Raises ValueError for an unknown status; the caller's
        except block turns that into the "enter a city" message.
        """
        if hour < 12:
            rows = range(3, 12)
        elif hour < 18:
            rows = range(12, 22)
        else:
            rows = range(22, 31)
        if status in self._WEATHER_COLUMNS:
            cell = self._WEATHER_COLUMNS[status] + str(random.choice(rows))
        elif status in self._FOGGY:
            cell = random.choice([col + str(r)
                                  for col in self._WEATHER_COLUMNS.values()
                                  for r in range(3, 12)])
        else:
            raise ValueError('unknown weather status: ' + status)
        return ws[cell].value

    def _food_to_search(self, food):
        """Map a recommended food to the keyword used for the map search."""
        for keyword, foods in self._SEARCH_GROUPS.items():
            if food in foods:
                return keyword
        return food  # no group: search for the food name itself

    def btn1_clicked(self):
        """Recommend a food for the current weather and time of day."""
        self.image.move(55, 200)
        self.image.resize(210, 100)
        self.label.move(75, 70)
        self.label.resize(150, 150)
        try:
            city = str(self.lineEdit.text())
            obs = owm.weather_at_place(city)
            w = obs.get_weather()
            hour = int(datetime.today().strftime("%H"))
            food = self._pick_food(w.get_status(), hour)
            self.image.setText(food)
            self.label.setPixmap(QPixmap('아이콘.png'))
            # Remember the search keyword for btn2_clicked().
            global search
            search = self._food_to_search(food)
        except:  # bad city name / unknown weather / sheet error
            print('순서 오류')
            self.image.setText("지역을 입력해 주세요.")
            self.label.setPixmap(QPixmap('Question.png'))

    def btn2_clicked(self):
        """Open a Google Maps restaurant search for the recommended food."""
        try:
            url = "https://www.google.co.kr/maps/search/" + search
            webbrowser.open(url)
        except:  # `search` not set yet: ask for a recommendation first
            print('search 오류')
            self.image.move(35, 200)
            self.image.resize(240, 100)
            self.label.move(75, 70)
            self.label.resize(150, 150)
            self.image.setText('추천메뉴를 먼저 검색해주세요.')
            self.label.setPixmap(QPixmap('Question.png'))

    def lineEditPressed(self):
        """Show current weather info and an icon for the entered city."""
        self.label.move(60, 20)
        self.label.resize(180, 200)
        self.image.resize(100, 100)
        self.image.move(105, 170)
        try:
            city = str(self.lineEdit.text())
            obs = owm.weather_at_place(city)
            w = obs.get_weather()
            l = obs.get_location()
            temps = w.get_temperature(unit='celsius')
            self.label.setText(l.get_name() + '\n최고 기온 : ' + str(temps['temp_max']) +
                               '˚C' + '\n최저 기온 : ' + str(temps['temp_min']) + '˚C'
                               + '\n현재 기온 : ' + str(temps['temp']) + '˚C' +
                               '\n현재 날씨 : ' + w.get_status())
            icon = self._WEATHER_ICONS.get(w.get_status())
            if icon is not None:
                self.image.setPixmap(QPixmap(icon))
        except:  # unknown city name
            print('지역명 오류')
            self.image.move(45, 200)
            self.image.resize(220, 100)
            self.label.move(75, 70)
            self.label.resize(150, 150)
            self.image.setText('지역명이 틀렸습니다.')
            self.label.setPixmap(QPixmap('Question.png'))
if __name__ == "__main__":  # launch the window and enter the Qt event loop
    app = QApplication(sys.argv)
    mywindow = MyWindow()
    mywindow.show()
    app.exec_()
|
<gh_stars>1-10
#coding=utf-8
import base64
import utils
import json
import hashlib
import urllib, urllib2
import re
import os
import tempfile
import random
import xml.dom.minidom as minidom
from cookielib import MozillaCookieJar
import requests
from bs4 import BeautifulSoup
from bilibili_config import *
class Bilibili():
    def __init__(self, appkey = APPKEY, appsecret = APPSECRET):
        """Create a client, restoring a previous login session if one exists.

        appkey/appsecret: API credentials used by api_sign().
        Cookies persist in a '.cookie' file next to this module; a stored
        'DedeUserID' cookie marks the session as logged in.
        """
        self.appkey = appkey
        self.appsecret = appsecret
        self.is_login = False
        cookie_path = os.path.dirname(os.path.abspath(__file__)) + '/.cookie'
        self.cj = MozillaCookieJar(cookie_path)
        if os.path.isfile(cookie_path):
            self.cj.load()
            # 'DedeUserID' present => previous login still valid; keep the uid.
            if requests.utils.dict_from_cookiejar(self.cj).has_key('DedeUserID'):
                self.is_login = True
                self.mid = str(requests.utils.dict_from_cookiejar(self.cj)['DedeUserID'])
        # Route all urllib2 requests through the same cookie jar.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(opener)
def get_captcha(self, path = None):
if not requests.utils.dict_from_cookiejar(self.cj).has_key('sid'):
utils.get_page_content(LOGIN_CAPTCHA_URL.format(random.random()),
headers = {'Referer':'https://passport.bilibili.com/login'})
result = utils.get_page_content(LOGIN_CAPTCHA_URL.format(random.random()),
headers = {'Referer':'https://passport.bilibili.com/login'})
if path == None:
path = tempfile.gettempdir() + '/captcha.jpg'
with open(path, 'wb') as f:
f.write(result)
return path
def get_encryped_pwd(self, pwd):
import rsa
result = json.loads(utils.get_page_content(LOGIN_HASH_URL.format(random.random()),
headers={'Referer':'https://passport.bilibili.com/login'}))
pwd = result['hash'] + pwd
key = result['key']
pub_key = rsa.PublicKey.load_pkcs1_openssl_pem(key)
pwd = rsa.encrypt(pwd.encode('utf-8'), pub_key)
pwd = base64.b64encode(pwd)
pwd = urllib.quote(pwd)
return pwd
def api_sign(self, params):
params['appkey']=self.appkey
data = ""
keys = params.keys()
keys.sort()
for key in keys:
if data != "":
data += "&"
value = params[key]
if type(value) == int:
value = str(value)
data += key + "=" + str(urllib.quote(value))
if self.appsecret == None:
return data
m = hashlib.md5()
m.update(data + self.appsecret)
return data + '&sign=' + m.hexdigest()
def get_category_from_web_page(self):
category_dict = {'0': {'title': u'全部', 'url': HOME_URL, 'subs':[]}}
node = category_dict['0']
url = node['url']
result = BeautifulSoup(utils.get_page_content(url), "html.parser").findAll('li', {'class': 'm-i'})
for item in result:
if len(item['class']) != 1:
continue
tid = item['data-tid']
title = item.em.contents[0]
url = 'http:' + item.a['href']
category_dict[tid] = {'title': title, 'url': url, 'subs':[]}
node['subs'].append(tid)
#Fix video and movie
if '11' not in category_dict['0']['subs']:
category_dict['0']['subs'].append('11')
if '23' not in category_dict['0']['subs']:
category_dict['0']['subs'].append('23')
category_dict['11'] = {'title': u'电视剧', 'url': 'http://bangumi.bilibili.com/tv/', 'subs': []}
category_dict['23'] = {'title': u'电影', 'url': 'http://bangumi.bilibili.com/movie/', 'subs': []}
for sub in category_dict['0']['subs']:
node = category_dict[sub]
url = node['url']
result = BeautifulSoup(utils.get_page_content(url), "html.parser").select('ul.n_num li')
for item in result[1:]:
if not item.has_attr('tid'):
continue
if not hasattr(item, 'a'):
continue
if item.has_attr('class'):
continue
tid = item['tid']
title = item.a.contents[0]
if item.a['href'][:2] == '//':
url = 'http:' + item.a['href']
else:
url = HOME_URL + item.a['href']
category_dict[tid] = {'title': title, 'url': url, 'subs':[]}
node['subs'].append(tid)
return category_dict
def get_category(self, tid = '0'):
items = [{tid: {'title': '全部', 'url': CATEGORY[tid]['url'], 'subs': []}}]
for sub in CATEGORY[tid]['subs']:
items.append({sub: CATEGORY[sub]})
return items
def get_category_name(self, tid):
return CATEGORY[str(tid)]['title']
def get_order(self):
return ORDER
def get_category_list(self, tid = 0, order = 'default', days = 30, page = 1, pagesize = 10):
params = {'tid': tid, 'order': order, 'days': days, 'page': page, 'pagesize': pagesize}
url = LIST_URL.format(self.api_sign(params))
result = json.loads(utils.get_page_content(url))
results = []
for i in range(pagesize):
if result['list'].has_key(str(i)):
results.append(result['list'][str(i)])
else:
break
return results, result['pages']
def get_my_info(self):
if self.is_login == False:
return []
result = json.loads(utils.get_page_content(MY_INFO_URL))
return result['data']
def get_bangumi_chase(self, page = 1, pagesize = 10):
if self.is_login == False:
return []
url = BANGUMI_CHASE_URL.format(self.mid, page, pagesize)
result = json.loads(utils.get_page_content(url))
return result['data']['result'], result['data']['pages']
def get_bangumi_detail(self, season_id):
url = BANGUMI_SEASON_URL.format(season_id)
result = utils.get_page_content(url)
if result[0] != '{':
start = result.find('(') + 1
end = result.find(');')
result = result[start:end]
result = json.loads(result)
return result['result']
def get_history(self, page = 1, pagesize = 10):
if self.is_login == False:
return []
url = HISTORY_URL.format(page, pagesize)
result = json.loads(utils.get_page_content(url))
if len(result['data']) >= int(pagesize):
total_page = int(page) + 1
else:
total_page = int(page)
return result['data'], total_page
def get_dynamic(self, page = 1, pagesize = 10):
if self.is_login == False:
return []
url = DYNAMIC_URL.format(pagesize, page)
result = json.loads(utils.get_page_content(url))
total_page = int((result['data']['page']['count'] + pagesize - 1) / pagesize)
return result['data']['feeds'], total_page
def get_attention(self, page = 1, pagesize = 10):
if self.is_login == False:
return []
url = ATTENTION_URL.format(self.mid, page, pagesize)
result = json.loads(utils.get_page_content(url))
return result['data']['list'], result['data']['pages']
def get_attention_video(self, mid, tid = 0, page = 1, pagesize = 10):
if self.is_login == False:
return []
url = ATTENTION_VIDEO_URL.format(mid, page, pagesize, tid)
result = json.loads(utils.get_page_content(url))
return result['data'], result['data']['pages']
def get_attention_channel(self, mid):
if self.is_login == False:
return []
url = ATTENTION_CHANNEL_URL.format(mid)
result = json.loads(utils.get_page_content(url))
return result['data']['list']
def get_attention_channel_list(self, mid, cid, page = 1, pagesize = 10):
if self.is_login == False:
return []
url = ATTENTION_CHANNEL_LIST_URL.format(mid, cid, page, pagesize)
result = json.loads(utils.get_page_content(url))
return result['data']['list'], result['data']['total']
def get_fav_box(self):
if self.is_login == False:
return []
url = FAV_BOX_URL.format(self.mid)
result = json.loads(utils.get_page_content(url))
return result['data']['list']
def get_fav(self, fav_box, page = 1, pagesize = 10):
if self.is_login == False:
return []
url = FAV_URL.format(self.mid, page, pagesize, fav_box)
result = json.loads(utils.get_page_content(url))
return result['data']['vlist'], result['data']['pages']
def login(self, userid, pwd, captcha):
#utils.get_page_content('http://www.bilibili.com')
if self.is_login == True:
return True, ''
pwd = <PASSWORD>.get_<PASSWORD>(pwd)
data = 'cType=2&vcType=1&captcha={}&user={}&pwd={}&keep=true&gourl=http://www.bilibili.com/'.format(captcha, userid, pwd)
result = utils.get_page_content(LOGIN_URL, data,
{'Origin':'https://passport.bilibili.com',
'Referer':'https://passport.bilibili.com/login'})
if not requests.utils.dict_from_cookiejar(self.cj).has_key('DedeUserID'):
return False, LOGIN_ERROR_MAP[json.loads(result)['code']]
self.cj.save()
self.is_login = True
self.mid = str(requests.utils.dict_from_cookiejar(self.cj)['DedeUserID'])
return True, ''
def logout(self):
self.cj.clear()
self.cj.save()
self.is_login = False
def get_av_list_detail(self, aid, page = 1, fav = 0, pagesize = 10):
params = {'id': aid, 'page': page}
if fav != 0:
params['fav'] = fav
url = VIEW_URL.format(self.api_sign(params))
result = json.loads(utils.get_page_content(url))
results = [result]
if (int(page) < result['pages']) and (pagesize > 1):
results += self.get_av_list_detail(aid, int(page) + 1, fav, pagesize = pagesize - 1)[0]
return results, result['pages']
def get_av_list(self, aid):
url = AV_URL.format(aid)
result = json.loads(utils.get_page_content(url))
return result
def get_video_urls(self, cid):
m = hashlib.md5()
m.update(INTERFACE_PARAMS.format(str(cid), SECRETKEY_MINILOADER))
url = INTERFACE_URL.format(str(cid), m.hexdigest())
doc = minidom.parseString(utils.get_page_content(url))
urls = [durl.getElementsByTagName('url')[0].firstChild.nodeValue for durl in doc.getElementsByTagName('durl')]
urls = [url
if not re.match(r'.*\.qqvideo\.tc\.qq\.<EMAIL>', url)
else re.sub(r'.*\.qqvideo\.tc\.qq\.com', 'http://vsrc.store.qq.com', url)
for url in urls]
return urls
def add_history(self, aid, cid):
url = ADD_HISTORY_URL.format(str(cid), str(aid))
utils.get_page_content(url)
if __name__ == '__main__':
    # Smoke-test entry point: construct a client (loads cached cookies, if
    # any). The commented-out calls below are manual usage examples.
    b = Bilibili()
    #if b.is_login == False:
    #    b.get_captcha('')
    #    captcha = raw_input('Captcha: ')
    #    print b.login(u'<EMAIL>', u'123456', captcha)
    #print b.get_fav(49890104)
    #print b.get_av_list(8163111)
    #print b.add_history(8163111, 13425238)
    #print b.get_video_urls(12821893)
    #print b.get_category_list('32')
    #print b.get_dynamic('2')[1]
    #print b.get_category()
    #print b.get_bangumi_chase()
    #print b.get_attention()
    #print b.get_attention_video('7349', 0, 1, 1)
    #print b.get_attention_channel('7349')
    #print json.dumps(b.get_bangumi_detail('5800'), indent=4, ensure_ascii=False)
    #print b.get_bangumi_detail('5800')
    #print b.get_history(1)
    #with open('bilibili_config.py', 'a') as f:
    #    f.write('\nCATEGORY = ')
    #    f.write(json.dumps(b.get_category_from_web_page(), indent=4, ensure_ascii=False).encode('utf8'))
|
<filename>network/ds_transforms.py
import torch
import numpy as np
import torch.nn.functional as F
class ToTensor(object):
    """Convert a sample's 'image'/'label'/'teacher' arrays to torch tensors."""

    def __init__(self, hdf5=False):
        # hdf5=True means the fields are h5py datasets that must be
        # materialised with [()] before conversion.
        self.hdf5 = hdf5

    def __call__(self, sample):
        keys = ('image', 'label', 'teacher')
        if self.hdf5:
            return tuple(torch.from_numpy(sample[k][()]) for k in keys)
        return tuple(torch.from_numpy(sample[k]) for k in keys)
class DataAugmentation(object):
    """Random flip, brightness shift and contrast jitter for a sample dict."""

    def __call__(self, sample):
        # Flip along the first axis with 10% probability (all fields together).
        if torch.rand(1) < 0.1:
            for key in ('image', 'label', 'teacher'):
                sample[key] = sample[key][::-1].copy()
        # Additive brightness shift drawn from U(-0.5, 0.5).
        sample['image'] = sample['image'] + np.random.uniform(-0.5, 0.5)
        # Contrast jitter about the image mean with factor U(0.75, 1.25).
        factor = np.random.uniform(0.75, 1.25)
        mean = np.mean(sample['image'])
        sample['image'] = (sample['image'] - mean) * factor + mean
        return sample
class RandomCropCollate(object):
    """Collate function that crops one random foreground-centred patch per sample.

    For each (image, label, teacher) triple it picks a random voxel where
    label > 0 and extracts a patch of `patch_size` around it, zero-padding
    whenever the patch sticks out of the volume.
    """
    def __init__(self, patch_size):
        # Split each patch dimension into a floor/ceil half so odd sizes
        # still sum back to patch_size around the centre voxel.
        self.dim_x_l, self.dim_x_h = (np.floor(patch_size[0] / 2).astype(np.int32),
                                      np.ceil(patch_size[0] / 2).astype(np.int32))
        self.dim_y_l, self.dim_y_h = (np.floor(patch_size[1] / 2).astype(np.int32),
                                      np.ceil(patch_size[1] / 2).astype(np.int32))
        self.dim_z_l, self.dim_z_h = (np.floor(patch_size[2] / 2).astype(np.int32),
                                      np.ceil(patch_size[2] / 2).astype(np.int32))
    def collate(self, batch, device, multi_class=False):
        """Crop a random label-centred patch from every sample and stack them.

        batch: iterable of (image, label, teacher) tensor triples; the
               indexing below assumes layout (C, Z, X, Y) - TODO confirm.
        Returns (images, labels, teachers) stacked on `device`.
        """
        cropped_image = []
        cropped_label = []
        cropped_teacher = []
        for b in batch:
            current_x, current_y, current_teacher = b[0].to(device=device, non_blocking=True),\
                                                    b[1].to(device=device, non_blocking=True),\
                                                    b[2].to(device=device, non_blocking=True)
            # Pick one random foreground voxel (label > 0) as patch centre.
            idx = torch.where(current_y > 0)
            idx = torch.stack(idx, dim=1)
            idx = torch.flatten(idx[torch.randint(low=0, high=idx.shape[0], size=(1,))])
            current_shape = current_y.shape
            # For each spatial axis compute how far the patch overhangs the
            # volume border on either side (0 when fully inside).
            if idx[2] - self.dim_x_l < 0:
                pad_value_left = abs(idx[2] - self.dim_x_l)
            else:
                pad_value_left = 0
            if idx[2] + self.dim_x_h > current_shape[2]:
                pad_value_right = abs(idx[2] + self.dim_x_h - current_shape[2])
            else:
                pad_value_right = 0
            if idx[3] - self.dim_y_l < 0:
                pad_value_bottom = abs(idx[3] - self.dim_y_l)
            else:
                pad_value_bottom = 0
            if idx[3] + self.dim_y_h > current_shape[3]:
                pad_value_top = abs(idx[3] + self.dim_y_h - current_shape[3])
            else:
                pad_value_top = 0
            if idx[1] - self.dim_z_l < 0:
                pad_value_back = abs(idx[1] - self.dim_z_l)
            else:
                pad_value_back = 0
            if idx[1] + self.dim_z_h > current_shape[1]:
                pad_value_forward = abs(idx[1] + self.dim_z_h - current_shape[1])
            else:
                pad_value_forward = 0
            # F.pad expects pairs from the last dimension backwards:
            # (Y_low, Y_high, X_low, X_high, Z_low, Z_high, C_low, C_high).
            padding = [pad_value_bottom, pad_value_top,
                       pad_value_left, pad_value_right,
                       pad_value_back, pad_value_forward, 0, 0]
            if sum(padding) > 0:
                current_x = F.pad(current_x, padding, "constant")
                current_y = F.pad(current_y, padding, "constant")
                current_teacher = F.pad(current_teacher, padding, "constant")
                # Padding shifts voxel coordinates; move the centre with it.
                idx = idx + torch.tensor([0, pad_value_back, pad_value_left, pad_value_bottom], device=device)
            current_x = current_x[:, idx[1] - self.dim_z_l:idx[1] + self.dim_z_h,
                                  idx[2] - self.dim_x_l:idx[2] + self.dim_x_h,
                                  idx[3] - self.dim_y_l:idx[3] + self.dim_y_h]
            current_y = current_y[:, idx[1] - self.dim_z_l:idx[1] + self.dim_z_h,
                                  idx[2] - self.dim_x_l:idx[2] + self.dim_x_h,
                                  idx[3] - self.dim_y_l:idx[3] + self.dim_y_h]
            if multi_class:
                # Multi-class teacher carries an extra leading dim; drop it.
                current_teacher = current_teacher[0, :, idx[1] - self.dim_z_l:idx[1] + self.dim_z_h,
                                                  idx[2] - self.dim_x_l:idx[2] + self.dim_x_h,
                                                  idx[3] - self.dim_y_l:idx[3] + self.dim_y_h]
            else:
                current_teacher = current_teacher[:, idx[1] - self.dim_z_l:idx[1] + self.dim_z_h,
                                                  idx[2] - self.dim_x_l:idx[2] + self.dim_x_h,
                                                  idx[3] - self.dim_y_l:idx[3] + self.dim_y_h]
            cropped_image.append(current_x)
            cropped_label.append(current_y)
            cropped_teacher.append(current_teacher)
        return torch.stack(cropped_image), torch.stack(cropped_label), torch.stack(cropped_teacher)
|
import sid
# import ssim
import msssim
import dataloader
import paddle
import paddle.nn as nn
import os
# ---- Dataset / loader setup (module-level; consumed by the training loop below) ----
train_path = 'data/train/moire_train_dataset'
# print(train_path)
# Training pairs: moire inputs from `images`, clean targets from `gts`;
# index range 100.. is trained on, 0..100 is held out for validation.
train_data = dataloader.TrainData(train_path, patch_size=512, file_type='*', hr_dir='gts', lr_dir='images', scale = 1, start=100, end=999999)
train_loader = paddle.io.DataLoader(train_data, batch_size=2, shuffle=True, num_workers=0)
val_path = train_path  # validation reads the same root, different index range
val_data = dataloader.ValData(val_path, file_type='*', hr_dir='gts', lr_dir='images', start=0, end=100)
val_loader = paddle.io.DataLoader(val_data, batch_size=1, shuffle=False, num_workers=0)
def rot90(input, k, dims):
    """Rotate a paddle tensor by k * 90 degrees in the plane given by `dims`."""
    ndim = len(input.shape)
    perm = list(range(ndim))
    perm[dims[0]], perm[dims[1]] = perm[dims[1]], perm[dims[0]]
    flip_axis = min(dims)
    # One transpose + flip equals a single 90-degree rotation; repeat k times.
    for _ in range(k):
        input = paddle.transpose(input, perm)
        input = paddle.flip(input, [flip_axis])
    return input
# Prefer the GPU when this Paddle build has CUDA support.
if paddle.is_compiled_with_cuda():
    paddle.set_device('gpu:0')
else:
    paddle.set_device('cpu')
save_path = 'ckpts/sid'
# SID (See-in-the-Dark) U-Net, RGB in / RGB out.
net = sid.UNetSeeInDark(in_channels=3, out_channels=3)
os.makedirs(save_path, exist_ok=True)
load_pretrain=0
learning_rate = 1e-4
if load_pretrain:
    # Optionally resume from a previous best checkpoint.
    net_path = 'model_best.pdiparams'
    print('load ' + net_path)
    weights = paddle.load(os.path.join(net_path))
    net.load_dict(weights)
optimizer = paddle.optimizer.Adam(learning_rate=learning_rate, parameters=net.parameters(), weight_decay=0.0)
mse = nn.MSELoss()
l1 = nn.L1Loss()
# MS-SSIM on [0,1] images; used in both the training objective and the score.
ms_ssim_loser = msssim.MS_SSIMLoss(data_range=1.)#.cuda() #####################################
iters = 0
best_score = 0
best_psnr = 0
best_ssim = 0
# ---- Training loop: PSNR/MS-SSIM objective with per-epoch validation ----
for epoch in range(2000):
    train_loss = 0
    net.train()
    if epoch > 1000:
        # Decay the learning rate once after epoch 1000.
        # BUGFIX: `optimizer.param_groups` is PyTorch API; Paddle optimizers
        # expose set_lr()/get_lr(), so the old code raised AttributeError.
        optimizer.set_lr(1e-5)
    for i, (lr, hr, lrp, hrp) in enumerate(train_loader):
        res = net(lr)
        # Maximise 0.5 * PSNR/100 + 0.5 * MS-SSIM (negated for minimisation).
        # 65025.0 == 255^2, so log10(65025/mse)*10 is PSNR on 8-bit scale.
        loss = -(0.5 * paddle.log10(65025.0 / mse(res*255.0, hr*255.0)) * 10 /100 + 0.5 * ms_ssim_loser(res, hr).mean())
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        iters += 1
        current_lr = optimizer.get_lr()
        if iters % 100 ==0:
            print('epoch:', epoch, 'iter:', iters, 'loss:', train_loss/(i+1), 'lr:', current_lr, 'net:', save_path)
            paddle.save(net.state_dict(), '{}/model_latest.pdiparams'.format(save_path))
    if epoch % 1 == 0:
        val_psnr = 0
        val_ms_ssim = 0
        val_score = 0
        net.eval()
        with paddle.no_grad():
            for i, (lr, hr, lrp, hrp) in enumerate(val_loader):
                if os.path.basename(save_path) not in ['rcan']:
                    # Reflect-pad H and W up to multiples of 32 so the U-Net's
                    # down/up-sampling stages align; crop back after inference.
                    _,_,h,w = lr.shape
                    rh, rw = h, w
                    div = 32
                    pad_h = div - h%div if h%div != 0 else 0
                    pad_w = div - w%div if w%div != 0 else 0
                    m = nn.Pad2D((0,pad_w,0, pad_h), mode='reflect')
                    lr = m(lr)
                res = net(lr)
                res = res[:,:,:rh,:rw]
                # Round to 8-bit levels before PSNR, matching the benchmark.
                loss = mse((res * 255.0).round(), hr*255.0)
                psnr = paddle.log10(65025.0 / loss) * 10
                ms_ssim = ms_ssim_loser(res, hr).mean()
                psnr = psnr.cpu().numpy()
                ms_ssim = ms_ssim.cpu().numpy()
                # Competition score: equally weighted PSNR/100 and MS-SSIM.
                score = psnr / 100 * 0.5 + ms_ssim * 0.5
                val_psnr += psnr
                val_ms_ssim += ms_ssim
                val_score += score
        ave_psnr = val_psnr/(i+1)
        ave_ms_ssim = val_ms_ssim/(i+1)
        ave_score = val_score/(i+1)
        if ave_score > best_score:
            best_score = ave_score
            paddle.save(net.state_dict(), '{}/model_best.pdiparams'.format(save_path))
        print('epoch:', epoch, 'iter:', iters, 'ave_psnr:', ave_psnr[0], 'ave_ms_ssim:', ave_ms_ssim[0], 'ave_score:', ave_score[0], 'best_score:', best_score, 'best_psnr:', best_psnr, 'best_ssim:', best_ssim)
|
<gh_stars>1-10
from tool.runners.python import SubmissionPy
def parse(s):
    """Parse a snailfish-number string into a flat [value, depth] list.

    Each literal digit becomes [int_value, nesting_depth]; brackets only
    adjust the running depth and commas are ignored (single-digit values).
    """
    depth = 0
    flat = []
    for ch in s:
        if ch == "[":
            depth += 1
        elif ch == "]":
            depth -= 1
        elif ch != ",":
            flat.append([int(ch), depth])
    return flat
def explode(s):
    """Explode the leftmost pair nested deeper than 4 levels, in place.

    `s` is the flat [value, depth] representation produced by parse().
    A pair is two consecutive entries at the same depth; when that depth
    exceeds 4, its left value is added to the nearest number on the left,
    its right value to the nearest number on the right, and the pair
    collapses to a single 0 one level up.

    Returns True if an explosion happened, False otherwise.
    """
    current_depth = -1
    for i, (value, depth) in enumerate(s):
        if current_depth != depth:
            # First element seen at this depth: remember it and move on; a
            # pair is only detected when the next element matches the depth.
            current_depth = depth
            continue
        # Here s[i-1]/s[i] are the left/right halves of a same-depth pair.
        if depth > 4:
            if i - 2 >= 0:
                s[i - 2][0] += s[i - 1][0]  # spill the left value leftwards
            if i + 1 < len(s):
                s[i + 1][0] += s[i][0]      # spill the right value rightwards
            # The replacement 0 merges with whichever neighbour sits at the
            # parent depth; keep the slot on that side and drop the other.
            # BUGFIX: use `del s[idx]` instead of `s.remove(s[idx])` --
            # remove() deletes the first *equal* [value, depth] entry, which
            # is not necessarily the entry at idx when duplicates exist.
            if i - 2 >= 0 and s[i - 2][1] == depth - 1:
                s[i - 1] = [0, depth - 1]
                del s[i]
            else:
                s[i] = [0, depth - 1]
                del s[i - 1]
            return True
    return False
def split(s):
    """Split the leftmost value >= 10 into a pair one level deeper, in place.

    Returns True if a split happened, False otherwise.
    """
    for i, (value, depth) in enumerate(s):
        if value < 10:
            continue
        half = value // 2
        # Left half rounds down, right half rounds up; both one level deeper.
        s[i] = [value - half, depth + 1]
        s.insert(i, [half, depth + 1])
        return True
    return False
def reduce(s):
    """Reduce `s` in place: explode while possible, otherwise split.

    Explosions always take priority over splits (the `or` short-circuits),
    matching the snailfish reduction rules.
    """
    reducible = True
    while reducible:
        reducible = explode(s) or split(s)
def add(s1, s2):
    """Add two snailfish numbers: nest both one level deeper, concatenate,
    reduce, and return the resulting flat list."""
    combined = []
    for value, depth in s1:
        combined.append([value, depth + 1])
    for value, depth in s2:
        combined.append([value, depth + 1])
    reduce(combined)
    return combined
def get_magnitude(s):
    """Collapse the flat representation into its magnitude, recursively.

    The deepest pair is replaced by 3*left + 2*right one level up until a
    single entry remains. Mutates `s`.
    """
    if len(s) == 1:
        return s[0][0]
    # The first entry at maximum depth is the left half of the deepest pair;
    # its right half is the entry immediately after it.
    deepest = max(entry[1] for entry in s)
    index = next(i for i, entry in enumerate(s) if entry[1] == deepest)
    s[index] = [3 * s[index][0] + 2 * s[index + 1][0], deepest - 1]
    del s[index + 1]
    return get_magnitude(s)
class ThChSubmission(SubmissionPy):
    def run(self, s):
        """Sum every snailfish number in `s` (one per line) and return the
        magnitude of the final sum.

        :param s: input in string format
        :return: solution flag
        """
        total = None
        for raw_line in s.splitlines():
            number = parse(raw_line)
            total = number if total is None else add(total, number)
        return get_magnitude(total)
def test_th_ch():
    """
    Run `python -m pytest ./day-18/part-1/th-ch.py` to test the submission.
    """
    # Worked example from the AoC 2021 day 18 statement: the magnitude of
    # the sum of these ten numbers must be 4140.
    assert (ThChSubmission().run("""
[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
""".strip()) == 4140)
|
<gh_stars>10-100
import os
import numpy as np
import sys
import json
import pickle
sys.path.append('./')
import matplotlib
from .ResultMerge_multi_process import mergebypoly_multiprocess
from .dota_evaluation_task1 import do_eval
matplotlib.use('Agg')
# The 15 DOTA-v1.0 object categories, in the canonical evaluation order.
wordname_15 = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle',
               'large-vehicle', 'ship', 'tennis-court','basketball-court', 'storage-tank',
               'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter']
def voc_eval(quad_predict):
    """Convert quad predictions to DOTA txt format, merge per-tile results
    and run the task-1 evaluation.

    quad_predict: path to the bbox.json produced by the detector.
    NOTE(review): dataset paths below are hard-coded to one machine;
    consider promoting them to parameters.
    """
    txt_before_merge = os.path.join(quad_predict, '..', 'before_merge')
    quad_json = '/home/yangfan/Pet-dev/data/DOTA/dota_800_200/val/dota_800_200_val_merge.json'
    quad_json_result_to_txt(quad_json, quad_predict, txt_before_merge, wordname_15)
    txt_after_merge = os.path.join(quad_predict, '..', 'after_merge')
    mergebypoly_multiprocess(txt_before_merge, txt_after_merge)
    # BUGFIX: evaluate the *merged* detections; the old code pointed
    # det_path at before_merge, which made the merge step a no-op.
    det_path = os.path.join(txt_after_merge, '{:s}.txt')
    anno_path = r'/home/yangfan/Pet-dev/data/DOTA-v1/data_ori/val/labelTxt-v1.0/{:s}.txt'
    # BUGFIX: the imageset path was missing its leading '/' and silently
    # resolved relative to the current working directory.
    imageset_file = r'/home/yangfan/Pet-dev/data/DOTA-v1/data_ori/val/labelTxt-v1.0'
    do_eval(det_path, anno_path, imageset_file, wordname_15)
def rjson_result2txt(index_json, pred_file, txt_file_dir, categroy_list):
    """Dump rotated-box predictions as per-category DOTA txt files.

    index_json:   COCO-style json whose "images" list maps id -> file_name.
    pred_file:    detector output json (image_id/category_id/score/bbox),
                  where bbox is a rotated box ending in an angle value.
    txt_file_dir: output directory; one "<category>.txt" per class.
    """
    # img_list = os.listdir(pred_file_dir)
    with open(index_json, 'r') as load_f:
        index_list = json.load(load_f)["images"]
    # Map image id -> file name without its 4-character extension.
    inex_dict = {}
    for index in index_list:
        # print(index["id"])
        inex_dict[index["id"]] = index["file_name"][:-4]
    with open(pred_file, 'r') as load_f:
        pred_list = json.load(load_f)  # ["images"]
    multi_mkdir(txt_file_dir)  # NOTE(review): multi_mkdir is not defined or imported in this file - confirm
    for category_index, category_name in enumerate(categroy_list):
        if category_name == '__background__':
            continue
        txt_file_path = os.path.join(txt_file_dir, category_name + '.txt')
        with open(txt_file_path, "w") as save_f:
            for pred_index in pred_list:
                if pred_index["category_id"] != category_index:
                    continue
                # print(file_index)
                score = pred_index["score"]
                # if score < 0.005:
                #     continue
                file_index = pred_index["image_id"]
                rbox = pred_index["bbox"]
                # Flip the angle convention before converting to a quad.
                # NOTE(review): assumes bbox[-1] is an angle in degrees - confirm
                rbox[-1] = 180 - rbox[-1]
                rbox = rbox2quad_convert(np.array([rbox]), False).tolist()
                # Rebuild the merged-tile naming scheme expected downstream.
                file_name = inex_dict[file_index].split('_')[0] + '__1__' + \
                            inex_dict[file_index].split('_')[1] + '___' + \
                            inex_dict[file_index].split('_')[2]
                line = '{} {} {}'.format(file_name, score, rbox)
                # line = '{} {} {}'.format(inex_dict[file_index], score, rbox)
                line = line.replace('[', '').replace(',', '').replace(']', '')
                save_f.writelines(line + '\n')
        save_f.close()
        # break
def rescale_quad_json_result_to_txt(index_json, pred_file, txt_file_dir, categroy_list, scale_factor):
    """Dump quad predictions to per-category DOTA txt files, dividing every
    coordinate by `scale_factor` (undoes test-time image rescaling).

    index_json:   COCO-style json whose "images" list maps id -> file_name.
    pred_file:    detector output json (image_id/category_id/score/bbox).
    txt_file_dir: output directory; one "<category>.txt" per class.
    """
    with open(index_json, 'r') as load_f:
        index_list = json.load(load_f)["images"]
    # Map image id -> file name without its 4-character extension.
    inex_dict = {}
    for index in index_list:
        inex_dict[index["id"]] = index["file_name"][:-4]
    with open(pred_file, 'r') as load_f:
        pred_list = json.load(load_f)
    print(len(pred_list))
    multi_mkdir(txt_file_dir)  # NOTE(review): helper not defined/imported in this file - confirm
    for category_index, category_name in enumerate(categroy_list):
        if category_name == '__background__':
            continue
        txt_file_path = os.path.join(txt_file_dir, category_name + '.txt')
        with open(txt_file_path, "w") as save_f:
            for pred_index in pred_list:
                if pred_index["category_id"] != category_index:
                    continue
                score = pred_index["score"]
                file_index = pred_index["image_id"]
                quad = pred_index["bbox"]
                # Undo test-time rescaling on every coordinate.
                quad = [point / scale_factor for point in quad]
                # Rebuild the merged-tile naming scheme expected downstream.
                file_name = inex_dict[file_index].split('_')[0] + '__1__' + \
                            inex_dict[file_index].split('_')[1] + '___' + \
                            inex_dict[file_index].split('_')[2]
                line = '{} {} {}'.format(file_name, score, quad)
                line = line.replace('[', '').replace(',', '').replace(']', '')
                save_f.writelines(line + '\n')
        # BUGFIX(idiom): removed the redundant save_f.close(); the `with`
        # block already closes the file.
def quad_json_result_to_txt(index_json, pred_file, txt_file_dir, categroy_list):
    """Write quad predictions into one DOTA-format txt file per category."""
    with open(index_json, 'r') as load_f:
        images = json.load(load_f)["images"]
    # image id -> file name without its 4-character extension
    inex_dict = {entry["id"]: entry["file_name"][:-4] for entry in images}
    with open(pred_file, 'r') as load_f:
        pred_list = json.load(load_f)
    print(len(pred_list))
    multi_mkdir(txt_file_dir)
    for category_index, category_name in enumerate(categroy_list):
        if category_name == '__background__':
            continue
        txt_file_path = os.path.join(txt_file_dir, category_name + '.txt')
        with open(txt_file_path, "w") as save_f:
            for det in pred_list:
                if det["category_id"] != category_index:
                    continue
                # Rebuild the merged-tile naming scheme expected downstream.
                stem = inex_dict[det["image_id"]].split('_')
                file_name = stem[0] + '__1__' + stem[1] + '___' + stem[2]
                quad = det["bbox"][0:8]
                line = '{} {} {}'.format(file_name, det["score"], quad)
                line = line.replace('[', '').replace(',', '').replace(']', '')
                save_f.writelines(line + '\n')
        save_f.close()  # no-op: the file is already closed by the with block
def bboxjson_result2txt(index_json, pred_file, txt_file_dir, categroy_list):
    """Write horizontal-box predictions as 8-point polygons into
    'Task1_<category>.txt' files (DOTA task-1 submission format)."""
    with open(index_json, 'r') as load_f:
        images = json.load(load_f)["images"]
    # image id -> file name without its 4-character extension
    inex_dict = {entry["id"]: entry["file_name"][:-4] for entry in images}
    with open(pred_file, 'r') as load_f:
        pred_list = json.load(load_f)
    multi_mkdir(txt_file_dir)
    for category_index, category_name in enumerate(categroy_list):
        if category_name == '__background__':
            continue
        txt_file_path = os.path.join(txt_file_dir, 'Task1_' + category_name + '.txt')
        with open(txt_file_path, "w") as save_f:
            for det in pred_list:
                if det["category_id"] != category_index:
                    continue
                bbox = det["bbox"]
                # Convert xywh to a corner polygon via dots4ToRec8.
                ploy = dots4ToRec8([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]])
                line = '{} {} {}'.format(inex_dict[det["image_id"]], det["score"], ploy)
                line = line.replace('[', '').replace(',', '').replace(']', '').replace('(', '').replace(')', '')
                save_f.writelines(line + '\n')
        save_f.close()  # no-op: the file is already closed by the with block
# if __name__ == '__main__':
# txt_file = os.path.join('./', 'result/before_split')
#
# # val_json = 'data/DOTA-v1/dota_800_200/val/dota_800_200_val.json'
# # pred_file = '/home/yangfan/Pet-dev/ckpts/rcnn/DOTA_rotated/e2e_rotated_faster_rcnn_R-101-C4-2FC_1x/test/bbox.json'
# # json_result2txt(val_json, pred_file, txt_file, wordname_15)
# # pkl_result2txt(val_json, pred_file, txt_file, wordname_15)
# # result2category(pred_file, txt_file, wordname_15)
# # json_result2txt(val_json, json_file, txt_file, wordname_15)
#
# # quad_json = '/home/yangfan/Pet-dev/data/DOTA/dota_800_200/val/dota_800_200_val_merge.json'
# # quad_json = 'data/DOTA-v1/dota_800_200/val/dota_800_200_val_quad_order_all.json'
# # quad_json = '/home/yangfan/Pet-dev/data/DOTA/dota_1024_200/val/dota_1024_200_val_quad_order.json'
# quad_json = 'data/DOTA-v1/dota_800_200/val/dota_800_200_val_quad_order.json'
# quad_predict = '/home/yangfan/Pet-dev/ckpts/DOTA_rotated/five/e2e_hor_quad_grid_R-50-FPN-r2-1x/test/bbox.json'
# quad_json_result_to_txt(quad_json, quad_predict, txt_file, wordname_15)
# # scale_factor = 800/600
# # rescale_quad_json_result_to_txt(quad_json, quad_predict, txt_file, wordname_15, scale_factor)
#
# # hor_predict = ''
# # box_json_result_to_txt(quad_json, hor_predict, txt_file, wordname_15)
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestFrobenius_norm(TestCase):
def generate_single_data(self, min_d, max_d, shape, dtype):
input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input1 = torch.from_numpy(input1)
return npu_input1
def cpu_single_input_op_exec(self, input1):
output = torch.frobenius_norm(input1)
output = output.numpy()
return output
def cpu_op_exec(self, input1, axis, keep_dim):
output = torch.frobenius_norm(input1, axis, keep_dim)
# output = torch.fmod(input1, input2)
output = output.numpy()
return output
def npu_single_input_op_exec(self, input1):
input1 = input1.to("npu")
output = torch.frobenius_norm(input1)
output = output.to("cpu")
output = output.numpy()
return output
def npu_op_exec_tensor_need_to_npu(self, input1, axis, keep_dim):
input1 = input1.to("npu")
output = torch.frobenius_norm(input1, axis, keep_dim)
# output = torch.frobenius_norm(input1, input2)
output = output.to("cpu")
output = output.numpy()
return output
def test_single_input_format(self, device):
shape_format = [
[np.float32, -1, (4, 3)],
[np.float32, -1, (2, 3)],
[np.float32, -1, (4, 3)],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item, 1, 100)
cpu_output = self.cpu_single_input_op_exec(cpu_input1)
print(cpu_output)
npu_output = self.npu_single_input_op_exec(npu_input1)
print(npu_output)
self.assertRtolEqual(cpu_output, npu_output)
def test_add_common_shape_format(self, device):
shape_format = [
[np.float32, -1, (4, 3)],
[np.float32, -1, (2, 3)],
[np.float32, -1, (4, 3)],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item, 1, 100)
cpu_output = self.cpu_op_exec(cpu_input1, [1], False)
npu_output = self.npu_op_exec_tensor_need_to_npu(npu_input1, [1], False)
self.assertRtolEqual(cpu_output, npu_output)
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item, 1, 100)
cpu_output = self.cpu_op_exec(cpu_input1, [0], False)
npu_output = self.npu_op_exec_tensor_need_to_npu(npu_input1, [0], False)
self.assertRtolEqual(cpu_output, npu_output)
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item, 1, 100)
cpu_output = self.cpu_op_exec(cpu_input1, [1], True)
npu_output = self.npu_op_exec_tensor_need_to_npu(npu_input1, [1], True)
self.assertRtolEqual(cpu_output, npu_output)
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item, 1, 100)
cpu_output = self.cpu_op_exec(cpu_input1, [0], True)
npu_output = self.npu_op_exec_tensor_need_to_npu(npu_input1, [0], True)
self.assertRtolEqual(cpu_output, npu_output)
def test_add_float16_shape_format(self, device):
def cpu_op_exec_fp16(input1, axis, keep_dim):
input1 = input1.to(torch.float32)
output = torch.frobenius_norm(input1, axis, keep_dim)
output = output.numpy()
output = output.astype(np.float16)
return output
shape_format = [
[np.float16, -1, (4, 3)],
[np.float16, -1, (4, 1)],
[np.float16,-1,(65535, 1)],
[np.float16, -1, (1, 8192)],
[np.float16, -1, (1, 16384)],
[np.float16, -1, (1, 32768)],
[np.float16, -1, ( 1, 131072)],
[np.float16, -1, (1, 196608)],
[np.float16, -1, (1, 262144)],
[np.float16, -1, (1, 393216)],
[np.float16, -1, (1, 524288)],
[np.float16, -1, (1, 655360)],
[np.float16, -1, (1, 786432)],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item, 1, 100)
cpu_output = cpu_op_exec_fp16(cpu_input1,[1], True)
npu_output = self.npu_op_exec_tensor_need_to_npu(npu_input1, [1], True)
self.assertRtolEqual(cpu_output, npu_output)
def test_frobenius_norm__float32_data_range(self, device):
    """Check frobenius_norm over extreme float32 value ranges for several axis choices."""
    data_range = [
        [-1.1754943508e-38, -1.1754943508e-38],
        [-3402823500.0, 3402823500.0],
        [-0.000030517578125, 0.000030517578125],
        [3402823500, 3402800000],
        [-9.313225746154785e-10, 9.313225746154785e-10],
        [-3402823500.0, -3402823500.0],
        [-3402823500.0, 3402823500.0],
        [-9.313225746154785e-10, 9.313225746154785e-10],
        [-3402823500.0, -3402823500.0],
        [-0.000000000000000000000000000000000000011754943508, 0.000000000000000000000000000000000000011754943508],
        [0.000000000000000000000000000000000000011754943508, 0.000000000000000000000000000000000000011754943508],
        [-0.000000000000000000000000000000000000011754943508, -0.000000000000000000000000000000000000011754943508],
        [-0.000000000000000000000000000000000000011754943508, 0.000000000000000000000000000000000000011754943508]
    ]
    # Same tensor shape for every case; run each axis selection over every
    # value range (same order as the original four back-to-back loops).
    for axes in ([1], [-1], [-1, 0], [-2, 1]):
        for low, high in data_range:
            cpu_tensor, npu_tensor = create_common_tensor([np.float32, -1, (1, 31, 149, 2)], low, high)
            expected = self.cpu_op_exec(cpu_tensor, axes, False)
            actual = self.npu_op_exec_tensor_need_to_npu(npu_tensor, axes, False)
            self.assertRtolEqual(expected, actual)
# Register the test class for every device type except CPU (NPU-only suite).
instantiate_device_type_tests(TestFrobenius_norm, globals(), except_for='cpu')
if __name__ == "__main__":
    run_tests()
|
<reponame>redart-2021-final/backend
#!/bin/env python3
import asyncio
import datetime
import httpx
import pandas as pd
from redis import Redis
from rq import Queue
from rq_scheduler import Scheduler
from scipy.stats import mstats
from tortoise import Tortoise
import config
from models import Event

# Shared RQ infrastructure: one Redis connection, one queue, one scheduler.
redis = Redis.from_url(config.BROKER)
queue = Queue(connection=redis)
scheduler = Scheduler(queue=queue, connection=redis)

# Jobs that must always be on the schedule; init_jobs() reconciles this
# table with what the scheduler currently holds.
repeated_jobs = {
    'tasks.process_events': {
        'args': [],
        'kwargs': {},
        # run interval in seconds
        'interval': 60,
    }
}
class KalmanFilter:
    """One-dimensional Kalman smoother used on noisy GPS coordinates.

    https://pbs.twimg.com/media/EEkW3ZmUEAAa8Be.jpg
    """

    def __init__(self):
        # Hand-tuned noise constants.
        self.mean_deviation = 0.00023
        self.process = 0.05
        # Filter state: covariances, gain and estimates.
        self.Pc = 0.0
        self.G = 0.0
        self.P = 1.0
        self.Xp = 0.0
        self.Zp = 0.0
        self.Xe = 0.0

    def _single_filter(self, value):
        # Predict covariance, refresh the gain, then correct the estimate.
        self.Pc = self.P + self.process
        self.G = self.Pc / (self.Pc + self.mean_deviation)
        self.P = (1 - self.G) * self.Pc
        self.Xp = self.Xe
        self.Zp = self.Xp
        self.Xe = self.G * (value - self.Zp) + self.Xp
        return self.Xe

    def filter(self, data: list) -> list:
        """Run every sample through the filter, returning the smoothed series."""
        smoothed = []
        for sample in data:
            smoothed.append(self._single_filter(sample))
        return smoothed
def init_jobs() -> None:
    """Ensure every entry of `repeated_jobs` is scheduled with the right interval."""
    now = datetime.datetime.utcnow()
    existing = {job.func_name: job for job in scheduler.get_jobs()}
    for name, spec in repeated_jobs.items():
        job = existing.get(name)
        if not job:
            # Not scheduled yet: register it starting now.
            scheduler.schedule(now, name, **spec)
            print(f'scheduled job {name}')
        elif job.meta['interval'] != spec['interval']:
            # Already scheduled but with a stale interval: patch in place.
            job.meta['interval'] = spec['interval']
            job.save()
            print(f'updated job {name}')
async def _process_event(client: httpx.AsyncClient, event: Event) -> None:
    """Reverse-geocode one event via Nominatim and mark it processed."""
    print(f'start update event {dict(event)}')
    try:
        response = await client.get(
            config.NOMINATIM + '/reverse',
            params={
                'lat': event.latitude,
                'lon': event.longitude,
                'format': 'jsonv2',
            },
        )
    except Exception as e:
        # Best effort: leave the event unprocessed so a later run retries it.
        print(f'error update event {event.id} {e}')
        return
    event.extra['geo'] = response.json()
    event.processed = True
    await event.save()
    print(f'finished update event {event.id}')
async def _process_events() -> None:
    """Fetch a batch of unprocessed events, reverse-geocode them, then smooth
    their coordinates (winsorize outliers + Kalman filter) and persist the
    smoothed values.
    """
    await Tortoise.init(config.ORM)
    tasks = []
    qs = await Event.filter(processed=False).limit(20)
    print(f'fetched {len(qs)}')
    async with httpx.AsyncClient() as client:
        for event in qs:
            tasks.append(_process_event(client, event))
        await asyncio.gather(*tasks, return_exceptions=True)

    def quantile_filter(data: list):
        # Clip the top/bottom 5% to tame GPS outliers before smoothing.
        return pd.Series(mstats.winsorize(data, limits=[0.05, 0.05])).tolist()

    if qs:  # guard: nothing to smooth on an empty batch
        latitudes = [event.latitude for event in qs]
        longitudes = [event.longitude for event in qs]
        filter_latitude = KalmanFilter()
        filter_longitude = KalmanFilter()
        filtered_latitude = filter_latitude.filter(quantile_filter(latitudes))
        filtered_longitude = filter_longitude.filter(quantile_filter(longitudes))
        for lat, lon, event in zip(filtered_latitude, filtered_longitude, qs):
            event.latitude = lat
            event.longitude = lon
        # BUG FIX: the smoothed coordinates were previously assigned after the
        # events had already been saved and were silently discarded — persist them.
        await asyncio.gather(*(event.save() for event in qs))
    await Tortoise.close_connections()
def process_events() -> None:
    # Synchronous RQ entry point: run the async pipeline to completion.
    asyncio.run(_process_events())
if __name__ == '__main__':
    # Run once at startup/deploy to (re)register the repeated jobs.
    init_jobs()
|
#!/usr/bin/env python3
# encoding: utf-8
from typing import Dict
import numpy as np
from rls.algorithms.base.marl_policy import MarlPolicy
from rls.common.data import Data
from rls.common.decorator import iton
from rls.common.when import Every
from rls.common.yaml_ops import load_config
from rls.utils.np_utils import int2one_hot
class MultiAgentOffPolicy(MarlPolicy):
    """Base class for off-policy multi-agent algorithms.

    Owns the replay buffer (optionally prioritized), decides when to train
    (every `train_interval` interaction steps), and prepares sampled batches
    (one-hot discrete actions, previous-action / agent-id observation extras)
    before delegating the gradient step to the subclass `_train`.
    """

    def __init__(self,
                 chunk_length=4,
                 epochs=1,
                 batch_size=256,
                 buffer_size=100000,
                 use_priority=False,
                 train_interval=1,
                 **kwargs):
        self._chunk_length = chunk_length
        self._epochs = epochs
        self.batch_size = batch_size
        self.buffer_size = buffer_size
        self.use_priority = use_priority
        # Gate: train only every `train_interval` interaction steps.
        self._should_train = Every(train_interval)
        super().__init__(**kwargs)

    def _build_buffer(self):
        """Create the replay buffer; prioritized replay when `use_priority` is set."""
        if self.use_priority:
            from rls.memories.per_buffer import PrioritizedDataBuffer
            # NOTE: was an f-string with no placeholder (F541) — plain literal.
            buffer = PrioritizedDataBuffer(n_copies=self.n_copies,
                                           batch_size=self.batch_size,
                                           buffer_size=self.buffer_size,
                                           chunk_length=self._chunk_length,
                                           max_train_step=self._max_train_step,
                                           **load_config('rls/configs/buffer/off_policy_buffer.yaml')[
                                               'PrioritizedDataBuffer'])
        else:
            from rls.memories.er_buffer import DataBuffer
            buffer = DataBuffer(n_copies=self.n_copies,
                                batch_size=self.batch_size,
                                buffer_size=self.buffer_size,
                                chunk_length=self._chunk_length)
        return buffer

    def episode_step(self, obs, env_rets: Dict[str, Data]):
        """Record the transition, then train (and refresh priorities) when due."""
        super().episode_step(obs, env_rets)
        if self._is_train_mode and self._buffer.can_sample and self._should_train(self._cur_interact_step):
            ret = self.learn(self._buffer.sample())
            if self.use_priority:
                # td_error [T, B, 1]
                self._buffer.update(ret)

    def learn(self, BATCH_DICT: Data):
        """Run `_epochs` training passes over one sampled batch.

        Returns the mean TD-error across epochs (used as replay priority).
        """
        BATCH_DICT = self._preprocess_BATCH(BATCH_DICT)
        td_errors = 0.
        for _ in range(self._epochs):
            BATCH_DICT = self._before_train(BATCH_DICT)
            td_error, summaries = self._train(BATCH_DICT)
            td_errors += td_error  # [T, B, 1]
            self.summaries.update(summaries)
            self._after_train()
        return td_errors / self._epochs

    # customed
    def _preprocess_BATCH(self, BATCH_DICT):  # [B, *] or [T, B, *]
        """One-hot discrete actions and attach `other`/`other_` observation
        extras (previous action and/or agent-id one-hot) for every agent.
        Loop variable renamed from `id` to avoid shadowing the builtin.
        """
        for aid in self.agent_ids:
            if not self.is_continuouss[aid]:
                # [T, B, 1] or [T, B] => [T, B, N]
                BATCH_DICT[aid].action = int2one_hot(
                    BATCH_DICT[aid].action, self.a_dims[aid])
        for i, aid in enumerate(self.agent_ids):
            other, other_ = None, None
            if self._obs_with_pre_action:
                # Previous action: zeros for the first step, then the actions
                # shifted one step back; `other_` sees the current actions.
                other = np.concatenate((
                    np.zeros_like(BATCH_DICT[aid].action[:1]),
                    BATCH_DICT[aid].action[:-1]
                ), 0)
                other_ = BATCH_DICT[aid].action
            if self._obs_with_agent_id:
                _id_onehot = int2one_hot(
                    np.full(BATCH_DICT[aid].action.shape[:-1], i), self.n_agents_percopy)
                if other is not None:
                    other = np.concatenate((
                        other,
                        _id_onehot
                    ), -1)
                    other_ = np.concatenate((
                        other_,
                        _id_onehot
                    ), -1)
                else:
                    other, other_ = _id_onehot, _id_onehot
            if self._obs_with_pre_action or self._obs_with_agent_id:
                BATCH_DICT[aid].obs.update(other=other)
                BATCH_DICT[aid].obs_.update(other=other_)
        return BATCH_DICT

    def _before_train(self, BATCH_DICT):
        """Reset per-step summaries; subclasses may extend."""
        self.summaries = {}
        return BATCH_DICT

    @iton
    def _train(self, BATCH_DICT):
        """Subclass hook: one gradient step returning (td_error, summaries)."""
        raise NotImplementedError

    def _after_train(self):
        """Log summaries, checkpoint when due, advance the train-step counter."""
        self._write_log(summaries=self.summaries,
                        step_type='step')
        if self._should_save_model(self._cur_train_step):
            self.save()
        self._cur_train_step += 1
|
<filename>Ramen Warrior Game/main.py
# importing modules
import pygame
import random
import math
from pygame import mixer

# initialize all pygame subsystems
pygame.init()
# main display surface, 600x600 px
start = pygame.display.set_mode((600, 600))
# background music, looped forever (-1)
mixer.music.load('background.wav')
mixer.music.play(-1)
# background image
bg = pygame.image.load('asas.jpg')
# bullet (ketchup drop) sprite and its start position (e, f)
drop = pygame.image.load('h.png')
e = 0
f = 480
# pixels the bullet climbs per frame
bulletchange = 10
# "ready" = bullet in the bottle, "fire" = bullet in flight
dropmode = "ready"
# window title and icon
pygame.display.set_caption("Ramen Warriors!")
icon = pygame.image.load('breakfast.png')
pygame.display.set_icon(icon)
# player sprite (milk bottle), position (a, b) and velocity (x, g)
bottle = pygame.image.load('feeding-bottle.png')
a = 280
b = 500
x = 0
g = 0
# score counter and its font
score = 0
font = pygame.font.Font('freesansbold.ttf', 32)
# per-enemy state: x positions (c), y positions (d), horizontal speeds
c = []
d = []
enemychange = []
# large font for the GAME OVER banner
lafont = pygame.font.Font('freesansbold.ttf', 70)
# enemy sprites (soup bowls)
cereal = []
# spawn the enemies at random positions in the top area
noofenemies = 3
for i in range(noofenemies):
    cereal.append(pygame.image.load('soup.png'))
    c.append(random.randint(0, 570))
    d.append(random.randint(30, 200))
    enemychange.append(4)
# positioning Game over text
def gameo():
    """Render the GAME OVER banner near the centre of the screen."""
    go = lafont.render("GAME OVER! ", True, (255, 255, 255))
    start.blit(go, (75, 200))
    # Removed dead `la = True`: it was a function-local name that was never
    # read anywhere and had no effect (not declared global).
def milkbottle(a, b):
    """Draw the player's bottle sprite at position (a, b)."""
    start.blit(bottle, (a, b))
def cereals(c, d, i):
    """Draw enemy sprite number `i` at position (c, d)."""
    start.blit(cereal[i], (c, d))
def bullets(x, y):
    """Mark the drop as in flight and draw it at (x, y)."""
    global dropmode
    dropmode = "fire"
    start.blit(drop, (x, y))
def showscore():
    """Render the current score in the top-left corner."""
    surface = font.render("Score:- " + str(score), True, (255, 255, 255))
    start.blit(surface, (10, 10))
def collisionoccured(e, f, c, d):
    """Return True when the bullet at (e, f) is within 40 px of the enemy at (c, d).

    Uses math.hypot for the Euclidean distance and returns the comparison
    directly instead of the former `if dist <= 40: return True / else False`.
    """
    return math.hypot(e - c, f - d) <= 40
# Main game loop: runs until the window is closed.
running = True
while running == True:
    for event in pygame.event.get():
        # window close request
        if event.type == pygame.QUIT:
            running = False
        # key press: start moving the bottle or fire a drop
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                x = -4
            if event.key == pygame.K_RIGHT:
                x = +4
            if event.key == pygame.K_SPACE:
                # only one drop in flight at a time
                if dropmode == "ready":
                    bulletsound = mixer.Sound('laser.wav')
                    bulletsound.play()
                    # NOTE(review): the drop is drawn at the previous shot's x
                    # because `e` is updated to the bottle position only after
                    # the blit — confirm this is intended.
                    bullets(e, f)
                    e = a
        # key release: stop horizontal movement
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
                g = 0
                x = 0
    # clear the frame, then draw the background
    start.fill((0, 0, 0))
    start.blit((bg), (0, 0))
    # drop left the screen: reload it
    if f <= 0:
        f = 490
        dropmode = "ready"
    # drop in flight: redraw and move it upward
    if dropmode == "fire":
        bullets(e, f)
        f -= bulletchange
    # clamp the bottle inside the play area
    if a <= 0:
        a = 0;
    if a >= 570:
        a = 570
    # update, collide and draw each enemy
    for i in range(noofenemies):
        # an enemy reached the bottom: move all enemies off-screen and
        # show the banner (NOTE(review): the loop itself keeps running —
        # the game is not actually stopped; confirm intended)
        if d[i] >= 430:
            for j in range(noofenemies):
                d[j] = 2000
            gameo()
            break
        # bullet vs. this enemy
        collision = collisionoccured(e, f, c[i], d[i])
        # horizontal bounce + slow downward drift
        c[i] += enemychange[i]
        d[i] += 0.5
        if c[i] <= 0:
            enemychange[i] = 8
        elif c[i] > 570:
            enemychange[i] = -8
        if collision:
            coll = mixer.Sound('explosion.wav')
            coll.play()
            # reload the drop, bump the score, respawn the enemy
            f = 490
            dropmode = "ready"
            score += 1
            c[i] = random.randint(0, 570)
            d[i] = random.randint(30, 200)
        cereals(c[i], d[i], i)
    # apply the bottle's velocity and draw the HUD
    a += x
    b += g
    milkbottle(a, b)
    showscore()
    # flip the frame to the display
    pygame.display.update()
|
from PySide6.QtGui import *
from PySide6.QtCore import *
from PySide6.QtWidgets import *
from config import *
__all__ = ['PreferenceDialog']
class PrimerTagLabel(QLabel):
    """Hyperlink label that opens the primer3 manual entry for a given tag."""

    base_url = "https://primer3.org/manual.html#{}"

    def __init__(self, name, tag, parent=None):
        super().__init__(parent)
        self.tag = tag
        self.setText('<a href="#{}">{}</a>'.format(tag, name))
        self.linkActivated.connect(self.open_link)

    @Slot()
    def open_link(self):
        # Jump to the tag's anchor in the online primer3 manual.
        QDesktopServices.openUrl(QUrl(self.base_url.format(self.tag)))
class PrimerTagTree(QTreeWidget):
    """Tree widget reporting a zero size hint so the layout stretch controls its size."""
    def sizeHint(self):
        # Zero preferred size: let the parent layout's stretch factor decide.
        return QSize(0, 0)
class PrimerParameterPanel(QWidget):
    """Preference tab for primer3 design parameters.

    Builds editors for the common primer3 tags plus a free-form tree for
    arbitrary extra tags; values are persisted under the "PRIMER" QSettings
    group via read_settings()/write_settings().
    """

    def __init__(self, parent=None):
        super(PrimerParameterPanel, self).__init__(parent)
        self.settings = QSettings()
        # --- General settings: product size, primer count, size/Tm/GC ranges ---
        general_group = KraitGroupBox("General settings")
        general_layout = QVBoxLayout()
        general_group.setLayout(general_layout)
        product_layout = QGridLayout()
        general_layout.addLayout(product_layout)
        product_layout.setColumnStretch(0, 1)
        self.product_size = QLineEdit()
        self.primer_num = QSpinBox()
        self.primer_num.setRange(1, 100)
        product_layout.addWidget(PrimerTagLabel("Product size ranges", 'PRIMER_PRODUCT_SIZE_RANGE'), 0, 0)
        product_layout.addWidget(PrimerTagLabel("# of primers to return", 'PRIMER_NUM_RETURN'), 0, 1)
        product_layout.addWidget(self.product_size, 1, 0)
        product_layout.addWidget(self.primer_num, 1, 1)
        size_layout = QGridLayout()
        general_layout.addLayout(size_layout)
        size_layout.setColumnStretch(2, 1)
        size_layout.setColumnStretch(4, 1)
        size_layout.setColumnStretch(6, 1)
        self.size_min = QSpinBox()
        self.size_opt = QSpinBox()
        self.size_max = QSpinBox()
        self.size_min.setRange(0, 1000)
        self.size_opt.setRange(0, 1000)
        self.size_max.setRange(0, 1000)
        self.gc_min = QDoubleSpinBox()
        self.gc_max = QDoubleSpinBox()
        self.gc_opt = QDoubleSpinBox()
        self.tm_min = QDoubleSpinBox()
        self.tm_opt = QDoubleSpinBox()
        self.tm_max = QDoubleSpinBox()
        self.gc_min.setRange(0, 100)
        self.gc_max.setRange(0, 100)
        self.gc_opt.setRange(0, 100)
        self.tm_min.setRange(0, 1000)
        self.tm_opt.setRange(0, 1000)
        self.tm_max.setRange(0, 1000)
        size_layout.addWidget(QLabel("Primer size (bp)"), 0, 0)
        size_layout.addWidget(PrimerTagLabel("Min", 'PRIMER_MIN_SIZE'), 0, 1)
        size_layout.addWidget(self.size_min, 0, 2)
        size_layout.addWidget(PrimerTagLabel("Opt", 'PRIMER_OPT_SIZE'), 0, 3)
        size_layout.addWidget(self.size_opt, 0, 4)
        size_layout.addWidget(PrimerTagLabel("Max", 'PRIMER_MAX_SIZE'), 0, 5)
        size_layout.addWidget(self.size_max, 0, 6)
        size_layout.addWidget(QLabel("Primer Tm (℃)"), 1, 0)
        size_layout.addWidget(PrimerTagLabel("Min", 'PRIMER_MIN_TM'), 1, 1)
        size_layout.addWidget(self.tm_min,1, 2)
        size_layout.addWidget(PrimerTagLabel("Opt", 'PRIMER_OPT_TM'), 1, 3)
        size_layout.addWidget(self.tm_opt, 1, 4)
        size_layout.addWidget(PrimerTagLabel("Max", 'PRIMER_MAX_TM'), 1, 5)
        size_layout.addWidget(self.tm_max, 1, 6)
        size_layout.addWidget(QLabel("Primer GC (%)"), 2, 0)
        size_layout.addWidget(PrimerTagLabel("Min", 'PRIMER_MIN_GC'), 2, 1)
        size_layout.addWidget(self.gc_min, 2, 2)
        size_layout.addWidget(PrimerTagLabel("Opt", 'PRIMER_OPT_GC_PERCENT'), 2, 3)
        size_layout.addWidget(self.gc_opt, 2, 4)
        size_layout.addWidget(PrimerTagLabel("Max", 'PRIMER_MAX_GC'), 2, 5)
        size_layout.addWidget(self.gc_max, 2, 6)
        # --- Advanced settings: Ns, GC clamp, Tm difference, end stability ---
        advance_group = KraitGroupBox("Advanced settings")
        advance_layout = QGridLayout()
        advance_group.setLayout(advance_layout)
        self.gc_clamp = QSpinBox()
        self.gc_clamp.setMaximum(1000)
        self.max_end_stability = QDoubleSpinBox()
        self.max_end_stability.setMaximum(1000)
        self.max_ns = QSpinBox()
        self.max_ns.setMaximum(1000)
        self.max_diff_tm = QDoubleSpinBox()
        self.max_diff_tm.setMaximum(1000)
        advance_layout.addWidget(PrimerTagLabel("Max Ns", 'PRIMER_MAX_NS_ACCEPTED'), 0, 0)
        advance_layout.addWidget(PrimerTagLabel("GC clamp", 'PRIMER_GC_CLAMP'), 1, 0)
        advance_layout.addWidget(self.max_ns, 0, 1)
        advance_layout.addWidget(self.gc_clamp, 1, 1)
        advance_layout.addWidget(PrimerTagLabel("Max Tm Difference", 'PRIMER_PAIR_MAX_DIFF_TM'), 0, 2)
        advance_layout.addWidget(PrimerTagLabel("Max end stability", 'PRIMER_MAX_END_STABILITY'), 1, 2)
        advance_layout.addWidget(self.max_diff_tm, 0, 3)
        advance_layout.addWidget(self.max_end_stability, 1, 3)
        # --- Other settings: user-defined primer3 tags in an editable tree ---
        other_group = KraitGroupBox("Other settings")
        other_layout = QVBoxLayout()
        other_group.setLayout(other_layout)
        self.add_btn = QPushButton(self)
        self.add_btn.clicked.connect(self.add_primer_tag)
        self.add_btn.setIcon(QIcon("icons/plus.svg"))
        self.add_btn.setToolTip("Add primer3 tag")
        self.del_btn = QPushButton(self)
        self.del_btn.clicked.connect(self.del_primer_tag)
        self.del_btn.setIcon(QIcon("icons/minus.svg"))
        self.del_btn.setToolTip("Delete the selected primer3 tag")
        self.clr_btn = QPushButton(self)
        self.clr_btn.clicked.connect(self.clear_primer_tag)
        self.clr_btn.setIcon(QIcon("icons/clear.svg"))
        self.clr_btn.setToolTip("Delete all the primer3 tags")
        btn_layout = QHBoxLayout()
        btn_layout.addWidget(QLabel("Add other primer3 tag settings"))
        btn_layout.addWidget(PrimerTagLabel("learn more", ''), 1)
        btn_layout.addWidget(self.add_btn)
        btn_layout.addWidget(self.del_btn)
        btn_layout.addWidget(self.clr_btn)
        self.tree = PrimerTagTree()
        self.tree.setHeaderLabels(["Primer3 tags", "Value"])
        self.tree.header().setStretchLastSection(False)
        self.tree.header().setSectionResizeMode(0, QHeaderView.Stretch)
        other_layout.addLayout(btn_layout)
        other_layout.addWidget(self.tree, 1)
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(general_group)
        mainLayout.addWidget(advance_group)
        mainLayout.addWidget(other_group, 1)
        self.setLayout(mainLayout)
        # primer3 tag -> editing widget; read/write_settings iterate this table
        # (product_size is the only QLineEdit, handled specially below).
        self._mappings = {
            'PRIMER_PRODUCT_SIZE_RANGE': self.product_size,
            'PRIMER_NUM_RETURN': self.primer_num,
            'PRIMER_MIN_SIZE': self.size_min,
            'PRIMER_OPT_SIZE': self.size_opt,
            'PRIMER_MAX_SIZE': self.size_max,
            'PRIMER_MIN_GC': self.gc_min,
            'PRIMER_OPT_GC_PERCENT': self.gc_opt,
            'PRIMER_MAX_GC': self.gc_max,
            'PRIMER_MIN_TM': self.tm_min,
            'PRIMER_OPT_TM': self.tm_opt,
            'PRIMER_MAX_TM': self.tm_max,
            'PRIMER_MAX_NS_ACCEPTED': self.max_ns,
            'PRIMER_GC_CLAMP': self.gc_clamp,
            'PRIMER_PAIR_MAX_DIFF_TM': self.max_diff_tm,
            'PRIMER_MAX_END_STABILITY': self.max_end_stability,
        }
        self.read_settings()

    def add_primer_tag(self):
        """Append an editable "PRIMER_" placeholder row and start editing it."""
        item = QTreeWidgetItem(self.tree, ["PRIMER_", ''])
        item.setFlags(item.flags() | Qt.ItemIsEditable)
        self.tree.addTopLevelItem(item)
        self.tree.scrollToItem(item)
        self.tree.editItem(item, 0)

    def del_primer_tag(self):
        """Remove the currently selected tag row from the tree."""
        root = self.tree.invisibleRootItem()
        it = self.tree.currentItem()
        root.removeChild(it)

    def clear_primer_tag(self):
        """Remove every user-defined tag row."""
        self.tree.clear()

    def read_settings(self):
        """Populate the widgets (and the extra-tag tree) from the PRIMER group."""
        self.settings.beginGroup("PRIMER")
        for param in self._mappings:
            box = self._mappings[param]
            default, func = PRIMER_PARAMETERS[param]
            if box == self.product_size:
                box.setText(self.settings.value(param, default))
            else:
                box.setValue(self.settings.value(param, default, func))
        # Any stored key without a dedicated widget becomes a tree row.
        for k in self.settings.allKeys():
            if k not in self._mappings:
                item = QTreeWidgetItem(self.tree, [k, self.settings.value(k)])
                item.setFlags(item.flags() | Qt.ItemIsEditable)
                self.tree.addTopLevelItem(item)
        self.settings.endGroup()

    def write_settings(self):
        """Persist widget values and tree rows; drop stored tags no longer in the tree."""
        self.settings.beginGroup("PRIMER")
        for param in self._mappings:
            box = self._mappings[param]
            if box == self.product_size:
                self.settings.setValue(param, box.text())
            else:
                self.settings.setValue(param, box.value())
        # Collect non-empty (tag, value) pairs from the tree.
        params = {}
        for i in range(self.tree.topLevelItemCount()):
            item = self.tree.topLevelItem(i)
            tag, val = item.text(0), item.text(1)
            tag = tag.strip()
            val = val.strip()
            if tag and val:
                params[tag] = val
        # delete stored extra params that were removed from the tree
        for k in self.settings.allKeys():
            if k not in self._mappings:
                if k not in params:
                    self.settings.remove(k)
        # set the remaining extra params
        for k in params:
            self.settings.setValue(k, params[k])
        self.settings.endGroup()
class KraitGroupBox(QGroupBox):
    """Group box with a bold title, shared by all preference panels."""

    def __init__(self, title):
        super().__init__(title)
        self.setStyleSheet("QGroupBox{font-weight: bold;}")
class SearchParameterPanel(QWidget):
    """Preference tab for tandem-repeat search parameters (SSR, cSSR, VNTR,
    iSSR) plus motif standardization, flank length and statistics options.
    Values are persisted via QSettings using the keys in `_mappings`.
    """

    def __init__(self, parent=None):
        super(SearchParameterPanel, self).__init__(parent)
        self.settings = QSettings()
        # --- SSRs: minimum repeat count per motif size ---
        ssr_layout = QGridLayout()
        ssr_group = KraitGroupBox("Microsatellites (SSRs)")
        ssr_group.setLayout(ssr_layout)
        self.mono_box = QSpinBox()
        self.mono_box.setRange(2, 1000)
        self.di_box = QSpinBox()
        self.di_box.setRange(2, 1000)
        self.tri_box = QSpinBox()
        self.tri_box.setRange(2, 1000)
        self.tetra_box = QSpinBox()
        self.tetra_box.setRange(2, 1000)
        self.penta_box = QSpinBox()
        self.penta_box.setRange(2, 1000)
        self.hexa_box = QSpinBox()
        self.hexa_box.setRange(2, 1000)
        ssr_layout.setColumnStretch(1, 1)
        ssr_layout.setColumnStretch(3, 1)
        ssr_layout.setColumnStretch(5, 1)
        ssr_layout.addWidget(QLabel("Minimum repeats required for each type to form an SSR"),
                             0, 0, 1, 6)
        ssr_layout.addWidget(QLabel("Mono"), 1, 0)
        ssr_layout.addWidget(self.mono_box, 1, 1)
        ssr_layout.addWidget(QLabel("Di"), 1, 2)
        ssr_layout.addWidget(self.di_box, 1, 3)
        ssr_layout.addWidget(QLabel("Tri"), 1, 4)
        ssr_layout.addWidget(self.tri_box, 1, 5)
        ssr_layout.addWidget(QLabel("Tetra"), 2, 0)
        ssr_layout.addWidget(self.tetra_box, 2, 1)
        ssr_layout.addWidget(QLabel("Penta"), 2, 2)
        ssr_layout.addWidget(self.penta_box, 2, 3)
        ssr_layout.addWidget(QLabel("Hexa"), 2, 4)
        ssr_layout.addWidget(self.hexa_box, 2, 5)
        # --- cSSRs: max distance between adjacent SSRs ---
        cssr_group = KraitGroupBox("Compound microsatellites (cSSRs)")
        cssr_layout = QHBoxLayout()
        cssr_group.setLayout(cssr_layout)
        self.dmax_box = QSpinBox()
        self.dmax_box.setRange(0, 1000)
        cssr_layout.addWidget(QLabel("Maximum distance allowed between two adjacent SSRs (d<sub>MAX</sub>)"))
        cssr_layout.addWidget(self.dmax_box, 1)
        # --- VNTRs: motif size range and minimum repeats ---
        vntr_group = KraitGroupBox("Minisatellites (VNTRs)")
        vntr_layout = QGridLayout()
        vntr_group.setLayout(vntr_layout)
        self.minmotif_box = QSpinBox()
        self.minmotif_box.setRange(1, 1000)
        self.maxmotif_box = QSpinBox()
        self.maxmotif_box.setRange(1, 1000)
        self.minrep_box = QSpinBox()
        self.minrep_box.setRange(2, 1000)
        vntr_layout.addWidget(QLabel("Min motif size"), 0, 0)
        vntr_layout.addWidget(self.minmotif_box, 0, 1)
        vntr_layout.addWidget(QLabel("Max motif size"), 0, 2)
        vntr_layout.addWidget(self.maxmotif_box, 0, 3)
        vntr_layout.addWidget(QLabel("Min repeats"), 0, 4)
        vntr_layout.addWidget(self.minrep_box, 0, 5)
        # --- iSSRs: seed, error and penalty parameters ---
        itr_group = KraitGroupBox("Imperfect microsatellites (iSSRs)")
        itr_layout = QGridLayout()
        itr_group.setLayout(itr_layout)
        #self.minmsize_box = QSpinBox()
        #self.minmsize_box.setRange(1, 1000)
        #self.maxmsize_box = QSpinBox()
        #self.maxmsize_box.setRange(1, 1000)
        self.minsrep_box = QSpinBox()
        self.minsrep_box.setRange(2, 1000)
        self.minslen_box = QSpinBox()
        self.minslen_box.setRange(1, 1000)
        self.maxerr_box = QSpinBox()
        self.maxerr_box.setRange(0, 1000)
        self.subpena_box = QDoubleSpinBox()
        self.subpena_box.setRange(0, 100)
        self.subpena_box.setSingleStep(0.1)
        self.inspena_box = QDoubleSpinBox()
        self.inspena_box.setRange(0, 100)
        self.inspena_box.setSingleStep(0.1)
        self.delpena_box = QDoubleSpinBox()
        self.delpena_box.setRange(0, 100)
        self.delpena_box.setSingleStep(0.1)
        self.matratio_box = QDoubleSpinBox()
        self.matratio_box.setSingleStep(0.05)
        self.matratio_box.setRange(0, 1)
        self.maxextend_box = QSpinBox()
        self.maxextend_box.setMaximum(1000000)
        self.maxextend_box.setSingleStep(50)
        #itr_layout.addWidget(QLabel("Min motif size"), 0, 0)
        #itr_layout.addWidget(QLabel("Max motif size"), 1, 0)
        itr_layout.addWidget(QLabel("Min seed repeats"),0, 0)
        itr_layout.addWidget(QLabel("Min seed length"), 1, 0)
        itr_layout.addWidget(QLabel("Max continuous errors"), 2, 0)
        itr_layout.addWidget(QLabel("Max extend length"), 3, 0)
        #itr_layout.addWidget(self.minmsize_box, 0, 1)
        #itr_layout.addWidget(self.maxmsize_box, 1, 1)
        itr_layout.addWidget(self.minsrep_box, 0, 1)
        itr_layout.addWidget(self.minslen_box, 1, 1)
        itr_layout.addWidget(self.maxerr_box, 2, 1)
        itr_layout.addWidget(self.maxextend_box, 3, 1)
        itr_layout.addWidget(QLabel("Substitution penalty"), 0, 2)
        itr_layout.addWidget(QLabel("Insertion penalty"), 1, 2)
        itr_layout.addWidget(QLabel("Deletion penalty"), 2, 2)
        itr_layout.addWidget(QLabel("Min match ratio"), 3, 2)
        itr_layout.addWidget(self.subpena_box, 0, 3)
        itr_layout.addWidget(self.inspena_box, 1, 3)
        itr_layout.addWidget(self.delpena_box, 2, 3)
        itr_layout.addWidget(self.matratio_box, 3, 3)
        # --- misc: standardization level, flank length, statistics options ---
        other_layout = QHBoxLayout()
        level_group = KraitGroupBox("Motif standardization")
        other_layout.addWidget(level_group)
        level_layout = QHBoxLayout()
        level_group.setLayout(level_layout)
        self.level_box = QComboBox()
        self.level_box.addItems(["Level {}".format(i) for i in range(5)])
        level_layout.addWidget(QLabel("Level"))
        level_layout.addWidget(self.level_box, 1)
        flank_group = KraitGroupBox("Flank sequence")
        other_layout.addWidget(flank_group)
        flank_layout = QHBoxLayout()
        flank_group.setLayout(flank_layout)
        self.flank_box = QSpinBox()
        self.flank_box.setMaximum(10000)
        flank_layout.addWidget(QLabel("Length"))
        flank_layout.addWidget(self.flank_box, 1)
        stats_group = KraitGroupBox("Statistics")
        other_layout.addWidget(stats_group)
        stats_layout = QHBoxLayout()
        stats_group.setLayout(stats_layout)
        self.unit_box = QComboBox()
        self.unit_box.addItems(["Mb", "Kb"])
        self.ns_box = QComboBox()
        self.ns_box.addItems(["exclude", "include"])
        stats_layout.addWidget(QLabel("Unit"))
        stats_layout.addWidget(self.unit_box, 1)
        stats_layout.addWidget(QLabel("Ns"))
        stats_layout.addWidget(self.ns_box, 1)
        main_layout = QVBoxLayout()
        main_layout.addWidget(ssr_group)
        main_layout.addWidget(cssr_group)
        main_layout.addWidget(itr_group)
        main_layout.addWidget(vntr_group)
        main_layout.addLayout(other_layout)
        self.setLayout(main_layout)
        # Settings key -> editing widget. NOTE(review): 'STAT/unkown' is
        # misspelled but is the persisted key — renaming it would orphan
        # previously saved settings; confirm before changing.
        self._mappings = {
            'SSR/mono': self.mono_box,
            'SSR/di': self.di_box,
            'SSR/tri': self.tri_box,
            'SSR/tetra': self.tetra_box,
            'SSR/penta': self.penta_box,
            'SSR/hexa': self.hexa_box,
            'CSSR/dmax': self.dmax_box,
            'VNTR/minmotif': self.minmotif_box,
            'VNTR/maxmotif': self.maxmotif_box,
            'VNTR/minrep': self.minrep_box,
            #'ITR/minmsize': self.minmsize_box,
            #'ITR/maxmsize': self.maxmsize_box,
            'ISSR/minsrep': self.minsrep_box,
            'ISSR/minslen': self.minslen_box,
            'ISSR/maxerr': self.maxerr_box,
            'ISSR/subpena': self.subpena_box,
            'ISSR/inspena': self.inspena_box,
            'ISSR/delpena': self.delpena_box,
            'ISSR/matratio': self.matratio_box,
            'ISSR/maxextend': self.maxextend_box,
            'STR/level': self.level_box,
            'STR/flank': self.flank_box,
            'STAT/unit': self.unit_box,
            'STAT/unkown': self.ns_box
        }
        self.read_settings()

    def read_settings(self):
        """Load every mapped parameter from QSettings into its widget."""
        for param in self._mappings:
            box = self._mappings[param]
            default, func = KRAIT_PARAMETERS[param]
            if isinstance(box, QComboBox):
                box.setCurrentIndex(self.settings.value(param, default, func))
            else:
                box.setValue(self.settings.value(param, default, func))

    def write_settings(self):
        """Persist every mapped widget value back to QSettings."""
        for param in self._mappings:
            box = self._mappings[param]
            if isinstance(box, QComboBox):
                self.settings.setValue(param, box.currentIndex())
            else:
                self.settings.setValue(param, box.value())
class PreferenceDialog(QDialog):
    """Modal preferences dialog with Search and Primer parameter tabs."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.settings = QSettings()
        self.setWindowTitle(self.tr("Preferences"))
        #self.setMinimumWidth(500)
        self.search_panel = SearchParameterPanel(self)
        self.primer_panel = PrimerParameterPanel(self)
        self.tab_widget = QTabWidget()
        self.tab_widget.addTab(self.search_panel, 'Search')
        self.tab_widget.addTab(self.primer_panel, 'Primer')
        btn_box = QDialogButtonBox(QDialogButtonBox.RestoreDefaults | QDialogButtonBox.Save | QDialogButtonBox.Cancel)
        # Save closes the dialog AND persists; slots fire in connection order,
        # so accept() runs before write_settings().
        btn_box.accepted.connect(self.accept)
        btn_box.rejected.connect(self.reject)
        btn_box.accepted.connect(self.write_settings)
        btn_box.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.restore_settings)
        #spacer = QSpacerItem(10, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)
        layout = QVBoxLayout()
        layout.addWidget(self.tab_widget)
        #layout.addItem(spacer)
        layout.addWidget(btn_box)
        self.setLayout(layout)

    def write_settings(self):
        """Persist both parameter panels."""
        self.search_panel.write_settings()
        self.primer_panel.write_settings()

    def restore_settings(self):
        """Wipe ALL stored settings, reload defaults into both panels, re-persist."""
        self.settings.clear()
        self.search_panel.read_settings()
        self.primer_panel.clear_primer_tag()
        self.primer_panel.read_settings()
        self.write_settings()

    def goto_primer_panel(self):
        """Switch the dialog to the Primer tab (index 1)."""
        self.tab_widget.setCurrentIndex(1)
|
import os
import datetime
from pyspark import SparkContext
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession, Row, DataFrame, Column
from pyspark.sql import functions as F
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS, ALSModel
from pyspark.ml.feature import StringIndexer, IndexToString
from pyspark.sql import Row
from pyspark.ml import PipelineModel
from utilities.common import utc_now_ts as now
from utilities.similarity import compute_item_similarity
from shopping.models import Review
from flask import current_app as app
class SparkApp(object):
def __init__(self, executor_memory="2560m", master_uri=None, mongo_uri=None,
             deploy_mode="cluster"):
    """Store Spark/Mongo connection settings, falling back to the Flask config."""
    self.executor_memory = executor_memory
    self.deploy_mode = deploy_mode
    if master_uri is None:
        self.master_uri = app.config.get("SPARK_MASTER_URI")
    else:
        self.master_uri = master_uri
    if mongo_uri is None:
        self.MONGO_URI = app.config.get("MONGODB_SETTINGS").get("host")
    else:
        self.MONGO_URI = mongo_uri
def create_spark_app(self):
    """Create (or reuse) a SparkSession wired to the Mongo Spark connector."""
    builder = SparkSession.builder.master(self.master_uri)
    builder = builder.config("spark.jars.packages", "org.mongodb.spark:mongo-spark-connector_2.11:2.2.3")
    builder = builder.config("spark.driver.memoryOverhead", "2g")
    builder = builder.config("spark.executor.memoryOverhead", "1g")
    self.spark = builder.getOrCreate()
def return_col(self, col_name="products"):
    """Load a MongoDB collection as a DataFrame and register it as a temp view."""
    col_uri = self.MONGO_URI + "." + col_name
    reader = self.spark.read.format("com.mongodb.spark.sql.DefaultSource")
    frame = reader.option("uri", col_uri).load()
    frame.createOrReplaceTempView(col_name)
    return frame
def return_all_books(self):
    """Extract distinct non-null product titles and overwrite product_name."""
    self.create_spark_app()
    products = self.return_col(col_name="products")
    # Distinct, non-null titles only.
    titles = products.select("title").where(F.col("title").isNotNull()).distinct()
    # Overwrite the product_name collection with the fresh title list.
    target_uri = self.MONGO_URI + ".product_name"
    writer = titles.write.format("com.mongodb.spark.sql.DefaultSource")
    writer.mode("overwrite").option("uri", target_uri).save()
    self.spark.stop()
def return_best_books(self, minimum_count=10, limit_count=10):
    """Append the top `limit_count` products (by average rating, then review
    count, requiring at least `minimum_count` reviews) to best_product,
    stamped with the creation time.
    """
    self.create_spark_app()
    products = self.return_col(col_name="products")
    reviews = self.return_col(col_name="reviews")
    # Average rating and review count per product, best first.
    grouped = reviews\
        .groupBy("asin")\
        .agg(F.count("asin").alias("cnt"), F.avg("overall").alias("avgOverall"))\
        .sort(F.desc("avgOverall"), F.desc("cnt"))\
        .select("asin", "avgOverall", "cnt")\
        .filter(F.col("cnt") >= minimum_count)\
        .limit(limit_count)
    grouped.cache()
    # Join back to products to pick up title/price/image/description.
    bestReviews = grouped.alias('g')\
        .join(
            products.alias('p'),
            F.col("g.asin") == F.col("p.asin")
        )\
        .select(
            F.col("g.asin").alias("asin"),
            F.col("g.avgOverall").alias("ao"),
            F.col("p.title").alias("t"),
            F.col("p.price").alias("p"),
            F.col("p.imUrl").alias("iu"),
            F.col("p.description").alias("d"),
            F.col("g.cnt").alias("cnt"),
        )
    createdDate = now()
    bestReviews_with_dates = bestReviews.withColumn(
        "cd",
        F.lit(createdDate)
    )
    best_product_uri = self.MONGO_URI + ".best_product"
    bestReviews_with_dates.write\
        .format("com.mongodb.spark.sql.DefaultSource")\
        .mode("append")\
        .option("uri", best_product_uri)\
        .save()
    self.spark.catalog.clearCache()
    self.spark.stop()
def make_recommendation_model(self):
    """Train an ALS model on review ratings and append per-user top-10
    recommendations for currently active users to recommendation_table.
    """
    self.create_spark_app()
    reviews = self.return_col(col_name="reviews")
    users = self.return_col(col_name="user")
    selectedDf = reviews\
        .select("rI", "pI", "overall")
    # Develop Model
    # NOTE(review): coldStartStrategy="nan" keeps NaN predictions for unseen
    # users/items instead of dropping them — confirm this is intended.
    als = ALS(
        maxIter=10, regParam=0.01, userCol="rI",
        itemCol="pI", ratingCol="overall",
        coldStartStrategy="nan"
    )
    alsModel = als.fit(selectedDf)
    # Only make recommendation for active users
    activeUsers = [x.rI for x in users.select("rI").collect()]
    selectedData = selectedDf[selectedDf.rI.isin(activeUsers)]
    num_recommends = 10
    recommendationData = alsModel.recommendForUserSubset(
        selectedData,
        num_recommends
    )
    # Rebuild the recommendations array with ratings cast to double so the
    # structs serialize cleanly to MongoDB.
    recommendations = F.array(*[F.struct(
        F.col("recommendations")[i]["pI"].alias("pI"),
        F.col("recommendations")[i]["rating"].cast("double").alias("rating")
    ) for i in range(num_recommends)])
    recommendationData = recommendationData\
        .withColumn("recommendations", recommendations)
    createdDate = now()
    recommendationData = recommendationData.withColumn(
        "dc",
        F.lit(createdDate)
    )
    # Save data to mongodb
    col_name = self.MONGO_URI + ".recommendation_table"
    recommendationData.write\
        .format("com.mongodb.spark.sql.DefaultSource")\
        .mode("append")\
        .option("uri", col_name)\
        .save()
    self.spark.catalog.clearCache()
    self.spark.stop()
def make_similarity_table(self):
self.create_spark_app()
reviews = self.return_col(col_name="reviews")
selectedDf = reviews\
.select("reviewerID", "asin", "overall")
itemBased = compute_item_similarity(selectedDf, user_col='reviewerID',
item_col='asin', rating_col='overall',
method='cosine', use_persist=False)
col_name = self.MONGO_URI + ".similarity_table"
itemBased.write\
.format("com.mongodb.spark.sql.DefaultSource")\
.mode("overwrite")\
.option("uri", col_name)\
.save()
    def convertIdToInteger(self):
        """Add integer index columns for reviewer and product string ids:
        ``rI`` (from ``reviewerID``) and ``pI`` (from ``asin``), then write the
        transformed frames back to the ``reviews`` and ``products`` MongoDB
        collections.

        Side effects: starts a Spark app, writes to MongoDB, clears the Spark
        cache and stops the session. Returns nothing.
        """
        self.create_spark_app()
        products = self.return_col("products")
        reviews = self.return_col(col_name="reviews")
        # "keep" puts unseen ids in an extra bucket instead of failing.
        reviewerIndexer = StringIndexer(
            inputCol="reviewerID",
            outputCol="rI",
            handleInvalid="keep"
        )
        productIndexer = StringIndexer(
            inputCol="asin",
            outputCol="pI",
            handleInvalid="keep"
        )
        # Fit the product indexer on the products catalog so reviews reuse the
        # exact same asin -> pI mapping.
        productStringModel = productIndexer.fit(products)
        productTransformed = productStringModel.transform(products)
        reviewStringModel = reviewerIndexer.fit(reviews)
        reviewTransformed = reviewStringModel.transform(reviews)
        reviewTransformed = productStringModel.transform(reviewTransformed)
        # Save data to mongodb
        # NOTE(review): mode("append") into the same collections the data was
        # read from looks like it would duplicate documents — confirm intent.
        reviews_col = self.MONGO_URI + ".reviews"
        reviewTransformed.write\
            .format("com.mongodb.spark.sql.DefaultSource")\
            .mode("append")\
            .option("uri", reviews_col)\
            .save()
        product_col = self.MONGO_URI + ".products"
        productTransformed.write\
            .format("com.mongodb.spark.sql.DefaultSource")\
            .mode("append")\
            .option("uri", product_col)\
            .save()
        self.spark.catalog.clearCache()
        self.spark.stop()
    def convert_productid(self):
        """Add an integer product index column ``pI`` (from ``asin``) to both
        the ``products`` and ``reviews`` frames and write them back to their
        MongoDB collections. Subset of :meth:`convertIdToInteger` that leaves
        reviewer ids untouched.

        Side effects: starts a Spark app, writes to MongoDB, clears the Spark
        cache and stops the session. Returns nothing.
        """
        self.create_spark_app()
        products = self.return_col("products")
        reviews = self.return_col(col_name="reviews")
        # "keep" puts unseen asins in an extra bucket instead of failing.
        productIndexer = StringIndexer(
            inputCol="asin",
            outputCol="pI",
            handleInvalid="keep"
        )
        # Fit on the products catalog; reviews reuse the same asin -> pI map.
        productStringModel = productIndexer.fit(products)
        productTransformed = productStringModel.transform(products)
        reviewTransformed = productStringModel.transform(reviews)
        # Save data to mongodb
        # NOTE(review): mode("append") into the source collections looks like
        # it would duplicate documents — confirm intent.
        reviews_col = self.MONGO_URI + ".reviews"
        reviewTransformed.write\
            .format("com.mongodb.spark.sql.DefaultSource")\
            .mode("append")\
            .option("uri", reviews_col)\
            .save()
        product_col = self.MONGO_URI + ".products"
        productTransformed.write\
            .format("com.mongodb.spark.sql.DefaultSource")\
            .mode("append")\
            .option("uri", product_col)\
            .save()
        self.spark.catalog.clearCache()
        self.spark.stop()
    def convert_reviewerid(self):
        """Index reviewer string ids into an integer column ``rI`` and append
        a one-column (``rI`` only) frame to the MongoDB ``reviews`` collection.

        Side effects: starts a Spark app, writes to MongoDB, clears the Spark
        cache and stops the session. Returns nothing.
        """
        self.create_spark_app()
        reviews = self.return_col(col_name="reviews")
        reviews = reviews.select("reviewerID")
        # NOTE(review): unlike the sibling methods this indexer omits
        # handleInvalid="keep" (default "error") — confirm this is deliberate.
        reviewerIndexer = StringIndexer(
            inputCol="reviewerID",
            outputCol="rI",
        )
        reviewStringModel = reviewerIndexer.fit(reviews)
        reviewTransformed = reviewStringModel.transform(reviews)
        # Only the new integer column is persisted.
        reviewTransformed = reviewTransformed.select("rI")
        # Save data to mongodb
        reviews_col = self.MONGO_URI + ".reviews"
        reviewTransformed.write\
            .format("com.mongodb.spark.sql.DefaultSource")\
            .mode("append")\
            .option("uri", reviews_col)\
            .save()
        self.spark.catalog.clearCache()
        self.spark.stop()
|
<reponame>easyopsapis/easyops-api-python
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: delete_container.proto
# Defines three proto3 messages in package "container":
#   DeleteContainerRequest{id}, DeleteContainerResponse{id},
#   DeleteContainerResponseWrapper{code, codeExplain, error, data}.
import sys
# _b: identity on Python 2, latin-1 encode on Python 3 — used to carry the
# serialized file descriptor below as bytes on both interpreter majors.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
  name='delete_container.proto',
  package='container',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x16\x64\x65lete_container.proto\x12\tcontainer\"$\n\x16\x44\x65leteContainerRequest\x12\n\n\x02id\x18\x01 \x01(\t\"%\n\x17\x44\x65leteContainerResponse\x12\n\n\x02id\x18\x01 \x01(\t\"\x84\x01\n\x1e\x44\x65leteContainerResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x30\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\".container.DeleteContainerResponseb\x06proto3')
)
_DELETECONTAINERREQUEST = _descriptor.Descriptor(
  name='DeleteContainerRequest',
  full_name='container.DeleteContainerRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='container.DeleteContainerRequest.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=37,
  serialized_end=73,
)
_DELETECONTAINERRESPONSE = _descriptor.Descriptor(
  name='DeleteContainerResponse',
  full_name='container.DeleteContainerResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='container.DeleteContainerResponse.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=75,
  serialized_end=112,
)
_DELETECONTAINERRESPONSEWRAPPER = _descriptor.Descriptor(
  name='DeleteContainerResponseWrapper',
  full_name='container.DeleteContainerResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='container.DeleteContainerResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='container.DeleteContainerResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='container.DeleteContainerResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='container.DeleteContainerResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=115,
  serialized_end=247,
)
# Wire the message-typed "data" field to its descriptor, register the file,
# then build and register the concrete message classes via the metaclass.
_DELETECONTAINERRESPONSEWRAPPER.fields_by_name['data'].message_type = _DELETECONTAINERRESPONSE
DESCRIPTOR.message_types_by_name['DeleteContainerRequest'] = _DELETECONTAINERREQUEST
DESCRIPTOR.message_types_by_name['DeleteContainerResponse'] = _DELETECONTAINERRESPONSE
DESCRIPTOR.message_types_by_name['DeleteContainerResponseWrapper'] = _DELETECONTAINERRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeleteContainerRequest = _reflection.GeneratedProtocolMessageType('DeleteContainerRequest', (_message.Message,), {
  'DESCRIPTOR' : _DELETECONTAINERREQUEST,
  '__module__' : 'delete_container_pb2'
  # @@protoc_insertion_point(class_scope:container.DeleteContainerRequest)
  })
_sym_db.RegisterMessage(DeleteContainerRequest)
DeleteContainerResponse = _reflection.GeneratedProtocolMessageType('DeleteContainerResponse', (_message.Message,), {
  'DESCRIPTOR' : _DELETECONTAINERRESPONSE,
  '__module__' : 'delete_container_pb2'
  # @@protoc_insertion_point(class_scope:container.DeleteContainerResponse)
  })
_sym_db.RegisterMessage(DeleteContainerResponse)
DeleteContainerResponseWrapper = _reflection.GeneratedProtocolMessageType('DeleteContainerResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _DELETECONTAINERRESPONSEWRAPPER,
  '__module__' : 'delete_container_pb2'
  # @@protoc_insertion_point(class_scope:container.DeleteContainerResponseWrapper)
  })
_sym_db.RegisterMessage(DeleteContainerResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
<gh_stars>1-10
import unittest
from datetime import date, datetime
from pypika import Criterion, EmptyCriterion, Field, Table
from pypika import functions as fn
from pypika.queries import QueryBuilder
from pypika.terms import Mod
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class CriterionTests(unittest.TestCase):
    """SQL rendering of single comparison criteria (=, <>, <, >, <=, >=,
    IS NULL, bitwise AND) against numbers, strings, dates, datetimes and
    booleans, with and without a table alias, and with operands on either
    side of the operator.
    """
    t = Table("test", alias="crit")
    def test__criterion_with_alias(self):
        c1 = (Field("foo") == Field("bar")).as_("criterion")
        self.assertEqual('"foo"="bar"', str(c1))
        self.assertEqual(
            '"foo"="bar" "criterion"',
            c1.get_sql(with_alias=True, quote_char='"', alias_quote_char='"'),
        )
    def test__criterion_eq_number(self):
        c1 = Field("foo") == 1
        c2 = Field("foo", table=self.t).eq(0)
        c3 = Field("foo", table=self.t) == -1
        self.assertEqual('"foo"=1', str(c1))
        self.assertEqual('"crit"."foo"=0', str(c2))
        self.assertEqual('"crit"."foo"=-1', str(c3))
    def test__criterion_eq_decimal(self):
        c1 = Field("foo") == 1.0
        c2 = Field("foo", table=self.t).eq(0.5)
        self.assertEqual('"foo"=1.0', str(c1))
        self.assertEqual('"crit"."foo"=0.5', str(c2))
    def test__criterion_eq_bool(self):
        c1 = Field("foo") == True  # noqa: E712
        c2 = Field("foo", table=self.t).eq(True)
        c3 = Field("foo") == False  # noqa: E712
        c4 = Field("foo", table=self.t).eq(False)
        self.assertEqual('"foo"=true', str(c1))
        self.assertEqual('"crit"."foo"=true', str(c2))
        self.assertEqual('"foo"=false', str(c3))
        self.assertEqual('"crit"."foo"=false', str(c4))
    def test__criterion_eq_str(self):
        c1 = Field("foo") == "abc"
        c2 = Field("foo", table=self.t).eq("abc")
        self.assertEqual("\"foo\"='abc'", str(c1))
        self.assertEqual('"crit"."foo"=\'abc\'', str(c2))
    def test__criterion_eq_date(self):
        c1 = Field("foo") == date(2000, 1, 1)
        c2 = Field("foo", table=self.t).eq(date(2000, 1, 1))
        self.assertEqual("\"foo\"='2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"=\'2000-01-01\'', str(c2))
    def test__criterion_eq_datetime(self):
        c1 = Field("foo") == datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).eq(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"='2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"=\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_eq_right(self):
        c1 = 1 == Field("foo")
        c2 = -1 == Field("foo", table=self.t)
        self.assertEqual('"foo"=1', str(c1))
        self.assertEqual('"crit"."foo"=-1', str(c2))
    def test__criterion_is_null(self):
        c1 = Field("foo").isnull()
        c2 = Field("foo", table=self.t).isnull()
        self.assertEqual('"foo" IS NULL', str(c1))
        self.assertEqual('"crit"."foo" IS NULL', str(c2))
    def test__criterion_is_null_with_alias(self):
        c1 = Field("foo").isnull().as_("alias")
        c2 = Field("foo", table=self.t).isnull().as_("alias")
        self.assertEqual('"foo" IS NULL "alias"', str(c1))
        self.assertEqual('"crit"."foo" IS NULL "alias"', str(c2))
    def test__criterion_ne_number(self):
        c1 = Field("foo") != 1
        c2 = Field("foo", table=self.t).ne(0)
        c3 = Field("foo") != -1
        self.assertEqual('"foo"<>1', str(c1))
        self.assertEqual('"crit"."foo"<>0', str(c2))
        self.assertEqual('"foo"<>-1', str(c3))
    def test__criterion_ne_str(self):
        c1 = Field("foo") != "abc"
        c2 = Field("foo", table=self.t).ne("abc")
        self.assertEqual("\"foo\"<>'abc'", str(c1))
        self.assertEqual('"crit"."foo"<>\'abc\'', str(c2))
    def test__criterion_ne_date(self):
        c1 = Field("foo") != date(2000, 1, 1)
        c2 = Field("foo", table=self.t).ne(date(2000, 1, 1))
        self.assertEqual("\"foo\"<>'2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"<>\'2000-01-01\'', str(c2))
    def test__criterion_ne_datetime(self):
        c1 = Field("foo") != datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).ne(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"<>'2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"<>\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_ne_right(self):
        c1 = 1 != Field("foo")
        c2 = -1 != Field("foo", table=self.t)
        self.assertEqual('"foo"<>1', str(c1))
        self.assertEqual('"crit"."foo"<>-1', str(c2))
    def test__criterion_lt_number(self):
        c1 = Field("foo") < 1
        c2 = Field("foo", table=self.t).lt(0)
        c3 = Field("foo") < -1
        self.assertEqual('"foo"<1', str(c1))
        self.assertEqual('"crit"."foo"<0', str(c2))
        self.assertEqual('"foo"<-1', str(c3))
    def test__criterion_lt_date(self):
        c1 = Field("foo") < date(2000, 1, 1)
        c2 = Field("foo", table=self.t).lt(date(2000, 1, 1))
        self.assertEqual("\"foo\"<'2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"<\'2000-01-01\'', str(c2))
    def test__criterion_lt_datetime(self):
        c1 = Field("foo") < datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).lt(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"<'2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"<\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_lt_right(self):
        c1 = 1 > Field("foo")
        c2 = -1 > Field("foo", table=self.t)
        self.assertEqual('"foo"<1', str(c1))
        self.assertEqual('"crit"."foo"<-1', str(c2))
    def test__criterion_gt_number(self):
        c1 = Field("foo") > 1
        c2 = Field("foo", table=self.t).gt(0)
        c3 = Field("foo") > -1
        self.assertEqual('"foo">1', str(c1))
        self.assertEqual('"crit"."foo">0', str(c2))
        self.assertEqual('"foo">-1', str(c3))
    def test__criterion_gt_date(self):
        c1 = Field("foo") > date(2000, 1, 1)
        c2 = Field("foo", table=self.t).gt(date(2000, 1, 1))
        self.assertEqual("\"foo\">'2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo">\'2000-01-01\'', str(c2))
    def test__criterion_gt_datetime(self):
        c1 = Field("foo") > datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).gt(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\">'2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo">\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_gt_right(self):
        c1 = 1 < Field("foo")
        c2 = -1 < Field("foo", table=self.t)
        self.assertEqual('"foo">1', str(c1))
        self.assertEqual('"crit"."foo">-1', str(c2))
    def test__criterion_lte_number(self):
        c1 = Field("foo") <= 1
        c2 = Field("foo", table=self.t).lte(0)
        c3 = Field("foo") <= -1
        self.assertEqual('"foo"<=1', str(c1))
        self.assertEqual('"crit"."foo"<=0', str(c2))
        self.assertEqual('"foo"<=-1', str(c3))
    def test__criterion_lte_date(self):
        c1 = Field("foo") <= date(2000, 1, 1)
        c2 = Field("foo", table=self.t).lte(date(2000, 1, 1))
        self.assertEqual("\"foo\"<='2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"<=\'2000-01-01\'', str(c2))
    def test__criterion_lte_datetime(self):
        c1 = Field("foo") <= datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).lte(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"<='2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"<=\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_lte_right(self):
        c1 = 1 >= Field("foo")
        c2 = -1 >= Field("foo", table=self.t)
        self.assertEqual('"foo"<=1', str(c1))
        self.assertEqual('"crit"."foo"<=-1', str(c2))
    def test__criterion_gte_number(self):
        c1 = Field("foo") >= 1
        c2 = Field("foo", table=self.t).gte(0)
        c3 = Field("foo") >= -1
        self.assertEqual('"foo">=1', str(c1))
        self.assertEqual('"crit"."foo">=0', str(c2))
        self.assertEqual('"foo">=-1', str(c3))
    def test__criterion_gte_date(self):
        c1 = Field("foo") >= date(2000, 1, 1)
        c2 = Field("foo", table=self.t).gte(date(2000, 1, 1))
        self.assertEqual("\"foo\">='2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo">=\'2000-01-01\'', str(c2))
    def test__criterion_gte_datetime(self):
        c1 = Field("foo") >= datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).gte(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\">='2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo">=\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_gte_right(self):
        c1 = 1 <= Field("foo")
        c2 = -1 <= Field("foo", table=self.t)
        self.assertEqual('"foo">=1', str(c1))
        self.assertEqual('"crit"."foo">=-1', str(c2))
    def test__criterion_bitwise_and(self):
        c1 = Field("foo").bitwiseand(2)
        c2 = Field("foo", table=self.t).bitwiseand(10) == 2
        self.assertEqual('("foo" & 2)', str(c1))
        self.assertEqual('("crit"."foo" & 10)=2', str(c2))
    def test__criterion_bitwise_and_with_alias(self):
        c1 = Field("foo").bitwiseand(2).as_("alias")
        self.assertEqual('("foo" & 2) "alias"', str(c1))
    def test__bitwise_and_in_where_clause(self):
        q = QueryBuilder().from_("items").select("abc").where(Field("foo").bitwiseand(1) == 1)
        self.assertEqual('SELECT "abc" FROM "items" WHERE ("foo" & 1)=1', str(q))
class NotTests(unittest.TestCase):
    """Negated criteria: ``negate()``, ``notnull()``, ``~`` on compound
    criteria, aliasing of negated terms, and table replacement.
    """
    table_abc, table_efg = Table("abc", alias="cx0"), Table("efg", alias="cx1")
    def test_negate(self):
        c1 = Field("foo") >= 1
        c2 = c1.negate()
        self.assertEqual('"foo">=1', str(c1))
        self.assertEqual('NOT "foo">=1', str(c2))
    def test_variable_access(self):
        c1 = Field("foo").negate()
        self.assertEqual(c1.is_aggregate, False)
    def test_chained_function(self):
        field1 = Field("foo").negate()
        field2 = field1.eq("bar")
        self.assertEqual('NOT "foo"', str(field1))
        self.assertEqual("NOT \"foo\"='bar'", str(field2))
        # Chaining must not mutate the original term in place.
        self.assertIsNot(field1, field2)
    def test_not_null(self):
        c1 = Field("foo").notnull()
        c2 = Field("foo", table=self.table_abc).notnull()
        self.assertEqual('NOT "foo" IS NULL', str(c1))
        self.assertEqual('NOT "cx0"."foo" IS NULL', str(c2))
    def test_not_null_with_alias(self):
        c1 = Field("foo").notnull().as_("something")
        c2 = Field("foo", table=self.table_abc).notnull().as_("something")
        self.assertEqual('NOT "foo" IS NULL "something"', str(c1))
        self.assertEqual('NOT "cx0"."foo" IS NULL "something"', str(c2))
    def test_notnullcriterion_replace_table(self):
        f = self.table_abc.foo.notnull().replace_table(self.table_abc, self.table_efg)
        self.assertEqual('NOT "cx1"."foo" IS NULL', str(f))
    def test_not_with_or_criterion(self):
        self.assertEqual('NOT ("foo" OR "bar")', str(~(Field("foo") | Field("bar"))))
    def test_not_with_and_criterion(self):
        self.assertEqual('NOT ("foo" AND "bar")', str(~(Field("foo") & Field("bar"))))
    def test_not_with_complex_criterion(self):
        self.assertEqual(
            'NOT ("foo" AND "bar" AND "fizz" AND "buzz")',
            str(~(Field("foo") & Field("bar") & Field("fizz") & Field("buzz"))),
        )
class BetweenTests(unittest.TestCase):
    """BETWEEN rendering via ``between()`` and the slice syntax
    ``Field("foo")[lower:upper]``, for numbers, dates and datetimes;
    slicing with a non-slice index must raise TypeError.
    """
    t = Table("abc", alias="btw")
    def test__between_number(self):
        c1 = Field("foo").between(0, 1)
        c2 = Field("foo", table=self.t).between(0, 1)
        c3 = Field("foo")[0:1]
        self.assertEqual('"foo" BETWEEN 0 AND 1', str(c1))
        self.assertEqual('"btw"."foo" BETWEEN 0 AND 1', str(c2))
        self.assertEqual('"foo" BETWEEN 0 AND 1', str(c3))
    def test__between_with_alias(self):
        c1 = Field("foo").between(0, 1).as_("alias")
        c2 = Field("foo", table=self.t).between(0, 1).as_("alias")
        c3 = Field("foo")[0:1].as_("alias")
        self.assertEqual('"foo" BETWEEN 0 AND 1 "alias"', str(c1))
        self.assertEqual('"btw"."foo" BETWEEN 0 AND 1 "alias"', str(c2))
        self.assertEqual('"foo" BETWEEN 0 AND 1 "alias"', str(c3))
    def test__between_date(self):
        c1 = Field("foo").between(date(2000, 1, 1), date(2000, 12, 31))
        c2 = Field("foo", table=self.t).between(date(2000, 1, 1), date(2000, 12, 31))
        c3 = Field("foo")[date(2000, 1, 1) : date(2000, 12, 31)]
        self.assertEqual("\"foo\" BETWEEN '2000-01-01' AND '2000-12-31'", str(c1))
        self.assertEqual("\"btw\".\"foo\" BETWEEN '2000-01-01' AND '2000-12-31'", str(c2))
        self.assertEqual("\"foo\" BETWEEN '2000-01-01' AND '2000-12-31'", str(c3))
    def test__between_datetime(self):
        c1 = Field("foo").between(datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59))
        c2 = Field("foo", table=self.t).between(
            datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)
        )
        c3 = Field("foo")[datetime(2000, 1, 1, 0, 0, 0) : datetime(2000, 12, 31, 23, 59, 59)]
        self.assertEqual("\"foo\" BETWEEN '2000-01-01T00:00:00' AND '2000-12-31T23:59:59'", str(c1))
        self.assertEqual(
            "\"btw\".\"foo\" BETWEEN '2000-01-01T00:00:00' AND '2000-12-31T23:59:59'",
            str(c2),
        )
        self.assertEqual("\"foo\" BETWEEN '2000-01-01T00:00:00' AND '2000-12-31T23:59:59'", str(c3))
    def test__function_between(self):
        c1 = fn.Coalesce(Field("foo"), 0)[0:1]
        c2 = fn.Coalesce(Field("foo", table=self.t), 0)[0:1]
        self.assertEqual('COALESCE("foo",0) BETWEEN 0 AND 1', str(c1))
        self.assertEqual('COALESCE("btw"."foo",0) BETWEEN 0 AND 1', str(c2))
    def test_get_item_only_works_with_slice(self):
        with self.assertRaises(TypeError):
            Field("foo")[0]
        with self.assertRaises(TypeError):
            Field("foo")[date(2000, 1, 1)]
        with self.assertRaises(TypeError):
            Field("foo")[datetime(2000, 1, 1, 0, 0, 0)]
class IsInTests(unittest.TestCase):
    """IN-list rendering via ``isin()`` for numbers, strings, dates,
    datetimes, aliased criteria and function-wrapped terms.
    """
    t = Table("abc", alias="isin")
    def test__in_number(self):
        c1 = Field("foo").isin([0, 1])
        c2 = Field("foo", table=self.t).isin([0, 1])
        self.assertEqual('"foo" IN (0,1)', str(c1))
        self.assertEqual('"isin"."foo" IN (0,1)', str(c2))
    def test__in_number_with_alias(self):
        c1 = Field("foo").isin([0, 1]).as_("alias")
        c2 = Field("foo", table=self.t).isin([0, 1]).as_("alias")
        self.assertEqual('"foo" IN (0,1) "alias"', str(c1))
        self.assertEqual('"isin"."foo" IN (0,1) "alias"', str(c2))
    def test__in_character(self):
        c1 = Field("foo").isin(["a", "b"])
        c2 = Field("foo", table=self.t).isin(["a", "b"])
        self.assertEqual("\"foo\" IN ('a','b')", str(c1))
        self.assertEqual("\"isin\".\"foo\" IN ('a','b')", str(c2))
    def test__in_date(self):
        c1 = Field("foo").isin([date(2000, 1, 1), date(2000, 12, 31)])
        c2 = Field("foo", table=self.t).isin([date(2000, 1, 1), date(2000, 12, 31)])
        self.assertEqual("\"foo\" IN ('2000-01-01','2000-12-31')", str(c1))
        self.assertEqual("\"isin\".\"foo\" IN ('2000-01-01','2000-12-31')", str(c2))
    def test__in_datetime(self):
        c1 = Field("foo").isin([datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)])
        c2 = Field("foo", table=self.t).isin(
            [datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)]
        )
        self.assertEqual("\"foo\" IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')", str(c1))
        self.assertEqual(
            "\"isin\".\"foo\" IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')", str(c2)
        )
    def test__function_isin(self):
        c1 = fn.Coalesce(Field("foo"), 0).isin([0, 1])
        c2 = fn.Coalesce(Field("foo", table=self.t), 0).isin([0, 1])
        self.assertEqual('COALESCE("foo",0) IN (0,1)', str(c1))
        self.assertEqual('COALESCE("isin"."foo",0) IN (0,1)', str(c2))
    def test__in_unicode(self):
        # NOTE(review): identical to test__in_character since str/unicode
        # unified — likely a Python 2 leftover that could be removed.
        c1 = Field("foo").isin(["a", "b"])
        c2 = Field("foo", table=self.t).isin(["a", "b"])
        self.assertEqual("\"foo\" IN ('a','b')", str(c1))
        self.assertEqual("\"isin\".\"foo\" IN ('a','b')", str(c2))
class NotInTests(unittest.TestCase):
    """NOT IN rendering via ``notin()`` — mirror of :class:`IsInTests`."""
    t = Table("abc", alias="notin")
    def test__notin_number(self):
        c1 = Field("foo").notin([0, 1])
        c2 = Field("foo", table=self.t).notin([0, 1])
        self.assertEqual('"foo" NOT IN (0,1)', str(c1))
        self.assertEqual('"notin"."foo" NOT IN (0,1)', str(c2))
    def test__notin_number_with_alias(self):
        c1 = Field("foo").notin([0, 1]).as_("alias")
        c2 = Field("foo", table=self.t).notin([0, 1]).as_("alias")
        self.assertEqual('"foo" NOT IN (0,1) "alias"', str(c1))
        self.assertEqual('"notin"."foo" NOT IN (0,1) "alias"', str(c2))
    def test__notin_character(self):
        c1 = Field("foo").notin(["a", "b"])
        c2 = Field("foo", table=self.t).notin(["a", "b"])
        self.assertEqual("\"foo\" NOT IN ('a','b')", str(c1))
        self.assertEqual("\"notin\".\"foo\" NOT IN ('a','b')", str(c2))
    def test__notin_date(self):
        c1 = Field("foo").notin([date(2000, 1, 1), date(2000, 12, 31)])
        c2 = Field("foo", table=self.t).notin([date(2000, 1, 1), date(2000, 12, 31)])
        self.assertEqual("\"foo\" NOT IN ('2000-01-01','2000-12-31')", str(c1))
        self.assertEqual("\"notin\".\"foo\" NOT IN ('2000-01-01','2000-12-31')", str(c2))
    def test__notin_datetime(self):
        c1 = Field("foo").notin([datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)])
        c2 = Field("foo", table=self.t).notin(
            [datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)]
        )
        self.assertEqual("\"foo\" NOT IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')", str(c1))
        self.assertEqual(
            "\"notin\".\"foo\" NOT IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')",
            str(c2),
        )
    def test__function_notin(self):
        c1 = fn.Coalesce(Field("foo"), 0).notin([0, 1])
        c2 = fn.Coalesce(Field("foo", table=self.t), 0).notin([0, 1])
        self.assertEqual('COALESCE("foo",0) NOT IN (0,1)', str(c1))
        self.assertEqual('COALESCE("notin"."foo",0) NOT IN (0,1)', str(c2))
class LikeTests(unittest.TestCase):
    """Pattern-match operators: LIKE / NOT LIKE, ILIKE / NOT ILIKE,
    RLIKE and GLOB, covering %, _ and escaped patterns.
    """
    t = Table("abc", alias="like")
    def test_like_starts_with(self):
        c1 = Field("foo").like("ab%")
        c2 = Field("foo", table=self.t).like("ab%")
        self.assertEqual("\"foo\" LIKE 'ab%'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'ab%\'', str(c2))
    def test_like_contains(self):
        c1 = Field("foo").like("%ab%")
        c2 = Field("foo", table=self.t).like("%ab%")
        self.assertEqual("\"foo\" LIKE '%ab%'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'%ab%\'', str(c2))
    def test_like_ends_with(self):
        c1 = Field("foo").like("%ab")
        c2 = Field("foo", table=self.t).like("%ab")
        self.assertEqual("\"foo\" LIKE '%ab'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'%ab\'', str(c2))
    def test_like_n_chars_long(self):
        c1 = Field("foo").like("___")
        c2 = Field("foo", table=self.t).like("___")
        self.assertEqual("\"foo\" LIKE '___'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'___\'', str(c2))
    def test_like_single_chars_and_various_chars(self):
        c1 = Field("foo").like("a_b%c")
        c2 = Field("foo", table=self.t).like("a_b%c")
        self.assertEqual("\"foo\" LIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'a_b%c\'', str(c2))
    def test_not_like_single_chars_and_various_chars(self):
        c1 = Field("foo").not_like("a_b%c")
        c2 = Field("foo", table=self.t).not_like("a_b%c")
        self.assertEqual("\"foo\" NOT LIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" NOT LIKE \'a_b%c\'', str(c2))
    def test_ilike_starts_with(self):
        c1 = Field("foo").ilike("ab%")
        c2 = Field("foo", table=self.t).ilike("ab%")
        self.assertEqual("\"foo\" ILIKE 'ab%'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'ab%\'', str(c2))
    def test_ilike_contains(self):
        c1 = Field("foo").ilike("%ab%")
        c2 = Field("foo", table=self.t).ilike("%ab%")
        self.assertEqual("\"foo\" ILIKE '%ab%'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'%ab%\'', str(c2))
    def test_ilike_ends_with(self):
        c1 = Field("foo").ilike("%ab")
        c2 = Field("foo", table=self.t).ilike("%ab")
        self.assertEqual("\"foo\" ILIKE '%ab'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'%ab\'', str(c2))
    def test_ilike_n_chars_long(self):
        c1 = Field("foo").ilike("___")
        c2 = Field("foo", table=self.t).ilike("___")
        self.assertEqual("\"foo\" ILIKE '___'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'___\'', str(c2))
    def test_ilike_single_chars_and_various_chars(self):
        c1 = Field("foo").ilike("a_b%c")
        c2 = Field("foo", table=self.t).ilike("a_b%c")
        self.assertEqual("\"foo\" ILIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'a_b%c\'', str(c2))
    def test_not_ilike_single_chars_and_various_chars(self):
        c1 = Field("foo").not_ilike("a_b%c")
        c2 = Field("foo", table=self.t).not_ilike("a_b%c")
        self.assertEqual("\"foo\" NOT ILIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" NOT ILIKE \'a_b%c\'', str(c2))
    def test_rlike_escape_chars(self):
        c1 = Field("foo").rlike("\\\\d+$")
        c2 = Field("foo", table=self.t).rlike("\\\\d+$")
        self.assertEqual("\"foo\" RLIKE '\\\\d+$'", str(c1))
        self.assertEqual('"like"."foo" RLIKE \'\\\\d+$\'', str(c2))
    def test_glob_single_chars_and_various_chars(self):
        c1 = Field("foo").glob("a_b*")
        c2 = Field("foo", table=self.t).glob("a_b*")
        self.assertEqual("\"foo\" GLOB 'a_b*'", str(c1))
        self.assertEqual('"like"."foo" GLOB \'a_b*\'', str(c2))
class ComplexCriterionTests(unittest.TestCase):
    """Combining criteria with & (AND), | (OR) and ^ (XOR), including
    nesting/parenthesization and mixing with functions and BETWEEN/IN.
    """
    table_abc, table_efg = Table("abc", alias="cx0"), Table("efg", alias="cx1")
    def test_and(self):
        c1 = (Field("foo") == 1) & (Field("bar") == 2)
        c2 = (Field("foo", table=self.table_abc) == 1) & (Field("bar", table=self.table_efg) == 2)
        self.assertEqual('"foo"=1 AND "bar"=2', str(c1))
        self.assertEqual('"cx0"."foo"=1 AND "cx1"."bar"=2', str(c2))
    def test_or(self):
        c1 = (Field("foo") == 1) | (Field("bar") == 2)
        c2 = (Field("foo", table=self.table_abc) == 1) | (Field("bar", table=self.table_efg) == 2)
        self.assertEqual('"foo"=1 OR "bar"=2', str(c1))
        self.assertEqual('"cx0"."foo"=1 OR "cx1"."bar"=2', str(c2))
    def test_xor(self):
        c1 = (Field("foo") == 1) ^ (Field("bar") == 2)
        c2 = (Field("foo", table=self.table_abc) == 1) ^ (Field("bar", table=self.table_efg) == 2)
        self.assertEqual('"foo"=1 XOR "bar"=2', str(c1))
        self.assertEqual('"cx0"."foo"=1 XOR "cx1"."bar"=2', str(c2))
    def test_function_and(self):
        c1 = fn.IsNull(Field("foo")) & (Field("bar") == 2)
        self.assertEqual('ISNULL("foo") AND "bar"=2', str(c1))
    def test_function_or(self):
        c1 = fn.IsNull(Field("foo")) | (Field("bar") == 2)
        self.assertEqual('ISNULL("foo") OR "bar"=2', str(c1))
    def test_function_xor(self):
        c1 = fn.IsNull(Field("foo")) ^ (Field("bar") == 2)
        self.assertEqual('ISNULL("foo") XOR "bar"=2', str(c1))
    def test__nested__and(self):
        c = (Field("foo") == 1) & (Field("bar") == 2) & (Field("buz") == 3)
        self.assertEqual('"foo"=1 AND "bar"=2 AND "buz"=3', str(c))
    def test__nested__or(self):
        c = (Field("foo") == 1) | (Field("bar") == 2) | (Field("buz") == 3)
        self.assertEqual('"foo"=1 OR "bar"=2 OR "buz"=3', str(c))
    def test__nested__mixed(self):
        # Mixed AND/OR must parenthesize the inner AND group.
        c = ((Field("foo") == 1) & (Field("bar") == 2)) | (Field("buz") == 3)
        self.assertEqual('("foo"=1 AND "bar"=2) OR "buz"=3', str(c))
    def test__between_and_isin(self):
        c = Field("foo").isin([1, 2, 3]) & Field("bar").between(0, 1)
        self.assertEqual('"foo" IN (1,2,3) AND "bar" BETWEEN 0 AND 1', str(c))
    def test__between_and_field(self):
        c1 = Field("foo").between(0, 1)
        c2 = Field("bool_field")
        self.assertEqual('"foo" BETWEEN 0 AND 1 AND "bool_field"', str(c1 & c2))
        self.assertEqual('"bool_field" AND "foo" BETWEEN 0 AND 1', str(c2 & c1))
class FieldsAsCriterionTests(unittest.TestCase):
    """Bare fields can be combined directly with &, |, ^ as boolean criteria."""
    def test__field_and_field(self):
        lhs, rhs = Field("a"), Field("b")
        self.assertEqual('"a" AND "b"', str(lhs & rhs))
    def test__field_or_field(self):
        lhs, rhs = Field("a"), Field("b")
        self.assertEqual('"a" OR "b"', str(lhs | rhs))
    def test__field_xor_field(self):
        lhs, rhs = Field("a"), Field("b")
        self.assertEqual('"a" XOR "b"', str(lhs ^ rhs))
class CriterionOperationsTests(unittest.TestCase):
    """``replace_table()`` propagates through every criterion shape: plain
    fields, arithmetic, comparisons, compound criteria, functions, BETWEEN
    and IS NULL.
    """
    table_abc, table_efg = Table("abc", alias="cx0"), Table("efg", alias="cx1")
    def test_field_replace_table(self):
        f = self.table_abc.foo.replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"', str(f))
    def test_arithmeticfunction_replace_table(self):
        f = (self.table_abc.foo + self.table_abc.bar).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"+"cx1"."bar"', str(f))
    def test_criterion_replace_table(self):
        f = (self.table_abc.foo < self.table_abc.bar).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"<"cx1"."bar"', str(f))
    def test_complexcriterion_replace_table(self):
        f = (self.table_abc.foo < self.table_abc.bar) & (self.table_abc.fiz > self.table_abc.buz)
        f = f.replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"<"cx1"."bar" AND "cx1"."fiz">"cx1"."buz"', str(f))
    def test_function_with_only_fields_replace_table(self):
        f = fn.Sum(self.table_abc.foo).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('SUM("cx1"."foo")', str(f))
    def test_function_with_values_and_fields_replace_table(self):
        f = Mod(self.table_abc.foo, 2).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('MOD("cx1"."foo",2)', str(f))
    def test_betweencriterion_replace_table(self):
        f = self.table_abc.foo[0:1].replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo" BETWEEN 0 AND 1', str(f))
    def test_nullcriterion_replace_table(self):
        f = self.table_abc.foo.isnull().replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo" IS NULL', str(f))
class AnyTests(unittest.TestCase):
def test_zero_args_returns_empty_criterion(self):
crit = Criterion.any()
self.assertIsInstance(crit, EmptyCriterion)
def test_single_arg_returns_self(self):
f = Field("a")
crit = Criterion.any([f])
self.assertEqual(str(f), str(crit))
def test_multiple_args_returned_in_chain_of_ors(self):
crit = Criterion.any([Field("a"), Field("b"), Field("c"), Field("d")])
self.assertEqual(str(crit), '"a" OR "b" OR "c" OR "d"')
def test_with_generator(self):
crit = Criterion.any(Field(letter) for letter in "abcd")
self.assertEqual(str(crit), '"a" OR "b" OR "c" OR "d"')
class EmptyCriterionTests(unittest.TestCase):
def test_fields_(self):
empty_criterion = EmptyCriterion()
self.assertEqual(len(empty_criterion.fields_()), 0)
class AllTests(unittest.TestCase):
def test_zero_args_returns_empty_criterion(self):
crit = Criterion.all()
self.assertIsInstance(crit, EmptyCriterion)
def test_single_arg_returns_self(self):
f = Field("a")
crit = Criterion.all([f])
self.assertEqual(str(f), str(crit))
def test_multiple_args_returned_in_chain_of_ors(self):
crit = Criterion.all([Field("a"), Field("b"), Field("c"), Field("d")])
self.assertEqual(str(crit), '"a" AND "b" AND "c" AND "d"')
def test_with_generator(self):
crit = Criterion.all(Field(letter) for letter in "abcd")
self.assertEqual(str(crit), '"a" AND "b" AND "c" AND "d"')
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class Menu(models.Model):
"""
菜单
"""
name = models.CharField(max_length=30, unique=True, verbose_name="菜单名")
icon = models.CharField(max_length=50, null=True, blank=True, verbose_name="图标")
path = models.CharField(max_length=158, null=True, blank=True, verbose_name="链接地址")
is_frame = models.BooleanField(default=False, verbose_name="外部菜单")
is_show = models.BooleanField(default=True, verbose_name="显示标记")
sort = models.IntegerField(null=True, blank=True, verbose_name="排序标记")
component = models.CharField(max_length=200, null=True, blank=True, verbose_name="组件")
pid = models.ForeignKey("self", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="父菜单")
def __str__(self):
return self.name
class Meta:
verbose_name = '菜单'
verbose_name_plural = verbose_name
ordering = ['id']
class Permission(models.Model):
"""
权限
"""
name = models.CharField(max_length=30, unique=True, verbose_name="权限名")
method = models.CharField(max_length=50, null=True, blank=True, verbose_name="方法")
pid = models.ForeignKey("self", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="父权限")
def __str__(self):
return self.name
class Meta:
verbose_name = '权限'
verbose_name_plural = verbose_name
ordering = ['id']
class Role(models.Model):
"""
角色
"""
name = models.CharField(max_length=32, unique=True, verbose_name="角色")
permissions = models.ManyToManyField("Permission", blank=True, verbose_name="权限")
menus = models.ManyToManyField("Menu", blank=True, verbose_name="菜单")
desc = models.CharField(max_length=50, blank=True, null=True, verbose_name="描述")
class Meta:
verbose_name = "角色"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Organization(models.Model):
"""
组织架构
"""
organization_type_choices = (
("company", "公司"),
("department", "部门")
)
name = models.CharField(max_length=60, verbose_name="名称")
type = models.CharField(max_length=20, choices=organization_type_choices, default="company", verbose_name="类型")
pid = models.ForeignKey("self", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="父类组织")
class Meta:
verbose_name = "组织架构"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class UserProfile(AbstractUser):
    '''
    Custom user model extending Django's AbstractUser with profile,
    department, manager and role links. (Original docstring: 用户 = "user".)
    '''
    name = models.CharField(max_length=20, default="", verbose_name="姓名")  # real name
    mobile = models.CharField(max_length=11, default="", verbose_name="手机号码")  # mobile number (11 digits, CN format)
    email = models.EmailField(max_length=50, verbose_name="邮箱")  # overrides AbstractUser.email with a max length
    # Avatar image; upload path is date-based under static/.
    image = models.ImageField(upload_to="static/%Y/%m", default="image/default.png",
                              max_length=100, null=True, blank=True)
    department = models.ForeignKey("Organization", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="部门")  # owning department
    position = models.CharField(max_length=50, null=True, blank=True, verbose_name="职位")  # job title
    superior = models.ForeignKey("self", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="上级主管")  # direct manager
    roles = models.ManyToManyField("Role", verbose_name="角色", blank=True)  # assigned roles
    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = verbose_name
        ordering = ['id']
    def __str__(self):
        return self.username
|
<gh_stars>0
from testqtgui._common import *
if has_qt4:
import srllib.qtgui.util
from srllib.qtgui import models
@only_qt4
class UndoItemModelTest(QtTestCase):
    """Tests for models.UndoItemModel: every mutating operation should push
    exactly one undoable command onto the associated undo stack.

    NOTE(review): this is Python 2 / PyQt4 code — ``for x in 1, 2`` below is
    not valid Python 3 syntax, and QVariant/toString are Qt4 APIs.
    """
    def test_construct(self):
        # The stack passed in must be the one the model exposes.
        model = self.__construct()
        self.assertIs(model.undo_stack, self.__undo_stack)
    def test_construct_without_stack(self):
        """ Test constructing with no undo stack. """
        # The model is expected to create its own stack when given None.
        model = self.__construct(with_undo=False)
        self.assertIsNot(model.undo_stack, None)
    def test_setData(self):
        data = [QtCore.QVariant(x) for x in 1, 2]
        model = self.__construct(hor_headers=("1"), initial_rows=[[data[0]]])
        stack = self.__undo_stack
        # Overwrite the initial value; one undo command should appear.
        model.setData(model.index(0, 0), data[1])
        self.assertEqual(model.data(model.index(0, 0)), data[1])
        self.assertEqual(stack.undoText(), "set item data")
        self.assertEqual(stack.count(), 1)
        stack.undo()
        self.assertEqual(model.data(model.index(0, 0)).toString(),
            data[0].toString())
        # Add data for a role, and verify it isn't touched
        model.setData(model.index(0, 0), QtCore.QVariant(1), Qt.UserRole+1)
        model.setData(model.index(0, 0), QtCore.QVariant(0), Qt.UserRole)
        self.assertEqual(model.data(model.index(0, 0),
            Qt.UserRole+1).toInt()[0], 1)
    def test_setItemData(self):
        # Helper: assert all role->value pairs round-trip through the model.
        def check_data(model, row, column, data):
            for role, var in data.items():
                self.assertEqual(model.data(model.index(row, column),
                    role).toString(), var.toString())
        data = [{}, {}]
        for role in (Qt.EditRole, Qt.UserRole):
            data[0][role] = QtCore.QVariant(0)
            data[1][role] = QtCore.QVariant(1)
        model = self.__construct(hor_headers=("1"), initial_rows=[[data[0]]])
        stack = self.__undo_stack
        model.setItemData(model.index(0, 0), data[1])
        check_data(model, 0, 0, data[1])
        self.assertEqual(stack.undoText(), "set item data")
        self.assertEqual(stack.count(), 1)
        stack.undo()
        check_data(model, 0, 0, data[0])
    def test_setItemData_add(self):
        """ Test adding a role. """
        data = {Qt.UserRole: QtCore.QVariant(0)}
        model = self.__construct(initial_rows=[[data]])
        assert model.itemData(model.index(0, 0)) == data
        add_data = {Qt.UserRole+1: QtCore.QVariant(1)}
        model.setItemData(model.index(0, 0), add_data)
        # Without clear=True, existing roles must be preserved.
        data.update(add_data)
        self.assertEqual(model.itemData(model.index(0, 0)), data)
    def test_setItemData_clear(self):
        """ Test adding a role, clearing others. """
        data = {Qt.UserRole: QtCore.QVariant(0)}
        model = self.__construct(initial_rows=[[data]])
        new_data = {Qt.UserRole+1: QtCore.QVariant(0)}
        model.setItemData(model.index(0, 0), new_data, clear=True)
        self.assertEqual(model.itemData(model.index(0, 0)), new_data)
    def test_appendRow(self):
        # Custom item type to verify the model clones items (not stores them).
        class MyItem(QtGui.QStandardItem):
            def clone(self):
                """ Necessary when adding to model. """
                return MyItem(self)
        model = self.__construct(["1"])
        stack = self.__undo_stack
        item = MyItem("text")
        model.appendRow([item])
        # The model stores a clone of the same type, not the original object.
        self.assertIsNot(model.item(0), item)
        self.assert_(isinstance(model.item(0), MyItem))
        self.assertEqual(stack.count(), 1)
        self.assertEqual(stack.undoText(), "append row")
        stack.undo()
        self.assertEqual(model.rowCount(), 0)
        stack.redo()
        self.assertEqual(model.data(model.index(0, 0)).toString(), "text")
        stack.undo()
        # Caller-supplied undo text overrides the default.
        model.appendRow([MyItem("text")], undo_text="add table row")
        self.assertEqual(stack.undoText(), "add table row")
    def test_takeItem(self):
        model = self.__construct(initial_rows=[["text"]])
        stack = self.__undo_stack
        item = model.takeItem(0)
        self.assertEqual(item.text(), "text")
        self.assertIs(model.item(0), None)
        self.assertEqual(stack.count(), 1)
        self.assertEqual(stack.undoText(), "take item")
        # Undo/redo/undo cycle should restore the original item.
        stack.undo()
        stack.redo()
        stack.undo()
        self.assertEqual(model.item(0).text(), "text")
        # Now with undo text
        item = model.takeItem(0, undo_text="delete cell")
        self.assertEqual(stack.undoText(), "delete cell")
    def test_setItem(self):
        # setItem is expected NOT to be undoable (stack stays empty).
        model = self.__construct(initial_rows=[["old text", "old text"]])
        model.setItem(0, 1, QtGui.QStandardItem("new text"))
        self.assertEqual(model.item(0, 1).text(), "new text")
        self.assertEqual(self.__undo_stack.count(), 0)
    def test_removeRows(self):
        model = self.__construct(initial_rows=[["1"], ["2"], ["3"]])
        stack = self.__undo_stack
        model.removeRows(1, 2)
        self.assertEqual(stack.count(), 1)
        self.assertEqual(model.rowCount(), 1)
        self.assertEqual(model.item(0).text(), "1")
        self.assertEqual(stack.undoText(), "remove rows")
        stack.undo()
        # All three rows restored in their original order.
        self.assertEqual(model.item(0).text(), "1")
        self.assertEqual(model.item(1).text(), "2")
        self.assertEqual(model.item(2).text(), "3")
        stack.redo()
        self.assertEqual(model.rowCount(), 1)
        stack.undo()
        model.removeRows(0, 3, undo_text="remove table rows")
        self.assertEqual(stack.undoText(), "remove table rows")
    def __construct(self, hor_headers=None, initial_rows=None, with_undo=True):
        # Build an UndoItemModel, optionally seeded with rows.  Initial rows
        # are appended with the undo stack disabled so tests start at count 0.
        if with_undo:
            stack = self.__undo_stack = srllib.qtgui.util.UndoStack()
        else:
            stack = self.__undo_stack = None
        model = models.UndoItemModel(stack, hor_headers=hor_headers)
        if initial_rows:
            model.undo_stack.is_enabled = False
            for row in initial_rows:
                assert isinstance(row, (list, tuple))
                model.append_row(row)
            model.undo_stack.is_enabled = True
        return model
|
# -*- coding:utf-8 -*-
import time
import tornado.escape
from torcms.core import tools
from torcms.model.wiki_model import MWiki
class TestMWiki():
    """CRUD tests for the MWiki model (torcms wiki pages).

    NOTE(review): mixes a pytest-style ``setup`` hook with a manually
    invoked, unittest-named ``tearDown`` — each test calls
    ``self.tearDown()`` itself to delete the fixture page.
    """
    def setup(self):
        # Runs before every test method; message says "setup runs before
        # each test case in this class".
        print('setup 方法执行于本类中每条用例之前')
        self.uu = MWiki()
        self.title = 'tyyyitle'
        self.uid = '6985'
    def add_page(self, **kwargs):
        # Create a fixture wiki page under self.uid; kwargs override defaults.
        post_data = {
            'title': kwargs.get('title', self.title),
            'user_name': kwargs.get('user_name', 'Tome'),
            'cnt_md': kwargs.get('cnt_md', '## adslkfjasdf\n lasdfkjsadf'),
        }
        self.uu.create_page(self.uid, post_data)
    def test_insert(self):
        raw_count = self.uu.get_counts()
        post_data = {
            'title': 'tyyyitle',
            'user_name': 'Tome',
            'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
        }
        self.add_page(**post_data)
        new_count = self.uu.get_counts()
        tt = self.uu.get_by_uid(self.uid)
        assert tt.title == post_data['title']
        # Stored markdown is the HTML-unescaped form of the input.
        assert tt.cnt_md == tornado.escape.xhtml_unescape(post_data['cnt_md'])
        assert raw_count + 1 <= new_count
        self.tearDown()
    # def test_insert_2(self):
    #     self.tearDown()
    #     '''Wiki insert: Test invalid title'''
    #     post_data = {
    #         'title': '',
    #         'user_name': 'Tome',
    #         'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
    #
    #     }
    #     aa=self.uu.create_page(self.uid, post_data)
    #     assert aa==False
    #
    #     self.tearDown()
    def test_query_all(self):
        # The created page must appear in a kind='2' (wiki) query.
        self.add_page()
        p = {
            'kind': '2'
        }
        aa = self.uu.query_all(**p)
        tf = False
        for i in aa:
            if i.uid == self.uid:
                tf = True
        self.tearDown()
        assert tf
    def test_get_by_slug(self):
        self.add_page()
        aa = self.uu.get_by_uid(self.uid)
        assert aa.title == self.title
        self.tearDown()
    def test_update_cnt(self):
        # update_cnt replaces author and markdown content only.
        self.add_page()
        post_data = {
            'user_name': 'name',
            'cnt_md': '## adslkfjgggfdffasdf\n lasdfkjsadf',
        }
        self.uu.update_cnt(self.uid, post_data)
        tt = self.uu.get_by_uid(self.uid)
        assert tt.user_name == post_data['user_name']
        assert tt.cnt_md == tornado.escape.xhtml_unescape(post_data['cnt_md'])
        self.tearDown()
    def test_update(self):
        self.add_page()
        post_data = {
            'title': 'ti',
            'user_name': 'Tome',
            'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
        }
        # NOTE(review): update() apparently returns None when 'user_name'
        # is present (rejected?) — confirm against MWiki.update.
        aa = self.uu.update(self.uid, post_data)
        assert aa == None
        post_data2 = {
            'title': 'tgrgri',
            'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
        }
        self.uu.update(self.uid, post_data2)
        aa = self.uu.get_by_uid(self.uid)
        assert aa.title == post_data2['title']
        self.tearDown()
    def test_query_recent_edited(self):
        # Page created after `timstamp` must show up in recent-edited query.
        timstamp = tools.timestamp()
        time.sleep(1)
        self.add_page()
        aa = self.uu.query_recent_edited(timstamp, kind='2')
        tf = False
        for i in aa:
            if i.uid == self.uid:
                tf = True
        self.tearDown()
        assert tf
    def tearDown(self):
        # Delete the fixture page; called explicitly at the end of each test.
        print("function teardown")
        self.uu.delete(self.uid)
|
<reponame>cnstark/awesome_gpu_scheduler<filename>notification/email_notification.py
import traceback
from django.core.mail import send_mail
from gpu_tasker.settings import EMAIL_NOTIFICATION
# Email subject/body templates (Chinese). Each body takes 7 positional
# format args: task name (in brackets), task name, workspace, command,
# server IP, GPUs, and a timestamp.
TASK_START_NOTIFICATION_TITLE = '任务开始运行'
TASK_START_NOTIFICATION_TEMPLATE = \
    '''任务[{}]开始运行
任务运行详情:
任务名称:{}
工作目录:{}
命令:
----------
{}
----------
服务器:{}
显卡:{}
开始时间:{}
'''
# "Task finished" title/body.
TASK_FINISH_NOTIFICATION_TITLE = '任务运行完成'
TASK_FINISH_NOTIFICATION_TEMPLATE = \
    '''任务[{}]运行完成
任务运行详情:
任务名称:{}
工作目录:{}
命令:
----------
{}
----------
服务器:{}
显卡:{}
结束时间:{}
请登录GPUTasker查看运行结果
'''
# "Task failed" title/body.
TASK_FAIL_NOTIFICATION_TITLE = '任务运行失败'
TASK_FAIL_NOTIFICATION_TEMPLATE = \
    '''任务[{}]运行失败
任务运行详情:
任务名称:{}
工作目录:{}
命令:
----------
{}
----------
服务器:{}
显卡:{}
结束时间:{}
请登录GPUTasker查看错误信息
'''
def send_email(address, title, content):
    """Best-effort email send: no-op when EMAIL_NOTIFICATION is disabled,
    and failures are printed rather than raised."""
    if not EMAIL_NOTIFICATION:
        return
    try:
        # Imported lazily so a missing setting only matters when sending.
        from gpu_tasker.settings import DEFAULT_FROM_EMAIL
        send_mail(title, content, DEFAULT_FROM_EMAIL, [address], fail_silently=False)
    except Exception:
        print('Send email fail')
        print(traceback.format_exc())
def check_email_config(func):
    """Decorator: call *func* only when email notification is enabled and the
    running log's task owner has a non-empty email address; otherwise the
    wrapped call is a silent no-op (returns None).

    The wrapped function must take the running-log object as its first
    positional argument.
    """
    import functools

    # functools.wraps preserves the wrapped function's name/docstring
    # (the original decorator hid them behind "wrapper").
    @functools.wraps(func)
    def wrapper(*args, **kw):
        if EMAIL_NOTIFICATION:
            running_log = args[0]
            address = running_log.task.user.email
            if address is not None and address != '':
                return func(*args, **kw)
    return wrapper
@check_email_config
def send_task_start_email(running_log):
    """Email the task owner that the task in *running_log* has started.

    Only runs when email notification is enabled and the owner has an
    email address (enforced by the check_email_config decorator).
    """
    address = running_log.task.user.email
    title = TASK_START_NOTIFICATION_TITLE
    # Template takes the task name twice: once for the bracketed headline,
    # once for the detail line.
    content = TASK_START_NOTIFICATION_TEMPLATE.format(
        running_log.task.name,
        running_log.task.name,
        running_log.task.workspace,
        running_log.task.cmd,
        running_log.server.ip,
        running_log.gpus,
        running_log.start_at.strftime("%Y-%m-%d %H:%M:%S")
    )
    send_email(address, title, content)
@check_email_config
def send_task_finish_email(running_log):
    """Email the task owner that the task in *running_log* finished
    successfully. Gated by the check_email_config decorator.
    """
    address = running_log.task.user.email
    title = TASK_FINISH_NOTIFICATION_TITLE
    # Task name twice: bracketed headline plus detail line; timestamp is
    # the log's last update time.
    content = TASK_FINISH_NOTIFICATION_TEMPLATE.format(
        running_log.task.name,
        running_log.task.name,
        running_log.task.workspace,
        running_log.task.cmd,
        running_log.server.ip,
        running_log.gpus,
        running_log.update_at.strftime("%Y-%m-%d %H:%M:%S")
    )
    send_email(address, title, content)
@check_email_config
def send_task_fail_email(running_log):
    """Email the task owner that the task in *running_log* failed.
    Gated by the check_email_config decorator.
    """
    address = running_log.task.user.email
    title = TASK_FAIL_NOTIFICATION_TITLE
    # Task name twice: bracketed headline plus detail line; timestamp is
    # the log's last update time.
    content = TASK_FAIL_NOTIFICATION_TEMPLATE.format(
        running_log.task.name,
        running_log.task.name,
        running_log.task.workspace,
        running_log.task.cmd,
        running_log.server.ip,
        running_log.gpus,
        running_log.update_at.strftime("%Y-%m-%d %H:%M:%S")
    )
    send_email(address, title, content)
|
# Copyright (C) 2021 <NAME>
#
# SPDX-License-Identifier: MIT
import warnings
import gmsh
from mpi4py import MPI
warnings.filterwarnings("ignore")
__all__ = ["create_disk_mesh", "create_sphere_mesh"]
def create_disk_mesh(LcMin=0.005, LcMax=0.015, filename="disk.msh"):
"""
Create a disk mesh centered at (0.5, 0.5) with radius 0.5.
Mesh is finer at (0.5,0) using LcMin, and gradually decreasing to LcMax
"""
gmsh.initialize()
if MPI.COMM_WORLD.rank == 0:
gmsh.model.occ.addDisk(0.5, 0.5, 0, 0.5, 0.5)
gmsh.model.occ.addPoint(0.5, 0, 0, tag=5)
gmsh.model.occ.synchronize()
domains = gmsh.model.getEntities(dim=2)
domain_marker = 11
gmsh.model.addPhysicalGroup(
domains[0][0], [domains[0][1]], domain_marker)
gmsh.model.occ.synchronize()
gmsh.model.mesh.field.add("Distance", 1)
gmsh.model.mesh.field.setNumbers(1, "NodesList", [5])
gmsh.model.mesh.field.add("Threshold", 2)
gmsh.model.mesh.field.setNumber(2, "IField", 1)
gmsh.model.mesh.field.setNumber(2, "LcMin", LcMin)
gmsh.model.mesh.field.setNumber(2, "LcMax", LcMax)
gmsh.model.mesh.field.setNumber(2, "DistMin", 0.2)
gmsh.model.mesh.field.setNumber(2, "DistMax", 0.5)
gmsh.model.mesh.field.setAsBackgroundMesh(2)
gmsh.model.mesh.generate(2)
gmsh.write(filename)
gmsh.finalize()
def create_sphere_mesh(LcMin=0.025, LcMax=0.1, filename="disk.msh"):
"""
Create a sphere mesh centered at (0.5, 0.5, 0.5) with radius 0.5.
Mesh is finer at (0.5, 0.5, 0) using LcMin, and gradually decreasing to LcMax
"""
gmsh.initialize()
if MPI.COMM_WORLD.rank == 0:
gmsh.model.occ.addSphere(0.5, 0.5, 0.5, 0.5)
gmsh.model.occ.addPoint(0.5, 0.5, 0, tag=19)
gmsh.model.occ.synchronize()
domains = gmsh.model.getEntities(dim=3)
domain_marker = 11
gmsh.model.addPhysicalGroup(domains[0][0], [domains[0][1]], domain_marker)
gmsh.model.occ.synchronize()
gmsh.model.mesh.field.add("Distance", 1)
gmsh.model.mesh.field.setNumbers(1, "NodesList", [19])
gmsh.model.mesh.field.add("Threshold", 2)
gmsh.model.mesh.field.setNumber(2, "IField", 1)
gmsh.model.mesh.field.setNumber(2, "LcMin", LcMin)
gmsh.model.mesh.field.setNumber(2, "LcMax", LcMax)
gmsh.model.mesh.field.setNumber(2, "DistMin", 0.3)
gmsh.model.mesh.field.setNumber(2, "DistMax", 0.6)
gmsh.model.mesh.field.setAsBackgroundMesh(2)
gmsh.model.mesh.generate(3)
gmsh.write(filename)
gmsh.finalize()
|
<filename>cbinterface/psc/sessions.py
"""All things LR sessions."""
import io
import os
import time
import logging
import threading
from cbapi.psc import Device
from cbapi.psc.threathunter import CbThreatHunterAPI
from cbapi.psc.cblr import (
LiveResponseSession,
LiveResponseSessionManager,
LiveResponseJobScheduler,
WorkItem,
JobWorker,
)
from cbapi.errors import ObjectNotFoundError, TimeoutError
# from cbapi.live_response_api import CbLRManagerBase, WorkItem, poll_status
from typing import List, Union
from cbinterface.commands import BaseSessionCommand
from cbinterface.psc.device import is_device_online
LOGGER = logging.getLogger("cbinterface.psc.session")
CBLR_BASE = "/integrationServices/v3/cblr"
class CustomLiveResponseJobScheduler(LiveResponseJobScheduler):
    """Job scheduler that looks up Device objects through a separate PSC API
    client (``psc_cb``) when spawning workers for unscheduled jobs.
    """

    def __init__(self, cb, psc_cb, max_workers=10):
        self.psc_cb = psc_cb
        # BUG FIX: pass the caller-supplied max_workers through instead of
        # the hard-coded literal 10 (previously the parameter was ignored).
        super().__init__(cb, max_workers=max_workers)

    def _spawn_new_workers(self):
        """Start a JobWorker for each device that has unscheduled jobs and
        no worker yet, up to the max_workers limit."""
        if len(self._job_workers) >= self._max_workers:
            return
        schedule_max = self._max_workers - len(self._job_workers)
        devices = [
            s for s in self.psc_cb.select(Device) if s.id in self._unscheduled_jobs and s.id not in self._job_workers
        ]
        # and is_device_online(s)]
        devices_to_schedule = devices[:schedule_max]
        LOGGER.debug("Spawning new workers to handle these devices: {0}".format(devices_to_schedule))
        for device in devices_to_schedule:
            LOGGER.debug("Spawning new JobWorker for device id {0}".format(device.id))
            self._job_workers[device.id] = JobWorker(self._cb, device.id, self.schedule_queue)
            self._job_workers[device.id].start()
class CustomLiveResponseSessionManager(LiveResponseSessionManager):
    """Session manager that authenticates Live Response with a dedicated
    LR token, schedules commands through CustomLiveResponseJobScheduler,
    and optionally keeps active sessions alive from a background thread.
    """
    def __init__(self, cb, timeout=30, custom_session_keepalive=False):
        # First, get a CB object with the LR API permissions
        cblr = CbThreatHunterAPI(url=cb.credentials.url, token=cb.credentials.lr_token, org_key=cb.credentials.org_key)
        super().__init__(cblr, timeout=timeout, keepalive_sessions=False)
        # so now self._cb == cblr -- store a reference to the regular cb
        self.psc_cb = cb
        if custom_session_keepalive:
            # Daemon thread: dies with the process, pings sessions every
            # `timeout` seconds (see _keep_active_sessions_alive_thread).
            self._cleanup_thread = threading.Thread(target=self._keep_active_sessions_alive_thread)
            self._cleanup_thread.daemon = True
            self._cleanup_thread.start()
        # for storing initiated commands
        self.commands = []
    def get_session(self, device: Device):
        """Get or create LR session."""
        # POST creates (or returns) a session for the device; cache it by
        # device id for later command submission.
        session_data = self._cb.post_object(f"{CBLR_BASE}/session/{device.id}", {"sensor_id": device.id}).json()
        session_id = session_data["id"]
        LOGGER.debug(f"got session id={session_id} with status={session_data['status']}")
        self._sessions[device.id] = self.cblr_session_cls(self, session_id, device.id, session_data=session_data)
        return self._sessions[device.id]
    def wait_for_active_session(self, device: Device, timeout=86400):
        """Return active session or None.
        Default timeout is 7 days.

        NOTE(review): 86400 seconds is 1 day, not 7 — one of the two is wrong.
        """
        LOGGER.info(
            f"attempting to get active session on device {device.id} (hostname:{device.name}) for up to {timeout/60} minutes"
        )
        start_time = time.time()
        session = None
        status = None
        # Poll until the session goes ACTIVE, waiting for the device to
        # come online first.
        while status != "ACTIVE" and time.time() - start_time < timeout:
            if not is_device_online(device):
                LOGGER.debug(f"waiting for device {device.id} to come online...")
                time.sleep(1)
                continue
            session = self.get_session(device)
            status = session.session_data["status"]
            time.sleep(0.5)
        if session and is_session_active(session):
            LOGGER.info(f"got active session {session.session_id}.")
        return session
    def submit_command(self, command: BaseSessionCommand, device: Union[int, Device]):
        """
        Create a new job to be executed as a Live Response.
        Args:
            command (BaseSessionCommand): The job to be scheduled.
            device (Device): Device to execute job on.
        Returns:
            Future: A reference to the running job.
        """
        assert isinstance(command, BaseSessionCommand)
        device_id = device
        if isinstance(device, Device):
            device_id = device.id
            command._hostname = device.name
        LOGGER.debug(f"submitting {command} to {device_id}")
        if self._job_scheduler is None:
            # spawn the scheduler thread
            self._job_scheduler = CustomLiveResponseJobScheduler(self._cb, self.psc_cb)
            self._job_scheduler.start()
        if device_id not in self._sessions:
            # No cached session for this device: establish one (blocking
            # until the session is ACTIVE) before scheduling work.
            device = Device(self._cb, device_id, force_init=True)
            active_session = self.active_session(device)
            if active_session is None:
                self.wait_for_active_session(device)
        work_item = WorkItem(command.run, device_id)
        self._job_scheduler.submit_job(work_item)
        # Attach scheduling metadata so callers can track the command.
        command.future = work_item.future
        command.device_id = device_id
        command.session_id = self._sessions[device_id].session_id
        command.session_data = self._sessions[device_id].session_data
        self.commands.append(command)
        return command
    def yield_completed_commands(self):
        """Wait for commands to complete, process results.
        Monitor commands and sessions.
        """
        LOGGER.info(f"waiting for {len(self.commands)} commands to complete ...")
        # NOTE: iterates while mutating self.commands; each removal is
        # paired with a continue/yield so the loop restarts safely.
        while self.commands:
            for cmd in self.commands:
                if not cmd.initiatied:
                    LOGGER.error(f"skipping uninitialized command: {cmd}")
                    self.commands.remove(cmd)
                    continue
                if cmd.exception:
                    LOGGER.error(f"exception for {cmd}: {cmd.exception}")
                    self.commands.remove(cmd)
                    continue
                if not get_session_by_id(self._cb, cmd.session_id):
                    LOGGER.error(f"session {cmd.session_id} is gone. command has gone to the void: {cmd}")
                    self.commands.remove(cmd)
                    continue
                if cmd.has_result:
                    LOGGER.debug(f"yielding {cmd}")
                    yield cmd
                    self.commands.remove(cmd)
            # yield time for completion
            time.sleep(0.7)
    def process_completed_commands(self):
        # Drain the command queue, letting each command handle its result.
        for cmd in self.yield_completed_commands():
            LOGGER.debug(f"processing => {cmd}")
            cmd.process_result()
    def _keep_active_sessions_alive_thread(self):
        """Used by a thread to ping active sessions so they don't
        close on long running session commands.
        """
        LOGGER.debug("Starting custom Live Response session keepalive and cleanup task")
        while True:
            time.sleep(self._timeout)
            delete_list = []
            with self._session_lock:
                for session in self._sessions.values():
                    if session._refcount == 0:
                        # No users left: schedule for close/removal.
                        delete_list.append(session.device_id)
                    else:
                        try:
                            if is_session_active(session):
                                LOGGER.info(f"sending keepalive for session {session.session_id}")
                                self._send_keepalive(session.session_id)
                        except ObjectNotFoundError:
                            LOGGER.debug(
                                f"Session {session.session_id} for device {session.device_id} not valid any longer, removing from cache"
                            )
                            delete_list.append(session.device_id)
                        except Exception as e:
                            LOGGER.warning(
                                f"Keepalive on session {session.session_id} (device {session.device_id}) failed with unknown error: {e}"
                            )
                            delete_list.append(session.device_id)
                for device_id in delete_list:
                    self._close_session(self._sessions[device_id].session_id)
                    del self._sessions[device_id]
def all_live_response_sessions(cb: CbThreatHunterAPI) -> List:
    """List all LR sessions still in server memory."""
    return list(cb.get_object(f"{CBLR_BASE}/session"))
def active_live_response_sessions(cb: CbThreatHunterAPI) -> List:
    """Return only the LR sessions the server reports as active."""
    return list(cb.get_object(f"{CBLR_BASE}/session?active_only=true"))
def device_live_response_sessions(device: Device, active_or_pending=False):
    """Get sessions associated to this device, optionally filtered to
    active/pending ones."""
    matching = [s for s in all_live_response_sessions(device._cb) if s["device_id"] == device.id]
    if active_or_pending:
        matching = [s for s in matching if s["status"] in ("active", "pending")]
    return matching
def device_live_response_sessions_by_device_id(cb: CbThreatHunterAPI, device_id: Union[int, str]):
    """Get sessions associated to this device by device id."""
    # Normalize string ids to int so the comparison below matches.
    target_id = int(device_id) if isinstance(device_id, str) else device_id
    return [s for s in all_live_response_sessions(cb) if s["device_id"] == target_id]
def get_session_by_id(cb: CbThreatHunterAPI, session_id):
    """Get a LR session object by id.

    Returns None (and logs a warning) when no such session exists.
    """
    try:
        return cb.get_object(f"{CBLR_BASE}/session/{session_id}")
    except ObjectNotFoundError:
        # typo fix in log message: "resonse" -> "response"
        LOGGER.warning(f"no live response session by ID={session_id}")
        return None
def close_session_by_id(cb: CbThreatHunterAPI, session_id):
    """Ask the server to close the given LR session; returns the response body."""
    payload = {"session_id": session_id, "status": "CLOSE"}
    return cb.put_object(f"{CBLR_BASE}/session", payload).json()
def get_session_status(cb: CbThreatHunterAPI, session_id):
    """Return any session status or None."""
    session = get_session_by_id(cb, session_id)
    return None if session is None else session["status"]
def is_session_active(session: LiveResponseSession):
    """Return True if the session's reported status is ACTIVE."""
    # Idiom: return the comparison directly instead of if/return True/False.
    return session.session_data["status"] == "ACTIVE"
def get_session_commands(cb: CbThreatHunterAPI, session_id: str):
    """List commands for this session.

    Returns None (and logs a warning) when the session does not exist.
    """
    try:
        return cb.get_object(f"{CBLR_BASE}/session/{session_id}/command")
    except ObjectNotFoundError:
        # typo fix in log message: "resonse" -> "response"
        LOGGER.warning(f"no live response session by ID={session_id}")
        return None
def get_command_result(cb: CbThreatHunterAPI, session_id: str, command_id: str):
    """Get results of a LR session command.

    Returns None (and logs a warning) when the session/command pair is unknown.
    """
    try:
        return cb.get_object(f"{CBLR_BASE}/session/{session_id}/command/{command_id}")
    except ObjectNotFoundError:
        # typo fix in log message: "resonse" -> "response"
        LOGGER.warning(f"no live response session and/or command combination for {session_id}:{command_id}")
        return None
def get_file_content(cb: CbThreatHunterAPI, session_id: str, file_id: str):
    """Get file content stored in LR session and write the file locally.

    The local filename is "<session>_on_<device>" plus the remote file's
    basename when metadata is available. Returns True if the file was
    written, otherwise None/False.
    """
    from cbinterface.helpers import get_os_independent_filepath

    try:
        # session_id is "<real_session_id>:<device_id>"
        real_session_id, device_id = session_id.split(":", 1)
        filename = f"{real_session_id}_on_{device_id}"
        file_metadata = cb.get_object(f"{CBLR_BASE}/session/{session_id}/file/{file_id}")
        if file_metadata:
            filepath = get_os_independent_filepath(file_metadata["file_name"])
            # BUG FIX: append the remote basename to the session-derived
            # prefix instead of discarding it (source had a scrubbed
            # "(unknown)" placeholder here).
            filename = f"{filename}_{filepath.name}"
        result = cb.session.get(f"{CBLR_BASE}/session/{session_id}/file/{file_id}/content", stream=True)
        if result.status_code != 200:
            LOGGER.error(
                f"got {result.status_code} from server getting file {file_id} content for session {session_id}"
            )
            return
        with open(filename, "wb") as fp:
            for chunk in result.iter_content(io.DEFAULT_BUFFER_SIZE):
                fp.write(chunk)
        if os.path.exists(filename):
            # BUG FIX: log the actual path written (was a scrubbed
            # "(unknown)" placeholder).
            LOGGER.info(f"wrote: {filename}")
        return os.path.exists(filename)
    except ObjectNotFoundError:
        LOGGER.warning(f"no file {file_id} content with session {session_id}")
        return
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 20:15:19 2021
@author: Christian
"""
import hysteresis as hys
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Button
# from matplotlib.animation import FuncAnimation
import numpy as np
# Add this function to somewhere else
def init_Animation():
    """Create and return a fresh matplotlib (figure, axes) pair for an animation."""
    return plt.subplots()
def getAnixy(rawFrames, skipFrames):
    """Downsample a 2-D (N, 2) frame array, keeping every *skipFrames*-th row."""
    downsampled = rawFrames[::skipFrames, :]
    return downsampled
def getAniFrames(x, targetdx):
    """
    Return a frame value approximately every *targetdx* along x. Useful for
    pre-processing data when the input has a variable timestep.

    No linear interpolation is used for intermediate frames: each emitted
    frame repeats the current x value until the running jj*targetdx grid
    catches up with it.

    Parameters
    ----------
    x : sequence of float
        Monotonically increasing sample positions (e.g. times).
    targetdx : float
        Desired spacing between output frames.

    Returns
    -------
    np.ndarray
        The selected frame values.
    """
    # Fixed docstring typos ("linearl inerpolatin") and renamed locals;
    # behavior is unchanged.
    frames_out = []
    jj = 0
    for ii in range(len(x)):
        # Emit x[ii] once per grid step that falls before it.
        while jj * targetdx < x[ii]:
            frames_out.append(x[ii])
            jj += 1
    return np.array(frames_out)
class AnimationBase:
    """Base class for matplotlib animations: owns the figure/axes and wires
    mouse clicks to :meth:`toggle_pause` (a no-op hook for subclasses)."""
    def __init__(self):
        # Play/pause state flag; toggled by togglePlay().
        self.play = True
        # Replace with imported function
        fig, ax = init_Animation()
        # Connect clicking
        # fig.canvas.mpl_connect('button_press_event', self.on_click)
        fig.canvas.mpl_connect('button_press_event', self.toggle_pause)
        self.fig = fig
        self.ax = ax
    def togglePlay(self):
        # Invert the play flag.
        self.play = self.play == False
    def on_click(self, event):
        # Return the pixel coordinates of a mouse click.
        xclick = event.x
        yclick = event.y
        return xclick, yclick
        # print(xclick, yclick)
    # To be overwritten
    def toggle_pause(self,event, *args, **kwargs):
        pass
        # # Check where the click happened
        # (xm,ym),(xM,yM) = plotSlider.label.clipbox.get_points()
        # if xm < event.x < xM and ym < event.y < yM:
        #     # Event happened within the slider, ignore since it is handled in update_slider
        #     return
        # else:
        #     # Toggle on off based on clicking
        #     global is_paused
        #     if is_paused == True:
        #         is_paused=False
        #     elif is_paused == False:
        #         is_paused=True
# class FrameHelper():
# def __init__(self, pointsPerFrame = 1, skipFrames = 1, skipStart = 0, skipEnd = 0):
# self.pointsPerFrame = pointsPerFrame
class Animation(AnimationBase):
    """Animate a single hysteresis curve by progressively drawing its xy data.

    Parameters mirror JointAnimation: ``pointsPerFrame`` points are revealed
    per animation frame, and ``skipFrames`` downsamples the raw xy data.
    NOTE(review): ``skipStart``/``skipEnd`` are accepted but unused.
    """
    def __init__(self, Curve, pointsPerFrame = 1, skipFrames = 1, skipStart = 0, skipEnd = 0, interval = 50):
        super().__init__()
        self.Curve = Curve
        self.xy = Curve.xy
        self.pointsPerFrame = pointsPerFrame
        self.interval = interval
        # Downsampled copy of the curve used for drawing.
        xyAni = getAnixy(Curve.xy, skipFrames)
        self.xyAni = xyAni
        # self.xyAni = self.xy
        self.Nframes = int(len(self.xyAni) / pointsPerFrame)
        self.frames = np.arange(self.Nframes)
        self.lines = []
        # self.fig.canvas.mpl_connect('button_press_event', self.toggle_pause)
    # def setAniXY()
    def initfunc(self):
        # FuncAnimation init: draw the full curve once and cache the artist.
        line = plt.plot(self.xyAni[:,0], self.xyAni[:,1])[0]
        self.lines.append(line)    # def initAnimation(self):
        return line,
    def update(self, frame):
        # for ii in range()
        # Reveal the first `frame * pointsPerFrame` points of the curve.
        points = int(frame*self.pointsPerFrame)
        newXY = self.xyAni[:points,:]
        line = self.lines[0]
        line.set_data(newXY[:,0], newXY[:,1])
        return line,
    def Animate(self):
        # Keep a reference to the FuncAnimation so it isn't garbage-collected.
        self.ani = animation.FuncAnimation(self.fig, self.update, self.frames, self.initfunc,
                                           interval=self.interval, blit=True)
    # def toggle_pause(self, *args, **kwargs):
    #     self.togglePlay()
    #     if self.play:
    #         self.ani.resume()
    #     else:
    #         self.ani.pause()
class JointAnimation(AnimationBase):
    """Animate several hysteresis curves together on one figure, revealing
    ``pointsPerFrame`` points of every curve per animation frame.

    The frame count is derived from the FIRST curve; all curves are assumed
    to have compatible lengths after downsampling — TODO confirm.
    NOTE(review): ``skipStart``/``skipEnd`` are accepted but unused.
    """
    def __init__(self, Curves, pointsPerFrame = 1, skipFrames = 1, skipStart = 0, skipEnd = 0, interval = 50):
        super().__init__()
        self.pointsPerFrame = pointsPerFrame
        self.interval = interval
        self.Curves = Curves
        self.Ncurves = len(Curves)
        # Frame count comes from the first curve's downsampled length.
        xyAni = getAnixy(Curves[0].xy, skipFrames)
        self.Nframes = int(len(xyAni) / pointsPerFrame)
        self.frames = np.arange(self.Nframes)
        self.xyCurves = [None]*self.Ncurves
        self.lines = []
        # Store a downsampled copy of every curve for drawing.
        for ii in range(self.Ncurves):
            curve = self.Curves[ii]
            xy = curve.xy
            xyAni = getAnixy(xy, skipFrames)
            self.xyCurves[ii] = xyAni
            # self.xyAni = xyAni
        # self.fig.canvas.mpl_connect('button_press_event', self.toggle_pause)
    # def setAniXY()
    def initfunc(self):
        # FuncAnimation init: draw each curve once and cache the artists.
        for ii in range(self.Ncurves):
            tempXY = self.xyCurves[ii]
            line = plt.plot(tempXY[:,0], tempXY[:,1])[0]
            self.lines.append(line)    # def initAnimation(self):
        return self.lines
    def update(self, frame):
        # print(frame)
        # Reveal the first `frame * pointsPerFrame` points of every curve.
        points = int(frame*self.pointsPerFrame)
        # print(points)
        lines = [None]*self.Ncurves
        for ii in range(self.Ncurves):
            tempXY = self.xyCurves[ii]
            # print()
            newXY = tempXY[:points,:]
            line = self.lines[ii]
            line.set_data(newXY[:,0], newXY[:,1])
            lines[ii] = line
            # lines[ii] = line
        # lines = self.lines
        return lines
    def Animate(self):
        # NOTE(review): uses a hard-coded interval=50 instead of
        # self.interval (unlike Animation.Animate) — confirm intent.
        self.ani = animation.FuncAnimation(self.fig, self.update, self.frames, self.initfunc,
                                           interval=50, blit=True)
# axSlider = plt.axes([0.25, .03, 0.50, 0.02])
# plotSlider = Slider(axSlider, 'Time', framesTime[FrameStart], framesTime[FrameEnd], valinit=framesTime[FrameStart])
# # Slider Location and size relative to plot
# # [x, y, xsize, ysize]
# axSlider = plt.axes([0.25, .03, 0.50, 0.02])
# plotSlider = Slider(axSlider, 'Time', framesTime[FrameStart], framesTime[FrameEnd], valinit=framesTime[FrameStart])
# # Animation controls
# global is_paused
# is_paused = False # True if user has taken control of the animation
# def on_click(event):
# # Check where the click happened
# (xm,ym),(xM,yM) = plotSlider.label.clipbox.get_points()
# if xm < event.x < xM and ym < event.y < yM:
# # Event happened within the slider, ignore since it is handled in update_slider
# return
# else:
# # Toggle on off based on clicking
# global is_paused
# if is_paused == True:
# is_paused=False
# elif is_paused == False:
# is_paused=True
# def animate2D_slider(Time):
# """
# The slider value is liked with the plot - we update the plot by updating
# the slider.
# """
# global is_paused
# is_paused=True
# # Convert time to frame
# TimeStep = (np.abs(framesTime - Time)).argmin()
# # The current node coordinants in (x,y) or (x,y,z)
# CurrentNodeCoords = nodes[:,1:] + Disp[TimeStep,:,:]
# # Update Plots
# # update node locations
# EqfigNodes.set_xdata(CurrentNodeCoords[:,0])
# EqfigNodes.set_ydata(CurrentNodeCoords[:,1])
# # Get new node mapping
# # I don't like doing this loop every time - there has to be a faster way
# xy_labels = {}
# for jj in range(Nnodes):
# xy_labels[nodeLabels[jj]] = CurrentNodeCoords[jj,:]
# # Define the surface
# SurfCounter = 0
# # update element locations
# for jj in range(Nele):
# # Get the node number for the first and second node connected by the element
# TempNodes = elements[jj][1:]
# # This is the xy coordinates of each node in the group
# TempNodeCoords = [xy_labels[node] for node in TempNodes]
# coords_x = [xy[0] for xy in TempNodeCoords]
# coords_y = [xy[1] for xy in TempNodeCoords]
# # Update element lines
# EqfigLines[jj].set_xdata(coords_x)
# EqfigLines[jj].set_ydata(coords_y)
# # print('loop start')
# # Update the surface if necessary
# if 2 < len(TempNodes):
# tempxy = np.column_stack([coords_x, coords_y])
# EqfigSurfaces[SurfCounter].xy = tempxy
# SurfCounter += 1
# # update time Text
# # time_text.set_text("Time= "+'%.2f' % time[TimeStep]+ " s")
# # redraw canvas while idle
# fig.canvas.draw_idle()
# return EqfigNodes, EqfigLines, EqfigSurfaces, EqfigText
# Saving
# if Movie != "none":
# MovefileName = Movie + '.mp4'
# ODBdir = Model+"_ODB" # ODB Dir name
# Movfile = os.path.join(ODBdir, LoadCase, MovefileName)
# print("Saving the animation movie as "+MovefileName+" in "+ODBdir+"->"+LoadCase+" folder")
# ani.save(Movfile, writer='ffmpeg')
|
#
# Copyright (c) 2015-2020 <NAME> <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_form.field module
Form field definitions.
"""
from zope.interface import Interface, Invalid, alsoProvides, implementer
from zope.interface.interface import InterfaceClass
from zope.location import locate
from zope.schema import getFieldsInOrder
from zope.schema.interfaces import IField as ISchemaField
from pyams_form.error import MultipleErrors
from pyams_form.interfaces import DISPLAY_MODE, IDataConverter, IField, IFields, \
IManagerValidator, INPUT_MODE, IValidator
from pyams_form.interfaces.error import IErrorViewSnippet
from pyams_form.interfaces.form import IContextAware, IFieldsForm, IFormAware
from pyams_form.interfaces.widget import IFieldWidget, IWidgets
from pyams_form.util import Manager, SelectionManager, expand_prefix
from pyams_form.widget import AfterWidgetUpdateEvent
from pyams_layer.interfaces import IFormLayer
from pyams_utils.adapter import adapter_config
from pyams_utils.interfaces.form import IDataManager, NO_VALUE
from pyams_utils.registry import get_current_registry
__docformat__ = 'restructuredtext'
def _initkw(keep_readonly=(), omit_readonly=False, **defaults):
"""Init keywords"""
return keep_readonly, omit_readonly, defaults
class WidgetFactories(dict):
    """Mapping of widget mode -> widget factory, with an optional fallback.

    When ``default`` is set (truthy), it is returned for any key that is not
    explicitly present, by both item access and ``get``.
    """
    def __init__(self):
        super().__init__()
        # Fallback factory used for every mode without an explicit entry
        self.default = None
    def __getitem__(self, key):
        if self.default and key not in self:
            return self.default
        return super().__getitem__(key)
    def get(self, key, default=None):
        if self.default and key not in self:
            return self.default
        return super().get(key, default)
class WidgetFactoryProperty:
    """Data descriptor exposing a per-instance WidgetFactories mapping.

    Reading the attribute returns the instance's factories mapping (created
    lazily); assigning to the attribute sets the mapping's fallback default
    factory rather than replacing the mapping itself.
    """
    def __get__(self, inst, klass):
        return self._factories(inst)
    def __set__(self, inst, value):
        self._factories(inst).default = value
    @staticmethod
    def _factories(inst):
        # Lazily create the backing mapping on first access
        if not hasattr(inst, '_widget_factories'):
            inst._widget_factories = WidgetFactories()
        return inst._widget_factories
@implementer(IField)
class Field:
    """Form field wrapper around a schema field.

    Combines the raw schema field with form-level settings (prefix, mode,
    owning interface, context/show-default flags) under a prefixed name.
    """
    widget_factory = WidgetFactoryProperty()
    # pylint: disable=too-many-arguments
    def __init__(self, field, name=None, prefix='', mode=None, interface=None,
                 ignore_context=None, show_default=None):
        self.field = field
        # Fall back to the schema field's own name when none is given
        resolved = field.__name__ if name is None else name
        assert resolved
        self.__name__ = expand_prefix(prefix) + resolved
        self.prefix = prefix
        self.mode = mode
        # Fall back to the schema field's owning interface
        self.interface = field.interface if interface is None else interface
        self.ignore_context = ignore_context
        self.show_default = show_default
    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.__name__)
@implementer(IFields)
class Fields(SelectionManager):
    """Field manager.

    An ordered mapping of field name -> Field, built from a mixture of
    schema interfaces, bare schema fields, Field instances and other Fields
    managers. Supports `select`/`omit` to derive sub-managers.
    """
    manager_interface = IFields
    def __init__(self, *args, **kw):  # pylint: disable=too-many-branches
        # kw is split into readonly-handling flags plus defaults that are
        # forwarded to every Field() constructed below.
        keep_readonly, omit_readonly, defaults = _initkw(**kw)
        # Phase 1: normalize every positional argument into
        # (name, field-or-Field, interface) triples, preserving order.
        fields = []
        for arg in args:
            if isinstance(arg, InterfaceClass):
                # A whole schema interface: take all its fields in order
                for name, field in getFieldsInOrder(arg):
                    fields.append((name, field, arg))
            elif ISchemaField.providedBy(arg):
                # A bare schema field: it must carry its own name
                name = arg.__name__
                if not name:
                    raise ValueError("Field has no name")
                fields.append((name, arg, arg.interface))
            elif self.manager_interface.providedBy(arg):
                # Another Fields manager: merge its (already wrapped) fields
                for form_field in arg.values():
                    fields.append(
                        (form_field.__name__, form_field, form_field.interface))
            elif isinstance(arg, Field):
                # An already-wrapped form field
                fields.append((arg.__name__, arg, arg.interface))
            else:
                raise TypeError("Unrecognized argument type", arg)
        super().__init__()
        # Phase 2: wrap raw schema fields into Field instances, applying the
        # readonly filtering, and register everything under its full name.
        for name, field, iface in fields:
            if isinstance(field, Field):
                form_field = field
            else:
                if field.readonly:
                    # Drop readonly fields unless explicitly kept
                    if omit_readonly and (name not in keep_readonly):
                        continue
                custom_defaults = defaults.copy()
                if iface is not None:
                    custom_defaults['interface'] = iface
                form_field = Field(field, **custom_defaults)
                # Field() may have applied a prefix; use the full name
                name = form_field.__name__
            if name in self:
                raise ValueError("Duplicate name", name)
            self[name] = form_field
    def select(self, *names, **kwargs):  # pylint: disable=arguments-differ
        """See interfaces.IFields"""
        prefix = kwargs.pop('prefix', None)
        interface = kwargs.pop('interface', None)
        assert len(kwargs) == 0
        if prefix:
            names = [expand_prefix(prefix) + name for name in names]
        mapping = self
        if interface is not None:
            # When an interface is given, look names up by the *unprefixed*
            # schema-field name, restricted to fields of that interface.
            mapping = {field.field.__name__: field for field in self.values()
                       if field.field.interface is interface}
        return self.__class__(*[mapping[name] for name in names])
    def omit(self, *names, **kwargs):  # pylint: disable=arguments-differ
        """See interfaces.IFields"""
        prefix = kwargs.pop('prefix', None)
        interface = kwargs.pop('interface', None)
        assert len(kwargs) == 0
        if prefix:
            names = [expand_prefix(prefix) + name for name in names]
        # Keep a field unless it matches by full name (no interface given) or
        # by schema-field name within the given interface.
        return self.__class__(
            *[field for name, field in self.items()
              if not ((name in names and interface is None) or
                      (field.field.interface is interface and
                       field.field.__name__ in names))])
@adapter_config(required=(IFieldsForm, IFormLayer, Interface),
                provides=IWidgets)
class FieldWidgets(Manager):
    """Widget manager for IFieldWidget.

    Builds one widget per form field in `update()`, and pulls validated
    values back out of the request in `extract()`/`extract_raw()`.
    """
    prefix = 'widgets.'
    mode = INPUT_MODE
    errors = ()
    has_required_fields = False
    ignore_context = False
    ignore_request = False
    ignore_readonly = False
    ignore_required_on_extract = False
    set_errors = True
    def __init__(self, form, request, content):
        super().__init__()
        self.form = form
        self.request = request
        self.content = content
    def validate(self, data):
        """Validate widgets fields.

        Groups extracted values by their owning schema interface and runs one
        IManagerValidator per schema; returns the concatenated error tuple.
        """
        fields = self.form.fields.values()
        # Step 1: Collect the data for the various schemas
        schema_data = {}
        for field in fields:
            schema = field.interface
            if schema is None:
                continue
            field_data = schema_data.setdefault(schema, {})
            if field.__name__ in data:
                # Map back from the (possibly prefixed) form name to the
                # schema field name the validator expects.
                field_data[field.field.__name__] = data[field.__name__]
        # Step 2: Validate the individual schemas and collect errors
        errors = ()
        content = self.content
        if self.ignore_context:
            content = None
        registry = self.request.registry
        for schema, field_data in schema_data.items():
            validator = registry.getMultiAdapter((content, self.request, self.form, schema, self),
                                                 IManagerValidator)
            errors += validator.validate(field_data)
        return errors
    def update(self):
        """See interfaces.widget.IWidgets"""
        # Create a unique prefix.
        prefix = expand_prefix(self.form.prefix) + expand_prefix(self.prefix)
        # Walk through each field, making a widget out of it.
        data = {}
        data.update(self)
        registry = self.request.registry
        for field in self.form.fields.values():
            # Step 0. Determine whether the context should be ignored.
            ignore_context = self.ignore_context
            if field.ignore_context is not None:
                # Per-field setting overrides the manager-wide default
                ignore_context = field.ignore_context
            # Step 1: Determine the mode of the widget.
            mode = self.mode
            if field.mode is not None:
                mode = field.mode
            elif field.field.readonly and not self.ignore_readonly:
                mode = DISPLAY_MODE
            elif not ignore_context:
                # If we do not have enough permissions to write to the
                # attribute, then switch to display mode.
                dman = registry.getMultiAdapter((self.content, field.field), IDataManager)
                if not dman.can_write():
                    mode = DISPLAY_MODE
            # Step 2: Get the widget for the given field.
            short_name = field.__name__
            new_widget = True
            if short_name in self:
                # reuse existing widget
                widget = data[short_name]
                new_widget = False
            elif field.widget_factory.get(mode) is not None:
                # A custom factory was registered for this field/mode
                factory = field.widget_factory.get(mode)
                widget = factory(field.field, self.request)
            else:
                # Fall back to the registry's default widget adapter
                widget = registry.getMultiAdapter((field.field, self.request), IFieldWidget)
            # Step 3: Set the prefix for the widget
            widget.name = prefix + short_name
            widget.id = (prefix + short_name).replace('.', '-')
            # Step 4: Set the context
            widget.context = self.content
            # Step 5: Set the form
            widget.form = self.form
            # Optimization: Set both interfaces here, rather in step 4 and 5:
            # ``alsoProvides`` is quite slow
            alsoProvides(widget, IContextAware, IFormAware)
            # Step 6: Set some variables
            widget.ignore_context = ignore_context
            widget.ignore_request = self.ignore_request
            if field.show_default is not None:
                widget.show_default = field.show_default
            # Step 7: Set the mode of the widget
            widget.mode = mode
            # Step 8: Update the widget
            widget.update()
            get_current_registry().notify(AfterWidgetUpdateEvent(widget))
            # Step 9: Add the widget to the manager
            if widget.required:
                self.has_required_fields = True
            if new_widget:
                data[short_name] = widget
                locate(widget, self, short_name)
        # Store widgets in the order declared by the form's fields
        # (create_according_to_list is inherited from Manager).
        self.create_according_to_list(data, self.form.fields.keys())
    def _extract(self, return_raw=False):
        """Shared implementation of extract()/extract_raw().

        Returns a (data, errors) tuple; per-widget conversion/validation
        errors and schema-level errors are both rendered as error views.
        """
        data = {}
        errors = ()
        registry = self.request.registry
        for name, widget in self.items():
            if widget.mode == DISPLAY_MODE:
                # Display-only widgets never contribute submitted data
                continue
            value = widget.field.missing_value
            try:
                widget.set_errors = self.set_errors
                raw = widget.extract()
                if raw is not NO_VALUE:
                    # pylint: disable=assignment-from-no-return
                    value = IDataConverter(widget).to_field_value(raw)
                widget.ignore_required_on_validation = self.ignore_required_on_extract
                registry.getMultiAdapter((self.content, self.request, self.form,
                                          getattr(widget, 'field', None), widget),
                                         IValidator).validate(value)
            except (Invalid, ValueError, MultipleErrors) as error:
                # Render the error through an adapter so display is pluggable
                view = registry.getMultiAdapter((error, self.request, widget, widget.field,
                                                 self.form, self.content),
                                                IErrorViewSnippet)
                view.update()
                if self.set_errors:
                    widget.error = view
                errors += (view,)
            else:
                # Success path: store under the widget's own name
                name = widget.__name__
                if return_raw:
                    data[name] = raw
                else:
                    data[name] = value
        # Schema-level (cross-field) validation on everything extracted
        for error in self.validate(data):
            view = registry.getMultiAdapter((error, self.request, None, None,
                                             self.form, self.content),
                                            IErrorViewSnippet)
            view.update()
            errors += (view,)
        if self.set_errors:
            self.errors = errors
        return data, errors
    def extract(self):
        """See interfaces.IWidgets"""
        return self._extract(return_raw=False)
    def extract_raw(self):
        """See interfaces.IWidgets"""
        return self._extract(return_raw=True)
    def copy(self):
        """See interfaces.ISelectionManager"""
        clone = self.__class__(self.form, self.request, self.content)
        # Copy the widget mapping itself (dict.update via Manager base)
        super(self.__class__, clone).update(self)
        return clone
|
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard open-source IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3-SNAPSHOT
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from tb_rest_client.models.models_ce import SmsProviderConfiguration
class TwilioSmsProviderConfiguration(SmsProviderConfiguration):
    """Swagger-generated model for a Twilio SMS provider configuration.

    NOTE: generated code -- keep manual edits to comments only.
    Do not edit the class manually.
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type
    swagger_types = {
        'account_sid': 'str',
        'account_token': 'str',
        'number_from': 'str'
    }
    # Merge in the parent model's declared types, if any
    if hasattr(SmsProviderConfiguration, "swagger_types"):
        swagger_types.update(SmsProviderConfiguration.swagger_types)
    # Python attribute name -> JSON key in the API payload
    attribute_map = {
        'account_sid': 'accountSid',
        'account_token': 'accountToken',
        'number_from': 'numberFrom'
    }
    if hasattr(SmsProviderConfiguration, "attribute_map"):
        attribute_map.update(SmsProviderConfiguration.attribute_map)
    def __init__(self, account_sid=None, account_token=None, number_from=None, *args, **kwargs):  # noqa: E501
        """TwilioSmsProviderConfiguration - a model defined in Swagger"""  # noqa: E501
        self._account_sid = None
        self._account_token = None
        self._number_from = None
        self.discriminator = None
        # Go through the property setters so any validation there applies
        if account_sid is not None:
            self.account_sid = account_sid
        if account_token is not None:
            self.account_token = account_token
        if number_from is not None:
            self.number_from = number_from
        SmsProviderConfiguration.__init__(self, *args, **kwargs)
    @property
    def account_sid(self):
        """Gets the account_sid of this TwilioSmsProviderConfiguration.  # noqa: E501

        Twilio account Sid.  # noqa: E501

        :return: The account_sid of this TwilioSmsProviderConfiguration.  # noqa: E501
        :rtype: str
        """
        return self._account_sid
    @account_sid.setter
    def account_sid(self, account_sid):
        """Sets the account_sid of this TwilioSmsProviderConfiguration.

        Twilio account Sid.  # noqa: E501

        :param account_sid: The account_sid of this TwilioSmsProviderConfiguration.  # noqa: E501
        :type: str
        """
        self._account_sid = account_sid
    @property
    def account_token(self):
        """Gets the account_token of this TwilioSmsProviderConfiguration.  # noqa: E501

        Twilio account Token.  # noqa: E501

        :return: The account_token of this TwilioSmsProviderConfiguration.  # noqa: E501
        :rtype: str
        """
        return self._account_token
    @account_token.setter
    def account_token(self, account_token):
        """Sets the account_token of this TwilioSmsProviderConfiguration.

        Twilio account Token.  # noqa: E501

        :param account_token: The account_token of this TwilioSmsProviderConfiguration.  # noqa: E501
        :type: str
        """
        self._account_token = account_token
    @property
    def number_from(self):
        """Gets the number_from of this TwilioSmsProviderConfiguration.  # noqa: E501

        The number/id of a sender.  # noqa: E501

        :return: The number_from of this TwilioSmsProviderConfiguration.  # noqa: E501
        :rtype: str
        """
        return self._number_from
    @number_from.setter
    def number_from(self, number_from):
        """Sets the number_from of this TwilioSmsProviderConfiguration.

        The number/id of a sender.  # noqa: E501

        :param number_from: The number_from of this TwilioSmsProviderConfiguration.  # noqa: E501
        :type: str
        """
        self._number_from = number_from
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the model itself subclasses dict, include its own items too
        if issubclass(TwilioSmsProviderConfiguration, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TwilioSmsProviderConfiguration):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
import argparse
import csv
import json
import logging
import sys
import os
import urllib2
import unix_converter as unix
__author__ = '<NAME> & <NAME>'
__date__ = '20150920'
__version__ = 0.03
__description__ = 'This scripts downloads address transactions using blockchain.info public APIs'
def main(address, output_dir):
    """
    The main function handles coordinating logic
    :param address: The Bitcoin Address to lookup
    :param output_dir: The output directory to write the CSV results
    :return: Nothing
    """
    logging.info('Initiated program for {} address'.format(address))
    logging.info('Obtaining JSON structured data from blockchain.info')
    # getAddress() exits the process on URL errors, so raw_account is always
    # a readable response object here.
    raw_account = getAddress(address)
    account = json.loads(raw_account.read())
    printHeader(account)
    parseTransactions(account, output_dir)
def getAddress(address):
    """
    The getAddress function uses the blockchain.info Data API to pull
    pull down account information and transactions for address of interest
    :param address: The Bitcoin Address to lookup
    :return: The response of the url request
    """
    url = 'https://blockchain.info/address/{}?format=json'.format(address)
    try:
        return urllib2.urlopen(url)
    # Python 2 'except X, e' syntax: this module targets Python 2 (urllib2,
    # print statements).
    except urllib2.URLError, e:
        logging.error('URL Error for {}'.format(url))
        # HTTPError instances carry code/headers; plain URLErrors do not
        if hasattr(e, 'code') and hasattr(e, 'headers'):
            logging.debug('{}: {}'.format(e.code, e.reason))
            logging.debug('{}'.format(e.headers))
        print 'Received URL Error for {}'.format(url)
        logging.info('Program exiting...')
        # Any network failure is fatal for this script
        sys.exit(1)
def parseTransactions(account, output_dir):
    """
    The parseTransactions function appends transaction data into a
    nested list structure so it can be successfully used by the csvWriter function.
    :param account: The JSON decoded account and transaction data
    :param output_dir: The output directory to write the CSV results
    :return: Nothing
    """
    msg = 'Parsing transactions...'
    logging.info(msg)
    print msg
    transactions = []
    # Each row: index, date, hash, inputs, output addrs, output values, total
    for i, tx in enumerate(account['txs']):
        transaction = []
        outputs = {}
        inputs = getInputs(tx)
        transaction.append(i)
        transaction.append(unix.unixConverter(tx['time']))
        transaction.append(tx['hash'])
        transaction.append(inputs)
        for output in tx['out']:
            # * 10**-8 converts satoshis to BTC.
            # NOTE(review): keying by address means a transaction paying the
            # same address twice keeps only the last value -- confirm this
            # collapsing is acceptable.
            outputs[output['addr']] = output['value'] * 10**-8
        transaction.append('\n'.join(outputs.keys()))
        transaction.append('\n'.join(str(v) for v in outputs.values()))
        transaction.append('{:.8f}'.format(sum(outputs.values())))
        transactions.append(transaction)
    csvWriter(transactions, output_dir)
def printHeader(account):
    """
    The printHeader function prints overall header information
    containing basic address information.
    :param account: The JSON decoded account and transaction data
    :return: Nothing
    """
    print 'Address:', account['address']
    # API balances are in satoshis; * 10**-8 converts to BTC
    print 'Current Balance: {:.8f} BTC'.format(account['final_balance'] * 10**-8)
    print 'Total Sent: {:.8f} BTC'.format(account['total_sent'] * 10**-8)
    print 'Total Received: {:.8f} BTC'.format(account['total_received'] * 10**-8)
    print 'Number of Transactions:', account['n_tx']
    # Prints a 22-character '=' separator line
    print '{:=^22}\n'.format('')
def getInputs(tx):
    """
    The getInputs function is a small helper function that returns
    input addresses for a given transaction
    :param tx: A single instance of a Bitcoin transaction
    :return: input_string, a newline-delimited string of input addresses
    """
    # NOTE(review): coinbase transactions have no 'prev_out' on their input,
    # which would raise a KeyError here -- confirm upstream data only
    # contains regular transactions.
    inputs = [input_addr['prev_out']['addr'] for input_addr in tx['inputs']]
    # '\n'.join handles the empty, single and multi-address cases uniformly,
    # replacing the redundant len() > 1 branch of the original.
    return '\n'.join(inputs)
def csvWriter(data, output_dir):
    """
    The csvWriter function writes transaction data into a CSV file
    :param data: The parsed transaction data in nested list
    :param output_dir: The output directory to write the CSV results
    :return: Nothing
    """
    logging.info('Writing output to {}'.format(output_dir))
    print 'Writing output.'
    headers = ['Index', 'Date', 'Transaction Hash', 'Inputs', 'Outputs', 'Values', 'Total']
    try:
        # 'wb' is the Python 2 csv convention (py3 would use newline='')
        with open(output_dir, 'wb') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(headers)
            for transaction in data:
                writer.writerow(transaction)
            # flush()/close() are redundant inside the with-block (the
            # context manager closes the file on exit) but harmless.
            csvfile.flush()
            csvfile.close()
    # Python 2 'except X, e' syntax
    except IOError, e:
        logging.error('Error writing output to {}.\nGenerated message: {}.'.format(e.filename, e.strerror))
        print 'Error writing to CSV file. Please check output argument {}'.format(e.filename)
        logging.info('Program exiting.')
        sys.exit(1)
    # Successful write also ends the program here
    logging.info('Program exiting.')
    print 'Program exiting.'
    sys.exit(0)
if __name__ == '__main__':
    # Run this code if the script is run from the command line.
    # NOTE(review): the 'version=' kwarg to ArgumentParser is Python-2-only;
    # Python 3 argparse removed it in favour of add_argument(action='version').
    parser = argparse.ArgumentParser(description='BTC Address Lookup', version=str(__version__),
                                     epilog='Developed by ' + __author__ + ' on ' + __date__)
    parser.add_argument('ADDR', help='Bitcoin Address')
    parser.add_argument('OUTPUT', help='Output CSV file')
    parser.add_argument('-l', help='Specify log directory. Defaults to current working directory.')
    args = parser.parse_args()
    # Set up Log
    if args.l:
        if not os.path.exists(args.l):
            os.makedirs(args.l)  # create log directory path
        log_path = os.path.join(args.l, 'btc_addr_lookup.log')
    else:
        # No -l given: log next to the current working directory
        log_path = 'btc_addr_lookup.log'
    logging.basicConfig(filename=log_path, level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)s | %(message)s', filemode='w')
    logging.info('Starting Bitcoin Address Lookup v.' + str(__version__))
    logging.debug('System ' + sys.platform)
    logging.debug('Version ' + sys.version)
    # Print Script Information
    print '{:=^22}'.format('')
    print '{} {}'.format('Bitcoin Address Lookup, ', __version__)
    print '{:=^22} \n'.format('')
    # Run main program
    main(args.ADDR, args.OUTPUT)
|
#!/usr/bin/python3.6
# SQLlite3 : https://docs.python.org/3/library/sqlite3.html
import sqlite3
import re
import pandas as pd
# Files
# Path to the movie/ratings SQLite database (relative to this script)
database = "../database/test.db"
# connection to database
databaseConnection = sqlite3.connect(database)
# Cursor used for every query below (stray trailing semicolon removed --
# statement separators are not Python idiom)
dbSql = databaseConnection.cursor()
# SELECT titles from movies
# movieTitle = dbSql.execute('''SELECT movie.title FROM movie''').fetchall()
# print(movieTitle)
# print("Found " + str(len(movieTitle)) + " Results")
# SELECT titles of movies from genre ...
genre = "Comedy"
#
#movieTitle = dbSql.execute('''
#	SELECT * FROM movie INNER JOIN movieGenres INNER JOIN genre
#	on movie.id = movieGenres.id_movie
#	and movieGenres.id_genre = genre.id
#	and genre.genre = ?
#''', (genre,)).fetchall()
#print(movieTitle)
# print("Found " + str(len(movieTitle)) + " Results")
# movieTitle = dbSql.execute('''SELECT count(*) FROM movie where overview IS NULL''').fetchall()
# print(movieTitle)
minimumRatings = 20 # minimum count of ratings per user in our data base
# Fetch all ratings from users who rated at least `minimumRatings` movies.
# The CTE keeps one row per qualifying user; the join filters the rating
# table down to those users.
ratings = dbSql.execute('''WITH tmpRatings AS (SELECT id_user, rating FROM rating
GROUP BY id_user
HAVING count(rating) >= ?)
SELECT rating.id_user, rating.id_movie, rating.rating FROM rating INNER JOIN tmpRatings
on rating.id_user = tmpRatings.id_user
''', (minimumRatings,)).fetchall()
# print(ratings[:10])
# print(len(ratings))
# Wrap the raw rows for downstream analysis
ratings = pd.DataFrame(ratings, columns = ['id_user', 'id_movie', 'rating'])
# ratings = dbSql.execute('''SELECT MIN(mycount) FROM (SELECT id_user, count(rating) mycount FROM rating GROUP BY id_user)''').fetchall()
# SVD Statistics
# Load all stored SVD statistics rows (schema defined elsewhere in the DB)
svdStatistics = dbSql.execute('''SELECT * FROM svdStatistics''').fetchall()
# print(svdStatistics)
# SVD ID BLOCK
idBlock = 2
# Narrow to a single SVD block; note this rebinds svdStatistics, discarding
# the full result fetched above.
svdStatistics = dbSql.execute('''SELECT * FROM svdStatistics WHERE id_block = ? ''', (idBlock,)).fetchall()
# Get users
forMovie = 1
ratingAbove = 5
# select movies with similar rating from the same users
# wellRatedMovies = dbSql.execute('''WITH tmpSelectedUsers AS (SELECT id_user
# FROM rating
# WHERE id_movie = ? and rating.rating >= ?)
# SELECT count(rating.id_movie)
# FROM rating INNER JOIN tmpSelectedUsers
# on rating.id_user = tmpSelectedUsers.id_user
# where rating.rating >= ?''', (forMovie, ratingAbove, ratingAbove,)).fetchall()
# print(wellRatedMovies)
# sameFromUsers = dbSql.execute('''SELECT count(rating.id_movie) FROM rating
# where rating.rating >= ? and rating.id_user IN (SELECT rating.id_user FROM rating
# where rating.rating >= ? and rating.id_movie = ?)''', (ratingAbove, ratingAbove, forMovie,)).fetchall()
# print(sameFromUsers)
# SELECT id and titles of movies from genre ... that have at least a rating of ...
# genre = "Crime"
# rating = 5.0
#
# movieTitle = dbSql.execute('''
# SELECT movie.id, movie.title FROM movie INNER JOIN movieGenres INNER JOIN genre INNER JOIN rating
# on movie.id = movieGenres.id_movie
# and movieGenres.id_genre = genre.id
# and movie.id = rating.id_movie
# and genre.genre = ?
# and rating.rating >= ?
# ''', (genre, rating,)).fetchall()
# print(movieTitle)
# print("Found " + str(len(movieTitle)) + " Results")
# SELECT * from movies
# movieTitle = dbSql.execute('''SELECT * FROM movie WHERE id=1''').fetchall()
# print(movieTitle)
# print("Found " + str(len(movieTitle)) + " Results")
# bestRatedMovies = dbSql.execute('''
# SELECT DISTINCT movie.id, movie.title, movie.overview, movie.image, avg(rating.rating) as avgR
# FROM movie INNER JOIN rating
# on movie.id = rating.id_movie
# and movie.overview NOT NULL
# and movie.image NOT NULL
# GROUP BY movie.id ORDER BY avgR DESC LIMIT 50''').fetchall()
# print(bestRatedMovies)
# Manual upsert of one user's rating for one movie
userId = 611
movieId = 1
newRating = 4.5
# Check whether this (user, movie) pair already has a rating
isMovieRated = dbSql.execute('''SELECT DISTINCT count(rating.rating) FROM rating
where rating.id_movie = ?
and rating.id_user = ?''', (movieId, userId,)).fetchall()
# NOTE(review): sqlite3 does not autocommit DML -- make sure
# databaseConnection.commit() runs before the connection is closed, or the
# UPDATE/INSERT below is silently discarded.
if isMovieRated[0][0] != 0:
    # Existing rating: update in place. fetchall() on a DML statement
    # returns [] and is unnecessary, but harmless.
    dbSql.execute('''UPDATE rating SET rating = ? WHERE rating.id_user = ? AND rating.id_movie = ?''', (newRating, userId, movieId,)).fetchall()
    rating = dbSql.execute('''SELECT DISTINCT rating.rating FROM rating
    where rating.id_movie = ?
    and rating.id_user = ?''', (movieId, userId,)).fetchall()
    print('Movie rating ', str(rating[0][0]))
else:
    # No rating yet: insert a fresh row
    dbSql.execute('''INSERT INTO rating(id_user, id_movie, rating)
    VALUES(?,?,?) ''',(userId, movieId, newRating,)).fetchall()
    rating = dbSql.execute('''SELECT DISTINCT rating.rating FROM rating
    where rating.id_movie = ?
    and rating.id_user = ?''', (movieId, userId,)).fetchall()
    print('Movie NOW rated ', str(rating[0][0]))
# Persist the pending writes (the UPDATE/INSERT above) before closing.
# Bug fix: sqlite3 connections do not autocommit DML, so without commit()
# the rating changes were silently discarded when the connection closed.
databaseConnection.commit()
# closing connection
databaseConnection.close()
|
#!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the PFIF Validator"""
import re
from datetime import datetime
import xml.etree.ElementTree as ET
import urllib
import cgi
# XML Parsing Utilities
def extract_tag(etree_tag):
    """An etree tag comes in the form: {namespace}tag. This returns the tag
    (the empty string when the input does not match at all)."""
    parsed = re.match(r'(?:\{.+\})?(.+)', etree_tag)
    return parsed.group(1) if parsed else ""
# --- Test seams (from PersonFinder): let tests substitute "now" and the
# --- file/url handle used by the validator.
_utcnow_for_test = None  # pylint: disable=c0103
_file_for_test = None  # pylint: disable=c0103

def set_utcnow_for_test(now):
    """Override the clock used by get_utcnow() (pass None to restore)."""
    global _utcnow_for_test  # pylint: disable=w0603
    _utcnow_for_test = now

def set_file_for_test(file_for_test):
    """Override the handle returned by open_file()/open_url() (None restores)."""
    global _file_for_test  # pylint: disable=w0603
    _file_for_test = file_for_test

def open_file(filename, mode='r'):
    """Return the injected test handle if set, else open the real file."""
    injected = _file_for_test
    return injected if injected else open(filename, mode)

# TODO(samking): do incremental URL reading to support massive files
def open_url(url):
    """Return the injected test handle if set, else fetch the URL."""
    injected = _file_for_test
    return injected if injected else urllib.urlopen(url)

def get_utcnow():
    """Return the injected test time if set, else the real current UTC time."""
    return _utcnow_for_test if _utcnow_for_test else datetime.utcnow()
class FileWithLines:
    """A file that keeps track of its line number. From
    http://bytes.com/topic/python/answers/535191-elementtree-line-numbers-iterparse
    """

    def __init__(self, source):
        self.source = source
        self.line_number = 0

    def read(self, num_bytes):  # pylint: disable=W0613
        """Wrapper around file.readline that keeps track of line number.
        Note: num_bytes is ignored -- exactly one line is returned per call,
        which is what lets line_number track parse progress."""
        line = self.source.readline()
        self.line_number += 1
        return line
# Doesn't inherit from ET.ElementTree to avoid messing with the
# ET.ElementTree.parse factory method
class PfifXmlTree():
    """An XML tree with PFIF-XML-specific helper functions.

    On construction, parses the given file, records the line number of every
    element, and extracts/validates the PFIF namespace and version.
    """
    def __init__(self, xml_file):
        self.namespace = None       # PFIF namespace URL from the root tag
        self.version = None         # PFIF version as a float (1.1-1.3)
        self.tree = None            # the parsed ET.ElementTree
        self.line_numbers = {}      # element -> 1-based line number
        self.lines = xml_file.readlines()
        # Rewind so initialize_tree can re-read the same file object
        xml_file.seek(0)
        self.initialize_tree(xml_file)
        self.initialize_pfif_version()
    def initialize_tree(self, xml_file):
        """Reads in the XML tree from the XML file. If the XML file is invalid,
        the XML library will raise an exception."""
        # FileWithLines counts lines as iterparse pulls data, letting us
        # attribute each 'start' event to the line it came from.
        file_with_lines = FileWithLines(xml_file)
        tree_parser = iter(ET.iterparse(file_with_lines, events=['start']))
        # NOTE(review): .next() is Python 2 iterator syntax; Python 3 would
        # need next(tree_parser).
        event, root = tree_parser.next()  # pylint: disable=W0612
        self.line_numbers[root] = file_with_lines.line_number
        for event, elem in tree_parser:
            self.line_numbers[elem] = file_with_lines.line_number
        self.tree = ET.ElementTree(root)
    def initialize_pfif_version(self):
        """Initializes the namespace and version. Raises an exception of the XML
        root does not specify a namespace or tag, if the tag isn't pfif, or if the
        version isn't supported."""
        root = self.tree.getroot()
        tag = root.tag
        # xml.etree.Element.tag is formatted like: {namespace}tag
        match = re.match(r'\{(.+)\}(.+)', tag)
        assert match, 'This XML root node does not specify a namespace and tag'
        self.namespace = match.group(1)
        tag = match.group(2)
        assert tag == 'pfif', 'The root node must be pfif'
        # the correct pfif url is like: http://zesty.ca/pfif/VERSION where VERSION
        # is 1.1, 1.2, or 1.3
        match = re.match(r'http://zesty\.ca/pfif/(\d\.\d)', self.namespace)
        assert match, ('The XML namespace specified is not correct.  It should be '
                       'in the following format: http://zesty.ca/pfif/VERSION')
        self.version = float(match.group(1))
        assert (self.version >= 1.1 and self.version <= 1.3), (
            'This validator only supports versions 1.1-1.3.')
    def getroot(self):
        """wrapper for ET.ElementTree.getroot."""
        return self.tree.getroot()
    def add_namespace_to_tag(self, tag):
        """turns a local tag into a fully qualified tag by adding a namespace """
        return '{' + self.namespace + '}' + tag
    def get_all_persons(self):
        """returns a list of all persons in the tree"""
        return self.tree.findall(self.add_namespace_to_tag('person'))
    def get_child_notes(self):
        """returns a list of all notes that are subnodes of persons"""
        notes = []
        for person in self.get_all_persons():
            notes.extend(person.findall(self.add_namespace_to_tag('note')))
        return notes
    def get_top_level_notes(self):
        """returns a list of all notes that are subnodes of the root node"""
        return self.tree.findall(self.add_namespace_to_tag('note'))
    def get_all_notes(self):
        """returns a list of all notes in the tree (top-level first, then
        per-person child notes)"""
        notes = self.get_top_level_notes()
        notes.extend(self.get_child_notes())
        return notes
    def get_field_text(self, parent, child_tag):
        """Returns the text associated with the child node of parent. Returns none
        if parent doesn't have that child or if the child doesn't have any text"""
        child = parent.find(self.add_namespace_to_tag(child_tag))
        # '!= None' (not 'is not None') kept as-is: Element truthiness is
        # deprecated, so the explicit comparison is intentional here.
        if child != None:
            return child.text
        return None
class Message: # pylint: disable=R0902
  """A container for information about an error or warning message.

  Attributes mirror the constructor arguments: category is the message's
  human-readable category string, is_error distinguishes errors from
  warnings, and the xml_* / *_record_id fields locate the message in the
  validated document.
  """
  def __init__(self, category, extra_data=None, is_error=True,
               xml_line_number=None, xml_tag=None, xml_text=None,
               person_record_id=None, note_record_id=None):
    self.category = category
    self.extra_data = extra_data
    self.is_error = is_error
    self.xml_line_number = xml_line_number
    self.xml_text = xml_text
    self.xml_tag = xml_tag
    self.person_record_id = person_record_id
    self.note_record_id = note_record_id
  def __eq__(self, other):
    # Comparing against a non-Message used to raise AttributeError when the
    # other object had no __dict__; returning NotImplemented lets Python fall
    # back to its default handling (identity, then False) instead.
    if not isinstance(other, Message):
      return NotImplemented
    return self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
    result = self.__eq__(other)
    if result is NotImplemented:
      return result
    return not result
class Categories: # pylint: disable=W0232
  """Constants representing message categories.

  The strings are user-facing: they appear verbatim in rendered diff output.
  (The wording suggests a diff of document A against document B — confirm
  against the caller.)
  """
  # Records present in B but absent from A.
  ADDED_RECORD = 'B has extra records'
  # Records present in A but absent from B.
  DELETED_RECORD = 'B is missing records'
  # Fields present in B but absent from A.
  ADDED_FIELD = 'B has extra fields'
  # Fields present in A but absent from B.
  DELETED_FIELD = 'B is missing fields'
  # Fields present in both documents whose values differ.
  CHANGED_FIELD = 'Values changed'
class MessageGroupingById(object):
  """A class to help group messages by record ID.

  This should contain the logic for grouping messages by record ID, but no UI
  code (it's meant for sharing logic between the HTML and plain text
  displays).
  """
  def __init__(self, messages):
    self.messages = messages
    by_category = MessagesOutput.group_messages_by_category(messages)
    def record_ids(category):
      # A message identifies its record by whichever of the two id fields is
      # set (person takes precedence when both are present).
      return [msg.person_record_id or msg.note_record_id
              for msg in by_category.get(category, [])]
    self.added_record_ids = record_ids(Categories.ADDED_RECORD)
    self.deleted_record_ids = record_ids(Categories.DELETED_RECORD)
    # messages_by_record maps record_id -> {'count': n, '<kind>_tags': [...]}
    self.messages_by_record = {}
    category_to_key = {Categories.ADDED_FIELD: 'added_tags',
                       Categories.DELETED_FIELD: 'deleted_tags',
                       Categories.CHANGED_FIELD: 'changed_tags'}
    for category, key in category_to_key.items():
      grouped = MessagesOutput.group_messages_by_record(
          by_category.get(category, []))
      for record_id, record_messages in grouped.items():
        entry = self.messages_by_record.setdefault(record_id, {'count': 0})
        entry[key] = [msg.xml_tag for msg in record_messages]
        entry['count'] += len(record_messages)
class MessagesOutput:
  """A container that accumulates message parts and renders them as a plain
  string.

  The make_* methods append fragments to an internal list; get_output joins
  them into the final string.  The class also provides static helpers for
  grouping, truncating, and formatting lists of Message objects.
  """
  # If truncation is on, only this many messages will be allowed per category.
  TRUNCATE_THRESHOLD = 100
  # When using grouped output (messages_to_str_by_id), it's more compact and
  # less in need of truncation, so the limit is higher.
  GROUPED_TRUNCATE_THRESHOLD = 400
  def __init__(self):
    # List of string fragments; joined once by get_output.
    self.output = []
  def get_output(self):
    """Turns the stored data into a string. Call at most once per instance of
    MessagesOutput."""
    return ''.join(self.output)
  def end_new_message(self):
    """Call once at the end of each message after all calls to
    make_message_part."""
    self.output.append('\n')
  def make_message_part(self, text, inline, data=None):
    """Call once for each different part of the message (ie, the main text,
    the line number).

    text: the body of this message part.
    inline: True to continue the current line; False to start a new line
        first (the plain-text analogue of a span vs. a div).
    data: optional detail string appended immediately after text.
    """
    if not inline:
      self.output.append('\n')
    self.output.append(text)
    if data is not None:
      self.output.append(data)
  def make_message_part_division(self, text, data=None):
    """Wrapper for make_message_part that is not inline."""
    self.make_message_part(text, inline=False, data=data)
  def make_message_part_inline(self, text, data=None):
    """Wrapper for make_message_part that is inline."""
    self.make_message_part(text, inline=True, data=data)
  def start_table(self, headers):
    """Adds a table header to the output. Call before using make_table_row."""
    self.make_table_row(headers, row_tag='th')
  def make_table_row(self, elements, row_tag='td'):
    """Makes a tab-separated table row where every element in elements is in
    the row.  row_tag is kept for interface compatibility (it looks like a
    leftover from an HTML renderer) and does not affect plain-text output.
    """
    for element in elements:
      self.output.append(element + '\t')
    self.output.append('\n')
  # TODO(samking): add ability to turn off truncate in controller and main
  @staticmethod
  def truncate(messages, truncation_threshold):
    """Only allows truncation_threshold messages per category. Adds one
    message for each category that is truncated."""
    messages_by_category = MessagesOutput.group_messages_by_category(messages)
    truncated_messages = []
    for category, message_list in messages_by_category.items():
      # add at most truncation_threshold messages to truncated_messages
      truncated_messages.extend(message_list[:truncation_threshold])
      # add a message saying that truncation happened
      if len(message_list) > truncation_threshold:
        truncated_messages.append(Message(
            'You had too many messages, so some were truncated.',
            extra_data='You had ' + str(len(message_list)) + ' messages in the '
            'following category: ' + category + '.'))
    return truncated_messages
  @staticmethod
  def group_messages_by_record(messages):
    """Returns a dict from record_id to a list of messages with that id.
    person_record_id and note_record_id are treated the same."""
    grouped_messages = {}
    for message in messages:
      record_id = (message.person_record_id or message.note_record_id or
                   'None Specified')
      grouped_messages.setdefault(record_id, []).append(message)
    return grouped_messages
  @staticmethod
  def group_messages_by_category(messages):
    """Returns a dict from category to a list of messages with that
    category."""
    grouped_messages = {}
    for message in messages:
      grouped_messages.setdefault(message.category, []).append(message)
    return grouped_messages
  @staticmethod
  def get_field_from_messages(messages, field):
    """Returns a list of the value of field for each message.

    The pseudo-field 'record_id' resolves to note_record_id when present,
    falling back to person_record_id.
    """
    if field == 'record_id':
      return [message.note_record_id or message.person_record_id
              for message in messages]
    # getattr is the standard way to read an attribute by name; this resolves
    # the old TODO about reaching into __dict__.
    return [getattr(message, field) for message in messages]
  @staticmethod
  def generate_message_summary(messages):
    """Returns a string with a summary of the categories of each message."""
    output = MessagesOutput()
    messages_by_category = MessagesOutput.group_messages_by_category(messages)
    output.start_table(['Category', 'Number of Messages'])
    for category, messages_list in messages_by_category.items():
      output.make_table_row([category, str(len(messages_list))])
    return output.get_output()
  @staticmethod
  def messages_to_str_by_id(messages, truncate=True):
    """Returns a string containing all messages grouped together by record.
    Only works on diff messages."""
    if truncate:
      messages = MessagesOutput.truncate(
          messages, MessagesOutput.GROUPED_TRUNCATE_THRESHOLD)
    msg_grouping = MessageGroupingById(messages)
    output = ''
    if msg_grouping.added_record_ids:
      output += '%s: %d messages.\n' % (
          Categories.ADDED_RECORD, len(msg_grouping.added_record_ids))
    if msg_grouping.deleted_record_ids:
      output += '%s: %d messages.\n' % (
          Categories.DELETED_RECORD, len(msg_grouping.deleted_record_ids))
    for record_id, record_data in msg_grouping.messages_by_record.items():
      output += '%d messages for record: %s\n' % (
          record_data['count'], record_id)
      if record_data.get('added_tags'):
        output += '%s: %s\n' % (Categories.ADDED_FIELD,
                                ', '.join(record_data['added_tags']))
      if record_data.get('deleted_tags'):
        output += '%s: %s\n' % (Categories.DELETED_FIELD,
                                ', '.join(record_data['deleted_tags']))
      if record_data.get('changed_tags'):
        output += '%s: %s\n' % (Categories.CHANGED_FIELD,
                                ', '.join(record_data['changed_tags']))
      output += '\n'
    return output
  @staticmethod
  # pylint: disable=R0912
  def messages_to_str(messages, show_error_type=True, show_errors=True,
                      show_warnings=True, show_line_numbers=True,
                      show_full_line=True, show_record_ids=True,
                      show_xml_tag=True, show_xml_text=True, xml_lines=None,
                      truncate=True):
    # pylint: enable=R0912
    """Returns a string containing all messages formatted per the options.

    xml_lines must be provided when show_full_line is True and any message
    carries an xml_line_number; it is indexed to echo the offending line.

    BUG FIX: the second positional arguments previously passed to
    make_message_part_inline here ('message_type', 'message_line_number',
    'message_category') were leftover CSS class names from an HTML renderer.
    They landed in the data parameter and were appended verbatim into the
    plain-text output, and the 'message_category' call also passed data= by
    keyword, raising a TypeError whenever extra_data was set.  They have been
    removed.
    """
    if truncate:
      messages = MessagesOutput.truncate(
          messages, MessagesOutput.TRUNCATE_THRESHOLD)
    output = MessagesOutput()
    for message in messages:
      if (message.is_error and show_errors) or (
          not message.is_error and show_warnings):
        if show_error_type and message.is_error:
          output.make_message_part_inline('ERROR ')
        if show_error_type and not message.is_error:
          output.make_message_part_inline('WARNING ')
        if show_line_numbers and message.xml_line_number is not None:
          output.make_message_part_inline(
              'Line ' + str(message.xml_line_number) + ': ')
        if message.extra_data is None:
          output.make_message_part_inline(message.category)
        else:
          output.make_message_part_inline(message.category,
                                          data=': ' + message.extra_data)
        if show_record_ids:
          if message.person_record_id is not None:
            output.make_message_part_division(
                'The relevant person_record_id is: ',
                data=message.person_record_id)
          if message.note_record_id is not None:
            output.make_message_part_division(
                'The relevant note_record_id is: ',
                data=message.note_record_id)
        if show_xml_tag and message.xml_tag:
          output.make_message_part_division(
              'The tag of the relevant PFIF XML node: ',
              data=message.xml_tag)
        if show_xml_text and message.xml_text:
          output.make_message_part_division(
              'The text of the relevant PFIF XML node: ',
              data=message.xml_text)
        if show_full_line and message.xml_line_number is not None:
          output.make_message_part_division(
              xml_lines[message.xml_line_number - 1])
        output.end_new_message()
    return output.get_output()
|
from sqlalchemy.orm.dynamic import AppenderMixin
from typing import Union
class Field(object):
    """
    Configure a ModelSerializer field.

    dump_only / load_only restrict the direction in which the field is used;
    serializer, when given, handles the value conversion in both directions.
    """

    def __init__(self, dump_only=False, load_only=False, serializer=None):
        self.dump_only = dump_only
        self.load_only = load_only
        self._serializer = serializer

    @property
    def serializer(self):
        return self._serializer

    def dump(self, value):
        # Falsy values (None, 0, '', empty containers) bypass the serializer.
        if not (value and self._serializer):
            return value
        return self._serializer.dump(value)

    def load(self, serialized):
        if not (serialized and self._serializer):
            return serialized
        return self._serializer.load(serialized)
class NestedModelListField(Field):
    """
    A field to Dump and Update nested model list.
    """

    def __init__(self, declarative_class, **kw):
        from .modelserializer import ModelSerializer
        super().__init__(**kw)
        if self._serializer is None:
            self._serializer = ModelSerializer(declarative_class)

    def load(self, serialized):
        if not serialized:
            return []
        model_class = self.serializer.model_class
        pk_name = get_pk_attr_name(model_class)
        loaded = []
        for entry in serialized:
            pk = entry.get(pk_name)
            if pk:
                # The serialized object carries a primary key, so update the
                # existing database row instead of creating a new one.
                existing = model_class.query.get(pk)
                loaded.append(self.serializer.load(entry, existing))
            else:
                # No primary key: create a fresh model entity.
                loaded.append(self.serializer.load(entry))
        return loaded

    def dump(self, value):
        if not (value and self.serializer):
            return value
        return [self.serializer.dump(item) for item in value]
class NestedModelField(Field):
    """
    A field to Dump and Update nested models.
    """

    def __init__(self, declarative_class, **kw):
        from .modelserializer import ModelSerializer
        super().__init__(**kw)
        if self._serializer is None:
            self._serializer = ModelSerializer(declarative_class)

    def load(self, serialized):
        if not serialized:
            return None
        model_class = self.serializer.model_class
        pk = serialized.get(get_pk_attr_name(model_class))
        if not pk:
            # No primary key: create a fresh model entity.
            return self.serializer.load(serialized)
        # The serialized object carries a primary key, so update the existing
        # database row instead of creating a new one.
        existing = model_class.query.get(pk)
        return self.serializer.load(serialized, existing)
class NestedAttributesField(Field):
    """
    A read-only field that dumps selected attributes of a nested object
    (or of each item in a nested collection).
    """
    from .serializer import Serializer

    class NestedAttributesSerializer(Serializer):
        def __init__(self, attributes, many):
            self.attributes = attributes
            self.many = many

        def dump(self, value):
            if not self.many:
                return self._dump_item(value)
            return [self._dump_item(item) for item in value]

        def _dump_item(self, item):
            # Pick out only the configured attributes.
            return {name: getattr(item, name) for name in self.attributes}

        def load(self, serialized):
            raise NotImplementedError()

    def __init__(self, attributes: Union[tuple, dict], many=False):
        super().__init__(
            dump_only=True,
            serializer=self.NestedAttributesSerializer(attributes, many))
class PrimaryKeyField(Field):
    """
    Convert relationships in a list of primary keys (for serialization and
    deserialization).
    """
    from .serializer import Serializer

    class PrimaryKeySerializer(Serializer):
        def __init__(self, declarative_class):
            self.declarative_class = declarative_class
            self._pk_column = get_model_pk(self.declarative_class)

        def load(self, serialized):
            column = self._pk_column
            found = self.declarative_class.query.filter(
                column.in_(serialized)).all()
            # Every requested key must resolve to an existing row.
            if len(found) != len(serialized):
                raise ValueError(
                    "Not all primary keys found for '{}'".format(column))
            return found

        def dump(self, value):
            key = self._pk_column.key
            if not is_tomany_attribute(value):
                return getattr(value, key)
            return [getattr(item, key) for item in value]

    def __init__(self, declarative_class, **kw):
        super().__init__(
            serializer=self.PrimaryKeySerializer(declarative_class), **kw)
def get_pk_attr_name(declarative_model):
    """
    Get the primary key attribute name from a Declarative model class

    :param Type[DeclarativeMeta] declarative_model: a Declarative class
    :return: str: a Column name
    """
    pks = declarative_model.__mapper__.primary_key
    assert len(pks) == 1, "Nested object must have exactly one primary key"
    return pks[0].key
def get_model_pk(declarative_class):
    """
    Get the primary key Column object from a Declarative model class

    :param Type[DeclarativeMeta] declarative_class: a Declarative class
    :rtype: Column
    """
    pks = declarative_class.__mapper__.primary_key
    assert len(pks) == 1, "Nested object must have exactly one primary key"
    return pks[0]
def is_tomany_attribute(value):
    """
    Check if the Declarative relationship attribute represents a to-many
    relationship.

    :param value: a SQLAlchemy Declarative class relationship attribute
    :rtype: bool
    """
    # Dynamic (lazy='dynamic') relationships expose an AppenderMixin rather
    # than a plain list.
    to_many_types = (list, AppenderMixin)
    return isinstance(value, to_many_types)
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Lifespinner")
def lifespinner(card, abilities):
    """Abilities of Lifespinner are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Teardrop Kami")
def teardrop_kami(card, abilities):
    """Abilities of Teardrop Kami are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Ire of Kaminari")
def ire_of_kaminari(card, abilities):
    """Abilities of Ire of Kaminari are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Heartless Hidetsugu")
def heartless_hidetsugu(card, abilities):
    """Abilities of Heartless Hidetsugu are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Quillmane Baku")
def quillmane_baku(card, abilities):
    """Abilities of Quillmane Baku are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2
@card("That Which Was Taken")
def that_which_was_taken(card, abilities):
    """Abilities of That Which Was Taken are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Higure, the Still Wind")
def higure_the_still_wind(card, abilities):
    """Abilities of Higure, the Still Wind are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Ornate Kanzashi")
def ornate_kanzashi(card, abilities):
    """Abilities of Ornate Kanzashi are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Soratami Mindsweeper")
def soratami_mindsweeper(card, abilities):
    """Abilities of Soratami Mindsweeper are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Kyoki, Sanity's Eclipse")
def kyoki_sanitys_eclipse(card, abilities):
    """Abilities of Kyoki, Sanity's Eclipse are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Sakiko, Mother of Summer")
def sakiko_mother_of_summer(card, abilities):
    """Abilities of Sakiko, Mother of Summer are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Kaijin of the Vanishing Touch")
def kaijin_of_the_vanishing_touch(card, abilities):
    """Abilities of Kaijin of the Vanishing Touch are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2
@card("Mirror Gallery")
def mirror_gallery(card, abilities):
    """Abilities of Mirror Gallery are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Kumano's Blessing")
def kumanos_blessing(card, abilities):
    """Abilities of Kumano's Blessing are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Hired Muscle")
def hired_muscle(card, abilities):
    """Abilities of Hired Muscle are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Tomorrow, Azami's Familiar")
def tomorrow_azamis_familiar(card, abilities):
    """Abilities of Tomorrow, Azami's Familiar are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Jaraku the Interloper")
def jaraku_the_interloper(card, abilities):
    """Abilities of Jaraku the Interloper are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Empty-Shrine Kannushi")
def emptyshrine_kannushi(card, abilities):
    """Abilities of Empty-Shrine Kannushi are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Shirei, Shizo's Caretaker")
def shirei_shizos_caretaker(card, abilities):
    """Abilities of Shirei, Shizo's Caretaker are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Final Judgment")
def final_judgment(card, abilities):
    """Abilities of Final Judgment are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)
@card("Call for Blood")
def call_for_blood(card, abilities):
    """Abilities of Call for Blood are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Roar of Jukai")
def roar_of_jukai(card, abilities):
    """Abilities of Roar of Jukai are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Minamo Sightbender")
def minamo_sightbender(card, abilities):
    """Abilities of Minamo Sightbender are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Tendo Ice Bridge")
def tendo_ice_bridge(card, abilities):
    """Abilities of Tendo Ice Bridge are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Stream of Consciousness")
def stream_of_consciousness(card, abilities):
    """Abilities of Stream of Consciousness are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Ogre Marauder")
def ogre_marauder(card, abilities):
    """Abilities of Ogre Marauder are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Veil of Secrecy")
def veil_of_secrecy(card, abilities):
    """Abilities of Veil of Secrecy are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Sway of the Stars")
def sway_of_the_stars(card, abilities):
    """Abilities of Sway of the Stars are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)
@card("Psychic Spear")
def psychic_spear(card, abilities):
    """Abilities of Psychic Spear are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Petalmane Baku")
def petalmane_baku(card, abilities):
    """Abilities of Petalmane Baku are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Genju of the Spires")
def genju_of_the_spires(card, abilities):
    """Abilities of Genju of the Spires are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Mannichi, the Fevered Dream")
def mannichi_the_fevered_dream(card, abilities):
    """Abilities of Mannichi, the Fevered Dream are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Faithful Squire")
def faithful_squire(card, abilities):
    """Abilities of Faithful Squire are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Oyobi, Who Split the Heavens")
def oyobi_who_split_the_heavens(card, abilities):
    """Abilities of Oyobi, Who Split the Heavens are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Body of Jukai")
def body_of_jukai(card, abilities):
    """Abilities of Body of Jukai are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Budoka Pupil")
def budoka_pupil(card, abilities):
    """Abilities of Budoka Pupil are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3
@card("Ward of Piety")
def ward_of_piety(card, abilities):
    """Abilities of Ward of Piety are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Clash of Realities")
def clash_of_realities(card, abilities):
    """Abilities of Clash of Realities are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Crack the Earth")
def crack_the_earth(card, abilities):
    """Abilities of Crack the Earth are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Patron of the Kitsune")
def patron_of_the_kitsune(card, abilities):
    """Abilities of Patron of the Kitsune are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Bile Urchin")
def bile_urchin(card, abilities):
    """Abilities of Bile Urchin are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Genju of the Fens")
def genju_of_the_fens(card, abilities):
    """Abilities of Genju of the Fens are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Shuko")
def shuko(card, abilities):
    """Abilities of Shuko are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Shuriken")
def shuriken(card, abilities):
    """Abilities of Shuriken are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Child of Thorns")
def child_of_thorns(card, abilities):
    """Abilities of Child of Thorns are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Goryo's Vengeance")
def goryos_vengeance(card, abilities):
    """Abilities of Goryo's Vengeance are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2
@card("Ink-Eyes, Servant of Oni")
def inkeyes_servant_of_oni(card, abilities):
    """Abilities of Ink-Eyes, Servant of Oni are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Kodama of the Center Tree")
def kodama_of_the_center_tree(card, abilities):
    """Abilities of Kodama of the Center Tree are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Hokori, Dust Drinker")
def hokori_dust_drinker(card, abilities):
    """Abilities of Hokori, Dust Drinker are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Ribbons of the Reikai")
def ribbons_of_the_reikai(card, abilities):
    """Abilities of Ribbons of the Reikai are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Day of Destiny")
def day_of_destiny(card, abilities):
    """Abilities of Day of Destiny are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Heed the Mists")
def heed_the_mists(card, abilities):
    """Abilities of Heed the Mists are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Iwamori of the Open Fist")
def iwamori_of_the_open_fist(card, abilities):
    """Abilities of Iwamori of the Open Fist are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Heart of Light")
def heart_of_light(card, abilities):
    """Abilities of Heart of Light are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Frostling")
def frostling(card, abilities):
    """Abilities of Frostling are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)
@card("Toshiro Umezawa")
def toshiro_umezawa(card, abilities):
    """Stub abilities for Toshiro Umezawa (not yet implemented).

    BUG FIX: the card name had been scrubbed to the placeholder '<NAME>';
    restored from the function name (the set's legendary human samurai).
    """
    def toshiro_umezawa():
        return AbilityNotImplemented
    def toshiro_umezawa():
        return AbilityNotImplemented
    return toshiro_umezawa, toshiro_umezawa,
@card("Pus Kami")
def pus_kami(card, abilities):
    """Stub abilities for Pus Kami (not yet implemented).

    BUG FIX: the card name had been scrubbed to the placeholder '<NAME>';
    restored from the function name.
    """
    def pus_kami():
        return AbilityNotImplemented
    def pus_kami():
        return AbilityNotImplemented
    return pus_kami, pus_kami,
@card("Uproot")
def uproot(card, abilities):
    """Abilities of Uproot are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Shizuko, Caller of Autumn")
def shizuko_caller_of_autumn(card, abilities):
    """Abilities of Shizuko, Caller of Autumn are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Fumiko the Lowblood")
def fumiko_the_lowblood(card, abilities):
    """Abilities of Fumiko the Lowblood are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Blademane Baku")
def blademane_baku(card, abilities):
    """Abilities of Blademane Baku are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Mending Hands")
def mending_hands(card, abilities):
    """Abilities of Mending Hands are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Patron of the Akki")
def patron_of_the_akki(card, abilities):
    """Abilities of Patron of the Akki are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Goblin Cohort")
def goblin_cohort(card, abilities):
    """Abilities of Goblin Cohort are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Patron of the Nezumi")
def patron_of_the_nezumi(card, abilities):
    """Abilities of Patron of the Nezumi are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Genju of the Fields")
def genju_of_the_fields(card, abilities):
    """Abilities of Genju of the Fields are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Crawling Filth")
def crawling_filth(card, abilities):
    """Abilities of Crawling Filth are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Kitsune Palliator")
def kitsune_palliator(card, abilities):
    """Abilities of Kitsune Palliator are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Blessing of Leeches")
def blessing_of_leeches(card, abilities):
    """Abilities of Blessing of Leeches are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 4


@card("Chisei, Heart of Oceans")
def chisei_heart_of_oceans(card, abilities):
    """Abilities of Chisei, Heart of Oceans are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2
@card("Mark of Sakiko")
def mark_of_sakiko(card, abilities):
    """Stub abilities for Mark of Sakiko (not yet implemented).

    BUG FIX: the card name had been scrubbed to the placeholder '<NAME>';
    restored from the function name.
    """
    def mark_of_sakiko():
        return AbilityNotImplemented
    def mark_of_sakiko():
        return AbilityNotImplemented
    return mark_of_sakiko, mark_of_sakiko,
@card("Kami of the Honored Dead")
def kami_of_the_honored_dead(card, abilities):
    """Abilities of Kami of the Honored Dead are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Forked-Branch Garami")
def forkedbranch_garami(card, abilities):
    """Abilities of Forked-Branch Garami are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Isao, Enlightened Bushi")
def isao_enlightened_bushi(card, abilities):
    """Abilities of Isao, Enlightened Bushi are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("In the Web of War")
def in_the_web_of_war(card, abilities):
    """Abilities of In the Web of War are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Mark of the Oni")
def mark_of_the_oni(card, abilities):
    """Abilities of Mark of the Oni are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 3


@card("Mistblade Shinobi")
def mistblade_shinobi(card, abilities):
    """Abilities of Mistblade Shinobi are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Throat Slitter")
def throat_slitter(card, abilities):
    """Abilities of Throat Slitter are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Indebted Samurai")
def indebted_samurai(card, abilities):
    """Abilities of Indebted Samurai are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Terashi's Verdict")
def terashis_verdict(card, abilities):
    """Abilities of Terashi's Verdict are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,)


@card("Horobi's Whisper")
def horobis_whisper(card, abilities):
    """Abilities of Horobi's Whisper are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 2


@card("Opal-Eye, Konda's Yojimbo")
def opaleye_kondas_yojimbo(card, abilities):
    """Abilities of Opal-Eye, Konda's Yojimbo are not yet implemented."""
    def ability():
        return AbilityNotImplemented
    return (ability,) * 4
@card("Scarmaker")
def scarmaker(card, abilities):
def scarmaker():
return AbilityNotImplemented
def scarmaker():
return AbilityNotImplemented
return scarmaker, scarmaker,
@card("Neko-Te")
def nekote(card, abilities):
def nekote():
return AbilityNotImplemented
def nekote():
return AbilityNotImplemented
def nekote():
return AbilityNotImplemented
return nekote, nekote, nekote,
@card("Genju of the Falls")
def genju_of_the_falls(card, abilities):
def genju_of_the_falls():
return AbilityNotImplemented
def genju_of_the_falls():
return AbilityNotImplemented
def genju_of_the_falls():
return AbilityNotImplemented
return genju_of_the_falls, genju_of_the_falls, genju_of_the_falls,
@card("Reduce to Dreams")
def reduce_to_dreams(card, abilities):
def reduce_to_dreams():
return AbilityNotImplemented
return reduce_to_dreams,
@card("Genju of the Cedars")
def genju_of_the_cedars(card, abilities):
def genju_of_the_cedars():
return AbilityNotImplemented
def genju_of_the_cedars():
return AbilityNotImplemented
def genju_of_the_cedars():
return AbilityNotImplemented
return genju_of_the_cedars, genju_of_the_cedars, genju_of_the_cedars,
@card("Toils of Night and Day")
def toils_of_night_and_day(card, abilities):
def toils_of_night_and_day():
return AbilityNotImplemented
return toils_of_night_and_day,
@card("Hundred-Talon Strike")
def hundredtalon_strike(card, abilities):
def hundredtalon_strike():
return AbilityNotImplemented
def hundredtalon_strike():
return AbilityNotImplemented
return hundredtalon_strike, hundredtalon_strike,
@card("Callow Jushi")
def callow_jushi(card, abilities):
def callow_jushi():
return AbilityNotImplemented
def callow_jushi():
return AbilityNotImplemented
def callow_jushi():
return AbilityNotImplemented
return callow_jushi, callow_jushi, callow_jushi,
@card("Yukora, the Prisoner")
def yukora_the_prisoner(card, abilities):
def yukora_the_prisoner():
return AbilityNotImplemented
return yukora_the_prisoner,
@card("Skullsnatcher")
def skullsnatcher(card, abilities):
def skullsnatcher():
return AbilityNotImplemented
def skullsnatcher():
return AbilityNotImplemented
return skullsnatcher, skullsnatcher,
@card("Umezawa's Jitte")
def umezawas_jitte(card, abilities):
def umezawas_jitte():
return AbilityNotImplemented
def umezawas_jitte():
return AbilityNotImplemented
def umezawas_jitte():
return AbilityNotImplemented
return umezawas_jitte, umezawas_jitte, umezawas_jitte,
@card("Baku Altar")
def baku_altar(card, abilities):
def baku_altar():
return AbilityNotImplemented
def baku_altar():
return AbilityNotImplemented
return baku_altar, baku_altar,
@card("Shinka Gatekeeper")
def shinka_gatekeeper(card, abilities):
def shinka_gatekeeper():
return AbilityNotImplemented
return shinka_gatekeeper,
@card("Ogre Recluse")
def ogre_recluse(card, abilities):
def ogre_recluse():
return AbilityNotImplemented
return ogre_recluse,
@card("<NAME>")
def matsutribe_sniper(card, abilities):
def matsutribe_sniper():
return AbilityNotImplemented
def matsutribe_sniper():
return AbilityNotImplemented
return matsutribe_sniper, matsutribe_sniper,
@card("Disrupting Shoal")
def disrupting_shoal(card, abilities):
def disrupting_shoal():
return AbilityNotImplemented
def disrupting_shoal():
return AbilityNotImplemented
return disrupting_shoal, disrupting_shoal,
@card("Minamo'<NAME>dling")
def minamos_meddling(card, abilities):
def minamos_meddling():
return AbilityNotImplemented
return minamos_meddling,
@card("<NAME>")
def akki_blizzardherder(card, abilities):
def akki_blizzardherder():
return AbilityNotImplemented
return akki_blizzardherder,
@card("Ishi-Ishi, Akki Crackshot")
def ishiishi_akki_crackshot(card, abilities):
def ishiishi_akki_crackshot():
return AbilityNotImplemented
return ishiishi_akki_crackshot,
@card("Nezumi Shadow-Watcher")
def nezumi_shadowwatcher(card, abilities):
def nezumi_shadowwatcher():
return AbilityNotImplemented
return nezumi_shadowwatcher,
@card("Azamuki, Treachery Incarnate")
def azamuki_treachery_incarnate(card, abilities):
def azamuki_treachery_incarnate():
return AbilityNotImplemented
def azamuki_treachery_incarnate():
return AbilityNotImplemented
return azamuki_treachery_incarnate, azamuki_treachery_incarnate,
@card("Ninja of the Deep Hours")
def ninja_of_the_deep_hours(card, abilities):
def ninja_of_the_deep_hours():
return AbilityNotImplemented
def ninja_of_the_deep_hours():
return AbilityNotImplemented
return ninja_of_the_deep_hours, ninja_of_the_deep_hours,
@card("Slumbering Tora")
def slumbering_tora(card, abilities):
def slumbering_tora():
return AbilityNotImplemented
return slumbering_tora,
@card("Vital Surge")
def vital_surge(card, abilities):
def vital_surge():
return AbilityNotImplemented
def vital_surge():
return AbilityNotImplemented
return vital_surge, vital_surge,
@card("Traproot Kami")
def traproot_kami(card, abilities):
def traproot_kami():
return AbilityNotImplemented
def traproot_kami():
return AbilityNotImplemented
return traproot_kami, traproot_kami,
@card("Split-Tail Miko")
def splittail_miko(card, abilities):
def splittail_miko():
return AbilityNotImplemented
return splittail_miko,
@card("Sakura-Tribe Springcaller")
def sakuratribe_springcaller(card, abilities):
def sakuratribe_springcaller():
return AbilityNotImplemented
return sakuratribe_springcaller,
@card("Kami of Tattered Shoji")
def kami_of_tattered_shoji(card, abilities):
def kami_of_tattered_shoji():
return AbilityNotImplemented
return kami_of_tattered_shoji,
@card("Stir the Grave")
def stir_the_grave(card, abilities):
def stir_the_grave():
return AbilityNotImplemented
return stir_the_grave,
@card("Patron of the Orochi")
def patron_of_the_orochi(card, abilities):
def patron_of_the_orochi():
return AbilityNotImplemented
def patron_of_the_orochi():
return AbilityNotImplemented
return patron_of_the_orochi, patron_of_the_orochi,
@card("Hero's Demise")
def heros_demise(card, abilities):
def heros_demise():
return AbilityNotImplemented
return heros_demise,
@card("Flames of the Blood Hand")
def flames_of_the_blood_hand(card, abilities):
def flames_of_the_blood_hand():
return AbilityNotImplemented
return flames_of_the_blood_hand,
@card("Unchecked Growth")
def unchecked_growth(card, abilities):
def unchecked_growth():
return AbilityNotImplemented
return unchecked_growth,
@card("First Volley")
def first_volley(card, abilities):
def first_volley():
return AbilityNotImplemented
return first_volley,
@card("Lifegift")
def lifegift(card, abilities):
def lifegift():
return AbilityNotImplemented
return lifegift,
@card("Gods' Eye, Gate to the Reikai")
def gods_eye_gate_to_the_reikai(card, abilities):
def gods_eye_gate_to_the_reikai():
return AbilityNotImplemented
def gods_eye_gate_to_the_reikai():
return AbilityNotImplemented
return gods_eye_gate_to_the_reikai, gods_eye_gate_to_the_reikai,
@card("Scourge of Numai")
def scourge_of_numai(card, abilities):
def scourge_of_numai():
return AbilityNotImplemented
return scourge_of_numai,
@card("Takenuma Bleeder")
def takenuma_bleeder(card, abilities):
def takenuma_bleeder():
return AbilityNotImplemented
return takenuma_bleeder,
@card("Sickening Shoal")
def sickening_shoal(card, abilities):
def sickening_shoal():
return AbilityNotImplemented
def sickening_shoal():
return AbilityNotImplemented
return sickening_shoal, sickening_shoal,
@card("A<NAME>")
def aura_barbs(card, abilities):
def aura_barbs():
return AbilityNotImplemented
return aura_barbs,
@card("Tallowisp")
def tallowisp(card, abilities):
def tallowisp():
return AbilityNotImplemented
return tallowisp,
@card("Skullmane Baku")
def skullmane_baku(card, abilities):
def skullmane_baku():
return AbilityNotImplemented
def skullmane_baku():
return AbilityNotImplemented
return skullmane_baku, skullmane_baku,
@card("Shimmering Glasskite")
def shimmering_glasskite(card, abilities):
def shimmering_glasskite():
return AbilityNotImplemented
def shimmering_glasskite():
return AbilityNotImplemented
return shimmering_glasskite, shimmering_glasskite,
@card("Twist Allegiance")
def twist_allegiance(card, abilities):
def twist_allegiance():
return AbilityNotImplemented
return twist_allegiance,
@card("Ok<NAME>")
def okibagang_shinobi(card, abilities):
def okibagang_shinobi():
return AbilityNotImplemented
def okibagang_shinobi():
return AbilityNotImplemented
return okibagang_shinobi, okibagang_shinobi,
@card("Scaled Hulk")
def scaled_hulk(card, abilities):
def scaled_hulk():
return AbilityNotImplemented
return scaled_hulk,
@card("Akki Raider")
def akki_raider(card, abilities):
def akki_raider():
return AbilityNotImplemented
return akki_raider,
@card("R<NAME>")
def ronin_warclub(card, abilities):
def ronin_warclub():
return AbilityNotImplemented
def ronin_warclub():
return AbilityNotImplemented
def ronin_warclub():
return AbilityNotImplemented
return ronin_warclub, ronin_warclub, ronin_warclub,
@card("Ashen Monstrosity")
def ashen_monstrosity(card, abilities):
def ashen_monstrosity():
return AbilityNotImplemented
def ashen_monstrosity():
return AbilityNotImplemented
return ashen_monstrosity, ashen_monstrosity,
@card("Jetting Glasskite")
def jetting_glasskite(card, abilities):
def jetting_glasskite():
return AbilityNotImplemented
def jetting_glasskite():
return AbilityNotImplemented
return jetting_glasskite, jetting_glasskite,
@card("Three Tragedies")
def three_tragedies(card, abilities):
def three_tragedies():
return AbilityNotImplemented
return three_tragedies,
@card("Kami of False Hope")
def kami_of_false_hope(card, abilities):
def kami_of_false_hope():
return AbilityNotImplemented
return kami_of_false_hope,
@card("Overblaze")
def overblaze(card, abilities):
def overblaze():
return AbilityNotImplemented
def overblaze():
return AbilityNotImplemented
return overblaze, overblaze,
@card("Moonlit Strider")
def moonlit_strider(card, abilities):
def moonlit_strider():
return AbilityNotImplemented
def moonlit_strider():
return AbilityNotImplemented
return moonlit_strider, moonlit_strider,
@card("Ronin Cliffrider")
def ronin_cliffrider(card, abilities):
def ronin_cliffrider():
return AbilityNotImplemented
def ronin_cliffrider():
return AbilityNotImplemented
return ronin_cliffrider, ronin_cliffrider,
@card("<NAME>")
def waxmane_baku(card, abilities):
def waxmane_baku():
return AbilityNotImplemented
def waxmane_baku():
return AbilityNotImplemented
return waxmane_baku, waxmane_baku,
@card("Kaiso, Memory of Loyalty")
def kaiso_memory_of_loyalty(card, abilities):
def kaiso_memory_of_loyalty():
return AbilityNotImplemented
def kaiso_memory_of_loyalty():
return AbilityNotImplemented
def kaiso_memory_of_loyalty():
return AbilityNotImplemented
return kaiso_memory_of_loyalty, kaiso_memory_of_loyalty, kaiso_memory_of_loyalty,
@card("Orb of Dreams")
def orb_of_dreams(card, abilities):
def orb_of_dreams():
return AbilityNotImplemented
return orb_of_dreams,
@card("Sosuke's Summons")
def sosukes_summons(card, abilities):
def sosukes_summons():
return AbilityNotImplemented
def sosukes_summons():
return AbilityNotImplemented
return sosukes_summons, sosukes_summons,
@card("Blazing Shoal")
def blazing_shoal(card, abilities):
def blazing_shoal():
return AbilityNotImplemented
def blazing_shoal():
return AbilityNotImplemented
return blazing_shoal, blazing_shoal,
@card("Torrent of Stone")
def torrent_of_stone(card, abilities):
def torrent_of_stone():
return AbilityNotImplemented
def torrent_of_stone():
return AbilityNotImplemented
return torrent_of_stone, torrent_of_stone,
@card("Kentaro, the Smiling Cat")
def kentaro_the_smiling_cat(card, abilities):
def kentaro_the_smiling_cat():
return AbilityNotImplemented
def kentaro_the_smiling_cat():
return AbilityNotImplemented
return kentaro_the_smiling_cat, kentaro_the_smiling_cat,
@card("Terashi's Grasp")
def terashis_grasp(card, abilities):
def terashis_grasp():
return AbilityNotImplemented
return terashis_grasp,
@card("Shining Shoal")
def shining_shoal(card, abilities):
def shining_shoal():
return AbilityNotImplemented
def shining_shoal():
return AbilityNotImplemented
return shining_shoal, shining_shoal,
@card("Nourishing Shoal")
def nourishing_shoal(card, abilities):
def nourishing_shoal():
return AbilityNotImplemented
def nourishing_shoal():
return AbilityNotImplemented
return nourishing_shoal, nourishing_shoal,
@card("Genju of the Realm")
def genju_of_the_realm(card, abilities):
def genju_of_the_realm():
return AbilityNotImplemented
def genju_of_the_realm():
return AbilityNotImplemented
def genju_of_the_realm():
return AbilityNotImplemented
return genju_of_the_realm, genju_of_the_realm, genju_of_the_realm,
@card("Ichiga, Who Topples Oaks")
def ichiga_who_topples_oaks(card, abilities):
def ichiga_who_topples_oaks():
return AbilityNotImplemented
def ichiga_who_topples_oaks():
return AbilityNotImplemented
def ichiga_who_topples_oaks():
return AbilityNotImplemented
return ichiga_who_topples_oaks, ichiga_who_topples_oaks, ichiga_who_topples_oaks,
@card("Cunning Bandit")
def cunning_bandit(card, abilities):
def cunning_bandit():
return AbilityNotImplemented
def cunning_bandit():
return AbilityNotImplemented
def cunning_bandit():
return AbilityNotImplemented
return cunning_bandit, cunning_bandit, cunning_bandit,
@card("Yomiji, Who Bars the Way")
def yomiji_who_bars_the_way(card, abilities):
def yomiji_who_bars_the_way():
return AbilityNotImplemented
return yomiji_who_bars_the_way,
@card("Silverstorm Samurai")
def silverstorm_samurai(card, abilities):
def silverstorm_samurai():
return AbilityNotImplemented
def silverstorm_samurai():
return AbilityNotImplemented
return silverstorm_samurai, silverstorm_samurai,
@card("Enshrined Memories")
def enshrined_memories(card, abilities):
def enshrined_memories():
return AbilityNotImplemented
return enshrined_memories,
@card("Kira, Great Glass-Spinner")
def kira_great_glassspinner(card, abilities):
def kira_great_glassspinner():
return AbilityNotImplemented
def kira_great_glassspinner():
return AbilityNotImplemented
return kira_great_glassspinner, kira_great_glassspinner,
@card("Blinding Powder")
def blinding_powder(card, abilities):
def blinding_powder():
return AbilityNotImplemented
def blinding_powder():
return AbilityNotImplemented
return blinding_powder, blinding_powder,
@card("Harbinger of Spring")
def harbinger_of_spring(card, abilities):
def harbinger_of_spring():
return AbilityNotImplemented
def harbinger_of_spring():
return AbilityNotImplemented
return harbinger_of_spring, harbinger_of_spring,
@card("Loam Dweller")
def loam_dweller(card, abilities):
def loam_dweller():
return AbilityNotImplemented
return loam_dweller,
@card("Floodbringer")
def floodbringer(card, abilities):
def floodbringer():
return AbilityNotImplemented
def floodbringer():
return AbilityNotImplemented
return floodbringer, floodbringer,
@card("Walker of Secret Ways")
def walker_of_secret_ways(card, abilities):
def walker_of_secret_ways():
return AbilityNotImplemented
def walker_of_secret_ways():
return AbilityNotImplemented
def walker_of_secret_ways():
return AbilityNotImplemented
return walker_of_secret_ways, walker_of_secret_ways, walker_of_secret_ways,
@card("Takeno's Cavalry")
def takenos_cavalry(card, abilities):
def takenos_cavalry():
return AbilityNotImplemented
def takenos_cavalry():
return AbilityNotImplemented
return takenos_cavalry, takenos_cavalry,
@card("Patron of the Moon")
def patron_of_the_moon(card, abilities):
def patron_of_the_moon():
return AbilityNotImplemented
def patron_of_the_moon():
return AbilityNotImplemented
def patron_of_the_moon():
return AbilityNotImplemented
return patron_of_the_moon, patron_of_the_moon, patron_of_the_moon,
@card("Threads of Disloyalty")
def threads_of_disloyalty(card, abilities):
def threads_of_disloyalty():
return AbilityNotImplemented
def threads_of_disloyalty():
return AbilityNotImplemented
return threads_of_disloyalty, threads_of_disloyalty, |
"""Module for dealing with the toolbar.
"""
import math
import os
import ipyevents
import ipyleaflet
import ipywidgets as widgets
from ipyfilechooser import FileChooser
from .common import *
from .pc import *
def tool_template(m=None):
    """Generates a tool GUI template using ipywidgets. Icons can be found at https://fontawesome.com/v4/icons
    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
    Returns:
        ipywidgets: The tool GUI widget.
    """
    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}
    # Gear toggle that expands/collapses the whole tool panel.
    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gear",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    # "X" toggle that tears the tool down and removes its map control.
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    # --- Example input widgets for the template body ---
    checkbox = widgets.Checkbox(
        description="Checkbox",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )
    dropdown = widgets.Dropdown(
        options=["Option 1", "Option 2", "Option 3"],
        value=None,
        description="Dropdown:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )
    int_slider = widgets.IntSlider(
        min=1,
        max=100,
        description="Int Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )
    # Label that mirrors the slider value; updated via a Python-side observer
    # instead of jslink (the jslink variant is kept below for reference).
    int_slider_label = widgets.Label()
    # widgets.jslink((int_slider, "value"), (int_slider_label, "value"))
    def int_slider_changed(change):
        if change["new"]:
            int_slider_label.value = str(int_slider.value)
    int_slider.observe(int_slider_changed, "value")
    float_slider = widgets.FloatSlider(
        min=1,
        max=100,
        description="Float Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )
    float_slider_label = widgets.Label()
    # widgets.jslink((float_slider, "value"), (float_slider_label, "value"))
    def float_slider_changed(change):
        if change["new"]:
            float_slider_label.value = str(float_slider.value)
    float_slider.observe(float_slider_changed, "value")
    color = widgets.ColorPicker(
        concise=False,
        description="Color:",
        value="white",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )
    text = widgets.Text(
        value="",
        description="Textbox:",
        placeholder="Placeholder",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )
    textarea = widgets.Textarea(
        placeholder="Placeholder",
        layout=widgets.Layout(width=widget_width),
    )
    # Apply/Reset/Close action bar; handled in button_clicked below.
    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"
    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
    # --- Panel assembly: header (close + gear) above the footer (inputs) ---
    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        widgets.HBox([int_slider, int_slider_label]),
        widgets.HBox([float_slider, float_slider_label]),
        dropdown,
        text,
        color,
        textarea,
        buttons,
        output,
    ]
    # Expand the panel on hover; collapse again on mouse-out unless pinned
    # open by the gear toggle.
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )
    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False
    toolbar_event.on_dom_event(handle_toolbar_event)
    def toolbar_btn_click(change):
        # Gear toggled on: pin the panel open; toggled off: collapse it
        # unless the close button is mid-click.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]
    toolbar_button.observe(toolbar_btn_click, "value")
    def close_btn_click(change):
        # Remove the tool's WidgetControl from the map (if any) and destroy
        # the widget tree.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()
    close_button.observe(close_btn_click, "value")
    def button_clicked(change):
        # Dispatch on the Apply/Reset/Close action bar; resets the toggle
        # at the end so the same action can be clicked twice in a row.
        if change["new"] == "Apply":
            with output:
                output.clear_output()
                print("Running ...")
        elif change["new"] == "Reset":
            textarea.value = ""
            output.clear_output()
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()
        buttons.value = None
    buttons.observe(button_clicked, "value")
    # Start expanded.
    toolbar_button.value = True
    if m is not None:
        # Attach to the map as a top-right control; otherwise return the
        # bare widget for standalone display.
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def main_toolbar(m):
    """Creates the main toolbar and adds it to the map.
    Args:
        m (leafmap.Map): The leafmap Map object.
    """
    # Maps a Font Awesome icon name -> tool id ("name") and hover tooltip.
    # Dict order determines button order in the grid.
    tools = {
        "map": {
            "name": "basemap",
            "tooltip": "Change basemap",
        },
        "globe": {
            "name": "split_map",
            "tooltip": "Split-panel map",
        },
        "adjust": {
            "name": "planet",
            "tooltip": "Planet imagery",
        },
        "folder-open": {
            "name": "open_data",
            "tooltip": "Open local vector/raster data",
        },
        "gears": {
            "name": "whitebox",
            "tooltip": "WhiteboxTools for local geoprocessing",
        },
        "fast-forward": {
            "name": "timeslider",
            "tooltip": "Activate the time slider",
        },
        "eraser": {
            "name": "eraser",
            "tooltip": "Remove all drawn features",
        },
        "camera": {
            "name": "save_map",
            "tooltip": "Save map as HTML or image",
        },
        "address-book": {
            "name": "census",
            "tooltip": "Get US Census data",
        },
        "info": {
            "name": "inspector",
            "tooltip": "Get COG/STAC pixel value",
        },
        "search": {
            "name": "search_xyz",
            "tooltip": "Search XYZ tile services",
        },
        "download": {
            "name": "download_osm",
            "tooltip": "Download OSM data",
        },
        "picture-o": {
            "name": "raster",
            "tooltip": "Open COG/STAC dataset",
        },
        "search-plus": {
            "name": "search_geojson",
            "tooltip": "Search features in GeoJSON layer",
        },
        "table": {
            "name": "attribute_table",
            "tooltip": "Open attribute table",
        },
        "pencil-square-o": {
            "name": "edit_vector",
            "tooltip": "Create vector data",
        },
        "stack-exchange": {
            "name": "stac",
            "tooltip": "Discover STAC Catalog",
        },
        # "spinner": {
        #     "name": "placeholder2",
        #     "tooltip": "This is a placeholder",
        # },
        "question": {
            "name": "help",
            "tooltip": "Get help",
        },
    }
    # if m.sandbox_path is None and (os.environ.get("USE_VOILA") is not None):
    #     voila_tools = ["camera", "folder-open", "gears"]
    #     for item in voila_tools:
    #         if item in tools.keys():
    #             del tools[item]
    icons = list(tools.keys())
    tooltips = [item["tooltip"] for item in list(tools.values())]
    # Lay the buttons out in a fixed-width 3-column grid.
    icon_width = "32px"
    icon_height = "32px"
    n_cols = 3
    n_rows = math.ceil(len(icons) / n_cols)
    toolbar_grid = widgets.GridBox(
        children=[
            widgets.ToggleButton(
                layout=widgets.Layout(
                    width="auto", height="auto", padding="0px 0px 0px 4px"
                ),
                button_style="primary",
                icon=icons[i],
                tooltip=tooltips[i],
            )
            for i in range(len(icons))
        ],
        layout=widgets.Layout(
            width="109px",
            grid_template_columns=(icon_width + " ") * n_cols,
            grid_template_rows=(icon_height + " ") * n_rows,
            grid_gap="1px 1px",
            padding="5px",
        ),
    )
    m.toolbar = toolbar_grid
    def tool_callback(change):
        # One observer shared by every grid button: un-toggle all the other
        # buttons, then dispatch on the clicked button's tool id.
        if change["new"]:
            current_tool = change["owner"]
            for tool in toolbar_grid.children:
                if tool is not current_tool:
                    tool.value = False
            tool = change["owner"]
            tool_name = tools[tool.icon]["name"]
            if tool_name == "basemap":
                change_basemap(m)
            if tool_name == "split_map":
                split_basemaps(m)
            if tool_name == "planet":
                split_basemaps(m, layers_dict=planet_tiles())
            elif tool_name == "open_data":
                open_data_widget(m)
            elif tool_name == "eraser":
                # Clear drawn features and the cached ROI state they back.
                if m.draw_control is not None:
                    m.draw_control.clear()
                    m.user_roi = None
                    m.user_rois = None
                    m.draw_features = []
            elif tool_name == "whitebox":
                # Imported lazily: whiteboxgui is only needed for this tool.
                import whiteboxgui.whiteboxgui as wbt
                tools_dict = wbt.get_wbt_dict()
                wbt_toolbox = wbt.build_toolbox(
                    tools_dict,
                    max_width="800px",
                    max_height="500px",
                    sandbox_path=m.sandbox_path,
                )
                wbt_control = ipyleaflet.WidgetControl(
                    widget=wbt_toolbox, position="bottomright"
                )
                m.whitebox = wbt_control
                m.add_control(wbt_control)
            elif tool_name == "timeslider":
                m.add_time_slider()
            elif tool_name == "save_map":
                save_map((m))
            elif tool_name == "census":
                census_widget(m)
            elif tool_name == "inspector":
                inspector_gui(m)
            elif tool_name == "search_xyz":
                search_basemaps(m)
            elif tool_name == "download_osm":
                download_osm(m)
            elif tool_name == "raster":
                open_raster_gui(m)
            elif tool_name == "search_geojson":
                search_geojson_gui(m)
            elif tool_name == "attribute_table":
                select_table_gui(m)
            elif tool_name == "edit_vector":
                edit_draw_gui(m)
            elif tool_name == "stac":
                stac_gui(m)
            elif tool_name == "help":
                import webbrowser
                webbrowser.open_new_tab("https://leafmap.org")
                # Help opens a browser tab, so immediately un-toggle.
                current_tool.value = False
        else:
            # tool = change["owner"]
            # tool_name = tools[tool.icon]["name"]
            pass
        m.toolbar_reset()
    for tool in toolbar_grid.children:
        tool.observe(tool_callback, "value")
    # Wrench toggle that expands/collapses the toolbar panel.
    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="wrench",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    m.toolbar_button = toolbar_button
    # Toggle that swaps the footer between the tool grid and the layer list.
    layers_button = widgets.ToggleButton(
        value=False,
        tooltip="Layers",
        icon="server",
        layout=widgets.Layout(height="28px", width="72px"),
    )
    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [layers_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [toolbar_grid]
    # Expand on hover; collapse on mouse-out unless pinned by the wrench.
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )
    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                layers_button.value = False
    toolbar_event.on_dom_event(handle_toolbar_event)
    def toolbar_btn_click(change):
        if change["new"]:
            layers_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not layers_button.value:
                toolbar_widget.children = [toolbar_button]
    toolbar_button.observe(toolbar_btn_click, "value")
    def layers_btn_click(change):
        # Build the layer-manager footer on demand each time the layers
        # toggle turns on; restore the tool grid when it turns off.
        if change["new"]:
            layers_hbox = []
            all_layers_chk = widgets.Checkbox(
                value=False,
                description="All layers on/off",
                indent=False,
                layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
            )
            all_layers_chk.layout.width = "30ex"
            layers_hbox.append(all_layers_chk)
            def all_layers_chk_changed(change):
                if change["new"]:
                    for layer in m.layers:
                        layer.visible = True
                else:
                    for layer in m.layers:
                        layer.visible = False
            all_layers_chk.observe(all_layers_chk_changed, "value")
            # Only tiled layers get per-layer checkbox/opacity rows here.
            layers = [
                lyr
                for lyr in m.layers
                if (
                    isinstance(lyr, ipyleaflet.TileLayer)
                    or isinstance(lyr, ipyleaflet.WMSLayer)
                )
            ]
            # if the layers contain unsupported layers (e.g., GeoJSON, GeoData), adds the ipyleaflet built-in LayerControl
            if len(layers) < (len(m.layers) - 1):
                if m.layer_control is None:
                    layer_control = ipyleaflet.LayersControl(position="topright")
                    m.layer_control = layer_control
                if m.layer_control not in m.controls:
                    m.add_control(m.layer_control)
            # for non-TileLayer, use layer.style={'opacity':0, 'fillOpacity': 0} to turn layer off.
            for layer in layers:
                layer_chk = widgets.Checkbox(
                    value=layer.visible,
                    description=layer.name,
                    indent=False,
                    layout=widgets.Layout(height="18px"),
                )
                layer_chk.layout.width = "25ex"
                layer_opacity = widgets.FloatSlider(
                    value=layer.opacity,
                    min=0,
                    max=1,
                    step=0.01,
                    readout=False,
                    layout=widgets.Layout(width="80px"),
                )
                layer_settings = widgets.ToggleButton(
                    icon="gear",
                    tooltip=layer.name,
                    layout=widgets.Layout(
                        width="25px", height="25px", padding="0px 0px 0px 5px"
                    ),
                )
                # def layer_vis_on_click(change):
                #     if change["new"]:
                #         layer_name = change["owner"].tooltip
                #         change["owner"].value = False
                # layer_settings.observe(layer_vis_on_click, "value")
                # def layer_chk_changed(change):
                #     layer_name = change["owner"].description
                # layer_chk.observe(layer_chk_changed, "value")
                # Browser-side links keep checkbox/slider and layer in sync
                # without a Python round trip.
                widgets.jslink((layer_chk, "value"), (layer, "visible"))
                widgets.jsdlink((layer_opacity, "value"), (layer, "opacity"))
                hbox = widgets.HBox(
                    [layer_chk, layer_settings, layer_opacity],
                    layout=widgets.Layout(padding="0px 8px 0px 8px"),
                )
                layers_hbox.append(hbox)
            toolbar_footer.children = layers_hbox
            toolbar_button.value = False
        else:
            toolbar_footer.children = [toolbar_grid]
    layers_button.observe(layers_btn_click, "value")
    toolbar_control = ipyleaflet.WidgetControl(
        widget=toolbar_widget, position="topright"
    )
    m.add_control(toolbar_control)
def open_data_widget(m):
"""A widget for opening local vector/raster data.
Args:
m (object): leafmap.Map
"""
import warnings
warnings.filterwarnings("ignore")
padding = "0px 0px 0px 5px"
style = {"description_width": "initial"}
file_type = widgets.ToggleButtons(
options=["Shapefile", "GeoJSON", "CSV", "Vector", "Raster"],
tooltips=[
"Open a shapefile",
"Open a GeoJSON file",
"Open a vector dataset",
"Create points from CSV",
"Open a vector dataset",
"Open a raster dataset",
],
)
file_type.style.button_width = "88px"
filepath = widgets.Text(
value="",
description="File path or http URL:",
tooltip="Enter a file path or http URL to vector data",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
http_widget = widgets.HBox()
file_chooser = FileChooser(
os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
)
file_chooser.filter_pattern = "*.shp"
file_chooser.use_dir_icons = True
layer_name = widgets.Text(
value="Shapefile",
description="Enter a layer name:",
tooltip="Enter a layer name for the selected file",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
longitude = widgets.Dropdown(
options=[],
value=None,
description="Longitude:",
layout=widgets.Layout(width="149px", padding=padding),
style=style,
)
latitude = widgets.Dropdown(
options=[],
value=None,
description="Latitude:",
layout=widgets.Layout(width="149px", padding=padding),
style=style,
)
label = widgets.Dropdown(
options=[],
value=None,
description="Label:",
layout=widgets.Layout(width="149px", padding=padding),
style=style,
)
point_check = widgets.Checkbox(
description="Is it a point layer?",
indent=False,
layout=widgets.Layout(padding=padding, width="150px"),
style=style,
)
point_popup = widgets.SelectMultiple(
options=[
"None",
],
value=["None"],
description="Popup attributes:",
disabled=False,
style=style,
)
csv_widget = widgets.HBox()
point_widget = widgets.HBox()
def point_layer_check(change):
if point_check.value:
if filepath.value.strip() != "":
m.default_style = {"cursor": "wait"}
point_popup.options = vector_col_names(filepath.value)
point_popup.value = [point_popup.options[0]]
point_widget.children = [point_check, point_popup]
else:
point_widget.children = [point_check]
point_check.observe(point_layer_check)
ok_cancel = widgets.ToggleButtons(
value=None,
options=["Apply", "Reset", "Close"],
tooltips=["Apply", "Reset", "Close"],
button_style="primary",
)
# ok_cancel.style.button_width = "50px"
bands = widgets.Text(
value=None,
description="Band:",
tooltip="Enter a list of band indices",
style=style,
layout=widgets.Layout(width="150px", padding=padding),
)
vmin = widgets.Text(
value=None,
description="vmin:",
tooltip="Minimum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px"),
)
vmax = widgets.Text(
value=None,
description="vmax:",
tooltip="Maximum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px"),
)
nodata = widgets.Text(
value=None,
description="Nodata:",
tooltip="Nodata the raster to visualize",
style=style,
layout=widgets.Layout(width="150px", padding=padding),
)
palette = widgets.Dropdown(
options=[],
value=None,
description="palette:",
layout=widgets.Layout(width="300px"),
style=style,
)
raster_options = widgets.VBox()
def filepath_change(change):
if file_type.value == "Raster":
pass
# if (
# filepath.value.startswith("http")
# or filepath.value.endswith(".txt")
# or filepath.value.endswith(".csv")
# ):
# bands.disabled = True
# palette.disabled = False
# # x_dim.disabled = True
# # y_dim.disabled = True
# else:
# bands.disabled = False
# palette.disabled = False
# # x_dim.disabled = True
# # y_dim.disabled = True
filepath.observe(filepath_change, "value")
tool_output = widgets.Output(
layout=widgets.Layout(max_height="150px", max_width="500px", overflow="auto")
)
main_widget = widgets.VBox(
[
file_type,
file_chooser,
http_widget,
csv_widget,
layer_name,
point_widget,
raster_options,
ok_cancel,
tool_output,
]
)
tool_output_ctrl = ipyleaflet.WidgetControl(widget=main_widget, position="topright")
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
def bands_changed(change):
if change["new"] and "," in change["owner"].value:
palette.value = None
palette.disabled = True
else:
palette.disabled = False
bands.observe(bands_changed, "value")
def chooser_callback(chooser):
filepath.value = file_chooser.selected
if file_type.value == "CSV":
import pandas as pd
df = pd.read_csv(filepath.value)
col_names = df.columns.values.tolist()
longitude.options = col_names
latitude.options = col_names
label.options = col_names
if "longitude" in col_names:
longitude.value = "longitude"
if "latitude" in col_names:
latitude.value = "latitude"
if "name" in col_names:
label.value = "name"
file_chooser.register_callback(chooser_callback)
def file_type_changed(change):
ok_cancel.value = None
file_chooser.default_path = os.getcwd()
file_chooser.reset()
layer_name.value = file_type.value
csv_widget.children = []
filepath.value = ""
tool_output.clear_output()
if change["new"] == "Shapefile":
file_chooser.filter_pattern = "*.shp"
raster_options.children = []
point_widget.children = []
point_check.value = False
http_widget.children = []
elif change["new"] == "GeoJSON":
file_chooser.filter_pattern = ["*.geojson", "*.json"]
raster_options.children = []
point_widget.children = []
point_check.value = False
http_widget.children = [filepath]
elif change["new"] == "Vector":
file_chooser.filter_pattern = "*.*"
raster_options.children = []
point_widget.children = [point_check]
point_check.value = False
http_widget.children = [filepath]
elif change["new"] == "CSV":
file_chooser.filter_pattern = ["*.csv", "*.CSV"]
csv_widget.children = [longitude, latitude, label]
raster_options.children = []
point_widget.children = []
point_check.value = False
http_widget.children = [filepath]
elif change["new"] == "Raster":
file_chooser.filter_pattern = ["*.tif", "*.img"]
palette.options = get_palettable(types=["matplotlib", "cartocolors"])
palette.value = None
raster_options.children = [
widgets.HBox([bands, vmin, vmax]),
widgets.HBox([nodata, palette]),
]
point_widget.children = []
point_check.value = False
http_widget.children = [filepath]
def ok_cancel_clicked(change):
if change["new"] == "Apply":
m.default_style = {"cursor": "wait"}
file_path = filepath.value
with tool_output:
tool_output.clear_output()
if file_path.strip() != "":
ext = os.path.splitext(file_path)[1]
if point_check.value:
popup = list(point_popup.value)
if len(popup) == 1:
popup = popup[0]
m.add_point_layer(
file_path,
popup=popup,
layer_name=layer_name.value,
)
elif ext.lower() == ".shp":
m.add_shp(file_path, style={}, layer_name=layer_name.value)
elif ext.lower() == ".geojson":
m.add_geojson(file_path, style={}, layer_name=layer_name.value)
elif ext.lower() == ".csv" and file_type.value == "CSV":
m.add_xy_data(
file_path,
x=longitude.value,
y=latitude.value,
label=label.value,
layer_name=layer_name.value,
)
elif (
ext.lower() in [".tif", "img"]
) and file_type.value == "Raster":
band = None
vis_min = None
vis_max = None
vis_nodata = None
try:
if len(bands.value) > 0:
band = int(bands.value)
if len(vmin.value) > 0:
vis_min = float(vmin.value)
if len(vmax.value) > 0:
vis_max = float(vmax.value)
if len(nodata.value) > 0:
vis_nodata = float(nodata.value)
except Exception as _:
pass
m.add_local_tile(
file_path,
layer_name=layer_name.value,
band=band,
palette=palette.value,
vmin=vis_min,
vmax=vis_max,
nodata=vis_nodata,
)
else:
print("Please select a file to open.")
m.toolbar_reset()
m.default_style = {"cursor": "default"}
elif change["new"] == "Reset":
file_chooser.reset()
tool_output.clear_output()
filepath.value = ""
m.toolbar_reset()
elif change["new"] == "Close":
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
m.tool_output_ctrl = None
m.toolbar_reset()
ok_cancel.value = None
file_type.observe(file_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
# file_chooser.register_callback(chooser_callback)
m.add_control(tool_output_ctrl)
m.tool_output_ctrl = tool_output_ctrl
def open_raster_gui(m):
    """A widget for opening local/remote COG/STAC data.

    Builds an ipyleaflet WidgetControl with four source modes (local GeoTIFF,
    remote COG, remote STAC item, Microsoft Planetary Computer) and wires the
    callbacks that load the selected raster onto the map on "Apply".

    Args:
        m (object): leafmap.Map
    """
    padding = "0px 0px 0px 5px"
    style = {"description_width": "initial"}
    tool_output = widgets.Output(
        layout=widgets.Layout(max_height="150px", max_width="500px", overflow="auto")
    )
    file_type = widgets.ToggleButtons(
        options=["GeoTIFF", "COG", "STAC", "Microsoft"],
        tooltips=[
            "Open a local GeoTIFF file",
            "Open a remote COG file",
            "Open a remote STAC item",
            "Create COG from Microsoft Planetary Computer",
        ],
    )
    file_type.style.button_width = "110px"
    file_chooser = FileChooser(
        os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
    )
    file_chooser.filter_pattern = ["*.tif", "*.tiff"]
    file_chooser.use_dir_icons = True
    # source_widget's children are swapped per file_type: the local chooser,
    # the URL text box, or the Planetary Computer collection/item/assets row.
    source_widget = widgets.VBox([file_chooser])
    http_url = widgets.Text(
        value="",
        description="HTTP URL:",
        tooltip="Enter an http URL to COG file",
        style=style,
        layout=widgets.Layout(width="454px", padding=padding),
    )
    # Planetary Computer inputs (only shown in "Microsoft" mode).
    collection = widgets.Dropdown(
        options=["landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2"],
        value="landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2",
        description="PC Collection:",
        style=style,
        layout=widgets.Layout(width="454px", padding=padding),
    )
    items = widgets.Text(
        value="LC08_L2SP_047027_20201204_02_T1",
        description="STAC Items:",
        tooltip="STAC Item ID",
        style=style,
        layout=widgets.Layout(width="454px", padding=padding),
    )
    assets = widgets.Text(
        value="SR_B7,SR_B5,SR_B4",
        description="STAC Assets:",
        tooltip="STAC Asset ID",
        style=style,
        layout=widgets.Layout(width="454px", padding=padding),
    )
    layer_name = widgets.Text(
        value="GeoTIFF",
        description="Enter a layer name:",
        tooltip="Enter a layer name for the selected file",
        style=style,
        layout=widgets.Layout(width="454px", padding=padding),
    )
    ok_cancel = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    # ok_cancel.style.button_width = "50px"
    # NOTE(review): `bands` is observed below but never added to any layout
    # in this function — confirm whether it is intentionally unused here.
    bands = widgets.Text(
        value=None,
        description="Band:",
        tooltip="Enter a list of band indices",
        style=style,
        layout=widgets.Layout(width="150px", padding=padding),
    )
    band_width = "149px"
    # Band selectors for the red/green/blue channels of the visualization.
    red = widgets.Dropdown(
        value=None,
        options=[],
        description="Red:",
        tooltip="Select a band for the red channel",
        style=style,
        layout=widgets.Layout(width=band_width, padding=padding),
    )
    green = widgets.Dropdown(
        value=None,
        options=[],
        description="Green:",
        tooltip="Select a band for the green channel",
        style=style,
        layout=widgets.Layout(width="148px", padding=padding),
    )
    blue = widgets.Dropdown(
        value=None,
        options=[],
        description="Blue:",
        tooltip="Select a band for the blue channel",
        style=style,
        layout=widgets.Layout(width=band_width, padding=padding),
    )
    vmin = widgets.Text(
        value=None,
        description="vmin:",
        tooltip="Minimum value of the raster to visualize",
        style=style,
        layout=widgets.Layout(width="148px", padding=padding),
    )
    vmax = widgets.Text(
        value=None,
        description="vmax:",
        tooltip="Maximum value of the raster to visualize",
        style=style,
        layout=widgets.Layout(width="148px", padding=padding),
    )
    nodata = widgets.Text(
        value=None,
        description="Nodata:",
        tooltip="Nodata the raster to visualize",
        style=style,
        layout=widgets.Layout(width="150px", padding=padding),
    )
    # Local tiles and COG/STAC endpoints accept different palette name sets.
    local_tile_palettes = list_palettes(add_extra=True)
    cog_stac_palettes = list_palettes(lowercase=True)
    palette_options = local_tile_palettes
    palette = widgets.Dropdown(
        options=palette_options,
        value=None,
        description="palette:",
        layout=widgets.Layout(width="300px", padding=padding),
        style=style,
    )
    checkbox = widgets.Checkbox(
        value=False,
        description="Additional params",
        indent=False,
        layout=widgets.Layout(width="154px", padding=padding),
        style=style,
    )
    add_params_text1 = "Additional parameters in the format of a dictionary, for example, \n {'palette': ['#006633', '#E5FFCC', '#662A00', '#D8D8D8', '#F5F5F5']}"
    add_params_text2 = "Additional parameters in the format of a dictionary, for example, \n {'expression': '(SR_B5-SR_B4)/(SR_B5+SR_B4)'}"
    add_params = widgets.Textarea(
        value="",
        placeholder=add_params_text1,
        layout=widgets.Layout(width="454px", padding=padding),
        style=style,
    )
    params_widget = widgets.HBox()
    raster_options = widgets.VBox()
    raster_options.children = [
        widgets.HBox([red, green, blue]),
        widgets.HBox([vmin, vmax, nodata]),
        widgets.HBox([palette, checkbox]),
        params_widget,
    ]

    def collection_changed(change):
        # Populate item/asset/band widgets from the Planetary Computer
        # inventory cached on the map object.
        if change["new"]:
            if not hasattr(m, "pc_inventory"):
                setattr(m, "pc_inventory", get_pc_inventory())
            col_name = change["new"].split(" - ")[0]
            items.value = m.pc_inventory[col_name]["first_item"]
            band_names = m.pc_inventory[col_name]["bands"]
            red.options = band_names
            green.options = band_names
            blue.options = band_names
            # Hard-coded sample items for the two most common collections;
            # all other collections fall back to the first bands listed.
            if change["new"] == "landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2":
                items.value = "LC08_L2SP_047027_20201204_02_T1"
                assets.value = "SR_B7,SR_B5,SR_B4"
                red.value = "SR_B7"
                green.value = "SR_B5"
                blue.value = "SR_B4"
            elif change["new"] == "sentinel-2-l2a - Sentinel-2 Level-2A":
                items.value = "S2B_MSIL2A_20190629T212529_R043_T06VVN_20201006T080531"
                assets.value = "B08,B04,B03"
                red.value = "B08"
                green.value = "B04"
                blue.value = "B03"
            else:
                if len(band_names) > 2:
                    assets.value = ",".join(band_names[:3])
                    red.value = band_names[0]
                    green.value = band_names[1]
                    blue.value = band_names[2]
                else:
                    assets.value = band_names[0]
                    red.value = band_names[0]
                    green.value = band_names[0]
                    blue.value = band_names[0]

    collection.observe(collection_changed, names="value")

    def band_changed(change):
        # NOTE(review): change["name"] is the trait name ("value"), which is
        # always truthy here — this condition looks like it was meant to be
        # change["new"]; confirm intent before altering.
        if change["name"]:
            if not checkbox.value:
                if file_type.value == "GeoTIFF":
                    # Recompute stretch limits for the newly selected bands.
                    if hasattr(m, "tile_client"):
                        min_max = local_tile_vmin_vmax(
                            m.tile_client, bands=[red.value, green.value, blue.value]
                        )
                        vmin.value = str(min_max[0])
                        vmax.value = str(min_max[1])
                elif file_type.value == "Microsoft":
                    # Keep the assets text box in sync with the RGB dropdowns.
                    if len(set([red.value, green.value, blue.value])) == 1:
                        assets.value = f"{red.value}"
                    else:
                        assets.value = f"{red.value},{green.value},{blue.value}"

    red.observe(band_changed, names="value")
    green.observe(band_changed, names="value")
    blue.observe(band_changed, names="value")

    def checkbox_changed(change):
        # Show/hide the free-form "additional params" textarea.
        if change["new"]:
            params_widget.children = [add_params]
        else:
            params_widget.children = []

    checkbox.observe(checkbox_changed, names="value")

    def url_change(change):
        # Fetch band names from a COG/STAC URL as soon as one is entered.
        if change["new"] and change["new"].startswith("http"):
            with tool_output:
                try:
                    print("Retrieving band names...")
                    # NOTE(review): if file_type is neither COG nor STAC,
                    # `bandnames` is unbound and the NameError is swallowed
                    # by the except below — confirm this is acceptable.
                    if file_type.value == "COG":
                        bandnames = cog_bands(change["new"])
                    elif file_type.value == "STAC":
                        bandnames = stac_bands(change["new"])
                    red.options = bandnames
                    green.options = bandnames
                    blue.options = bandnames
                    if len(bandnames) > 2:
                        red.value = bandnames[0]
                        green.value = bandnames[1]
                        blue.value = bandnames[2]
                    else:
                        red.value = bandnames[0]
                        green.value = bandnames[0]
                        blue.value = bandnames[0]
                    tool_output.clear_output()
                except Exception as e:
                    print(e)
                    print("Error loading URL.")
                    return
        else:
            # URL cleared or invalid: reset all visualization inputs.
            red.options = []
            green.options = []
            blue.options = []
            vmin.value = ""
            vmax.value = ""
            nodata.value = ""
            palette.value = None

    http_url.observe(url_change, names="value")
    main_widget = widgets.VBox(
        [
            file_type,
            source_widget,
            layer_name,
            raster_options,
            ok_cancel,
            tool_output,
        ]
    )
    tool_output_ctrl = ipyleaflet.WidgetControl(widget=main_widget, position="topright")
    # Remove any previously added instance of this tool before re-adding.
    if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
        m.remove_control(m.tool_output_ctrl)

    def bands_changed(change):
        # A comma-separated band list implies multi-band RGB, for which a
        # single-band palette does not apply.
        if change["new"] and "," in change["owner"].value:
            palette.value = None
            palette.disabled = True
        else:
            palette.disabled = False

    bands.observe(bands_changed, "value")

    def chooser_callback(chooser):
        # Local GeoTIFF selected: open a tile client and derive band names
        # and default vmin/vmax stretch values from it.
        try:
            source = file_chooser.selected
            tile_layer, tile_client = get_local_tile_layer(source, return_client=True)
            # NOTE(review): tile_client is cached only on first selection;
            # choosing a second file leaves m.tile_client stale — verify.
            if not hasattr(m, "tile_client"):
                setattr(m, "tile_client", tile_client)
            bandnames = local_tile_bands(tile_client)
            red.options = bandnames
            green.options = bandnames
            blue.options = bandnames
            if len(bandnames) > 2:
                red.value = bandnames[0]
                green.value = bandnames[1]
                blue.value = bandnames[2]
                min_max = local_tile_vmin_vmax(
                    tile_client, bands=[red.value, green.value, blue.value]
                )
                vmin.value = str(min_max[0])
                vmax.value = str(min_max[1])
            else:
                red.value = bandnames[0]
                green.value = bandnames[0]
                blue.value = bandnames[0]
                min_max = local_tile_vmin_vmax(tile_client)
                vmin.value = str(min_max[0])
                vmax.value = str(min_max[1])
        except Exception as e:
            with tool_output:
                print(e)

    file_chooser.register_callback(chooser_callback)

    def file_type_changed(change):
        # Reset the whole form, then reconfigure the source/palette widgets
        # for the newly selected mode.
        ok_cancel.value = None
        file_chooser.default_path = os.getcwd()
        file_chooser.reset()
        layer_name.value = file_type.value
        http_url.value = ""
        tool_output.clear_output()
        red.value = None
        green.value = None
        blue.value = None
        vmin.value = ""
        vmax.value = ""
        nodata.value = ""
        palette.value = None
        if change["new"] == "GeoTIFF":
            source_widget.children = [file_chooser]
            file_chooser.filter_pattern = ["*.tif", "*.tiff"]
            palette.options = local_tile_palettes
            palette.value = None
            add_params.placeholder = add_params_text1
            raster_options.children = [
                widgets.HBox([red, green, blue]),
                widgets.HBox([vmin, vmax, nodata]),
                widgets.HBox([palette, checkbox]),
                params_widget,
            ]
        elif change["new"] == "COG":
            # Pre-fill a sample public COG so the tool works out of the box.
            http_url.value = "https://opendata.digitalglobe.com/events/california-fire-2020/post-event/2020-08-14/pine-gulch-fire20/10300100AAC8DD00.tif"
            source_widget.children = [http_url]
            palette.options = cog_stac_palettes
            palette.value = None
            add_params.placeholder = add_params_text2
        elif change["new"] == "STAC":
            http_url.value = "https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json"
            source_widget.children = [http_url]
            palette.options = cog_stac_palettes
            palette.value = None
            red.value = "B3"
            green.value = "B2"
            blue.value = "B1"
            add_params.placeholder = add_params_text2
        elif change["new"] == "Microsoft":
            source_widget.children = [collection, items, assets]
            palette.options = cog_stac_palettes
            palette.value = None
            add_params.placeholder = add_params_text2
            collection.options = get_pc_collection_list()
            collection.value = "landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2"
            if not hasattr(m, "pc_inventory"):
                setattr(m, "pc_inventory", get_pc_inventory())
            items.value = "LC08_L2SP_047027_20201204_02_T1"
            assets.value = "SR_B7,SR_B5,SR_B4"

    def ok_cancel_clicked(change):
        if change["new"] == "Apply":
            m.default_style = {"cursor": "wait"}
            # file_path = http_url.value
            with tool_output:
                tool_output.clear_output()
                print("Loading data...")
                if file_type.value == "GeoTIFF" and file_chooser.selected:
                    band = None
                    vis_min = None
                    vis_max = None
                    vis_nodata = None
                    vis_palette = None
                    try:
                        if len(red.options) > 2:
                            band = [red.value, green.value, blue.value]
                            if len(set(band)) > 1:
                                # True RGB composite: palettes do not apply.
                                palette.value = None
                            else:
                                band = [red.value]
                        else:
                            band = [red.value]
                        if len(vmin.value) > 0:
                            vis_min = float(vmin.value)
                        if len(vmax.value) > 0:
                            vis_max = float(vmax.value)
                        if len(nodata.value) > 0:
                            vis_nodata = float(nodata.value)
                        if (
                            checkbox.value
                            and add_params.value.strip().startswith("{")
                            and add_params.value.strip().endswith("}")
                        ):
                            # SECURITY NOTE(review): eval() on user-typed
                            # text executes arbitrary code; consider
                            # ast.literal_eval for dict-only input.
                            vis_params = eval(add_params.value)
                            if "palette" in vis_params:
                                vis_palette = vis_params["palette"]
                            else:
                                vis_palette = get_palette_colors(
                                    palette.value, hashtag=True
                                )
                        elif palette.value is not None:
                            vis_palette = get_palette_colors(
                                palette.value, hashtag=True
                            )
                    except Exception as e:
                        # Best effort: fall back to the defaults above when
                        # any of the text inputs fail to parse.
                        pass
                    m.add_local_tile(
                        file_chooser.selected,
                        layer_name=layer_name.value,
                        band=band,
                        palette=vis_palette,
                        vmin=vis_min,
                        vmax=vis_max,
                        nodata=vis_nodata,
                    )
                    tool_output.clear_output()
                elif file_type.value in ["COG", "STAC"] and http_url.value:
                    try:
                        tool_output.clear_output()
                        print("Loading data...")
                        if (
                            checkbox.value
                            and add_params.value.strip().startswith("{")
                            and add_params.value.strip().endswith("}")
                        ):
                            # SECURITY NOTE(review): eval() on user input.
                            vis_params = eval(add_params.value)
                        else:
                            vis_params = {}
                        if (
                            palette.value
                            and len(set([red.value, green.value, blue.value])) == 1
                        ):
                            vis_params["colormap_name"] = palette.value
                        elif (
                            palette.value
                            and len(set([red.value, green.value, blue.value])) > 1
                        ):
                            palette.value = None
                            print("Palette can only be set for single band images.")
                        if vmin.value and vmax.value:
                            vis_params["rescale"] = f"{vmin.value},{vmax.value}"
                        if nodata.value:
                            vis_params["nodata"] = nodata.value
                        if file_type.value == "COG":
                            m.add_cog_layer(
                                http_url.value,
                                name=layer_name.value,
                                bands=[red.value, green.value, blue.value],
                                **vis_params,
                            )
                        elif file_type.value == "STAC":
                            m.add_stac_layer(
                                http_url.value,
                                bands=[red.value, green.value, blue.value],
                                name=layer_name.value,
                                **vis_params,
                            )
                        tool_output.clear_output()
                    except Exception as e:
                        print(e)
                        print("Error loading data.")
                        return
                elif file_type.value == "Microsoft":
                    try:
                        tool_output.clear_output()
                        print("Loading data...")
                        if (
                            checkbox.value
                            and add_params.value.strip().startswith("{")
                            and add_params.value.strip().endswith("}")
                        ):
                            # SECURITY NOTE(review): eval() on user input.
                            vis_params = eval(add_params.value)
                        else:
                            vis_params = {}
                        # A palette is allowed for single-band display or when
                        # a band-math "expression" produces a single output.
                        if (
                            palette.value
                            and len(set([red.value, green.value, blue.value])) == 1
                        ) or (palette.value and "expression" in vis_params):
                            vis_params["colormap_name"] = palette.value
                        elif (
                            palette.value
                            and len(set([red.value, green.value, blue.value])) > 1
                            and "expression" not in vis_params
                        ):
                            palette.value = None
                            print("Palette can only be set for single band images.")
                        if vmin.value and vmax.value:
                            vis_params["rescale"] = f"{vmin.value},{vmax.value}"
                        if nodata.value:
                            vis_params["nodata"] = nodata.value
                        col = collection.value.split(" - ")[0]
                        m.add_stac_layer(
                            collection=col,
                            item=items.value,
                            assets=assets.value,
                            name=layer_name.value,
                            **vis_params,
                        )
                        tool_output.clear_output()
                    except Exception as e:
                        print(e)
                        print("Error loading data.")
                        return
                else:
                    tool_output.clear_output()
                    print("Please select a file and enter an http URL.")
            m.toolbar_reset()
            m.default_style = {"cursor": "default"}
        elif change["new"] == "Reset":
            # Clear every input back to its initial state.
            file_chooser.reset()
            tool_output.clear_output()
            http_url.value = ""
            add_params.value = ""
            checkbox.value = False
            palette.value = None
            red.value = None
            green.value = None
            blue.value = None
            vmin.value = ""
            vmax.value = ""
            nodata.value = ""
            collection.value = None
            items.value = ""
            assets.value = ""
            m.toolbar_reset()
        elif change["new"] == "Close":
            if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
                m.remove_control(m.tool_output_ctrl)
                m.tool_output_ctrl = None
            m.toolbar_reset()
        # Un-press the toggle so the same action can be clicked again.
        ok_cancel.value = None

    file_type.observe(file_type_changed, names="value")
    ok_cancel.observe(ok_cancel_clicked, names="value")
    m.add_control(tool_output_ctrl)
    m.tool_output_ctrl = tool_output_ctrl
def change_basemap(m):
    """Widget for changing basemaps.

    Adds a dropdown + close button to the map; selecting an entry swaps the
    basemap in layer slot 1 and, when the provider defines bounds, zooms to it.

    Args:
        m (object): leafmap.Map.
    """
    from .basemaps import get_xyz_dict
    from .leafmap import basemaps

    xyz_dict = get_xyz_dict()

    # Ensure layer slot 1 holds the OpenStreetMap basemap so the dropdown
    # below always has a known layer to substitute.
    current_layers = list(m.layers)
    if len(current_layers) == 1:
        current_layers = [current_layers[0]] + [basemaps["OpenStreetMap"]]
    elif len(current_layers) > 1 and (current_layers[1].name != "OpenStreetMap"):
        current_layers = (
            [current_layers[0]] + [basemaps["OpenStreetMap"]] + current_layers[1:]
        )
    m.layers = current_layers

    selector = widgets.Dropdown(
        options=list(basemaps.keys()),
        value="OpenStreetMap",
        layout=widgets.Layout(width="200px"),
    )
    dismiss_btn = widgets.Button(
        icon="times",
        tooltip="Close the basemap widget",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )
    container = widgets.HBox([selector, dismiss_btn])

    def swap_basemap(change):
        chosen = change["new"]
        m.substitute_layer(m.layers[1], basemaps[chosen])
        meta = xyz_dict.get(chosen)
        if meta is not None and "bounds" in meta:
            corners = meta["bounds"]
            # Reorder the two corner pairs into the flat list zoom_to_bounds
            # takes (presumably [west, south, east, north] — matches original).
            flat = [corners[0][1], corners[0][0], corners[1][1], corners[1][0]]
            m.zoom_to_bounds(flat)

    selector.observe(swap_basemap, "value")

    def dismiss(change):
        m.toolbar_reset()
        if m.basemap_ctrl is not None and m.basemap_ctrl in m.controls:
            m.remove_control(m.basemap_ctrl)
        container.close()

    dismiss_btn.on_click(dismiss)

    ctrl = ipyleaflet.WidgetControl(widget=container, position="topright")
    m.add_control(ctrl)
    m.basemap_ctrl = ctrl
def save_map(m):
    """Saves the map as HTML, JPG, or PNG.

    Args:
        m (leafmap.Map): The leafmap Map object.
    """
    import time

    output = widgets.Output()
    m.tool_output = output
    output.clear_output(wait=True)

    container = widgets.VBox()

    export_type = widgets.ToggleButtons(
        options=["HTML", "PNG", "JPG"],
        tooltips=[
            "Save the map as an HTML file",
            "Take a screenshot and save as a PNG file",
            "Take a screenshot and save as a JPG file",
        ],
    )
    chooser = FileChooser(
        os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
    )
    chooser.default_filename = "my_map.html"
    chooser.use_dir_icons = True

    ok_cancel = widgets.ToggleButtons(
        value=None,
        options=["OK", "Cancel", "Close"],
        tooltips=["OK", "Cancel", "Close"],
        button_style="primary",
    )

    # Default output filename per export mode.
    default_names = {"HTML": "my_map.html", "PNG": "my_map.png", "JPG": "my_map.jpg"}

    def on_type_change(change):
        ok_cancel.value = None
        chooser.default_path = os.getcwd()
        chooser.default_filename = default_names[change["new"]]
        container.children = [export_type, chooser]

    def on_file_selected(_chooser):
        # Reveal the OK/Cancel/Close buttons once a path has been picked.
        container.children = [export_type, chooser, ok_cancel]

    def _screenshot(file_path):
        # Hide the toolbar and this control so they don't appear in the
        # capture, give the UI time to settle, then grab the screen.
        m.toolbar_button.value = False
        if m.save_map_control is not None:
            m.remove_control(m.save_map_control)
            m.save_map_control = None
        time.sleep(2)
        screen_capture(outfile=file_path)

    def on_ok_cancel(change):
        if change["new"] == "OK":
            file_path = chooser.selected
            ext = os.path.splitext(file_path)[1]
            ext_matches = ext.upper() == "." + export_type.value
            if export_type.value == "HTML" and ext_matches:
                output.clear_output()
                m.to_html(file_path)
            elif export_type.value in ("PNG", "JPG") and ext_matches:
                output.clear_output()
                _screenshot(file_path)
            else:
                mismatch = widgets.Label(
                    value="The selected file extension does not match the selected exporting type."
                )
                container.children = [export_type, chooser, mismatch]
        elif change["new"] == "Cancel":
            output.clear_output()
            chooser.reset()
        elif change["new"] == "Close":
            if m.save_map_control is not None:
                m.remove_control(m.save_map_control)
                m.save_map_control = None
        ok_cancel.value = None
        m.toolbar_reset()

    export_type.observe(on_type_change, names="value")
    ok_cancel.observe(on_ok_cancel, names="value")
    chooser.register_callback(on_file_selected)

    container.children = [export_type, chooser]
    ctrl = ipyleaflet.WidgetControl(widget=container, position="topright")
    m.add_control(ctrl)
    m.save_map_control = ctrl
def split_basemaps(
    m, layers_dict=None, left_name=None, right_name=None, width="120px", **kwargs
):
    """Create a split-panel map for visualizing two maps.

    Args:
        m (ipyleaflet.Map): An ipyleaflet map object.
        layers_dict (dict, optional): A dictionary of TileLayers. Defaults to None.
        left_name (str, optional): The default value of the left dropdown list. Defaults to None.
        right_name (str, optional): The default value of the right dropdown list. Defaults to None.
        width (str, optional): The width of the dropdown list. Defaults to "120px".
    """
    from .leafmap import basemaps

    # Remember the current state so the close button can restore it.
    saved_controls = m.controls
    saved_layers = m.layers
    m.clear_controls()

    if layers_dict is None:
        # Default to every built-in basemap except WMS layers.
        layers_dict = {
            name: basemaps[name]
            for name in dict(basemaps)
            if not isinstance(basemaps[name], ipyleaflet.WMSLayer)
        }

    names = list(layers_dict.keys())
    if left_name is None:
        left_name = names[0]
    if right_name is None:
        right_name = names[-1]

    split_ctrl = ipyleaflet.SplitMapControl(
        left_layer=layers_dict[left_name], right_layer=layers_dict[right_name]
    )
    m.add_control(split_ctrl)

    left_dropdown = widgets.Dropdown(
        options=names, value=left_name, layout=widgets.Layout(width=width)
    )
    m.add_control(ipyleaflet.WidgetControl(widget=left_dropdown, position="topleft"))

    right_dropdown = widgets.Dropdown(
        options=names, value=right_name, layout=widgets.Layout(width=width)
    )
    m.add_control(ipyleaflet.WidgetControl(widget=right_dropdown, position="topright"))

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    def restore_map(change):
        # Put the controls and layers back the way they were.
        if change["new"]:
            m.controls = saved_controls
            m.clear_layers()
            m.layers = saved_layers

    close_button.observe(restore_map, "value")
    m.add_control(ipyleaflet.WidgetControl(widget=close_button, position="bottomright"))

    m.add_control(ipyleaflet.ZoomControl())
    m.add_control(ipyleaflet.FullScreenControl())
    m.add_control(ipyleaflet.ScaleControl(position="bottomleft"))

    def sync_left(change):
        split_ctrl.left_layer.url = layers_dict[left_dropdown.value].url

    left_dropdown.observe(sync_left, "value")

    def sync_right(change):
        split_ctrl.right_layer.url = layers_dict[right_dropdown.value].url

    right_dropdown.observe(sync_right, "value")
def time_slider(
    m,
    layers_dict=None,
    labels=None,
    time_interval=1,
    position="bottomright",
    slider_length="150px",
):
    """Adds a time slider to the map.

    Args:
        m (leafmap.Map): The map to add the time slider to.
        layers_dict (dict, optional): The dictionary containing a set of XYZ tile
            layers. Defaults to None (falls back to Planet monthly tiles).
        labels (list, optional): The list of labels to be used for the time series.
            Defaults to None, which uses the keys of layers_dict.
        time_interval (int, optional): Time interval in seconds. Defaults to 1.
        position (str, optional): Position to place the time slider, can be any of
            ['topleft', 'topright', 'bottomleft', 'bottomright']. Defaults to "bottomright".
        slider_length (str, optional): Length of the time slider. Defaults to "150px".

    Raises:
        TypeError: If layers_dict is not a dictionary.
        ValueError: If labels and layers_dict have different lengths.
    """
    import time
    import threading

    # None (not a mutable {} default) avoids state shared across calls;
    # an omitted/empty dict falls back to Planet monthly tiles as before.
    if layers_dict is None:
        layers_dict = {}
    if not isinstance(layers_dict, dict):
        raise TypeError("The layers_dict must be a dictionary.")
    if len(layers_dict) == 0:
        layers_dict = planet_monthly_tiles()
    if labels is None:
        labels = list(layers_dict.keys())
    if len(labels) != len(layers_dict):
        raise ValueError("The length of labels is not equal to that of layers_dict.")

    # Positional keys of layers_dict; labels[i] displays layers_dict[keys[i]].
    keys = list(layers_dict.keys())

    slider = widgets.IntSlider(
        min=1,
        max=len(labels),
        readout=False,
        continuous_update=False,
        layout=widgets.Layout(width=slider_length),
    )
    label = widgets.Label(
        value=labels[0], layout=widgets.Layout(padding="0px 5px 0px 5px")
    )
    play_btn = widgets.Button(
        icon="play",
        tooltip="Play the time slider",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )
    pause_btn = widgets.Button(
        icon="pause",
        tooltip="Pause the time slider",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )
    close_btn = widgets.Button(
        icon="times",
        tooltip="Close the time slider",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )
    # Hidden flag the worker thread polls; pause/close set it False to stop.
    play_chk = widgets.Checkbox(value=False)
    slider_widget = widgets.HBox([label, slider, play_btn, pause_btn, close_btn])

    def play_click(b):
        # Guard: repeated play clicks must not spawn extra worker threads,
        # which would advance the slider several steps per interval.
        if play_chk.value:
            return
        play_chk.value = True

        def work():
            while play_chk.value:
                if slider.value < len(labels):
                    slider.value += 1
                else:
                    slider.value = 1
                time.sleep(time_interval)

        # Daemon thread so a running animation never blocks interpreter exit.
        threading.Thread(target=work, daemon=True).start()

    def pause_click(b):
        play_chk.value = False

    play_btn.on_click(play_click)
    pause_btn.on_click(pause_click)

    layer = layers_dict[keys[0]]
    m.add_layer(layer)

    def slider_changed(change):
        m.default_style = {"cursor": "wait"}
        index = slider.value - 1
        label.value = labels[index]
        # Index layers_dict by its own key, not by the display label:
        # custom labels need not be dict keys (the old code raised KeyError).
        layer.url = layers_dict[keys[index]].url
        layer.name = layers_dict[keys[index]].name
        m.default_style = {"cursor": "default"}

    slider.observe(slider_changed, "value")

    def close_click(b):
        play_chk.value = False
        m.toolbar_reset()
        if m.slider_ctrl is not None and m.slider_ctrl in m.controls:
            m.remove_control(m.slider_ctrl)
        slider_widget.close()

    close_btn.on_click(close_click)

    slider_ctrl = ipyleaflet.WidgetControl(widget=slider_widget, position=position)
    m.add_control(slider_ctrl)
    m.slider_ctrl = slider_ctrl
def census_widget(m=None):
    """Widget for adding US Census data.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget.
    """
    from owslib.wms import WebMapService

    census_dict = get_census_dict()
    # Add a default layer immediately so the map is not empty on open.
    m.add_census_data("Census 2020", "States")
    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}
    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="address-book",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    wms = widgets.Dropdown(
        options=census_dict.keys(),
        value="Census 2020",
        description="WMS:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )
    layer = widgets.Dropdown(
        options=census_dict["Census 2020"]["layers"],
        value="States",
        description="Layer:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )
    checkbox = widgets.Checkbox(
        description="Replace existing census data layer",
        value=True,
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )
    # output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        wms,
        layer,
        checkbox,
        # output,
    ]
    # Expand the toolbar on hover, collapse it on mouse-out (unless pinned
    # open via the toolbar button).
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Toggled on: pin the toolbar open; toggled off: collapse it.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Tear down the tool and remove its map control.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def wms_change(change):
        # Selecting a WMS service repopulates the layer dropdown.
        layer.options = census_dict[change["new"]]["layers"]
        layer.value = layer.options[0]

    wms.observe(wms_change, "value")

    def layer_change(change):
        if change["new"] != "":
            if checkbox.value:
                # Drop the last-added layer before adding the replacement.
                m.layers = m.layers[:-1]
            m.add_census_data(wms.value, layer.value)
            # with output:
            #     w = WebMapService(census_dict[wms.value]["url"])
            #     output.clear_output()
            #     print(w[layer.value].abstract)

    layer.observe(layer_change, "value")
    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def search_basemaps(m=None):
    """The widget for searching XYZ tile services and Quick Map Services (QMS).

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget when ``m`` is None; otherwise the
            widget is added to the map as a control and nothing is returned.
    """
    import xyzservices.providers as xyz
    from xyzservices import TileProvider

    # Snapshot the current layers so a newly selected provider replaces the
    # previous search result instead of stacking up on the map.
    # BUGFIX: the original read m.layers unconditionally, which raised
    # AttributeError when m is None even though None is a supported value
    # (see the `return toolbar_widget` branch at the bottom).
    layers = m.layers if m is not None else []

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="search",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    checkbox = widgets.Checkbox(
        description="Search Quick Map Services (QMS)",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )

    providers = widgets.Dropdown(
        options=[],
        value=None,
        description="XYZ Tile:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    keyword = widgets.Text(
        value="",
        description="Search keyword:",
        placeholder="OpenStreetMap",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    def search_callback(change):
        # Pressing Enter in the keyword box runs the same search as the
        # "Search" toggle button below.
        providers.options = []
        if keyword.value != "":
            tiles = search_xyz_services(keyword=keyword.value)
            if checkbox.value:
                tiles = tiles + search_qms(keyword=keyword.value)
            providers.options = tiles

    keyword.on_submit(search_callback)

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Search", "Reset", "Close"],
        tooltips=["Search", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    def providers_change(change):
        # Dropdown options are prefixed with "qms." or "xyz.", so the first
        # four characters identify where the provider came from.
        if change["new"] != "":
            provider = change["new"]
            if provider is not None:
                if provider.startswith("qms"):
                    with output:
                        output.clear_output()
                        print("Adding data. Please wait...")
                    name = provider[4:]
                    qms_provider = TileProvider.from_qms(name)
                    url = qms_provider.build_url()
                    attribution = qms_provider.attribution
                    m.layers = layers  # drop the previously added search layer
                    m.add_tile_layer(url, name, attribution)
                    output.clear_output()
                elif provider.startswith("xyz"):
                    name = provider[4:]
                    xyz_provider = xyz.flatten()[name]
                    url = xyz_provider.build_url()
                    attribution = xyz_provider.attribution
                    m.layers = layers
                    if xyz_provider.requires_token():
                        with output:
                            output.clear_output()
                            print(f"{provider} requires an API Key.")
                    m.add_tile_layer(url, name, attribution)

    providers.observe(providers_change, "value")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        keyword,
        providers,
        buttons,
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand the panel on hover; collapse on leave unless pinned open
        # via the toolbar toggle button.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Search":
            providers.options = []
            if keyword.value != "":
                tiles = search_xyz_services(keyword=keyword.value)
                if checkbox.value:
                    tiles = tiles + search_qms(keyword=keyword.value)
                providers.options = tiles
            with output:
                output.clear_output()
                # print("Running ...")
        elif change["new"] == "Reset":
            keyword.value = ""
            providers.options = []
            output.clear_output()
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

        buttons.value = None  # allow re-clicking the same button

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def download_osm(m=None):
    """Widget for downloading OSM data.

    NOTE(review): the body below is still the generic tool-GUI template
    (placeholder checkbox/sliders/dropdown named "Option 1" etc.); the OSM
    download logic has not been implemented yet -- the "Apply" button only
    prints "Running ...".

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget.
    """
    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    # Collapsed/expanded toggle for the whole toolbar panel.
    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gear",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    # Placeholder controls inherited from the GUI template.
    checkbox = widgets.Checkbox(
        description="Checkbox",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )

    dropdown = widgets.Dropdown(
        options=["Option 1", "Option 2", "Option 3"],
        value=None,
        description="Dropdown:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    int_slider = widgets.IntSlider(
        min=1,
        max=100,
        description="Int Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )

    int_slider_label = widgets.Label()
    # widgets.jslink((int_slider, "value"), (int_slider_label, "value"))

    # Mirror the slider value into its label on the Python side (the jslink
    # above is commented out; presumably it could not link an int trait to a
    # string trait -- TODO confirm).
    def int_slider_changed(change):
        if change["new"]:
            int_slider_label.value = str(int_slider.value)

    int_slider.observe(int_slider_changed, "value")

    float_slider = widgets.FloatSlider(
        min=1,
        max=100,
        description="Float Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )

    float_slider_label = widgets.Label()
    # widgets.jslink((float_slider, "value"), (float_slider_label, "value"))

    def float_slider_changed(change):
        if change["new"]:
            float_slider_label.value = str(float_slider.value)

    float_slider.observe(float_slider_changed, "value")

    color = widgets.ColorPicker(
        concise=False,
        description="Color:",
        value="white",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    text = widgets.Text(
        value="",
        description="Textbox:",
        placeholder="Placeholder",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    textarea = widgets.Textarea(
        placeholder="Placeholder",
        layout=widgets.Layout(width=widget_width),
    )

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"
    buttons.style.button_padding = "0px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        widgets.HBox([int_slider, int_slider_label]),
        widgets.HBox([float_slider, float_slider_label]),
        dropdown,
        text,
        color,
        textarea,
        buttons,
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand the panel on hover; collapse on leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove the tool's map control (if attached) and dispose the widget.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Apply":
            with output:
                output.clear_output()
                print("Running ...")
        elif change["new"] == "Reset":
            textarea.value = ""
            output.clear_output()
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

        buttons.value = None  # allow re-clicking the same button

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def inspector_gui(m=None):
    """GUI for inspecting pixel values of COG/STAC/local raster layers.

    Clicking on the map queries the layer selected in the dropdown at the
    clicked location, prints the band values, optionally drops a marker, and
    accumulates the results in ``m.pixel_values`` for download as CSV.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget.
    """
    import pandas as pd

    widget_width = "250px"
    padding = "0px 5px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    if m is not None:
        marker_cluster = ipyleaflet.MarkerCluster(name="Inspector Markers")
        setattr(m, "pixel_values", [])
        setattr(m, "marker_cluster", marker_cluster)

        if not hasattr(m, "interact_mode"):
            setattr(m, "interact_mode", False)

        if not hasattr(m, "inspector_output"):
            inspector_output = widgets.Output(
                layout=widgets.Layout(width=widget_width, padding="0px 5px 5px 5px")
            )
            setattr(m, "inspector_output", inspector_output)

    # NOTE(review): m is dereferenced unconditionally from here on, so unlike
    # the other tools in this module the m=None path cannot actually work.
    output = m.inspector_output
    output.clear_output()

    # The inspector widgets are cached as attributes on the map object so
    # reopening the tool preserves the user's previous settings.
    if not hasattr(m, "inspector_add_marker"):
        inspector_add_marker = widgets.Checkbox(
            description="Add Marker at clicked location",
            value=True,
            indent=False,
            layout=widgets.Layout(padding=padding, width=widget_width),
        )
        setattr(m, "inspector_add_marker", inspector_add_marker)
    add_marker = m.inspector_add_marker

    if not hasattr(m, "inspector_bands_chk"):
        inspector_bands_chk = widgets.Checkbox(
            description="Get pixel value for visible bands only",
            indent=False,
            layout=widgets.Layout(padding=padding, width=widget_width),
        )
        setattr(m, "inspector_bands_chk", inspector_bands_chk)
    bands_chk = m.inspector_bands_chk

    if not hasattr(m, "inspector_class_label"):
        inspector_label = widgets.Text(
            value="",
            description="Class label:",
            placeholder="Add a label to the marker",
            style=style,
            layout=widgets.Layout(width=widget_width, padding=padding),
        )
        setattr(m, "inspector_class_label", inspector_label)
    label = m.inspector_class_label

    # Layer choices come from the map's registry of COG/STAC/local layers.
    options = []
    if hasattr(m, "cog_layer_dict"):
        options = list(m.cog_layer_dict.keys())
        options.sort()
    if len(options) == 0:
        default_option = None
    else:
        default_option = options[0]
    if not hasattr(m, "inspector_dropdown"):
        inspector_dropdown = widgets.Dropdown(
            options=options,
            value=default_option,
            description="Select a layer:",
            layout=widgets.Layout(width=widget_width, padding=padding),
            style=style,
        )
        setattr(m, "inspector_dropdown", inspector_dropdown)
    dropdown = m.inspector_dropdown

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="info",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Download", "Reset", "Close"],
        tooltips=["Download", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    if len(options) == 0:
        with output:
            print("No COG/STAC layers available")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        add_marker,
        label,
        dropdown,
        bands_chk,
        buttons,
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def chk_change(change):
        # Toggling the bands filter invalidates values collected so far.
        if hasattr(m, "pixel_values"):
            m.pixel_values = []
        if hasattr(m, "marker_cluster"):
            m.marker_cluster.markers = []
        output.clear_output()

    bands_chk.observe(chk_change, "value")

    def handle_toolbar_event(event):
        # Expand the panel on hover; collapse on leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Tear down the tool: remove the control, the marker layer, and all
        # inspector state cached on the map object.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                if hasattr(m, "inspector_mode"):
                    delattr(m, "inspector_mode")
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                m.default_style = {"cursor": "default"}

                m.marker_cluster.markers = []
                m.pixel_values = []
                marker_cluster_layer = m.find_layer("Inspector Markers")
                if marker_cluster_layer is not None:
                    m.remove_layer(marker_cluster_layer)

                if hasattr(m, "pixel_values"):
                    delattr(m, "pixel_values")

                if hasattr(m, "marker_cluster"):
                    delattr(m, "marker_cluster")

            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Download":
            with output:
                output.clear_output()
                if len(m.pixel_values) == 0:
                    print(
                        "No pixel values available. Click on the map to start collection data."
                    )
                else:
                    print("Downloading pixel values...")
                    df = pd.DataFrame(m.pixel_values)
                    temp_csv = temp_file_path("csv")
                    df.to_csv(temp_csv, index=False)
                    link = create_download_link(temp_csv)
                    with output:
                        output.clear_output()
                        display(link)
        elif change["new"] == "Reset":
            label.value = ""
            output.clear_output()
            if hasattr(m, "pixel_values"):
                m.pixel_values = []
            if hasattr(m, "marker_cluster"):
                m.marker_cluster.markers = []
        elif change["new"] == "Close":
            # Same teardown as the close button.
            if m is not None:
                if hasattr(m, "inspector_mode"):
                    delattr(m, "inspector_mode")
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                m.default_style = {"cursor": "default"}
                m.marker_cluster.markers = []
                marker_cluster_layer = m.find_layer("Inspector Markers")
                if marker_cluster_layer is not None:
                    m.remove_layer(marker_cluster_layer)
                m.pixel_values = []

                if hasattr(m, "pixel_values"):
                    delattr(m, "pixel_values")

                if hasattr(m, "marker_cluster"):
                    delattr(m, "marker_cluster")

            toolbar_widget.close()

        buttons.value = None  # allow re-clicking the same button

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True

    def handle_interaction(**kwargs):
        # Map click handler: query the selected layer at the clicked point.
        latlon = kwargs.get("coordinates")
        lat = round(latlon[0], 4)
        lon = round(latlon[1], 4)
        if (
            kwargs.get("type") == "click"
            and hasattr(m, "inspector_mode")
            and m.inspector_mode
        ):
            m.default_style = {"cursor": "wait"}

            with output:
                output.clear_output()
                print("Getting pixel value ...")

            layer_dict = m.cog_layer_dict[dropdown.value]

            if layer_dict["type"] == "STAC":
                if bands_chk.value:
                    assets = layer_dict["assets"]
                else:
                    assets = None

                result = stac_pixel_value(
                    lon,
                    lat,
                    layer_dict["url"],
                    layer_dict["collection"],
                    layer_dict["items"],
                    assets,
                    layer_dict["titiler_endpoint"],
                    verbose=False,
                )
                if result is not None:
                    with output:
                        output.clear_output()
                        print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
                        for key in result:
                            print(f"{key}: {result[key]}")

                    result["latitude"] = lat
                    result["longitude"] = lon
                    result["label"] = label.value
                    m.pixel_values.append(result)
                    if add_marker.value:
                        markers = list(m.marker_cluster.markers)
                        markers.append(ipyleaflet.Marker(location=latlon))
                        m.marker_cluster.markers = markers
                else:
                    # No data at this point: tell the user and zoom to the
                    # layer extent so they can click inside it.
                    with output:
                        output.clear_output()
                        print("No pixel value available")
                        bounds = m.cog_layer_dict[m.inspector_dropdown.value]["bounds"]
                        m.zoom_to_bounds(bounds)

            elif layer_dict["type"] == "COG":
                result = cog_pixel_value(lon, lat, layer_dict["url"], verbose=False)
                if result is not None:
                    with output:
                        output.clear_output()
                        print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
                        for key in result:
                            print(f"{key}: {result[key]}")

                    result["latitude"] = lat
                    result["longitude"] = lon
                    result["label"] = label.value
                    m.pixel_values.append(result)
                    if add_marker.value:
                        markers = list(m.marker_cluster.markers)
                        markers.append(ipyleaflet.Marker(location=latlon))
                        m.marker_cluster.markers = markers
                else:
                    with output:
                        output.clear_output()
                        print("No pixel value available")
                        bounds = m.cog_layer_dict[m.inspector_dropdown.value]["bounds"]
                        m.zoom_to_bounds(bounds)

            elif layer_dict["type"] == "LOCAL":
                result = local_tile_pixel_value(
                    lon, lat, layer_dict["tile_client"], verbose=False
                )
                if result is not None:
                    if m.inspector_bands_chk.value:
                        # Restrict the result to the bands currently shown.
                        band = m.cog_layer_dict[m.inspector_dropdown.value]["band"]
                        band_names = m.cog_layer_dict[m.inspector_dropdown.value][
                            "band_names"
                        ]
                        if band is not None:
                            sel_bands = [band_names[b - 1] for b in band]
                            result = {k: v for k, v in result.items() if k in sel_bands}
                    with output:
                        output.clear_output()
                        print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
                        for key in result:
                            print(f"{key}: {result[key]}")

                    result["latitude"] = lat
                    result["longitude"] = lon
                    result["label"] = label.value
                    m.pixel_values.append(result)
                    if add_marker.value:
                        markers = list(m.marker_cluster.markers)
                        markers.append(ipyleaflet.Marker(location=latlon))
                        m.marker_cluster.markers = markers
                else:
                    with output:
                        output.clear_output()
                        print("No pixel value available")
                        bounds = m.cog_layer_dict[m.inspector_dropdown.value]["bounds"]
                        m.zoom_to_bounds(bounds)

            m.default_style = {"cursor": "crosshair"}

    if m is not None:
        if not hasattr(m, "marker_cluster"):
            setattr(m, "marker_cluster", marker_cluster)
        m.add_layer(marker_cluster)

        # Register the click handler only once per map instance.
        if not m.interact_mode:
            m.on_interaction(handle_interaction)
            m.interact_mode = True

    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control

        # Inspecting only makes sense when the map has queryable layers.
        if not hasattr(m, "inspector_mode"):
            if hasattr(m, "cog_layer_dict"):
                setattr(m, "inspector_mode", True)
            else:
                setattr(m, "inspector_mode", False)

    else:
        return toolbar_widget
def plotly_toolbar(
    canvas,
):
    """Creates the main toolbar and adds it to the map.

    Args:
        canvas (plotlymap.Canvas): The canvas holding the plotly Map object.

    Returns:
        ipywidgets: The toolbar widget.
    """
    m = canvas.map
    map_min_width = canvas.map_min_width
    map_max_width = canvas.map_max_width
    map_refresh = canvas.map_refresh
    map_widget = canvas.map_widget

    # Cap how far the map shrinks when the toolbar expands (non-refresh mode).
    if not map_refresh:
        width = int(map_min_width.replace("%", ""))
        if width > 90:
            map_min_width = "90%"

    # Maps a Font Awesome icon name to the tool it launches.
    tools = {
        "map": {
            "name": "basemap",
            "tooltip": "Change basemap",
        },
        "search": {
            "name": "search_xyz",
            "tooltip": "Search XYZ tile services",
        },
        "gears": {
            "name": "whitebox",
            "tooltip": "WhiteboxTools for local geoprocessing",
        },
        "folder-open": {
            "name": "vector",
            "tooltip": "Open local vector/raster data",
        },
        "picture-o": {
            "name": "raster",
            "tooltip": "Open COG/STAC dataset",
        },
        "question": {
            "name": "help",
            "tooltip": "Get help",
        },
    }

    icons = list(tools.keys())
    tooltips = [item["tooltip"] for item in list(tools.values())]

    icon_width = "32px"
    icon_height = "32px"
    n_cols = 3
    n_rows = math.ceil(len(icons) / n_cols)

    # One toggle button per tool, laid out in a fixed 3-column grid.
    toolbar_grid = widgets.GridBox(
        children=[
            widgets.ToggleButton(
                layout=widgets.Layout(
                    width="auto", height="auto", padding="0px 0px 0px 4px"
                ),
                button_style="primary",
                icon=icons[i],
                tooltip=tooltips[i],
            )
            for i in range(len(icons))
        ],
        layout=widgets.Layout(
            width="115px",
            grid_template_columns=(icon_width + " ") * n_cols,
            grid_template_rows=(icon_height + " ") * n_rows,
            grid_gap="1px 1px",
            padding="5px",
        ),
    )
    canvas.toolbar = toolbar_grid

    def tool_callback(change):
        # Radio-button behavior: selecting one tool deselects the others,
        # then dispatches to the matching tool GUI via the button's icon.
        if change["new"]:
            current_tool = change["owner"]
            for tool in toolbar_grid.children:
                if tool is not current_tool:
                    tool.value = False
            tool = change["owner"]
            tool_name = tools[tool.icon]["name"]
            canvas.container_widget.children = []

            if tool_name == "basemap":
                plotly_basemap_gui(canvas)
            elif tool_name == "search_xyz":
                plotly_search_basemaps(canvas)
            elif tool_name == "whitebox":
                plotly_whitebox_gui(canvas)
            elif tool_name == "vector":
                plotly_tool_template(canvas)
            elif tool_name == "raster":
                plotly_tool_template(canvas)
            elif tool_name == "help":
                import webbrowser

                webbrowser.open_new_tab("https://leafmap.org")
                tool.value = False
        else:
            canvas.container_widget.children = []
            map_widget.layout.width = map_max_width

    for tool in toolbar_grid.children:
        tool.observe(tool_callback, "value")

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="wrench",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    canvas.toolbar_button = toolbar_button

    layers_button = widgets.ToggleButton(
        value=False,
        tooltip="Layers",
        icon="server",
        layout=widgets.Layout(height="28px", width="72px"),
    )
    canvas.layers_button = layers_button

    toolbar_widget = widgets.VBox(layout=widgets.Layout(overflow="hidden"))
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox(layout=widgets.Layout(overflow="hidden"))
    toolbar_header.children = [layers_button, toolbar_button]
    toolbar_footer = widgets.VBox(layout=widgets.Layout(overflow="hidden"))
    toolbar_footer.children = [toolbar_grid]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand the panel on hover; collapse on leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            # map_widget.layout.width = "85%"
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                layers_button.value = False
                # map_widget.layout.width = map_max_width

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Shrink/restore the map around the toolbar; in refresh mode the
        # plotly figure must be re-displayed for the resize to take effect.
        if change["new"]:
            map_widget.layout.width = map_min_width
            if map_refresh:
                with map_widget:
                    map_widget.clear_output()
                    display(m)
            layers_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            canvas.toolbar_reset()
            map_widget.layout.width = map_max_width
            if not layers_button.value:
                toolbar_widget.children = [toolbar_button]
            if map_refresh:
                with map_widget:
                    map_widget.clear_output()
                    display(m)

    toolbar_button.observe(toolbar_btn_click, "value")

    def layers_btn_click(change):
        # Build (on demand) a panel of per-layer visibility checkboxes and
        # opacity sliders, plus an all-layers master checkbox.
        if change["new"]:
            layer_names = list(m.get_layers().keys())
            layers_hbox = []
            all_layers_chk = widgets.Checkbox(
                value=True,
                description="All layers on/off",
                indent=False,
                layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
            )
            all_layers_chk.layout.width = "30ex"
            layers_hbox.append(all_layers_chk)

            layer_chk_dict = {}

            for name in layer_names:
                if name in m.get_tile_layers():
                    index = m.find_layer_index(name)
                    layer = m.layout.mapbox.layers[index]
                elif name in m.get_data_layers():
                    index = m.find_layer_index(name)
                    layer = m.data[index]
                # NOTE(review): if a name is in neither list, `layer` keeps
                # its value from the previous iteration (or is unbound on the
                # first) -- presumably every layer falls into one of the two
                # categories; verify against plotlymap.

                layer_chk = widgets.Checkbox(
                    value=layer.visible,
                    description=name,
                    indent=False,
                    layout=widgets.Layout(height="18px"),
                )
                layer_chk.layout.width = "25ex"
                layer_chk_dict[name] = layer_chk

                if hasattr(layer, "opacity"):
                    opacity = layer.opacity
                elif hasattr(layer, "marker"):
                    opacity = layer.marker.opacity
                else:
                    opacity = 1.0

                layer_opacity = widgets.FloatSlider(
                    value=opacity,
                    description_tooltip=name,
                    min=0,
                    max=1,
                    step=0.01,
                    readout=False,
                    layout=widgets.Layout(width="80px"),
                )

                layer_settings = widgets.ToggleButton(
                    icon="gear",
                    tooltip=name,
                    layout=widgets.Layout(
                        width="25px", height="25px", padding="0px 0px 0px 5px"
                    ),
                )

                def layer_chk_change(change):
                    # The checkbox description doubles as the layer name.
                    if change["new"]:
                        m.set_layer_visibility(change["owner"].description, True)
                    else:
                        m.set_layer_visibility(change["owner"].description, False)

                layer_chk.observe(layer_chk_change, "value")

                def layer_opacity_change(change):
                    # The slider tooltip carries the layer name.
                    if change["new"]:
                        m.set_layer_opacity(
                            change["owner"].description_tooltip, change["new"]
                        )

                layer_opacity.observe(layer_opacity_change, "value")

                hbox = widgets.HBox(
                    [layer_chk, layer_settings, layer_opacity],
                    layout=widgets.Layout(padding="0px 8px 0px 8px"),
                )
                layers_hbox.append(hbox)

            def all_layers_chk_changed(change):
                if change["new"]:
                    for name in layer_names:
                        m.set_layer_visibility(name, True)
                        layer_chk_dict[name].value = True
                else:
                    for name in layer_names:
                        m.set_layer_visibility(name, False)
                        layer_chk_dict[name].value = False

            all_layers_chk.observe(all_layers_chk_changed, "value")

            toolbar_footer.children = layers_hbox
            toolbar_button.value = False
        else:
            toolbar_footer.children = [toolbar_grid]

    layers_button.observe(layers_btn_click, "value")

    return toolbar_widget
def plotly_tool_template(canvas):
    """Placeholder tool GUI for the plotly toolbar.

    Shrinks the map, shows a collapsible panel whose only content is a
    "To be implemented" message, and wires up the expand/collapse/close
    behavior shared by the plotly tool widgets.

    Args:
        canvas (plotlymap.Canvas): The canvas that owns the map widget.
    """
    host = canvas.container_widget
    map_widget = canvas.map_widget

    map_width = "70%"
    map_widget.layout.width = map_width

    panel_width = "250px"
    panel_padding = "0px 0px 0px 5px"  # upper, right, bottom, left

    expand_btn = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gears",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    dismiss_btn = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    message_pane = widgets.Output(
        layout=widgets.Layout(width=panel_width, padding=panel_padding)
    )
    with message_pane:
        print("To be implemented")

    panel = widgets.VBox(children=[expand_btn])
    header = widgets.HBox(children=[dismiss_btn, expand_btn])
    body = widgets.VBox(children=[message_pane])

    hover_events = ipyevents.Event(
        source=panel, watched_events=["mouseenter", "mouseleave"]
    )

    def on_hover(event):
        # Expand while hovered; collapse on leave unless pinned open.
        if event["type"] == "mouseenter":
            panel.children = [header, body]
            map_widget.layout.width = map_width
        elif event["type"] == "mouseleave" and not expand_btn.value:
            panel.children = [expand_btn]
            expand_btn.value = False
            dismiss_btn.value = False
            map_widget.layout.width = canvas.map_max_width

    hover_events.on_dom_event(on_hover)

    def on_expand_toggle(change):
        if change["new"]:
            dismiss_btn.value = False
            panel.children = [header, body]
            map_widget.layout.width = map_width
        elif not dismiss_btn.value:
            panel.children = [expand_btn]
            map_widget.layout.width = canvas.map_max_width

    expand_btn.observe(on_expand_toggle, "value")

    def on_dismiss(change):
        if change["new"]:
            expand_btn.value = False
            canvas.toolbar_reset()
            panel.close()

    dismiss_btn.observe(on_dismiss, "value")

    expand_btn.value = True
    host.children = [panel]
def plotly_basemap_gui(canvas, map_min_width="78%", map_max_width="98%"):
    """Widget for changing basemaps on a plotly map.

    Args:
        canvas (plotlymap.Canvas): The canvas that owns the plotly Map.
        map_min_width (str, optional): Map width while the widget is open.
        map_max_width (str, optional): Map width restored when it closes.
    """
    from .plotlymap import basemaps

    fig = canvas.map
    # Remember how many mapbox layers exist now, so switching basemaps can
    # truncate back to this point before adding the new choice.
    base_layer_count = len(fig.layout.mapbox.layers)

    host = canvas.container_widget
    map_widget = canvas.map_widget
    map_widget.layout.width = map_min_width

    initial = "Stamen.Terrain"
    fig.add_basemap(initial)

    selector = widgets.Dropdown(
        options=list(basemaps.keys()),
        value=initial,
        layout=widgets.Layout(width="200px"),
    )

    dismiss = widgets.Button(
        icon="times",
        tooltip="Close the basemap widget",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )

    panel = widgets.HBox([selector, dismiss])
    host.children = [panel]

    def switch_basemap(change):
        # Drop the previously added basemap layer, then add the new pick.
        fig.layout.mapbox.layers = fig.layout.mapbox.layers[:base_layer_count]
        fig.add_basemap(change["new"])

    selector.observe(switch_basemap, "value")

    def dismiss_widget(change):
        host.children = []
        panel.close()
        map_widget.layout.width = map_max_width
        canvas.toolbar_reset()
        canvas.toolbar_button.value = False

    dismiss.on_click(dismiss_widget)
def plotly_search_basemaps(canvas):
    """The widget for search XYZ tile services.

    Args:
        canvas (plotlymap.Canvas): The canvas holding the Plotly Map object.

    Returns:
        ipywidgets: The tool GUI widget.
    """
    import xyzservices.providers as xyz
    from xyzservices import TileProvider

    m = canvas.map
    container_widget = canvas.container_widget
    map_widget = canvas.map_widget
    map_widget.layout.width = "75%"
    # map_widget.layout.width = map_min_width

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="search",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    checkbox = widgets.Checkbox(
        description="Search Quick Map Services (QMS)",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )

    providers = widgets.Dropdown(
        options=[],
        value=None,
        description="XYZ Tile:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    keyword = widgets.Text(
        value="",
        description="Search keyword:",
        placeholder="OpenStreetMap",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    # Pressing Enter in the keyword box runs the same search as the
    # "Search" toggle button below.
    def search_callback(change):
        providers.options = []
        if keyword.value != "":
            tiles = search_xyz_services(keyword=keyword.value)
            if checkbox.value:
                tiles = tiles + search_qms(keyword=keyword.value)
            providers.options = tiles

    keyword.on_submit(search_callback)

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Search", "Reset", "Close"],
        tooltips=["Search", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    def providers_change(change):
        # Dropdown options are prefixed with "qms." or "xyz.", so the first
        # four characters identify the provider source.
        if change["new"] != "":
            provider = change["new"]
            if provider is not None:
                if provider.startswith("qms"):
                    with output:
                        output.clear_output()
                        print("Adding data. Please wait...")
                    name = provider[4:]
                    qms_provider = TileProvider.from_qms(name)
                    url = qms_provider.build_url()
                    attribution = qms_provider.attribution
                    m.add_tile_layer(url, name, attribution)
                    output.clear_output()
                elif provider.startswith("xyz"):
                    name = provider[4:]
                    xyz_provider = xyz.flatten()[name]
                    url = xyz_provider.build_url()
                    attribution = xyz_provider.attribution
                    if xyz_provider.requires_token():
                        with output:
                            output.clear_output()
                            print(f"{provider} requires an API Key.")
                    m.add_tile_layer(url, name, attribution)

    providers.observe(providers_change, "value")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        keyword,
        providers,
        buttons,
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand the panel on hover; collapse on leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        if change["new"]:
            toolbar_button.value = False
            canvas.toolbar_reset()
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Search":
            providers.options = []
            output.clear_output()
            if keyword.value != "":
                tiles = search_xyz_services(keyword=keyword.value)
                if checkbox.value:
                    tiles = tiles + search_qms(keyword=keyword.value)
                providers.options = tiles
            else:
                with output:
                    print("Please enter a search keyword.")
        elif change["new"] == "Reset":
            keyword.value = ""
            providers.options = []
            output.clear_output()
        elif change["new"] == "Close":
            canvas.toolbar_reset()
            toolbar_widget.close()

        buttons.value = None  # allow re-clicking the same button

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True
    container_widget.children = [toolbar_widget]
def plotly_whitebox_gui(canvas):
    """Display a GUI for the whitebox tool.

    Shrinks the map to make room for the whiteboxgui toolbox and mounts the
    toolbox inside a collapsible toolbar widget on the canvas.

    Args:
        canvas (plotlymap.Canvas): Map canvas.
    """
    import whiteboxgui.whiteboxgui as wbt

    container_widget = canvas.container_widget
    map_widget = canvas.map_widget
    map_width = "25%"
    map_widget.layout.width = map_width

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    # style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gears",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    # Build the whitebox toolbox widget from the full tool inventory.
    wbt_toolbox = wbt.build_toolbox(
        wbt.get_wbt_dict(),
        max_width="800px",
        max_height="500px",
        sandbox_path=os.getcwd(),
    )

    toolbar_widget = widgets.VBox(children=[toolbar_button])
    toolbar_header = widgets.HBox(children=[close_button, toolbar_button])
    toolbar_footer = widgets.VBox(children=[wbt_toolbox, output])

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand on hover; collapse (restoring the map width) when the
        # pointer leaves and the toolbar is not pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            map_widget.layout.width = map_width
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False
                map_widget.layout.width = canvas.map_max_width

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Pin open / collapse the toolbar when the gear button is toggled.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            map_widget.layout.width = map_width
        elif not close_button.value:
            toolbar_widget.children = [toolbar_button]
            map_widget.layout.width = canvas.map_max_width

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Tear the tool down entirely and reset the canvas toolbar state.
        if change["new"]:
            toolbar_button.value = False
            canvas.toolbar_reset()
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    # Open the toolbar immediately (triggers toolbar_btn_click above).
    toolbar_button.value = True
    container_widget.children = [toolbar_widget]
def search_geojson_gui(m=None):
    """Build a toolbar GUI for searching features in the map's GeoJSON layers.

    All current GeoJSON layers are collected into a LayerGroup
    (``m.geojson_layer_group``) so a single ipyleaflet SearchControl can
    search across them by a user-chosen attribute.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget when ``m`` is None; otherwise the
        widget is attached to the map as ``m.tool_control``.
    """
    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    # NOTE(review): m is accessed unconditionally here despite the None
    # default — confirm callers always pass a map.
    if len(m.geojson_layers) > 0:
        # Group every GeoJSON layer so one SearchControl can search them all.
        geojson_layer_group = ipyleaflet.LayerGroup()
        for geojson_layer in m.geojson_layers:
            geojson_layer_group.add_layer(geojson_layer)
        if not hasattr(m, "geojson_layer_group"):
            setattr(m, "geojson_layer_group", geojson_layer_group)
        else:
            m.geojson_layer_group = geojson_layer_group

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="search-plus",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    layer_options = []
    if len(m.geojson_layers) > 0:
        layer_options = [layer.name for layer in m.geojson_layers]
    layers = widgets.Dropdown(
        options=layer_options,
        value=None,
        description="Layer:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )
    attributes = widgets.Dropdown(
        options=[],
        value=None,
        description="Attribute:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )
    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
    if len(m.geojson_layers) == 0:
        with output:
            print("Please add vector data layers to the map before using this tool.")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        layers,
        attributes,
        buttons,
        output,
    ]
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def layer_change(change):
        # Populate the attribute dropdown from the selected layer's columns.
        if change["new"]:
            for layer in m.geojson_layers:
                if layer.name == change["new"]:
                    df = geojson_to_df(layer.data)
                    attributes.options = list(df.columns)

    layers.observe(layer_change, names="value")

    def handle_toolbar_event(event):
        # Expand on hover; collapse on mouse-leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Pin open / collapse the toolbar via the toggle button.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Full teardown: remove the tool control and the search control
        # (hiding its marker first), then drop the temporary layer group.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                if len(m.geojson_layers) > 0 and m.search_control is not None:
                    m.search_control.marker.visible = False
                    m.remove_control(m.search_control)
                    m.search_control = None
                    m.geojson_layer_group.clear_layers()
                    delattr(m, "geojson_layer_group")
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Apply":
            if len(m.geojson_layers) > 0 and attributes.value is not None:
                if m.search_control is None:
                    # First apply: create the search control over the group.
                    geojson_control = ipyleaflet.SearchControl(
                        position="topleft",
                        layer=m.geojson_layer_group,
                        property_name=attributes.value,
                        marker=ipyleaflet.Marker(
                            icon=ipyleaflet.AwesomeIcon(
                                name="check", marker_color="green", icon_color="darkred"
                            )
                        ),
                    )
                    m.add_control(geojson_control)
                    m.search_control = geojson_control
                else:
                    # Reuse the existing control; just switch the property.
                    m.search_control.property_name = attributes.value
            with output:
                output.clear_output()
        elif change["new"] == "Reset":
            output.clear_output()
            layers.value = None
            attributes.value = None
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                if len(m.geojson_layers) > 0 and m.search_control is not None:
                    m.search_control.marker.visible = False
                    m.remove_control(m.search_control)
                    m.search_control = None
                    if hasattr(m, "geojson_layer_group"):
                        m.geojson_layer_group.clear_layers()
                        delattr(m, "geojson_layer_group")
            toolbar_widget.close()
        buttons.value = None  # reset so the same button can be clicked again

    buttons.observe(button_clicked, "value")

    # Open the toolbar immediately (triggers toolbar_btn_click above).
    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def select_table_gui(m=None):
    """GUI for selecting a GeoJSON layer and opening its attribute table.

    On "Apply", the selected layer's GeoJSON data is converted to a
    DataFrame via ``geojson_to_df`` and displayed with ``show_table_gui``.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget when ``m`` is None; otherwise the
        widget is attached to the map as ``m.tool_control``.
    """
    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="table",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    layer_options = []
    if len(m.geojson_layers) > 0:
        layer_options = [layer.name for layer in m.geojson_layers]
    layers = widgets.Dropdown(
        options=layer_options,
        value=None,
        description="Layer:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )
    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
    if len(m.geojson_layers) == 0:
        with output:
            print("Please add vector data layers to the map before using this tool.")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        layers,
        buttons,
        output,
    ]
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand on hover; collapse on mouse-leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Pin open / collapse the toolbar via the toggle button.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove the tool control from the map and dispose of the widget.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Apply":
            if len(m.geojson_layers) > 0 and layers.value is not None:
                # Remove any previously opened table first. Guard against a
                # stale reference: the control may already have been removed
                # from the map, in which case remove_control would raise.
                if hasattr(m, "table_control") and m.table_control in m.controls:
                    m.remove_control(m.table_control)
                lyr_index = layers.options.index(layers.value)
                data = m.geojson_layers[lyr_index].data
                df = geojson_to_df(data)
                show_table_gui(m, df)
        elif change["new"] == "Reset":
            output.clear_output()
            layers.value = None
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()
        buttons.value = None  # reset so the same button can be clicked again

    buttons.observe(button_clicked, "value")

    # Open the toolbar immediately (triggers toolbar_btn_click above).
    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def show_table_gui(m, df):
    """Open the attribute table GUI.

    Renders ``df`` with ipysheet (first 10 rows by default) inside a
    minimizable, resizable floating window added to the map as a control.
    Exposes the widgets on the map as ``m.table_widget`` / ``m.table_output``
    and, once attached, ``m.table_control``.

    Args:
        m (leafmap.Map, optional): The leaflet Map object
        df: The attribute table to display — presumably a pandas.DataFrame
            (it is passed to ``ipysheet.from_dataframe``); TODO confirm.

    Returns:
        ipywidgets: The tool GUI widget.
    """
    import ipysheet

    widget_width = "560px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    # style = {"description_width": "initial"}

    # Show only the first 10 rows by default to keep rendering fast.
    sheet = ipysheet.from_dataframe(df.head(10))
    output = widgets.Output(
        layout=widgets.Layout(
            width=widget_width,
            padding=padding,
        )
    )
    checkbox = widgets.Checkbox(
        description="Show all rows",
        indent=False,
        layout=widgets.Layout(padding=padding, width="115px"),
    )
    sheet.layout.width = output.layout.width

    def checkbox_clicked(change):
        # Rebuild the sheet with either all rows or the 10-row preview.
        # NOTE(review): this rebinds a *local* ``sheet``; the outer ``sheet``
        # used by the other callbacks keeps pointing at the original widget.
        output.clear_output()
        if change["new"]:
            sheet = ipysheet.from_dataframe(df)
        else:
            sheet = ipysheet.from_dataframe(df.head(10))
        sheet.layout.max_width = output.layout.width
        # Fit under the map: map height string (e.g. "600px") minus 220px.
        output.layout.max_height = str(int(m.layout.height[:-2]) - 220) + "px"
        sheet.layout.max_height = output.layout.height
        # NOTE(review): this compares CSS size strings lexicographically, and
        # ``sheet.layout.height`` may be None — confirm this branch behaves
        # as intended.
        if sheet.layout.height > output.layout.max_height:
            sheet.layout.height = output.layout.max_height
        with output:
            display(sheet)

    checkbox.observe(checkbox_clicked, "value")

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Minimize window",
        icon="window-minimize",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    toolbar_widget = widgets.VBox()
    # Expose the table widgets on the map so other tools can reach them.
    m.table_widget = toolbar_widget
    m.table_output = output

    reset_btn = widgets.Button(
        tooltip="Reset the plot",
        icon="home",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 0px"),
    )

    def reset_btn_clicked(b):
        # Restore the default window width and recompute the height cap.
        output.layout.width = widget_width
        output.layout.max_height = str(int(m.layout.height[:-2]) - 220) + "px"

    reset_btn.on_click(reset_btn_clicked)

    fullscreen_btn = widgets.Button(
        tooltip="Fullscreen the attribute table",
        icon="arrows-alt",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 0px"),
    )

    def fullscreen_btn_clicked(b):
        # Widen the window to a fixed 1000px and re-render the sheet.
        output.layout.width = "1000px"
        output.layout.max_height = str(int(m.layout.height[:-2]) - 220) + "px"
        sheet.layout.width = output.layout.width
        with output:
            output.clear_output()
            display(sheet)

    fullscreen_btn.on_click(fullscreen_btn_clicked)

    # NOTE(review): width_btn and height_btn have no click handlers — they
    # appear to serve only as visual icons next to the sliders; confirm.
    width_btn = widgets.Button(
        tooltip="Change table width",
        icon="arrows-h",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 0px"),
    )
    height_btn = widgets.Button(
        tooltip="Change table height",
        icon="arrows-v",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 0px"),
    )
    width_slider = widgets.IntSlider(
        value=560,
        min=550,
        max=1500,
        step=10,
        description="",
        readout=False,
        continuous_update=False,
        layout=widgets.Layout(width="100px", padding=padding),
        style={"description_width": "initial"},
    )
    width_slider_label = widgets.Label(
        value="560", layout=widgets.Layout(padding="0px 10px 0px 0px")
    )
    # widgets.jslink((width_slider, "value"), (width_slider_label, "value"))

    def width_changed(change):
        # Resize the window to the slider value and re-render the sheet.
        if change["new"]:
            width_slider_label.value = str(width_slider.value)
            output.layout.width = str(width_slider.value) + "px"
            if checkbox.value:
                sheet = ipysheet.from_dataframe(df)
            else:
                sheet = ipysheet.from_dataframe(df.head(10))
            sheet.layout.width = output.layout.width
            with output:
                output.clear_output()
                display(sheet)

    width_slider.observe(width_changed, "value")

    height_slider = widgets.IntSlider(
        value=250,
        min=200,
        max=1000,
        step=10,
        description="",
        readout=False,
        continuous_update=False,
        layout=widgets.Layout(width="100px", padding=padding),
        style={"description_width": "initial"},
    )
    height_slider_label = widgets.Label(value="250")
    # widgets.jslink((height_slider, "value"), (height_slider_label, "value"))

    def height_changed(change):
        # Change the window's max height to the slider value and re-render.
        if change["new"]:
            height_slider_label.value = str(height_slider.value)
            output.layout.max_height = str(height_slider.value) + "px"
            if checkbox.value:
                sheet = ipysheet.from_dataframe(df)
            else:
                sheet = ipysheet.from_dataframe(df.head(10))
            sheet.layout.height = output.layout.max_height
            with output:
                output.clear_output()
                display(sheet)

    height_slider.observe(height_changed, "value")

    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [
        close_button,
        toolbar_button,
        reset_btn,
        fullscreen_btn,
        width_btn,
        width_slider,
        width_slider_label,
        height_btn,
        height_slider,
        height_slider_label,
        checkbox,
    ]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        output,
    ]
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand on hover; minimize (icon flips to "maximize") on leave
        # unless the window is pinned open via the toolbar button.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            toolbar_button.icon = "window-minimize"
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False
                toolbar_button.icon = "window-maximize"

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Pin open / minimize the table window, swapping the button icon.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            toolbar_button.icon = "window-minimize"
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.icon = "window-maximize"

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove the table control from the map and dispose of the widget.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.table_control is not None and m.table_control in m.controls:
                    m.remove_control(m.table_control)
                    m.table_control = None
                    delattr(m, "table_control")
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    with output:
        display(sheet)

    # Open the window immediately (triggers toolbar_btn_click above).
    toolbar_button.value = True
    if m is not None:
        table_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if table_control not in m.controls:
            m.add_control(table_control)
            m.table_control = table_control
    else:
        return toolbar_widget
def edit_draw_gui(m):
    """Generates a tool GUI for editing vector data attribute table.

    Shows an ipysheet Key/Value table (``m.edit_sheet``) for the properties
    of drawn features, with buttons to open vector files into the draw
    control, save drawn features to file, and refresh attributes from the
    last edit. Sets ``m.edit_mode`` while the tool is open.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget.
    """
    import ipysheet
    import pandas as pd

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    m.edit_mode = True
    # Start the property sheet with at least one row.
    n_props = len(m.get_draw_props())
    if n_props == 0:
        n_props = 1
    sheet = ipysheet.from_dataframe(m.get_draw_props(n_props, return_df=True))
    m.edit_sheet = sheet

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
    m.edit_output = output

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Edit attribute table",
        icon="pencil-square-o",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    open_button = widgets.ToggleButton(
        value=False,
        tooltip="Open vector data",
        icon="folder-open",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    save_button = widgets.ToggleButton(
        value=False,
        tooltip="Save to file",
        icon="floppy-o",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    refresh_button = widgets.ToggleButton(
        value=False,
        tooltip="Get attribute",
        icon="refresh",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    m.edit_refresh = refresh_button

    int_slider = widgets.IntSlider(
        min=n_props,
        max=n_props + 10,
        description="Rows:",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="85px", padding=padding),
        style=style,
    )
    int_slider_label = widgets.Label()

    # NOTE(review): ``int_slider_changed`` is defined and registered here,
    # then the name is redefined and registered again further down. BOTH
    # function objects remain observers, so a slider change updates the
    # label (this one) AND rebuilds the sheet (the later one). Confirm the
    # double registration is intentional.
    def int_slider_changed(change):
        if change["new"]:
            int_slider_label.value = str(int_slider.value)

    int_slider.observe(int_slider_changed, "value")
    # widgets.jslink((int_slider, "value"), (int_slider_label, "value"))

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "64px"

    with output:
        output.clear_output()
        display(m.edit_sheet)

    def int_slider_changed(change):
        # Rebuild the sheet with the requested number of property rows.
        if change["new"]:
            m.edit_sheet.rows = int_slider.value
            m.num_attributes = int_slider.value
            with output:
                output.clear_output()
                m.edit_sheet = ipysheet.from_dataframe(
                    m.get_draw_props(n=int_slider.value, return_df=True)
                )
                display(m.edit_sheet)

    int_slider.observe(int_slider_changed, "value")
    m.num_attributes = int_slider.value

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [
        close_button,
        toolbar_button,
        open_button,
        save_button,
        refresh_button,
        int_slider,
        int_slider_label,
    ]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        output,
        buttons,
    ]
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand on hover; collapse on mouse-leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Pin open / collapse the toolbar via the toggle button.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Leave edit mode, remove the tool control, dispose of the widget.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                m.edit_mode = False
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def open_chooser_callback(chooser):
        # Load the chosen vector file and append its features to both the
        # draw control and the map's draw_features list.
        with output:
            import geopandas as gpd

            gdf = gpd.read_file(chooser.selected)
            geojson = gdf_to_geojson(gdf, epsg=4326, tuple_to_list=True)
            m.draw_control.data = m.draw_control.data + (geojson["features"])
            m.draw_features = m.draw_features + (geojson["features"])
            open_button.value = False
        if m.open_control in m.controls:
            m.remove_control(m.open_control)
            delattr(m, "open_control")

    def open_btn_click(change):
        # Show a file chooser for opening vector data.
        if change["new"]:
            # NOTE(review): this resets save_button, not open_button —
            # open_button is only reset later in open_chooser_callback.
            # Looks like a copy-paste from save_btn_click; confirm intent.
            save_button.value = False
            open_chooser = FileChooser(
                os.getcwd(),
                sandbox_path=m.sandbox_path,
                layout=widgets.Layout(width="454px"),
            )
            open_chooser.filter_pattern = ["*.shp", "*.geojson", "*.gpkg"]
            open_chooser.use_dir_icons = True
            open_chooser.register_callback(open_chooser_callback)
            open_control = ipyleaflet.WidgetControl(
                widget=open_chooser, position="topright"
            )
            m.add_control(open_control)
            m.open_control = open_control

    open_button.observe(open_btn_click, "value")

    def chooser_callback(chooser):
        # Save the drawn features to the chosen file and drop the chooser.
        m.save_draw_features(chooser.selected, indent=None)
        if m.file_control in m.controls:
            m.remove_control(m.file_control)
            delattr(m, "file_control")
        with output:
            print(f"Saved to {chooser.selected}")

    def save_btn_click(change):
        # Show a file chooser for saving the drawn features.
        if change["new"]:
            save_button.value = False
            file_chooser = FileChooser(
                os.getcwd(),
                sandbox_path=m.sandbox_path,
                layout=widgets.Layout(width="454px"),
            )
            file_chooser.filter_pattern = ["*.shp", "*.geojson", "*.gpkg"]
            file_chooser.default_filename = "data.geojson"
            file_chooser.use_dir_icons = True
            file_chooser.register_callback(chooser_callback)
            file_control = ipyleaflet.WidgetControl(
                widget=file_chooser, position="topright"
            )
            m.add_control(file_control)
            m.file_control = file_control

    save_button.observe(save_btn_click, "value")

    def refresh_btn_click(change):
        # After an edit on the draw control, sync the edited geometry back
        # into m.draw_features and rebuild the Key/Value property sheet
        # from the edited feature's properties.
        if change["new"]:
            refresh_button.value = False
            if m.draw_control.last_action == "edited":
                with output:
                    geometries = [
                        feature["geometry"] for feature in m.draw_control.data
                    ]
                    if len(m.draw_features) > 0:
                        # Drop a duplicate of the last drawn feature, if any.
                        if (
                            m.draw_features[-1]["geometry"]
                            == m.draw_control.last_draw["geometry"]
                        ):
                            m.draw_features.pop()
                    for feature in m.draw_features:
                        if feature["geometry"] not in geometries:
                            feature["geometry"] = m.draw_control.last_draw["geometry"]
                            values = []
                            props = ipysheet.to_dataframe(m.edit_sheet)["Key"].tolist()
                            for prop in props:
                                if prop in feature["properties"]:
                                    values.append(feature["properties"][prop])
                                else:
                                    values.append("")
                            df = pd.DataFrame({"Key": props, "Value": values})
                            df.index += 1  # 1-based row labels in the sheet
                            m.edit_sheet = ipysheet.from_dataframe(df)
                            output.clear_output()
                            display(m.edit_sheet)

    refresh_button.observe(refresh_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Apply":
            with output:
                output.clear_output()
                display(m.edit_sheet)
                if len(m.draw_control.data) == 0:
                    print("Please draw a feature first.")
                else:
                    if m.draw_control.last_action == "edited":
                        m.update_draw_features()
                    m.update_draw_props(ipysheet.to_dataframe(m.edit_sheet))
        elif change["new"] == "Reset":
            m.edit_sheet = ipysheet.from_dataframe(
                m.get_draw_props(int_slider.value, return_df=True)
            )
            with output:
                output.clear_output()
                display(m.edit_sheet)
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                m.edit_mode = False
            toolbar_widget.close()
        buttons.value = None  # reset so the same button can be clicked again

    buttons.observe(button_clicked, "value")

    # Open the toolbar immediately (triggers toolbar_btn_click above).
    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def stac_gui(m=None):
"""Generates a tool GUI template using ipywidgets.
Args:
m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
Returns:
ipywidgets: The tool GUI widget.
"""
from .pc import get_pc_collection_list
widget_width = "450px"
padding = "0px 0px 0px 5px" # upper, right, bottom, left
style = {"description_width": "initial"}
output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Discver STAC Catalog",
icon="stack-exchange",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
http_url = widgets.Text(
value="https://planetarycomputer.microsoft.com/api/stac/v1",
description="Catalog URL:",
tooltip="Enter an http URL to the STAC Catalog",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
start_date = widgets.DatePicker(
description='Start date:',
disabled=False,
style=style,
layout=widgets.Layout(width="225px", padding=padding),
)
end_date = widgets.DatePicker(
description='End date:',
disabled=False,
style=style,
layout=widgets.Layout(width="225px", padding=padding),
)
collection = widgets.Dropdown(
options=get_pc_collection_list(),
value='landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2',
description="Collection:",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
col_name = collection.value.split(' - ')[0].strip()
band_names = get_pc_inventory()[col_name]["bands"]
# red.options = band_names
# green.options = band_names
# blue.options = band_names
item = widgets.Dropdown(
options=['LC08_L2SP_047027_20201204_02_T1'],
description="Item:",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
# assets = widgets.Text(
# value=None,
# description="Bands:",
# tooltip="STAC Asset ID",
# style=style,
# layout=widgets.Layout(width="454px", padding=padding),
# )
layer_name = widgets.Text(
value="STAC Layer",
description="Layer name:",
tooltip="Enter a layer name for the selected file",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
band_width = "149px"
red = widgets.Dropdown(
value='SR_B5',
options=band_names,
description="Red:",
tooltip="Select a band for the red channel",
style=style,
layout=widgets.Layout(width=band_width, padding=padding),
)
green = widgets.Dropdown(
value='SR_B4',
options=band_names,
description="Green:",
tooltip="Select a band for the green channel",
style=style,
layout=widgets.Layout(width="148px", padding=padding),
)
blue = widgets.Dropdown(
value='SR_B3',
options=band_names,
description="Blue:",
tooltip="Select a band for the blue channel",
style=style,
layout=widgets.Layout(width=band_width, padding=padding),
)
vmin = widgets.Text(
value=None,
description="vmin:",
tooltip="Minimum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px", padding=padding),
)
vmax = widgets.Text(
value=None,
description="vmax:",
tooltip="Maximum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px", padding=padding),
)
nodata = widgets.Text(
value=None,
description="Nodata:",
tooltip="Nodata the raster to visualize",
style=style,
layout=widgets.Layout(width="150px", padding=padding),
)
# local_tile_palettes = list_palettes(add_extra=True)
palette_options = list_palettes(lowercase=True)
# palette_options = local_tile_palettes
palette = widgets.Dropdown(
options=palette_options,
value=None,
description="palette:",
layout=widgets.Layout(width="300px", padding=padding),
style=style,
)
checkbox = widgets.Checkbox(
value=False,
description="Additional params",
indent=False,
layout=widgets.Layout(width="154px", padding=padding),
style=style,
)
add_params_text = "Additional parameters in the format of a dictionary, for example, \n {'palette': ['#006633', '#E5FFCC', '#662A00', '#D8D8D8', '#F5F5F5'], 'expression': '(SR_B5-SR_B4)/(SR_B5+SR_B4)'}"
add_params = widgets.Textarea(
value="",
placeholder=add_params_text,
layout=widgets.Layout(width="454px", padding=padding),
style=style,
)
def reset_options(reset_url=True):
"""Reset the options to their default values."""
if reset_url:
http_url.value = "https://planetarycomputer.microsoft.com/api/stac/v1"
start_date.value = None
end_date.value = None
collection.options = []
collection.value = None
item.options = []
item.value = None
layer_name.value = ""
red.options = []
green.options = []
blue.options = []
red.value = None
green.value = None
blue.value = None
vmin.value = ""
vmax.value = ""
nodata.value = ""
palette.value = None
add_params.value = ""
output.clear_output()
with output:
col_name = collection.value.split(' - ')[0].strip()
band_names = get_pc_inventory()[col_name]["bands"]
red.options = band_names
green.options = band_names
blue.options = band_names
params_widget = widgets.HBox()
raster_options = widgets.VBox()
raster_options.children = [
widgets.HBox([red, green, blue]),
widgets.HBox([vmin, vmax, nodata]),
widgets.HBox([palette, checkbox]),
params_widget,
]
buttons = widgets.ToggleButtons(
value=None,
options=["Collections", "Items", "Display", "Reset", "Close"],
tooltips=["Get Collections", "Get Items", "Display Image", "Reset", "Close"],
button_style="primary",
)
buttons.style.button_width = "65px"
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [close_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [
http_url,
widgets.HBox([start_date, end_date]),
collection,
item,
layer_name,
raster_options,
buttons,
output,
]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def checkbox_changed(change):
if change["new"]:
params_widget.children = [add_params]
else:
params_widget.children = []
checkbox.observe(checkbox_changed, names="value")
def url_changed(change):
if change["new"] or http_url.value == "":
reset_options(reset_url=False)
http_url.observe(url_changed, names="value")
def collection_changed(change):
    # Triggered when the user picks a collection: caches the Planetary
    # Computer inventory on the map object (fetched only once), seeds the
    # item dropdown with the collection's first item, and repopulates the
    # RGB band selectors with sensible defaults for known collections.
    if change["new"]:
        with output:
            if not hasattr(m, "pc_inventory"):
                # Cache the inventory on the map so it is fetched only once.
                setattr(m, "pc_inventory", get_pc_inventory())
            # Option strings look like "<id> - <title>"; keep only the id.
            col_name = change["new"].split(" - ")[0]
            first_item = m.pc_inventory[col_name]["first_item"]
            item.options = [first_item]
            band_names = m.pc_inventory[col_name]["bands"]
            red.options = band_names
            green.options = band_names
            blue.options = band_names
            if change["new"] == "landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2":
                # Landsat 8 default: 7/5/4 false-color composite.
                red.value = "SR_B7"
                green.value = "SR_B5"
                blue.value = "SR_B4"
            elif change["new"] == "sentinel-2-l2a - Sentinel-2 Level-2A":
                # Sentinel-2 default: 8/4/3 false-color composite.
                red.value = "B08"
                green.value = "B04"
                blue.value = "B03"
            else:
                # Unknown collection: use the first three bands when
                # available, otherwise repeat the first band in all slots.
                if len(band_names) > 2:
                    red.value = band_names[0]
                    green.value = band_names[1]
                    blue.value = band_names[2]
                else:
                    red.value = band_names[0]
                    green.value = band_names[0]
                    blue.value = band_names[0]


collection.observe(collection_changed, names="value")
def handle_toolbar_event(event):
    """Expand the toolbar on hover; collapse it again on mouse-out."""
    kind = event["type"]
    if kind == "mouseenter":
        toolbar_widget.children = [toolbar_header, toolbar_footer]
    elif kind == "mouseleave" and not toolbar_button.value:
        toolbar_widget.children = [toolbar_button]
        toolbar_button.value = False
        close_button.value = False


toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
    """Open the full toolbar when toggled on; collapse when toggled off."""
    if change["new"]:
        close_button.value = False
        toolbar_widget.children = [toolbar_header, toolbar_footer]
    elif not close_button.value:
        toolbar_widget.children = [toolbar_button]


toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
    """Tear down the toolbar and detach its control from the map."""
    if not change["new"]:
        return
    toolbar_button.value = False
    if m is not None:
        m.toolbar_reset()
        ctrl = m.tool_control
        if ctrl is not None and ctrl in m.controls:
            m.remove_control(ctrl)
            m.tool_control = None
    toolbar_widget.close()


close_button.observe(close_btn_click, "value")
def button_clicked(change):
    # Dispatch on the toggle-button label. Each branch writes progress and
    # errors into the shared output widget, then clears itself so the same
    # button can be pressed again (buttons.value = None at the end).
    if change["new"] == "Collections":
        # Populate the collection dropdown from the configured endpoint.
        with output:
            output.clear_output()
            if http_url.value.startswith("http"):
                if (
                    http_url.value
                    == "https://planetarycomputer.microsoft.com/api/stac/v1"
                ):
                    # Planetary Computer has a dedicated, cached listing.
                    collection.options = get_pc_collection_list()
                else:
                    print("Retrieving collections...")
                    collection.options = [
                        x[0] for x in get_stac_collections(http_url.value)
                    ]
                    output.clear_output()
            else:
                print("Please enter a valid URL.")
    elif change["new"] == "Items":
        # Search the selected collection for items intersecting the AOI
        # within the optional date range.
        with output:
            output.clear_output()
            if collection.value is not None:
                # Build the STAC datetime query: "start/end", a single
                # date, or None when neither picker is set.
                if start_date.value is not None and end_date.value is not None:
                    datetime = str(start_date.value) + '/' + str(end_date.value)
                elif start_date.value is not None:
                    datetime = str(start_date.value)
                elif end_date.value is not None:
                    datetime = str(end_date.value)
                else:
                    datetime = None
                col_name = collection.value.split(' - ')[0].strip()
                if m.user_roi is not None:
                    intersects = m.user_roi['geometry']
                else:
                    print("Please draw a polygon to be used as an AOI.")
                    print(
                        "Since no AOI is specified, using the default sample AOI."
                    )
                    # Fallback AOI: a small rectangle near Seattle, WA.
                    intersects = {
                        "type": "Polygon",
                        "coordinates": [
                            [
                                [-122.27508544921875, 47.54687159892238],
                                [-121.96128845214844, 47.54687159892238],
                                [-121.96128845214844, 47.745787772920934],
                                [-122.27508544921875, 47.745787772920934],
                                [-122.27508544921875, 47.54687159892238],
                            ]
                        ],
                    }
                print("Retrieving items...")
                gdf = get_stac_items(
                    http_url.value,
                    col_name,
                    datetime=datetime,
                    intersects=intersects,
                )
                if gdf is not None:
                    item.options = gdf['id'].tolist()
                    # Lazily add a layers control so footprints can be
                    # toggled; only once per map.
                    if not hasattr(m, 'layers_control'):
                        layers_control = m.add_control(
                            ipyleaflet.LayersControl(position="topright")
                        )
                        setattr(m, 'layers_control', layers_control)
                    m.add_gdf(gdf, 'Image footprints', style={'fill': False})
                    output.clear_output()
                    print(f'{len(item.options)} items found.')
            else:
                print("Please select a valid collection.")
    elif change["new"] == "Display":
        # Render the selected item on the map with the chosen bands and
        # visualization parameters.
        with output:
            output.clear_output()
            if red.value is not None:
                print("Loading data...")
                if (
                    checkbox.value
                    and add_params.value.strip().startswith("{")
                    and add_params.value.strip().endswith("}")
                ):
                    # SECURITY: eval() on user-typed text — acceptable only
                    # because this runs in the user's own notebook kernel,
                    # but consider ast.literal_eval instead.
                    vis_params = eval(add_params.value)
                else:
                    vis_params = {}
                # A palette is only valid for single-band display (or when
                # an "expression" collapses bands to one value).
                if (
                    palette.value
                    and len(set([red.value, green.value, blue.value])) == 1
                ) or (palette.value and "expression" in vis_params):
                    vis_params["colormap_name"] = palette.value
                elif (
                    palette.value
                    and len(set([red.value, green.value, blue.value])) > 1
                    and "expression" not in vis_params
                ):
                    palette.value = None
                    print("Palette can only be set for single band images.")
                if vmin.value and vmax.value:
                    vis_params["rescale"] = f"{vmin.value},{vmax.value}"
                if nodata.value:
                    vis_params["nodata"] = nodata.value
                col = collection.value.split(" - ")[0]
                # Single selected band -> one asset; otherwise an RGB triple.
                if len(set([red.value, green.value, blue.value])) == 1:
                    assets = red.value
                else:
                    assets = f'{red.value},{green.value},{blue.value}'
                m.add_stac_layer(
                    collection=col,
                    item=item.value,
                    assets=assets,
                    name=layer_name.value,
                    **vis_params,
                )
                output.clear_output()
            else:
                print("Please select at least one band.")
        buttons.value = None
    elif change["new"] == "Reset":
        reset_options()
    elif change["new"] == "Close":
        if m is not None:
            m.toolbar_reset()
            if m.tool_control is not None and m.tool_control in m.controls:
                m.remove_control(m.tool_control)
                m.tool_control = None
        toolbar_widget.close()
    # Un-toggle so the same button can be clicked again.
    buttons.value = None


buttons.observe(button_clicked, "value")
# Start in the expanded state.
toolbar_button.value = True
if m is not None:
    # Attach the toolbar to the map as a widget control and remember it so
    # close_btn_click() can remove it later.
    toolbar_control = ipyleaflet.WidgetControl(
        widget=toolbar_widget, position="topright"
    )
    if toolbar_control not in m.controls:
        m.add_control(toolbar_control)
        m.tool_control = toolbar_control
else:
    # No map supplied: hand the assembled widget back to the caller.
    return toolbar_widget
|
<reponame>niyoushanajmaei/gpt-neo
import numpy as np
import tensorflow.compat.v1 as tf
from functools import partial
from data.encoders import encode
import random
import re
import logging
from itertools import cycle
from utils import natural_sort
### IN USE ###
def _get_number_of_documents(filename):
# extracts number of files from a filename formatted "<name>_<num_documents>.tfrecords."
# if no pattern is matched, returns None
match = re.search("_(\d{1,}).tfrecords$", filename)
return int(match.group(1)) if match is not None else match
def _get_number_of_documents_by_iteration(filename):
    """Count the records in a tfrecord file by scanning it end to end.

    Fallback used when no document count is encoded in the filename; this
    reads the entire file, so it can be very slow.
    """
    logging.warning(
        "inputs/sequential_input() found no metadata found in filename - iterating through first tfrecord to find global length")
    return sum(1 for _ in tf.io.tf_record_iterator(filename))
def _get_skip_index(all_files, n_batches):
    """Work out how to resume reading after `n_batches` documents.

    Args:
        all_files: Ordered list of tfrecord filenames.
        n_batches: Total number of documents already consumed.

    Returns:
        (skip_idx, remainder): the number of whole files to skip, and the
        number of documents still to skip inside the following file.
    """
    prev_cumsum = 0
    cumsum = 0
    # Count discovered by iterating one file; reused for every file whose
    # name carries no count (assumes those files are all the same size).
    global_n_documents = None
    for count, f in cycle(enumerate(all_files)):
        prev_cumsum = cumsum
        n_docs = _get_number_of_documents(f)  # hoisted: was called twice
        if n_docs is not None:
            cumsum += n_docs
        else:
            if global_n_documents is None:
                global_n_documents = _get_number_of_documents_by_iteration(f)
            cumsum += global_n_documents
        if cumsum == n_batches:
            # Resumption point falls exactly on a file boundary: skip this
            # file entirely and nothing inside the next one.
            remainder = 0
            skip_idx = count + 1
            # Fix: previously this branch did not break, so the loop
            # scanned one more file (possibly a slow full iteration), and
            # when the boundary fell on the LAST file, cycle() wrapped and
            # clobbered skip_idx with a wrong index.
            break
        elif cumsum > n_batches:
            remainder = n_batches - prev_cumsum
            skip_idx = count
            break
    return skip_idx, remainder
def _parse_function(example_proto):
    """Parse a serialized tf.Example with a variable-length int64 "text"
    feature (token ids) into a dense 1-D tensor."""
    features = {
        "text": tf.VarLenFeature(tf.int64)
    }
    parsed_features = tf.parse_single_example(example_proto, features)
    # NOTE(review): the second positional argument of tf.sparse.to_dense is
    # `default_value`; passing dense_shape[0] fills gaps with the sequence
    # length rather than 0 — confirm this is intentional.
    return tf.sparse.to_dense(parsed_features["text"], parsed_features["text"].dense_shape[0])
def autoregressive_sample_text(params, x):
    """Split a token sequence into (inputs, targets) shifted by one token.

    Takes the first n_ctx tokens as inputs and tokens 1..n_ctx as targets,
    reshaped to a static [n_ctx] shape (required by TPUs) and cast to int32.
    """
    ctx = params["n_ctx"]
    inputs = tf.cast(tf.reshape(x[:ctx], [ctx]), dtype=tf.int32)
    targets = tf.cast(tf.reshape(x[1:ctx + 1], [ctx]), dtype=tf.int32)
    return inputs, targets
def sequential_input(params, global_step=None, eval=False):
    """
    Input fn that reads tfrecords encoded with a fixed chunk size (== n_ctx + 1), and that either:

        - has the number of documents for each tfrecord file encoded in the title in the format
          <name>_<n_documents>.tfrecords.

        OR

        - has a fixed number of documents per tfrecord file.

    If the glob pattern above isn't matched, we assume that each document has the same number of samples as the first tfrecord read.
    If this isn't the case, it may result in errors, or some samples being missed.

    This means we can calculate the number of samples we've seen so far using the global step,
    and can use dataset.skip() to iterate through the list of filenames, as opposed to the whole dataset, which is incredibly inefficient.

    If training is starting and stopping often, as with TPU pre-emption, reading the whole dataset sequentially appears to improve model
    performance, as it results in less repeated data.
    """
    # NOTE: `eval` shadows the builtin; kept for interface compatibility.
    if not eval:
        # The skip logic below needs the global step to know how far into
        # the data the previous run got.
        assert global_step is not None
    logging.warning(
        "Changing batch size with sequential_input() will result in some data being skipped or repeated. Please ensure your batch size stays constant throughout training.")
    batch_size = params['eval_batch_size' if eval else 'train_batch_size']

    filenames = []
    for dataset_config in params['dataset_configs'].values():  # iterate through each dataset and read params
        path_key = 'path' if not eval else 'eval_path'
        path = dataset_config[path_key]
        filenames.extend(
            tf.io.gfile.glob(path))  # then glob all files that fit the pattern specified in dataset_configs

    filenames = natural_sort(filenames)
    shuffle_filenames = params.get("shuffle_input_filenames", True)
    if shuffle_filenames:
        seed = params.get('seed', 1)  # shuffle deterministically
        random.seed(seed)
        random.shuffle(filenames)

    dataset = tf.data.Dataset.from_tensor_slices(filenames).repeat()  # repeat filenames to infinity

    if not eval:
        # skip forward first in the filenames list, then skip the remaining
        # amount in the parsed tfrecords files
        skip_idx, remainder = _get_skip_index(filenames, n_batches=global_step * params[
            "train_batch_size"])  # TODO: fix for > 1 epoch
        dataset = dataset.skip(skip_idx)  # skip to skip idx

        # read tfrecord examples and skip remainder
        dataset = dataset.apply(tf.data.TFRecordDataset)
        dataset = dataset.skip(remainder)
    else:
        # shuffle filenames if in eval mode
        dataset = dataset.shuffle(len(filenames))
        dataset = dataset.apply(tf.data.TFRecordDataset)

    # parse the tokenized data from the tfrecord files into (inputs, targets)
    dataset = dataset.map(_parse_function, num_parallel_calls=1)
    dataset = dataset.map(partial(autoregressive_sample_text, params), num_parallel_calls=1)

    # batch data and repeat to infinity
    dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(params["iterations"] * 2)
    return dataset.repeat()
def pred_input(params, logger, enc=None,
               path_to_prompt=""):
    """Build a dataset repeating a single prompt for prediction.

    Args:
        params: Model config; uses "n_ctx", "batch_size", "padding_id".
        logger: Logger used to report prompt truncation.
        enc: Tokenizer passed through to data.encoders.encode.
        path_to_prompt: Optional path to a text file holding the prompt;
            when empty, a built-in sample prompt is used.

    Returns:
        A tf.data.Dataset yielding (tokens, tokens) pairs of shape
        [batch_size, n_ctx]; the label element is a dummy copy of the input.
    """
    unicorns = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \
               "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \
               "researchers was the fact that the unicorns spoke perfect English."

    if path_to_prompt == "":
        text = unicorns
    else:
        # Fix: use a context manager so the prompt file handle is closed
        # (previously open(...).read() leaked the handle).
        with open(path_to_prompt, "r") as f:
            text = f.read()

    tokens = encode(enc, text)

    if len(tokens) > params["n_ctx"]:
        logger.info("The length of your input prompt is longer than the model's context length - truncating input.")
        # Keep the trailing n_ctx tokens.
        tokens = tokens[len(tokens) - params["n_ctx"]:]
    if len(tokens) < params["n_ctx"]:
        # Right-pad short prompts up to the context length.
        tokens = tf.pad(tokens, [[0, params["n_ctx"] - len(tokens)]], constant_values=params["padding_id"])
    t = tf.broadcast_to(tokens, [params["batch_size"], params["n_ctx"]])
    dataset = tf.data.Dataset.from_tensors(t)

    def _dummy_labels(x):
        # Prediction needs no labels; reuse the inputs to satisfy the API.
        return x, x

    dataset = dataset.map(_dummy_labels)
    return dataset
def handle_pred_output(predictions, logger, enc, params, out_name="test"):
    """Decode predictions and write the first one to `out_name`.

    Truncates each output at the first eos/padding token before decoding
    with `enc`. Only the first prediction is consumed — the loop breaks
    after one iteration.
    """
    with tf.gfile.Open(out_name, "w") as f:
        for i, p in enumerate(predictions):
            p = p["outputs"]

            # remove eos + padding ids from output
            # np.argmax returns 0 when no element matches, so an output
            # without eos/padding (or starting with it) is left untruncated
            # — hence the idx > 0 guard.
            idx = np.argmax(p == params['eos_id'])
            if idx > 0:
                p = p[:idx]
            idx = np.argmax(p == params['padding_id'])
            if idx > 0:
                p = p[:idx]

            text = enc.decode(p)
            f.write(text)
            logger.info(text)
            # only using the first prediction
            break
### DEPRECATED ###
def generic_text(params, eval=False, sample_text_fn=None, **kwargs):
    """DEPRECATED input fn: sample from several weighted text datasets.

    Builds one text_dataset() per entry of params["datasets"] and draws
    from them with tf.data.experimental.sample_from_datasets according to
    each entry's weight.
    """
    logging.warning("DEPRECATION WARNING: generic_text will be phased out in future versions.")
    # NOTE(review): `i` is never used below — looks like dead code; confirm.
    i = 0 if not eval else 1

    weights = []
    datasets = []
    # Each configured dataset is a (id, stitch, datatype, weight) tuple.
    for dataset in params["datasets"]:
        dataset_id, stitch, datatype, weight = dataset
        assert dataset_id in params[
            'dataset_configs'], f'Unknown dataset id {dataset_id} given. Please make sure your dataset ids contain that configuration'
        dataset_config = params['dataset_configs'][dataset_id]
        path_key = 'path' if not eval else 'eval_path'
        path = dataset_config[path_key]
        datasets.append(text_dataset(
            tf.io.gfile.glob(path),
            params,
            stitch=stitch,
            datatype=datatype,
            batch=False,
            sample_text_fn=sample_text_fn
        ))
        weights.append(weight)

    batch_size = params['eval_batch_size' if eval else 'train_batch_size']
    seed = params.get('seed', None)
    # Interleave the datasets, drawing in proportion to their weights.
    dataset = tf.data.experimental.sample_from_datasets(datasets, weights=weights, seed=seed)
    dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(params["iterations"] * 2)
    return dataset
def text_dataset(files, params, stitch, datatype, batch=True, sample_text_fn=None):
    """DEPRECATED: build a token dataset from tfrecord files.

    For "documents*" datatypes, stitches `stitch` variable-length documents
    together (joined by eos) and then samples a fixed n_ctx(+1) window.
    A seed in params forces deterministic, single-threaded reading.
    """
    seed = params.get('seed', None)
    deterministic = seed is not None
    num_parallel_calls = 1 if deterministic else tf.data.experimental.AUTOTUNE

    dataset = tf.data.Dataset.from_tensor_slices(files)

    if deterministic:
        dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=4)
    else:
        dataset = dataset.apply(
            tf.data.experimental.parallel_interleave(tf.data.TFRecordDataset, cycle_length=4, sloppy=False))

    if "documents" in datatype:
        def _parse_function(example_proto):
            # Variable-length documents: also return the true length so the
            # stitcher can strip sparse padding.
            features = {
                # "hash": tf.VarLenFeature(tf.string),
                "text": tf.VarLenFeature(tf.int64)
            }
            parsed_features = tf.parse_single_example(example_proto, features)
            return parsed_features["text"], parsed_features["text"].dense_shape[0]
    else:
        def _parse_function(example_proto):
            features = {
                "text": tf.VarLenFeature(tf.int64)
            }
            parsed_features = tf.parse_single_example(example_proto, features)
            return parsed_features["text"]  # Assuming the text is not sparse

    dataset = dataset.map(_parse_function, num_parallel_calls=1)

    # Subsample method
    if "documents" in datatype:
        # Since samples can be less than the correct length, and TPUs don't like variable lengths, this function stitches together enough samples
        # to have a text at least 1024 tokens long. For this to work the stitch parameter must be correctly tuned so that
        # stitch * min(characters_in_text) >= amount
        def _stitch_text(x, y):
            x = tf.sparse.to_dense(x)

            def _get_x(i):
                # Strip the dense padding back to each document's length.
                return tf.gather(x[i], tf.range(y[i]))

            out = _get_x(0)
            eos_id = params['eos_id']
            for i in range(1, stitch):
                out = tf.concat([out, [eos_id], _get_x(i)], axis=0)  # text1<|endoftext|>text2
            return out

        # Hack-y way to stitch together multiple texts
        dataset = dataset.shuffle(1000 * stitch, seed=seed).batch(stitch, drop_remainder=True).map(_stitch_text,
                                                                                                   num_parallel_calls=num_parallel_calls)

        # Sample 1024(+1) tokens from the stitched together text
        is_random_documents = datatype == "documents_random"
        if sample_text_fn is not None:
            # NOTE(review): a supplied sample_text_fn must already have
            # `params` bound (e.g. partial(mlm_sample_text, params)) —
            # confirm at the call sites.
            _sample_text = partial(sample_text_fn, random_documents=is_random_documents)
        else:
            _sample_text = autoregressive_sample_text_random_documents if is_random_documents else autoregressive_sample_text
            _sample_text = partial(_sample_text, params)

        dataset = dataset.map(_sample_text, num_parallel_calls=num_parallel_calls)

    if batch:
        dataset = dataset.batch(params["train_batch_size"], drop_remainder=True).prefetch(params["iterations"] * 2)

    dataset = dataset.repeat()

    return dataset
def autoregressive_sample_text_random_documents(params, x):
    """Sample a random (inputs, targets) window of n_ctx tokens from x.

    Picks a uniform random start offset, then returns that window and the
    same window shifted one token ahead, both cast to int32. Requires
    tf.size(x) > n_ctx + 1.
    """
    seed = params.get('seed', None)
    s = tf.size(x)
    # Random start offset; leaves room for the +1 shifted target window.
    r = tf.random.uniform([], maxval=s - (params["n_ctx"] + 1), dtype=tf.dtypes.int32, seed=seed)
    r1 = tf.range(r, r + params["n_ctx"])
    r2 = tf.range(r + 1, (r + 1) + params["n_ctx"])
    r1 = tf.reshape(r1, [params["n_ctx"]])  # Somehow, this makes the compiler happy
    r2 = tf.reshape(r2, [params[
        "n_ctx"]])  # TPUs want constant sized input, and these reshapes makes it recognize the shape of the input
    vals1 = tf.gather(x, r1)
    vals2 = tf.gather(x, r2)
    vals1 = tf.reshape(vals1, [params["n_ctx"]])
    vals2 = tf.reshape(vals2, [params["n_ctx"]])
    vals1 = tf.cast(vals1, dtype=tf.int32)
    vals2 = tf.cast(vals2, dtype=tf.int32)
    return vals1, vals2
def mlm_sample_text(params, x, random_documents=False):
    """Turn a token sequence into a BERT-style masked-LM training pair.

    Returns (masked_features, labels), both of shape [n_ctx]: features with
    a fraction of maskable tokens replaced by the mask id (and optionally
    random tokens), and labels holding the original ids at masked
    positions, 0 elsewhere.
    """
    seed = params.get('seed', None)
    ctx_len = params["n_ctx"]
    assert 'mlm_mask_id' in params, 'the key `mlm_mask_id` must be set on your config to do masked language model training, specifying the id of the reserved mask token'

    mask_id = params['mlm_mask_id']
    cls_token_id = params.get('mlm_cls_token_id', None)
    num_tokens = params.get('n_vocab', None)

    # Token ids that must never be masked (plus the cls token, if any).
    mask_ignore_ids = set(params.get('mlm_mask_ignore_ids', []))
    mask_ignore_ids.add(cls_token_id)

    mask_prob = params.get('mlm_mask_prob', 0.15)
    same_token_prob = params.get('mlm_same_token_prob', 0.10)
    random_token_prob = params.get('mlm_random_token_prob', 0.)

    # Reserve one slot for the cls token when it is configured.
    seq_len = ctx_len if cls_token_id is None else (ctx_len - 1)

    if random_documents:
        # Sample a random contiguous window of seq_len tokens.
        s = tf.size(x)
        r = tf.random.uniform([], maxval=(s - seq_len), dtype=tf.dtypes.int32, seed=seed)
        r1 = tf.range(r, r + seq_len)
        r1 = tf.reshape(r1, [seq_len])
        features = tf.gather(x, r1)
    else:
        features = x[:seq_len]

    # add cls token id if specified by `mlm_cls_token_id`
    if cls_token_id is not None:
        features = tf.pad(features, [[1, 0]], constant_values=cls_token_id)

    features = tf.cast(features, dtype=tf.int32)
    shape = features.shape

    # determine which tokens are mask-able (id 0 is treated as padding)
    can_mask = tf.not_equal(features, 0)
    for ignore_id in mask_ignore_ids:
        can_mask &= tf.not_equal(features, ignore_id)

    # generate boolean mask for masking ids
    mask_mask = tf.less(tf.random.uniform(shape, minval=0., maxval=1., dtype=tf.float32, seed=seed), mask_prob)
    mask_mask &= can_mask

    # generate mask for actually replacing the tokens, for allowing a small number of tokens to stay the same
    replace_mask = tf.less(tf.random.uniform(shape, minval=0., maxval=1., dtype=tf.float32, seed=seed),
                           1 - same_token_prob)

    # randomly replace some tokens with random tokens before masking
    if random_token_prob > 0:
        random_token_mask = tf.less(tf.random.uniform(shape, minval=0., maxval=1., dtype=tf.float32, seed=seed),
                                    random_token_prob)
        random_tokens = tf.random.uniform(shape, minval=1, maxval=num_tokens, dtype=tf.dtypes.int32, seed=seed)

        # make sure random tokens do not include illegal token ids specified by `mlm_mask_ignore_ids`
        random_can_mask = tf.not_equal(random_tokens, 0)
        for ignore_id in mask_ignore_ids:
            random_can_mask &= tf.not_equal(random_tokens, ignore_id)

        features = tf.where(random_token_mask & random_can_mask, random_tokens, features)

    # mask the tokens
    mask_tokens = tf.ones(shape, dtype=tf.int32) * mask_id
    masked_features = tf.where(mask_mask & replace_mask, mask_tokens, features)

    # labels will be set to 0 for all non-masked tokens
    labels = tf.where(mask_mask, tf.zeros(shape, dtype=tf.int32), features)

    masked_features, labels = map(lambda t: tf.reshape(t, [ctx_len]), (masked_features, labels))
    return masked_features, labels
|
<filename>atomic/bin/batch.py<gh_stars>0
import csv
from multiprocessing import Pool
import os
import subprocess
from atomic.parsing.replayer import filename_to_condition
# Repository root; all data and script paths below are built relative to it.
home = '/home/david/working/atomic'
# Run inferences in parallel via a process pool (see __main__).
multi = True
# Pass --profile through to model_inference.py.
profile = False
# Analyze existing result files instead of (re)running inference.
analyze = False
def run_inference(args):
    """Launch model_inference.py for one trial in a subprocess.

    Args:
        args: A (fname, (root, log_name)) pair as produced by files.items().
    """
    fname, sub_args = args
    root, log_name = sub_args
    search_path = [
        os.path.join(home, '..', 'psychsim'),
        home,
        os.path.join(home, '..', 'model-learning'),
    ]
    env = {'PYTHONPATH': ':'.join(search_path)}
    cmd = [
        'python3',
        os.path.join(home, 'atomic', 'bin', 'model_inference.py'),
        os.path.join(root, fname),
        '--ignore_rationality',
        '--ignore_horizon',
        '-d', 'INFO',
        '-c', os.path.join(home, 'data', 'rewards', 'linear', 'phase1_clusters.csv'),
        '--metadata', os.path.join(home, 'data', 'ASU_2020_08', 'Raw', log_name),
    ]
    if profile:
        cmd.append('--profile')
    # cmd += ['-n', '20']
    subprocess.run(cmd, env=env)
# Chance baseline probability for each condition label
# ("CondBtwn CondWin[1]"), used by evaluate_inference() as the bar an
# inferred belief must beat. NOTE(review): the values are k/26 fractions —
# presumably relative frequencies over a 26-trial sample; confirm.
baseline = {'NoTriageNoSignal StaticMap': 0.19230769230769232, 'TriageNoSignal StaticMap': 0.15384615384615385,
            'TriageSignal DynamicMap': 0.38461538461538464, 'TriageSignal StaticMap': 0.2692307692307692}
def evaluate_inference(data_file, condition):
    """Score one trial's condition-inference output against the baseline.

    Reads a tab-separated file of (Timestep, Condition, Belief) rows and,
    for each timestep, checks whether the true condition was the most
    likely one and whether its belief exceeded the chance baseline.

    Returns a dict with the trial id, the fraction of timesteps on which
    each criterion held, and whether each held at the final timestep.
    """
    inference = {}      # latest belief per condition label
    current = None      # timestep whose rows are being accumulated
    max_times = []      # timesteps where the true condition was most likely
    better_times = []   # timesteps where it beat the chance baseline
    correct = f'{condition["CondBtwn"]} {condition["CondWin"][1]}'
    assert correct in baseline, f'Illegal condition: {correct}'
    with open(data_file, 'r') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            t = int(row['Timestep'])
            if current is None:
                current = t
            elif t > current:
                # All rows for `current` have been read; score it.
                if inference[correct] == max(inference.values()):
                    # Max likelihood inference
                    max_times.append(current)
                if inference[correct] > baseline[correct]:
                    better_times.append(current)
                current = t
            inference[row['Condition']] = float(row['Belief'])
    # Flush the final timestep, which the loop above never scores.
    if inference[correct] == max(inference.values()):
        # Max likelihood inference
        max_times.append(current)
    if inference[correct] > baseline[correct]:
        better_times.append(current)
    # NOTE(review): `t` is the last timestep read; the percentages assume
    # timesteps start at 1 and are contiguous — confirm. Also raises
    # NameError/KeyError if the file has no data rows.
    return {'Trial': condition['Trial'], '% Most Likely': len(max_times)/t, '% More Likely': len(better_times)/t,
            'Final Most Likely': t in max_times, 'Final More Likely': t in better_times}
if __name__ == '__main__':
    # Map directories, one per difficulty level.
    dirs = [os.path.join(home, 'data', 'ASU_2020_08', 'FalconEasy_1123'),
            os.path.join(home, 'data', 'ASU_2020_08', 'FalconMedium_1123'),
            os.path.join(home, 'data', 'ASU_2020_08', 'FalconHard_1123')]
    log_dir = os.path.join(home, 'data', 'ASU_2020_08', 'Raw')
    files = {}      # fname -> (root, log_name) still needing inference
    analyzees = {}  # analysis tsv path -> condition dict (analysis mode)
    for root in dirs:
        for fname in sorted(os.listdir(root)):
            base_name, ext = os.path.splitext(fname)
            if ext != '.csv':
                continue
            trial = filename_to_condition(base_name)['Trial']
            # Find the raw log whose trial number matches this data file.
            for log_name in sorted(os.listdir(log_dir)):
                if filename_to_condition(log_name)['Trial'] == trial:
                    break
            else:
                # for/else: no matching log was found.
                print(f'Unable to find log file for {fname}')
                continue
            for analysis in ['conditions']:
                data_file = os.path.join(root, f'{base_name}_Analysis-{analysis}.tsv')
                if os.path.exists(data_file):
                    # Already processed: only collect it for analysis mode.
                    if analyze:
                        analyzees[data_file] = filename_to_condition(log_name)
                else:
                    # Missing output: queue this trial for inference.
                    print(f'did not find {data_file}')
                    files[fname] = (root, log_name)
                    break
            else:
                # for/else: every analysis file already exists.
                print(f'Skipping, already processed: {fname}')
    if analyze:
        # Aggregate evaluate_inference() metrics across all trials:
        # booleans are counted, fractions are summed, then both averaged.
        data = []
        total = {}
        for data_file, condition in analyzees.items():
            data.append(evaluate_inference(data_file, condition))
            for field, value in data[-1].items():
                if field != 'Trial':
                    if isinstance(value, bool):
                        if value:
                            total[field] = total.get(field, 0) + 1
                    else:
                        total[field] = total.get(field, 0) + value
        for field in total:
            total[field] /= len(analyzees)
        print(total)
    elif multi:
        # Parallel inference across a small process pool.
        with Pool(processes=3) as pool:
            pool.map(run_inference, files.items())
    else:
        # Sequential fallback, in deterministic filename order.
        for args in sorted(files.items()):
            run_inference(args)
|
#!/usr/bin/python
#
# aws.py
#
# Spin up Ceph cluster in AWS.
#
# Cluster is defined in the aws.yaml file.
#
# Generate pseudo-code from #PC comments:
# $ grep -E '^ *#PC' aws.py | sed -e 's/#PC //'g
#
import argparse
from aws_lib import SpinupError
import init_lib
from pprint import pprint
import sys
import yaml_lib
#PC * Parse arguments.
parser = argparse.ArgumentParser( description='Spin up a Ceph cluster in AWS.' )
parser.add_argument(
    '--yaml',
    default='./aws.yaml',
    help="yaml file to read (defaults to ./aws.yaml)"
)
parser.add_argument(
    '--master',
    action='store_const',
    const=True,
    help="Install the Salt Master in 10.0.0.0/24"
)
#parser.add_argument(
#    'operation',
#    default='create',
#    help="cluster-wide cloud operation ('create', 'terminate', 'pause', "+
#        "'resume', 'start', 'stop', 'suspend', 'unpause') to perform",
#    nargs='?'
#)
args = parser.parse_args()

# Initialize dictionary for storage of globals (values that do not change,
# but are discarded when the script exits).
g = {}
#PC * Parse YAML.
y = yaml_lib.parse_yaml( args.yaml )
#PC * Connect to region specified in YAML ("region").
# (i.e., get VPCConnection and EC2Connection objects).
# FIXME: validate that the region exists
y['region'] = yaml_lib.yaml_attr( y, 'region', 'eu-west-1' )
( g['vpc_conn'], g['ec2_conn'] ) = init_lib.init_region( y['region'] )
print "Connected to region {}".format( y['region'] )
#PC * Connect to VPC specified in YAML ("vpc" -> "cidr-block").
n = yaml_lib.yaml_attr( y, 'vpc', None )
n['cidr-block'] = yaml_lib.yaml_attr( n, 'cidr-block', None )
n['name'] = yaml_lib.yaml_attr( n, 'name', 'susecon' )
print "Looking for VPC {}".format(n['cidr-block'])
g['vpc_obj'] = init_lib.init_vpc( g['vpc_conn'], n['cidr-block'] )
#PC * Clobber existing VPC tag with YAML value ("vpc" -> "name").
init_lib.update_tag( g['vpc_obj'], 'Name', n['name'] )
print "Found VPC {} (Name: {})".format(n['cidr-block'], n['name'])
#PC * Look at YAML "subnets" and see how many there are.
subnets = yaml_lib.yaml_attr( y, 'subnets', None )
print "{} subnets in yaml".format(len(subnets))
#PC * Raise exception if there is not at least one subnet.
if 1 > len(n):
raise SpinupError( "No subnets in yaml" )
#PC * Loop through the YAML subnet definitions.
g['subnet_obj'] = []
count = 0
for s in subnets:
    #PC * First subnet is assumed to be the "Master Subnet" (CIDR block
    #PC   defaults to 10.0.0.0/24).
    if count == 0:
        # master subnet
        # NOTE(review): this stores the "delegate" YAML attribute under
        # s['name'], and s['delegate'] is read below for tagging/printing —
        # the master entry appears to rely on the YAML providing 'delegate';
        # confirm, otherwise this raises KeyError.
        s['name'] = yaml_lib.yaml_attr( s, 'delegate', '0' )
        s['cidr-block'] = yaml_lib.yaml_attr( s, 'cidr-block', '10.0.0.0/24' )
        print "Looking for master subnet {} ({})".format(s['cidr-block'], s['name'])
    #PC * All others are "Minion Subnets" (i.e. for delegates): CIDR block
    #PC   defaults to 10.0.<delegate>.0/24
    else:
        # minion subnet
        s['delegate'] = yaml_lib.yaml_attr( s, 'delegate', 'MISSING' )
        s['cidr-block'] = yaml_lib.yaml_attr( s, 'cidr-block', '10.0.{}.0/24'.format(count) )
        print "Looking for minion subnet {} (delegate {})".format(s['cidr-block'], s['delegate'])
    #PC * For each subnet (Master or Minion) in YAML:
    #PC   * Get subnet object and store it.
    g['subnet_obj'].append( init_lib.init_subnet( g['vpc_conn'], g['vpc_obj'].id, s['cidr-block'] ) )
    #PC   * Clobber existing subnet tag with the one specified in YAML.
    init_lib.update_tag( g['subnet_obj'][count], 'Name', y['nametag'] )
    init_lib.update_tag( g['subnet_obj'][count], 'Delegate', s['delegate'] )
    #PC   * Update subnet "MapPublicIpOnLaunch" attribute, so all instances
    #PC     created in this subnet will automatically get a public IP address.
    if not (
        hasattr( g['subnet_obj'][count], 'mapPublicIpOnLaunch' ) and
        g['subnet_obj'][count].mapPublicIpOnLaunch != 'false'
    ):
        init_lib.set_subnet_map_public_ip( g['ec2_conn'], g['subnet_obj'][count].id )
    print "Found subnet {} (delegate {})".format(s['cidr-block'], s['delegate'])
    count += 1
#PC * If --master option was given on the command line:
if args.master:
#PC * Get all existing instances in the subnet.
subnet_id = g['subnet_obj'][0].id
subnet_cidr = g['subnet_obj'][0].cidr_block
noofinstances = init_lib.count_instances_in_subnet(
g['ec2_conn'],
subnet_id
)
#PC * If there are already instances in the subnet, print their IDs and bail out.
if noofinstances > 0:
print "There are already {} instances in the Master subnet {}".format(noofinstances, subnet_cidr)
sys.exit(1)
print "Creating 1 master node in the Master Subnet {}.".format( y['subnets'][0]['cidr-block'] )
#PC * Process Master user-data script (replace tokens with values from environment)
u = init_lib.process_user_data(
y['master']['user-data'],
y['master']['replace-from-environment']
)
#PC * Derive address (e.g. 10.0.0.10) for the Master
g['master']['ip-address'] = init_lib.derive_ip_address(
y['subnets'][0]['cidr-block'],
0,
10
)
#PC * Spin up AWS instance for the Master.
reservation = init_lib.make_reservation(
g['ec2_conn'],
y['master']['ami-id'],
key_name=y['keyname'],
instance_type=y['master']['type'],
user_data=u,
subnet_id=g['subnet_obj'][0].id,
private_ip_address=y['master']['ip-address'],
master=True
)
g['master_instance'] = reservation.instances[0]
#PC * Clobber tag with hard-coded value "master".
init_lib.update_tag( g['master_instance'], 'Name', y['nametag'] )
#PC * Report result to user, and exit.
print "Master node {} ({}, {}) created.".format(
g['master_instance'].id,
g['master_instance'].ip_address,
g['master_instance'].private_ip_address
)
sys.exit(0)
#PC * --master option was *not* given on the command line. Script continues.

#PC * Check that master exists and get its public IP address. Continue if and
#PC   only if there is a single instance in the Master Subnet.
# FIXME: check that the Master instance state is "running".
g['master_instance'] = init_lib.get_master_instance(
    g['ec2_conn'],
    g['subnet_obj'][0].id
)

#PC * Clobber Master instance tag with hard-coded value "master".
# NOTE(review): tag value is y['nametag'], not "master" — comment looks stale.
init_lib.update_tag( g['master_instance'], 'Name', y['nametag'] )
print "Found master instance {} ({}, {})".format(
    g['master_instance'].id,
    g['master_instance'].ip_address,
    g['master_instance'].private_ip_address
)

#PC * The YAML should contain "install_subnets" which is a list of delegate
#PC   numbers. Look at how many elements are in that list. This is the number
#PC   of subnets that we will be installing.
y['install_subnets'] = yaml_lib.yaml_attr( y, 'install_subnets', None )
# NOTE: `n` is reused as a scratch variable here (it previously held the
# VPC attribute dict) and is reused again as the loop variable below.
n = len(y['install_subnets'])
print "Installing {} of {} subnet(s)".format(n, len(subnets))
#PC * Conduct sanity checks on "install_subnets" list:
for n in y['install_subnets']:
    #PC * Delegate numbers cannot be negative.
    if n < 0:
        raise SpinupError( "No negative subnets, silly" )
    #PC * Delegate number 0 is not allowed (use --master).
    if n == 0:
        raise SpinupError( "Use --master to install the master subnet 10.0.0.0/24" )
    #PC * The total number of delegates should be equal to the number of subnets plus one.
    #PC   If any delegate number exceeds this value, raise an exception.
    # NOTE(review): this bound still permits n == len(subnets) and
    # n == len(subnets)+1, neither of which has an entry in g['subnet_obj']
    # (valid indices are 1..len(subnets)-1) and would raise IndexError in
    # the install loop — confirm the intended upper bound.
    if n > len(subnets) + 1:
        raise SpinupError( "Subnet {} is to be installed, but only {} subnets are defined in yaml".format(n, len(subnets)) )
#PC * Initialize structures to hold the cluster node resource objects.
# One dict per node role, keyed by delegate number; each delegate's entry
# is filled in by the install loop below.
g['admin_node'] = {}
g['mon1_node'] = {}
g['mon2_node'] = {}
g['mon3_node'] = {}
g['osd_node'] = {}
g['windows_node'] = {}
# Size (GB, default 20) of the OSD data volume attached to each mon node.
volume_size = yaml_lib.yaml_attr( y['mon'], 'volume', 20 )
for delegate in y['install_subnets']:
    g['admin_node'][delegate] = {}
    g['mon1_node'][delegate] = {}
    g['mon2_node'][delegate] = {}
    g['mon3_node'][delegate] = {}
    g['osd_node'][delegate] = {}
    g['windows_node'][delegate] = {}
#PC * Loop over the delegate numbers in "install_subnets":
for delegate in y['install_subnets']:
#PC * Store subnet ID and CIDR block in temporary variables
subnet_id = g['subnet_obj'][delegate].id
subnet_cidr = g['subnet_obj'][delegate].cidr_block
print "Installing subnet {} ({})".format( subnet_cidr, subnet_id )
#PC * Get all existing instances in the subnet.
noofinstances = init_lib.count_instances_in_subnet(
g['ec2_conn'],
subnet_id
)
#PC * If there are already instances in the subnet, print their IDs and bail out.
if noofinstances > 0:
print "There are already {} instances in subnet {}".format(noofinstances, subnet_cidr)
sys.exit(1)
#PC * Create 1 admin node:
print "Create 1 admin node"
#PC * Derive IP address of admin node
g['admin_node'][delegate]['ip-address'] = init_lib.derive_ip_address(
y['subnets'][0]['cidr-block'],
delegate,
10
)
#PC * Process admin node user-data
u = init_lib.process_user_data(
y['admin']['user-data'],
y['admin']['replace-from-environment']
)
#PC * Spin up admin node instance
reservation = init_lib.make_reservation(
g['ec2_conn'],
y['admin']['ami-id'],
key_name=y['keyname'],
instance_type=y['admin']['type'],
user_data=u,
subnet_id=subnet_id,
private_ip_address=g['admin_node'][delegate]['ip-address'],
master=False,
master_ip=g['master_instance'].private_ip_address,
role='admin',
delegate_no=delegate,
node_no=0
)
g['admin_node'][delegate]['instance'] = reservation.instances[0]
#PC * Set admin node tags
init_lib.update_tag( g['admin_node'][delegate]['instance'], 'Name', y['nametag'] )
init_lib.update_tag( g['admin_node'][delegate]['instance'], 'Role', 'admin' )
init_lib.update_tag( g['admin_node'][delegate]['instance'], 'Delegate', delegate )
#PC * Create 3 mon nodes.
print "Create 3 mon nodes"
#PC * Process mon node user-data
u = init_lib.process_user_data(
y['mon']['user-data'],
y['mon']['replace-from-environment']
)
#PC * For each of the three mon nodes:
for x in range(1, 4):
mon_node = g['mon{}_node'.format(x)][delegate]
#PC * Derive IP address
mon_node['ip-address'] = init_lib.derive_ip_address(
y['subnets'][0]['cidr-block'],
delegate,
10+x
)
#PC * Make reservation.
reservation = init_lib.make_reservation(
g['ec2_conn'],
y['mon']['ami-id'],
key_name=y['keyname'],
instance_type=y['mon']['type'],
user_data=u,
subnet_id=subnet_id,
private_ip_address=mon_node['ip-address'],
master=False,
master_ip=g['master_instance'].private_ip_address,
role='mon',
delegate_no=delegate,
node_no=x
)
mon_node['instance'] = reservation.instances[0]
#PC * Update tags.
init_lib.update_tag( mon_node['instance'], 'Name', y['nametag'] )
init_lib.update_tag( mon_node['instance'], 'Role', 'mon' )
init_lib.update_tag( mon_node['instance'], 'Delegate', delegate )
init_lib.update_tag( mon_node['instance'], 'Monitor', x )
#PC * Create OSD volume.
mon_node['volume'] = g['ec2_conn'].create_volume( volume_size, mon_node['instance'].placement )
init_lib.update_tag( mon_node['volume'], 'Name', y['nametag'] )
init_lib.update_tag( mon_node['instance'], 'Role', 'mon' )
init_lib.update_tag( mon_node['volume'], 'Delegate', delegate )
init_lib.update_tag( mon_node['volume'], 'Monitor', x )
for x in range(1, 4):
mon_node = g['mon{}_node'.format(x)][delegate]
instance = mon_node['instance']
volume = mon_node['volume']
#PC * Make sure node state is "running" (wait if necessary).
init_lib.wait_for_running( g['ec2_conn'], instance.id )
#PC * Make sure volume status is "available" (wait if necessary).
init_lib.wait_for_available( g['ec2_conn'], volume.id )
#PC * Attach the OSD volume to the mon node.
if not g['ec2_conn'].attach_volume( volume.id, instance.id, '/dev/sdb' ):
raise SpinupError( "Failed to attach volume {} to instance {}".format(
volume.id,
instance.id
) )
#PC * Create 1 osd-only node:
print "Create 1 osd-only node"
#PC * Derive IP address of osd node (host offset 14 within the delegate's subnet)
g['osd_node'][delegate]['ip-address'] = init_lib.derive_ip_address(
    y['subnets'][0]['cidr-block'],
    delegate,
    14
)
#PC * Process osd-only node user-data
u = init_lib.process_user_data(
    y['osd']['user-data'],
    y['osd']['replace-from-environment']
)
#PC * Spin up osd-only node instance
reservation = init_lib.make_reservation(
    g['ec2_conn'],
    y['osd']['ami-id'],
    key_name=y['keyname'],
    instance_type=y['osd']['type'],
    user_data=u,
    subnet_id=subnet_id,
    private_ip_address=g['osd_node'][delegate]['ip-address'],
    master=False,
    master_ip=g['master_instance'].private_ip_address,
    role='osd',
    delegate_no=delegate,
    node_no=4
)
g['osd_node'][delegate]['instance'] = reservation.instances[0]
#PC * Set osd-only node tags
init_lib.update_tag( g['osd_node'][delegate]['instance'], 'Name', y['nametag'] )
init_lib.update_tag( g['osd_node'][delegate]['instance'], 'Role', 'osd' )
init_lib.update_tag( g['osd_node'][delegate]['instance'], 'Delegate', delegate )
#PC * Create OSD volume.
osd_node = g['osd_node'][delegate]
osd_node['volume'] = g['ec2_conn'].create_volume( volume_size, osd_node['instance'].placement )
init_lib.update_tag( osd_node['volume'], 'Name', y['nametag'] )
init_lib.update_tag( osd_node['volume'], 'Role', 'osd' )
init_lib.update_tag( osd_node['volume'], 'Delegate', delegate )
# NOTE(review): 'x' here is the stale loop variable from the mon loop above
# (always 3 at this point), so the osd volume gets tagged Monitor=3 --
# looks like a copy-paste leftover; confirm whether this tag is wanted.
init_lib.update_tag( osd_node['volume'], 'Monitor', x )
instance = osd_node['instance']
volume = osd_node['volume']
#PC * Make sure node state is "running" (wait if necessary).
init_lib.wait_for_running( g['ec2_conn'], instance.id )
#PC * Make sure volume status is "available" (wait if necessary).
init_lib.wait_for_available( g['ec2_conn'], volume.id )
#PC * Attach the OSD volume to the osd node.
if not g['ec2_conn'].attach_volume( volume.id, instance.id, '/dev/sdb' ):
    raise SpinupError( "Failed to attach volume {} to instance {}".format(
        volume.id,
        instance.id
    ) )
#PC * Create 1 windows node:
print "Create 1 windows node"
#PC * Derive IP address of windows node (host offset 15 within the delegate's subnet)
g['windows_node'][delegate]['ip-address'] = init_lib.derive_ip_address(
    y['subnets'][0]['cidr-block'],
    delegate,
    15
)
#PC * Process windows node user-data
u = init_lib.process_user_data(
    y['windows']['user-data'],
    y['windows']['replace-from-environment']
)
#PC * Spin up windows node instance
reservation = init_lib.make_reservation(
    g['ec2_conn'],
    y['windows']['ami-id'],
    key_name=y['keyname'],
    instance_type=y['windows']['type'],
    user_data=u,
    subnet_id=subnet_id,
    private_ip_address=g['windows_node'][delegate]['ip-address'],
    master=False,
    master_ip=g['master_instance'].private_ip_address,
    role='windows',
    delegate_no=delegate,
    node_no=5
)
g['windows_node'][delegate]['instance'] = reservation.instances[0]
#PC * Set windows node tags
init_lib.update_tag( g['windows_node'][delegate]['instance'], 'Name', y['nametag'] )
init_lib.update_tag( g['windows_node'][delegate]['instance'], 'Role', 'windows' )
init_lib.update_tag( g['windows_node'][delegate]['instance'], 'Delegate', delegate )
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent.futures import ThreadPoolExecutor
import itertools
import logging
import os
from google.cloud import bigquery
# Decimal (SI) unit factors relative to one gigabyte; used by
# get_resized_table_name() to pick a human-readable size label.
MB_IN_GB = 1000
KB_IN_GB = 1000000
BYTES_IN_GB = 1000000000
class StagingTableGenerator(object):
    """Generating staging tables in BigQuery of particular sizes that can later
    be extracted into files.

    Utilizes the Dataflow Data Generator tool from the PSO library. Uses the
    dataflow_data_generator module to create staging tables using the columnType
    and numColumn parameters, and then the bq_table_resizer module to create
    resized staging tables with the targetDataSizes parameter.

    Attributes:
        project(str): ID of the project that holds the staging tables and
            resized staging tables.
        staging_dataset_id(str): ID of the dataset that holds the staging
            tables.
        resized_dataset_id(str): ID of the dataset that holds the
            resized staging tables.
        json_schema_path(str): Directory that holds the json schemas used
            to create the staging tables.
        file_params(dict): Benchmark parameters (columnTypes, numColumns,
            targetDataSizes, ...).
        num_rows(int): Row count for each generated staging table.
    """
    def __init__(self, project, staging_dataset_id, resized_dataset_id,
                 json_schema_path, file_params, num_rows):
        self.bq_client = bigquery.Client()
        self.project = project
        self.staging_dataset_id = staging_dataset_id
        self.resized_dataset_id = resized_dataset_id
        self.json_schema_path = json_schema_path
        self.file_params = file_params
        self.num_rows = num_rows
    def create_staging_tables(
            self,
            dataflow_staging_location,
            dataflow_temp_location,
    ):
        """Creates staging tables using the columnType and numColumn parameters.

        Utilizes the data_generator_pipeline module from the Dataflow Data
        Generator tool to create staging tables. Names of schema combinations
        are created using the columnTypes and numColumn parameters. Then the
        schema names are used to obtain json schemas from the provided
        self.json_schema_path, which are then fed into to data generator
        pipeline to create staging tables.

        Args:
            dataflow_staging_location(str): GCS staging path.
            dataflow_temp_location(str): GCS temp path.
        """
        def _create_table(table_details):
            # One (column_type, num_column) pair per call; invoked in
            # parallel by the ThreadPoolExecutor below.
            column_type, num_column = table_details
            schema_name = '{0:s}_{1:d}'.format(column_type, num_column)
            logging.info(
                'Creating staging table for schema: {0:s}'.format(schema_name))
            # NOTE(review): shells out via os.system; arguments are not
            # escaped, so paths/IDs must not contain shell metacharacters.
            # data_gen_path is a closure over the local defined after this
            # def -- safe because _create_table only runs inside p.map().
            command = ('python {0:s}/data_generator_pipeline.py '
                       '--schema_file={1:s}/{2:s}.json '
                       '--num_records={3:d} '
                       '--output_bq_table={4:s}:{5:s}.{2:s} '
                       '--project={4:s} '
                       '--setup_file={0:s}/setup.py '
                       '--staging_location={6:s} '
                       '--temp_location={7:s} '
                       '--save_main_session '
                       '--worker_machine_type=n1-highcpu-32 '
                       '--runner=DataflowRunner ').format(
                data_gen_path,
                self.json_schema_path,
                schema_name,
                self.num_rows,
                self.project,
                self.staging_dataset_id,
                dataflow_staging_location,
                dataflow_temp_location,
            )
            os.system(command)
        column_types = self.file_params['columnTypes']
        num_columns = self.file_params['numColumns']
        # Locate the data generator pipeline relative to this file.
        abs_path = os.path.abspath(os.path.dirname(__file__))
        data_gen_path = os.path.join(
            abs_path, '../../dataflow-data-generator/data-generator-pipeline')
        # Launch one Dataflow job per (columnType, numColumn) combination.
        with ThreadPoolExecutor() as p:
            p.map(_create_table, itertools.product(column_types, num_columns))
    def create_resized_tables(self):
        """Creates resized staging tables using the targetDataSizes parameters.

        Utilizes the bq_table_resizer module from the Dataflow Data
        Generator tool to create resized staging tables.
        """
        staging_dataset_ref = self.bq_client.dataset(self.staging_dataset_id)
        staging_dataset = bigquery.Dataset(staging_dataset_ref)
        sizes = self.file_params['targetDataSizes']
        # Ascending order so each resize can build on the previous one.
        sizes.sort()
        abs_path = os.path.abspath(os.path.dirname(__file__))
        bq_resizer_path = os.path.join(
            abs_path, '../../dataflow-data-generator/bigquery-scripts')
        # Gather staging tables that were created in
        # self.create_staging_tables()
        tables = list(self.bq_client.list_tables(staging_dataset))
        for table in tables:
            for i in range(len(sizes)):
                # If the size of the current iteration is the smallest size
                # in the sizes list, use the corresponding staging table
                # created in create_resized_table() as the source base table.
                # Otherwise, if the file size of the current iteration is
                # greater than the last iteration, use the resized table from
                # the previous iteration as the source base table when running
                # so that each table can take advantage of the size of the last.
                # (For i == 0, sizes[i - 1] is sizes[-1] -- the largest size --
                # so the comparison is False and the staging table is used.)
                size = sizes[i]
                if size > sizes[i - 1]:
                    base = sizes[i - 1]
                    source_table = get_resized_table_name(table.table_id, base)
                    source_dataset = self.resized_dataset_id
                else:
                    source_table = table.table_id
                    source_dataset = self.staging_dataset_id
                destination_table = get_resized_table_name(table.table_id, size)
                target_gb = size
                command_str = ('python {0:s}/bq_table_resizer.py '
                               '--project {1:s} '
                               '--source_dataset {2:s} '
                               '--source_table {3:s} '
                               '--destination_dataset {4:s} '
                               '--destination_table {5:s} '
                               '--target_gb {6:f} ')
                command = command_str.format(
                    bq_resizer_path,
                    self.project,
                    source_dataset,
                    source_table,
                    self.resized_dataset_id,
                    destination_table,
                    target_gb,
                )
                os.system(command)
                logging.info(
                    'Created resized table from {0:s}'.format(source_table))
                logging.info(
                    'Resized table complete: {0:s}'.format(destination_table))
def get_resized_table_name(table_id, size):
    """Creates a name for resized tables.

    Ensures that the file size embedded in the resized staging table name is
    an integer >= 1, since BigQuery table names can not have '.' characters.

    Args:
        table_id(str): Name of the staging table used to generate the
            resized staging table.
        size(float): Size of the resized staging table in GB.

    Returns:
        The name of the resized staging table containing a size and unit
        without decimals in a string format. For example, if the provided
        table_id was 100_STRING_10 and the provided size was .01, the returned
        name would be 100_STRING_10_10MB.
    """
    if size >= 1:
        # BUGFIX: cast to int. The '{1:d}' format below raises ValueError
        # for a float size (e.g. 2.0), and targetDataSizes values are
        # floats. Fractional GB sizes >= 1 are truncated to whole GB.
        label_size = int(size)
        label_unit = 'GB'
    elif size >= .001:
        label_size = int(size * MB_IN_GB)
        label_unit = 'MB'
    elif size >= .000001:
        label_size = int(size * KB_IN_GB)
        label_unit = 'KB'
    else:
        label_size = int(size * BYTES_IN_GB)
        label_unit = 'B'
    return '{0:s}_{1:d}{2:s}'.format(table_id, label_size, label_unit)
|
<filename>app/customer/models/first_charge_activity.py<gh_stars>1-10
# coding=utf-8
import datetime
import logging
from base.settings import CHATPAMONGO
from django.conf import settings
from mongoengine import *
from app.customer.models.user import *
from app.customer.models.tools import *
from app.customer.models.vip import *
# Open the module-level MongoDB connection used by all documents below.
# NOTE(review): the password literal was redacted on export ("<PASSWORD>"
# placeholder) -- restore it from configuration/secrets before running.
connect(CHATPAMONGO.db, host=CHATPAMONGO.host, port=CHATPAMONGO.port, username=CHATPAMONGO.username,
        password=<PASSWORD>)
class FirstChargeActivity(Document):
temp_activity_type = IntField(verbose_name=u"活动类型")
name = StringField(max_length=32, verbose_name=u'活动名称')
recharge_min = IntField(verbose_name=u"最小充值金额")
recharge_max = IntField(verbose_name=u"最大充值金额")
tool_data = StringField(verbose_name=u"道具 数据")
vip_data = StringField(verbose_name=u"vip 数据")
create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now())
end_time = DateTimeField(verbose_name=u"截止时间")
is_valid = IntField(verbose_name=u'是否有效', default = 1) # 1有效 0 无效
@classmethod
def create_reward(cls, user, money):
user_id = user.id
now = datetime.datetime.now()
yuan_money = int(money/100)
activity = FirstChargeActivity.objects.filter(temp_activity_type=1, recharge_min__lte=money,
recharge_max__gt=money, is_valid=1).first()
if not activity:
max_act = FirstChargeActivity.objects.filter(temp_activity_type=1, is_valid=1, recharge_min=None).order_by("-recharge_max").first()
max_recharge = max_act.recharge_max
if max_recharge <= money:
activity = max_act
tool_data = activity.tool_data
tool_dic = None
vip_data = activity.vip_data
vip_dic = None
if tool_data:
tool_dic = eval(tool_data)
if vip_data:
vip_dic = eval(vip_data)
# 发放奖励
if tool_dic:
for key, value in tool_dic.items():
tools = Tools.objects.filter(tools_type=int(key)).first() # 道具
user_tools = UserTools()
user_tools.user_id = user_id
user_tools.tools_id = str(tools.id)
user_tools.tools_count = int(value)
user_tools.time_type = 0
user_tools.get_type = 4
invalid_time = now + datetime.timedelta(days=1)
user_tools.invalid_time = invalid_time
user_tools.save()
tools_record = UserToolsRecord()
tools_record.user_id = user_id
tools_record.tools_id = str(tools.id)
tools_record.tools_count = 1
tools_record.time_type = 0
tools_record.oper_type = 6
tools_record.create_time = now
tools_record.save()
if vip_dic:
vip_name = ""
start_time = ""
end_time = ""
for key, value in vip_dic.items():
print key
print value
vip = Vip.objects.filter(vip_type=int(key), is_valid=1).first()
if int(key) == 1:
vip_name = "高级"
elif int(key) == 2:
vip_name = "超级"
user_vip = UserVip.objects.filter(user_id=user_id).first()
vip_id = str(vip.id)
now = datetime.datetime.now()
days = 30 * int(value) + 1
print days, "days==="
if user_vip:
user_vip.vip_id = vip_id
end_time = user_vip.end_time + datetime.timedelta(days=days)
else:
user_vip = UserVip()
user_vip.user_id = user_id
user_vip.vip_id = vip_id
user_vip.create_time = now
end_time = now + datetime.timedelta(days=days)
user_vip.end_time = end_time
user_vip.save()
start_time = user_vip.create_time.strftime('%Y年%m月%d日')
end_time = user_vip.end_time.strftime('%Y年%m月%d日')
desc = u"<html><p>" + _(u"亲的的用户,充值金额%s元,活动奖励已到账、赠送%sVIP已开启,有效时间为%s-%s,祝您玩的开心,聊得愉快") % (unicode(yuan_money), vip_name, start_time, end_time ) + u"</p></html>"
else:
desc = u"<html><p>" + _(u"亲爱的用户,充值金额%s元,活动奖励已发放到您的账户中,请注意查收哦~祝您玩的开心,聊得愉快") % unicode(yuan_money) + u"</p></html>"
# MessageSender.send_system_message(user_id, desc)
print desc, "======================="
@classmethod
def update(cls):
activity_list = FirstChargeActivity.objects.filter(temp_activity_type=1, is_valid=1)
now = datetime.datetime.now()
end_time = datetime.datetime(now.year, now.month, now.day) + datetime.timedelta(days=31)
for activity in activity_list:
activity.end_time = end_time
activity.save()
@classmethod
def init(cls):
act1 = FirstChargeActivity()
act1.temp_activity_type = 1
act1.name = "财神驾到(首充)"
tool_dic1 = {
"0": "10"
}
act1.recharge_min = 100
act1.recharge_max = 5000
act1.tool_data = str(tool_dic1)
act1.create_time = datetime.datetime.now()
act1.is_valid = 1
act1.save()
act2 = FirstChargeActivity()
act2.temp_activity_type = 1
act2.name = "财神驾到(首充)"
tool_dic2 = {
"0": "10",
"1": "5"
}
act2.recharge_min = 5000
act2.recharge_max = 10000
act2.tool_data = str(tool_dic2)
act2.create_time = datetime.datetime.now()
act2.is_valid = 1
act2.save()
act3 = FirstChargeActivity()
act3.temp_activity_type = 1
act3.name = "财神驾到(首充)"
tool_dic3 = {
"0": "5",
"1": "5",
"2": "5"
}
act3.recharge_min = 10000
act3.recharge_max = 50000
act3.tool_data = str(tool_dic3)
act3.create_time = datetime.datetime.now()
act3.is_valid = 1
act3.save()
act4 = FirstChargeActivity()
act4.temp_activity_type = 1
act4.name = "财神驾到(首充)"
tool_dic4 = {
"0": "5",
"1": "5"
}
vip_dic4 = {
"2": "1"
}
act4.recharge_min = 50000
act4.recharge_max = 100000
act4.tool_data = str(tool_dic4)
act4.vip_data = str(vip_dic4)
act4.create_time = datetime.datetime.now()
act4.is_valid = 1
act4.save()
act5 = FirstChargeActivity()
act5.temp_activity_type = 1
act5.name = "财神驾到(首充)"
tool_dic5 = {
"0": "10",
"1": "5",
"2": "5"
}
vip_dic5 = {
"2": "2"
}
act5.recharge_max = 100000
act5.tool_data = str(tool_dic5)
act5.vip_data = str(vip_dic5)
act5.create_time = datetime.datetime.now()
act5.is_valid = 1
act5.save()
cls.update()
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks third-party licenses for the purposes of the Android WebView build.
The Android tree includes a snapshot of Chromium in order to power the system
WebView. This tool checks that all code uses open-source licenses compatible
with Android, and that we meet the requirements of those licenses. It can also
be used to generate an Android NOTICE file for the third-party code.
It makes use of src/tools/licenses.py and the README.chromium files on which
it depends. It also makes use of a data file, third_party_files_whitelist.txt,
which whitelists indicidual files which contain third-party code but which
aren't in a third-party directory with a README.chromium file.
"""
import optparse
import os
import re
import subprocess
import sys
import textwrap
# Repository root is two levels up from this script's directory
# (android_webview/tools/ -> src/).
REPOSITORY_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(__file__), '..', '..'))
# Make src/tools importable so licenses.py can be reused.
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools'))
import licenses
import known_issues
def GetIncompatibleDirectories():
  """Gets a list of third-party directories which use licenses incompatible
  with Android. This is used by the snapshot tool.
  Returns:
    A list of directories.
  """
  # Regex fragments describing license names that ARE Android-compatible.
  whitelist = [
    'Apache( Version)? 2(\.0)?',
    '(New )?BSD( [23]-Clause)?( with advertising clause)?',
    'L?GPL ?v?2(\.[01])?( or later)?',
    'MIT(/X11)?(-like)?',
    'MPL 1\.1 ?/ ?GPL 2(\.0)? ?/ ?LGPL 2\.1',
    'MPL 2(\.0)?',
    'Microsoft Limited Public License',
    'Microsoft Permissive License',
    'Public Domain',
    'SGI Free Software License B',
    'X11',
  ]
  regex = '^(%s)$' % '|'.join(whitelist)
  result = []
  for directory in _FindThirdPartyDirs():
    # Directories with known license issues are always reported.
    if directory in known_issues.KNOWN_ISSUES:
      result.append(directory)
      continue
    try:
      metadata = licenses.ParseDir(directory, REPOSITORY_ROOT,
                                   require_license_file=False)
    except licenses.LicenseError as e:
      print 'Got LicenseError while scanning ' + directory
      raise
    # README.chromium may declare compatibility explicitly.
    if metadata.get('License Android Compatible', 'no').upper() == 'YES':
      continue
    # Split compound licenses ("X and Y", "X,Y") and require every part
    # to match the whitelist.
    license = re.split(' [Ll]icenses?$', metadata['License'])[0]
    tokens = [x.strip() for x in re.split(' and |,', license) if len(x) > 0]
    for token in tokens:
      if not re.match(regex, token, re.IGNORECASE):
        result.append(directory)
        break
  return result
class ScanResult(object):
  """Scan outcome codes, ordered by severity (also used as exit status)."""
  # Equivalent to: Ok, Warnings, Errors = range(3)
  Ok = 0
  Warnings = 1
  Errors = 2
def _CheckLicenseHeaders(excluded_dirs_list, whitelisted_files):
  """Checks that all files which are not in a listed third-party directory,
  and which do not use the standard Chromium license, are whitelisted.
  Args:
    excluded_dirs_list: The list of directories to exclude from scanning.
    whitelisted_files: The whitelist of files.
  Returns:
    ScanResult.Ok if all files with non-standard license headers are whitelisted
    and the whitelist contains no stale entries;
    ScanResult.Warnings if there are stale entries;
    ScanResult.Errors if new non-whitelisted entries found.
  """
  excluded_dirs_list = [d for d in excluded_dirs_list if not 'third_party' in d]
  # Using a commond pattern for third-partyies makes the ignore regexp shorter
  excluded_dirs_list.append('third_party')
  # VCS dirs
  excluded_dirs_list.append('.git')
  excluded_dirs_list.append('.svn')
  # Build output
  excluded_dirs_list.append('out/Debug')
  excluded_dirs_list.append('out/Release')
  # 'Copyright' appears in license agreements
  excluded_dirs_list.append('chrome/app/resources')
  # This is a test output directory
  excluded_dirs_list.append('chrome/tools/test/reference_build')
  # This is tests directory, doesn't exist in the snapshot
  excluded_dirs_list.append('content/test/data')
  # This is a test output directory
  excluded_dirs_list.append('data/dom_perf')
  # Histogram tools, doesn't exist in the snapshot
  excluded_dirs_list.append('tools/histograms')
  # Arm sysroot tools, doesn't exist in the snapshot
  excluded_dirs_list.append('arm-sysroot')
  # Data is not part of open source chromium, but are included on some bots.
  excluded_dirs_list.append('data')
  # find_copyrights.pl prints one "<path>\t<copyright> [/ <copyright>...]"
  # line per scanned file.
  args = ['android_webview/tools/find_copyrights.pl',
          '.'
         ] + excluded_dirs_list
  p = subprocess.Popen(args=args, cwd=REPOSITORY_ROOT, stdout=subprocess.PIPE)
  lines = p.communicate()[0].splitlines()
  offending_files = []
  # Only "no copyright" or a Chromium Authors copyright is acceptable
  # outside of third-party directories.
  allowed_copyrights = '^(?:\*No copyright\*' \
      '|20[0-9][0-9](?:-20[0-9][0-9])? The Chromium Authors\. ' \
      'All rights reserved.*)$'
  allowed_copyrights_re = re.compile(allowed_copyrights)
  for l in lines:
    entries = l.split('\t')
    if entries[1] == "GENERATED FILE":
      continue
    copyrights = entries[1].split(' / ')
    for c in copyrights:
      if c and not allowed_copyrights_re.match(c):
        offending_files.append(os.path.normpath(entries[0]))
        break
  # Compare scan results against the whitelist in both directions.
  unknown = set(offending_files) - set(whitelisted_files)
  if unknown:
    print 'The following files contain a third-party license but are not in ' \
          'a listed third-party directory and are not whitelisted. You must ' \
          'add the following files to the whitelist.\n%s' % \
          '\n'.join(sorted(unknown))
  stale = set(whitelisted_files) - set(offending_files)
  if stale:
    print 'The following files are whitelisted unnecessarily. You must ' \
          ' remove the following files from the whitelist.\n%s' % \
          '\n'.join(sorted(stale))
  if unknown:
    return ScanResult.Errors
  elif stale:
    return ScanResult.Warnings
  else:
    return ScanResult.Ok
def _ReadFile(path):
  """Reads a file from disk.
  Args:
    path: The path of the file to read, relative to the root of the repository.
  Returns:
    The contents of the file as a string.
  """
  # Use a context manager so the handle is closed promptly (the original
  # left it open until garbage collection).
  with open(os.path.join(REPOSITORY_ROOT, path), 'rb') as f:
    return f.read()
def _FindThirdPartyDirs():
  """Gets the list of third-party directories.
  Returns:
    The list of third-party directories.
  """
  # Please don't add here paths that have problems with license files,
  # as they will end up included in Android WebView snapshot.
  # Instead, add them into known_issues.py.
  prune_paths = [
    # Placeholder directory, no third-party code.
    os.path.join('third_party', 'adobe'),
    # Apache 2.0 license. See
    # https://code.google.com/p/chromium/issues/detail?id=140478.
    os.path.join('third_party', 'bidichecker'),
    # Isn't checked out on clients
    os.path.join('third_party', 'gles2_conform'),
    # The llvm-build doesn't exist for non-clang builder
    os.path.join('third_party', 'llvm-build'),
    # Binaries doesn't apply to android
    os.path.join('third_party', 'widevine'),
    # third_party directories in this tree aren't actually third party, but
    # provide a way to shadow experimental buildfiles into those directories.
    os.path.join('tools', 'gn', 'secondary'),
  ]
  third_party_dirs = licenses.FindThirdPartyDirs(prune_paths, REPOSITORY_ROOT)
  # Drop directories that contain no files (nothing to license-check).
  return licenses.FilterDirsWithFiles(third_party_dirs, REPOSITORY_ROOT)
def _Scan():
  """Checks that license meta-data is present for all third-party code and
  that all non third-party code doesn't contain external copyrighted code.
  Returns:
    ScanResult.Ok if everything is in order;
    ScanResult.Warnings if there are non-fatal problems (e.g. stale whitelist
      entries)
    ScanResult.Errors otherwise.
  """
  third_party_dirs = _FindThirdPartyDirs()
  # First, check designated third-party directories using src/tools/licenses.py.
  all_licenses_valid = True
  for path in sorted(third_party_dirs):
    try:
      licenses.ParseDir(path, REPOSITORY_ROOT)
    except licenses.LicenseError, e:
      # Known-broken directories don't fail the scan.
      if not (path in known_issues.KNOWN_ISSUES):
        print 'Got LicenseError "%s" while scanning %s' % (e, path)
        all_licenses_valid = False
  # Second, check for non-standard license text.
  files_data = _ReadFile(os.path.join('android_webview', 'tools',
                                      'third_party_files_whitelist.txt'))
  whitelisted_files = []
  # Each whitelist line starts with a path; '#'-comments and blanks ignored.
  for line in files_data.splitlines():
    match = re.match(r'([^#\s]+)', line)
    if match:
      whitelisted_files.append(match.group(1))
  licenses_check = _CheckLicenseHeaders(third_party_dirs, whitelisted_files)
  # License metadata errors dominate header-scan warnings.
  return licenses_check if all_licenses_valid else ScanResult.Errors
def GenerateNoticeFile():
  """Generates the contents of an Android NOTICE file for the third-party code.
  This is used by the snapshot tool.
  Returns:
    The contents of the NOTICE file.
  """
  third_party_dirs = _FindThirdPartyDirs()
  # Don't forget Chromium's LICENSE file
  content = [_ReadFile('LICENSE')]
  # We provide attribution for all third-party directories.
  # TODO(steveblock): Limit this to only code used by the WebView binary.
  for directory in sorted(third_party_dirs):
    metadata = licenses.ParseDir(directory, REPOSITORY_ROOT,
                                 require_license_file=False)
    license_file = metadata['License File']
    # NOT_SHIPPED entries have no license text to include.
    if license_file and license_file != licenses.NOT_SHIPPED:
      content.append(_ReadFile(license_file))
  return '\n'.join(content)
def main():
  """Command-line entry point: 'scan' or 'notice'. Returns a ScanResult code."""
  class FormatterWithNewLines(optparse.IndentedHelpFormatter):
    # Preserve paragraph breaks in the --help description.
    def format_description(self, description):
      paras = description.split('\n')
      formatted_paras = [textwrap.fill(para, self.width) for para in paras]
      return '\n'.join(formatted_paras) + '\n'
  parser = optparse.OptionParser(formatter=FormatterWithNewLines(),
                                 usage='%prog [options]')
  parser.description = (__doc__ +
                       '\nCommands:\n' \
                       '  scan  Check licenses.\n' \
                       '  notice Generate Android NOTICE file on stdout')
  (options, args) = parser.parse_args()
  if len(args) != 1:
    parser.print_help()
    return ScanResult.Errors
  if args[0] == 'scan':
    scan_result = _Scan()
    if scan_result == ScanResult.Ok:
      print 'OK!'
    return scan_result
  elif args[0] == 'notice':
    print GenerateNoticeFile()
    return ScanResult.Ok
  # Unknown command.
  parser.print_help()
  return ScanResult.Errors
# Script entry point: the process exit status is the ScanResult code.
if __name__ == '__main__':
  sys.exit(main())
|
"""YIN output plugin"""
import optparse
import re
from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
from .. import grammar
from .. import plugin
from .. import statements
from .. import syntax
from .. import util
# XML namespace of the YIN 1.0 format (RFC 6020, section 11).
yin_namespace = "urn:ietf:params:xml:ns:yang:yin:1"
def pyang_plugin_init():
    """Called by pyang at startup; registers the YIN output plugin."""
    plugin.register_plugin(YINPlugin())
class YINPlugin(plugin.PyangPlugin):
    """pyang output plugin that serializes a YANG module to YIN (XML)."""
    def add_opts(self, optparser):
        # Register the YIN-specific command-line switches.
        optlist = [
            optparse.make_option("--yin-canonical",
                                 dest="yin_canonical",
                                 action="store_true",
                                 help="Print in canonical order"),
            optparse.make_option("--yin-pretty-strings",
                                 dest="yin_pretty_strings",
                                 action="store_true",
                                 help="Pretty print strings"),
            ]
        g = optparser.add_option_group("YIN output specific options")
        g.add_options(optlist)
    def add_output_format(self, fmts):
        # Map the 'yin' output format name to this plugin instance.
        fmts['yin'] = self
    def emit(self, ctx, modules, fd):
        # Only the first module given on the command line is emitted.
        module = modules[0]
        emit_yin(ctx, module, fd)
def emit_yin(ctx, module, fd):
    """Write *module* as a YIN XML document to the file object *fd*.

    Emits the root <module>/<submodule> element with the YIN namespace plus
    one xmlns: declaration per known prefix, then recurses into the
    substatements via emit_stmt().
    """
    fd.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    fd.write('<%s name="%s"\n' % (module.keyword, module.arg))
    # Continuation lines are indented to align under the element name.
    fd.write(' ' * len(module.keyword) + ' xmlns="%s"' % yin_namespace)
    prefix = module.search_one('prefix')
    if prefix is not None:
        # A module declares its own namespace directly.
        namespace = module.search_one('namespace')
        fd.write('\n')
        fd.write(' ' * len(module.keyword))
        fd.write(' xmlns:' + prefix.arg + '=' +
                 quoteattr(namespace.arg))
    else:
        # A submodule inherits the namespace of the module it belongs to.
        belongs_to = module.search_one('belongs-to')
        if belongs_to is not None:
            prefix = belongs_to.search_one('prefix')
            if prefix is not None:
                # read the parent module in order to find the namespace uri
                res = ctx.read_module(belongs_to.arg, extra={'no_include':True})
                if res is not None:
                    namespace = res.search_one('namespace')
                    if namespace is None or namespace.arg is None:
                        pass
                    else:
                        # success - namespace found
                        fd.write('\n')
                        fd.write(' ' * len(module.keyword))
                        fd.write(' xmlns:' + prefix.arg + '=' +
                                 quoteattr(namespace.arg))
    # One xmlns: declaration per imported module whose namespace is known.
    for imp in module.search('import'):
        prefix = imp.search_one('prefix')
        if prefix is not None:
            rev = None
            r = imp.search_one('revision-date')
            if r is not None:
                rev = r.arg
            mod = statements.modulename_to_module(module, imp.arg, rev)
            if mod is not None:
                ns = mod.search_one('namespace')
                if ns is not None:
                    fd.write('\n')
                    fd.write(' ' * len(module.keyword))
                    fd.write(' xmlns:' + prefix.arg + '=' +
                             quoteattr(ns.arg))
    fd.write('>\n')
    if ctx.opts.yin_canonical:
        substmts = grammar.sort_canonical(module.keyword, module.substmts)
    else:
        substmts = module.substmts
    for s in substmts:
        emit_stmt(ctx, module, s, fd, ' ', ' ')
    fd.write('</%s>\n' % module.keyword)
def emit_stmt(ctx, module, stmt, fd, indent, indentstep):
    """Recursively write one YANG statement as a YIN XML element.

    The statement argument is rendered either as an XML attribute or as a
    child element, as dictated by the YIN mapping (syntax.yin_map for core
    keywords, the extension's 'argument'/'yin-element' declaration for
    extension keywords).
    """
    if util.is_prefixed(stmt.raw_keyword):
        # this is an extension. need to find its definition
        (prefix, identifier) = stmt.raw_keyword
        tag = prefix + ':' + identifier
        if stmt.i_extension is not None:
            ext_arg = stmt.i_extension.search_one('argument')
            if ext_arg is not None:
                yin_element = ext_arg.search_one('yin-element')
                if yin_element is not None and yin_element.arg == 'true':
                    # Argument is carried as a child element.
                    argname = prefix + ':' + ext_arg.arg
                    argiselem = True
                else:
                    # explicit false or no yin-element given
                    argname = ext_arg.arg
                    argiselem = False
            else:
                argiselem = False
                argname = None
        else:
            argiselem = False
            argname = None
    else:
        # Core YANG keyword: look up its YIN argument mapping.
        (argname, argiselem) = syntax.yin_map[stmt.raw_keyword]
        tag = stmt.raw_keyword
    if argiselem == False or argname is None:
        # Argument (if any) is rendered as an XML attribute.
        if argname is None:
            attr = ''
        else:
            attr = ' ' + argname + '=' + quoteattr(stmt.arg)
        if len(stmt.substmts) == 0:
            fd.write(indent + '<' + tag + attr + '/>\n')
        else:
            fd.write(indent + '<' + tag + attr + '>\n')
            for s in stmt.substmts:
                emit_stmt(ctx, module, s, fd, indent + indentstep,
                          indentstep)
            fd.write(indent + '</' + tag + '>\n')
    else:
        # Argument is rendered as a child element.
        fd.write(indent + '<' + tag + '>\n')
        if ctx.opts.yin_pretty_strings:
            # since whitespace is significant in XML, the current
            # code is strictly speaking incorrect. But w/o the whitespace,
            # it looks too ugly.
            fd.write(indent + indentstep + '<' + argname + '>\n')
            fd.write(fmt_text(indent + indentstep + indentstep, stmt.arg))
            fd.write('\n' + indent + indentstep + '</' + argname + '>\n')
        else:
            fd.write(indent + indentstep + '<' + argname + '>' + \
                       escape(stmt.arg) + \
                       '</' + argname + '>\n')
        if ctx.opts.yin_canonical:
            substmts = grammar.sort_canonical(stmt.keyword, stmt.substmts)
        else:
            substmts = stmt.substmts
        for s in substmts:
            emit_stmt(ctx, module, s, fd, indent + indentstep, indentstep)
        fd.write(indent + '</' + tag + '>\n')
def fmt_text(indent, data):
    """XML-escape *data* and prefix every non-empty line with *indent*.

    Newlines are preserved; the trailing line gets no newline appended.
    """
    pieces = []
    # Splitting on a capturing group keeps the '\n' separators in the list.
    for piece in re.split("(\n)", escape(data)):
        if piece == '':
            continue
        if piece == '\n':
            pieces.append(piece)
        else:
            pieces.append(indent + piece)
    return ''.join(pieces)
|
#!/home/francisco/Projects/Pycharm/py-binary-trees-draw/venv/bin/python
# -*- coding: utf-8 -*-
from node import Node
class AVLTree:
    def __init__(self):
        """Create an empty AVL tree with a shared sentinel leaf node."""
        self.root = None
        # Sentinel leaf shared by all nodes; its height of -1 makes the
        # balance-factor arithmetic work at the fringe of the tree.
        self.leaf = Node(None)
        self.leaf.height = -1
        # Bookkeeping dicts used when exporting the tree for drawing.
        self.nodes_dict_aux = {}
        self.nodes_dict = {}
    def insert(self, key):
        """
        Insert key values in tree.
        :param key: numeric.
        :return: self.nodes_dict after rebalancing; False if the key already
            exists; None when the key becomes the root of an empty tree.
        """
        node = Node(key)
        node.left = self.leaf
        node.right = self.leaf
        if not self.root:
            self.root = node
        else:
            current = self.root
            parent = current
            # Standard BST descent; node.height accumulates the depth of
            # the insertion point along the way.
            while current:
                if current == self.leaf:
                    break
                node.height += 1
                parent = current
                if node.key < current.key:
                    current = current.left
                elif node.key > current.key:
                    current = current.right
                elif node.key == current.key:
                    # Duplicate keys are rejected.
                    return False
            node.parent = parent
            if node.key < parent.key:
                parent.left = node
            else:
                parent.right = node
            # Rebalance and refresh the export dicts (helpers defined
            # elsewhere in this class -- not visible here).
            self._calculate_height(node)
            self._fix_violation(node)
            self._recovery_nodes_dict()
            return self.nodes_dict
        # NOTE(review): the first insertion (new root) returns None rather
        # than the dict -- confirm whether callers rely on this.
        return None
    def walk_in_order(self, node=None):
        """
        Walk the tree in symmetric (in-)order -- left subtree, node, right
        subtree -- printing one tab-separated row per node:
        key, parent key, left key, right key, height, balance factor.
        :param node: node object; defaults to the root.
        """
        if not node:
            node = self.root
        if node != self.leaf:
            self.walk_in_order(node.left)
            # Balance factor = height(left) - height(right).
            fb = node.left.height - node.right.height
            if node.parent:
                print('{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(node.key, node.parent.key, node.left.key, node.right.key,
                                                            node.height, fb))
            else:
                # Root has no parent; print None in its place.
                print('{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(node.key, None, node.left.key, node.right.key,
                                                            node.height, fb))
            self.walk_in_order(node.right)
    def walk_pos_order(self, node=None):
        """
        Walk the tree in reverse in-order -- right subtree, node, left
        subtree -- printing one tab-separated row per node:
        key, parent key, left key, right key, height.
        (NOTE(review): despite the name this is not a post-order traversal.)
        :param node: node object; defaults to the root.
        """
        if not node:
            node = self.root
        if node != self.leaf:
            self.walk_pos_order(node.right)
            if node.parent:
                print('{0}\t{1}\t{2}\t{3}\t{4}'.format(node.key, node.parent.key, node.left.key, node.right.key,
                                                       node.height, ))
            else:
                # Root has no parent; print None in its place.
                print('{0}\t{1}\t{2}\t{3}\t{4}'.format(node.key, None, node.left.key, node.right.key, node.height))
            self.walk_pos_order(node.left)
def search(self, value):
"""
Search the node object that key is equal for given value.
:param value: numeric.
:return: node object.
"""
current = self.root
while current and value != current.key:
if not current.key:
return False
if current.key > value:
current = current.left
else:
current = current.right
return current
def minimum(self, node=None):
"""
Search the minimum key in subtree that start from given node.
:param node: node object.
:return: node object.
"""
if not node:
node = self.root
while node.left != self.leaf:
node = node.left
return node
def maximum(self, node=None):
"""
Search the maximum key in subtree that start from given node.
:param node: node object.
:return: node object.
"""
if not node:
node = self.root
while node.right != self.leaf:
node = node.right
return node
def successor(self, value):
"""
Find the largest value in the tree directly above the given value.
:param value: numeric.
:return: object node.
"""
current = self.search(value)
if not current:
return False
elif current.right != self.leaf:
node = self.minimum(current.right)
return node
node = current.parent
while node and current == node.right:
current = node
node = current.parent
if not node:
return self.maximum()
return node
def predecessor(self, value):
"""
It finds in the tree the lowest value directly below the given number.
:param value: numeric.
:return: node object.
"""
current = self.search(value)
if not current:
return False
elif current.left != self.leaf:
node = self.maximum(current.left)
return node
node = current.parent
while node and current == node.left:
current = node
node = current.parent
if not node:
return self.minimum()
return node
def remove(self, value):
"""
Remove node where key is equal of given value.
:param value: numeric
"""
node = self.search(value)
if node == self.root:
return self._remove_root()
elif node.left == self.leaf and node.right == self.leaf:
return self._remove_if_leaf(node)
elif (node.left == self.leaf) ^ (node.right == self.leaf):
return self._remove_if_one_child(node)
else:
return self._remove_if_two_children(node)
def _remove_if_leaf(self, node):
remove_key = node.key
parent = node.parent
if parent.left == node:
parent.left = self.leaf
else:
parent.right = self.leaf
self._calculate_height(parent)
self._fix_violation(parent)
self._recovery_nodes_dict()
del node
return remove_key, None
def _remove_if_one_child(self, node):
remove_key = node.key
if node.parent.left == node:
if node.right == self.leaf:
node.parent.left = node.left
else:
node.parent.left = node.right
else:
if node.right == self.leaf:
node.parent.right = node.left
else:
node.parent.right = node.right
node.left.parent = node.parent
node.right.parent = node.parent
self._calculate_height(node.parent)
self._fix_violation(node.parent)
self._recovery_nodes_dict()
del node
return remove_key, None
def _remove_if_two_children(self, node):
remove_key = node.key
successor = self.successor(node.key)
if successor == node.right:
if node == node.parent.left:
node.parent.left = successor
else:
node.parent.right = successor
successor.parent = node.parent
successor.left = node.left
successor.left.parent = successor
else:
if node == node.parent.left:
node.parent.left = successor
else:
node.parent.right = successor
successor.parent.left = successor.right
successor.left = node.left
successor.right = node.right
node.right.parent = successor
node.left.parent = successor
successor.parent = node.parent
self._calculate_height(node.parent)
self._fix_violation(node.parent)
self._recovery_nodes_dict()
del node
return remove_key, successor.key
def _remove_root(self):
remove_key = self.root.key
successor = None
if self.root.left == self.leaf and self.root.right == self.leaf:
self.root = None
elif (self.root.left == self.leaf) ^ (self.root.right == self.leaf):
if self.root.left != self.leaf:
self.root = self.root.left
else:
self.root = self.root.right
self.root.parent = None
else:
successor = self.successor(self.root.key)
if successor == self.root.right:
successor.parent = None
successor.left = self.root.left
self.root.left.parent = successor
self.root = successor
else:
if successor.right:
successor.right.parent = successor.parent
successor.parent.left = successor.right
successor.left = self.root.left
successor.right = self.root.right
self.root.left.parent = successor
self.root.right.parent = successor
successor.parent = None
self.root = successor
self._calculate_height(self.root)
self._fix_violation(self.root)
self._recovery_nodes_dict()
if successor:
return remove_key, successor.key
else:
return remove_key, None
def _recovery_nodes_dict(self):
# Because fixing the violations mess up the heights of each node we have to first create a dict where the
# keys are the parents and the values are a list of tuples with the childs and their heights.
self.nodes_dict_aux = {} # a dict where keys are parent and values is tuples with child and ir height.
self._make_nodes_dict_aux()
self.nodes_dict = {} # a dict where keys are tuple with parent and chid height and values is tuples
# with child.
self._make_nodes_dict()
def _make_nodes_dict_aux(self, node=None, flag=0):
"""
Recursion function to create dict where the keys are the parents and the values are a list of tuples with the
childs and their heights.
:param node: node object.
:param flag: integer who indicate if node is left or right child.
"""
if not node:
node = self.root
if node != self.root and node != self.leaf:
height = self._calculate_real_height(node)
if not (node.parent.key in self.nodes_dict_aux):
self.nodes_dict_aux[node.parent.key] = [None, None]
self.nodes_dict_aux[node.parent.key][flag] = (node.key, height)
if node != self.leaf:
self._make_nodes_dict_aux(node.left, 0)
self._make_nodes_dict_aux(node.right, 1)
def _make_nodes_dict(self):
for key in self.nodes_dict_aux:
nodes = self.nodes_dict_aux[key]
if nodes[0] and nodes[1]:
_, height = min(nodes, key=lambda x: x[:][1])
# print(nodes[0][0], nodes[1][0], height)
self.nodes_dict[key, height] = [nodes[0][0], nodes[1][0]]
else:
if nodes[0]:
height = nodes[0][1]
self.nodes_dict[key, height] = [nodes[0][0], None]
else:
height = nodes[1][1]
self.nodes_dict[key, height] = [None, nodes[1][0]]
def _calculate_real_height(self, node):
"""
Calculate real height in tree of given node.
:param node: node object.
:return: numeric.
"""
height = 0
current = node
while current != self.root:
height += 1
current = current.parent
return height
def _calculate_height(self, node):
"""
Calculate left and right height of node.
:param node: node object.
"""
current = node
while current:
current.height = max(current.left.height, current.right.height) + 1
current = current.parent
def _fix_violation(self, node):
"""
Verify if is necessary rotate the node.
:param node: node object.
"""
flag = False
previous = node
current = node.parent
while current:
fb1 = current.left.height - current.right.height
fb2 = previous.left.height - previous.right.height
if fb1 >= 2 and fb2 >= 0:
self._rotate_right(current)
flag = True
break
if fb1 <= -2 and fb2 <= 0:
self._rotate_left(current)
flag = True
break
if fb1 >= +2 and fb2 <= 0:
self._rotate_left(previous)
self._rotate_right(current)
flag = True
break
if fb1 <= -2 and fb2 >= 0:
self._rotate_right(previous)
self._rotate_left(current)
flag = True
break
previous = current
current = current.parent
return flag
def _rotate_left(self, x):
"""
Rotate node to left.
:param x: node object.
"""
y = x.right # define y
x.right = y.left # x right now igual y left
y.left.parent = x # y left now is x left
y.parent = x.parent # y parent is x parent
if x == self.root: # if x is root now y is root
self.root = y
elif x == x.parent.left:
x.parent.left = y # if x is the left child, then y is the left child
else:
x.parent.right = y # if x is the right child, then y is the right child
y.left = x # y left now is x
x.parent = y # x parent now is y
x.height -= 2
self._calculate_height(x)
def _rotate_right(self, x):
"""
Rotate node to right.
:param x: node object.
"""
y = x.left
x.left = y.right
y.right.parent = x
y.parent = x.parent
if x == self.root: # if x is root now y is root
self.root = y
elif x == x.parent.left:
x.parent.left = y # if x is the left child, then y is the left child
else:
x.parent.right = y # if x is the right child, then y is the right child
y.right = x
x.parent = y
x.height -= 2
self._calculate_height(x)
if __name__ == '__main__':
    # Demo: build an AVL tree, then delete a few keys, dumping the tree
    # (in-order) and the drawing dict after each batch of mutations.
    SEP = '***********************************************'
    HEADER = 'node\tparent\tleft\tright\theight\tfb'
    bt = AVLTree()
    print(HEADER)
    print(SEP)
    for key in (44, 17, 78, 32, 50, 88, 48, 62, 84, 92, 80, 82):
        bt.insert(key)
    bt.walk_in_order()
    print(SEP)
    print(bt.nodes_dict)
    print(SEP)
    for key in (32, 84, 82):
        bt.remove(key)
        print('remove {}'.format(key))
        print(HEADER)
        print(SEP)
        bt.walk_in_order()
        print(SEP)
        print(bt.nodes_dict)
        print(SEP)
|
from chainer.links import BatchNormalization, GroupNormalization
from chainermn.links import MultiNodeBatchNormalization
from chainer.functions import softmax_cross_entropy
from chainer.optimizers import Adam
from chainer.iterators import MultiprocessIterator, SerialIterator
from chainer.optimizer import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.backends.cuda import get_device_from_id
import chainermn
from src.datasets.msd_bound import MSDBoundDataset
from src.links.model.vaeseg import BoundaryStream, CPCPredictor, Decoder, Encoder, VAE, VD
from src.training.updaters.vaeseg_updater import VAESegUpdater
from src.training.extensions.vaeseg_evaluator import VAESegEvaluator
from src.training.updaters.encdec_seg_updater import EncDecSegUpdater
from src.training.extensions.encdec_seg_evaluator import EncDecSegEvaluator
from src.training.updaters.boundseg_updater import BoundSegUpdater
from src.training.extensions.boundseg_evaluator import BoundSegEvaluator
from src.training.updaters.cpcseg_updater import CPCSegUpdater
from src.training.extensions.cpcseg_evaluator import CPCSegEvaluator
def _setup_communicator(config, gpu_start_id=0):
if config['mn']:
comm = chainermn.create_communicator('pure_nccl')
is_master = (comm.rank == 0)
device = comm.intra_rank + gpu_start_id
else:
comm = None
is_master = True
device = gpu_start_id
return comm, is_master, device
def _setup_datasets(config, comm, is_master):
if is_master:
if config['dataset_name'] == 'msd_bound':
train_data = MSDBoundDataset(config, config['train_list_path'])
validation_data = MSDBoundDataset(config, config['validation_list_path'])
test_data = MSDBoundDataset(config, config['test_list_path'])
validation_data.random_scale = False
test_data.random_scale = False
validation_data.shift_intensity = 0
test_data.shift_intensity = 0
validation_data.random_flip = False
test_data.random_flip = False
validation_data.nb_copies = 1
test_data.nb_copies = 1
validation_data.training = False
test_data.training = False
else:
raise ValueError('Unknown dataset_name: {}'.format(config['dataset_name']))
print('Training dataset size: {}'.format(len(train_data)))
print('Validation dataset size: {}'.format(len(validation_data)))
print('Test dataset size: {}'.format(len(test_data)))
else:
train_data = None
validation_data = None
test_data = None
# scatter dataset
if comm is not None:
train_data = chainermn.scatter_dataset(train_data, comm, shuffle=True)
validation_data = chainermn.scatter_dataset(validation_data, comm, shuffle=True)
test_data = chainermn.scatter_dataset(test_data, comm, shuffle=True)
return train_data, validation_data, test_data
def _setup_vae_segmentor(config, comm=None):
    """Build the Encoder/VD/Decoder/VAE links for the 'vaeseg' model.

    :param config: experiment settings (norm and crop_size are stored as
        python expressions and evaluated here).
    :param comm: optional ChainerMN communicator, forwarded to every link.
    :return: (encoder, embedder, decoder, vae).
    """
    base_channels = config['base_channels']
    ndim_latent = config['vaeseg_ndim_latent']
    norm = eval(config['vaeseg_norm'])
    input_shape = eval(config['crop_size'])
    out_channels = config['nb_labels']
    if config['nested_label']:
        # Two output channels per non-background label.
        out_channels = 2 * (out_channels - 1)
    shared = dict(norm=norm, bn_first=config['vaeseg_bn_first'], comm=comm)
    encoder = Encoder(base_channels=base_channels, ndim_latent=ndim_latent,
                      **shared)
    embedder = VD(channels=8 * base_channels, ndim_latent=ndim_latent,
                  **shared)
    decoder = Decoder(base_channels=base_channels, out_channels=out_channels,
                      mode=config['vaeseg_skip_connect_mode'], **shared)
    vae = VAE(in_channels=config['in_channels'], base_channels=base_channels,
              input_shape=input_shape, **shared)
    return encoder, embedder, decoder, vae
def _setup_vae_segmentor_only(config, comm=None):
    """Build only the Encoder/Decoder pair (plain encoder-decoder model).

    :param config: experiment settings (norm is stored as a python
        expression and evaluated here).
    :param comm: optional ChainerMN communicator, forwarded to both links.
    :return: (encoder, decoder).
    """
    base_channels = config['base_channels']
    norm = eval(config['vaeseg_norm'])
    out_channels = config['nb_labels']
    if config['nested_label']:
        # Two output channels per non-background label.
        out_channels = 2 * (out_channels - 1)
    shared = dict(norm=norm, bn_first=config['vaeseg_bn_first'], comm=comm)
    encoder = Encoder(base_channels=base_channels,
                      ndim_latent=config['vaeseg_ndim_latent'], **shared)
    decoder = Decoder(base_channels=base_channels, out_channels=out_channels,
                      mode=config['vaeseg_skip_connect_mode'], **shared)
    return encoder, decoder
def _setup_cpc_segmentor(config, comm=None):
    """Build Encoder/Decoder plus the CPC predictor head.

    :param config: experiment settings (norm and crop_size are stored as
        python expressions and evaluated here).
    :param comm: optional ChainerMN communicator, forwarded to every link.
    :return: (encoder, decoder, cpcpred1).
    """
    base_channels = config['base_channels']
    norm = eval(config['vaeseg_norm'])
    input_shape = eval(config['crop_size'])
    out_channels = config['nb_labels']
    if config['nested_label']:
        # Two output channels per non-background label.
        out_channels = 2 * (out_channels - 1)
    shared = dict(norm=norm, bn_first=config['vaeseg_bn_first'], comm=comm)
    encoder = Encoder(base_channels=base_channels,
                      ndim_latent=config['vaeseg_ndim_latent'], **shared)
    decoder = Decoder(base_channels=base_channels, out_channels=out_channels,
                      mode=config['vaeseg_skip_connect_mode'], **shared)
    # The CPC head works on the deepest (8x) feature maps.
    cpcpred1 = CPCPredictor(base_channels=base_channels * 8,
                            grid_size=config['grid_size'],
                            input_shape=input_shape,
                            upper=True,
                            cpc_pattern=config['cpc_pattern'],
                            **shared)
    return encoder, decoder, cpcpred1
def _setup_bound_segmentor(config, comm=None):
    """Build Encoder/Decoder plus the boundary-prediction stream.

    :param config: experiment settings (norm is stored as a python
        expression and evaluated here).
    :param comm: optional ChainerMN communicator, forwarded to every link.
    :return: (encoder, decoder, boundary).
    """
    base_channels = config['base_channels']
    norm = eval(config['vaeseg_norm'])
    bn_first = config['vaeseg_bn_first']
    out_channels = config['nb_labels']
    if config['nested_label']:
        # Two output channels per non-background label.
        out_channels = 2 * (out_channels - 1)
    encoder = Encoder(base_channels=base_channels, norm=norm,
                      bn_first=bn_first,
                      ndim_latent=config['vaeseg_ndim_latent'], comm=comm)
    decoder = Decoder(base_channels=base_channels, out_channels=out_channels,
                      norm=norm, bn_first=bn_first,
                      mode=config['vaeseg_skip_connect_mode'], comm=comm)
    # Note: BoundaryStream takes no bn_first argument.
    boundary = BoundaryStream(base_channels=base_channels,
                              out_channels=out_channels, norm=norm, comm=comm)
    return encoder, decoder, boundary
def _setup_iterators(config, batch_size, train_data, validation_data, test_data):
    """Create train/validation/test iterators.

    Uses MultiprocessIterator with ``config['loaderjob']`` workers when that
    setting is an int > 1, otherwise SerialIterator.  Evaluation iterators
    run one pass without shuffling.
    """
    loaderjob = config['loaderjob']
    if isinstance(loaderjob, int) and loaderjob > 1:
        def make_iterator(dataset, **kwargs):
            return MultiprocessIterator(dataset, batch_size,
                                        n_processes=loaderjob, **kwargs)
    else:
        def make_iterator(dataset, **kwargs):
            return SerialIterator(dataset, batch_size, **kwargs)
    train_iterator = make_iterator(train_data)
    validation_iterator = make_iterator(validation_data, repeat=False, shuffle=False)
    test_iterator = make_iterator(test_data, repeat=False, shuffle=False)
    return train_iterator, validation_iterator, test_iterator
# Optimizer
def _setup_optimizer(config, model, comm):
optimizer_name = config['optimizer']
lr = float(config['init_lr'])
weight_decay = float(config['weight_decay'])
if optimizer_name == 'Adam':
optimizer = Adam(alpha=lr, weight_decay_rate=weight_decay)
elif optimizer_name in \
('SGD', 'MomentumSGD', 'CorrectedMomentumSGD', 'RMSprop'):
optimizer = eval(optimizer_name)(lr=lr)
if weight_decay > 0.:
optimizer.add_hook(WeightDecay(weight_decay))
else:
raise ValueError('Invalid optimizer: {}'.format(optimizer_name))
if comm is not None:
optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
optimizer.setup(model)
return optimizer
# Updater
def _setup_updater(config, device, train_iterator, optimizers):
    """Instantiate the updater matching ``config['segmentor_name']``.

    Unknown names fall back to a plain ``training.StandardUpdater`` that
    does not receive the project config.
    """
    updater_kwargs = {
        'iterator': train_iterator,
        'optimizer': optimizers,
        'device': device,
    }
    custom_updaters = {
        'vaeseg': VAESegUpdater,
        'encdec_seg': EncDecSegUpdater,
        'boundseg': BoundSegUpdater,
        'cpcseg': CPCSegUpdater,
    }
    updater_cls = custom_updaters.get(config['segmentor_name'])
    if updater_cls is not None:
        return updater_cls(config, **updater_kwargs)
    return training.StandardUpdater(**updater_kwargs)
def _setup_extensions(config, trainer, optimizers, logging_counts, logging_attributes):
    """Attach graph-dump, logging, progress, snapshot and plot extensions."""
    # Name of the reported loss each updater emits, used for the graph dump.
    dump_losses = {
        'vaeseg': 'loss/total',
        'encdec_seg': 'loss/seg',
        'boundseg': 'loss/seg',
        'cpcseg': 'loss/total',
    }
    loss_name = dump_losses.get(config['segmentor_name'], 'main/loss')
    trainer.extend(extensions.dump_graph(loss_name, out_name="segmentor.dot"))
    # Periodic console/log reporting.
    repo_trigger = (config['report_interval'], 'iteration')
    trainer.extend(extensions.LogReport(trigger=repo_trigger))
    trainer.extend(extensions.PrintReport(logging_counts + logging_attributes),
                   trigger=repo_trigger)
    trainer.extend(extensions.ProgressBar())
    # Snapshots of the whole trainer and of every optimizer target.
    snap_trigger = (config['snapshot_interval'], 'epoch')
    trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
                   trigger=snap_trigger)
    for name, optimizer in optimizers.items():
        trainer.extend(
            extensions.snapshot_object(optimizer.target, name + '_epoch_{.updater.epoch}'),
            trigger=snap_trigger
        )
    # One train-vs-validation plot per logged attribute.
    for attr in logging_attributes:
        trainer.extend(
            extensions.PlotReport([attr, 'validation/' + attr], 'epoch',
                                  file_name=attr.replace('/', '_') + '.png')
        )
# Trainer
def setup_trainer(config, out, batch_size, epoch, gpu_start_id):
    """Build a fully-wired chainer Trainer from *config*.

    Sets up the (multi-node) communicator, datasets, the model links with
    one optimizer per link for the configured segmentor, iterators, the
    updater, reporting extensions (master only) and val/test evaluators,
    then optionally resumes from a snapshot.

    :param config: dict of experiment settings.
    :param out: output directory for the trainer.
    :param batch_size: minibatch size per process.
    :param epoch: number of training epochs.
    :param gpu_start_id: id of the first GPU to use.
    :return: a `training.Trainer` ready to run.

    NOTE(review): an unrecognised config['segmentor_name'] falls through the
    model-setup chain without defining `optimizers` (and, on the master,
    `val_evaluator`/`test_evaluator`), so it fails later with NameError
    rather than a clear ValueError — confirm only the four known names are
    ever configured.
    """
    comm, is_master, device = _setup_communicator(config, gpu_start_id)
    train_data, validation_data, test_data = _setup_datasets(config, comm, is_master)
    # Build the model links, load initial weights, move them to the GPU and
    # create one optimizer per link, depending on the configured segmentor.
    if config['segmentor_name'] == 'vaeseg':
        encoder, embedder, decoder, vae = _setup_vae_segmentor(config, comm)
        # load weights
        if config['init_encoder'] is not None:
            serializers.load_npz(config['init_encoder'], encoder)
        if config['init_embedder'] is not None:
            serializers.load_npz(config['init_embedder'], embedder)
        if config['init_decoder'] is not None:
            serializers.load_npz(config['init_decoder'], decoder)
        if config['init_vae'] is not None:
            serializers.load_npz(config['init_vae'], vae)
        if device is not None:
            get_device_from_id(device).use()
            encoder.to_gpu()
            embedder.to_gpu()
            decoder.to_gpu()
            vae.to_gpu()
        opt_enc = _setup_optimizer(config, encoder, comm)
        opt_emb = _setup_optimizer(config, embedder, comm)
        opt_dec = _setup_optimizer(config, decoder, comm)
        opt_vae = _setup_optimizer(config, vae, comm)
        optimizers = {'enc': opt_enc, 'emb': opt_emb, 'dec': opt_dec, 'vae': opt_vae}
    elif config['segmentor_name'] == 'cpcseg':
        encoder, decoder, cpcpred1 = _setup_cpc_segmentor(config, comm)
        # load weights
        if config['init_encoder'] is not None:
            serializers.load_npz(config['init_encoder'], encoder)
        if config['init_decoder'] is not None:
            serializers.load_npz(config['init_decoder'], decoder)
        if config['init_cpcpred'] is not None:
            serializers.load_npz(config['init_cpcpred'], cpcpred1)
        if device is not None:
            get_device_from_id(device).use()
            encoder.to_gpu()
            decoder.to_gpu()
            cpcpred1.to_gpu()
        opt_enc = _setup_optimizer(config, encoder, comm)
        opt_dec = _setup_optimizer(config, decoder, comm)
        opt_p1 = _setup_optimizer(config, cpcpred1, comm)
        optimizers = {'enc': opt_enc, 'dec': opt_dec, 'cpcpred1': opt_p1}
    elif config['segmentor_name'] == 'encdec_seg':
        encoder, decoder = _setup_vae_segmentor_only(config, comm)
        # load weights
        if config['init_encoder'] is not None:
            serializers.load_npz(config['init_encoder'], encoder)
        if config['init_decoder'] is not None:
            serializers.load_npz(config['init_decoder'], decoder)
        if device is not None:
            get_device_from_id(device).use()
            encoder.to_gpu()
            decoder.to_gpu()
        opt_enc = _setup_optimizer(config, encoder, comm)
        opt_dec = _setup_optimizer(config, decoder, comm)
        optimizers = {'enc': opt_enc, 'dec': opt_dec}
    elif config['segmentor_name'] == 'boundseg':
        encoder, decoder, boundary = _setup_bound_segmentor(config, comm)
        # load weights
        if config['init_encoder'] is not None:
            serializers.load_npz(config['init_encoder'], encoder)
        if config['init_decoder'] is not None:
            serializers.load_npz(config['init_decoder'], decoder)
        if device is not None:
            get_device_from_id(device).use()
            encoder.to_gpu()
            decoder.to_gpu()
            boundary.to_gpu()
        opt_enc = _setup_optimizer(config, encoder, comm)
        opt_dec = _setup_optimizer(config, decoder, comm)
        opt_bound = _setup_optimizer(config, boundary, comm)
        optimizers = {'enc': opt_enc, 'dec': opt_dec, 'bound': opt_bound}
    train_iterator, validation_iterator, test_iterator = \
        _setup_iterators(config, batch_size, train_data, validation_data, test_data)
    # Decide which report entries are logged/plotted for this segmentor.
    logging_counts = ['epoch', 'iteration']
    if config['segmentor_name'] == 'vaeseg':
        logging_attributes = \
            ['loss/rec', 'loss/kl', 'loss/total', 'acc',
             'mean_dc', 'val/mean_dc', 'test/mean_dc']
        if config['print_each_dc']:
            for i in range(0, config['nb_labels']):
                logging_attributes.append('dc_{}'.format(i))
                logging_attributes.append('val/dc_{}'.format(i))
                logging_attributes.append('test/dc_{}'.format(i))
    elif config['segmentor_name'] == 'cpcseg':
        logging_attributes = \
            ['loss/total', 'acc', 'loss/cpc']
        # NOTE(review): unlike the other branches, per-label dice is always
        # logged here (no config['print_each_dc'] guard) — confirm intended.
        for i in range(0, config['nb_labels']):
            logging_attributes.append('dc_{}'.format(i))
            logging_attributes.append('val/dc_{}'.format(i))
            logging_attributes.append('test/dc_{}'.format(i))
    elif config['segmentor_name'] == 'encdec_seg':
        logging_attributes = \
            ['loss/seg', 'loss/total', 'acc']
        if config['print_each_dc']:
            for i in range(0, config['nb_labels']):
                logging_attributes.append('dc_{}'.format(i))
                logging_attributes.append('val/dc_{}'.format(i))
                logging_attributes.append('test/dc_{}'.format(i))
    elif config['segmentor_name'] == 'boundseg':
        logging_attributes = \
            ['loss/seg', 'loss/total', 'acc', 'loss/bound', 'loss/bce']
        if config['print_each_dc']:
            for i in range(0, config['nb_labels']):
                logging_attributes.append('dc_{}'.format(i))
                logging_attributes.append('val/dc_{}'.format(i))
                logging_attributes.append('test/dc_{}'.format(i))
    else:
        # Generic fallback: per-label dice starts at 1 (background skipped).
        logging_attributes = ['main/loss', 'main/acc']
        for i in range(1, config['nb_labels']):
            logging_attributes.append('main/dc_{}'.format(i))
            logging_attributes.append('val/main/dc_{}'.format(i))
            logging_attributes.append('test/main/dc_{}'.format(i))
    updater = _setup_updater(config, device, train_iterator, optimizers)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out=out)
    # Reporting/snapshot extensions and evaluators run on the master only.
    if is_master:
        _setup_extensions(config, trainer, optimizers, logging_counts, logging_attributes)
        if config['segmentor_name'] == 'vaeseg':
            targets = {'enc': encoder, 'emb': embedder, 'dec': decoder, 'vae': vae}
            val_evaluator = VAESegEvaluator(config, validation_iterator, targets, device=device)
            test_evaluator = VAESegEvaluator(config, test_iterator, targets, device=device)
        elif config['segmentor_name'] == 'cpcseg':
            targets = {'enc': encoder, 'dec': decoder, 'cpcpred1': cpcpred1}
            val_evaluator = CPCSegEvaluator(config, validation_iterator, targets, device=device)
            test_evaluator = CPCSegEvaluator(config, test_iterator, targets, device=device)
        elif config['segmentor_name'] == 'encdec_seg':
            targets = {'enc': encoder, 'dec': decoder}
            val_evaluator = EncDecSegEvaluator(config, validation_iterator, targets, device=device)
            test_evaluator = EncDecSegEvaluator(config, test_iterator, targets, device=device)
        elif config['segmentor_name'] == 'boundseg':
            targets = {'enc': encoder, 'dec': decoder, 'bound': boundary}
            val_evaluator = BoundSegEvaluator(config, validation_iterator, targets, device=device)
            test_evaluator = BoundSegEvaluator(config, test_iterator, targets, device=device)
        val_evaluator.default_name = 'val'
        test_evaluator.default_name = 'test'
        if comm is not None:
            val_evaluator = chainermn.create_multi_node_evaluator(val_evaluator, comm)
            test_evaluator = chainermn.create_multi_node_evaluator(test_evaluator, comm)
        trainer.extend(val_evaluator, trigger=(config['eval_interval'], 'epoch'))
        trainer.extend(test_evaluator, trigger=(config['eval_interval'], 'epoch'))
    # Resume
    if config['resume'] is not None:
        serializers.load_npz(config['resume'], trainer)
    return trainer
|
# tiwen.py
import requests, json, base64, hashlib
import jstyleson
import re
import os
import sys
import time
from typing import *
# Type aliases for form handling.
FieldVal = NewType('FieldVal', Any)  # a field's value (any JSON-able type)
FieldCode = NewType('FieldCode', str)  # server-side field code
# A field is either a bare value or a (value, code) pair.
Field = Union[Tuple[FieldVal, FieldCode], FieldVal]
FieldKv = Tuple[str, Field]  # (label, field) pair
templateItem = Tuple[str, str, str]  # (name, default value, options comment)
# Values loaded from template.json; merged over the server-provided fields.
FieldContent: Dict[str, FieldVal] = {}
def get_value(d: Dict, keys: List):
    """Follow *keys* through nested dicts; return None if any key is absent."""
    current = d
    for k in keys:
        if k not in current:
            return None
        current = current[k]
    return current
class dataSet():
    """Holds user-supplied field values and resolves them for form filling."""
    def __init__(self, code_dict, init_value: Optional[Dict[str, Field]] = None) -> None:
        """
        :param code_dict: hierarchical region-code table for 'area' fields.
        :param init_value: initial mapping of field label -> value.
        """
        self._init_vals: Dict[str, Field] = {}
        self._code_dict = code_dict
        for label, val in (init_value or {}).items():
            self.append(label, val)
    def get_geography_code(self, code_dict, loc_str):
        """Resolve free-text *loc_str* into the deepest matching region code.

        Walks the nested code table, following the first entry whose name is
        a substring of *loc_str* and descending into its children until no
        further level matches.
        """
        selected_code = None
        selected_str = ''
        candidates = code_dict
        descend = True
        while descend:
            descend = False
            for entry in candidates:
                if entry['name'] not in loc_str:
                    continue
                selected_code = entry['code']
                selected_str += entry['name']
                if entry['children'] is not None:
                    candidates = entry['children']
                    descend = True
                break
        if selected_code is None:
            print('please check your location')
        else:
            print(f'select code {selected_code} location {selected_str}')
        return selected_code
    def append(self, key: str, value: Field):
        """Store (or overwrite) the value for a field label."""
        self._init_vals[key] = value
    def _find_val(self, key, type) -> Optional[FieldVal]:
        """Return the first stored value whose label contains *key*;
        'area' fields are converted to a region code first."""
        for label, stored in self._init_vals.items():
            if key not in label:
                continue
            if type == 'area':
                return self.get_geography_code(self._code_dict, stored)
            return stored
        return None
    def fill(self, fields: List[Tuple[str, str, Optional[FieldCode]]], empty_value: Optional[str] = '') -> List[Field]:
        """Resolve every (key, type, code) triple into a submission value.

        Missing values fall back to *empty_value*; when that is None a
        missing value raises instead.
        """
        filled = []
        for key, type, code in fields:
            val = self._find_val(key, type)
            if val is None:
                if empty_value is None:
                    raise RuntimeError(f"we can't find value for key {key}, please update template.json")
                val = empty_value
            if code is None or isinstance(val, tuple):
                filled.append(val)
            else:
                filled.append((code, val))
        return filled
class signin:
    """Authenticate against the campus API and keep the logged-in session."""
    def __init__(self, usr: str, pwd: str):
        """
        :param usr: account name (phone number).
        :param pwd: plain-text password; hashed in login() before sending.
        """
        requests.packages.urllib3.disable_warnings()
        self.usr = usr  # phone number
        # Bug fix: this line was a redacted `<PASSWORD>` placeholder (not
        # valid python); the constructor argument is stored here.
        self.pwd = pwd  # password
        # Session object: keeps cookies between requests.
        self.s = requests.Session()
        # self.s.proxies = {'http': 'http://localhost:8888', 'https': 'http://localhost:8888'}
        # self.s.verify = False
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 10; EBG-AN10 Build/HUAWEIEBG-AN10; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.108 Mobile Safari/537.36Ant-Android-WebView',
            'Authorization': 'BASIC '
                             'NTgyYWFhZTU5N2Q1YjE2ZTU4NjhlZjVmOmRiMzU3YmRiNmYzYTBjNzJkYzJkOWM5MjkzMmFkMDYyZWRkZWE5ZjY='
        }
        self.interUrl = 'https://h5api.xiaoyuanjijiehao.com/api/staff/interface'
    # emulated login
    def login(self):
        """Perform the OAuth2 password-grant login.

        Stores the access token and the SERVERID cookie on the session so
        that later requests are authenticated, then returns self.
        """
        usr1 = "{\"LoginModel\":1,\"Service\":\"ANT\",\"UserName\":\"%s\"}" % self.usr
        log_url = "https://auth.xiaoyuanjijiehao.com/oauth2/token"
        data = {
            # Bug fix: this expression was a redacted `<PASSWORD>` placeholder.
            # NOTE(review): md5 matches the `.hexdigest()` usage and the
            # hashlib import, but confirm the app does not use SHA-1/SHA-256.
            'password': hashlib.md5(self.pwd.encode()).hexdigest(),
            'grant_type': 'password',
            'username': str(base64.b64encode(usr1.encode('utf-8')), 'utf-8'),
        }
        req = self.s.post(log_url, headers=self.headers, data=data, verify=False)
        log_page = req.text
        # response headers
        head = req.headers
        # extract the SERVERID cookie value
        cook = str(re.search("SERVERID=(.*?);Path=/", head.get("Set-Cookie")).group())
        cook = re.sub(";Path=/", "", cook)
        # print(cook)
        # access_token from the JSON body
        token = json.loads(log_page.strip())["access_token"]
        # authenticate every later request on this session
        self.s.headers.update({'AccessToken': 'ACKEY_' + token})
        self.s.headers.update({'Cookie': cook})
        return self
def get_resp_data(resp, name):
    """Extract the ``Data`` payload from an API response dict.

    :param resp: decoded JSON response; expected to carry ``FeedbackCode``
        (0 on success) and a ``Data`` entry.
    :param name: caller name, used only in the error message.
    :return: the ``Data`` payload.
    :raises RuntimeError: when the response is malformed or reports failure.
    """
    try:
        # KeyError/TypeError cover a missing key or a non-dict response;
        # the previous bare `except:` also swallowed programming errors.
        if resp['FeedbackCode'] == 0 and 'Data' in resp:
            return resp['Data']
    except (KeyError, TypeError):
        pass
    raise RuntimeError(name, " get response format error ", resp)
class report:
    def __init__(self, usr: str, pwd: str):
        """Create the reporting client and log in immediately.

        :param usr: account name (phone number), forwarded to `signin`.
        :param pwd: plain-text password, forwarded to `signin`.
        """
        requests.packages.urllib3.disable_warnings()
        self.url = "https://h5api.xiaoyuanjijiehao.com/api/staff/interface"
        # Static header template imitating the Android in-app WebView; the
        # per-session auth headers are merged in by request()/select_task().
        self.headers = {
            'Host': 'h5api.xiaoyuanjijiehao.com',
            'Connection': 'keep-alive',
            # 'Content-Length': '88',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'Accept': 'application/json, text/plain, */*',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 10; EBG-AN10 Build/HUAWEIEBG-AN10; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.108 Mobile Safari/537.36Ant-Android-WebView',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://h5api.xiaoyuanjijiehao.com',
            'X-Requested-With': 'com.zjelite.antlinkercampus',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-Mode': 'cors',
            'Referer': 'https://h5api.xiaoyuanjijiehao.com/h5/www1/11906/m_infocollect_formdesign/?x_ant_org=11906',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
        }
        # Sign in once at construction time (performs a network round-trip).
        self.si = signin(usr, pwd)
        self.si.login()
# 获取报体温页面TaskCode
def request(self, router, body):
# 拼凑请求头
thisHeader = self.headers
# thisHeader.update({'Content-Length': '88'})
thisHeader.update({'AccessToken': self.si.s.headers.get("AccessToken")})
thisHeader.update(
{'Cookie': str(self.si.s.headers.get("AccessToken")) + "; " + str(self.si.s.headers.get("Cookie"))})
# 请求内容
querystring = {
"Router": router,
"Method": 'POST',
"Body": json.dumps(body)
}
return requests.post(self.url, headers=thisHeader, json=querystring, verify=False).json()
def select_task(self, filter_keyword=None):
# 拼凑请求头
thisHeader = self.headers
# thisHeader.update({'Content-Length': '88'})
thisHeader.update({'AccessToken': self.si.s.headers.get("AccessToken")})
thisHeader.update(
{'Cookie': str(self.si.s.headers.get("AccessToken")) + "; " + str(self.si.s.headers.get("Cookie"))})
# 请求内容
querystring = {
"Router": '/api/newcommtask/getstudenttasklist',
"Method": 'POST',
"Body": '{"UID":""}'
}
rep = requests.post(self.url, headers=thisHeader, json=querystring, verify=False).json()
data = get_resp_data(rep, 'select_task')
selected_item = data['list'][0]
if filter_keyword:
for item in data['list']:
if filter_keyword in item['Title']:
selected_item = item
break
print(f"select Title: {selected_item['Title']} TaskCode: {selected_item['TaskCode']} filter_keyword: {filter_keyword}")
return selected_item['TaskCode'], selected_item['BusinessId']
    def get_dataset(self):
        """Build the dataSet used to fill in the form.

        Merges read-only profile fields fetched from the server with the
        user-supplied FieldContent overrides (user values win on key clash).
        """
        data = get_resp_data(self.request('/api/system/getuserbaseinfo', {"UID":""}), 'get_baseinfo')
        # NOTE(review): FieldContent is only read here, so this `global`
        # declaration is redundant (but harmless).
        global FieldContent
        fields = {
            '学院': (data['AcademyName'], 'disabled'),  # academy (read-only)
            '年级': (data['UserCode'][0:4], 'disabled'),  # grade: first 4 chars of student code
            '专业': (data['MajorName'], 'disabled'),  # major
            '班级': (data['ClassName'], 'disabled'),  # class
            '学号': (data['UserCode'], 'disabled'),  # student code
            '姓名': (data['Name'], 'disabled')  # name
        }
        # Later keys win, so FieldContent overrides the defaults above.
        fields = {**fields, **FieldContent}
        s = dataSet(self.get_geography(), fields)
        return s
def get_fields(self, templateId) -> List[Tuple[str, str, Optional[FieldCode]]]:
data = get_resp_data(self.request("/api/newcustomerform/get", {"TemplateId": templateId}), 'get_fields')
content = json.loads(data['Content'])['list']
ret = []
for item in content:
ret.append((item['name'], item['type'], None if 'fieldCode' not in item else item['fieldCode']))
return ret
def get_fields_template_item(self, templateId) -> List[templateItem]:
data = get_resp_data(self.request("/api/newcustomerform/get", {"TemplateId": templateId}), 'get_fields')
content = json.loads(data['Content'])['list']
ret = []
for item in content:
if 'fieldCode' in item and item['fieldCode'] == 'disabled':
continue
comment = ''
default_value = 'your value here'
try:
lst = get_value(item, ['options', 'options'])
if lst is not None:
options = list(map(lambda i: i['value'], lst))
default_value = options[0]
comment = ', '.join(map(lambda s: f'"{s}"', options))
except: pass
ret.append((item['name'], default_value, comment))
return ret
def output_template_file(self, items: List[templateItem]):
data = "{"
for i in range(len(items)):
name, value, comment = items[i]
line = f'\n "{name}": "{value}"'
if i < len(items) - 1:
line += ","
line += f" // {comment}"
data += line
data += f"\n//{time.time()}"
data += "\n}"
with open('template.json', 'w', encoding='utf-8') as f:
f.write(data)
def generate_body_fields(self, fields: List[Field]):
ret = []
for field in fields:
field_code = ''
content = ''
if isinstance(field, tuple):
content, field_code = field
else:
content = field
ret.append({
"FieldCode": field_code,
"Content": content
})
return ret
    def get_geography(self):
        """Fetch the geography (region) data used by location form fields."""
        return get_resp_data(self.request("/api/newcustomerform/geography", {}), 'get_geography')
def push_request(phone: str, password: str, filter_keyword=None):
    """Log in, select a task, fill in the form and submit it.

    :param phone: account phone number
    :param password: account password
    :param filter_keyword: optional keyword used to pick a specific task
    :return: True when the server acknowledges the submission
    """
    repo = report(phone, password)
    taskcode, templateId = repo.select_task(filter_keyword)
    s = repo.get_dataset()
    fields = repo.get_fields(templateId)
    fields = repo.generate_body_fields(s.fill(fields))
    print(f"fields: {fields}")
    body = {
        'Field': fields,
        "TaskCode": taskcode,
        "TemplateId": templateId,
    }
    resp = repo.request('/api/newcustomerform/submit', body)
    # FeedbackCode 0 signals success; return the comparison directly instead
    # of the old if/return-True/return-False chain.
    return resp["FeedbackCode"] == 0
def main():
    """Entry point: read credentials from the environment and submit the form.

    With '-g' on the command line, only (re)generate template.json and quit.
    Exits with a non-zero status when the submission fails.
    """
    options = {
        'phone': os.getenv('PHONE'),
        'password': os.getenv('PASSWORD'),
        'filter_keyword': os.getenv('KEYWORD')
    }
    print(options)
    if '-g' in sys.argv:
        # Template-generation mode: dump the editable fields and stop.
        repo = report(options['phone'], options['password'])
        repo.output_template_file(repo.get_fields_template_item(repo.select_task(options['filter_keyword'])[1]))
        print('update template.json')
        # sys.exit instead of the site-provided exit(): exit() is only
        # guaranteed to exist in interactive sessions.
        sys.exit(0)
    load_template()
    result = push_request(**options)
    print(result)
    if not result:
        sys.exit(-1)
def load_template():
    """Load user field overrides from the FIELDS environment variable.

    FIELDS holds the comment-tolerant JSON produced from template.json;
    jstyleson strips the //-comments before parsing.
    """
    # NOTE(review): os.getenv returns None when FIELDS is unset, which makes
    # jstyleson.loads raise — confirm callers always provide FIELDS.
    fields = os.getenv('FIELDS')
    global FieldContent
    FieldContent = jstyleson.loads(fields)
# Run the CLI entry point only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
<filename>vindauga/widgets/color_selector.py
# -*- coding: utf-8 -*-
import logging
from vindauga.constants.colors import cmColorForegroundChanged, cmColorBackgroundChanged, cmColorSet
from vindauga.constants.event_codes import evBroadcast, evMouseDown, evKeyDown, evMouseMove
from vindauga.constants.keys import kbLeft, kbRight, kbDown, kbUp
from vindauga.constants.option_flags import ofSelectable, ofFirstClick, ofFramed
from vindauga.misc.cp437 import cp437ToUnicode
from vindauga.misc.message import message
from vindauga.misc.util import ctrlToArrow
from vindauga.types.draw_buffer import DrawBuffer
from vindauga.types.view import View
logger = logging.getLogger(__name__)
class ColorSelector(View):
    """
    The interrelated classes `ColorItem`, `ColorGroup`, `ColorSelector`,
    `MonoSelector`, `ColorDisplay`, `ColorGroupList`, `ColorItemList` and
    `ColorDialog` provide viewers and dialog boxes from which the user can
    select and change the color assignments from available palettes with
    immediate effect on the screen.

    `ColorSelector` is a view for displaying the color selections available.
    """
    # Selector type: which half of a color attribute is being edited.
    csBackground = 0
    csForeground = 1
    # Glyphs for an ordinary color cell and for the selected cell.
    icon = '○'
    icon_reversed = '◙'

    name = 'ColorSelector'

    def __init__(self, bounds, selectorType):
        super().__init__(bounds)
        self.options |= (ofSelectable | ofFirstClick | ofFramed)
        # Listen to broadcasts so cmColorSet from siblings updates us.
        self.eventMask |= evBroadcast
        self._selectorType = selectorType
        self._color = 0

    def draw(self):
        """Render the 4-wide grid of color cells, marking the selection."""
        b = DrawBuffer()
        b.moveChar(0, ' ', 0x70, self.size.x)
        # NOTE(review): iterates size.y + 1 rows; rows beyond 3 re-emit the
        # buffer contents — presumably writeLine clips to the view, confirm.
        for y in range(self.size.y + 1):
            if y < 4:
                for x in range(4):
                    # Colors are numbered row-major, 4 per row; the color
                    # index doubles as the cell's display attribute.
                    c = y * 4 + x
                    b.moveChar(x * 3, self.icon, c, 3)
                    if c == self._color:
                        # Mark the currently selected cell.
                        b.putChar(x * 3 + 1, self.icon_reversed)
                        if c == 0:
                            # Attribute 0 would be invisible; force 0x70.
                            b.putAttribute(x * 3 + 1, 0x70)
            self.writeLine(0, y, self.size.x, 1, b)

    def handleEvent(self, event):
        """
        Handles mouse and key events: you can click on a given color indicator
        to select that color, or you can select colors by positioning the
        cursor with the arrow keys.

        Changes invoke `drawView()` when appropriate.

        :param event: Event to handle
        """
        # The grid is 4 cells wide.
        width = 4
        super().handleEvent(event)
        oldColor = self._color
        # Foreground selectors span 16 colors (0-15), background only 8 (0-7).
        maxCol = [7, 15][self._selectorType]
        what = event.what

        if what == evMouseDown:
            self.__handleMouseEvent(event, oldColor)
        elif what == evKeyDown:
            key = ctrlToArrow(event.keyDown.keyCode)
            if key in {kbLeft, kbRight, kbUp, kbDown}:
                self.__handleKeyDownEvent(key, maxCol, width)
        elif what == evBroadcast:
            self.__handleBroadcastEvent(event)

    def __colorChanged(self):
        """
        Send a message to indicate color has changed
        """
        if self._selectorType == self.csForeground:
            msg = cmColorForegroundChanged
        else:
            msg = cmColorBackgroundChanged
        message(self.owner, evBroadcast, msg, self._color)

    def __handleBroadcastEvent(self, event):
        """Adopt the color carried by a cmColorSet broadcast and redraw."""
        if event.message.command == cmColorSet:
            # infoPtr presumably carries the packed attribute as a 1-char
            # string: high nibble background, low nibble foreground —
            # TODO confirm against the sender.
            if self._selectorType == self.csBackground:
                self._color = ord(event.message.infoPtr) >> 4
            else:
                self._color = ord(event.message.infoPtr) & 0x0f
            self.drawView()

    def __handleKeyDownEvent(self, key, maxCol, width):
        """Move the selection with the arrow keys, wrapping at the edges."""
        if key == kbLeft:
            if self._color > 0:
                self._color -= 1
            else:
                # Wrap from the first color to the last.
                self._color = maxCol
        elif key == kbRight:
            if self._color < maxCol:
                self._color += 1
            else:
                # Wrap from the last color back to the first.
                self._color = 0
        elif key == kbUp:
            if self._color > width - 1:
                self._color -= width
            elif self._color == 0:
                self._color = maxCol
            else:
                # Top row (but not cell 0): wrap to the bottom row.
                self._color += maxCol - width
        elif key == kbDown:
            if self._color < maxCol - (width - 1):
                self._color += width
            elif self._color == maxCol:
                self._color = 0
            else:
                # Bottom row (but not the last cell): wrap to the top row.
                self._color -= maxCol - width
        self.__colorChanged()
        self.drawView()

    def __handleMouseEvent(self, event, oldColor):
        """Track the mouse, updating the selection until the button is released.

        Moving outside the view restores the color active before the click.
        """
        mousing = True
        while mousing:
            if self.mouseInView(event.mouse.where):
                mouse = self.makeLocal(event.mouse.where)
                # Each cell is 3 columns wide, 4 cells per row.
                self._color = mouse.y * 4 + mouse.x // 3
            else:
                self._color = oldColor
            self.__colorChanged()
            self.drawView()
            mousing = self.mouseEvent(event, evMouseMove)
|
import numpy as np
import MulensModel as mm
def test_magnification_type():
    """
    Check type of magnification returned for model with t_eff.
    At some point it was astropy quantity.
    """
    parameters = mm.ModelParameters({'t_0': 1., 't_eff': 0.2, 't_E': 3.})
    magnification_curve = mm.MagnificationCurve(2., parameters)
    # The strict type comparison is deliberate: astropy Quantity subclasses
    # np.ndarray, so isinstance() would not catch the historical regression.
    assert type(magnification_curve.get_magnification()) == np.ndarray
def test_fspl_noLD():
    """
    Check that finite-source point-lens (FSPL) magnification without
    limb darkening is calculated properly (Gould 1994 method).
    """
    t_0 = 2456789.012345
    t_E = 23.4567
    u_0 = 1e-4
    rho = 1e-3
    # Times chosen so the source-lens separation u equals rho, u_0, 0.5*rho.
    t_vec = np.array([-(rho**2-u_0**2)**0.5, 0., ((0.5*rho)**2-u_0**2)**0.5])
    t_vec = t_vec * t_E + t_0

    params = mm.ModelParameters(
        {'t_0': t_0, 'u_0': u_0, 't_E': t_E, 'rho': rho})

    mag_curve = mm.MagnificationCurve(times=t_vec, parameters=params)
    # Apply the finite-source method only inside [t_0 - t_E, t_0 + t_E].
    methods = [t_0-t_E, 'finite_source_uniform_Gould94', t_0+t_E]
    mag_curve.set_magnification_methods(methods, 'point_source')

    results = mag_curve.get_point_lens_magnification()

    u = np.array([rho, u_0, 0.5*rho])
    pspl = (u**2 + 2.) / np.sqrt(u**2 * (u**2 + 4.))
    # Presumably the B_0 finite-source correction factors — TODO confirm.
    expected = np.array([1.27323965, 0.19949906, 0.93421546])
    # These values were calculated by <NAME> (file b0b1.dat).
    expected *= pspl

    np.testing.assert_almost_equal(expected, results, decimal=4)
def test_fspl():
    """
    Check that finite-source point-lens (FSPL) magnification with
    limb darkening is calculated properly (Yoo et al. 2004 method).
    """
    t_0 = 2456789.012345
    t_E = 23.4567
    u_0 = 1e-4
    rho = 1e-3
    # Linear limb-darkening coefficient.
    gamma = 0.56789
    # Times chosen so the source-lens separation u equals rho, u_0, 0.5*rho.
    t_vec = np.array([-(rho**2-u_0**2)**0.5, 0., ((0.5*rho)**2-u_0**2)**0.5])
    t_vec = t_vec * t_E + t_0

    params = mm.ModelParameters(
        {'t_0': t_0, 'u_0': u_0, 't_E': t_E, 'rho': rho})

    mag_curve = mm.MagnificationCurve(
        times=t_vec, parameters=params, gamma=gamma)
    # Apply the finite-source method only inside [t_0 - t_E, t_0 + t_E].
    methods = [t_0-t_E, 'finite_source_LD_Yoo04', t_0+t_E]
    mag_curve.set_magnification_methods(methods, 'point_source')

    results = mag_curve.get_point_lens_magnification()

    u = np.array([rho, u_0, 0.5*rho])
    pspl = (u**2 + 2.) / np.sqrt(u**2 * (u**2 + 4.))
    # Presumably B_0 - gamma*B_1 correction factors — TODO confirm.
    expected = np.array([1.27323965-gamma*0.09489869,
                         0.19949906-gamma*-0.03492121,
                         0.93421546-gamma*-0.09655794])
    # These values were calculated by <NAME> (file b0b1.dat).
    expected *= pspl

    # Compare via the ratio so the check is relative, not absolute.
    np.testing.assert_almost_equal(expected/results, 1., decimal=4)
def test_Lee09_and_WittMao94():
    """
    test Lee et al. 2009 and Witt & Mao 1994 finite source calculation
    """
    t_vec = np.array([3.5, 2., 1., 0.5, 0.])
    # The values below were calculated using code developed by <NAME>.
    # expected_0: uniform source; expected_1: 1-parameter limb darkening.
    expected_0 = np.array([1.01084060513, 1.06962639343, 1.42451408166,
                           2.02334097551, 2.13919086656])
    expected_1 = np.array([1.01110609638, 1.07461016241, 1.57232954942,
                           2.21990790526, 2.39458814753])
    expected_2 = np.array([1.0110829794, 1.07404148634, 1.55620547462,
                           2.24809136704, 2.44503143812])
    # The last values are for 2-parameter LD with same settings and lambda=0.3.
    # Correction is:
    # -lambda*(1-1.25*sqrt(costh))
    # and for 1-parameter LD we used:
    # 1-gamma*(1-1.5*costh)
    # NOTE(review): expected_2 is defined but never asserted against below.

    # Test uniform source first.
    params_0 = mm.ModelParameters(
        {'t_0': 0., 'u_0': 0.5, 't_E': 1., 'rho': 1.})
    mag_curve_0 = mm.MagnificationCurve(times=t_vec, parameters=params_0)
    methods_0 = [-5., 'finite_source_uniform_Lee09', 5.]
    mag_curve_0.set_magnification_methods(methods_0, 'point_source')
    results_0 = mag_curve_0.get_point_lens_magnification()
    np.testing.assert_almost_equal(expected_0, results_0, decimal=4)

    # Then test 1-parameter limb-darkening.
    params_1 = mm.ModelParameters(
        {'t_0': 0., 'u_0': 0.1, 't_E': 1., 'rho': 1.})
    mag_curve_1 = mm.MagnificationCurve(times=t_vec, parameters=params_1,
                                        gamma=0.5)
    methods_1 = [-5., 'finite_source_LD_Lee09', 5.]
    mag_curve_1.set_magnification_methods(methods_1, 'point_source')
    results_1 = mag_curve_1.get_point_lens_magnification()
    np.testing.assert_almost_equal(expected_1, results_1, decimal=3)

    # Tests for Witt & Mao 1994 start here; they reuse the same curves and
    # must reproduce the Lee09 expected values.
    methods_2 = [-5., 'finite_source_uniform_WittMao94', 5.]
    mag_curve_0.set_magnification_methods(methods_2, 'point_source')
    results_2 = mag_curve_0.get_point_lens_magnification()
    np.testing.assert_almost_equal(expected_0, results_2, decimal=4)

    methods_3 = [-5., 'finite_source_LD_WittMao94', 5.]
    mag_curve_1.set_magnification_methods(methods_3, 'point_source')
    results_3 = mag_curve_1.get_point_lens_magnification()
    np.testing.assert_almost_equal(expected_1, results_3, decimal=3)
def test_PSPL_for_binary():
    """
    test PSPL model used in a model that is defined as binary
    """
    t_0 = 1000.
    t_E = 20.
    u_0 = 1.
    # Far from the event peak (10 and 100 t_E), where PSPL is a good proxy.
    t_vec = np.array([10., 100.]) * t_E + t_0
    params = mm.ModelParameters({
        't_0': t_0, 'u_0': u_0, 't_E': t_E, 's': 1.2, 'q': 0.1, 'alpha': 0.})
    mag_curve = mm.MagnificationCurve(times=t_vec, parameters=params)
    mag_curve.set_magnification_methods(None, 'point_source_point_lens')
    # Analytic PSPL magnification for comparison.
    u2 = u_0**2 + ((t_vec - t_0) / t_E)**2
    pspl = (u2 + 2.) / np.sqrt(u2 * (u2 + 4.))
    np.testing.assert_almost_equal(pspl, mag_curve.get_magnification())
|
<reponame>AIshutin/arthistorian<gh_stars>1-10
import requests
import csv
import argparse
from tqdm import tqdm
import hashlib
from urllib import parse as urlparse
import urllib
import os
from PIL import Image
import aiohttp
import asyncio
from contextlib import closing
# Command-line options: the WGA catalog CSV and the output directory.
parser = argparse.ArgumentParser(description='Download data from WGA')
parser.add_argument('--csvfile', default="data/wga/catalog.csv", help='.csv from WGA with description')
parser.add_argument('--destination', default="data/wga")
args = parser.parse_args()

# Column positions inside a catalog row (url is counted from the end).
column2ind = {
    "url": -5,
    "date": 3
}
def format_date(date_str):
    """Map a free-form WGA date string to an integer year, or None.

    Mirrors the original heuristic pipeline: strip location and month/season
    words, resolve a few literal phrases and "Nth century" forms, keep the
    first part of ranges, drop remaining noise tokens, then parse the year.
    """
    date_str = date_str.lower()
    original = date_str
    if len(date_str) == 0 or date_str == '15%-72':
        return None
    # Drop location suffixes, then month/season words.
    for noise in (', etten', ', the hague', ', paris'):
        date_str = date_str.replace(noise, '')
    months = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
              'august', 'september', 'october', 'november', 'december', 'summer',
              'winter', 'autumn', 'spring', 'second half of ', 'febrauary']
    for word in months:
        date_str = date_str.replace(word, '')
    # A handful of catalog entries only match literally.
    literal_years = {
        '1838 (1862)': 1838,
        '1505 (completed 1508)': 1505,
        'established in 1228': 1228,
        '3rd century': 250,
        '2nd century': 150,
        'late 3rd century': 275,
        '11th-12th centuries': 1101,
        '12th-13th centuries': 1201,
        '11th-13th centuries': 1201,
    }
    if date_str in literal_years:
        return literal_years[date_str]
    # "Nth century" variants map to a representative year inside the century.
    for century in range(4, 21):
        if date_str == f"{century}th century":
            return century * 100 - 50
        if date_str == f"late {century}th century":
            return century * 100 - 25
        if date_str == f"early {century}th century":
            return century * 100 - 75
    if date_str == '-':
        return None
    # Remove "circa" markers.
    for circa in ('c.', 'c,', 'c-'):
        date_str = date_str.replace(circa, '')
    # For ranges keep only the part before the first separator found.
    for sep in ('and', '-', '–', '/', ','):
        if sep in date_str:
            date_str = date_str.split(sep)[0]
    # Strip decade plurals, spaces and the remaining noise tokens
    # (order matters: these replacements are applied sequentially).
    for token in ('s', ' ', '.', 'before', '(completed)', '(etup)', 'after',
                  'begun', 'began', '(retored)', 'founded', 'conecrated',
                  'from', 'around', 'completed', 'rebuilt', 'oon',
                  'converted', 'planned', 'about', '(model)', '(', 'c',
                  '?)', 'late', 'onward'):
        date_str = date_str.replace(token, '')
    if 'or' in date_str:
        date_str = date_str.split('or')[0]
    if date_str == '':
        return None
    try:
        return int(date_str)
    except Exception:
        # Log the unparseable entry and skip it instead of raising.
        print(original, '-' in date_str, list(date_str.split('-'))[0], len(original))
        return None  # raise exp
# Read the catalog and keep (url, parsed_year, raw_date) for datable rows.
table = []
with open(args.csvfile, newline='') as csvfile:
    spamreader = csv.reader(csvfile)
    is_header = True
    for row in tqdm(spamreader):
        if is_header:
            # Skip the single header row.
            is_header = False
            continue
        date = format_date(row[column2ind['date']])
        if date is None:
            # Rows without a parseable date are dropped entirely.
            continue
        table.append((row[column2ind['url']],
                      date,
                      row[column2ind['date']]
                      ))
print(f'Size: {len(table)}')
print(table[:3])
def url_converter(url):
    """Turn a WGA description-page URL into the full-size image URL."""
    # The image lives under /detail/ with a .jpg extension.
    detail_url = url.replace('html/', 'detail/')
    return detail_url.replace('.html', '.jpg')
def get_filename(url):
    """Derive a stable local file name: md5(url) plus the URL's extension."""
    url_path = urlparse.urlparse(url).path
    extension = os.path.splitext(url_path)[1].split('.')[1]
    digest = hashlib.md5(url.encode('utf-8')).hexdigest()
    return f"{digest}.{extension}"
# Number of finished downloads so far (updated by download_image).
cnt = 0

def retrieve_wrapper(data):
    # Thin adapter so urlretrieve(url, path) can run in an executor,
    # which passes a single argument.
    urllib.request.urlretrieve(data[0], data[1])
async def download_image(url, name, pbar):
    """Download one image to <destination>/art/<name>, skipping existing files.

    Runs the blocking urlretrieve in the default executor; updates the shared
    progress bar and the global download counter.
    """
    global cnt
    path = os.path.join(args.destination, 'art', name)
    if not os.path.exists(path):
        print(url)
        # NOTE(review): relies on the module-level `loop`; assumes this
        # coroutine is scheduled on that same loop — confirm.
        await loop.run_in_executor(None, retrieve_wrapper, (url, path))
    pbar.update(1)
    cnt += 1
    print(cnt / len(urls_to_download), flush=True)
# Create the image directory via the shell (os.makedirs would avoid the
# shell round-trip and the error message when it already exists).
os.system(f'mkdir {os.path.join(args.destination, "art")}')
# Write the cleaned catalog and collect the (url, filename) pairs to fetch.
urls_to_download = []
with open(os.path.join(args.destination, 'prepared.csv'), 'w', newline='') as file:
    spamwriter = csv.writer(file)
    for row in tqdm(table):
        url = url_converter(row[0])
        name = get_filename(url)
        date = row[1]
        urls_to_download.append((url, name))
        spamwriter.writerow([name, date, row[2]])

# Dead code kept as a module-level string literal: an earlier aiohttp-based
# download attempt.
'''async def download_file(url: str, name: str):
    path = os.path.join(args.destination, 'art', name)
    if os.path.exists(path):
        return
    async with session.get(url) as response:
        try:
            assert response.status == 200
        except Exception as exp:
            print(url, name)
            raise exp
        buff = await response.read()
        print(type(buff))
        img = Image.frombuffer(buff)
        img.save(path)'''
async def main():
    """Download all queued images concurrently, sharing one progress bar."""
    routines = []
    with tqdm(total=len(urls_to_download)) as pbar:
        for el in urls_to_download:
            routines.append(download_image(el[0], el[1], pbar))
        # Run every download coroutine concurrently on the event loop.
        await asyncio.gather(*routines)
    # download_image(*el) for el in urls_to_download
    # )
# Dead code kept as a string literal: asyncio experimentation scratchpad.
'''async def factorial(name, number):
    f = 1
    for i in range(2, number + 1):
        print(f"Task {name}: Compute factorial({i})...")
        # await asyncio.sleep(1)
        f *= i
    print(f"Task {name}: factorial({number}) = {f}")

async def main():
    # Schedule three calls *concurrently*:
    await asyncio.gather(
        download_image(*urls_to_download[0]),
    )'''

# NOTE(review): run_until_complete is commented out, so main() never runs
# and no images are downloaded — the loop is closed immediately. Confirm
# whether this was disabled on purpose.
loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
loop.close()
|
#! /usr/bin/env python
"""
Use the pre-trained Haar classifier from OpenCV to detect cat faces
"""
import cv2
import dlib
import numpy as np
from constants.constants import debug_cat_frontal_face_detection
# pre-trained classifier from OpenCV
HAAR_CLASSIFIER = 'data/haarcascade_frontalcatface.xml'
DETECTOR = 'data/cat_face_detector.svm'

# Pre-trained shape predictor from iBUG 300-W dataset for human facial landmarks
SHAPE_PREDICTOR = 'data/cat_landmark_predictor.dat'

# finds landmarks in the form (from viewer perspective):
# index - (x,y)
MOUTH_INDEX = 0
LEFT_EYE_INDEX = 1
LEFT_EAR_LEFT_INDEX = 2
RIGHT_EAR_LEFT_INDEX = 3
NOSE_INDEX = 4
RIGHT_EYE_INDEX = 5
LEFT_EAR_RIGHT_INDEX = 6
RIGHT_EAR_RIGHT_INDEX = 7

detector = dlib.fhog_object_detector(DETECTOR)
# NOTE(review): this loads the same dlib SVM as `detector` above, not the
# OpenCV Haar cascade — HAAR_CLASSIFIER is never used. Confirm whether the
# fallback detector was meant to be cv2.CascadeClassifier(HAAR_CLASSIFIER).
haar_detector = dlib.fhog_object_detector(DETECTOR)
landmarks_predictor = dlib.shape_predictor(SHAPE_PREDICTOR)
# convenience function from imutils
def dlib_to_cv_bounding_box(box):
    """Convert a dlib rectangle to OpenCV's (x, y, w, h) tuple."""
    left, top = box.left(), box.top()
    width = box.right() - left
    height = box.bottom() - top
    return left, top, width, height
def show_detected_faces(img, bounding_boxes, facial_landmark_points):
    """Draw face boxes and landmark dots on img and display it (blocks until a key press)."""
    for face in bounding_boxes:
        # draw box for face
        x, y, w, h = dlib_to_cv_bounding_box(face)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # draw circles for landmarks
    for landmark_set in facial_landmark_points:
        for x, y in landmark_set:
            cv2.circle(img, (x, y), 1, (0, 0, 255), -1)
    # show the output image with the face detections + facial landmarks
    cv2.imshow("Output", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# conversion from imutils
def landmarks_to_numpy(landmarks):
    """Pack a sequence of (x, y) landmark pairs into an (N, 2) int array."""
    coords = np.zeros((len(landmarks), 2), dtype=int)
    # Fill row by row; each landmark contributes its first two components.
    for row, point in enumerate(landmarks):
        coords[row] = (point[0], point[1])
    return coords
def add_inferred_landmarks(landmark_list):
    """Append inferred cheek points (in place) to improve the face mask.

    Each cheek sits at the outer ear's x position and the nose's y position.
    """
    nose_y = landmark_list[NOSE_INDEX][1]
    left_ear_x = landmark_list[LEFT_EAR_LEFT_INDEX][0]
    right_ear_x = landmark_list[RIGHT_EAR_RIGHT_INDEX][0]
    # Order matters for downstream indexing: left cheek first, then right.
    landmark_list.extend([(left_ear_x, nose_y), (right_ear_x, nose_y)])
def detect_cat_face(img):
    """Detect cat faces and their landmark points in a BGR image.

    Returns (face_bounding_boxes, facial_landmark_points); terminates the
    process when no face is found.
    """
    facial_landmark_points = []
    # pre-process the image to grayscale, a normal step for haar classifiers
    grayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    face_bounding_boxes = detector(grayscale_img, 1)
    if len(face_bounding_boxes) == 0:
        # Fallback detector. NOTE(review): haar_detector is loaded from the
        # same dlib SVM as `detector`, so this retry cannot find anything
        # new — confirm it was meant to use the OpenCV Haar cascade.
        face_bounding_boxes = haar_detector(grayscale_img, 1)
        if len(face_bounding_boxes) == 0:
            print("no cat face found in input image")
            # NOTE(review): exit(0) kills the caller's process; consider
            # returning empty results instead.
            exit(0)

    for face in face_bounding_boxes:
        landmarks = landmarks_predictor(img, face)
        landmark_list = []
        for i in range(0, landmarks.num_parts):
            landmark_list.append((landmarks.part(i).x, landmarks.part(i).y))
        # Add the inferred cheek points before converting to an array.
        add_inferred_landmarks(landmark_list)
        facial_landmark_points.append(landmarks_to_numpy(landmark_list))

    if debug_cat_frontal_face_detection:
        show_detected_faces(img, face_bounding_boxes, facial_landmark_points)
    return face_bounding_boxes, facial_landmark_points
|
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory.
# LLNL-CODE-743438
# All rights reserved.
# This file is part of MGmol. For details, see https://github.com/llnl/mgmol.
# Please also read this link https://github.com/llnl/mgmol/LICENSE
#
# Python program to generate xyz files from mgmol output
#
# use:
# python mgmol2xyz.py mgmol_output > xyz
# python mgmol2xyz.py mgmol_output dump_freq
#-------------------------------------------------------------------------------
import sys, string
from numpy import array
import outputTools
# NOTE(review): this is a Python 2 script (string-module functions,
# backquotes and print statements appear below).
input_ =open(sys.argv[1],'r')
filename_=sys.argv[1]
default_dump_freq_=100000
dump_freq_=default_dump_freq_ # interval between frames for files generation
if len(sys.argv)>2:
  # NOTE(review): eval() on a CLI argument — int() would be safer.
  dump_freq_=eval(sys.argv[2])
if len(sys.argv)>3:
  jobname=sys.argv[3]
else:
  jobname=''

L=input_.readlines()
l=len(L) ## no of lines in file

# list possible species in a dictionary with their count
species={'H':0,'D':0,'He':0,'Li':0,'Be':0,'B':0,'C':0,'N':0,'O':0,'F':0,'Na':0,
         'Mg':0,'Al':0,'Si':0,'P':0,'S':0,'Cl':0,'Ca':0,'In':0,'Au':0,'X':0}

# Unit conversion between Angstrom and Bohr.
ang2bohr=1.8897269
bohr2ang=1./ang2bohr

na=outputTools.countNumAtoms(input_)
nmlwc=outputTools.countNumMLWC(input_)
#print 'N atoms=', na
#na=na+nmlwc
#print 'N atoms=', na

# Scan for the simulation cell origin (last match wins).
searchterm='Origin'
ox=0.
oy=0.
oz=0.
for line in L:
  num_matches = string.count(line, searchterm)
  if num_matches:
    words=string.split(line)
    ox=eval(words[2])
    oy=eval(words[3])
    oz=eval(words[4])
origin=array([ox,oy,oz])

# Scan for the cell dimensions (last match wins).
searchterm='Dimension'
lx=0.
ly=0.
lz=0.
for line in L:
  num_matches = string.count(line, searchterm)
  if num_matches:
    words=string.split(line)
    lx=eval(words[2])
    ly=eval(words[3])
    lz=eval(words[4])
cell=array([lx,ly,lz])
end=array([lx,ly,lz])
########################################################################
def readAtomicPositions(first_line,last_line,anames,acoords):
  """Parse '## name x y z' atom lines from L[first_line:last_line] into anames/acoords (Python 2)."""
  j=0
  # flag1/flag2 track whether the current line still matches the '##'
  # format; a mismatch after at least one match ends the section.
  flag1=0
  flag2=0
  #print 'loop starting at', first_line+1
  for line2 in range(first_line,last_line):
    i=0
    for c in L[line2]:
      flag1=0
      if c=='#':
        if L[line2][i+1]=='#':
          flag1=1
          flag2=1
          word=string.split(L[line2][i+3:])
          name=word[0]
          occupancy = 1
          # '*' prefix marks an unoccupied site.
          if name[0]=='*':
            name=name[1:]
            occupancy = 0
          # Deuterium is printed as hydrogen.
          if name[0]=='D':
            name='H'+name[1:]
          #print name
          x=word[1]
          y=word[2]
          z=word[3]
          #print name+'\t'+x+'\t'+y+'\t'+z
          anames[j]=name
          acoords[j]=x+' '+y+' '+z
          j=j+1
        break
      i=i+1
    if flag1!=flag2: break
########################################################################
def readMLWC(first_line,last_line,anames,acoords):
  """Parse '&& ... x y z' Wannier-center lines into anames/acoords as 'X<n>' pseudo-atoms (Python 2)."""
  count=0
  # flag1/flag2 track whether the current line still matches the '&&'
  # format; a mismatch after at least one match ends the section.
  flag1=0
  flag2=0
  #print 'loop starting at', first_line+1
  for line2 in range(first_line,last_line):
    i=0
    for c in L[line2]:
      flag1=0
      if c=='&':
        # Skip 'Ander...' lines that also start with '&&'.
        if L[line2][i+1]=='&' and 'Ander' not in L[line2]:
          flag1=1
          flag2=1
          word=string.split(L[line2])
          #print word
          x=word[2]
          y=word[3]
          z=word[4]
          #print name+'\t'+x+'\t'+y+'\t'+z
          # Python 2 backquotes are repr(): names become X0, X1, ...
          anames[count]='X'+`count`
          acoords[count]=x+' '+y+' '+z
          count=count+1
        break
      i=i+1
    if flag1!=flag2: break
########################################################################
def makeXYZobject(natoms,anames,acoords,xyzobject):
  """Append 'name  x y z' .xyz lines (Angstroms, wrapped into the cell) to xyzobject (Python 2)."""
  #atom name
  for i in range(natoms):
    # Prefer the 2-letter element symbol when it is a known species.
    if anames[i][0:2] in species.keys():
      name=anames[i][0:2]
    else:
      name=anames[i][0:1]
    name=name.ljust(4)
    xyzobject.append(name)
  #x,y,z
  for i in range(natoms):
    word=string.split(acoords[i])
    xyzobject[i]=xyzobject[i]+' '
    for j in range(3):
      x=eval(word[j])
      # Wrap the coordinate back into the periodic cell [origin, end].
      while x<origin[j]:
        x=x+cell[j]
      while x>end[j]:
        x=x-cell[j]
      # Convert Bohr -> Angstrom and format as a fixed-width column.
      x = "%.3f" % (x*bohr2ang)
      x=x.rjust(8)
      xyzobject[i]=xyzobject[i]+x
########################################################################
def printXYZfile(natoms,anames,acoords,filename,jobname=''):
  """Write one .xyz frame to `filename`, or to stdout when filename is empty (Python 2)."""
  lineout=[]
  makeXYZobject(natoms,anames,acoords,lineout)
  if filename:
    output = open(filename,'w')
    # .xyz header: atom count, then an (optional) comment line.
    output.write(str(natoms))
    output.write('\n')
    if jobname:
      output.write(jobname)
    output.write('\n')
    for i in range(0,natoms):
      output.write(lineout[i])
      output.write('\n')
  else:
    # Python 2 print statements: emit the same frame to stdout.
    print natoms
    print
    for i in range(0,natoms):
      print lineout[i]
########################################################################
# Markers that introduce the atom and Wannier-center sections in the log.
searchterm1='IONIC POSITIONS AND FORCES'
searchterm2='Stepper Forces'
searchterm3='Orbitals centers and spreads'

# Pre-size the name/coordinate slots filled in place by the readers.
acoords=[]
anames=[]
for i in range(0,na):
  acoords.append(' ')
  anames.append(' ')
wcoords=[]
wnames=[]
for i in range(0,nmlwc):
  wcoords.append(' ')
  wnames.append(' ')

count_sets=0
for line in range(l): ## loop over lines of file
  num_matches1 = string.find(L[line], searchterm1)
  num_matches2 = string.find(L[line], searchterm2)
  num_matches3 = string.find(L[line], searchterm3)

  if num_matches3>=0 and nmlwc>0:
    readMLWC(line+1,line+nmlwc+2,wnames,wcoords)

  if num_matches1>=0 or num_matches2>=0 :
    # Dump only every dump_freq_-th frame.
    modulus=count_sets%dump_freq_
    if( modulus==0 ):
      readAtomicPositions(line+1,line+na+2,anames,acoords)
      if( dump_freq_<default_dump_freq_ ):
        # Python 2 backquotes: filename gets the frame index appended.
        filename=filename_+`count_sets`+".xyz"
        printXYZfile(na+nmlwc,anames+wnames,acoords+wcoords,filename, jobname)
    count_sets=count_sets+1

# Without an explicit dump frequency, print only the last frame to stdout.
names =anames +wnames
coords=acoords+wcoords
if( dump_freq_==default_dump_freq_ ):
  printXYZfile(na+nmlwc,names,coords,"")
|
# Per-site folder names (numbered) and the matching watershed identifiers;
# the three lists below are index-aligned.
folders = ['1-EastRiver', '2-DryCreek','3-SagehenCreek','4-AndrewsForest','5-Baltimore',
           '6-BonanzaCreek','7-CaliforniaCurrentEcosystem','8-CentralArizona','9-Coweeta','10-FloridaCoastalEverglades',
           '11-GeorgiaCoastalEcosystems','12-HarvardForest','13-HubbardBrook','14-JornadaBasin','15-Kellogg',
           '16-KonzaPrairie','17-NorthernGulfofAlaska','18-PlumIsland','19-Sevilleta','20-Boulder',
           '21-Catalina','22-Jemez','23-Christina','24-Luquillo','25-Reynolds',
           '26-ShaleHills','27-SanJoaquin','28-Providence','29-Wolverton','30-Calhoun']
watersheds = ['EastRiver','DryCreek','SagehenCreek','AndrewsForest','Baltimore',
              'BonanzaCreek','CaliforniaCurrentEcosystem','CentralArizona','Coweeta','FloridaCoastalEverglades',
              'GeorgiaCoastalEcosystems','HarvardForest','HubbardBrook','JornadaBasin','Kellogg',
              'KonzaPrairie','NorthernGulfofAlaska','PlumIsland','Sevilleta','Boulder',
              'Catalina','Jemez','Christina','Luquillo','Reynolds',
              'ShaleHills','SanJoaquin','Providence','Wolverton','Calhoun']
# Human-readable display names ('<NAME>' entries are anonymized placeholders
# from the data source).
watershed_names = ['EastRiver','DryCreek','SagehenCreek','AndrewsForest','Baltimore',
                   'Bonanza Creek','California Current Ecosystem','Central Arizona','Coweeta','Florida Coastal Everglades',
                   'Georgia Coastal Ecosystems','Harvard Forest','Hubbard Brook','Jornada Basin','Kellogg',
                   '<NAME>','Northern Gulf of Alaska','Plum Island','Sevilleta','Boulder',
                   'Catalina','Jemez','Christina','Luquillo','Reynolds',
                   'Shale Hills','<NAME>','Providence','Wolverton','Calhoun']
# Main discharge station per watershed: [site index, station id].
main_str_dic={'EastRiver':[1,'PHISCO'],
              'DryCreek':[2,'LG'],
              'SagehenCreek':[3,'Sagehen'],
              'AndrewsForest':[4,'GSLOOK'],
              'Baltimore':[5,'GWYNNS'],
              'BonanzaCreek':[6,'C4'],
              'CaliforniaCurrentEcosystem':[7,'FashionValley'],
              'CentralArizona':[8,'SCNFM'],
              'Coweeta':[9,'Prentiss'],
              'FloridaCoastalEverglades':[10,'BarronRiver'],
              'GeorgiaCoastalEcosystems':[11,'Altamaha'],
              'HarvardForest':[12,'BigelowLower'],
              'HubbardBrook':[13,'WS7'],
              'JornadaBasin':[14,'SaltCreek'],
              'Kellogg':[15,'KBS096'],
              'KonzaPrairie':[16,'KingsRiver'],
              'NorthernGulfofAlaska':[17,'Dwnstr'],
              'PlumIsland':[18,'Ipswich'],
              'Sevilleta':[19,'PECOS'],
              'Boulder':[20,'Longmont'],
              'Catalina':[21,'SabinoCreek'],
              'Jemez':[22,'JemezRiver'],
              'Christina':[23,'WhiteClayCreek'],
              'Luquillo':[24,'RioGrande'],
              'Reynolds':[25,'036'],
              'ShaleHills':[26,'ShaleHill'],
              'SanJoaquin':[27,'Fremont'],
              'Providence':[28,'P301'],
              'Wolverton':[29,'Hammond'],
              'Calhoun':[30,'BroadRiverCarlisle']}
## The ppt stn is chosen given the position of the main discharge station and considering the record length
main_ppt_dic={'DryCreek':[2,'TL'],
              'SagehenCreek':[3,'539lvl1B'],
              'AndrewsForest':[4,'PRIMET'],
              'BonanzaCreek':[6,'LTER1'],
              'CaliforniaCurrentEcosystem':[7,'Lindberch'],
              'HubbardBrook':[13,'WS7'],
              'Kellogg':[15,'KBS002'],
              'KonzaPrairie':[16,'HQ01'],
              'Sevilleta':[19,'Station40'],
              'Reynolds':[25,'049']}
# in km2
area_dic={'EastRiver': 300,'DryCreek': 27,'SagehenCreek': 27,'AndrewsForest': 62,'Baltimore': 171,
          'BonanzaCreek': 10,'CaliforniaCurrentEcosystem': 1111, 'CentralArizona': 425,'Coweeta': 363 ,
          'FloridaCoastalEverglades':342,
          'GeorgiaCoastalEcosystems': 35224, 'HarvardForest': 0.65,'HubbardBrook': 0.77,'JornadaBasin': 1976,'Kellogg': 101,
          'KonzaPrairie':12 ,'NorthernGulfofAlaska':634 ,'PlumIsland':115 ,'Sevilleta':2719 ,'Boulder':62.7 ,
          'Catalina': 1217, 'Jemez': 92, 'Christina': 29,'Luquillo': 19,'Reynolds': 239,
          'ShaleHills':0.08 ,'SanJoaquin': 19723,'Providence': 4.6,'Wolverton': 427,'Calhoun': 7226}
# Site coordinates in decimal degrees.
lat_dic={'EastRiver':39.00,'DryCreek':43.69,'SagehenCreek':39.43,'AndrewsForest':44.24,'Baltimore':39.27,
         'BonanzaCreek':65.17 ,'CaliforniaCurrentEcosystem':32.77, 'CentralArizona':33.43,'Coweeta': 35.00,'FloridaCoastalEverglades':25.47,
         'GeorgiaCoastalEcosystems':31.42, 'HarvardForest':42.53,'HubbardBrook':43.94,'JornadaBasin':32.62,'Kellogg':42.40,
         'KonzaPrairie':39.11,'NorthernGulfofAlaska':63.88,'PlumIsland':42.76,'Sevilleta':34.35,'Boulder':40.01,
         'Catalina':32.43, 'Jemez':35.88, 'Christina':39.86,'Luquillo':18.32,'Reynolds':43.23,
         'ShaleHills':40.66,'SanJoaquin':37.11,'Providence':37.06,'Wolverton':36.59,'Calhoun':34.61}
lon_dic={'EastRiver':-107.00, 'DryCreek':-116.18, 'SagehenCreek':-120.24, 'AndrewsForest':-122.18, 'Baltimore':-76.65,
         'BonanzaCreek':-147.51, 'CaliforniaCurrentEcosystem':-117.17, 'CentralArizona':-111.93, 'Coweeta':-83.50,'FloridaCoastalEverglades':-80.85,
         'GeorgiaCoastalEcosystems':-81.30, 'HarvardForest':-72.19, 'HubbardBrook':-71.75, 'JornadaBasin':-106.74, 'Kellogg':-85.40,
         'KonzaPrairie':-96.61, 'NorthernGulfofAlaska':-145.71, 'PlumIsland':-70.89, 'Sevilleta':-106.88, 'Boulder':-105.34,
         'Catalina':-110.77, 'Jemez':-106.53, 'Christina':-75.79,'Luquillo':-65.73, 'Reynolds':-116.65,
         'ShaleHills':-77.91, 'SanJoaquin':-119.73, 'Providence':-119.20, 'Wolverton':-118.73, 'Calhoun':-81.72}
# Measurement variables expected for every watershed dataset.
# (Fix: a stray '|' concatenation artifact fused onto the closing bracket
# made this statement a syntax error.)
variables = ['Discharge', 'Precipitation', 'AirTemperature', 'SolarRadiation',
             'RelativeHumidity', 'SWE', 'SnowDepth', 'SoilTemperature', 'SoilMoisture']
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import inspect
import logging
import yaml
logger = logging.getLogger(__name__)
class TaggedYamlObjectMetaclass(yaml.YAMLObjectMetaclass):
    """Metaclass that tags every subclass with "!<ClassName>" for YAML (de-)serialization."""

    def __init__(cls, name, bases, kwds):
        # Set the tag both on the class itself and in the namespace handed
        # to yaml.YAMLObjectMetaclass, which registers the constructor.
        tag = "!" + name
        cls.yaml_tag = tag
        tagged_kwds = dict(kwds, yaml_tag=tag)
        super().__init__(name, bases, tagged_kwds)
class Config(yaml.YAMLObject, metaclass=TaggedYamlObjectMetaclass):
    """
    Base configuration object YAML (de-)serialization.
    Actual Configuration should subclass this object.
    """
    # UnsafeLoader allows arbitrary tagged Python objects; only load files
    # you trust.
    yaml_loader = yaml.UnsafeLoader  # type: ignore

    def __setattr__(self, key, value):
        # Guard against self-referential cycles that would break deepcopy
        # and YAML serialization.
        if value == self:
            raise AttributeError("Cannot set self as attribute")
        object.__setattr__(self, key, value)

    def __setstate__(self, state):
        """Pickle protocol implementation."""
        # We first take the serialized state:
        self.__dict__.update(state)
        # Then we take the constructors default values for missing arguments in order to stay backwards compatible
        # This way we can add parameters to Config objects and still load old models.
        init_signature = inspect.signature(self.__init__)
        for param_name, param in init_signature.parameters.items():
            if param.default is not param.empty:
                if not hasattr(self, param_name):
                    object.__setattr__(self, param_name, param.default)

    def __repr__(self):
        return "Config[%s]" % ", ".join("%s=%s" % (str(k), str(v)) for k, v in sorted(self.__dict__.items()))

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm Config objects are never used as dict keys.
        if type(other) is not type(self):
            return False
        for k, v in self.__dict__.items():
            if k != "self":
                if k not in other.__dict__:
                    return False
                if self.__dict__[k] != other.__dict__[k]:
                    return False
        return True

    def save(self, fname: str):
        """
        Saves this Config to a file called fname.

        :param fname: Name of file to store this Config in.
        """
        obj = copy.deepcopy(self)
        with open(fname, 'w') as out:
            yaml.dump(obj, out, default_flow_style=False)

    @staticmethod
    def load(fname: str) -> 'Config':
        """
        Returns a Config object loaded from a file.

        :param fname: Name of file to load the Config from.
        :return: Configuration.
        """
        with open(fname) as inp:
            obj = yaml.load(inp, Loader=yaml.UnsafeLoader)  # type: ignore
        return obj

    def copy(self, **kwargs):
        """
        Create a copy of the config object, optionally modifying some of the attributes.
        For example `nn_config.copy(num_hidden=512)` will create a copy of `nn_config` where the attribute `num_hidden`
        will be set to the new value of num_hidden.

        :param kwargs:
        :return: A deep copy of the config object.
        """
        copy_obj = copy.deepcopy(self)
        for name, value in kwargs.items():
            # object.__setattr__ bypasses the self-assignment guard above.
            object.__setattr__(copy_obj, name, value)
        return copy_obj

    def disable_dropout(self):
        """
        Sets the value of all float-valued attributes in this config (or any of its children) that contain 'dropout'
        in their name to 0.0.
        """
        for attr, val in self.__dict__.items():
            if isinstance(val, Config):
                # Recurse into nested Config objects.
                val.disable_dropout()
            elif 'dropout' in attr and isinstance(val, float):
                logger.debug("Setting %s to 0.0", attr)
                setattr(self, attr, 0.0)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003. """
import logging
import os
import sys
import torch
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
get_constant_schedule_with_warmup,
AdamW
)
from transformers.trainer_utils import is_main_process
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask, TokenClassificationDatasetKD
import models_ner
from dataclasses import fields
# Registry mapping the CLI --model_class value to the custom model classes
# defined in models_ner. Any name not present here must be requested as
# "BERT", which routes through AutoModelForTokenClassification instead.
MODEL_CLASS_DICT = {"SimpleClassifier": models_ner.SimpleClassifier,
                    "SimpleCNN": models_ner.SimpleCNN,
                    "SimpleCNNSoftmax": models_ner.SimpleCNNSoftmax,
                    "MultipleWindowCNN": models_ner.MultipleWindowCNN,
                    "MultipleWindowCNN2": models_ner.MultipleWindowCNN2,
                    "WindowSequenceModel": models_ner.WindowSequenceModel,
                    "WindowSequenceModel128": models_ner.WindowSequenceModel128,
                    "WindowSequenceModel128AllKD": models_ner.WindowSequenceModel128AllKD,
                    "WindowSequenceModelBertEmbeddingsFrozen": models_ner.WindowSequenceModelBertEmbeddingsFrozen,
                    "SimpleLSTM": models_ner.SimpleLSTM,
                    "SimpleLSTM128": models_ner.SimpleLSTM128,
                    "SimpleLSTM128AllKD": models_ner.SimpleLSTM128AllKD,
                    "SimpleLSTM128Dropout02": models_ner.SimpleLSTM128Dropout02,
                    "SimpleLSTM128Depth2": models_ner.SimpleLSTM128Depth2,
                    "SimpleLSTM128Depth2Dropout02": models_ner.SimpleLSTM128Depth2Dropout02,
                    "SimpleLSTM128Depth3Dropout02": models_ner.SimpleLSTM128Depth3Dropout02,
                    "SimpleLSTM128BertEmbeddingsFrozen": models_ner.SimpleLSTM128BertEmbeddingsFrozen,
                    "SimpleLSTM256": models_ner.SimpleLSTM256,
                    "SimpleLSTM256Dropout02": models_ner.SimpleLSTM256Dropout02,
                    "SimpleLSTM256Depth2Dropout02": models_ner.SimpleLSTM256Depth2Dropout02,
                    "SimpleLSTM256Depth2Dropout02RNNDropout02": models_ner.SimpleLSTM256Depth2Dropout02RNNDropout02,
                    "SimpleLSTM256Depth2Dropout05RNNDropout05": models_ner.SimpleLSTM256Depth2Dropout05RNNDropout05,
                    "SimpleLSTM256Depth2BertEmbeddingsFrozen": models_ner.SimpleLSTM256Depth2BertEmbeddingsFrozen,
                    "SimpleLSTM256Depth3Dropout02": models_ner.SimpleLSTM256Depth3Dropout02,
                    "SimpleLSTM256BertEmbeddingsFrozen": models_ner.SimpleLSTM256BertEmbeddingsFrozen,
                    "SimpleLSTM256Depth2Dropout02RNNDropout02BertEmbeddingsFrozen": models_ner.SimpleLSTM256Depth2Dropout02RNNDropout02BertEmbeddingsFrozen,
                    "SimpleLSTM256Depth2RNNDropout02BertEmbeddingsFrozen": models_ner.SimpleLSTM256Depth2RNNDropout02BertEmbeddingsFrozen,
                    "SimpleLSTM512": models_ner.SimpleLSTM512}
# Loss functions selectable via --loss_fct_kd for knowledge distillation.
LOSS_FCT_KD_DICT = {"KL": nn.functional.kl_div}
# Dataset variants: "KD" additionally carries teacher logits for distillation.
DATA_CLASS_DICT = {"default": TokenClassificationDataset,
                   "KD": TokenClassificationDatasetKD}
# Optimizers selectable via --optimizer; the default (unset) path uses AdamW.
OPTIMIZER_DICT = {"Adam": torch.optim.Adam}
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # Key into MODEL_CLASS_DICT, or the special value "BERT" for a stock
    # HuggingFace token-classification model.
    model_class: str = field(
        metadata={"help": "The class of the desired model. If 'BERT' you must also provide a 'model_name_or_path'"},
    )
    # Required despite the Optional annotation: field() has no default, so the
    # dataclass constructor demands a value. Also used for tokenizer/config
    # lookup when tokenizer_name/config_name are not given.
    model_name_or_path: Optional[str] = field(
        metadata={"help": "Path to standard BERT pretrained model or model identifier from huggingface.co/models"}
    )
    # state_dict loaded into custom (non-BERT) models after construction.
    custom_model_state_dict_path: Optional[str] = field(
        default=None, metadata={"help": "Path to custom pretrained model state dict"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    # Name of a TokenClassificationTask subclass in the "tasks" module.
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # Key into OPTIMIZER_DICT; when unset, AdamW is used.
    optimizer: Optional[str] = field(
        default=None,
        metadata={"help": "Name of desired optimizer"}
    )
    use_constant_lr: bool = field(
        default=False,
        metadata={"help": "Whether to use constant lr during training"}
    )
    # Distillation weight; > 0 switches to the KD dataset and requires
    # loss_fct_kd to name an entry in LOSS_FCT_KD_DICT.
    kd_param: Optional[float] = field(
        default=0,
        metadata={
            "help": "The coefficient for knowledge distillation training. Zero means no distillation training."}
    )
    loss_fct_kd: Optional[str] = field(
        default=None,
        metadata={
            "help": "The loss function to use for knowledge distillation training."
        }
    )
    bert_embeddings_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The path to the bert embedding weights tensor if pre-trained embeddings are to be used."
        }
    )
    # Enables the two-phase (frozen-then-unfrozen embeddings) training schedule
    # in main(); only meaningful together with bert_embeddings_path.
    num_emb_frozen_train_epochs: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of epochs to train the model with frozen embeddings before training "
                    "with unfrozen embeddings for num_train_epochs."
        }
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    # Directory expected to contain train/dev/test splits in CoNLL-2003 format
    # (see the Split enum usage in main()).
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """Fine-tunes and/or evaluates a token-classification (NER) model.

    Parses model/data/training arguments (from the CLI or a single JSON file),
    builds either a stock HuggingFace token classifier ("BERT") or a custom
    model from MODEL_CLASS_DICT, then uses the HF Trainer for training,
    evaluation and prediction as requested by the TrainingArguments flags.

    Returns:
        Dict of evaluation results (empty when --do_eval is not set).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing non-empty output directory unless asked to.
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )
    # Resolve the task class (e.g. NER, POS) by name from the local "tasks" module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
    num_labels = len(labels)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    if model_args.model_class=="BERT":
        model = AutoModelForTokenClassification.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
        model_type = config.model_type
    else:
        # Custom models receive extra settings by mutating the HF config object.
        config.max_seq_length = data_args.max_seq_length
        config.pad_token_id = tokenizer.pad_token_id
        config.device = training_args.device
        # pass BERT embeddings if should be loaded to model
        config.bert_embeddings = None
        if model_args.bert_embeddings_path:
            config.bert_embeddings = torch.load(model_args.bert_embeddings_path)
        # setup kd params
        config.kd_param = model_args.kd_param
        config.loss_fct_kd = None
        if config.kd_param > 0:
            config.loss_fct_kd = LOSS_FCT_KD_DICT[model_args.loss_fct_kd]
        model = MODEL_CLASS_DICT[model_args.model_class](config)
        if model_args.custom_model_state_dict_path:
            model.load_state_dict(torch.load(model_args.custom_model_state_dict_path))
        model.to(training_args.device)
        model_type = "custom" #used for dataset tokens
    # Get datasets
    # NOTE(review): the dataset choice keys off model_args.kd_param, so a
    # "BERT" run with kd_param > 0 would also get the KD dataset — confirm
    # that is intended.
    if model_args.kd_param > 0:
        DataSetClass = DATA_CLASS_DICT["KD"]
    else:
        DataSetClass = DATA_CLASS_DICT["default"]
    train_dataset = (
        DataSetClass(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    # Evaluation never needs teacher logits, so the plain dataset is used.
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Converts logits + padded label ids to per-sentence label-string lists,
        # dropping positions whose label equals the CrossEntropyLoss ignore
        # index (-100), i.e. padding/subword positions.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        # Entity-level metrics via seqeval over the aligned label sequences.
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Setup optimizer and lr scheduler
    # Standard BERT recipe: no weight decay for biases and LayerNorm weights.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": training_args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    if model_args.optimizer:
        optimizer_function = OPTIMIZER_DICT[model_args.optimizer]
        optimizer = optimizer_function(params=optimizer_grouped_parameters,
                                       lr=training_args.learning_rate,
                                       betas=(training_args.adam_beta1,
                                              training_args.adam_beta2),
                                       eps=training_args.adam_epsilon,
                                       weight_decay=training_args.weight_decay,
                                       amsgrad=False)
    else:
        optimizer = AdamW(params=optimizer_grouped_parameters,
                          lr=training_args.learning_rate,
                          betas=(training_args.adam_beta1, training_args.adam_beta2),
                          eps=training_args.adam_epsilon)
    # None lets the Trainer build its own (linear) schedule.
    lr_scheduler = get_constant_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=training_args.warmup_steps) if model_args.use_constant_lr else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        optimizers=(optimizer, lr_scheduler)
    )
    # Training
    if training_args.do_train:
        # assume model embeddings are initially frozen
        # Optional phase 1: train num_emb_frozen_train_epochs with the
        # embedding layer frozen, logging to "<logging_dir>_frozen", then
        # unfreeze and run the normal schedule below.
        if model_args.bert_embeddings_path and model_args.num_emb_frozen_train_epochs and model_args.num_emb_frozen_train_epochs>0:
            trainer_frozen = Trainer(
                model=model,
                args=training_args,
                train_dataset=train_dataset,
                eval_dataset=eval_dataset,
                compute_metrics=compute_metrics,
                optimizers=(optimizer, lr_scheduler)
            )
            logging_dir = training_args.logging_dir
            num_train_epochs = training_args.num_train_epochs
            trainer_frozen.args.num_train_epochs = model_args.num_emb_frozen_train_epochs
            trainer_frozen.args.logging_dir = logging_dir+"_frozen"
            # NOTE(review): assumes the custom model exposes its embedding
            # layer as `model.embedding` — confirm for each model class.
            model.embedding.weight.requires_grad = False
            logger.info("Training model with frozen embedding layer for %d epochs", model_args.num_emb_frozen_train_epochs)
            trainer_frozen.train()
            # resume original training unfrozen
            model.embedding.weight.requires_grad = True
            training_args.logging_dir = logging_dir
            training_args.num_train_epochs = num_train_epochs
        if training_args.num_train_epochs > 0:
            trainer.train()
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # `index` is the per-process ordinal supplied by xla_spawn; main() reads
    # its configuration from sys.argv, so the ordinal itself is unused here.
    main()
if __name__ == "__main__":
    main()
|
from unittest import TestCase
from unittest.mock import MagicMock
from basketball_reference_web_scraper.html import BasicBoxScoreRow
class TestBasicBoxScoreRow(TestCase):
    """Unit tests for BasicBoxScoreRow's stat properties.

    Each property is covered by two tests following one naming convention:
    ``..._when_cells_exist`` (xpath returns a cell; the cell text is returned)
    and ``..._when_cells_do_not_exist`` (xpath returns no cells; '' is
    returned). Several tests were previously misnamed ``..._when_cells_exist``
    while actually mocking the no-cells case; they are renamed for accuracy.
    """
    def setUp(self):
        self.html = MagicMock()
    def test_playing_time_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some playing time"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).playing_time, "some playing time")
        self.html.xpath.assert_called_once_with('td[@data-stat="mp"]')
    def test_playing_time_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).playing_time, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="mp"]')
    def test_minutes_played_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some minutes played"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).minutes_played, "some minutes played")
        self.html.xpath.assert_called_once_with('td[@data-stat="mp"]')
    def test_minutes_played_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).minutes_played, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="mp"]')
    def test_made_field_goals_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some made field goals"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).made_field_goals, "some made field goals")
        self.html.xpath.assert_called_once_with('td[@data-stat="fg"]')
    def test_made_field_goals_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).made_field_goals, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="fg"]')
    def test_attempted_field_goals_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some attempted field goals"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).attempted_field_goals, "some attempted field goals")
        self.html.xpath.assert_called_once_with('td[@data-stat="fga"]')
    def test_attempted_field_goals_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).attempted_field_goals, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="fga"]')
    def test_made_three_point_field_goals_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some made three point field goals"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(
            BasicBoxScoreRow(html=self.html).made_three_point_field_goals,
            "some made three point field goals",
        )
        self.html.xpath.assert_called_once_with('td[@data-stat="fg3"]')
    def test_made_three_point_field_goals_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).made_three_point_field_goals, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="fg3"]')
    def test_attempted_three_point_field_goals_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some attempted three point field goals"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(
            BasicBoxScoreRow(html=self.html).attempted_three_point_field_goals,
            "some attempted three point field goals",
        )
        self.html.xpath.assert_called_once_with('td[@data-stat="fg3a"]')
    def test_attempted_three_point_field_goals_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).attempted_three_point_field_goals, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="fg3a"]')
    def test_made_free_throws_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some made free throws"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).made_free_throws, "some made free throws")
        self.html.xpath.assert_called_once_with('td[@data-stat="ft"]')
    def test_made_free_throws_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).made_free_throws, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="ft"]')
    def test_attempted_free_throws_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some attempted free throws"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).attempted_free_throws, "some attempted free throws")
        self.html.xpath.assert_called_once_with('td[@data-stat="fta"]')
    def test_attempted_free_throws_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).attempted_free_throws, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="fta"]')
    def test_offensive_rebounds_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some offensive rebounds"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).offensive_rebounds, "some offensive rebounds")
        self.html.xpath.assert_called_once_with('td[@data-stat="orb"]')
    def test_offensive_rebounds_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).offensive_rebounds, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="orb"]')
    def test_defensive_rebounds_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some defensive rebounds"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).defensive_rebounds, "some defensive rebounds")
        self.html.xpath.assert_called_once_with('td[@data-stat="drb"]')
    def test_defensive_rebounds_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).defensive_rebounds, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="drb"]')
    def test_assists_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some assists"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).assists, "some assists")
        self.html.xpath.assert_called_once_with('td[@data-stat="ast"]')
    def test_assists_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).assists, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="ast"]')
    def test_steals_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some steals"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).steals, "some steals")
        self.html.xpath.assert_called_once_with('td[@data-stat="stl"]')
    def test_steals_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).steals, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="stl"]')
    def test_blocks_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some blocks"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).blocks, "some blocks")
        self.html.xpath.assert_called_once_with('td[@data-stat="blk"]')
    def test_blocks_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).blocks, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="blk"]')
    def test_turnovers_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some turnovers"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).turnovers, "some turnovers")
        self.html.xpath.assert_called_once_with('td[@data-stat="tov"]')
    def test_turnovers_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).turnovers, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="tov"]')
    def test_personal_fouls_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some personal fouls"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).personal_fouls, "some personal fouls")
        self.html.xpath.assert_called_once_with('td[@data-stat="pf"]')
    def test_personal_fouls_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).personal_fouls, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="pf"]')
    def test_points_when_cells_exist(self):
        cell = MagicMock(text_content=MagicMock(return_value="some points"))
        self.html.xpath = MagicMock(return_value=[cell])
        self.assertEqual(BasicBoxScoreRow(html=self.html).points, "some points")
        self.html.xpath.assert_called_once_with('td[@data-stat="pts"]')
    def test_points_is_empty_string_when_cells_do_not_exist(self):
        self.html.xpath = MagicMock(return_value=[])
        self.assertEqual(BasicBoxScoreRow(html=self.html).points, '')
        self.html.xpath.assert_called_once_with('td[@data-stat="pts"]')
|
"""Builds and runs TF model training and evaluation.
Defined model and training based on input arguments.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import json
import os
import posixpath
import sys
import tensorflow as tf
from constants import constants
from trainer import input_fn_utils
from trainer import model
def run(args):
    """Runs tensorflow model training.
    Builds the estimator, wires train/eval input functions over the
    preprocessed data in args.input_dir, and drives
    tf.estimator.train_and_evaluate, exporting the latest SavedModel
    for serving after evaluation.
    Args:
        args: Arguments parsed at program executions.
    """
    estimator = model.build_estimator(
        output_dir=args.output_dir,
        first_layer_size=args.first_layer_size,
        num_layers=args.num_layers,
        dropout=args.dropout,
        learning_rate=args.learning_rate,
        save_checkpoints_steps=args.save_checkpoints_steps)
    # Input functions read the preprocessing pipeline's output from input_dir.
    train_input_fn = input_fn_utils.read_dataset(
        input_dir=args.input_dir,
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        batch_size=args.batch_size)
    eval_input_fn = input_fn_utils.read_dataset(
        input_dir=args.input_dir,
        mode=tf.contrib.learn.ModeKeys.EVAL,
        batch_size=args.batch_size)
    serving_input_fn = input_fn_utils.get_serving_input_fn(args.input_dir)
    train_spec = tf.estimator.TrainSpec(
        input_fn=train_input_fn, hooks=[], max_steps=args.max_steps)
    # LatestExporter writes a SavedModel under output_dir/export/exporter.
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn, hooks=[], exporters=exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def parse_arguments(argv):
    """Parses execution arguments and replaces default values.

    Args:
        argv: Input arguments from sys.

    Returns:
        Parsed arguments, with `output_dir` extended by `trials/<trial_id>`
        so concurrent hyper-parameter tuning trials do not clobber each other.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--batch_size',
        type=int,
        default=128,
        help='Batch size to use during training.')
    parser.add_argument(
        '--dropout',
        type=float,
        default=None,
        help='Percent of nodes to dropout in dropout layer.')
    parser.add_argument(
        '--first_layer_size',
        type=int,
        default=15,
        help='Size of first hidden layer of network.')
    parser.add_argument(
        '--input_dir',
        required=True,
        help='GCS or local directory containing outputs from preprocessing.')
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.001,
        help='Learning rate to use during training.')
    parser.add_argument(
        '--max_steps',
        type=int,
        default=10000,
        help='Maximum number of steps to train model.')
    parser.add_argument(
        '--num_layers',
        type=int,
        default=1,
        help='Number of hidden layers of network.')
    parser.add_argument(
        '--output_dir',
        required=True,
        help='Directory where model outputs will be written.')
    parser.add_argument(
        '--save_checkpoints_steps',
        type=int,
        default=500,
        help='Number of steps between checkpoint saves.')
    args, _ = parser.parse_known_args(args=argv[1:])
    # Adds the trial number from environment variables to avoid clobber output
    # during hyper-parameters tuning.
    trial_id = json.loads(os.environ.get('TF_CONFIG', '{}')).get('task', {}).get(
        'trial', '')
    # The tuning service may deliver the trial id as an int; normalize to str
    # so posixpath.join never raises, and default to '1' outside tuning.
    trial_id = str(trial_id) if trial_id else '1'
    args.output_dir = posixpath.join(args.output_dir, 'trials', trial_id)
    return args
def main():
    """Parses execution arguments and calls running function.
    Checks current OS. Posix OS is required for local and GCP paths consistency.
    Raises:
        OSError: Posix OS required.
    """
    # Output paths are joined with posixpath, so a posix OS keeps local
    # filesystem paths and GCS paths consistent.
    if os.name != 'posix':
        raise OSError('Posix OS required.')
    args = parse_arguments(sys.argv)
    tf.logging.set_verbosity(tf.logging.INFO)
    run(args)
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared resource arguments and flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.command_lib.secrets import completers as secrets_completers
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import resources
# Args
def AddDataFile(parser, positional=False, **kwargs):
  """Add the data-file argument for reading secret data from a file or stdin."""
  parser.add_argument(
      _ArgOrFlag('data-file', positional),
      metavar='PATH',
      help=('File path from which to read secret data. Set this to "-" to read '
            'the secret data from stdin.'),
      **kwargs)
def AddProject(parser, positional=False, **kwargs):
  """Add a project resource argument to the parser."""
  concept_parsers.ConceptParser.ForResource(
      name=_ArgOrFlag('project', positional),
      resource_spec=GetProjectResourceSpec(),
      group_help='The project ID.',
      **kwargs).AddToParser(parser)
def AddLocation(parser, purpose, positional=False, **kwargs):
  """Add a location resource argument; `purpose` completes the group help."""
  concept_parsers.ConceptParser.ForResource(
      name=_ArgOrFlag('location', positional),
      resource_spec=GetLocationResourceSpec(),
      group_help='The location {}.'.format(purpose),
      **kwargs).AddToParser(parser)
def AddReplicationPolicyFile(parser, positional=False, **kwargs):
  """Add the replication-policy-file argument.

  Args:
    parser: The argparse parser to add the argument to.
    positional: Whether the argument is positional rather than a flag.
    **kwargs: Extra keyword arguments forwarded to add_argument.
  """
  parser.add_argument(
      _ArgOrFlag('replication-policy-file', positional),
      metavar='REPLICATION-POLICY-FILE',
      help=(
          'JSON or YAML file to use to read the replication policy. The file '
          'must conform to '
          # Trailing space added: the original rendered "...#replication.Set".
          'https://cloud.google.com/secret-manager/docs/reference/rest/v1/projects.secrets#replication. '
          'Set this to "-" to read from stdin.'),
      **kwargs)
def AddKmsKeyName(parser, positional=False, **kwargs):
  """Add the kms-key-name argument (CMEK key for automatic-replication secrets)."""
  parser.add_argument(
      _ArgOrFlag('kms-key-name', positional),
      metavar='KMS-KEY-NAME',
      help=('Global KMS key with which to encrypt and decrypt the secret. Only '
            'valid for secrets with an automatic replication policy.'),
      **kwargs)
def AddSetKmsKeyName(parser, positional=False, **kwargs):
  """Add the set-kms-key argument for rotating the CMEK key on a secret."""
  parser.add_argument(
      _ArgOrFlag('set-kms-key', positional),
      metavar='SET-KMS-KEY',
      help=(
          'New KMS key with which to encrypt and decrypt future secret versions.'
      ),
      **kwargs)
def AddRemoveCmek(parser, positional=False, **kwargs):
  """Add the remove-cmek boolean flag (revert to Google-managed encryption)."""
  parser.add_argument(
      _ArgOrFlag('remove-cmek', positional),
      action='store_true',
      help=(
          'Remove customer managed encryption key so that future versions will '
          'be encrypted by a Google managed encryption key.'),
      **kwargs)
def AddReplicaLocation(parser, positional=False, **kwargs):
  """Add the location argument selecting which replica to update."""
  parser.add_argument(
      _ArgOrFlag('location', positional),
      metavar='REPLICA-LOCATION',
      help=('Location of replica to update. For secrets with automatic '
            'replication policies, this can be omitted.'),
      **kwargs)
def AddSecret(parser, purpose, positional=False, **kwargs):
  """Add a secret resource argument; `purpose` completes the group help."""
  concept_parsers.ConceptParser.ForResource(
      name=_ArgOrFlag('secret', positional),
      resource_spec=GetSecretResourceSpec(),
      group_help='The secret {}.'.format(purpose),
      **kwargs).AddToParser(parser)
def AddVersion(parser, purpose, positional=False, **kwargs):
  """Add a secret-version resource argument (numeric versions only)."""
  concept_parsers.ConceptParser.ForResource(
      name=_ArgOrFlag('version', positional),
      resource_spec=GetVersionResourceSpec(),
      group_help=('Numeric secret version {}.').format(purpose),
      **kwargs).AddToParser(parser)
def AddVersionOrLatest(parser, purpose, positional=False, **kwargs):
  """Add a secret-version argument that also accepts the literal `latest`."""
  concept_parsers.ConceptParser.ForResource(
      name=_ArgOrFlag('version', positional),
      resource_spec=GetVersionResourceSpec(),
      group_help=('Numeric secret version {} or `latest` to use the latest '
                  'version.').format(purpose),
      **kwargs).AddToParser(parser)
def AddTopics(parser, positional=False, **kwargs):
  """Add the topics list argument (comma-separated Pub/Sub topics)."""
  parser.add_argument(
      _ArgOrFlag('topics', positional),
      metavar='TOPICS',
      type=arg_parsers.ArgList(),
      action=arg_parsers.UpdateAction,
      help=('List of Pub/Sub topics to configure on the secret.'),
      **kwargs)
def AddUpdateTopicsGroup(parser):
  """Add flags for specifying topics on secret updates.

  --add-topics, --remove-topics and --clear-topics are mutually exclusive.
  """
  group = parser.add_group(mutex=True, help='Topics.')
  group.add_argument(
      _ArgOrFlag('add-topics', False),
      metavar='ADD-TOPICS',
      type=arg_parsers.ArgList(),
      action=arg_parsers.UpdateAction,
      help=('List of Pub/Sub topics to add to the secret.'))
  group.add_argument(
      _ArgOrFlag('remove-topics', False),
      metavar='REMOVE-TOPICS',
      type=arg_parsers.ArgList(),
      action=arg_parsers.UpdateAction,
      help=('List of Pub/Sub topics to remove from the secret.'))
  group.add_argument(
      _ArgOrFlag('clear-topics', False),
      action='store_true',
      help=('Clear all Pub/Sub topics from the secret.'))
def AddUpdateReplicationGroup(parser):
  """Add flags for specifying replication policy updates.

  --remove-cmek is mutually exclusive with the CMEK-update subgroup
  (--set-kms-key plus optional --location).
  """
  group = parser.add_group(mutex=True, help='Replication update.')
  group.add_argument(
      _ArgOrFlag('remove-cmek', False),
      action='store_true',
      help=(
          'Remove customer managed encryption key so that future versions will '
          'be encrypted by a Google managed encryption key.'))
  # Subgroup: set a new CMEK key, optionally scoped to one replica location.
  subgroup = group.add_group(help='CMEK Update.')
  subgroup.add_argument(
      _ArgOrFlag('set-kms-key', False),
      metavar='SET-KMS-KEY',
      help=(
          'New KMS key with which to encrypt and decrypt future secret versions.'
      ))
  subgroup.add_argument(
      _ArgOrFlag('location', False),
      metavar='REPLICA-LOCATION',
      help=('Location of replica to update. For secrets with automatic '
            'replication policies, this can be omitted.'))
def AddCreateReplicationPolicyGroup(parser):
  """Add flags for specifying replication policy on secret creation.

  Either a policy file or the inline subgroup (--replication-policy,
  --kms-key-name, --locations) may be given, not both.
  """
  group = parser.add_group(mutex=True, help='Replication policy.')
  group.add_argument(
      _ArgOrFlag('replication-policy-file', False),
      metavar='REPLICATION-POLICY-FILE',
      help=(
          'JSON or YAML file to use to read the replication policy. The file '
          'must conform to '
          # Trailing space added: the original rendered "...#replication.Set".
          'https://cloud.google.com/secret-manager/docs/reference/rest/v1/projects.secrets#replication. '
          'Set this to "-" to read from stdin.'))
  subgroup = group.add_group(help='Inline replication arguments.')
  subgroup.add_argument(
      _ArgOrFlag('replication-policy', False),
      metavar='POLICY',
      help=('The type of replication policy to apply to this secret. Allowed '
            'values are "automatic" and "user-managed". If user-managed then '
            '--locations must also be provided.'))
  subgroup.add_argument(
      _ArgOrFlag('kms-key-name', False),
      metavar='KMS-KEY-NAME',
      help=('Global KMS key with which to encrypt and decrypt the secret. Only '
            'valid for secrets with an automatic replication policy.'))
  subgroup.add_argument(
      _ArgOrFlag('locations', False),
      action=arg_parsers.UpdateAction,
      metavar='LOCATION',
      type=arg_parsers.ArgList(),
      help=('Comma-separated list of locations in which the secret should be '
            'replicated.'))
def AddCreateExpirationGroup(parser):
  """Add flags for specifying expiration on secret creates.

  --expire-time and --ttl are mutually exclusive.
  """
  group = parser.add_group(mutex=True, help='Expiration.')
  group.add_argument(
      _ArgOrFlag('expire-time', False),
      metavar='EXPIRE-TIME',
      help=('Timestamp at which to automatically delete the secret.'))
  group.add_argument(
      _ArgOrFlag('ttl', False),
      metavar='TTL',
      help=(
          'Duration of time (in seconds) from the running of the command until '
          'the secret is automatically deleted.'))
def AddUpdateExpirationGroup(parser):
  """Add flags for specifying expiration on secret updates.

  --expire-time, --ttl and --remove-expiration are mutually exclusive.
  """
  group = parser.add_group(mutex=True, help='Expiration.')
  group.add_argument(
      _ArgOrFlag('expire-time', False),
      metavar='EXPIRE-TIME',
      help=('Timestamp at which to automatically delete the secret.'))
  group.add_argument(
      _ArgOrFlag('ttl', False),
      metavar='TTL',
      help=(
          'Duration of time (in seconds) from the running of the command until '
          'the secret is automatically deleted.'))
  group.add_argument(
      _ArgOrFlag('remove-expiration', False),
      action='store_true',
      help=(
          'If set, removes scheduled expiration from secret (if it had one).'))
def AddCreateRotationGroup(parser):
  """Add flags for specifying rotation on secret creates.

  Non-mutex group: both flags may be combined.
  """
  group = parser.add_group(mutex=False, help='Rotation.')
  group.add_argument(
      _ArgOrFlag('next-rotation-time', False),
      help=('Timestamp at which to send rotation notification.'))
  group.add_argument(
      _ArgOrFlag('rotation-period', False),
      help=('Duration of time (in seconds) between rotation notifications.'))
def AddUpdateRotationGroup(parser):
  """Add flags for specifying rotation on secret updates.

  Non-mutex group containing the set/remove flags for the rotation schedule.
  """
  group = parser.add_group(mutex=False, help='Rotation.')
  group.add_argument(
      _ArgOrFlag('next-rotation-time', False),
      help=('Timestamp at which to send rotation notification.'))
  group.add_argument(
      _ArgOrFlag('remove-next-rotation-time', False),
      action='store_true',
      help=('Remove timestamp at which to send rotation notification.'))
  group.add_argument(
      # Fix: dropped a stray "\" line-continuation after this argument.
      _ArgOrFlag('rotation-period', False),
      help=('Duration of time (in seconds) between rotation notifications.'))
  group.add_argument(
      _ArgOrFlag('remove-rotation-period', False),
      action='store_true',
      help=(
          'If set, removes the rotation period, cancelling all rotations except for the next one.'
      ))
  group.add_argument(
      _ArgOrFlag('remove-rotation-schedule', False),
      action='store_true',
      help=('If set, removes rotation policy from a secret.'))
def AddSecretEtag(parser):
  """Add flag for specifying the current secret etag."""
  parser.add_argument(
      _ArgOrFlag('etag', False),
      metavar='ETAG',
      help=(
          'Current entity tag (ETag) of the secret. If this flag is defined, the secret is updated only if the ETag provided matched the current secret\'s ETag.'
      ))
def AddVersionEtag(parser):
  """Add flag for specifying the current secret version etag."""
  parser.add_argument(
      _ArgOrFlag('etag', False),
      metavar='ETAG',
      help=(
          'Current entity tag (ETag) of the secret version. If this flag is defined, the version is updated only if the ETag provided matched the current version\'s ETag.'
      ))
def _ArgOrFlag(name, positional):
"""Returns the argument name in resource argument format or flag format.
Args:
name (str): name of the argument
positional (bool): whether the argument is positional
Returns:
arg (str): the argument or flag
"""
if positional:
return name.upper().replace('-', '_')
return '--{}'.format(name)
### Attribute configurations
def GetProjectAttributeConfig():
  """Returns the default project attribute config for resource specs."""
  return concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG
def GetLocationAttributeConfig():
  """Returns the attribute config for the location resource parameter."""
  return concepts.ResourceParameterAttributeConfig(
      name='location',
      help_text='The location of the {resource}.',
      completion_request_params={'fieldMask': 'name'},
      completion_id_field='name')
def GetSecretAttributeConfig():
  """Returns the attribute config for the secret resource parameter."""
  return concepts.ResourceParameterAttributeConfig(
      name='secret',
      help_text='The secret of the {resource}.',
      completer=secrets_completers.SecretsCompleter)
def GetVersionAttributeConfig():
  """Returns the attribute config for the version resource parameter."""
  return concepts.ResourceParameterAttributeConfig(
      name='version',
      help_text='The version of the {resource}.',
      completion_request_params={'fieldMask': 'name'},
      completion_id_field='name')
# Resource specs
def GetProjectResourceSpec():
  """Returns the resource spec for a Secret Manager project."""
  return concepts.ResourceSpec(
      resource_collection='secretmanager.projects',
      resource_name='project',
      plural_name='projects',
      disable_auto_completers=False,
      projectsId=GetProjectAttributeConfig())
def GetLocationResourceSpec():
  """Returns the resource spec for a Secret Manager location."""
  return concepts.ResourceSpec(
      resource_collection='secretmanager.projects.locations',
      resource_name='location',
      plural_name='locations',
      disable_auto_completers=False,
      locationsId=GetLocationAttributeConfig(),
      projectsId=GetProjectAttributeConfig())
def GetSecretResourceSpec():
  """Returns the resource spec for a secret."""
  return concepts.ResourceSpec(
      resource_collection='secretmanager.projects.secrets',
      resource_name='secret',
      plural_name='secrets',
      disable_auto_completers=False,
      secretsId=GetSecretAttributeConfig(),
      projectsId=GetProjectAttributeConfig())
def GetVersionResourceSpec():
  """Returns the resource spec for a secret version.

  NOTE(review): plural_name is the singular 'version', unlike the other
  specs in this module which pluralize — confirm whether that is intended.
  """
  return concepts.ResourceSpec(
      # Pass the collection by keyword for consistency with the sibling specs.
      resource_collection='secretmanager.projects.secrets.versions',
      resource_name='version',
      plural_name='version',
      disable_auto_completers=False,
      versionsId=GetVersionAttributeConfig(),
      secretsId=GetSecretAttributeConfig(),
      projectsId=GetProjectAttributeConfig())
# Resource parsers
def ParseProjectRef(ref, **kwargs):
  """Parses `ref` as a secretmanager project resource reference."""
  kwargs['collection'] = 'secretmanager.projects'
  return resources.REGISTRY.Parse(ref, **kwargs)
def ParseLocationRef(ref, **kwargs):
  """Parses `ref` as a secretmanager location resource reference."""
  kwargs['collection'] = 'secretmanager.projects.locations'
  return resources.REGISTRY.Parse(ref, **kwargs)
def ParseSecretRef(ref, **kwargs):
  """Parses `ref` as a secret resource reference."""
  kwargs['collection'] = 'secretmanager.projects.secrets'
  return resources.REGISTRY.Parse(ref, **kwargs)
def ParseVersionRef(ref, **kwargs):
  """Parses `ref` as a secret version resource reference."""
  kwargs['collection'] = 'secretmanager.projects.secrets.versions'
  return resources.REGISTRY.Parse(ref, **kwargs)
|
<filename>lib/ext/gnuradio-tools/examples/snr_estimators.py
#!/usr/bin/env python
import sys

import numpy as np
# Verify optional dependencies up front so the user gets a clear message
# instead of a traceback. Fix: Python-2 print statements -> print() calls.
try:
    import scipy
    from scipy import stats
except ImportError:
    print("Error: Program requires scipy (www.scipy.org).")
    sys.exit(1)
try:
    import pylab
except ImportError:
    print("Error: Program requires Matplotlib (matplotlib.sourceforge.net).")
    sys.exit(1)
from gnuradio import gr, digital
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal, comparing several estimators.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
def online_skewness(data, alpha):
    """Single-pass (online) third-central-moment tracker with smoothing.

    Fixes: xrange -> range (Python 3); the `alpha` parameter was accepted but
    ignored — the smoothing constant was hard-coded to 0.001. It is now used
    (the existing caller passes 0.001, so behavior there is unchanged).

    Args:
        data: sequence of real samples.
        alpha: exponential smoothing coefficient for the running M3.

    Returns:
        The exponentially filtered third central moment (d_M3).
    """
    mean = 0.0
    M2 = 0.0
    M3 = 0.0
    d_M3 = 0.0
    for n in range(len(data)):
        delta = data[n] - mean
        delta_n = delta / (n + 1)
        term1 = delta * delta_n * n
        mean += delta_n
        # NOTE(review): M3 is overwritten (not accumulated) each iteration,
        # exactly as in the original; the textbook recurrence adds to M3.
        M3 = term1 * delta_n * (n - 1) - 3 * delta_n * M2
        M2 += term1
        d_M3 = alpha * M3 + (1 - alpha) * d_M3
    return d_M3
def snr_est_simple(signal):
    """SNR estimate from the first and second moments of the signal.

    Fix: the deprecated/removed scipy.* NumPy aliases (scipy.mean, etc.)
    are replaced with their numpy equivalents; the math is unchanged.

    Args:
        signal: array-like of (possibly complex) samples.

    Returns:
        Tuple (snr_db, snr_ratio).
    """
    y1 = np.mean(np.abs(signal))
    y2 = np.real(np.mean(np.asarray(signal) ** 2))
    # y1^2 estimates signal power; y2 removes the noise contribution.
    y3 = (y1 * y1 - y2)
    snr_rat = y1 * y1 / y3
    return 10.0 * np.log10(snr_rat), snr_rat
def snr_est_skew(signal):
    """Skewness-corrected moment-based SNR estimate.

    Fix: removed scipy.* NumPy aliases -> numpy; trailing semicolon dropped.

    Args:
        signal: complex ndarray of samples (uses .real).

    Returns:
        Tuple (snr_db, snr_ratio).
    """
    y1 = np.mean(np.abs(signal))
    y2 = np.mean(np.real(np.asarray(signal) ** 2))
    y3 = (y1 * y1 - y2)
    # Third-moment (skew) correction computed from the real part.
    y4 = online_skewness(np.abs(signal.real), 0.001)
    skw = y4 * y4 / (y2 * y2 * y2)
    snr_rat = y1 * y1 / (y3 + skw * y1 * y1)
    return 10.0 * np.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
    """SNR estimate from the second (M2) and fourth (M4) absolute moments.

    Fix: removed scipy.* NumPy aliases -> numpy; math unchanged.

    Args:
        signal: array-like of (possibly complex) samples.

    Returns:
        Tuple (snr_db, snr_ratio).
    """
    M2 = np.mean(np.abs(signal) ** 2)
    M4 = np.mean(np.abs(signal) ** 4)
    snr_rat = 2 * np.sqrt(2 * M2 * M2 - M4) / (M2 - np.sqrt(2 * M2 * M2 - M4))
    return 10.0 * np.log10(snr_rat), snr_rat
def snr_est_svr(signal):
    """Signal-to-variation-ratio (SVR) SNR estimate.

    Fixes: xrange -> range (Python 3); scipy.* NumPy aliases -> numpy.

    Args:
        signal: sequence of (possibly complex) samples, len >= 2.

    Returns:
        Tuple (snr_db, snr_ratio).
    """
    N = len(signal)
    ssum = 0.0
    msum = 0.0
    for i in range(1, N):
        # Lag-1 power correlation vs. fourth moment, accumulated online.
        ssum += (abs(signal[i]) ** 2) * (abs(signal[i - 1]) ** 2)
        msum += (abs(signal[i]) ** 4)
    savg = (1.0 / (float(N) - 1.0)) * ssum
    mavg = (1.0 / (float(N) - 1.0)) * msum
    beta = savg / (mavg - savg)
    snr_rat = 2 * ((beta - 1) + np.sqrt(beta * (beta - 1)))
    return 10.0 * np.log10(snr_rat), snr_rat
def main():
    """Sweep a range of true SNRs, estimate each with the selected Python
    reference estimator and the GNU Radio block, and plot all three curves.

    Fixes: Python-2 print statements -> print(); dict.keys() materialized
    with list() for optparse choices/formatting; scipy.* aliases -> numpy.
    """
    gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
                     "skew": digital.SNR_EST_SKEW,
                     "m2m4": digital.SNR_EST_M2M4,
                     "svr": digital.SNR_EST_SVR}
    py_estimators = {"simple": snr_est_simple,
                     "skew": snr_est_skew,
                     "m2m4": snr_est_m2m4,
                     "svr": snr_est_svr}
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=10000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("", "--snr-min", type="float", default=-5,
                      help="Minimum SNR [default=%default]")
    parser.add_option("", "--snr-max", type="float", default=20,
                      help="Maximum SNR [default=%default]")
    parser.add_option("", "--snr-step", type="float", default=0.5,
                      help="SNR step amount [default=%default]")
    parser.add_option("-t", "--type", type="choice",
                      choices=list(gr_estimators.keys()), default="simple",
                      help="Estimator type {0} [default=%default]".format(
                          list(gr_estimators.keys())))
    (options, args) = parser.parse_args()
    N = options.nsamples
    xx = np.random.randn(N)
    xy = np.random.randn(N)
    # Random +/-1 BPSK symbols as complex64.
    bits = 2 * np.complex64(np.random.randint(0, 2, N)) - 1
    snr_known = list()
    snr_python = list()
    snr_gr = list()
    # when to issue an SNR tag; can be ignored in this example.
    ntag = 10000
    n_cpx = xx + 1j * xy
    py_est = py_estimators[options.type]
    gr_est = gr_estimators[options.type]
    SNR_min = options.snr_min
    SNR_max = options.snr_max
    SNR_step = options.snr_step
    SNR_dB = np.arange(SNR_min, SNR_max + SNR_step, SNR_step)
    for snr in SNR_dB:
        SNR = 10.0 ** (snr / 10.0)
        scale = np.sqrt(SNR)
        yy = bits + n_cpx / scale
        print("SNR: ", snr)
        # "Known" SNR computed directly from the generated signal and noise.
        Sknown = np.mean(yy ** 2)
        Nknown = np.var(n_cpx / scale) / 2
        snr0 = Sknown / Nknown
        snr0dB = 10.0 * np.log10(snr0)
        snr_known.append(snr0dB)
        snrdB, snr = py_est(yy)
        snr_python.append(snrdB)
        # Run the same symbols through the GNU Radio estimator block.
        gr_src = gr.vector_source_c(bits.tolist(), False)
        gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
        gr_chn = gr.channel_model(1.0 / scale)
        gr_snk = gr.null_sink(gr.sizeof_gr_complex)
        tb = gr.top_block()
        tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
        tb.run()
        snr_gr.append(gr_snr.snr())
    f1 = pylab.figure(1)
    s1 = f1.add_subplot(1, 1, 1)
    s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
    s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
    s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
    s1.grid(True)
    s1.set_title('SNR Estimators')
    s1.set_xlabel('SNR (dB)')
    s1.set_ylabel('Estimated SNR')
    s1.legend()
    pylab.show()
|
<reponame>xiejx5/GeoSpace<filename>geospace/gee_export.py
import os
import ee
import zipfile
import requests
import pandas as pd
# gee initialization
def gee_initial():
    """Initialize the Earth Engine API, authenticating first if needed."""
    try:
        ee.Initialize()
    except Exception:
        # Initialize fails when no cached credentials exist; authenticate
        # interactively, then retry.
        ee.Authenticate()
        ee.Initialize()
def gee_export_tif(image, filename, crs=None, crs_transform=None, scale=None, region=None, file_per_band=False):
    """Export an ee.Image as GeoTIFF via a download URL.

    Pass the original image, not a reprojected one.

    Args:
        image (ee.Image): the image to export
        filename (str): destination path; must end with .tif
        crs (str, optional): default CRS for bands that do not specify one
        crs_transform (optional): affine transform controlling resolution/alignment
        scale (optional): pixel scale; used only when crs_transform is None
        region (object, optional): polygon restricting the download; defaults
            to the image footprint
        file_per_band (bool, optional): whether to produce one GeoTIFF per band
    """
    if not isinstance(image, ee.Image):
        print("The image must be an ee.Image.")
        return
    filename = os.path.abspath(filename)
    basename = os.path.basename(filename)
    name = os.path.splitext(basename)[0]
    filetype = os.path.splitext(basename)[1][1:].lower()
    filename_zip = filename.replace(".tif", ".zip")
    if filetype != "tif":
        print("The filename must end with .tif")
        return
    try:
        print("Generating URL ...")
        params = {"name": name, "filePerBand": file_per_band}
        if region is None:
            region = image.geometry().getInfo()
        params["region"] = region
        if crs is not None:
            params["crs"] = crs
        # crs_transform takes precedence over scale; fall back to the image's
        # native nominal scale when neither is given.
        if crs_transform is not None:
            params["crs_transform"] = crs_transform
        elif scale is not None:
            params["scale"] = scale
        else:
            params["scale"] = image.projection().nominalScale()
        url = image.getDownloadURL(params)
        print(f"Downloading data from {url}\nPlease wait ...")
        r = requests.get(url, stream=True)
        if r.status_code != 200:
            print("An error occurred while downloading.")
            return
        with open(filename_zip, "wb") as fd:
            for chunk in r.iter_content(chunk_size=1024):
                fd.write(chunk)
    except Exception as e:
        print("An error occurred while downloading.")
        print(e)
        return
    try:
        with zipfile.ZipFile(filename_zip) as z:
            z.extractall(os.path.dirname(filename))
        os.remove(filename_zip)
        if file_per_band:
            print(f"Data downloaded to {os.path.dirname(filename)}")
        else:
            # Fix: report the actual output file; the original printed a
            # literal "(unknown)" placeholder inside the f-string.
            print(f"Data downloaded to {filename}")
    except Exception as e:
        print(e)
# ee.batch.Export.image.toDrive(
# **{
# 'image': image,
# 'description': asset,
# 'fileNamePrefix': asset, # this is the name actually
# 'folder': 'SoilGrids',
# 'region': fc.geometry(),
# 'crs': 'EPSG:4326',
# 'crsTransform': crs_transform,
# 'maxPixels': 1e13
# }).start()
# ee.batch.Export.image.toDrive(
# **{
# 'image': image,
# 'description': asset,
# 'fileNamePrefix': asset, # this is the name actually
# 'folder': 'SoilGrids',
# 'region': fc.geometry(),
# 'crs': 'EPSG:4326',
# 'crsTransform': crs_transform,
# 'maxPixels': 1e13
# }).start()
def gee_export_csv(fc, image, scale_enlarge=1, fields=None, return_url=False):
    """Export a CSV containing the per-feature mean of an image.

    Args:
        fc (ee.FeatureCollection): e.g. basins
        image (ee.Image): e.g. DEM
        scale_enlarge (int, optional): multiplier applied to the image's
            nominal scale for the reduction
        fields (list, optional): column selectors; defaults to
            ['ORDER', '.*mean']
        return_url (bool, optional): return the download URL instead of a
            DataFrame

    Returns:
        DataFrame with the selected fields (or the URL if return_url).
    """
    # Fix: avoid a mutable default argument; None means the standard fields.
    if fields is None:
        fields = ['ORDER', '.*mean']
    scale = image.projection().nominalScale().multiply(scale_enlarge)
    if image.projection().crs().getInfo() != 'EPSG:4326':
        image = image.reproject('EPSG:4326', None, scale)
    means = image.reduceRegions(**{
        'collection': fc,
        'reducer': ee.Reducer.mean(),
        'scale': scale,
    })
    url = means.select(fields, retainGeometry=False).getDownloadURL(filetype='csv')
    if return_url:
        return url
    else:
        return pd.read_csv(url)
|
<reponame>nickovs/pypssst
import pssst
import pytest
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
@pytest.fixture(scope="session")
def keys():
    """Session fixture: one X25519/AESGCM128 key pair each for server and client."""
    server_private_key, server_public_key = pssst.generate_key_pair(cipher_suite=pssst.CipherSuite.X25519_AESGCM128)
    client_private_key, client_public_key = pssst.generate_key_pair(cipher_suite=pssst.CipherSuite.X25519_AESGCM128)
    return server_private_key, server_public_key, client_private_key, client_public_key
@pytest.fixture(scope="session")
def server(keys):
    """Session fixture: a PSSSTServer built from the shared server private key."""
    server_private_key, server_public_key, client_private_key, client_public_key = keys
    server = pssst.PSSSTServer(server_private_key)
    return server
@pytest.fixture(scope="session")
def client(keys):
    """Session fixture: a PSSSTClient keyed to the server and client keys."""
    server_private_key, server_public_key, client_private_key, client_public_key = keys
    client = pssst.PSSSTClient(server_public_key, client_private_key)
    return client
@pytest.fixture(scope="session")
def test_message():
    """Fixture: fixed plaintext used by all packet-corruption tests.

    NOTE(review): the "test_" prefix on a fixture name is easy to confuse
    with a test function; consider renaming (e.g. "message").
    """
    return b"This is a test message"
def test_bad_request_flip_direction(server, client, test_message):
    """A request with its direction bit flipped must raise PSSSTNotRequest."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    # Flip the request bit
    request_packet_x = bytearray(request_packet)
    request_packet_x[0] ^= 0x80
    with pytest.raises(pssst.PSSSTNotRequest):
        server.unpack_request(bytes(request_packet_x))
def test_bad_request_cipher_NONE(server, client, test_message):
    """A request advertising cipher suite 0x00 must be rejected as unsupported."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    # Try cipher suite NONE
    request_packet_x = bytearray(request_packet)
    request_packet_x[3] = 0x00
    with pytest.raises(pssst.PSSSTUnsupportedCipher):
        server.unpack_request(bytes(request_packet_x))
def test_bad_request_cipher_FF(server, client, test_message):
    """A request advertising a bogus cipher suite must be rejected."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    # Try a bogus cipher suite
    request_packet_x = bytearray(request_packet)
    request_packet_x[3] |= 0xff
    with pytest.raises(pssst.PSSSTUnsupportedCipher):
        server.unpack_request(bytes(request_packet_x))
def test_bad_request_ciphertext(server, client, test_message):
    """A request with a corrupted ciphertext byte must fail decryption."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    # Mess with the ciphertext
    request_packet_x = bytearray(request_packet)
    request_packet_x[37] ^= 0xff
    with pytest.raises(pssst.PSSSTDecryptFailed):
        server.unpack_request(bytes(request_packet_x))
def test_bad_reply_flip_direction(server, client, test_message):
    """A reply whose direction bit is flipped must raise PSSSTNotReply."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    received_message, received_client_public_key, server_reply_handler = server.unpack_request(request_packet)
    reply_packet = server_reply_handler(received_message)
    # Flip the request bit
    reply_packet_x = bytearray(reply_packet)
    reply_packet_x[0] ^= 0x80
    with pytest.raises(pssst.PSSSTNotReply):
        client_reply_handler(bytes(reply_packet_x))
def test_bad_cipher_NONE(server, client, test_message):
    """A reply advertising cipher suite 0x00 must mismatch the request."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    received_message, received_client_public_key, server_reply_handler = server.unpack_request(request_packet)
    reply_packet = server_reply_handler(received_message)
    # Try cipher suite NONE
    reply_packet_x = bytearray(reply_packet)
    reply_packet_x[3] = 0x00
    with pytest.raises(pssst.PSSSTReplyMismatch):
        client_reply_handler(bytes(reply_packet_x))
def test_bad_reply_cipher_FF(server, client, test_message):
    """A reply advertising a bogus cipher suite must be rejected."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    received_message, received_client_public_key, server_reply_handler = server.unpack_request(request_packet)
    reply_packet = server_reply_handler(received_message)
    # Try a bogus cipher suite
    reply_packet_x = bytearray(reply_packet)
    reply_packet_x[3] |= 0xff
    with pytest.raises(pssst.PSSSTUnsupportedCipher):
        client_reply_handler(bytes(reply_packet_x))
def test_bad_reeply_ID(server, client, test_message):
    """A reply whose request identifier is corrupted must mismatch.

    NOTE(review): "reeply" in the test name is a typo; kept to preserve the
    externally visible test id.
    """
    request_packet, client_reply_handler = client.pack_request(test_message)
    received_message, received_client_public_key, server_reply_handler = server.unpack_request(request_packet)
    reply_packet = server_reply_handler(received_message)
    # Mess with the request identifier. Fix: b'\xff' * 32 keeps the 32-byte
    # slice the same length; the original b'\0xff' * 32 was 128 bytes
    # (NUL, 'x', 'f', 'f' repeated) and silently resized the packet.
    reply_packet_x = bytearray(reply_packet)
    reply_packet_x[4:36] = b'\xff' * 32
    with pytest.raises(pssst.PSSSTReplyMismatch):
        client_reply_handler(bytes(reply_packet_x))
def test_bad_reply_ciphertext(server, client, test_message):
    """A reply with a corrupted ciphertext byte must fail decryption."""
    request_packet, client_reply_handler = client.pack_request(test_message)
    received_message, received_client_public_key, server_reply_handler = server.unpack_request(request_packet)
    reply_packet = server_reply_handler(received_message)
    # Mess with the ciphertext
    reply_packet_x = bytearray(reply_packet)
    reply_packet_x[37] ^= 0xff
    with pytest.raises(pssst.PSSSTDecryptFailed):
        client_reply_handler(bytes(reply_packet_x))
|
<reponame>mrForce/immunoGalaxy
#!/usr/bin/python
import sys
import argparse
import subprocess
import shutil
import csv
from collections import Counter, namedtuple
import io
import os
import uuid
import zipfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
from matplotlib_venn import _math
from matplotlib_venn import venn2_circles
from matplotlib.patches import ConnectionPatch, Circle
from matplotlib.text import Text
import numpy as np
# Command-line interface: archives of unfiltered/filtered search results,
# an FDR cutoff (as a percentage), and the output plot path.
parser = argparse.ArgumentParser()
parser.add_argument('--unfiltered_archive', type=str)
parser.add_argument('--filtered_archive', type=str)
parser.add_argument('--fdr', type=float)
parser.add_argument('--plot', type=str)
args = parser.parse_args()
# NOTE(review): assert-based validation is stripped under `python -O`, and
# `assert(args.fdr)` also rejects a legitimate 0.0 — consider required=True.
assert(args.unfiltered_archive)
assert(args.filtered_archive)
assert(args.fdr)
assert(args.plot)
def plot_venn(search_one, search_two, output_location):
    """
    Draw a two-set Venn diagram of the spectra in two searches and save it
    as a PNG to output_location.

    search_one and search_two should both be dictionaries, mapping each scan
    to the peptide. The intersection region is split by a chord into the
    counts of concordant (same peptide) and discordant (different peptide)
    spectra.
    """
    spectra_one = set(search_one.keys())
    print('spectra one:')
    print(spectra_one)
    spectra_two = set(search_two.keys())
    common_spectra = spectra_one.intersection(spectra_two)
    print('common spectra')
    print(common_spectra)
    #the number of spectra shared between the two searches that match against different peptides
    discordant_spectra = 0
    #the number of spectra shared between the two searches that match against the same peptide
    concordant_spectra = 0
    for spectra in common_spectra:
        if search_one[spectra] == search_two[spectra]:
            concordant_spectra += 1
        else:
            discordant_spectra += 1
    circles = venn2_circles([spectra_one, spectra_two])
    sorted_circles = sorted(circles, key=lambda x: x.center[0])
    bigger_circle = max(circles, key=lambda x: x.radius)
    bigger_radius = bigger_circle.radius
    # Horizontal chord endpoints: where the line through both circle centers
    # crosses each circle's far edge, clipped to the lens-shaped intersection.
    left_point = np.array([sorted_circles[0].center[0] - sorted_circles[0].radius, sorted_circles[0].center[1]])
    right_point = np.array([sorted_circles[1].center[0] + sorted_circles[1].radius, sorted_circles[1].center[1]])
    left_intersection = max(_math.circle_line_intersection(sorted_circles[0].center, sorted_circles[0].radius, left_point, right_point), key=lambda x: x[0])
    right_intersection = min(_math.circle_line_intersection(sorted_circles[1].center, sorted_circles[1].radius, left_point, right_point), key=lambda x: x[0])
    line = ConnectionPatch(left_intersection, right_intersection, 'data', 'data')
    plt.gca().add_patch(line)
    print(sorted_circles[0].center)
    print(sorted_circles[1].center)
    circle_intersections = _math.circle_circle_intersection(sorted_circles[0].center, sorted_circles[0].radius, sorted_circles[1].center, sorted_circles[1].radius)
    upper_circle_intersection = max(circle_intersections, key=lambda x: x[1])
    #take the centroid
    upper_text_location = (left_intersection + right_intersection + upper_circle_intersection)/3.0
    #plt.rc('text', usetex=True)
    plt.text(upper_text_location[0], upper_text_location[1], str(concordant_spectra) + '\n' + r'$p_i = p_j$')
    lower_circle_intersection = min(circle_intersections, key=lambda x: x[1])
    lower_text_location = (left_intersection + right_intersection + lower_circle_intersection)/3.0
    plt.text(lower_text_location[0], lower_text_location[1], str(discordant_spectra) + '\n' + r'$p_i \neq p_j$')
    venn_diagram = venn2([spectra_one, spectra_two], ['Unfiltered', 'Filtered'])
    # Blank the default intersection label; the concordant/discordant counts
    # placed above replace it.
    venn_diagram.get_label_by_id('11').set_text('')
    matplotlib.pyplot.savefig(output_location, format='png')
def get_psms(zip_path, fdr_cutoff):
    """Extract scan -> peptide mappings from a percolator results archive.

    Opens the single 'percolator.target.psms.txt' file inside the zip and,
    for each scan, keeps the peptide of the highest-scoring PSM whose
    q-value passes the FDR cutoff.
    """
    with zipfile.ZipFile(zip_path, 'r') as archive:
        matches = [name for name in archive.namelist()
                   if name.endswith('percolator.target.psms.txt')]
        assert(len(matches) == 1)
        best = {}
        with archive.open(matches[0], 'r') as raw:
            reader = csv.DictReader(io.TextIOWrapper(raw), delimiter='\t')
            for row in reader:
                scan = row['scan']
                q_val = float(row['percolator q-value'])
                score = float(row['percolator score'])
                peptide = row['sequence']
                if q_val <= fdr_cutoff and (scan not in best or best[scan][0] < score):
                    best[scan] = (score, peptide)
    return {scan: entry[1] for scan, entry in best.items()}
# Resolve CLI inputs; the FDR is given as a percentage, convert to a fraction.
unfiltered_archive_path = args.unfiltered_archive
filtered_archive_path = args.filtered_archive
fdr = args.fdr/100.0
plot = args.plot
# Load both searches at the same FDR cutoff and render the comparison plot.
unfiltered_psms = get_psms(unfiltered_archive_path, fdr)
filtered_psms = get_psms(filtered_archive_path, fdr)
plot_venn(unfiltered_psms, filtered_psms, plot)
|
import random
# Convergence threshold for the fixed-point iteration in evaluate_board.
epsilon = 0.000001
# scores[2*encoded_board + turn]: one value per (board, turn) state.
scores = [0.0]*39366 #2*3^9
# Board squares hold 1, 0, or -1; start from all-ones with square 8 empty.
board = 9*[1]
board[8]=0
ties_allowed = False
done = False
def board_to_value(board, turn):
    """Encode a board (entries in {1, 0, -1}) plus a turn bit as an integer.

    Each square maps to a base-3 digit via s*(3s-1)//2 (1->1, 0->0, -1->2);
    squares are folded in from index 8 down to 0, and the turn occupies the
    low bit of the result.
    """
    encoded = 0
    for square in reversed(range(9)):
        digit = (board[square] * (3 * board[square] - 1)) // 2
        encoded = 3 * encoded + digit
    return 2 * encoded + turn
def value_to_board(value):
    """Decode an integer produced by board_to_value back into (board, turn)."""
    turn = value & 1
    digits = value >> 1
    squares = []
    for _ in range(9):
        digit = digits % 3
        # Inverse of the encoding digit map: 0 -> 0, 1 -> 1, 2 -> -1.
        squares.append(digit * (5 - 3 * digit) // 2)
        digits //= 3
    return (squares, turn)
#by decreasing Hamming weight
def increment_board():
    """Advance the global `board` to the next configuration in-place.

    Odometer-style enumeration over squares: 0 is skipped, 1 steps to -1,
    and -1 rolls over (carrying into later squares, then redistributing the
    consumed marks back onto the lowest indices). Mutates the module-level
    `board`; returns None.
    """
    complete = False
    i = 0
    while not complete:
        if board[i]==0:
            # Empty squares are passed over; they never change here.
            i+=1
        elif board[i]==1:
            board[i]=-1
            complete=True
        else:
            # board[i] == -1: roll this square over and carry.
            complete=True
            board[i]=0
            num_extra_bits = 0
            i+=1
            while i < 9 and board[i]==-1:
                board[i]=0
                num_extra_bits+=1
                i+=1
            if i < 9:
                if board[i]==0:
                    board[i] = 1
                else:
                    board[i] = -1
                    num_extra_bits+=1
                # Redistribute the cleared marks onto the lowest squares.
                for j in range(num_extra_bits):
                    board[j]=1
# All eight winning lines: rows, columns, and the two diagonals.
lines = [[0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8], [0,4,8], [2,4,6]]
def check_board_state():
    """Classify the global `board`.

    Returns 1 if player +1 has a line, 0 if player -1 has a line, 0.5 (or 0
    when ties are disallowed) for a full/near-full drawn board, and the
    string 'i' when the position is still in progress.
    """
    if any(all(board[square]==1 for square in line) for line in lines):
        return 1
    if any(all(board[square]==-1 for square in line) for line in lines):
        return 0
    # One or zero empty squares left: the game cannot continue.
    if sum(int(board[i]==0) for i in range(9)) <= 1:
        if ties_allowed:
            return 0.5
        else:
            return 0
    else:
        return 'i'
def evaluate_board():
    """Score the global `board` for both turns and store into `scores`.

    Collects the successor scores for each open square (as player +1 and as
    player -1), then iterates a pair of coupled best-response updates until
    the player-2 value changes by less than `epsilon`. The converged values
    are written to scores[board_value] (turn 0) and scores[board_value+1]
    (turn 1). Presumably this computes a mixed-strategy equilibrium value —
    TODO confirm against the derivation this script is based on.
    """
    board_value = board_to_value(board, 0)
    open_squares = []
    p1_move_score = []
    p2_move_score = []
    # Probe each empty square with both players' marks to gather child scores.
    for i in range(9):
        if(board[i]==0):
            open_squares.append(i)
            board[i]=1
            p1_move_score.append([i,scores[board_to_value(board,1)]])
            board[i]=-1
            p2_move_score.append([i,scores[board_to_value(board,0)]])
            board[i]=0
    # Player 1 prefers high scores, player 2 prefers low scores.
    p1_move_score.sort(key=lambda x: x[1], reverse=True)
    p2_move_score.sort(key=lambda x: x[1], reverse=False)
    p1_score=1
    p2_score=0
    delta_p2_score=1
    while delta_p2_score > epsilon:
        if p1_move_score[0][1] == p2_score:
            p1_score = p2_score
        else:
            # Harmonic-style weighting over the moves better than the
            # current value; extend the support while moves still qualify.
            den=1/(p1_move_score[0][1] - p2_score)
            num_sum=p1_move_score[0][1]/(p1_move_score[0][1]-p2_score)
            p1_score = (num_sum - 1)/den
            i=1
            while i < len(open_squares) and p1_move_score[i][1] > p1_score:
                den += 1/(p1_move_score[i][1] - p2_score)
                num_sum += p1_move_score[i][1]/(p1_move_score[i][1]-p2_score)
                p1_score = (num_sum - 1)/den
                i+=1
        old_p2_score = p2_score
        if p2_move_score[0][1] == p1_score:
            p2_score = p1_score
        else:
            # Mirror image of the player-1 update, minimizing instead.
            den=1/(p2_move_score[0][1] - p1_score)
            num_sum=p2_move_score[0][1]/(p2_move_score[0][1]-p1_score)
            p2_score = (num_sum - 1)/den
            i=1
            while i < len(open_squares) and p2_move_score[i][1] < p2_score:
                den += 1/(p2_move_score[i][1] - p1_score)
                num_sum += p2_move_score[i][1]/(p2_move_score[i][1]-p1_score)
                p2_score = (num_sum - 1)/den
                i+=1
        delta_p2_score = abs(p2_score-old_p2_score)
    scores[board_value] = p1_score
    scores[board_value+1] = p2_score
def playing_opt_move_probs(board, turn):
    """Mixed-strategy probabilities for the PLAYING side's move.

    Returns a list of ``[square, probability]`` pairs covering every open
    square.  ``turn`` is 0 for player 1 (mark +1) or 1 for player 2
    (mark -1); ``turn*-2 + 1`` yields both the mark to place and a sign
    that lets the same comparisons serve the maximising and minimising
    player.  Reads the precomputed global ``scores`` table.
    NOTE(review): this function is nearly identical to
    blocking_opt_move_probs below except for the final probability
    formula — a shared helper would remove the duplication.
    """
    score_position = scores[board_to_value(board,turn)]
    # Value of the same position if this move is blocked (other side to act).
    score_if_blocked = scores[board_to_value(board,(turn+1)%2)]
    possible_move_scores = []
    move_probs = []
    for i in range(9):
        if board[i]==0:
            # Temporarily place the mark to look up the successor value.
            board[i] = turn*-2 + 1
            possible_move_scores.append([i,scores[board_to_value(board,(turn+1)%2)]])
            board[i] = 0
    # Best-for-mover first: descending for player 1, ascending for player 2.
    possible_move_scores.sort(key=lambda x: x[1], reverse=(turn==0))
    if (turn*-2 + 1)*possible_move_scores[0][1] <= (turn*-2 + 1)*score_if_blocked*(1+10*epsilon):
        # No move (materially) beats being blocked: play uniformly at random.
        for move in possible_move_scores:
            move_probs.append([move[0],1.0/len(possible_move_scores)])
    else:
        # Support = moves whose value beats the position's own value; weight
        # each inversely to its gain over the blocked value.
        support_size = 1
        den = 1/(possible_move_scores[0][1]-score_if_blocked)
        while support_size < len(possible_move_scores)\
                and (turn*-2 + 1)*possible_move_scores[support_size][1] > (turn*-2 + 1)*score_position:
            den += 1/(possible_move_scores[support_size][1]-score_if_blocked)
            support_size+=1
        for i in range(len(possible_move_scores)):
            if i < support_size:
                move_probs.append([possible_move_scores[i][0],1.0/((possible_move_scores[i][1] - score_if_blocked)*den)])
            else:
                move_probs.append([possible_move_scores[i][0],0])
    return move_probs
def blocking_opt_move_probs(board, turn):
    """Mixed-strategy probabilities for the BLOCKING side's guess.

    Same structure and ``[square, probability]`` return shape as
    playing_opt_move_probs (``turn`` identifies whose move is being
    blocked); only the in-support probability formula differs — here the
    weight is ``(position - move)/(blocked - move)``, the equalising
    blocking frequency for that square.
    """
    score_position = scores[board_to_value(board,turn)]
    # Value of the position if the mover is blocked (other side to act).
    score_if_blocked = scores[board_to_value(board,(turn+1)%2)]
    possible_move_scores = []
    move_probs = []
    for i in range(9):
        if board[i]==0:
            # Temporarily place the mover's mark to look up the successor value.
            board[i] = turn*-2 + 1
            possible_move_scores.append([i,scores[board_to_value(board,(turn+1)%2)]])
            board[i] = 0
    # Best-for-mover first: descending for player 1, ascending for player 2.
    possible_move_scores.sort(key=lambda x: x[1], reverse=(turn==0))
    if (turn*-2 + 1)*possible_move_scores[0][1] <= (turn*-2 + 1)*score_if_blocked*(1+10*epsilon):
        # Mover gains nothing over being blocked: block uniformly at random.
        for move in possible_move_scores:
            move_probs.append([move[0],1.0/len(possible_move_scores)])
    else:
        # Same support computation as the playing side.
        support_size = 1
        den = 1/(possible_move_scores[0][1]-score_if_blocked)
        while support_size < len(possible_move_scores)\
                and (turn*-2 + 1)*possible_move_scores[support_size][1] > (turn*-2 + 1)*score_position:
            den += 1/(possible_move_scores[support_size][1]-score_if_blocked)
            support_size+=1
        for i in range(len(possible_move_scores)):
            if i < support_size:
                move_probs.append([possible_move_scores[i][0],(score_position - possible_move_scores[i][1])/(score_if_blocked - possible_move_scores[i][1])])
            else:
                move_probs.append([possible_move_scores[i][0],0])
    return move_probs
def print_board(board):
    """Render the 3x3 board to stdout: X for +1, O for -1, blank for 0.

    Rows are labelled 3..1 top-to-bottom and columns a..c left-to-right,
    matching the 'a1'-'c3' coordinates accepted by parse_move.
    """
    marks = ['X' if cell == 1 else 'O' if cell == -1 else ' ' for cell in board]
    print(" | | ")
    for label, start in zip(('3', '2', '1'), (0, 3, 6)):
        print(label + " " + marks[start] + " | " + marks[start + 1] + " | " + marks[start + 2] + " ")
        if start != 6:
            print(" ---|---|---")
    print(" | | ")
    print(" a b c ")
def parse_move(move_str):
    """Convert a coordinate like 'a1'..'c3' into a board index 0-8.

    Index 0 is the top-left square ('a3') and 8 the bottom-right ('c1').
    Returns the sentinel string 'e' for any malformed input.
    """
    if len(move_str) != 2 or move_str[0] not in 'abc' or move_str[1] not in '123':
        return 'e'
    column = 'abc'.index(move_str[0])
    row_offset = 3 * (3 - int(move_str[1]))
    return row_offset + column
def move_to_str(move):
    """Inverse of parse_move: board index 0-8 -> coordinate 'a3'..'c1'."""
    column = "abc"[move % 3]
    row = 3 - move // 3
    return column + str(row)
# Retrograde-style sweep over every board: terminal positions get their
# game value stored directly; non-terminal positions are backed up from
# already-scored successors by evaluate_board().  increment_board()
# enumerates boards until the all-empty board recurs.
while not done:
    #print(board)
    board_state = check_board_state()
    if board_state != 'i':
        # Terminal: both table slots (player-1 and player-2 value) get the
        # same game value — mirrors evaluate_board's two-slot layout.
        board_value = board_to_value(board,0)
        scores[board_value] = board_state
        scores[board_value+1] = board_state
    else:
        evaluate_board()
    if(board == 9*[0]):
        done = True
    else:
        increment_board()
#print(scores)
#print(scores[0])
# Interactive play loop: the user alternates between playing a move and
# guessing where the opponent will play (blocking).  A successfully
# blocked move is NOT placed on the board; an unblocked move is.
# NOTE(review): indentation of the board-placement lines reconstructed
# from game semantics (the score tables assume a blocked move leaves the
# board unchanged) — confirm against the original file.
new_game = 'Y'
while(new_game=='Y'):
    done = False
    board = 9*[0]
    turn = 0
    user_player = int(input("Would you like to be player 1 or 2? (1/2): ")) - 1
    while user_player!=0 and user_player!=1:
        user_player = int(input("Would you like to be player 1 or 2? (1/2): ")) - 1
    score = 'i'
    print_board(board)
    while score=='i':
        print("score = " + str(scores[board_to_value(board,turn)]))
        if user_player==turn:
            play = parse_move(input("It is your turn to play. What is your play? (a1-c3): "))
        else:
            play = parse_move(input("It is your opponent's turn. Where do you block? (a1-c3): "))
        while play == 'e' or board[play]!=0:
            # FIX: grammar of the re-prompt message ("corresponding to").
            play = parse_move(input("Please input a value corresponding to an open square. (a1-c3): "))
        # The computer mixes over its options: it blocks when it's the
        # user's move, and plays when it's the user's turn to block.
        if user_player==turn:
            move_probs = blocking_opt_move_probs(board, turn)
        else:
            move_probs = playing_opt_move_probs(board, turn)
        # Sample one square from the mixed strategy by inverse CDF.
        r = random.uniform(0,1)
        move = 0
        move_prob_sum = move_probs[0][1]
        while r > move_prob_sum:
            move+=1
            move_prob_sum += move_probs[move][1]
        if user_player==turn:
            print("Your opponent blocks square " + move_to_str(move_probs[move][0]))
            if play==move_probs[move][0]:
                # FIX: grammar ("blocks", not "block").
                print("Your opponent successfully blocks your move!")
            else:
                print("Your opponent fails to block your move!")
                board[play] = turn*-2 + 1
        else:
            print("Your opponent plays at square " + move_to_str(move_probs[move][0]))
            if play==move_probs[move][0]:
                print("You successfully block your opponent's move!")
            else:
                print("You fail to block your opponent's move!")
                board[move_probs[move][0]] = turn*-2 + 1
        turn = (turn+1)%2
        score = check_board_state()
        print_board(board)
    # score 1 means player 1 (index 0) completed a line; score 0 means
    # player 2 (index 1) did — so score == user_player means the OTHER
    # player won.
    if score==0.5:
        print("The game was tied.")
    elif score==user_player:
        print("Sorry, you lost.")
    else:
        print("Congratulations, you won!")
    new_game = input("Would you like to play a new game? (Y/N): ")
    while new_game!='Y' and new_game!='N':
        new_game = input("Would you like to play a new game? (Y/N): ")
# app/broadcast_areas/__init__.py
from notifications_utils.formatters import formatted_list
from notifications_utils.polygons import Polygons
from notifications_utils.serialised_model import SerialisedModelCollection
from werkzeug.utils import cached_property
from .populations import CITY_OF_LONDON
from .repo import BroadcastAreasRepository
class SortableMixin:
    """Give subclasses id-based identity and name-based ordering.

    Any class exposing ``id`` and ``name`` attributes becomes sortable
    (``__lt__`` by display name) and usable as a set member or dict key
    (``__eq__``/``__hash__`` by id).
    """

    def __repr__(self):
        return f'{type(self).__name__}(<{self.id}>)'

    def __eq__(self, other):
        # Identity is the id alone, so the same area loaded twice from the
        # repository still compares equal.
        return self.id == other.id

    def __hash__(self):
        return hash(self.id)

    def __lt__(self, other):
        # Defining __lt__ is enough for sorted()/list.sort() to work.
        return self.name < other.name
class GetItemByIdMixin:
    """Add dict-style lookup-by-id to any iterable collection."""

    def get(self, id):
        """Return the first contained item whose ``id`` matches.

        Raises KeyError when no item has the given id.
        """
        matches = (item for item in self if item.id == id)
        try:
            return next(matches)
        except StopIteration:
            raise KeyError(id) from None
class BroadcastArea(SortableMixin):
    """A single broadcastable area, hydrated lazily from the repository."""

    def __init__(self, row):
        # Row layout from the repository: (id, name, count_of_phones, library_id).
        self.id, self.name, self._count_of_phones, self.library_id = row

    @cached_property
    def polygons(self):
        """Full-resolution polygons for this area (fetched once, cached)."""
        return Polygons(
            BroadcastAreasRepository().get_polygons_for_area(self.id)
        )

    @cached_property
    def simple_polygons(self):
        """Simplified polygons (fewer points) for cheap rendering/estimates."""
        return Polygons(
            BroadcastAreasRepository().get_simple_polygons_for_area(self.id)
        )

    @cached_property
    def sub_areas(self):
        """Child areas when this area is a group; empty list otherwise."""
        return [
            BroadcastArea(row)
            for row in BroadcastAreasRepository().get_all_areas_for_group(self.id)
        ]

    @property
    def count_of_phones(self):
        # The City of London is a special case: estimate from its daytime
        # population, scaled by the fraction of its area this ward covers.
        # NOTE(review): assumes CITY_OF_LONDON.WARDS is a str or tuple of id
        # suffixes accepted by str.endswith — confirm in .populations.
        if self.id.endswith(CITY_OF_LONDON.WARDS):
            return CITY_OF_LONDON.DAYTIME_POPULATION * (
                self.polygons.estimated_area / CITY_OF_LONDON.AREA_SQUARE_MILES
            )
        # Groups aggregate their children's counts.
        if self.sub_areas:
            return sum(area.count_of_phones for area in self.sub_areas)
        # TODO: remove the `or 0` once missing data is fixed, see
        # https://www.pivotaltracker.com/story/show/174837293
        return self._count_of_phones or 0

    @cached_property
    def parents(self):
        """All ancestor areas, nearest first, as a concrete list."""
        return list(filter(None, self._parents_iterator))

    @property
    def _parents_iterator(self):
        # Generator walking up the parent chain until the repository
        # reports no parent for the current id.
        id = self.id
        while True:
            parent = BroadcastAreasRepository().get_parent_for_area(id)
            if not parent:
                return None
            parent_broadcast_area = BroadcastArea(parent)
            yield parent_broadcast_area
            id = parent_broadcast_area.id
class CustomBroadcastArea:
    """An ad-hoc, user-drawn area (not from the areas library)."""

    # We don’t yet have a way to estimate the number of phones in a
    # user-defined polygon
    count_of_phones = 0

    def __init__(self, *, name, polygons=None):
        self.name = name
        self._polygons = polygons or []

    @property
    def polygons(self):
        return Polygons(
            # Polygons in the DB are stored with the coordinate pair
            # order flipped – this flips them back again
            Polygons(self._polygons).as_coordinate_pairs_lat_long
        )

    # Custom areas have no separate simplified geometry, so this aliases
    # the ``polygons`` property object itself — both names resolve to the
    # same descriptor.
    simple_polygons = polygons
class CustomBroadcastAreas(SerialisedModelCollection):
    """Collection of user-defined areas; only the first carries polygons."""

    model = CustomBroadcastArea

    def __init__(self, *, areas, polygons):
        self.items = areas
        self._polygons = polygons

    def __getitem__(self, index):
        # The polygons belong to the collection as a whole, and by
        # convention are attached to the first area only.
        area_polygons = self._polygons if index == 0 else None
        return self.model(name=self.items[index], polygons=area_polygons)
class BroadcastAreaLibrary(SerialisedModelCollection, SortableMixin, GetItemByIdMixin):
    """One library (e.g. counties, wards) of broadcast areas."""

    model = BroadcastArea

    def __init__(self, row):
        # Row layout: (id, name, name_singular, is_group).
        self.id, self.name, self.name_singular, is_group = row
        self.is_group = bool(is_group)
        self.items = BroadcastAreasRepository().get_all_areas_for_library(self.id)

    def get_examples(self):
        """Return a short formatted sample of area names.

        Shows up to four names: three areas, then either the fourth area
        (when there are exactly four) or an "N more…" indicator.
        """
        names_to_show = sorted(area.name for area in self)[:4]
        count_of_areas_not_named = len(self.items) - 3
        # Exactly one unnamed area means there are exactly four in total —
        # in that case just show all four instead of "1 more…".
        if count_of_areas_not_named > 1:
            names_to_show = names_to_show[:3] + [f'{count_of_areas_not_named} more…']
        return formatted_list(names_to_show, before_each='', after_each='')
class BroadcastAreaLibraries(SerialisedModelCollection, GetItemByIdMixin):
    """Top-level collection of every broadcast-area library."""

    model = BroadcastAreaLibrary

    def __init__(self):
        self.items = BroadcastAreasRepository().get_libraries()

    def get_areas(self, *area_ids):
        """Look up areas by id.

        Accepts both ``get_areas('a', 'b')`` and ``get_areas(['a', 'b'])``.
        """
        if len(area_ids) == 1 and isinstance(area_ids[0], list):
            area_ids = area_ids[0]
        rows = BroadcastAreasRepository().get_areas(area_ids)
        return [BroadcastArea(row) for row in rows]


# Module-level singleton used throughout the app.
broadcast_area_libraries = BroadcastAreaLibraries()
# coding=utf-8
import hashlib
import hmac
import json
import logging
from typing import Dict, Set
import requests
from ikeawatcher.model import CollectLocation, ShoppingCart
LOGGER = logging.getLogger(__name__)

# HMAC scheme used to sign request payloads (see IkeaApi._generate_hmac).
_HMAC_ALGO = hashlib.sha1
# NOTE(review): the signing key has been redacted in this copy ("<KEY>");
# requests will not be accepted until the real key is restored.
_HMAC_KEY = "<KEY>"
class IkeaApi:
    """Minimal client for IKEA's click-and-collect / delivery web endpoints.

    Delivery-check requests are signed with an HMAC over the exact JSON
    payload bytes (see _make_delivery_request / _generate_hmac).
    """

    def __init__(self, country, locale):
        # The country code is interpolated into URLs, so normalise it.
        self._country = country.strip().lower()
        self._locale = locale

    def get_collect_locations(self) -> Set[CollectLocation]:
        """Fetch every click-and-collect location for the configured country."""
        url = f"https://ww8.ikea.com/clickandcollect/{self._country}/receive/listfetchlocations?version=2"
        response = requests.get(url)
        response.raise_for_status()
        locations_by_id = response.json()
        """
        Example values for Belgium
        locations_by_id = {
        '76ffc2d2-3327-4191-ae4f-83f4b2e3920a': {'name': 'IKEA Hasselt', 'isClosed': False, 'closingTimes': ''},
        '3a27b7d5-bad0-4d6a-9186-296ecd396a28': {'name': '- Pick-up Point Dockx Hasselt-West', 'isClosed': False,
        'closingTimes': ''},
        '99931b05-27fc-42e2-bf56-46446b91e1b5': {'name': '- Pick Up Point BD MyShopi Geel', 'isClosed': False,
        'closingTimes': ''},
        'ea21bc9c-6c6f-4f3c-b980-c63a648687f5': {'name': 'IKEA Zaventem', 'isClosed': False, 'closingTimes': ''},
        '3c0eeb5d-998b-4e32-a8b2-11c677b6c009': {'name': '- Pick-up Point Dockx Machelen', 'isClosed': False,
        'closingTimes': ''},
        '68271b0b-8090-4d06-86b1-ef54c13f511a': {'name': '- Pick-up Point Dockx Herent', 'isClosed': False,
        'closingTimes': ''},
        '56e815a3-b641-48a1-8ea7-82755ef936b3': {'name': '<NAME>', 'isClosed': False, 'closingTimes': ''},
        '36bc78f3-a74e-4cfe-bb54-efe13d4712d5': {'name': '<NAME>', 'isClosed': False, 'closingTimes': ''},
        '7997ba73-9928-4381-a3e8-1739a01c5d4f': {'name': '- Pick-up Point Roeselare', 'isClosed': False,
        'closingTimes': ''},
        '8b9ae83a-1266-4cd4-b12d-59f0ad6c9543': {'name': '- Pick-up Point Dockx Jumet', 'isClosed': False,
        'closingTimes': ''},
        'c0ced698-b6fe-4418-8813-5cc5a0a841a5': {'name': '- Pick-up Point Rekkem', 'isClosed': False,
        'closingTimes': ''},
        'af983201-e1ef-48e8-9781-cf530d73349d': {'name': '<NAME>', 'isClosed': False, 'closingTimes': ''},
        '274ea32b-61d3-47a2-bbaa-4b4a518fa826': {'name': '- Pick-up Point Dockx Aalst', 'isClosed': False,
        'closingTimes': ''},
        'f4ec9269-112d-4c92-ae31-f1d9c1ae0c86': {'name': '- Pick-up Point Dockx Oudenaarde', 'isClosed': False,
        'closingTimes': ''},
        'acf5b993-acee-4d7c-b7f2-573ca4f75129': {'name': '<NAME>', 'isClosed': False, 'closingTimes': ''},
        '0436634a-1560-4a23-afce-4917b149f7ca': {'name': '- Pick-up Point Sint-Niklaas', 'isClosed': False,
        'closingTimes': ''},
        '7bc38679-8264-44a2-a4e9-9ee5107b5bcc': {'name': '- Click and Collect Box Mechelen', 'isClosed': False,
        'closingTimes': ''},
        '1e1f5b73-06a5-4bfa-bd5a-59dbc33cc8c3': {'name': 'IKE<NAME> (Brussels)', 'isClosed': False,
        'closingTimes': ''},
        'cd4d0a31-771f-4cb9-8ced-c246a2d61104': {'name': '- Pick-up Point Dockx Nivelles', 'isClosed': False,
        'closingTimes': ''},
        '9805f302-33e3-48f8-90c1-9b700953e7f4': {'name': 'IKE<NAME>', 'isClosed': False, 'closingTimes': ''},
        '39ff81bc-a3cc-4b4e-a38d-e6cfad98bf3d': {'name': '- Pick-Up Point City Depot Naninne', 'isClosed': False,
        'closingTimes': ''},
        'a08a2b93-6783-4626-b8d9-f520ceb05dfc': {'name': '- Pick-Up Point Dockx Verviers', 'isClosed': False,
        'closingTimes': ''}}
        """
        result = {CollectLocation(id=loc_id, name=loc["name"]) for loc_id, loc in locations_by_id.items()}
        LOGGER.debug(f"Collect locations: {result}")
        return result

    def check_express_delivery(self, shopping_cart: ShoppingCart, zip_code: str, promotion_code: str = None):
        """Ask whether the cart can be express-delivered to ``zip_code``.

        Returns the (is_success, details) pair from _make_delivery_request.
        """
        url = f"https://ww8.ikea.com/clickandcollect/{self._country}/receive/receiveexpress/"
        # url = f"https://ww8.ikea.com/clickandcollect/{self._country}/receive/"
        payload = {
            "selectedService": "express",
            "selectedServiceValue": zip_code,
            "articles": shopping_cart.to_json(),
            "locale": self._locale,
            "customerView": "desktop",
            "system": "IRW",
            "promotionCode": promotion_code or ""
        }
        result, details = self._make_delivery_request(url, payload)
        """
        example response:
        {
        "status": "OK",
        "target": "https://ww8.ikea.com/clickandcollect/be/start/JR8PpQa7eW1nuregZ4J2pTA1lVLsBKXl",
        "servicePrices": {
        "homeDelivery": {
        "price": 39.9,
        "deliveryHandling": "2 man"
        },
        "status": "OK"
        }
        """
        return result, details

    def check_click_and_collect(self, shopping_cart: ShoppingCart, location: CollectLocation,
                                promotion_code: str = None):
        """Ask whether the cart can be collected at ``location``."""
        url = f"https://ww8.ikea.com/clickandcollect/{self._country}/receive/"
        payload = {
            "selectedService": "fetchlocation",
            "selectedServiceValue": location.id,
            "articles": shopping_cart.to_json(),
            "locale": self._locale,
            "customerView": "desktop",
            "slId": "1241241241",
            "promotionCode": promotion_code or ""
        }
        result, details = self._make_delivery_request(url, payload)
        return result, details

    # NOTE(review): tuple-literal return annotation; Tuple[bool, Dict] would
    # be the conventional form.
    def _make_delivery_request(self, url: str, payload: Dict) -> (bool, Dict):
        """POST a signed delivery request; return (is_success, json_response)."""
        # Compact separators: the HMAC below is computed over these exact
        # bytes, so the serialisation presumably must match what the server
        # verifies — do not reformat.
        payload_json = json.dumps(payload, separators=(',', ':'))
        data = {
            "payload": payload_json,
            "hmac": self._generate_hmac(payload_json),
            # "backUrl": "https://order.ikea.com/be/fr/checkout/delivery/"
        }
        LOGGER.debug(f"Delivery request: {json.dumps(data, indent=3)}")
        response = requests.post(url, json=data)
        response.raise_for_status()
        json_response = response.json()
        LOGGER.debug(f"Delivery response: {json.dumps(json_response, indent=3)}")
        is_success = json_response.get("status", "").upper() == "OK"
        return is_success, json_response

    @staticmethod
    def _generate_hmac(data):
        # Hex digest of HMAC(_HMAC_ALGO) over the UTF-8 payload bytes.
        return hmac.new(_HMAC_KEY.encode("utf8"), data.encode("utf8"), _HMAC_ALGO).digest().hex()
# scottdermott/etl-parser
# -*- coding: utf-8 -*-
"""
Parse an event record
:see: https://docs.microsoft.com/fr-fr/windows/desktop/api/evntcons/ns-evntcons-_event_record
"""
from construct import Struct, Int16ul, Enum, Int32ul, Int64ul, FlagsEnum, Int8ul, Bytes, Aligned, RepeatUntil, Computed, \
AlignedStruct, If
from etl.parsers.etw.core import Etw, build_etw, Guid as EtwGuid
from etl.parsers.tracelogging import build_tracelogging, TraceLogging
from etl.utils import Guid
from etl.wmi import wmi_trace_marker
# Header-type byte values distinguishing 32-bit vs 64-bit event records.
EventHeaderType = Enum(
    Int8ul,
    EVENT_HEADER_EVENT32=0x12,
    EVENT_HEADER_EVENT64=0x13
)

# EVENT_HEADER.Flags bitmask (see evntcons.h / the MS docs linked above).
EventHeaderFlag = FlagsEnum(
    Int16ul,
    EVENT_HEADER_FLAG_EXTENDED_INFO=0x0001,
    EVENT_HEADER_FLAG_PRIVATE_SESSION=0x0002,
    EVENT_HEADER_FLAG_STRING_ONLY=0x0004,
    EVENT_HEADER_FLAG_TRACE_MESSAGE=0x0008,
    EVENT_HEADER_FLAG_NO_CPUTIME=0x0010,
    EVENT_HEADER_FLAG_32_BIT_HEADER=0x0020,
    EVENT_HEADER_FLAG_64_BIT_HEADER=0x0040,
    EVENT_HEADER_FLAG_CLASSIC_HEADER=0x0100,
    EVENT_HEADER_FLAG_PROCESSOR_INDEX=0x0200
)

# EVENT_HEADER.EventProperty bitmask.
EventHeaderPropertyFlag = FlagsEnum(
    Int16ul,
    EVENT_HEADER_PROPERTY_XML=0x0001,
    EVENT_HEADER_PROPERTY_FORWARDED_XML=0x0002,
    EVENT_HEADER_PROPERTY_LEGACY_EVENTLOG=0x0004,
    EVENT_HEADER_PROPERTY_RELOGGABLE=0x0008
)

# EVENT_DESCRIPTOR: identifies the event within its provider.
EventDescriptor = Struct(
    "Id" / Int16ul,
    "Version" / Int8ul,
    "Channel" / Int8ul,
    "Level" / Int8ul,
    "Opcode" / Int8ul,
    "Task" / Int16ul,
    "Keyword" / Int64ul
)

# EVENT_HEADER preceded by the WMI trace marker (size/type envelope).
EventHeader = Struct(
    "marker" / wmi_trace_marker(EventHeaderType),
    "flags" / EventHeaderFlag,
    "event_property" / EventHeaderPropertyFlag,
    "thread_id" / Int32ul,
    "process_id" / Int32ul,
    "timestamp" / Int64ul,
    "provider_id" / Guid,
    "event_descriptor" / EventDescriptor,
    "processor_time" / Int64ul,
    "activity_id" / Guid
)

# EVENT_HEADER_EXTENDED_DATA_ITEM: optional trailing metadata items.
EventHeaderExtendedDataItem = Struct(
    "reserved1" / Int16ul,
    "ext_type" / Int16ul,
    "reserved2" / Int16ul,
    "data_size" / Int16ul,
    "data_item" / Bytes(lambda this: this.data_size)
)

# Full EVENT_RECORD, 8-byte aligned.  mark1/mark2 capture stream offsets
# so user_data can be sized as "whatever remains of the record".
EventRecord = AlignedStruct(8,
    "mark1" / Computed(lambda this: this._io.tell()),
    "event_header" / EventHeader,
    # Extended-data items are present only when the flag is set; the list
    # ends at the first item whose reserved2 low bit is clear.
    "extended_data" / If(lambda this: this.event_header.flags.EVENT_HEADER_FLAG_EXTENDED_INFO, RepeatUntil(
        lambda el, lst, this: not lst[-1].reserved2 & 0x1,
        Aligned(8, EventHeaderExtendedDataItem)
    )),
    "mark2" / Computed(lambda this: this._io.tell()),
    # NOTE(review): marker.version appears to carry the total record size
    # here (WMI envelope quirk) — confirm against etl.wmi.wmi_trace_marker.
    "user_data" / Bytes(lambda this: this.event_header.marker.version - (this.mark2 - this.mark1))
)
class Event:
    """
    Python wrapper around the parsed EventRecord construct container,
    exposing the interesting fields as methods.
    """

    def __init__(self, source):
        # ``source`` is a container produced by EventRecord.parse(...).
        self.source = source

    def get_process_id(self) -> int:
        """
        Return the process id of issuer
        :return: process id of issuer
        """
        return self.source.event_header.process_id

    def get_thread_id(self) -> int:
        """
        Return the thread id of issuer
        :return: thread id of issuer
        """
        return self.source.event_header.thread_id

    def get_timestamp(self) -> int:
        """
        :return: Timestamp associated with this event
        """
        return self.source.event_header.timestamp

    def parse_etw(self) -> Etw:
        """
        Try to parse user data with known etw format (if it's an ETW log)
        :return: If known build associate Etw class
        :raise: GuidNotFound, EventIdNotFound, EtwVersionNotFound
        """
        # Rebuild the provider GUID in the form expected by the ETW registry.
        guid = EtwGuid(self.source.event_header.provider_id.inner.data1, self.source.event_header.provider_id.inner.data2,
                       self.source.event_header.provider_id.inner.data3, self.source.event_header.provider_id.inner.data4)
        event_id = self.source.event_header.event_descriptor.Id
        version = self.source.event_header.event_descriptor.Version
        user_data = self.source.user_data
        return build_etw(guid, event_id, version, user_data)

    def parse_tracelogging(self) -> TraceLogging:
        """
        Try to parse a tracelogging event
        """
        return build_tracelogging(self.source)
"""
model from
A Self-Reasoning Framework for Anomaly Detection Using Video-Level Labels
1 video clip each 16 frame
2 c3d feature extractor
3 fc
3.1 fc 4096->512
Real-world Anomaly Detection in Surveillance Videos
arXiv:1801.04264v3
WEAKLY SUPERVISED VIDEO ANOMALY DETECTION VIA CENTER-GUIDED DISCRIMINATIVE LEARNING
"""
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
from kmeans_pytorch import kmeans
import numpy as np
import torch.nn.functional as F
from .build import MODEL_REGISTRY
@MODEL_REGISTRY.register()
class SRF_FC(nn.Module):
    """FC scoring head from "A Self-Reasoning Framework for Anomaly
    Detection Using Video-Level Labels": maps 4096-d C3D clip features to
    a per-clip anomaly score in [0, 1], also returning the intermediate
    activations.
    """

    def __init__(self, cfg):
        # ``cfg`` is accepted for registry compatibility but unused here.
        super(SRF_FC, self).__init__()
        self.fc1 = nn.Linear(4096, 512)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(0.6)
        self.fc2 = nn.Linear(512, 32)
        self.dropout2 = nn.Dropout(0.6)
        self.fc3 = nn.Linear(32, 1)
        self.sig = nn.Sigmoid()
        # In the original keras code they use "glorot_normal"
        # As I understand, this is the same as xavier normal in Pytorch
        nn.init.xavier_normal_(self.fc1.weight)
        nn.init.xavier_normal_(self.fc2.weight)
        nn.init.xavier_normal_(self.fc3.weight)

    def forward(self, x):
        """
        :param x: shape in [B,32,4096]
        :return: (x_1, x_2, x_3, pred_score) — the three intermediate
            activations plus the sigmoid scores flattened to 1-D.
        """
        x_1 = self.dropout1(self.relu1(self.fc1(x)))
        # do k-means get euc-dis and pseudo label
        # pseudo_y, euc_dis=self.k_means_cluster(x_1)
        x_2 = self.dropout2(self.fc2(x_1))
        x_3 = self.sig(self.fc3(x_2))
        pred_score = x_3.view(-1)
        return x_1, x_2, x_3, pred_score  # perd_score, pseudo_y, euc_dis
@MODEL_REGISTRY.register()
class MIL_FC(nn.Module):
    """MIL ranking head from "Real-world Anomaly Detection in Surveillance
    Videos" (arXiv:1801.04264): same trunk as SRF_FC, but only the final
    sigmoid score is returned.
    """

    def __init__(self, cfg):
        # ``cfg`` is accepted for registry compatibility but unused here.
        super(MIL_FC, self).__init__()
        self.fc1 = nn.Linear(4096, 512)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(0.6)
        self.fc2 = nn.Linear(512, 32)
        self.dropout2 = nn.Dropout(0.6)
        self.fc3 = nn.Linear(32, 1)
        self.sig = nn.Sigmoid()
        # In the original keras code they use "glorot_normal"
        # As I understand, this is the same as xavier normal in Pytorch
        nn.init.xavier_normal_(self.fc1.weight)
        nn.init.xavier_normal_(self.fc2.weight)
        nn.init.xavier_normal_(self.fc3.weight)

    def forward(self, x):
        """Return anomaly scores in [0, 1] for 4096-d input features."""
        x = self.dropout1(self.relu1(self.fc1(x)))
        x = self.dropout2(self.fc2(x))
        x = self.sig(self.fc3(x))
        return x
@MODEL_REGISTRY.register()
class ARNet_FC(torch.nn.Module):
    # 2048 for I3D combine 1024 for rgb or flow
    """ARNet-style scoring head: single hidden FC layer over 1024-d
    features (RGB or flow stream), producing a sigmoid anomaly score.
    """

    def __init__(self, cfg):
        # ``cfg`` is accepted for registry compatibility but unused here.
        super(ARNet_FC, self).__init__()
        self.feature_dim = 1024
        self.fc = nn.Linear(self.feature_dim, self.feature_dim)
        self.classifier = nn.Linear(self.feature_dim, 1)
        self.sigmoid = nn.Sigmoid()
        self.dropout = nn.Dropout(0.7)
        # self.apply(weights_init)
        nn.init.xavier_normal_(self.fc.weight)
        nn.init.xavier_normal_(self.classifier.weight)
        # nn.init.xavier_normal_(self.fc3.weight)

    def forward(self, inputs, is_training=True):
        """Return scores in [0, 1]; dropout is applied only when
        ``is_training`` is True (applied manually instead of relying on
        module train/eval mode).
        """
        x = F.relu(self.fc(inputs))
        if is_training:
            x = self.dropout(x)
        score_x = self.sigmoid(self.classifier(x))
        return score_x
        # return x, self.sigmoid(self.classifier(x))
if __name__ == "__main__":
    # Smoke test: push one random batch of C3D-style features through
    # SRF_FC.  FIX: the original hard-coded .cuda(), which crashes on
    # machines without a GPU — fall back to CPU when CUDA is unavailable.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    x = torch.rand(size=[100, 64, 4096], device=device)
    model = SRF_FC(cfg=None).to(device)
    x_1, x_2, x_3, pred_score = model(x)
    print(pred_score.shape)
"""
Copyright (c) 2021, FireEye, Inc.
Copyright (c) 2021 <NAME>
In order to run any mitigation experiment, first run the desired attack for 1 iteration setting the save
parameter of the configuration file to a valid path in the system, and "defense": true.
The attack script will save there a set of artifacts such as the watermarked training and test sets,
and the backdoor trigger details.
"""
import os
import time
import numpy as np
from mw_backdoor import common_utils
from sklearn.ensemble import IsolationForest
def isolation_forest_analysis(xtrain, is_clean):
    """Fit an Isolation Forest on ``xtrain`` and report poison detection.

    ``is_clean`` is a parallel array with 1 for clean samples and 0 for
    poisoned ones.  Returns (predictions, suspect_count, poison_found,
    false_positives, fitted_forest); IsolationForest marks outliers as -1.
    """
    # Train the Isolation Forest
    start = time.time()
    isof = IsolationForest(max_samples='auto', contamination='auto', random_state=42, n_jobs=-1)
    isof_pred = isof.fit_predict(xtrain)
    print('Training the Isolation Forest took {:.2f} seconds'.format(time.time() - start))

    # Tally how many flagged outliers are actually poisoned.
    start = time.time()
    suspect = 0
    poison_found = 0
    false_positives_poison = 0
    for prediction, clean in zip(isof_pred, is_clean):
        if prediction != -1:
            continue
        suspect += 1
        if clean == 0:
            poison_found += 1
        elif clean == 1:
            false_positives_poison += 1
    print(
        'Results:'
        '\n- {} suspect data points;'
        '\n- {} correctly identified poisoned points;'
        '\n- {} false positives;'.format(
            suspect,
            poison_found,
            false_positives_poison
        )
    )
    print('Evaluation took {:.2f} seconds'.format(time.time() - start))
    return isof_pred, suspect, poison_found, false_positives_poison, isof
def isoforest_def():
    """Run the Isolation Forest mitigation against saved attack artifacts.

    Expects the attack script to have been run first (1 iteration, with
    ``"defense": true`` and a valid ``save`` path) so the watermarked
    datasets and backdoor-trigger details exist on disk.
    """
    # ## Defense parameters
    # Set these parameters according to the specific attack for which you
    # would like to test the isolation forest.
    # dataset = 'drebin'
    # model_id = 'linearsvm'

    # This path should be the one where the attack script created the attack artifacts
    atk_dir = '/net/data/malware-backdoor/mwbdr/defense_files/drebin__linearsvm__combined_additive_shap__combined_additive_shap__feasible'
    config = 'configs/drebin_fig5.json'

    cfg = common_utils.read_config(config, atk_def=True)
    print(cfg)

    # Load attack data
    watermarked_X = np.load(os.path.join(atk_dir, 'watermarked_X.npy'), allow_pickle=True).item()
    # watermarked_X_test = np.load(os.path.join(atk_dir, 'watermarked_X_test.npy'), allow_pickle=True)
    watermarked_y = np.load(os.path.join(atk_dir, 'watermarked_y.npy'), allow_pickle=True)
    wm_config = np.load(os.path.join(atk_dir, 'wm_config.npy'), allow_pickle=True).item()

    # The last poison_size rows of the training set are the watermarked
    # (poisoned) samples.
    watermarked_X_wmgw = watermarked_X[-cfg['poison_size'][0]:]
    print(watermarked_X_wmgw.shape)
    watermarked_y_wmgw = watermarked_y[-cfg['poison_size'][0]:]
    print(watermarked_y_wmgw.shape)
    print(watermarked_y_wmgw.sum())
    # Sanity check: the trigger sets each watermarked feature to a fixed
    # value, so their variance across poisoned rows should be zero.
    print(
        'Variance of the watermarked features, should be all 0s:',
        np.var(
            watermarked_X_wmgw[:, wm_config['wm_feat_ids']].toarray(),
            axis=0,
            dtype=np.float64
        )
    )

    # ## Analysis
    # Ground-truth mask: 1 = clean sample, 0 = poisoned sample.
    is_clean = np.ones(watermarked_X.shape[0])
    is_clean[-cfg['poison_size'][0]:] = 0
    print(is_clean.shape)
    print(is_clean.sum())

    # noinspection PyUnusedLocal
    isof_pred, suspect, poison_found, false_positives_poison, isof = isolation_forest_analysis(
        xtrain=watermarked_X,
        is_clean=is_clean
    )


if __name__ == '__main__':
    isoforest_def()
# main_Screen/main_controller.py
import tkinter as tk
from test_one import ToDo
"""
This is the controller file - will act as the traffic director for active and non-active windows based
upon end user.
"""
# Color designations for the app theme to be used with widgets -------------
BG = '#0C1021'
FG = 'white'
DGR = 'dark goldenrod'
G = 'green'

# Per-window widget settings: (background, foreground, font).
# FIX: the original rebuilt a dict of identical tuples on every call;
# hoisted to module level.  Every window currently shares one style, but
# keeping one entry per window still allows individual re-theming later.
_STANDARD_STYLE = (BG, FG, ('system', 12, 'bold'))
_FCONFIG = {
    'LoginWindow': _STANDARD_STYLE,
    'AccountsContacts': _STANDARD_STYLE,
    'ToDo': _STANDARD_STYLE,
    'CallLog': _STANDARD_STYLE,
    'Projects': _STANDARD_STYLE,
    'BidSolicits': _STANDARD_STYLE,
}


def config_frame(index):
    """Return the (background, foreground, font) style tuple for the
    named window class.

    Raises KeyError for unknown window names (same as the original).
    """
    return _FCONFIG[index]
############ MASTER CLASS ####################################
#-----------------------------------------------------------------------------------
class MainStart(tk.Tk):
    """Root application window: builds every screen once up front and
    raises one at a time via show_window()."""

    #-------------------------------------------------------
    def __init__(self, *args, **kwargs):
        """
        MainStart inherits directly from tk.Tk; extra args are forwarded.
        """
        tk.Tk.__init__(self, *args, **kwargs)
        # Foundation frame represents the main frame which all children widgets will be created and placed on
        #-----------------------------------------
        frame_foundation = tk.Frame(self)
        frame_foundation.pack(side = tk.TOP, fill = tk.BOTH, expand = True)
        frame_foundation.grid_rowconfigure(0, weight = 1)
        frame_foundation.grid_columnconfigure(0, weight = 1)
        # Classes are stored --- to be called and passed to show window.
        # All frames are stacked in the same grid cell; tkraise() brings
        # the active one to the front.
        # NOTE(review): ``ToDo`` is imported from test_one at the top of
        # this file but re-defined as a class later in this module — the
        # later class definition is the one resolved here; confirm which
        # is intended.
        #---------------------------------
        self.window_container = {}
        for wins in (LoginWindow, AccountsContacts, ToDo, CallLog, Projects, BidSolicits):
            window = wins(frame_foundation, self)  # Passing reference to all
            self.window_container[wins] = window
            window.grid(row = 0, column = 0, sticky = tk.NSEW)
        self.show_window(LoginWindow)

    #------------------------------------------------------------
    def show_window(self, logwin):
        """
        Raise the frame for window class ``logwin`` and retitle the root
        window to match it.
        """
        window_title = {
            LoginWindow:'Login Screen',
            AccountsContacts:'Accounts and Contacts', #TestWindow:'Test1',
            ToDo:'To Do List',
            CallLog:'CallLog Sheet',
            Projects:'Projects and Quotes',
            BidSolicits:'Bid Solicitations'
        }
        self.title(window_title[logwin])  # Change the window title to match the currently passed one
        frame = self.window_container[logwin]  # Look up the pre-built frame for the requested window
        frame.tkraise()
############ LOGIN WINDOW ############################
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
class LoginWindow(tk.Frame):
    """Login screen: a navigation button column plus a demo Text widget."""

    #------------------------------------------------------
    def __init__(self, parent, controller):
        """
        ``controller`` is the MainStart instance; navigation buttons call
        ``controller.show_window(...)`` to raise the other screens.
        """
        tk.Frame.__init__(self, parent)
        conf = config_frame('LoginWindow')
        self.config(bg = conf[0])
        # Frame containers
        #----------------------------------------------------------------------------------------
        self.navframe = tk.Frame(self, bg = BG)
        self.navframe.grid(row = 0, column = 0, sticky = tk.NS)
        self.alterframe = tk.Frame(self, bg = BG)
        self.alterframe.grid(row = 0, column = 1, columnspan = 6, sticky = tk.NSEW)
        # Labelframe container
        #-----------------------------------------------------------------------------------------
        self.txt_lframe = tk.LabelFrame(self.alterframe, text = 'Text Widget', bg = BG, fg = FG)
        self.txt_lframe.grid(row = 0, column = 0, padx = 10, pady = 10, sticky = tk.NSEW)
        # Event bindings for the labelframe - change color when in and out of focus
        #-----------------------------------------------------------------------------------
        self.txt_lframe.bind('<Enter>', self.widget_focus_color)
        self.txt_lframe.bind('<Leave>', self.widget_focus_color_def)
        # Text widget creation
        #--------------------------------------------------------
        self.txt = tk.Text(self.txt_lframe, bg = BG, fg = FG,
                           selectbackground = DGR, selectforeground = 'black', selectborderwidth = 1)
        self.txt.grid(row = 0, column = 0, padx = 10, pady = 10, sticky = tk.NSEW)
        # Button widgets - navigation to and from different screens.
        # FIX: removed the original b_0 ('TestOne') button — it was never
        # gridded and its command referenced the undefined name TestWindow,
        # which would raise NameError if ever invoked.
        #--------------------------------------------------------------------------------------------------------------
        b_1 = tk.Button(self.navframe, text = 'Accounts/Contacts', command = lambda: controller.show_window(AccountsContacts))
        b_2 = tk.Button(self.navframe, text = 'ToDo', command = lambda: controller.show_window(ToDo))
        b_3 = tk.Button(self.navframe, text = 'CallLog', command = lambda: controller.show_window(CallLog))
        b_4 = tk.Button(self.navframe, text = 'Projects', command = lambda: controller.show_window(Projects))
        b_5 = tk.Button(self.navframe, text = 'BidSolicits', command = lambda: controller.show_window(BidSolicits))
        widges_buttons = [b_1, b_2, b_3, b_4, b_5]  # Store buttons in list to be iterated through
        # Style, place, and hover-bind each navigation button.
        #---------------------------------------------------------------------
        for x, i in enumerate(widges_buttons):
            i.config(bg = conf[0], fg = conf[1], font = conf[2])
            i.grid(row = x+1, column = 0, padx = 5, pady = 5, sticky = tk.EW)
            i.bind('<Enter>', self.widget_focus_color)
            i.bind('<Leave>', self.widget_focus_color_def)
        # Configure label widget
        #----------------------------------------------------------------------
        label = tk.Label(self.navframe, text = 'Login')
        label.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = tk.EW)
        label.config(bg = BG, fg = 'green', font = 'Verdana 18 bold')

    #----------------------------------------------------------
    def widget_focus_color(self, event):
        """
        When the cursor enters a widget, highlight it so the user can see
        which widget the app currently recognizes.
        """
        return event.widget.config(fg = DGR)

    #---------------------------------------------------------
    def widget_focus_color_def(self, event):
        """
        Restore the default foreground when the cursor leaves the widget.
        """
        return event.widget.config(fg = FG)
############ ACCOUNTS AND CONTACTS ############################
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
class AccountsContacts(tk.Frame):
    """Accounts & Contacts screen: a column of navigation buttons."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        background, foreground, font = config_frame('AccountsContacts')
        self.config(bg = background)

        # Navigation targets; the button for this screen stays disabled.
        nav_specs = [
            ('Accounts/Contacts', AccountsContacts, tk.DISABLED),
            ('ToDo', ToDo, tk.NORMAL),
            ('CallLog', CallLog, tk.NORMAL),
            ('Projects', Projects, tk.NORMAL),
            ('BidSolicits', BidSolicits, tk.NORMAL),
        ]
        for row_index, (text, target, state) in enumerate(nav_specs, start = 1):
            # Bind ``target`` as a default argument so each button raises
            # its own window (avoids the late-binding closure pitfall).
            button = tk.Button(
                self, text = text, state = state,
                command = lambda target = target: controller.show_window(target),
            )
            button.config(bg = background, fg = foreground, font = font)
            button.grid(row = row_index, column = 0, padx = 5, pady = 5, sticky = tk.EW)
            button.bind('<Enter>', self.widget_focus_color)
            button.bind('<Leave>', self.widget_focus_color_def)

        # Screen title label.
        header = tk.Label(self, text = 'Accounts &\nContacts')
        header.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = tk.EW)
        header.config(bg = BG, fg = 'green', font = 'Verdana 16 bold')

    def widget_focus_color(self, event):
        """Highlight the widget under the cursor (hover-in)."""
        return event.widget.config(fg = DGR)

    def widget_focus_color_def(self, event):
        """Restore the default foreground (hover-out)."""
        return event.widget.config(fg = FG)
############ TO DO ############################
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
class ToDo(tk.Frame):
    """To-do list screen with a navigation button column.

    Mirrors the layout of the other screens: title label in row 0,
    navigation buttons in rows 1-5; the current screen's button is disabled.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        conf = config_frame('ToDo')
        self.config(bg=conf[0])
        label = tk.Label(self, text='To Dos')
        b_1 = tk.Button(self, text='Accounts/Contacts', command=lambda: controller.show_window(AccountsContacts), state=tk.NORMAL)
        b_2 = tk.Button(self, text='ToDo', command=lambda: controller.show_window(ToDo), state=tk.DISABLED)
        b_3 = tk.Button(self, text='CallLog', command=lambda: controller.show_window(CallLog), state=tk.NORMAL)
        b_4 = tk.Button(self, text='Projects', command=lambda: controller.show_window(Projects), state=tk.NORMAL)
        b_5 = tk.Button(self, text='BidSolicits', command=lambda: controller.show_window(BidSolicits), state=tk.NORMAL)
        for x, i in enumerate((label, b_1, b_2, b_3, b_4, b_5)):
            i.config(bg=conf[0], fg=conf[1], font=conf[2])
            # BUG FIX: was row=x+1, which left grid row 0 empty and pushed this
            # screen's widgets one row lower than the other screens' layouts.
            i.grid(row=x, column=0, padx=5, pady=5, sticky=tk.EW)
        label.config(fg='green')  # title stands out from the buttons
############ CALL LOG ############################
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
class CallLog(tk.Frame):
    """Call log screen with a navigation button column.

    Mirrors the layout of the other screens: title label in row 0,
    navigation buttons in rows 1-5; the current screen's button is disabled.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        conf = config_frame('CallLog')
        self.config(bg=conf[0])
        label = tk.Label(self, text='Call Logs')
        b_1 = tk.Button(self, text='Accounts/Contacts', command=lambda: controller.show_window(AccountsContacts), state=tk.NORMAL)
        b_2 = tk.Button(self, text='ToDo', command=lambda: controller.show_window(ToDo), state=tk.NORMAL)
        b_3 = tk.Button(self, text='CallLog', command=lambda: controller.show_window(CallLog), state=tk.DISABLED)
        b_4 = tk.Button(self, text='Projects', command=lambda: controller.show_window(Projects), state=tk.NORMAL)
        b_5 = tk.Button(self, text='BidSolicits', command=lambda: controller.show_window(BidSolicits), state=tk.NORMAL)
        for x, i in enumerate((label, b_1, b_2, b_3, b_4, b_5)):
            i.config(bg=conf[0], fg=conf[1], font=conf[2])
            # BUG FIX: was row=x+1, which left grid row 0 empty and pushed this
            # screen's widgets one row lower than the other screens' layouts.
            i.grid(row=x, column=0, padx=5, pady=5, sticky=tk.EW)
        label.config(fg='green')  # title stands out from the buttons
############ PROJECTS ############################
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
class Projects(tk.Frame):
    """Projects screen with a navigation button column.

    Mirrors the layout of the other screens: title label in row 0,
    navigation buttons in rows 1-5; the current screen's button is disabled.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        conf = config_frame('Projects')
        self.config(bg=conf[0])
        label = tk.Label(self, text='Projects')
        b_1 = tk.Button(self, text='Accounts/Contacts', command=lambda: controller.show_window(AccountsContacts), state=tk.NORMAL)
        b_2 = tk.Button(self, text='ToDo', command=lambda: controller.show_window(ToDo), state=tk.NORMAL)
        b_3 = tk.Button(self, text='CallLog', command=lambda: controller.show_window(CallLog), state=tk.NORMAL)
        b_4 = tk.Button(self, text='Projects', command=lambda: controller.show_window(Projects), state=tk.DISABLED)
        b_5 = tk.Button(self, text='BidSolicits', command=lambda: controller.show_window(BidSolicits), state=tk.NORMAL)
        for x, i in enumerate((label, b_1, b_2, b_3, b_4, b_5)):
            i.config(bg=conf[0], fg=conf[1], font=conf[2])
            # BUG FIX: was row=x+1, which left grid row 0 empty and pushed this
            # screen's widgets one row lower than the other screens' layouts.
            i.grid(row=x, column=0, padx=5, pady=5, sticky=tk.EW)
        label.config(fg='green')  # title stands out from the buttons
############ BID SOLICITS ############################
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
class BidSolicits(tk.Frame):
    """Bid solicitations screen with a navigation button column.

    Mirrors the layout of the other screens: title label in row 0,
    navigation buttons in rows 1-5; the current screen's button is disabled.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        conf = config_frame('BidSolicits')
        self.config(bg=conf[0])
        label = tk.Label(self, text='BidSolicits')
        b_1 = tk.Button(self, text='Accounts/Contacts', command=lambda: controller.show_window(AccountsContacts), state=tk.NORMAL)
        b_2 = tk.Button(self, text='ToDo', command=lambda: controller.show_window(ToDo), state=tk.NORMAL)
        b_3 = tk.Button(self, text='CallLog', command=lambda: controller.show_window(CallLog), state=tk.NORMAL)
        b_4 = tk.Button(self, text='Projects', command=lambda: controller.show_window(Projects), state=tk.NORMAL)
        b_5 = tk.Button(self, text='BidSolicits', command=lambda: controller.show_window(BidSolicits), state=tk.DISABLED)
        for x, i in enumerate((label, b_1, b_2, b_3, b_4, b_5)):
            i.config(bg=conf[0], fg=conf[1], font=conf[2])
            # BUG FIX: was row=x+1, which left grid row 0 empty and pushed this
            # screen's widgets one row lower than the other screens' layouts.
            i.grid(row=x, column=0, padx=5, pady=5, sticky=tk.EW)
        label.config(fg='green')  # title stands out from the buttons
#Initialize
#-----------------------------------------
if __name__ == '__main__':
    # Create the root application window and enter the Tk event loop.
    app = MainStart()
    app.mainloop()
|
<gh_stars>10-100
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
import unittest
import logging
import tempfile
import warnings
import pathlib
import pickle
import platform
import glob
import os
import re
from textwrap import dedent
from xmlschema import XMLSchemaParseError, XMLSchemaIncludeWarning, XMLSchemaImportWarning
from xmlschema.names import XML_NAMESPACE, LOCATION_HINTS, SCHEMAS_DIR, XSD_ELEMENT, XSI_TYPE
from xmlschema.etree import etree_element
from xmlschema.validators import XMLSchemaBase, XMLSchema10, XMLSchema11, \
XsdGlobals, Xsd11Attribute
from xmlschema.testing import SKIP_REMOTE_TESTS, XsdValidatorTestCase
from xmlschema.validators.schemas import logger
class CustomXMLSchema(XMLSchema10):
    """Module-level XMLSchema10 subclass used by schema subclassing/pickling tests."""
    pass
class TestXMLSchema10(XsdValidatorTestCase):
    # Directory containing the XSD/XML test-case fixtures, relative to this module.
    TEST_CASES_DIR = os.path.join(os.path.dirname(__file__), '../test_cases')
    # Show full diffs on assertion failures.
    maxDiff = None

    # Class-scoped subclass (shadows the module-level CustomXMLSchema for this case).
    class CustomXMLSchema(XMLSchema10):
        pass
def test_schema_validation(self):
schema = self.schema_class(self.vh_xsd_file)
self.assertEqual(schema.validation, 'strict')
schema = self.schema_class(self.vh_xsd_file, validation='lax')
self.assertEqual(schema.validation, 'lax')
schema = self.schema_class(self.vh_xsd_file, validation='skip')
self.assertEqual(schema.validation, 'skip')
with self.assertRaises(ValueError):
self.schema_class(self.vh_xsd_file, validation='none')
def test_schema_string_repr(self):
schema = self.schema_class(self.vh_xsd_file)
tmpl = "%s(name='vehicles.xsd', namespace='http://example.com/vehicles')"
self.assertEqual(str(schema), tmpl % self.schema_class.__name__)
def test_schema_copy(self):
schema = self.vh_schema.copy()
self.assertNotEqual(id(self.vh_schema), id(schema))
self.assertNotEqual(id(self.vh_schema.namespaces), id(schema.namespaces))
self.assertNotEqual(id(self.vh_schema.maps), id(schema.maps))
    def test_schema_location_hints(self):
        """Schema location hints from xsi attributes are collected at parse time."""
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            # xsi:schemaLocation -> list of (namespace, location) couples.
            schema = self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                        xsi:schemaLocation="http://xmlschema.test/ns schema.xsd">
                    <xs:element name="root" />
                </xs:schema>"""))
            self.assertEqual(schema.schema_location, [("http://xmlschema.test/ns", "schema.xsd")])
            self.assertIsNone(schema.no_namespace_schema_location)
            # xsi:noNamespaceSchemaLocation -> single location string.
            schema = self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                        xsi:noNamespaceSchemaLocation="schema.xsd">
                    <xs:element name="root" />
                </xs:schema>"""))
            self.assertEqual(schema.schema_location, [])
            self.assertEqual(schema.no_namespace_schema_location, 'schema.xsd')
    def test_target_prefix(self):
        """target_prefix is the prefix mapped to the targetNamespace ('' if unmapped)."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    targetNamespace="http://xmlschema.test/ns">
                <xs:element name="root" />
            </xs:schema>"""))
        self.assertEqual(schema.target_prefix, '')
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    xmlns:tns="http://xmlschema.test/ns"
                    targetNamespace="http://xmlschema.test/ns">
                <xs:element name="root" />
            </xs:schema>"""))
        self.assertEqual(schema.target_prefix, 'tns')
def test_builtin_types(self):
self.assertIn('string', self.schema_class.builtin_types())
with self.assertRaises(RuntimeError):
self.schema_class.meta_schema.builtin_types()
    def test_resolve_qname(self):
        """resolve_qname() maps prefixed names to extended QNames and rejects bad ones."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
                <xs:element name="root" />
            </xs:schema>"""))
        self.assertEqual(schema.resolve_qname('xs:element'), XSD_ELEMENT)
        self.assertEqual(schema.resolve_qname('xsi:type'), XSI_TYPE)
        # Already-extended names and unprefixed local names pass through unchanged.
        self.assertEqual(schema.resolve_qname(XSI_TYPE), XSI_TYPE)
        self.assertEqual(schema.resolve_qname('element'), 'element')
        # Malformed QNames raise ValueError.
        self.assertRaises(ValueError, schema.resolve_qname, '')
        self.assertRaises(ValueError, schema.resolve_qname, 'xsi:a type ')
        self.assertRaises(ValueError, schema.resolve_qname, 'xml::lang')
    def test_global_group_definitions(self):
        """Invalid global groups are collected as errors (lax) or raised (strict)."""
        schema = self.check_schema("""
            <xs:group name="wrong_child">
                <xs:element name="foo"/>
            </xs:group>""", validation='lax')
        self.assertEqual(len(schema.errors), 1)
        # In strict mode an empty or annotation-only group is a parse error.
        self.check_schema('<xs:group name="empty" />', XMLSchemaParseError)
        self.check_schema('<xs:group name="empty"><xs:annotation/></xs:group>', XMLSchemaParseError)
    def test_wrong_includes_and_imports(self):
        """Broken includes/redefines/imports produce warnings, not hard errors."""
        with warnings.catch_warnings(record=True) as context:
            warnings.simplefilter("always")
            self.check_schema("""
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" targetNamespace="ns">
                    <xs:include schemaLocation="example.xsd" />
                    <xs:import schemaLocation="example.xsd" />
                    <xs:redefine schemaLocation="example.xsd"/>
                    <xs:import namespace="http://missing.example.test/" />
                    <xs:import/>
                </xs:schema>
                """)
            # Only the three loads with an unresolvable location warn; the
            # location-less imports are silently ignored.
            self.assertEqual(len(context), 3, "Wrong number of include/import warnings")
            self.assertEqual(context[0].category, XMLSchemaIncludeWarning)
            self.assertEqual(context[1].category, XMLSchemaIncludeWarning)
            self.assertEqual(context[2].category, XMLSchemaImportWarning)
            self.assertTrue(str(context[0].message).startswith("Include"))
            self.assertTrue(str(context[1].message).startswith("Redefine"))
            self.assertTrue(str(context[2].message).startswith("Import of namespace"))
    def test_wrong_references(self):
        """A type reference into the XSD namespace that isn't builtin is a parse error."""
        # Wrong namespace for element type's reference
        self.check_schema("""
            <xs:element name="dimension" type="xs:dimensionType"/>
            <xs:simpleType name="dimensionType">
                <xs:restriction base="xs:short"/>
            </xs:simpleType>
            """, XMLSchemaParseError)
    def test_annotations(self):
        """Element annotations are lazily parsed; duplicated annotations are errors."""
        schema = self.check_schema("""
            <xs:element name='foo'>
                <xs:annotation />
            </xs:element>""")
        xsd_element = schema.elements['foo']
        self.assertIsNone(xsd_element._annotation)  # lazy annotation
        self.assertIsNotNone(xsd_element.annotation)
        self.assertIs(xsd_element.annotation, xsd_element._annotation)
        # A second xs:annotation child is not allowed.
        self.check_schema("""
            <xs:simpleType name='Magic'>
                <xs:annotation />
                <xs:annotation />
                <xs:restriction base='xs:string'>
                    <xs:enumeration value='A'/>
                </xs:restriction>
            </xs:simpleType>""", XMLSchemaParseError)
        schema = self.check_schema("""
            <xs:simpleType name='Magic'>
                <xs:annotation>
                    <xs:documentation> stuff </xs:documentation>
                </xs:annotation>
                <xs:restriction base='xs:string'>
                    <xs:enumeration value='A'/>
                </xs:restriction>
            </xs:simpleType>""")
        xsd_type = schema.types["Magic"]
        self.assertIsNotNone(xsd_type._annotation)  # xs:simpleType annotations are not lazy parsed
        self.assertEqual(str(xsd_type.annotation), ' stuff ')
    def test_annotation_string(self):
        """str() of an annotation joins its documentation parts; repr() truncates."""
        schema = self.check_schema("""
            <xs:element name='A'>
                <xs:annotation>
                    <xs:documentation>A element info</xs:documentation>
                </xs:annotation>
            </xs:element>
            <xs:element name='B'>
                <xs:annotation>
                    <xs:documentation>B element extended info, line1</xs:documentation>
                    <xs:documentation>B element extended info, line2</xs:documentation>
                </xs:annotation>
            </xs:element>""")
        xsd_element = schema.elements['A']
        self.assertEqual(str(xsd_element.annotation), 'A element info')
        self.assertEqual(repr(xsd_element.annotation), "XsdAnnotation('A element info')")
        xsd_element = schema.elements['B']
        # Multiple xs:documentation children are newline-joined in str().
        self.assertEqual(str(xsd_element.annotation),
                         'B element extended info, line1\nB element extended info, line2')
        self.assertEqual(repr(xsd_element.annotation),
                         "XsdAnnotation('B element extended info, line1\\nB element')")
    def test_schema_annotations(self):
        """Schema-level annotations are lazily collected and cached on first access."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="root"/>
            </xs:schema>"""))
        self.assertIsNone(schema._annotations)  # not parsed yet
        annotations = schema.annotations
        self.assertListEqual(annotations, [])
        self.assertIs(annotations, schema.annotations)  # cached after first access
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:annotation>
                    <xs:documentation>First annotation</xs:documentation>
                </xs:annotation>
                <xs:annotation>
                    <xs:documentation>Second annotation</xs:documentation>
                </xs:annotation>
                <xs:element name="root"/>
                <xs:annotation>
                    <xs:documentation>Third annotation</xs:documentation>
                </xs:annotation>
            </xs:schema>"""))
        self.assertIsNone(schema._annotations)
        annotations = schema.annotations
        # All three top-level annotations are collected, in document order.
        self.assertEqual(len(annotations), 3)
        self.assertEqual(repr(annotations[0]), "XsdAnnotation('First annotation')")
        self.assertEqual(repr(annotations[1]), "XsdAnnotation('Second annotation')")
        self.assertEqual(repr(annotations[2]), "XsdAnnotation('Third annotation')")
        self.assertIs(annotations, schema.annotations)
def test_base_schemas(self):
xsd_file = os.path.join(SCHEMAS_DIR, 'XML/xml_minimal.xsd')
schema = self.schema_class(xsd_file)
self.assertEqual(schema.target_namespace, XML_NAMESPACE)
    def test_root_elements(self):
        """root_elements excludes globals that appear inside other global types."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"/>"""))
        self.assertEqual(schema.root_elements, [])
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="root" />
            </xs:schema>"""))
        self.assertEqual(schema.root_elements, [schema.elements['root']])
        # Test issue #107 fix
        schema = self.schema_class(dedent("""\
            <?xml version="1.0" encoding="utf-8"?>
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="root1" type="root"/>
                <xs:element name="root2" type="root"/>
                <xs:complexType name="root">
                    <xs:sequence>
                        <xs:element name="elementWithNoType"/>
                    </xs:sequence>
                </xs:complexType>
            </xs:schema>"""))
        self.assertEqual(set(schema.root_elements),
                         {schema.elements['root1'], schema.elements['root2']})
def test_simple_types(self):
self.assertListEqual(self.vh_schema.simple_types, [])
self.assertGreater(len(self.st_schema.simple_types), 20)
def test_complex_types(self):
self.assertListEqual(self.vh_schema.complex_types,
[self.vh_schema.types['vehicleType']])
def test_is_restriction_method(self):
# Test issue #111 fix
schema = self.schema_class(source=self.casepath('issues/issue_111/issue_111.xsd'))
extended_header_def = schema.types['extendedHeaderDef']
self.assertTrue(extended_header_def.is_derived(schema.types['blockDef']))
@unittest.skipIf(SKIP_REMOTE_TESTS, "Remote networks are not accessible.")
def test_remote_schemas_loading(self):
col_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/"
"tests/test_cases/examples/collection/collection.xsd",
timeout=300)
self.assertTrue(isinstance(col_schema, self.schema_class))
vh_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/"
"tests/test_cases/examples/vehicles/vehicles.xsd",
timeout=300)
self.assertTrue(isinstance(vh_schema, self.schema_class))
def test_schema_defuse(self):
vh_schema = self.schema_class(self.vh_xsd_file, defuse='always')
self.assertIsInstance(vh_schema.root, etree_element)
for schema in vh_schema.maps.iter_schemas():
self.assertIsInstance(schema.root, etree_element)
    def test_logging(self):
        """loglevel controls build logging and the logger is restored to WARNING after."""
        self.schema_class(self.vh_xsd_file, loglevel=logging.ERROR)
        self.assertEqual(logger.level, logging.WARNING)
        with self.assertLogs('xmlschema', level='INFO') as ctx:
            self.schema_class(self.vh_xsd_file, loglevel=logging.INFO)
        self.assertEqual(logger.level, logging.WARNING)
        self.assertEqual(len(ctx.output), 7)
        self.assertIn("INFO:xmlschema:Include schema from 'types.xsd'", ctx.output)
        self.assertIn("INFO:xmlschema:Resource 'types.xsd' is already loaded", ctx.output)
        with self.assertLogs('xmlschema', level='DEBUG') as ctx:
            self.schema_class(self.vh_xsd_file, loglevel=logging.DEBUG)
        self.assertEqual(logger.level, logging.WARNING)
        self.assertEqual(len(ctx.output), 19)
        self.assertIn("INFO:xmlschema:Include schema from 'cars.xsd'", ctx.output)
        self.assertIn("INFO:xmlschema:Resource 'cars.xsd' is already loaded", ctx.output)
        self.assertIn("DEBUG:xmlschema:Schema targetNamespace is "
                      "'http://example.com/vehicles'", ctx.output)
        # NOTE(review): the next assertion duplicates the 'cars.xsd' one above —
        # harmless, but probably a copy-paste leftover.
        self.assertIn("INFO:xmlschema:Resource 'cars.xsd' is already loaded", ctx.output)
        # With string argument
        with self.assertRaises(ValueError) as ctx:
            self.schema_class(self.vh_xsd_file, loglevel='all')
        self.assertEqual(str(ctx.exception), "'all' is not a valid loglevel")
        with self.assertLogs('xmlschema', level='INFO') as ctx:
            self.schema_class(self.vh_xsd_file, loglevel='INFO')
        self.assertEqual(len(ctx.output), 7)
        # Level names are stripped and case-insensitive.
        with self.assertLogs('xmlschema', level='INFO') as ctx:
            self.schema_class(self.vh_xsd_file, loglevel=' Info ')
        self.assertEqual(len(ctx.output), 7)
    def test_target_namespace(self):
        """targetNamespace is exposed; an empty-string value is rejected at parse time."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    targetNamespace="http://xmlschema.test/ns">
                <xs:element name="root"/>
            </xs:schema>"""))
        self.assertEqual(schema.target_namespace, 'http://xmlschema.test/ns')
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="root"/>
            </xs:schema>"""))
        self.assertEqual(schema.target_namespace, '')
        with self.assertRaises(XMLSchemaParseError) as ctx:
            self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        targetNamespace="">
                    <xs:element name="root"/>
                </xs:schema>"""))
        self.assertEqual(ctx.exception.message,
                         "the attribute 'targetNamespace' cannot be an empty string")
    def test_block_default(self):
        """blockDefault is kept verbatim, '#all' expands, bad values are parse errors."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    blockDefault="extension restriction ">
                <xs:element name="root"/>
            </xs:schema>"""))
        self.assertEqual(schema.block_default, 'extension restriction ')
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    blockDefault="#all">
                <xs:element name="root"/>
            </xs:schema>"""))
        self.assertEqual(set(schema.block_default.split()),
                         {'substitution', 'extension', 'restriction'})
        # NOTE(review): the stray '>>' in the sources below looks like a typo,
        # but appears harmless since the bad blockDefault value fails first — confirm.
        with self.assertRaises(XMLSchemaParseError) as ctx:
            self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        blockDefault="all">>
                    <xs:element name="root"/>
                </xs:schema>"""))
        self.assertEqual(ctx.exception.message,
                         "wrong value 'all' for attribute 'blockDefault'")
        with self.assertRaises(XMLSchemaParseError) as ctx:
            self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        blockDefault="#all restriction">>
                    <xs:element name="root"/>
                </xs:schema>"""))
        self.assertEqual(ctx.exception.message,
                         "wrong value '#all restriction' for attribute 'blockDefault'")
    def test_final_default(self):
        """finalDefault is kept verbatim, '#all' expands, bad values are parse errors."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    finalDefault="extension restriction ">
                <xs:element name="root"/>
            </xs:schema>"""))
        self.assertEqual(schema.final_default, 'extension restriction ')
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    finalDefault="#all">
                <xs:element name="root"/>
            </xs:schema>"""))
        self.assertEqual(set(schema.final_default.split()),
                         {'list', 'union', 'extension', 'restriction'})
        # NOTE(review): stray '>>' in the source below looks like a typo; the bad
        # finalDefault value fails first, so it appears harmless — confirm.
        with self.assertRaises(XMLSchemaParseError) as ctx:
            self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        finalDefault="all">>
                    <xs:element name="root"/>
                </xs:schema>"""))
        self.assertEqual(ctx.exception.message,
                         "wrong value 'all' for attribute 'finalDefault'")
def test_use_fallback(self):
source = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>""")
schema = self.schema_class(source)
self.assertEqual(schema.fallback_locations, LOCATION_HINTS)
schema = self.schema_class(source, use_fallback=False)
self.assertEqual(schema.fallback_locations, {})
    def test_global_maps(self):
        """A schema can share a previously built XsdGlobals via the global_maps argument."""
        source = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="root"/>
            </xs:schema>""")
        col_schema = self.schema_class(self.col_xsd_file)
        with self.assertRaises(TypeError) as ctx:
            # Passing a schema instead of an XsdGlobals instance is rejected.
            self.schema_class(self.col_schema, global_maps=col_schema)
        self.assertIn("'global_maps' argument must be", str(ctx.exception))
        schema = self.schema_class(source, global_maps=col_schema.maps)
        self.assertIs(col_schema.maps, schema.maps)
    def test_version_control(self):
        """vc:minVersion gates XSD 1.1-only components during schema parsing."""
        schema = self.schema_class(dedent("""
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="root">
                    <xs:complexType>
                        <xs:attribute name="a" use="required"/>
                        <xs:assert test="@a > 300" vc:minVersion="1.1"
                            xmlns:vc="http://www.w3.org/2007/XMLSchema-versioning"/>
                    </xs:complexType>
                </xs:element>
            </xs:schema>"""))
        # The xs:assert child is kept only by XSD 1.1 processors.
        self.assertEqual(len(schema.root[0][0]), 1 if schema.XSD_VERSION == '1.0' else 2)
        schema = self.schema_class(dedent("""
            <xs:schema vc:minVersion="1.1" elementFormDefault="qualified"
                    xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    xmlns:vc="http://www.w3.org/2007/XMLSchema-versioning">
                <xs:element name="root"/>
            </xs:schema>"""))
        # With vc:minVersion="1.1" on the root, a 1.0 processor drops the content.
        self.assertEqual(len(schema.root), 0 if schema.XSD_VERSION == '1.0' else 1)
def test_xsd_version_compatibility_property(self):
self.assertEqual(self.vh_schema.xsd_version, self.vh_schema.XSD_VERSION)
def test_explicit_locations(self):
source = dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>""")
locations = {'http://example.com/vehicles': self.vh_xsd_file}
schema = self.schema_class(source, locations=locations)
self.assertEqual(len(schema.maps.namespaces['http://example.com/vehicles']), 4)
    def test_use_meta_property(self):
        """use_meta=False builds the schema against a fresh, private meta-schema."""
        self.assertTrue(self.vh_schema.use_meta)
        self.assertTrue(self.col_schema.use_meta)
        meta_schema = self.schema_class.meta_schema
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="foo"/>
            </xs:schema>"""), use_meta=False)
        self.assertIsNot(meta_schema, schema.meta_schema)
        self.assertFalse(schema.use_meta)
def test_other_schema_root_attributes(self):
self.assertIsNone(self.vh_schema.id)
self.assertIsNone(self.vh_schema.version)
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" id="foo" version="2.0">
<xs:element name="foo"/>
</xs:schema>"""))
self.assertEqual(schema.id, 'foo')
self.assertEqual(schema.version, '2.0')
    def test_change_maps_attribute(self):
        """Reassigning schema.maps resets built state; meta-schema maps are frozen."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="root"/>
            </xs:schema>"""))
        with self.assertRaises(ValueError) as ctx:
            schema.meta_schema.maps = XsdGlobals(schema, schema.validation)
        self.assertEqual(str(ctx.exception),
                         "cannot change the global maps instance of a meta-schema")
        self.assertTrue(schema.built)
        maps, schema.maps = schema.maps, XsdGlobals(schema, schema.validation)
        self.assertIsNot(maps, schema.maps)
        self.assertFalse(schema.built)  # fresh maps are unbuilt
        schema.maps = maps
        self.assertTrue(schema.built)  # restoring the built maps restores state
def test_listed_and_reversed_elements(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="elem1"/>
<xs:element name="elem2"/>
<xs:element name="elem3"/>
</xs:schema>"""))
elements = list(schema)
self.assertListEqual(elements, [schema.elements['elem1'],
schema.elements['elem2'],
schema.elements['elem3']])
elements.reverse()
self.assertListEqual(elements, list(reversed(schema)))
    def test_multi_schema_initialization(self):
        """A list of sources builds one schema plus sibling schemas in shared maps."""
        source1 = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="elem1"/>
            </xs:schema>""")
        source2 = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="elem2"/>
            </xs:schema>""")
        source3 = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="elem3"/>
            </xs:schema>""")
        schema = self.schema_class([source1, source2, source3])
        self.assertEqual(len(schema.elements), 3)
        self.assertEqual(len(schema.maps.namespaces['']), 3)
        self.assertIs(schema.elements['elem1'].schema, schema)
        self.assertIs(schema.elements['elem2'].schema, schema.maps.namespaces[''][1])
        self.assertIs(schema.elements['elem3'].schema, schema.maps.namespaces[''][2])
        # Duplicated global elements across sources are rejected.
        with self.assertRaises(XMLSchemaParseError) as ec:
            self.schema_class([source1, source2, source2])
        self.assertIn("global element with name='elem2' is already defined", str(ec.exception))
        source1 = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    targetNamespace="http://xmlschema.test/ns">
                <xs:element name="elem1"/>
            </xs:schema>""")
        # The namespace-less source2 is loaded into source1's target namespace.
        schema = self.schema_class([source1, source2])
        self.assertEqual(len(schema.elements), 2)
        self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns']), 2)
        self.assertIs(schema.elements['elem1'].schema, schema)
        self.assertIs(schema.elements['elem2'].schema,
                      schema.maps.namespaces['http://xmlschema.test/ns'][1])
    def test_add_schema(self):
        """add_schema() registers extra sources; same-namespace additions need a rebuild."""
        source1 = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    targetNamespace="http://xmlschema.test/ns">
                <xs:element name="elem1"/>
            </xs:schema>""")
        source2 = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
                <xs:element name="elem2"/>
            </xs:schema>""")
        source3 = dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    targetNamespace="http://xmlschema.test/ns1">
                <xs:element name="elem3"/>
            </xs:schema>""")
        schema = self.schema_class(source1)
        schema.add_schema(source2, build=True)
        self.assertEqual(len(schema.elements), 1)
        self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns']), 1)
        self.assertEqual(len(schema.maps.namespaces['']), 1)
        # Less checks on duplicate objects for schemas added after the build
        schema.add_schema(source2, build=True)
        self.assertEqual(len(schema.maps.namespaces['']), 2)
        self.assertTrue(schema.maps.built)
        # A full rebuild detects the duplicated global element.
        with self.assertRaises(XMLSchemaParseError) as ec:
            schema.maps.clear()
            schema.build()
        self.assertIn("global element with name='elem2' is already defined", str(ec.exception))
        schema = self.schema_class(source1)
        schema.add_schema(source2, namespace='http://xmlschema.test/ns', build=True)
        self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns']), 2)
        # Need a rebuild to add elem2 from added schema ...
        self.assertEqual(len(schema.elements), 1)
        schema.maps.clear()
        schema.build()
        self.assertEqual(len(schema.elements), 2)
        # ... so is better to build after sources additions
        schema = self.schema_class(source1, build=False)
        schema.add_schema(source2, namespace='http://xmlschema.test/ns')
        schema.build()
        self.assertEqual(len(schema.elements), 2)
        # Adding other namespaces do not require rebuild
        schema3 = schema.add_schema(source3, build=True)
        self.assertEqual(len(schema.maps.namespaces['http://xmlschema.test/ns1']), 1)
        self.assertEqual(len(schema3.elements), 1)
    def test_export_errors__issue_187(self):
        """export() validates its target path and reports precise error messages."""
        with self.assertRaises(ValueError) as ctx:
            self.vh_schema.export(target=self.vh_dir)
        self.assertIn("target directory", str(ctx.exception))
        self.assertIn("is not empty", str(ctx.exception))
        with self.assertRaises(ValueError) as ctx:
            self.vh_schema.export(target=self.vh_xsd_file)
        self.assertIn("target", str(ctx.exception))
        self.assertIn("is not a directory", str(ctx.exception))
        with self.assertRaises(ValueError) as ctx:
            self.vh_schema.export(target=self.vh_xsd_file + '/target')
        self.assertIn("target parent", str(ctx.exception))
        self.assertIn("is not a directory", str(ctx.exception))
        with tempfile.TemporaryDirectory() as dirname:
            with self.assertRaises(ValueError) as ctx:
                # 'dirname' has no trailing separator, so this names a sibling
                # path whose parent directory does not exist.
                self.vh_schema.export(target=dirname + 'subdir/target')
            self.assertIn("target parent directory", str(ctx.exception))
            self.assertIn("does not exist", str(ctx.exception))
    def test_export_same_directory__issue_187(self):
        """Exported schema files match the originals (whitespace-insensitive on Windows)."""
        with tempfile.TemporaryDirectory() as dirname:
            self.vh_schema.export(target=dirname)
            for filename in os.listdir(dirname):
                with pathlib.Path(dirname).joinpath(filename).open() as fp:
                    exported_schema = fp.read()
                with pathlib.Path(self.vh_dir).joinpath(filename).open() as fp:
                    original_schema = fp.read()
                if platform.system() == 'Windows':
                    # Strip all whitespace to ignore line-ending differences.
                    exported_schema = re.sub(r'\s+', '', exported_schema)
                    original_schema = re.sub(r'\s+', '', original_schema)
                self.assertEqual(exported_schema, original_schema)
        self.assertFalse(os.path.isdir(dirname))  # temp dir is removed on exit
    def test_export_another_directory__issue_187(self):
        """Exporting rewrites relative include locations to the target directory."""
        vh_schema_file = self.casepath('issues/issue_187/issue_187_1.xsd')
        vh_schema = self.schema_class(vh_schema_file)
        with tempfile.TemporaryDirectory() as dirname:
            vh_schema.export(target=dirname)
            # Included schemas are exported under their original relative layout.
            path = pathlib.Path(dirname).joinpath('examples/vehicles/*.xsd')
            for filename in glob.iglob(pathname=str(path)):
                with pathlib.Path(dirname).joinpath(filename).open() as fp:
                    exported_schema = fp.read()
                basename = os.path.basename(filename)
                with pathlib.Path(self.vh_dir).joinpath(basename).open() as fp:
                    original_schema = fp.read()
                if platform.system() == 'Windows':
                    exported_schema = re.sub(r'\s+', '', exported_schema)
                    original_schema = re.sub(r'\s+', '', original_schema)
                self.assertEqual(exported_schema, original_schema)
            # The root schema differs: its '../..' locations are rewritten.
            with pathlib.Path(dirname).joinpath('issue_187_1.xsd').open() as fp:
                exported_schema = fp.read()
            with open(vh_schema_file) as fp:
                original_schema = fp.read()
            if platform.system() == 'Windows':
                exported_schema = re.sub(r'\s+', '', exported_schema)
                original_schema = re.sub(r'\s+', '', original_schema)
            self.assertNotEqual(exported_schema, original_schema)
            self.assertEqual(
                exported_schema,
                original_schema.replace('../..', dirname.replace('\\', '/'))
            )
        self.assertFalse(os.path.isdir(dirname))
    @unittest.skipIf(SKIP_REMOTE_TESTS, "Remote networks are not accessible.")
    def test_export_remote__issue_187(self):
        """save_remote controls whether remote-located schemas are exported locally."""
        vh_schema_file = self.casepath('issues/issue_187/issue_187_2.xsd')
        vh_schema = self.schema_class(vh_schema_file)
        # Without save_remote the root schema is exported unchanged.
        with tempfile.TemporaryDirectory() as dirname:
            vh_schema.export(target=dirname)
            with pathlib.Path(dirname).joinpath('issue_187_2.xsd').open() as fp:
                exported_schema = fp.read()
            with open(vh_schema_file) as fp:
                original_schema = fp.read()
            if platform.system() == 'Windows':
                exported_schema = re.sub(r'\s+', '', exported_schema)
                original_schema = re.sub(r'\s+', '', original_schema)
            self.assertEqual(exported_schema, original_schema)
        self.assertFalse(os.path.isdir(dirname))
        # With save_remote=True remote schemas are saved under host-named subdirs
        # and the remote URLs in the root schema are rewritten to local paths.
        with tempfile.TemporaryDirectory() as dirname:
            vh_schema.export(target=dirname, save_remote=True)
            path = pathlib.Path(dirname).joinpath('brunato/xmlschema/master/tests/test_cases/'
                                                  'examples/vehicles/*.xsd')
            for filename in glob.iglob(pathname=str(path)):
                with pathlib.Path(dirname).joinpath(filename).open() as fp:
                    exported_schema = fp.read()
                basename = os.path.basename(filename)
                with pathlib.Path(self.vh_dir).joinpath(basename).open() as fp:
                    original_schema = fp.read()
                self.assertEqual(exported_schema, original_schema)
            with pathlib.Path(dirname).joinpath('issue_187_2.xsd').open() as fp:
                exported_schema = fp.read()
            with open(vh_schema_file) as fp:
                original_schema = fp.read()
            if platform.system() == 'Windows':
                exported_schema = re.sub(r'\s+', '', exported_schema)
                original_schema = re.sub(r'\s+', '', original_schema)
            self.assertNotEqual(exported_schema, original_schema)
            self.assertEqual(
                exported_schema,
                original_schema.replace('https://raw.githubusercontent.com',
                                        dirname.replace('\\', '/') + '/raw.githubusercontent.com')
            )
        self.assertFalse(os.path.isdir(dirname))
    def test_pickling_subclassed_schema__issue_263(self):
        """Pickling of subclassed schemas (issue #263).

        A schema subclass defined at module level shares the parent's
        meta-schema and survives a pickle round-trip; a subclass defined
        inside a function body cannot be pickled.
        """
        cases_dir = pathlib.Path(__file__).parent.parent
        schema_file = cases_dir.joinpath('test_cases/examples/vehicles/vehicles.xsd')
        xml_file = cases_dir.joinpath('test_cases/examples/vehicles/vehicles.xml')
        schema = self.CustomXMLSchema(str(schema_file))
        self.assertTrue(schema.is_valid(str(xml_file)))
        # The subclass reuses the parent's meta-schema; no new meta-schema
        # class is injected into this module's globals.
        self.assertIs(self.schema_class.meta_schema, schema.meta_schema)
        self.assertNotIn(schema.meta_schema.__class__.__name__, globals())
        s = pickle.dumps(schema)
        _schema = pickle.loads(s)
        self.assertTrue(_schema.is_valid(str(xml_file)))

        # A locally-defined class is not importable by name, so it can't be pickled.
        class CustomLocalXMLSchema(self.schema_class):
            pass

        schema = CustomLocalXMLSchema(str(schema_file))
        self.assertTrue(schema.is_valid(str(xml_file)))
        with self.assertRaises((pickle.PicklingError, AttributeError)) as ec:
            pickle.dumps(schema)
        self.assertIn("Can't pickle", str(ec.exception))
    def test_old_subclassing_attribute(self):
        """The legacy BUILDERS class attribute still works but emits a removal warning."""
        with warnings.catch_warnings(record=True) as ctx:
            warnings.simplefilter("always")

            class OldXMLSchema10(XMLSchema10):
                BUILDERS = {
                    'attribute_class': Xsd11Attribute,
                }

            # Exactly one deprecation-style warning is raised at class creation.
            self.assertEqual(len(ctx), 1, "Expected one import warning")
            self.assertIn("'BUILDERS' will be removed in v2.0", str(ctx[0].message))

        # The legacy entry is mapped onto the new-style class attribute.
        self.assertIs(OldXMLSchema10.xsd_attribute_class, Xsd11Attribute)
        # The parent meta-schema is reused; nothing is injected into globals().
        name = OldXMLSchema10.meta_schema.__class__.__name__
        self.assertEqual(name, 'MetaXMLSchema10')
        self.assertNotIn(name, globals())
def test_default_namespace_mapping__issue_266(self):
schema_file = self.casepath('issues/issue_266/issue_266b-1.xsd')
with self.assertRaises(XMLSchemaParseError) as ec:
self.schema_class(schema_file)
error_message = str(ec.exception)
self.assertIn("the QName 'testAttribute3' is mapped to no namespace", error_message)
self.assertIn("requires that there is an xs:import statement", error_message)
class TestXMLSchema11(TestXMLSchema10):
    """Re-runs the XSD 1.0 test suite with the XSD 1.1 validator, plus 1.1-only cases."""

    schema_class = XMLSchema11

    class CustomXMLSchema(XMLSchema11):
        pass

    def test_default_attributes(self):
        """XSD 1.1 'defaultAttributes': valid group reference, missing group, unknown prefix."""
        schema = self.schema_class(dedent("""\
            <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                    defaultAttributes="attrs">
                <xs:element name="root"/>
                <xs:attributeGroup name="attrs">
                    <xs:attribute name="a"/>
                </xs:attributeGroup>
            </xs:schema>"""))
        self.assertIs(schema.default_attributes, schema.attribute_groups['attrs'])

        # Referencing a non-existent attribute group is a parse error.
        with self.assertRaises(XMLSchemaParseError) as ctx:
            self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        defaultAttributes="attrs">
                    <xs:element name="root"/>
                </xs:schema>"""))
        self.assertIn("'attrs' doesn't match any attribute group", ctx.exception.message)

        # An undeclared namespace prefix in the reference is a parse error.
        with self.assertRaises(XMLSchemaParseError) as ctx:
            self.schema_class(dedent("""\
                <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        defaultAttributes="x:attrs">
                    <xs:element name="root"/>
                </xs:schema>"""))
        self.assertEqual("prefix 'x' not found in namespace map", ctx.exception.message)
class TestXMLSchemaMeta(unittest.TestCase):
    """Tests for the metaclass machinery that builds schema classes and meta-schemas."""

    def test_wrong_version(self):
        """An XSD_VERSION other than '1.0'/'1.1' is rejected at class-creation time."""
        with self.assertRaises(ValueError) as ctx:
            class XMLSchema12(XMLSchemaBase):
                XSD_VERSION = '1.2'
                meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.1/XMLSchema.xsd')

            # Not reached: the class statement above raises ValueError.
            assert issubclass(XMLSchema12, XMLSchemaBase)

        self.assertEqual(str(ctx.exception), "XSD_VERSION must be '1.0' or '1.1'")

    def test_from_schema_class(self):
        """Subclassing a concrete schema class yields another XMLSchemaBase subclass."""
        class XMLSchema11Bis(XMLSchema11):
            pass

        self.assertTrue(issubclass(XMLSchema11Bis, XMLSchemaBase))

    def test_dummy_validator_class(self):
        """A fresh subclass of XMLSchemaBase with a meta-schema path is accepted."""
        class DummySchema(XMLSchemaBase):
            XSD_VERSION = '1.1'
            meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.1/XMLSchema.xsd')

        self.assertTrue(issubclass(DummySchema, XMLSchemaBase))

    def test_subclass_but_no_replace_meta_schema(self):
        """Subclassing without redefining meta_schema reuses the parent's meta-schema."""
        class CustomXMLSchema10(XMLSchema10):
            pass

        self.assertIsInstance(CustomXMLSchema10.meta_schema, XMLSchemaBase)
        self.assertIs(CustomXMLSchema10.meta_schema, XMLSchema10.meta_schema)
        # No new meta-schema class is injected into this module's globals.
        name = CustomXMLSchema10.meta_schema.__class__.__name__
        self.assertEqual(name, 'MetaXMLSchema10')
        self.assertNotIn(name, globals())

    def test_subclass_and_replace_meta_schema(self):
        """Redefining meta_schema creates a new meta-schema class in this module."""
        class CustomXMLSchema10(XMLSchema10):
            meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.0/XMLSchema.xsd')

        self.assertIsInstance(CustomXMLSchema10.meta_schema, XMLSchemaBase)
        self.assertIsNot(CustomXMLSchema10.meta_schema, XMLSchema10.meta_schema)
        # The generated class is named after the subclass and added to globals().
        name = CustomXMLSchema10.meta_schema.__class__.__name__
        self.assertEqual(name, 'MetaCustomXMLSchema10')
        self.assertIn(name, globals())
        # It derives from the parent's meta-schema class.
        bases = CustomXMLSchema10.meta_schema.__class__.__bases__
        self.assertEqual(bases, (XMLSchema10.meta_schema.__class__,))

    def test_subclass_and_create_base_meta_schema(self):
        """Subclassing XMLSchemaBase directly creates a meta-schema rooted at the base."""
        class CustomXMLSchema10(XMLSchemaBase):
            meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.0/XMLSchema.xsd')

        self.assertIsInstance(CustomXMLSchema10.meta_schema, XMLSchemaBase)
        self.assertIsNot(CustomXMLSchema10.meta_schema, XMLSchema10.meta_schema)
        name = CustomXMLSchema10.meta_schema.__class__.__name__
        self.assertEqual(name, 'MetaCustomXMLSchema10')
        self.assertIn(name, globals())
        bases = CustomXMLSchema10.meta_schema.__class__.__bases__
        self.assertEqual(bases, (XMLSchemaBase,))
if __name__ == '__main__':
    # Print a banner describing the interpreter and platform, then run the tests.
    header = "Test xmlschema's schema classes with Python {} on {}".format(
        platform.python_version(), platform.platform())
    ruler = '*' * len(header)
    print('\n'.join([ruler, header, ruler]))
    unittest.main()
|
<filename>Tools/Scripts/webkitpy/tool/commands/rebaseline.py
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import optparse
import re
import sys
import time
import traceback
import urllib
import urllib2
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST, SKIP
from webkitpy.layout_tests.port import builders
from webkitpy.layout_tests.port import factory
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
# Module-level logger shared by all rebaseline commands.
_log = logging.getLogger(__name__)
# FIXME: Should TestResultWriter know how to compute this string?
def _baseline_name(fs, test_name, suffix):
    """Return the '-expected.<suffix>' baseline filename for *test_name*.

    *fs* is a FileSystem-like object providing splitext().
    """
    return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix
class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
    """Base class for rebaselining commands.

    Holds the optparse options shared by the concrete commands and the
    bookkeeping of SCM adds/deletes/expectation-line removals that are
    applied (or dumped as JSON) after the baselines are written.
    """
    # not overriding execute() - pylint: disable=W0223

    no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
        help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
              'You can use "webkit-patch optimize-baselines" to optimize separately.'))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")

    suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
        help="Comma-separated-list of file types to rebaseline")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        # Pending SCM operations, recorded now and performed (or printed) later.
        self._scm_changes = {'add': [], 'delete': [], 'remove-lines': []}

    def _add_to_scm_later(self, path):
        """Queue *path* to be added to the SCM."""
        self._scm_changes['add'].append(path)

    def _delete_from_scm_later(self, path):
        """Queue *path* to be deleted from the SCM."""
        self._scm_changes['delete'].append(path)
class BaseInternalRebaselineCommand(AbstractRebaseliningCommand):
    """Shared plumbing for the internal-only commands: --builder/--test options
    and helpers for computing baseline paths and filenames."""

    def __init__(self):
        super(BaseInternalRebaselineCommand, self).__init__(options=[
            self.results_directory_option,
            self.suffixes_option,
            optparse.make_option("--builder", help="Builder to pull new baselines from"),
            optparse.make_option("--test", help="Test to rebaseline"),
        ])

    def _baseline_directory(self, builder_name):
        """Return the directory where new baselines for *builder_name* belong."""
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        # Some builders write their baselines to an override directory.
        override_dir = builders.rebaseline_override_dir(builder_name)
        if override_dir:
            return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
        return port.baseline_version_dir()

    def _test_root(self, test_name):
        # The test name with its extension stripped.
        return self._tool.filesystem.splitext(test_name)[0]

    def _file_name_for_actual_result(self, test_name, suffix):
        """Filename of the actual result produced by a run ('<root>-actual.<suffix>')."""
        return "%s-actual.%s" % (self._test_root(test_name), suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        """Filename of the checked-in baseline ('<root>-expected.<suffix>')."""
        return "%s-expected.%s" % (self._test_root(test_name), suffix)
class CopyExistingBaselinesInternal(BaseInternalRebaselineCommand):
    """Copies the current baseline down the fallback chain before rebaselining,
    so ports that fell back to the old baseline keep seeing it."""
    name = "copy-existing-baselines-internal"
    help_text = "Copy existing baselines down one level in the baseline order to ensure new baselines don't break existing passing platforms."

    @memoized
    def _immediate_predecessors_in_fallback(self, path_to_rebaseline):
        """Return the baseline-directory names that fall back directly to
        *path_to_rebaseline* (i.e. the directory immediately before it in
        some port's baseline search path)."""
        port_names = self._tool.port_factory.all_port_names()
        immediate_predecessors_in_fallback = []
        for port_name in port_names:
            port = self._tool.port_factory.get(port_name)
            if not port.buildbot_archives_baselines():
                continue
            baseline_search_path = port.baseline_search_path()
            try:
                index = baseline_search_path.index(path_to_rebaseline)
                if index:
                    immediate_predecessors_in_fallback.append(self._tool.filesystem.basename(baseline_search_path[index - 1]))
            except ValueError:
                # index() throws a ValueError if the item isn't in the list.
                pass
        return immediate_predecessors_in_fallback

    def _port_for_primary_baseline(self, baseline):
        """Return the port whose primary baseline directory is named *baseline*."""
        for port in [self._tool.port_factory.get(port_name) for port_name in self._tool.port_factory.all_port_names()]:
            if self._tool.filesystem.basename(port.baseline_version_dir()) == baseline:
                return port
        raise Exception("Failed to find port for primary baseline %s." % baseline)

    def _copy_existing_baseline(self, builder_name, test_name, suffix):
        """Copy the baseline *builder_name* currently uses for *test_name*
        into each predecessor directory that would otherwise inherit the
        soon-to-be-overwritten baseline."""
        baseline_directory = self._baseline_directory(builder_name)
        ports = [self._port_for_primary_baseline(baseline) for baseline in self._immediate_predecessors_in_fallback(baseline_directory)]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s." % test_name)
                continue

            new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
                continue

            expectations = TestExpectations(port, [test_name])
            if SKIP in expectations.get_expectations(test_name):
                _log.debug("%s is skipped on %s." % (test_name, port.name()))
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        # Now perform the copies gathered above.
        for old_baseline, new_baseline in zip(old_baselines, new_baselines):
            _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
            if not self._tool.scm().exists(new_baseline):
                self._add_to_scm_later(new_baseline)

    def execute(self, options, args, tool):
        for suffix in options.suffixes.split(','):
            self._copy_existing_baseline(options.builder, options.test, suffix)
        # Parenthesized print: identical output on Python 2, valid on Python 3.
        print(json.dumps(self._scm_changes))
class RebaselineTest(BaseInternalRebaselineCommand):
    """Fetches the actual result for one test from a builder and installs it
    as the new expected baseline."""
    name = "rebaseline-test-internal"
    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."

    def _results_url(self, builder_name):
        """URL of the latest layout-test results for *builder_name*."""
        return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()

    def _save_baseline(self, data, target_baseline):
        """Write *data* to *target_baseline* and queue an SCM add if it is new.

        Note: dropped the unused baseline_directory/test_name/suffix
        parameters of the previous signature (private helper; the only
        caller is _rebaseline_test below).
        """
        if not data:
            _log.debug("No baseline data to save.")
            return

        filesystem = self._tool.filesystem
        filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
        filesystem.write_binary_file(target_baseline, data)
        if not self._tool.scm().exists(target_baseline):
            self._add_to_scm_later(target_baseline)

    def _rebaseline_test(self, builder_name, test_name, suffix, results_url):
        """Download the '-actual' artifact and store it as '-expected'."""
        baseline_directory = self._baseline_directory(builder_name)

        source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
        target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))

        _log.debug("Retrieving %s." % source_baseline)
        # A 404 is converted to None and then skipped by _save_baseline.
        self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline)

    def _rebaseline_test_and_update_expectations(self, options):
        port = self._tool.port_factory.get_from_builder_name(options.builder)
        if (port.reference_files(options.test)):
            # Reftests have no pixel/text baselines to pull.
            _log.warning("Cannot rebaseline reftest: %s", options.test)
            return

        if options.results_directory:
            results_url = 'file://' + options.results_directory
        else:
            results_url = self._results_url(options.builder)
        self._baseline_suffix_list = options.suffixes.split(',')

        for suffix in self._baseline_suffix_list:
            self._rebaseline_test(options.builder, options.test, suffix, results_url)
        self._scm_changes['remove-lines'].append({'builder': options.builder, 'test': options.test})

    def execute(self, options, args, tool):
        self._rebaseline_test_and_update_expectations(options)
        # Parenthesized print: identical output on Python 2, valid on Python 3.
        print(json.dumps(self._scm_changes))
class OptimizeBaselines(AbstractRebaseliningCommand):
    """De-duplicates baselines across the fallback hierarchy for the given tests."""
    name = "optimize-baselines"
    # Fixed typo: "litte" -> "little".
    help_text = "Reshuffles the baselines for the given tests to use as little space on disk as possible."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(OptimizeBaselines, self).__init__(options=[
            self.suffixes_option,
            # Completed the previously-truncated help string.
            optparse.make_option('--no-modify-scm', action='store_true', default=False,
                                 help='Dump SCM commands as JSON instead of executing them'),
        ] + self.platform_options)

    def _optimize_baseline(self, optimizer, test_name):
        """Optimize the baseline for every suffix of *test_name*.

        Returns the accumulated (files_to_delete, files_to_add) across all
        suffixes. (The previous version returned only the last suffix's
        results, dropping the rest, and raised if the suffix list was empty.)
        """
        files_to_delete = []
        files_to_add = []
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            succeeded, more_to_delete, more_to_add = optimizer.optimize(baseline_name)
            if not succeeded:
                print("Heuristics failed to optimize %s" % baseline_name)
            files_to_delete.extend(more_to_delete)
            files_to_add.extend(more_to_add)
        return files_to_delete, files_to_add

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print("No port names match '%s'" % options.platform)
            return

        optimizer = BaselineOptimizer(tool, port_names, skip_scm_commands=options.no_modify_scm)
        # All ports share the same test list; any one will do for enumeration.
        port = tool.port_factory.get(port_names[0])
        for test_name in port.tests(args):
            _log.info("Optimizing %s" % test_name)
            files_to_delete, files_to_add = self._optimize_baseline(optimizer, test_name)
            for path in files_to_delete:
                self._delete_from_scm_later(path)
            for path in files_to_add:
                self._add_to_scm_later(path)

        print(json.dumps(self._scm_changes))
class AnalyzeBaselines(AbstractRebaseliningCommand):
    """Reports which checked-in baselines are identical across directories."""
    name = "analyze-baselines"
    help_text = "Analyzes the baselines for the given tests and prints results that are identical."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(AnalyzeBaselines, self).__init__(options=[
            self.suffixes_option,
            optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
        ] + self.platform_options)
        self._optimizer_class = BaselineOptimizer  # overridable for testing
        self._baseline_optimizer = None
        self._port = None

    def _write(self, msg):
        # Single output funnel; overridable in tests.
        # Parenthesized print: identical output on Python 2, valid on Python 3.
        print(msg)

    def _analyze_baseline(self, options, test_name):
        """Print the per-directory digests for every suffix of *test_name*."""
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
            if results_by_directory:
                self._write("%s:" % baseline_name)
                self._baseline_optimizer.write_by_directory(results_by_directory, self._write, "  ")
            elif options.missing:
                self._write("%s: (no baselines found)" % baseline_name)

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print("No port names match '%s'" % options.platform)
            return

        self._baseline_optimizer = self._optimizer_class(tool, port_names, skip_scm_commands=False)
        self._port = tool.port_factory.get(port_names[0])
        for test_name in self._port.tests(args):
            self._analyze_baseline(options, test_name)
class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
    """Base class for commands that rebaseline many test/builder pairs by
    fanning out to the internal single-test commands in parallel, then
    applying the collected SCM changes and expectation-line removals.

    NOTE(review): this class uses Python 2-only constructs
    (``except X, e`` and ``print >>``); keep that in mind before porting.
    """
    # not overriding execute() - pylint: disable=W0223

    def __init__(self, options=None):
        super(AbstractParallelRebaselineCommand, self).__init__(options=options)
        # Lazily-populated cache of builder -> latest layout test results.
        self._builder_data = {}

    def builder_data(self):
        """Return (and cache) the latest layout-test results per release builder."""
        if not self._builder_data:
            for builder_name in self._release_builders():
                builder = self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name)
                self._builder_data[builder_name] = builder.latest_layout_test_results()
        return self._builder_data

    # The release builders cycle much faster than the debug ones and cover all the platforms.
    def _release_builders(self):
        release_builders = []
        for builder_name in builders.all_builder_names():
            # ASAN bots are excluded: name-based filter.
            if builder_name.find('ASAN') != -1:
                continue
            port = self._tool.port_factory.get_from_builder_name(builder_name)
            if port.test_configuration().build_type == 'release':
                release_builders.append(builder_name)
        return release_builders

    def _run_webkit_patch(self, args, verbose):
        """Run a webkit-patch subcommand, forwarding its stderr to our log."""
        try:
            verbose_args = ['--verbose'] if verbose else []
            stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
            for line in stderr.splitlines():
                _log.warning(line)
        except ScriptError, e:
            _log.error(e)

    def _builders_to_fetch_from(self, builders_to_check):
        # This routine returns the subset of builders that will cover all of the baseline search paths
        # used in the input list. In particular, if the input list contains both Release and Debug
        # versions of a configuration, we *only* return the Release version (since we don't save
        # debug versions of baselines).
        release_builders = set()
        debug_builders = set()
        builders_to_fallback_paths = {}
        for builder in builders_to_check:
            port = self._tool.port_factory.get_from_builder_name(builder)
            if port.test_configuration().build_type == 'release':
                release_builders.add(builder)
            else:
                debug_builders.add(builder)
        # Release builders are visited first so they win ties on fallback paths.
        for builder in list(release_builders) + list(debug_builders):
            port = self._tool.port_factory.get_from_builder_name(builder)
            fallback_path = port.baseline_search_path()
            if fallback_path not in builders_to_fallback_paths.values():
                builders_to_fallback_paths[builder] = fallback_path
        return builders_to_fallback_paths.keys()

    def _rebaseline_commands(self, test_prefix_list, options):
        """Build the copy-baselines and rebaseline command lines to run.

        *test_prefix_list* maps test prefix -> {builder -> suffixes}.
        Returns (copy_baseline_commands, rebaseline_commands, lines_to_remove).
        """
        path_to_webkit_patch = self._tool.path()
        cwd = self._tool.scm().checkout_root
        copy_baseline_commands = []
        rebaseline_commands = []
        lines_to_remove = {}
        port = self._tool.port_factory.get()

        for test_prefix in test_prefix_list:
            for test in port.tests([test_prefix]):
                for builder in self._builders_to_fetch_from(test_prefix_list[test_prefix]):
                    actual_failures_suffixes = self._suffixes_for_actual_failures(test, builder, test_prefix_list[test_prefix][builder])
                    if not actual_failures_suffixes:
                        # If we're not going to rebaseline the test because it's passing on this
                        # builder, we still want to remove the line from TestExpectations.
                        if test not in lines_to_remove:
                            lines_to_remove[test] = []
                        lines_to_remove[test].append(builder)
                        continue

                    suffixes = ','.join(actual_failures_suffixes)
                    cmd_line = ['--suffixes', suffixes, '--builder', builder, '--test', test]
                    if options.results_directory:
                        cmd_line.extend(['--results-directory', options.results_directory])
                    if options.verbose:
                        cmd_line.append('--verbose')
                    copy_baseline_commands.append(tuple([[path_to_webkit_patch, 'copy-existing-baselines-internal'] + cmd_line, cwd]))
                    rebaseline_commands.append(tuple([[path_to_webkit_patch, 'rebaseline-test-internal'] + cmd_line, cwd]))

        return copy_baseline_commands, rebaseline_commands, lines_to_remove

    def _serial_commands(self, command_results):
        """Parse the JSON emitted by the parallel subcommands.

        Returns (files_to_add, files_to_delete, lines_to_remove) aggregated
        across all subcommand outputs.
        """
        files_to_add = set()
        files_to_delete = set()
        lines_to_remove = {}
        for output in [result[1].split('\n') for result in command_results]:
            file_added = False
            for line in output:
                try:
                    if line:
                        parsed_line = json.loads(line)
                        if 'add' in parsed_line:
                            files_to_add.update(parsed_line['add'])
                        if 'delete' in parsed_line:
                            files_to_delete.update(parsed_line['delete'])
                        if 'remove-lines' in parsed_line:
                            for line_to_remove in parsed_line['remove-lines']:
                                test = line_to_remove['test']
                                builder = line_to_remove['builder']
                                if test not in lines_to_remove:
                                    lines_to_remove[test] = []
                                lines_to_remove[test].append(builder)
                        file_added = True
                except ValueError:
                    _log.debug('"%s" is not a JSON object, ignoring' % line)

            if not file_added:
                _log.debug('Could not add file based off output "%s"' % output)

        return list(files_to_add), list(files_to_delete), lines_to_remove

    def _optimize_baselines(self, test_prefix_list, verbose=False):
        """Build the optimize-baselines command lines for the rebaselined tests."""
        optimize_commands = []
        for test in test_prefix_list:
            all_suffixes = set()
            for builder in self._builders_to_fetch_from(test_prefix_list[test]):
                all_suffixes.update(self._suffixes_for_actual_failures(test, builder, test_prefix_list[test][builder]))

            # FIXME: We should propagate the platform options as well.
            cmd_line = ['--no-modify-scm', '--suffixes', ','.join(all_suffixes), test]
            if verbose:
                cmd_line.append('--verbose')

            path_to_webkit_patch = self._tool.path()
            cwd = self._tool.scm().checkout_root
            optimize_commands.append(tuple([[path_to_webkit_patch, 'optimize-baselines'] + cmd_line, cwd]))
        return optimize_commands

    def _update_expectations_files(self, lines_to_remove):
        """Remove the rebaselined lines from the generic TestExpectations file."""
        # FIXME: This routine is way too expensive. We're creating O(n ports) TestExpectations objects.
        # This is slow and uses a lot of memory.
        tests = lines_to_remove.keys()
        to_remove = []

        # This is so we remove lines for builders that skip this test, e.g. Android skips most
        # tests and we don't want to leave stray [ Android ] lines in TestExpectations.
        # This is only necessary for "webkit-patch rebaseline" and for rebaselining expected
        # failures from garden-o-matic. rebaseline-expectations and auto-rebaseline will always
        # pass the exact set of ports to rebaseline.
        for port_name in self._tool.port_factory.all_port_names():
            port = self._tool.port_factory.get(port_name)
            generic_expectations = TestExpectations(port, tests=tests, include_overrides=False)
            full_expectations = TestExpectations(port, tests=tests, include_overrides=True)
            for test in tests:
                if self._port_skips_test(port, test, generic_expectations, full_expectations):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            to_remove.append((test, test_configuration))

        for test in lines_to_remove:
            for builder in lines_to_remove[test]:
                port = self._tool.port_factory.get_from_builder_name(builder)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        to_remove.append((test, test_configuration))

        port = self._tool.port_factory.get()
        expectations = TestExpectations(port, include_overrides=False)
        expectationsString = expectations.remove_configurations(to_remove)
        path = port.path_to_generic_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectationsString)

    def _port_skips_test(self, port, test, generic_expectations, full_expectations):
        """True if *port* skips *test* via overrides or the SmokeTests list."""
        fs = port.host.filesystem
        if port.default_smoke_test_only():
            smoke_test_filename = fs.join(port.layout_tests_dir(), 'SmokeTests')
            if fs.exists(smoke_test_filename) and test not in fs.read_text_file(smoke_test_filename):
                return True

        return (SKIP in full_expectations.get_expectations(test) and
                SKIP not in generic_expectations.get_expectations(test))

    def _run_in_parallel_and_update_scm(self, commands):
        """Run *commands* in parallel, apply resulting SCM adds/deletes,
        and return the expectation lines to remove."""
        command_results = self._tool.executive.run_in_parallel(commands)
        log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
        for line in log_output.split('\n'):
            if line:
                print >> sys.stderr, line  # FIXME: Figure out how to log properly.

        files_to_add, files_to_delete, lines_to_remove = self._serial_commands(command_results)
        if files_to_delete:
            self._tool.scm().delete_list(files_to_delete)
        if files_to_add:
            self._tool.scm().add_list(files_to_add)
        return lines_to_remove

    def _rebaseline(self, options, test_prefix_list):
        """Top-level driver: copy old baselines, fetch new ones, update
        TestExpectations, and optionally optimize."""
        for test, builders_to_check in sorted(test_prefix_list.items()):
            _log.info("Rebaselining %s" % test)
            for builder, suffixes in sorted(builders_to_check.items()):
                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))

        copy_baseline_commands, rebaseline_commands, extra_lines_to_remove = self._rebaseline_commands(test_prefix_list, options)
        lines_to_remove = {}

        if copy_baseline_commands:
            self._run_in_parallel_and_update_scm(copy_baseline_commands)
        if rebaseline_commands:
            lines_to_remove = self._run_in_parallel_and_update_scm(rebaseline_commands)

        # Merge the lines flagged at command-build time with those reported
        # by the subcommands.
        for test in extra_lines_to_remove:
            if test in lines_to_remove:
                lines_to_remove[test] = lines_to_remove[test] + extra_lines_to_remove[test]
            else:
                lines_to_remove[test] = extra_lines_to_remove[test]

        if lines_to_remove:
            self._update_expectations_files(lines_to_remove)
        if options.optimize:
            self._run_in_parallel_and_update_scm(self._optimize_baselines(test_prefix_list, options.verbose))

    def _suffixes_for_actual_failures(self, test, builder_name, existing_suffixes):
        """Intersect *existing_suffixes* with the suffixes that actually failed
        for *test* on *builder_name* (empty set when no results)."""
        actual_results = self.builder_data()[builder_name].actual_results(test)
        if not actual_results:
            return set()
        return set(existing_suffixes) & TestExpectations.suffixes_for_actual_expectations_string(actual_results)
class RebaselineJson(AbstractParallelRebaselineCommand):
    """Rebaselines from a {test: {builder: [suffixes]}} JSON map read from stdin."""
    name = "rebaseline-json"
    help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."

    def __init__(self):
        # Fixed stray trailing comma in the parameter list (was `def __init__(self,):`).
        super(RebaselineJson, self).__init__(options=[
            self.no_optimize_option,
            self.results_directory_option,
        ])

    def execute(self, options, args, tool):
        """Read the test-prefix map from stdin and run the parallel rebaseline."""
        self._rebaseline(options, json.loads(sys.stdin.read()))
class RebaselineExpectations(AbstractParallelRebaselineCommand):
    """Rebaselines every test marked Rebaseline in TestExpectations."""
    name = "rebaseline-expectations"
    help_text = "Rebaselines the tests indicated in TestExpectations."
    show_in_main_help = True

    def __init__(self):
        super(RebaselineExpectations, self).__init__(options=[
            self.no_optimize_option,
        ] + self.platform_options)
        self._test_prefix_list = None

    def _tests_to_rebaseline(self, port):
        """Map each test marked Rebaseline in *port*'s expectations to its suffixes."""
        rebaseline_map = {}
        for path, contents in port.expectations_dict().items():
            expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: contents})
            for test_name in expectations.get_rebaselining_failures():
                suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test_name))
                rebaseline_map[test_name] = suffixes or BASELINE_SUFFIX_LIST
        return rebaseline_map

    def _add_tests_to_rebaseline_for_port(self, port_name):
        """Record the Rebaseline-marked tests of *port_name* under its builder."""
        builder_name = builders.builder_name_for_port_name(port_name)
        if not builder_name:
            return
        tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()
        if tests:
            _log.info("Retrieving results for %s from %s." % (port_name, builder_name))
        for test_name, suffixes in tests:
            _log.info(" %s (%s)" % (test_name, ','.join(suffixes)))
            self._test_prefix_list.setdefault(test_name, {})[builder_name] = suffixes

    def execute(self, options, args, tool):
        options.results_directory = None
        self._test_prefix_list = {}
        for port_name in tool.port_factory.all_port_names(options.platform):
            self._add_tests_to_rebaseline_for_port(port_name)
        if not self._test_prefix_list:
            _log.warning("Did not find any tests marked Rebaseline.")
            return
        self._rebaseline(options, self._test_prefix_list)
class Rebaseline(AbstractParallelRebaselineCommand):
    """Interactive rebaseline of explicitly-named tests from chosen builders."""
    name = "rebaseline"
    help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
    show_in_main_help = True
    argument_names = "[TEST_NAMES]"

    def __init__(self):
        super(Rebaseline, self).__init__(options=[
            self.no_optimize_option,
            # FIXME: should we support the platform options in addition to (or instead of) --builders?
            self.suffixes_option,
            self.results_directory_option,
            optparse.make_option("--builders", default=None, action="append", help="Comma-separated-list of builders to pull new baselines from (can also be provided multiple times)"),
        ])

    def _builders_to_pull_from(self):
        """Prompt the user to pick builders from the release-builder list."""
        chosen_names = self._tool.user.prompt_with_list("Which builder to pull results from:", self._release_builders(), can_choose_multiple=True)
        return [self._builder_with_name(name) for name in chosen_names]

    def _builder_with_name(self, name):
        return self._tool.buildbot_for_builder_name(name).builder_with_name(name)

    def execute(self, options, args, tool):
        if not args:
            _log.error("Must list tests to rebaseline.")
            return

        if options.builders:
            builders_to_check = []
            # --builders may be given multiple times, each a comma-separated list.
            for builder_names in options.builders:
                builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
        else:
            builders_to_check = self._builders_to_pull_from()

        # Build the {test: {builder: suffixes}} map consumed by _rebaseline.
        test_prefix_list = {}
        suffixes_to_update = options.suffixes.split(",")

        for builder in builders_to_check:
            for test in args:
                if test not in test_prefix_list:
                    test_prefix_list[test] = {}
                test_prefix_list[test][builder.name()] = suffixes_to_update

        if options.verbose:
            _log.debug("rebaseline-json: " + str(test_prefix_list))

        self._rebaseline(options, test_prefix_list)
class AutoRebaseline(AbstractParallelRebaselineCommand):
name = "auto-rebaseline"
help_text = "Rebaselines any NeedsRebaseline lines in TestExpectations that have cycled through all the bots."
AUTO_REBASELINE_BRANCH_NAME = "auto-rebaseline-temporary-branch"
# Rietveld uploader stinks. Limit the number of rebaselines in a given patch to keep upload from failing.
# FIXME: http://crbug.com/263676 Obviously we should fix the uploader here.
MAX_LINES_TO_REBASELINE = 200
SECONDS_BEFORE_GIVING_UP = 300
def __init__(self):
super(AutoRebaseline, self).__init__(options=[
# FIXME: Remove this option.
self.no_optimize_option,
# FIXME: Remove this option.
self.results_directory_option,
])
def bot_revision_data(self):
revisions = []
for result in self.builder_data().values():
if result.run_was_interrupted():
_log.error("Can't rebaseline because the latest run on %s exited early." % result.builder_name())
return []
revisions.append({
"builder": result.builder_name(),
"revision": result.blink_revision(),
})
return revisions
def tests_to_rebaseline(self, tool, min_revision, print_revisions):
port = tool.port_factory.get()
expectations_file_path = port.path_to_generic_test_expectations_file()
tests = set()
revision = None
author = None
bugs = set()
has_any_needs_rebaseline_lines = False
for line in tool.scm().blame(expectations_file_path).split("\n"):
comment_index = line.find("#")
if comment_index == -1:
comment_index = len(line)
line_without_comments = re.sub(r"\s+", " ", line[:comment_index].strip())
if "NeedsRebaseline" not in line_without_comments:
continue
has_any_needs_rebaseline_lines = True
parsed_line = re.match("^(\S*)[^(]*\((\S*).*?([^ ]*)\ \[[^[]*$", line_without_comments)
commit_hash = parsed_line.group(1)
svn_revision = tool.scm().svn_revision_from_git_commit(commit_hash)
test = parsed_line.group(3)
if print_revisions:
_log.info("%s is waiting for r%s" % (test, svn_revision))
if not svn_revision or svn_revision > min_revision:
continue
if revision and svn_revision != revision:
continue
if not revision:
revision = svn_revision
author = parsed_line.group(2)
bugs.update(re.findall("crbug\.com\/(\d+)", line_without_comments))
tests.add(test)
if len(tests) >= self.MAX_LINES_TO_REBASELINE:
_log.info("Too many tests to rebaseline in one patch. Doing the first %d." % self.MAX_LINES_TO_REBASELINE)
break
return tests, revision, author, bugs, has_any_needs_rebaseline_lines
def link_to_patch(self, revision):
return "http://src.chromium.org/viewvc/blink?view=revision&revision=" + str(revision)
def commit_message(self, author, revision, bugs):
bug_string = ""
if bugs:
bug_string = "BUG=%s\n" % ",".join(bugs)
return """Auto-rebaseline for r%s
%s
%sTBR=%s
""" % (revision, self.link_to_patch(revision), bug_string, author)
def get_test_prefix_list(self, tests):
test_prefix_list = {}
lines_to_remove = {}
for builder_name in self._release_builders():
port_name = builders.port_name_for_builder_name(builder_name)
port = self._tool.port_factory.get(port_name)
expectations = TestExpectations(port, include_overrides=True)
for test in expectations.get_needs_rebaseline_failures():
if test not in tests:
continue
if test not in test_prefix_list:
lines_to_remove[test] = []
test_prefix_list[test] = {}
lines_to_remove[test].append(builder_name)
test_prefix_list[test][builder_name] = BASELINE_SUFFIX_LIST
return test_prefix_list, lines_to_remove
def _run_git_cl_command(self, options, command):
subprocess_command = ['git', 'cl'] + command
if options.verbose:
subprocess_command.append('--verbose')
process = self._tool.executive.popen(subprocess_command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)
last_output_time = time.time()
# git cl sometimes completely hangs. Bail if we haven't gotten any output to stdout/stderr in a while.
while process.poll() == None and time.time() < last_output_time + self.SECONDS_BEFORE_GIVING_UP:
# FIXME: This doesn't make any sense. readline blocks, so all this code to
# try and bail is useless. Instead, we should do the readline calls on a
# subthread. Then the rest of this code would make sense.
out = process.stdout.readline().rstrip('\n')
if out:
last_output_time = time.time()
_log.info(out)
if process.poll() == None:
_log.error('Command hung: %s' % subprocess_command)
return False
return True
# FIXME: Move this somewhere more general.
def tree_status(self):
blink_tree_status_url = "http://blink-status.appspot.com/status"
status = urllib2.urlopen(blink_tree_status_url).read().lower()
if status.find('closed') != -1 or status == "0":
return 'closed'
elif status.find('open') != -1 or status == "1":
return 'open'
return 'unknown'
def execute(self, options, args, tool):
if tool.scm().executable_name == "svn":
_log.error("Auto rebaseline only works with a git checkout.")
return
if tool.scm().has_working_directory_changes():
_log.error("Cannot proceed with working directory changes. Clean working directory first.")
return
revision_data = self.bot_revision_data()
if not revision_data:
return
min_revision = int(min([item["revision"] for item in revision_data]))
tests, revision, author, bugs, has_any_needs_rebaseline_lines = self.tests_to_rebaseline(tool, min_revision, print_revisions=options.verbose)
if options.verbose:
_log.info("Min revision across all bots is %s." % min_revision)
for item in revision_data:
_log.info("%s: r%s" % (item["builder"], item["revision"]))
if not tests:
_log.debug('No tests to rebaseline.')
return
if self.tree_status() == 'closed':
_log.info('Cannot proceed. Tree is closed.')
return
_log.info('Rebaselining %s for r%s by %s.' % (list(tests), revision, author))
test_prefix_list, lines_to_remove = self.get_test_prefix_list(tests)
try:
old_branch_name = tool.scm().current_branch()
tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)
tool.scm().create_clean_branch(self.AUTO_REBASELINE_BRANCH_NAME)
# If the tests are passing everywhere, then this list will be empty. We don't need
# to rebaseline, but we'll still need to update TestExpectations.
if test_prefix_list:
self._rebaseline(options, test_prefix_list)
tool.scm().commit_locally_with_message(self.commit_message(author, revision, bugs))
# FIXME: It would be nice if we could dcommit the patch without uploading, but still
# go through all the precommit hooks. For rebaselines with lots of files, uploading
# takes a long time and sometimes fails, but we don't want to commit if, e.g. the
# tree is closed.
did_finish = self._run_git_cl_command(options, ['upload', '-f'])
if did_finish:
# Uploading can take a very long time. Do another pull to make sure TestExpectations is up to date,
# so the dcommit can go through.
# FIXME: Log the pull and dcommit stdout/stderr to the log-server.
tool.executive.run_command(['git', 'pull'])
self._run_git_cl_command(options, ['dcommit', '-f'])
finally:
self._run_git_cl_command(options, ['set_close'])
tool.scm().ensure_cleanly_tracking_remote_master()
tool.scm().checkout_branch(old_branch_name)
tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)
class RebaselineOMatic(AbstractDeclarativeCommand):
    """Runs "webkit-patch auto-rebaseline" in an endless loop, mirroring its
    output to a remote log server."""
    name = "rebaseline-o-matic"
    help_text = "Calls webkit-patch auto-rebaseline in a loop."
    show_in_main_help = True

    # Delay between auto-rebaseline attempts.
    SLEEP_TIME_IN_SECONDS = 30
    LOG_SERVER = 'blinkrebaseline.appspot.com'

    # Uploaded log entries append to the existing entry unless the
    # newentry flag is set. In that case it starts a new entry to
    # start appending to.
    def _log_to_server(self, log='', is_new_entry=False):
        """POST one chunk of log text to the log server."""
        query = {
            'log': log,
        }
        if is_new_entry:
            query['newentry'] = 'on'
        urllib2.urlopen("http://" + self.LOG_SERVER + "/updatelog", data=urllib.urlencode(query))

    def _log_line(self, handle):
        """Read one line from |handle|, echoing it locally (when verbose) and to
        the log server. Returns the stripped line, or '' at EOF."""
        out = handle.readline().rstrip('\n')
        if out:
            if self._verbose:
                print(out)
            self._log_to_server(out)
        return out

    def _run_logged_command(self, command):
        """Run |command|, forwarding every line of its combined stdout/stderr
        to the log server until EOF."""
        process = self._tool.executive.popen(command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)

        out = self._log_line(process.stdout)
        while out:
            # FIXME: This should probably batch up lines if they're available and log to the server once.
            out = self._log_line(process.stdout)

    def _do_one_rebaseline(self):
        """Pull and run auto-rebaseline once; on failure, log the traceback and
        try to restore the branch we started on."""
        # Initialized up front so the except block can't hit a NameError when
        # current_branch() itself is what raised.
        old_branch_name = None
        try:
            old_branch_name = self._tool.scm().current_branch()
            self._log_to_server(is_new_entry=True)
            self._run_logged_command(['git', 'pull'])
            rebaseline_command = [self._tool.filesystem.join(self._tool.scm().checkout_root, 'Tools', 'Scripts', 'webkit-patch'), 'auto-rebaseline']
            if self._verbose:
                rebaseline_command.append('--verbose')
            self._run_logged_command(rebaseline_command)
        except Exception:
            # Deliberately broad (this must survive any git failure), but not a
            # bare "except:" — KeyboardInterrupt/SystemExit must still be able
            # to break the endless loop in execute().
            traceback.print_exc(file=sys.stderr)
            # Sometimes git crashes and leaves us on a detached head.
            if old_branch_name:
                self._tool.scm().checkout_branch(old_branch_name)

    def execute(self, options, args, tool):
        """Loop forever: one rebaseline attempt, then sleep."""
        self._verbose = options.verbose
        while True:
            self._do_one_rebaseline()
            time.sleep(self.SLEEP_TIME_IN_SECONDS)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.