text stringlengths 957 885k |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
Chat program
ROSPEEXから
入力された文章を使い、DoCoMoAPIで会話する
The project is hosted on GitHub where your could fork the project or report
issues. Visit https://github.com/roboworks/
:copyright: (c) 2015 by Hiroyuki Okada, All rights reserved.
:license: MIT License (MIT), http://www.opensource.org/licenses/MIT
"""
__author__ = '<NAME>'
__version__ = '0.1'
import sys
import string
import time
import datetime
import re
sys.path.append(".")
import urllib2
import urllib
import json
import rospy
from std_msgs.msg import String
# rospeex
from rospeex_if import ROSpeexInterface
# jsk
from jsk_gui_msgs.msg import VoiceMessage
from jsk_gui_msgs.msg import Tablet
from trcp_chat.srv import *
from trcp_chat.msg import *
# Persona / default request parameters for the DoCoMo chat (雑談対話) API.
# "utt" is the user utterance and is filled in per request; the remaining
# keys describe the agent's fixed character profile sent with every call.
_chat_={
    "utt":"",                            # utterance text (set per request)
    "context":"aaabbbccc111222333",      # dialogue context id
    "nickname":"あかね",                 # agent nickname
    "nickname_y":"アカネ",               # nickname reading (katakana)
    "sex":"女",                          # gender ("female")
    "bloodtype":"O",
    "birthdateY":1990,
    "birthdateM":2,
    "birthdateD":5,
    "age":25,
    "constellations":"水瓶",             # zodiac sign ("Aquarius")
    "place":"大阪",                      # location ("Osaka")
    "mode":"dialog",                     # API dialogue mode
    "t":"20"                             # character parameter -- presumably the DoCoMo "t" option; confirm
}
# Current conversation mode; "CHAIN" skips the understanding API (see ChatTRCP.chat).
_nowmode = "CHAT"
class ChatTRCP(object):
    """ROS node that converses via the DoCoMo dialogue APIs.

    Speech input arrives either from rospeex or from the jsk tablet topic.
    Each utterance is routed through the DoCoMo sentence-understanding
    service and then dispatched to the chat or Q&A service.
    """

    def __init__(self):
        """Initializer -- all real setup happens in run()."""
        pass

    def run(self):
        """Initialize the ROS node, services and subscriptions, then spin."""
        global _nowmode  # BUG FIX: was an assignment to a local variable
        # initialize ros node
        rospy.init_node('ChatTRCP')
        rospy.loginfo("start DoCoMo Chat TRCP node")
        # rospeex speech I/O: Japanese, NICT engine (English/Google also
        # exist; currently hard-coded -- could be made a launch parameter).
        self.rospeex = ROSpeexInterface()
        self.rospeex.init()
        self.rospeex.register_sr_response(self.sr_response)
        self.lang = 'ja'
        self.input_engine = 'nict'
        self.rospeex.set_spi_config(language='ja', engine='nict')
        # jsk voice understanding input
        rospy.Subscriber("/Tablet/voice", VoiceMessage, self.jsk_voice)
        # request template for the utterance-understanding API
        self.req = DoCoMoUnderstandingReq()
        self.req.projectKey = 'OSU'
        self.req.appName = ''
        self.req.appKey = 'hoge_app01'
        self.req.clientVer = '1.0.0'
        self.req.dialogMode = 'off'
        self.req.language = 'ja'
        self.req.userId = '12 123456 123456 0'
        self.req.lat = '139.766084'
        self.req.lon = '35.681382'
        # request template for the chat (雑談対話) API, persona from _chat_
        self.req_chat = DoCoMoChatReq()
        self.req_chat.utt = ""
        self.req_chat.context = _chat_["context"]
        self.req_chat.nickname = _chat_["nickname"]
        self.req_chat.nickname_y = _chat_["nickname_y"]
        self.req_chat.sex = _chat_["sex"]
        self.req_chat.bloodtype = _chat_["bloodtype"]
        self.req_chat.birthdateY = _chat_["birthdateY"]
        self.req_chat.birthdateM = _chat_["birthdateM"]
        self.req_chat.birthdateD = _chat_["birthdateD"]
        self.req_chat.age = _chat_["age"]
        self.req_chat.constellations = _chat_["constellations"]
        self.req_chat.place = _chat_["place"]
        self.req_chat.mode = _chat_["mode"]
        self.req_chat.t = _chat_["t"]
        rospy.wait_for_service('docomo_sentenceunderstanding')
        self.understanding = rospy.ServiceProxy('docomo_sentenceunderstanding', DoCoMoUnderstanding)
        rospy.wait_for_service('docomo_qa')
        self.qa = rospy.ServiceProxy('docomo_qa', DoCoMoQa)
        rospy.wait_for_service('docomo_chat')
        # BUG FIX: stored as chat_service -- the original assigned this proxy
        # to self.chat, shadowing the chat() method below.
        self.chat_service = rospy.ServiceProxy('docomo_chat', DoCoMoChat)
        self.resp_understanding = DoCoMoUnderstandingRes()
        _nowmode = "CHAT"
        rospy.spin()

    def jsk_voice(self, data):
        """Callback for /Tablet/voice: forward the first recognized text."""
        rospy.loginfo("jsk_voice:%s", data.texts[0])
        self.chat(data.texts[0])

    def sr_response(self, message):
        """Callback for rospeex recognition results.

        rospeex appends a trailing 「。」 to recognized text, so strip it
        before dispatching.
        """
        sr_dst = message.replace('。', '')
        rospy.loginfo("rospeex:%s", sr_dst)
        # BUG FIX: was a bare chat(sr_dst), which raised NameError
        self.chat(sr_dst)

    def chat(self, message):
        """Handle one utterance: keyword shortcuts, then the DoCoMo APIs.

        Returns True in all cases (errors are logged, not raised), so the
        speech callbacks never crash the node.
        """
        rospy.loginfo("chat:%s", message)
        # Keyword shortcut: 「何時」 ("what time") -> answer with current time.
        # (renamed from `time` to avoid shadowing the time module)
        time_match = re.compile('(?P<time>何時)').search(message)
        if time_match is not None:
            rospy.loginfo("What Time is it now? :%s", message)
            d = datetime.datetime.today()
            text = u'%d時%d分です。' % (d.hour, d.minute)
            self.rospeex.say(text, 'ja', 'nict')
            return True
        # End of keyword shortcuts.
        rospy.loginfo("mode:%s", _nowmode)
        try:
            # In CHAIN (しりとり/word-chain) mode skip the understanding API
            # and pretend the command is "chat"; otherwise analyze first.
            if _nowmode == "CHAIN":
                self.resp_understanding.success = True
                self.resp_understanding.response.commandId = "BC00101"
                self.resp_understanding.response.utteranceText = message
            else:
                self.req.utteranceText = message
                self.resp_understanding = self.understanding(self.req)
            if self.resp_understanding.success:
                commandId = self.resp_understanding.response.commandId
                rospy.loginfo("<<< %s", commandId)
                if commandId == "BC00101":
                    # Small talk via the chat API.
                    rospy.loginfo("TRCP:Chat")
                    # BUG FIX: pass the utterance along (utt was never set)
                    self.req_chat.utt = message
                    self.res_chat = self.chat_service(self.req_chat)
                    rospy.loginfo("TRCP Chat response:%s", self.res_chat.response)
                elif commandId == "BK00101":
                    # Knowledge search via the Q&A API.
                    rospy.loginfo("TRCP:Q&A")
                    self.req_qa = DoCoMoQaReq()
                    self.req_qa.text = self.resp_understanding.response.utteranceText
                    rospy.loginfo("%s", self.resp_understanding.response.utteranceText)
                    res_qa = self.qa(self.req_qa)
                    rospy.loginfo("TRCP Q&A response:%s", res_qa.response.code)
                    self.rospeex.say(res_qa.response.textForSpeech, 'ja', 'nict')
            else:
                # The understanding API reported failure.
                rospy.loginfo("DoCoMo 発話理解API failed")
        except Exception as exc:
            # Keep the callback alive, but log what actually went wrong
            # (was a bare `except: pass`-style swallow).
            rospy.loginfo("error: %s", exc)
        return True
# Entry point: start the chat node; a ROS shutdown interrupt is not an error.
if __name__ == '__main__':
    try:
        node = ChatTRCP()
        node.run()
    except rospy.ROSInterruptException:
        pass
|
import sys
import os
import logging
import matplotlib.pyplot as plt
from numpy import median, zeros, nan, nanmedian, sqrt, mean, std, loadtxt, linspace, \
zeros_like, divide, asarray
from astropy.constants import G, R_sun, M_sun, R_jup, M_jup, R_earth, M_earth
from astropy.coordinates import SkyCoord
from pathlib import Path
from astrosource.utils import AstrosourceException
logger = logging.getLogger('astrosource')
def bls(t, x, qmi, qma, fmin, df, nf, nb, startPeriod, dp):
    """Box Least Squares transit search, only minor modification from the
    author's code (iterates in period rather than frequency, and bins with
    the median instead of the mean).

    Parameters
    ----------
    t, x : arrays of time values and flux values (same length)
    qmi, qma : minimum / maximum fractional transit length to test
    fmin, df, nf : minimum frequency, frequency step, number of trials
    nb : number of bins in the folded series at each trial period
    startPeriod, dp : first trial period and the per-trial period step

    Output parameters:
    ~~~~~~~~~~~~~~~~~~
    p = array {p(i)}, containing the values of the BLS spectrum
    at the i-th frequency value -- the frequency values are
    computed as f = fmin + (i-1)*df
    bper = period at the highest peak in the frequency spectrum
    bpow = value of {p(i)} at the highest peak
    depth= depth of the transit at *bper*
    qtran= fractional transit length [ T_transit/bper ]
    in1 = bin index at the start of the transit [ 0 < in1 < nb+1 ]
    in2 = bin index at the end of the transit [ 0 < in2 < nb+1 ]
    Also returns sde (signal detection efficiency), high/low levels and the
    list of [period, power] pairs for plotting.

    Remarks:
    ~~~~~~~~
    -- *fmin* MUST be greater than *1/total time span*
    -- *nb* MUST be lower than *nbmax*
    -- The lowest number of points allowed in a single bin is equal
    to MAX(minbin,qmi*N), where *qmi* is the minimum transit
    length/trial period, *N* is the total number of data points,
    *minbin* is the preset minimum number of the data points per
    bin.
    """
    n = len(t)
    rn = len(x)
    #! use try
    if n != rn:
        raise AstrosourceException("Different size of array, t and x")
    rn = float(rn) # float of n
    minbin = 5
    nbmax = 2000
    if nb > nbmax:
        raise AstrosourceException("Error: NB > NBMAX!")
    tot = t[-1] - t[0] # total time span
    if fmin < 1.0/tot:
        raise AstrosourceException("Error: fmin < 1/T")
    # parameters in binning (after folding)
    kmi = int(qmi*nb) # minimum transit length in bins
    if kmi < 1:
        kmi = 1
    kma = int(qma*nb) + 1 # maximum transit length in bins
    kkmi = rn*qmi # minimum number of points required in a test window
    if kkmi < minbin:
        kkmi = minbin
    # For the extension of arrays (edge effect: transit may happen at the
    # edge of the folded data set, so the first kma bins are wrapped).
    nb1 = nb + 1
    nbkma = nb + kma
    # Data centering: times relative to t[0], flux relative to its median
    t1 = t[0]
    u = t - t1
    s = median(x) # ! Modified (median instead of mean)
    v = x - s
    bpow = 0.0
    p = zeros(nf)
    # setup array for power vs period plot
    powerPeriod=[]
    # Start period search
    for jf in range(nf):
        #f0 = fmin + df*jf # iteration in frequency not period
        #p0 = 1.0/f0
        # Actually iterate in period
        p0 = startPeriod + dp*jf
        f0 = 1.0/p0
        # Compute folded time series with p0 period
        ibi = zeros(nbkma)
        y = zeros(nbkma)
        # Median version.
        # NOTE(review): allocated with shape (nf, n) but indexed below by
        # bin index j (< nb+1) -- this only works while nf >= nb+1, and it
        # reallocates an nf*n array on every trial period; confirm whether
        # (nb+1, n) was intended.
        yMedian = zeros(shape=(nf,n))
        yMedian.fill(nan)
        for i in range(n):
            ph = u[i]*f0 # instead of t mod P, use t*f then take the fractional part (less computation)
            ph = ph - int(ph)
            j = int(nb*ph) # data to a bin
            ibi[j] = ibi[j] + 1 # number of data in a bin
            y[j] = y[j] + v[i] # sum of light in a bin
            yMedian[j][i]=v[i]
        # Repopulate y[j] and ibi[j] with the median value
        for i in range(nb+1):
            #logger.debug(i)
            ibi[i]=1
            y[i]=nanmedian(yMedian[i,:])
        # Extend the arrays ibi() and y() beyond nb by wrapping
        for j in range(nb1, nbkma):
            jnb = j - nb
            ibi[j] = ibi[jnb]
            y[j] = y[jnb]
        # Compute BLS statistics for this trial period
        power = 0.0
        for i in range(nb): # shift the test transit start bin
            s = 0.0
            k = 0
            kk = 0
            nb2 = i + kma
            # grow the test transit length (from kmi to kma bins)
            for j in range(i, nb2):
                k = k + 1
                kk = kk + ibi[j]
                s = s + y[j]
                if k < kmi: continue # only calculate SR for test period > kmi
                if kk < kkmi: continue # skip windows with too few points
                rn1 = float(kk)
                powo = s*s/(rn1*(rn - rn1))
                if powo > power: # save maximum SR for this trial period
                    power = powo # SR value
                    jn1 = i # transit start bin
                    jn2 = j # transit end bin
                    rn3 = rn1
                    s3 = s
        power = sqrt(power)
        p[jf] = power
        powerPeriod.append([p0,power])
        if power > bpow:
            # Reject trial periods near integer-day resonances (aliases of
            # the observing cadence) before accepting a new global maximum.
            if not ((p0 > 0.95 and p0 < 1.05) or (p0 > 1.95 and p0 < 2.05) or (p0 > 2.98 and p0 < 3.02) or (p0 > 6.65 and p0 < 6.67) or (p0 > 3.32 and p0 < 3.34) or (p0 > 3.64 and p0 < 3.68)):
                bpow = power # Save the absolute maximum of SR
                in1 = jn1
                in2 = jn2
                qtran = rn3/rn
                # depth = -s3*rn/(rn3*(rn - rn3))
                # ! Modified: depth from out-of-transit vs in-transit levels
                high = -s3/(rn - rn3)
                low = s3/rn3
                depth = high - low
                bper = p0
    sde = (bpow - mean(p))/std(p) # signal detection efficiency
    return bpow, in1, in2, qtran, depth, bper, sde, p, high, low, powerPeriod
def plot_bls(paths, startPeriod=0.1, endPeriod=3.0, nf=1000, nb=200, qmi=0.01, qma=0.1):
    '''Run the BLS search on every differential photometry catalogue and
    save a period-trial plot, a folded light-curve plot and a statistics
    text file per input file.

    Input parameters:
    ~~~~~~~~~~~~~~~~~
    paths = dict of Path objects (expects 'parent' and 'outcatPath' keys)
    startPeriod, endPeriod = period search range in days
    nf = number of trial periods in which the spectrum is computed
    nb = number of bins in the folded time series at any test period
    qmi = minimum fractional transit length to be tested
    qma = maximum fractional transit length to be tested
    '''
    # Get list of phot files
    trimPath = paths['parent'] / "trimcats"
    eelbsPath = paths['parent'] / "eelbs"
    # check directory structure
    if not trimPath.exists():
        os.makedirs(trimPath)
    if not eelbsPath.exists():
        os.makedirs(eelbsPath)
    fileList = paths['outcatPath'].glob('*diffExcel*csv')
    r=0
    # calculate period range (df kept for reference; dp drives the search)
    fmin = 1/endPeriod
    fmax = 1/startPeriod
    df = (fmax-fmin)/nf
    dp = (endPeriod-startPeriod)/nf
    for filename in fileList:
        photFile = loadtxt(paths['outcatPath'] / Path(filename).name, delimiter=',')
        logger.debug('**********************')
        logger.debug(f'Testing: (unknown)')
        t = photFile[:,0]  # column 0: time
        f = photFile[:,1]  # column 1: flux
        res = bls(t, f, qmi, qma, fmin, df, nf, nb, startPeriod, dp)
        if not res:
            raise AstrosourceException("BLS fit failed")
        else: # If it did not fail, then do the rest.
            logger.debug(f'Best SR: {res[0]}')
            logger.debug(f'Ingress: {res[1]}')
            logger.debug(f'Egress: {res[2]}')
            logger.debug(f'q: {res[3]}')
            logger.debug(f'Depth: {res[4]}')
            logger.debug(f'Period: {res[5]}')
            logger.debug(f'SDE: {res[6]}')
            # Fold the light curve at the best period for plotting
            t1 = t[0]
            u = t - t1
            s = mean(f)
            v = f - s
            f0 = 1.0/res[5] # freq = 1/T
            nbin = nb # number of bin
            n = len(t)
            ibi = zeros(nbin)
            y = zeros(nbin)
            phase = linspace(0.0, 1.0, nbin)
            for i in range(n):
                ph = u[i]*f0
                ph = ph - int(ph)
                j = int(nbin*ph) # data to a bin
                ibi[j] += 1.0 # number of data in a bin
                y[j] = y[j] + v[i] # sum of light in a bin
            plt.figure(figsize=(15,6))
            # Left panel: power vs trial period
            powerPeriod=asarray(res[10])
            plt.subplot(1, 2, 1)
            plt.plot(powerPeriod[:,0], powerPeriod[:,1], 'r.')
            plt.title("EELBS Period Trials")
            plt.xlabel(r"Trialled Period")
            plt.ylabel(r"Likelihood")
            # Right panel: folded, binned light curve with box fit overlaid
            plt.subplot(1, 2, 2)
            plt.plot(phase, divide(y, ibi, out=zeros_like(y), where=ibi!=0), 'r.')
            fite = zeros(nbin) + res[8] # H (out-of-transit level)
            fite[res[1]:res[2]+1] = res[9] # L (in-transit level)
            plt.plot(phase, fite)
            plt.gca().invert_yaxis()
            plt.title("\nDepth: "+ str(-res[4]) + " " + "Period: {0} d bin: {1}".format(1/f0, nbin))
            plt.xlabel(r"Phase ($\phi$)")
            plt.ylabel(r"Mean value of $x(\phi)$ in a bin")
            plt.tight_layout()
            # Derive a basename that works for both / and \ path separators
            filebase = str(filename).split("/")[-1].split("\\")[-1].replace(".csv","").replace("_calibExcel","")
            plot_filename = "{}_EELBS_Plot.png".format(filebase)
            plt.savefig(eelbsPath / plot_filename)
            logger.info("Saved {}".format(plot_filename))
            plt.clf()
            # Write text file
            # NOTE(review): `f` here shadows the flux array above; harmless
            # because the flux is no longer used, but worth renaming.
            texFileName=eelbsPath / '{}_EELBS_Statistics.txt'.format(filebase)
            logger.info("Saved {}".format(texFileName))
            with open(texFileName, "w") as f:
                f.write("Best SR: " +str(res[0])+"\n")
                f.write("Ingress: " + str(res[1])+"\n")
                f.write("Egress: "+ str(res[2])+"\n")
                f.write("nq: "+ str(res[3])+"\n")
                f.write("Depth: "+ str(-res[4])+"\n")
                f.write("Period: "+ str(res[5])+"\n")
                f.write("SDE: "+ str(res[6])+"\n")
    return
|
<filename>src/args.py
import os
import re
import argparse
import json
import pydash as _
import fs
import handlebars
from configure import fake_config
def parse():
    """Parse command-line arguments and convert them to a config dict.

    Returns a dict with keys:
      - 'dry': bool, whether this is a dry run
      - 'dry_opts': (dry runs only) options for the dry run
      - 'recursive': bool, whether watch directories are recursive
      - 'watchers': list of watcher config dicts (empty for dry runs)

    Calls parser.error() (which exits) on invalid flag combinations.
    """
    # Instantiate the parser
    parser = argparse.ArgumentParser(description='savetube: apply your youtube metadata to id3 tags')
    parser.add_argument(
        '--dry',
        type=str,
        nargs=2,
        metavar=('youtube_video_id', 'savetuberc_path'),
        help='Prints the raw data as well as the render_data that is available to you once parsed. Incompatible with the --watch and --out flags.'
    )
    parser.add_argument(
        '--dry_out',
        type=str,
        nargs=3,
        metavar=('src_root', 'src_filename', 'out_template'),
        help='Dry-run the out_template given the src_root and src_filename provided. Must be used jointly with --dry'
    )
    parser.add_argument(
        '--watch',
        type=str,
        nargs='+',
        metavar=('dir', 'dir'),
        help='Directories to watch. Supports multiple independent directories by specifying them after the --watch flag. The number of watch directories must match the number of out directory templates')
    parser.add_argument(
        '--recursive',
        action='store_true',
        default=False,
        help='Specifies if watched directories should be recursively watched.'
    )
    parser.add_argument(
        '--out',
        type=str,
        nargs='+',
        metavar=('out_template', 'out_template'),
        help='Template path for the destination filename. Supports multiple independent destinations by specifying them after the --out flag. The number of out directory templates must match the number of watch directories. Non-existent directories will be created '
    )
    args = parser.parse_args()
    if __name__ == "__main__":
        print(args)
    # Error handling for proper usage
    if args.dry is not None and (args.watch is not None or args.out is not None):
        parser.error("ParserError: --dry flag cannot be combined with --watch or --out flags")
    elif args.dry is None:
        # BUG FIX: enforce the documented "--dry_out must be used jointly
        # with --dry" constraint, which was previously unchecked.
        if args.dry_out is not None:
            parser.error("ParserError: --dry_out flag must be used jointly with --dry")
        if args.watch is None or args.out is None:
            if args.watch is None:
                parser.error("ParserError: --watch flag is required")
            if args.out is None:
                parser.error("ParserError: --out flag is required")
        elif len(args.watch) != len(args.out):
            parser.error("ParserError: --watch and --out flags should have the same number of values")
    # Convert args to usable configs
    if args.dry is not None:
        return {
            'dry': True,
            'dry_opts': {
                'out': args.dry_out is not None,
                'out_src_root': None if args.dry_out is None else args.dry_out[0],
                'out_src_filename': None if args.dry_out is None else args.dry_out[1],
                'out_template': None if args.dry_out is None else args.dry_out[2],
                'rc_filename': args.dry[1],
                'youtube_video_id': args.dry[0],
            },
            'recursive': args.recursive,
            'watchers': [],
        }
    else:
        # validate out_templates against a fake config
        fake_config_data = fake_config()
        invalid_out_templates = [v for v in args.out if not validate_handlebars(v, fake_config_data)]
        if len(invalid_out_templates):
            # BUG FIX: was json.dumps(invalidTemplates) -- an undefined name
            # that raised NameError whenever an invalid template was found.
            parser.error(f"ParserError: The following arguments(s) to the --out flag are not valid templates: {json.dumps(invalid_out_templates)}")
        return {
            'dry': False,
            'recursive': args.recursive,
            'watchers': get_watcher_configs_from_args(args),
        }
def validate_handlebars(template, data):
    """Return True if *template* renders successfully against *data*.

    BUG FIX: the original caught only ``re.error``, so any other rendering
    failure (template syntax error, missing key, etc.) propagated out of the
    validator instead of marking the template invalid. Rendering failures of
    any kind now simply report the template as invalid.
    """
    try:
        handlebars.render(template, data)
        return True
    except Exception:
        return False
def get_watcher_configs_from_args(args):
    """Build the list of watcher config dicts from parsed CLI arguments.

    Each --watch directory is paired positionally with its --out template.
    In recursive mode, one watcher is produced per savetuberc.json found
    under each root; otherwise a single watcher per root is assumed, with
    the rc file expected next to the root's parent directory.
    """
    def strip_trailing_slash(folder):
        # Normalize 'path/' -> 'path' for the watch_root field.
        return folder[:-1] if folder.endswith('/') else folder

    pairs = zip(args.watch, args.out)
    if args.recursive:
        # Flattened: one entry per rc file discovered under each root.
        return [
            {
                'rc_filename': rc_filename,
                'watch': os.path.dirname(rc_filename),
                'watch_root': strip_trailing_slash(root_folder),
                'out_template': out,
            }
            for root_folder, out in pairs
            for rc_filename in fs.find_pattern('savetuberc.json', root_folder)
        ]
    return [
        {
            'rc_filename': os.path.join(os.path.dirname(root_folder), 'savetuberc.json'),
            'watch': os.path.dirname(root_folder),
            'watch_root': strip_trailing_slash(root_folder),
            'out_template': out,
        }
        for root_folder, out in pairs
    ]
# def main():
# config = parse()
# print(config)
# #############################################
# if __name__ == "__main__":
# main()
# ############################################# |
import re, sys, numpy, math
from collections import Counter
from scipy.sparse import lil_matrix
import scipy.sparse.linalg
# --- Build a TF-IDF weighted document-term matrix and factor it with SVD ---
doc_counters = []          # per-document token Counters
corpus_counts = Counter()  # total token counts across the corpus
doc_text = []              # raw text of each document (for rank_docs)
print ("reading")
# for TF-IDF: number of documents each word appears in
document_frequency = Counter()
# Input: tab-separated file given as argv[1]; only 3-field lines are kept.
# Field layout appears to be id<TAB>tag<TAB>text -- confirm against the data.
with open(sys.argv[1], encoding="utf-8") as reader:
    for line in reader:
        fields = line.rstrip().split("\t")
        if len(fields) == 3:
            tag = fields[1]
            tokens = fields[2].lower().split()
            doc_counter = Counter(tokens)
            corpus_counts.update(doc_counter)
            # update() with keys() counts each word once per document
            document_frequency.update( doc_counter.keys() )
            doc_counters.append(doc_counter)
            doc_text.append(fields[2])
num_docs = len(doc_counters)
## construct a vocabulary list in reverse order by corpus count,
## dropping rare words (corpus count <= 5)
vocabulary = [ w for w, c in corpus_counts.most_common() if c > 5 ]
# maps strings to integers
reverse_vocab = { word:i for (i, word) in enumerate(vocabulary) }
vocab_size = len(vocabulary)
# IDF weight: -log(df/N), larger for rarer words
idf_weights = { word:-math.log( document_frequency[word] / num_docs ) for word in vocabulary }
print("constructing matrix")
# Sparse docs-by-words matrix; lil_matrix supports efficient row assignment
doc_word_counts = lil_matrix((num_docs, vocab_size))
for doc_id, doc_counter in enumerate(doc_counters):
    words = list([word for word in doc_counter if word in reverse_vocab])
    counts = [doc_counter[word] for word in words]
    # TF-IDF weighting of the raw counts
    weighted_counts = [ idf_weights[word] * doc_counter[word] for word in words ]
    word_ids = [reverse_vocab[word] for word in words]
    # have info based on documents (rows). easier to set rows
    doc_word_counts[doc_id,word_ids] = weighted_counts
# CSR format is required/faster for the SVD below
doc_word_counts = doc_word_counts.tocsr()
print("running SVD")
# Truncated SVD with 100 latent dimensions
doc_vectors, singular_values, word_vectors = scipy.sparse.linalg.svds(doc_word_counts, 100)
# transpose so rows are words: (vocab_size, 100)
word_vectors = word_vectors.T
def rank_words(x):
    """Return (score, word) pairs sorted ascending by score."""
    scored = zip(x, vocabulary)
    return sorted(scored)
def rank_docs(x):
    """Return (score, document text) pairs sorted ascending by score."""
    scored = zip(x, doc_text)
    return sorted(scored)
def l2_norm(matrix):
    """Normalize each row of *matrix* to unit Euclidean length.

    After normalization, a dot product between rows is their cosine
    similarity.
    """
    squared = matrix ** 2
    norms = numpy.sqrt(squared.sum(axis=1))
    return matrix / norms[:, numpy.newaxis]
# cosine sim becomes dot product
# so word_vectors.dot(word_vectors[154,:])
# gives you matrix of cosine similarities
# words that occur in most similar contexts to python
# sorted_words[-10:]
# [(0.6332042503921438, 'databricks'), (0.6366757752082062, 'open'),
# (0.646656838057807, 'server'), (0.6489888390810046, 'package'),
# (0.651368378524946, 'ide'), (0.6608083695292203, 'studio'),
# (0.6664462671950556, 'ides'), (0.6733420856087191, 'rstudio'),
# (0.6861970891874979, 'mac'), (0.9999999999999998, 'python')]
# looking up "r"
# >>> reverse_vocab["r"]
# 171
# >>> sorted_words = rank_words(word_vectors.dot( word_vectors[171,:]))
# >>> sorted_words[-10:]
# [(0.5578353259297886, 'ggplot2'),
# (0.5608580506753871, 'postgres'),
# (0.5609395631290913, 'constants'),
# (0.5687079034804157, 'mac'),
# (0.6185301636704101, 'oracle'),
# (0.6279484374038404, 'package'),
# (0.6354393705297439, 'ide'),
# (0.6529268897910219, 'law'), (0.7137968712936943, 'ides'), (1.0, 'r')]
# >>> doc_vectors[1,-1]
# 0.005451273761441059
# >>> singular_values[-1]
# 1540.2000204745748
# >>> singular_values[-1] * doc_vectors[1,-1]
# 8.396051958984032
# >>> word_vectors[0:10,-1]
# array([0.62016613, 0.31118427, 0.26832617, 0.20399631, 0.25420701,
# 0.21965151, 0.20298278, 0.16092729, 0.13567712, 0.12362675])
# >>> word_vectors[0:10,-1] * singular_values[-1] * doc_vectors[1,-1]
# array([5.20694702, 2.61271927, 2.25288045, 1.71276363, 2.13433526,
# 1.84420549, 1.70425396, 1.35115388, 1.13915218, 1.03797662])
# >>> vocabulary[:10]
# ['the', 'to', 'a', 'i', 'of', 'is', 'and', 'in', 'you', 'for']
# >>> doc_counters[1]
# Counter({'the': 7, 'to': 6, 'you': 3, 'code': 3, 'images': 2, 'show': 2, 'have':2, 'files': 2, 'how': 2, 'if': 1, 'need': 1, 'work': 1, 'on': 1, 'using': 1, 'python': 1, 'preferred': 1, 'library': 1, 'is': 1, 'pil': 1, 'here': 1, 'i': 1, 'a': 1, 'function': 1, 'do': 1, 'modifications': 1, 'delimited': 1, 'this': 1, 'makes': 1, 'no': 1, 'effort': 1, 'manage': 1, 'multiple': 1, 'or': 1, 'name': 1, 'converted': 1, 'but': 1, 'it': 1, 'does': 1, 'modify': 1, 'in': 1, 'ways': 1, 'asked': 1, 'test': 1, 'save': 1, 'image': 1, 'see': 1, 'docs': 1})
#
# So we expect 'the' to occur 5 times
# 'to' to occur 2 times
# 'a' to occur 2 times |
import copy
import pickle
import os
DEBUG = False
# Probability floor used when accumulating counts (was float('-inf')).
MIN_PROB = 1e-12
# Maximum per-entry change allowed for convergence; the smaller the value,
# the longer training takes.
EPSILON = 1.0e-9

def is_converged(t, last_t):
    """Return True if every translation probability in *t* moved by at most
    EPSILON since *last_t* (i.e. the two tables are equivalent)."""
    return all(abs(prob - last_t[key]) <= EPSILON for key, prob in t.items())
# learning translation probabilitity distributions from sentence-aligned parallel text
# expectation maximization algorithm
# Input: set of sentence pairs (e,f), t_table to initialize t, number of maximum iterations
# optional: filenames to save t_table and a_table in files
# Output: translation prob. t (lexical translation) and a (alignment)
# S.99 Figure 4.7
# TODO e = [None] + e
def EM_IBM_Model_2(e_set, f_set, ibm1_t, max_steps, filename_t=None, filename_a=None):
    """Learn IBM Model 2 translation/alignment probabilities with EM.

    Expectation-maximization over sentence-aligned parallel text
    (Koehn, "Statistical Machine Translation", S.99 Figure 4.7).

    Args:
        e_set, f_set: parallel lists of tokenized sentences (e_set[k] is
            aligned with f_set[k]).
        ibm1_t: dict {(e_word, f_word): prob} from IBM Model 1, used to
            initialize the lexical translation table t(e|f).
        max_steps: maximum number of EM iterations.
        filename_t, filename_a: optional paths to pickle the resulting
            t-table / a-table.

    Returns:
        (t, a): lexical translation table t(e|f) and alignment table
        a(i | j, l_e, l_f).

    TODO: prepend None to each e sentence for NULL alignment.
    """
    if DEBUG: print('start training IBM Model 2')
    # initialize t(e|f) with IBM Model 1
    t = copy.deepcopy(ibm1_t)
    # initialize a(i|j, l_e, l_f) uniformly for every (i, j, l_e, l_f)
    # combination that occurs in the corpus
    a = {}
    for k in range(0, len(e_set)): # for every sentence pair
        e = e_set[k]
        f = f_set[k]
        l_e = len(e)
        l_f = len(f)
        for j in range(0, l_e):
            for i in range(0, l_f):
                a[(i, j, l_e, l_f)] = 1.0 / (l_f + 1)
    last_t = {key: 1 for key in t}  # dummy table so the first convergence test fails
    step = 0
    # iterate until convergence (or the step limit)
    while not is_converged(t, last_t) and step < max_steps:
        # E-step accumulators
        count = {}
        total = {}
        total_a = {}
        count_a = {}
        for k in range(0, len(e_set)):
            e = e_set[k]
            f = f_set[k]
            l_e = len(e)
            l_f = len(f)
            # compute normalization per target word
            s_total = {}
            for j in range(0, l_e):
                e_j = e[j]
                s_total[e_j] = 0
                for i in range(0, l_f):
                    f_i = f[i]
                    s_total[e_j] += t[(e_j, f_i)] * a[(i, j, l_e, l_f)]
            # collect expected counts
            for j in range(0, l_e):
                for i in range(0, l_f):
                    e_j = e[j]
                    f_i = f[i]
                    c = t[(e_j, f_i)] * a[(i, j, l_e, l_f)] / s_total[e_j]
                    count[(e_j, f_i)] = count.get((e_j, f_i), 0) + c
                    total[f_i] = total.get(f_i, 0) + c
                    count_a[(i, j, l_e, l_f)] = count_a.get((i, j, l_e, l_f), 0) + c
                    # seeded with MIN_PROB so the divisor can never be 0
                    total_a[(j, l_e, l_f)] = total_a.get((j, l_e, l_f), MIN_PROB) + c
        # M-step: re-estimate the probability tables
        last_t = copy.deepcopy(t)
        t = {x: 0 for x in t}
        a = {x: 0 for x in a}
        for (e_j, f_i) in count.keys():
            if count[(e_j, f_i)] != 0:
                t[(e_j, f_i)] = count[(e_j, f_i)] / total[f_i]
        for (i, j, l_e, l_f) in count_a.keys():
            if count_a[(i, j, l_e, l_f)] != 0:
                a[(i, j, l_e, l_f)] = count_a[(i, j, l_e, l_f)] / total_a[(j, l_e, l_f)]
        step += 1
        if DEBUG and step % 25 == 0: print('step', step, 'of', max_steps)
    if DEBUG: print('IBM Model 2 training finished.')
    if filename_a is not None:
        if DEBUG: print('Save a table in', filename_a)
        path, _ = os.path.split(filename_a)
        if path:  # os.makedirs('') raises when the filename has no dir part
            os.makedirs(path, exist_ok=True)
        # BUG FIX: the protocol was previously passed as open()'s buffering
        # argument; it belongs to pickle.dump. Also use `with` so the file
        # is closed even if dump raises.
        with open(filename_a, "wb") as fh:
            pickle.dump(a, fh, pickle.HIGHEST_PROTOCOL)
    if filename_t is not None:
        if DEBUG: print('Save t table in', filename_t)
        path, _ = os.path.split(filename_t)
        if path:
            os.makedirs(path, exist_ok=True)
        # BUG FIX: same open()/pickle protocol mix-up as above.
        with open(filename_t, "wb") as fh:
            pickle.dump(t, fh, pickle.HIGHEST_PROTOCOL)
    return t, a
# p(e|f) for IBM Model 2
# S.98 Figure 4.26 + Errata
#Input: sentences e and f, epsilon, t-table and a-table
def prob_e_given_f_2(e, f, epsilon, t, a):
    """Compute p(e|f) under IBM Model 2 (S.98 Figure 4.26 + Errata).

    Args:
        e, f: tokenized sentences (target and source).
        epsilon: normalization constant for sentence length.
        t: lexical translation table {(e_word, f_word): prob}.
        a: alignment table {(i, j, l_e, l_f): prob}.

    Returns:
        epsilon times the product, over target positions j, of
        sum_i t(e_j|f_i) * a(i|j, l_e, l_f); positions whose words were
        never seen in training contribute nothing. Returns 0.0 when no
        word pair at all was seen in training.
    """
    unseen = False       # at least one target position had no known pair
    all_unseen = True    # no target position had any known pair
    l_e = len(e)
    l_f = len(f)
    prod = 1
    for j in range(0, l_e):
        e_j = e[j]
        # renamed from `sum` to avoid shadowing the builtin
        inner = 0.0
        for i in range(0, l_f):
            f_i = f[i]
            if (e_j, f_i) in t and (i, j, l_e, l_f) in a:
                inner += t[(e_j, f_i)] * a[(i, j, l_e, l_f)]
        if inner != 0:
            prod *= inner  # NOTE: underflow possible here for long sentences
            all_unseen = False
        else:
            unseen = True
    if all_unseen:
        if DEBUG: print('None of the contained words were in the training set.')
    elif unseen:
        if DEBUG: print('Some contained words were not in the training set.')
    if prod == 1 and all_unseen: return 0.0
    else: return epsilon * prod
|
<filename>src/mainwindow/__init__.py
from PyQt4 import QtGui
from ags_service_publisher.runner import Runner, root_logger
from ags_service_publisher.logging_io import setup_logger
from aboutdialog import AboutDialog
from helpers.arcpyhelpers import get_install_info
from helpers.pathhelpers import get_app_path, get_config_dir, get_log_dir, get_report_dir
from helpers.texthelpers import escape_html
from loghandlers.qtloghandler import QtLogHandler
from mainwindow_ui import Ui_MainWindow
from publishdialog import PublishDialog
from mxdreportdialog import MXDReportDialog
from datasetusagesreportdialog import DatasetUsagesReportDialog
from datastoresreportdialog import DataStoresReportDialog
from resultdialog import ResultDialog
from workers.subprocessworker import SubprocessWorker
from workers.workerpool import WorkerPool
log = setup_logger(__name__)
class MainWindow(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.actionPublish_Services.triggered.connect(self.show_publish_dialog)
self.actionMXD_Data_Sources_Report.triggered.connect(self.show_mxd_report_dialog)
self.actionDataset_Usages_Report.triggered.connect(self.show_dataset_usages_report_dialog)
self.actionData_Stores_Report.triggered.connect(self.show_data_stores_report_dialog)
self.actionGetInstallInfo.triggered.connect(self.get_install_info)
self.actionGetExecutablePath.triggered.connect(self.get_executable_path)
self.actionAbout.triggered.connect(self.about)
self.actionTestLogWindow.triggered.connect(self.test_log_window)
self.actionExit.triggered.connect(self.close)
self.worker_pool = WorkerPool()
self.log_handler = QtLogHandler()
self.log_handler.messageEmitted.connect(self.log_message)
root_logger.addHandler(self.log_handler)
self.config_dir = get_config_dir()
self.log_dir = get_log_dir()
self.report_dir = get_report_dir()
def closeEvent(self, event):
log.debug('closeEvent triggered')
result = QtGui.QMessageBox.question(
self,
'Exit - AGS Service Publisher',
'Are you sure you want to exit?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No
)
if result == QtGui.QMessageBox.Yes:
self.worker_pool.stop_all_workers()
log.debug('Exiting application!')
event.accept()
else:
log.debug('Ignoring closeEvent')
event.ignore()
def publish_services(self, included_configs, included_services, included_envs, included_instances, create_backups):
runner = Runner(config_dir=self.config_dir, log_dir=self.log_dir)
worker = SubprocessWorker(
target=runner.run_batch_publishing_job,
kwargs={
'included_configs': included_configs,
'included_services': included_services,
'included_envs': included_envs,
'included_instances': included_instances,
'create_backups': create_backups
}
)
worker.messageEmitted.connect(self.handle_worker_message)
worker.resultEmitted.connect(self.handle_worker_result)
self.worker_pool.add_worker(worker)
self.worker_pool.start_worker(worker.id)
def mxd_data_sources_report(self, included_configs, included_services, included_envs, output_filename):
runner = Runner(config_dir=self.config_dir, log_dir=self.log_dir, report_dir=self.report_dir)
worker = SubprocessWorker(
target=runner.run_mxd_data_sources_report,
kwargs={
'included_configs': included_configs,
'included_services': included_services,
'included_envs': included_envs,
'output_filename': output_filename,
'warn_on_validation_errors': True
}
)
worker.messageEmitted.connect(self.handle_worker_message)
worker.resultEmitted.connect(self.handle_worker_result)
self.worker_pool.add_worker(worker)
self.worker_pool.start_worker(worker.id)
def dataset_usages_report(self, included_datasets, included_envs, included_instances, output_filename):
runner = Runner(config_dir=self.config_dir, log_dir=self.log_dir, report_dir=self.report_dir)
worker = SubprocessWorker(
target=runner.run_dataset_usages_report,
kwargs={
'included_datasets': included_datasets,
'included_envs': included_envs,
'included_instances': included_instances,
'output_filename': output_filename
}
)
worker.messageEmitted.connect(self.handle_worker_message)
worker.resultEmitted.connect(self.handle_worker_result)
self.worker_pool.add_worker(worker)
self.worker_pool.start_worker(worker.id)
def data_stores_report(self, included_envs, included_instances, output_filename):
runner = Runner(config_dir=self.config_dir, log_dir=self.log_dir, report_dir=self.report_dir)
worker = SubprocessWorker(
target=runner.run_data_stores_report,
kwargs={
'included_envs': included_envs,
'included_instances': included_instances,
'output_filename': output_filename
}
)
worker.messageEmitted.connect(self.handle_worker_message)
worker.resultEmitted.connect(self.handle_worker_result)
self.worker_pool.add_worker(worker)
self.worker_pool.start_worker(worker.id)
def get_install_info(self):
result_dialog = ResultDialog(self)
try:
result_dialog.setWindowTitle('ArcGIS Install Info - AGS Service Publisher')
result_dialog.setIcon(QtGui.QMessageBox.Information)
result_dialog.setText(str(get_install_info()))
except StandardError as e:
result_dialog.setWindowTitle('Error - AGS Service Publisher')
result_dialog.setIcon(QtGui.QMessageBox.Critical)
result_dialog.setText(str(e))
finally:
result_dialog.exec_()
def get_executable_path(self):
result_dialog = ResultDialog(self)
try:
result_dialog.setWindowTitle('Executable Path - AGS Service Publisher')
result_dialog.setIcon(QtGui.QMessageBox.Information)
result_dialog.setText(get_app_path())
except StandardError as e:
result_dialog.setWindowTitle('Error - AGS Service Publisher')
result_dialog.setIcon(QtGui.QMessageBox.Critical)
result_dialog.setText(str(e))
finally:
result_dialog.exec_()
def about(self):
about_dialog = AboutDialog(self)
about_dialog.exec_()
def show_publish_dialog(self):
try:
publish_dialog = PublishDialog(self)
publish_dialog.publishSelected.connect(self.publish_services)
publish_dialog.exec_()
except Exception:
log.exception('An error occurred while showing the Publish dialog')
raise
def show_mxd_report_dialog(self):
try:
mxd_report_dialog = MXDReportDialog(self)
mxd_report_dialog.runReport.connect(self.mxd_data_sources_report)
mxd_report_dialog.exec_()
except Exception:
log.exception('An error occurred while showing the MXD Report dialog')
raise
def show_dataset_usages_report_dialog(self):
try:
dataset_usages_report_dialog = DatasetUsagesReportDialog(self)
dataset_usages_report_dialog.runReport.connect(self.dataset_usages_report)
dataset_usages_report_dialog.exec_()
except Exception:
log.exception('An error occurred while showing the Dataset Usages Report dialog')
raise
def show_data_stores_report_dialog(self):
try:
data_stores_report_dialog = DataStoresReportDialog(self)
data_stores_report_dialog.runReport.connect(self.data_stores_report)
data_stores_report_dialog.exec_()
except Exception:
log.exception('An error occurred while showing the Data Stores Report dialog')
def test_log_window(self):
self.log_info_message('info')
self.log_debug_message('debug')
self.log_success_message('success')
self.log_warning_message('warning')
self.log_error_message('error')
def handle_worker_message(self, worker_id, level, message):
message = 'Worker {}: {}'.format(worker_id, message)
self.log_message(level, message)
def handle_worker_result(self, worker_id, exitcode, result):
    """Log a finished worker's result: green on exit code 0, red otherwise."""
    log.debug('Worker {} resulted in exitcode {} with result value: {}'.format(worker_id, exitcode, result))
    reporter = self.log_success_message if exitcode == 0 else self.log_error_message
    reporter(result)
def log_message(self, level, message):
    """Dispatch *message* to the level-specific log method.

    Raises RuntimeError for levels other than INFO/DEBUG/WARNING/ERROR.
    """
    handlers = {
        'INFO': self.log_info_message,
        'DEBUG': self.log_debug_message,
        'WARNING': self.log_warning_message,
        'ERROR': self.log_error_message,
    }
    handler = handlers.get(level)
    if handler is None:
        raise RuntimeError('Unknown message level: {}'.format(level))
    handler(message)
def log_info_message(self, message):
    """Append *message* to the log window in black (info)."""
    html = '<font color="black">{}</font>'.format(escape_html(message))
    self.logWindow.appendHtml(html)

def log_debug_message(self, message):
    """Append *message* to the log window in gray (debug)."""
    html = '<font color="gray">{}</font>'.format(escape_html(message))
    self.logWindow.appendHtml(html)

def log_warning_message(self, message):
    """Append *message* to the log window in blue (warning)."""
    html = '<font color="blue">{}</font>'.format(escape_html(message))
    self.logWindow.appendHtml(html)

def log_success_message(self, message):
    """Append *message* to the log window in green (success)."""
    html = '<font color="green">{}</font>'.format(escape_html(message))
    self.logWindow.appendHtml(html)

def log_error_message(self, message):
    """Append *message* to the log window in red (error)."""
    html = '<font color="red">{}</font>'.format(escape_html(message))
    self.logWindow.appendHtml(html)
|
<gh_stars>0
import hashlib
import multiprocessing
import os
import platform
import psutil
import socket
import subprocess
import sys
def collect_ci_info():
    """Identify the CI system this process runs under from environment variables.

    Returns a dict with ``pipeline_branch``, ``pipeline_build_no`` and
    ``__ci__`` keys, or an empty dict when no known CI system is detected.
    When variables of several systems are present, the one checked last wins
    (precedence: jenkins < circleci < travisci < droneci < gitlabci),
    matching the original if-chain.
    """
    env = os.environ
    detected = dict()
    # Jenkins exposes BUILD_NUMBER plus either BRANCH_NAME (multibranch jobs)
    # or JOB_NAME.
    if "BUILD_NUMBER" in env and ("BRANCH_NAME" in env or "JOB_NAME" in env):
        branch = env["BRANCH_NAME"] if "BRANCH_NAME" in env else env["JOB_NAME"]
        detected = dict(pipeline_branch=branch, pipeline_build_no=env["BUILD_NUMBER"], __ci__='jenkinsci')
    # The remaining systems all follow the same two-variable shape:
    # (__ci__ tag, branch-like variable, build-number variable).
    for ci_name, branch_var, build_var in (
            ('circleci', "CIRCLE_JOB", "CIRCLE_BUILD_NUM"),
            ('travisci', "TRAVIS_BUILD_ID", "TRAVIS_BUILD_NUMBER"),
            ('droneci', "DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER"),
            ('gitlabci', "CI_JOB_NAME", "CI_PIPELINE_ID")):
        if branch_var in env and build_var in env:
            detected = dict(pipeline_branch=env[branch_var],
                            pipeline_build_no=env[build_var],
                            __ci__=ci_name)
    return detected
def determine_scm_revision():
    """Return the current source-control revision as a string.

    Tries git first, then Perforce; returns '' when neither command
    succeeds (e.g. when run outside any checkout).
    """
    for command in (r'git rev-parse HEAD', r'p4 changes -m1 \#have'):
        proc = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        stdout_data, _ = proc.communicate()
        if proc.returncode == 0:
            # Keep only the first line of the tool's output.
            return stdout_data.decode().split('\n')[0]
    return ''
def _get_cpu_string():
if platform.system().lower() == "darwin":
old_path = os.environ['PATH']
os.environ['PATH'] = old_path + ':' + '/usr/sbin'
ret = subprocess.check_output('sysctl -n machdep.cpu.brand_string', shell=True).decode().strip()
os.environ['PATH'] = old_path
return ret
elif platform.system().lower() == 'linux':
with open('/proc/cpuinfo', 'r', encoding='utf-8') as f:
lines = [i for i in f if i.startswith('model name')]
if lines:
return lines[0].split(':')[1].strip()
return platform.processor()
class ExecutionContext:
    """Immutable snapshot of the host's hardware/software environment.

    All values are captured eagerly at construction time and exposed through
    read-only properties; :meth:`to_dict` serializes the snapshot, including
    an MD5 fingerprint under key ``h``.  Requires the third-party ``psutil``
    package for CPU-frequency and memory figures.
    """

    def __init__(self):
        """Capture CPU, memory, host and interpreter details once."""
        self.__cpu_count = multiprocessing.cpu_count()
        self.__cpu_vendor = _get_cpu_string()
        # Current (not max) CPU frequency in MHz as reported by psutil.
        self.__cpu_freq_base = psutil.cpu_freq().current
        self.__proc_typ = platform.processor()
        # Total physical RAM, converted from bytes to MiB.
        self.__tot_mem = int(psutil.virtual_memory().total / 1024**2)
        self.__fqdn = socket.getfqdn()
        self.__machine = platform.machine()
        # e.g. '64bit' -- first element of platform.architecture().
        self.__arch = platform.architecture()[0]
        self.__system = f'{platform.system()} - {platform.release()}'
        self.__py_ver = sys.version

    def to_dict(self):
        """Return the snapshot as a plain dict, with its hash under key 'h'."""
        return dict(cpu_count=self.cpu_count,
                    cpu_frequency=self.cpu_frequency,
                    cpu_type=self.cpu_type,
                    cpu_vendor=self.cpu_vendor,
                    ram_total=self.ram_total,
                    machine_node=self.fqdn,
                    machine_type=self.machine,
                    machine_arch=self.architecture,
                    system_info=self.system_info,
                    python_info=self.python_info,
                    h=self.hash())

    @property
    def cpu_count(self):
        """Number of logical CPUs."""
        return self.__cpu_count

    @property
    def cpu_frequency(self):
        """Current CPU frequency in MHz (psutil)."""
        return self.__cpu_freq_base

    @property
    def cpu_type(self):
        """Processor type string from platform.processor()."""
        return self.__proc_typ

    @property
    def cpu_vendor(self):
        """Human-readable CPU model string (see _get_cpu_string)."""
        return self.__cpu_vendor

    @property
    def ram_total(self):
        """Total physical RAM in MiB."""
        return self.__tot_mem

    @property
    def fqdn(self):
        """Fully-qualified domain name of this host."""
        return self.__fqdn

    @property
    def machine(self):
        """Machine type, e.g. 'x86_64'."""
        return self.__machine

    @property
    def architecture(self):
        """Pointer-size architecture string, e.g. '64bit'."""
        return self.__arch

    @property
    def system_info(self):
        """OS name and release, e.g. 'Linux - 5.15.0'."""
        return self.__system

    @property
    def python_info(self):
        """Full interpreter version string (sys.version)."""
        return self.__py_ver

    def hash(self):
        """MD5 hex digest over all captured fields.

        Used as a machine-configuration fingerprint only -- not
        security-sensitive.
        """
        hr = hashlib.md5()
        hr.update(str(self.__cpu_count).encode())
        hr.update(str(self.__cpu_freq_base).encode())
        hr.update(str(self.__proc_typ).encode())
        hr.update(str(self.__tot_mem).encode())
        hr.update(str(self.__fqdn).encode())
        hr.update(str(self.__machine).encode())
        hr.update(str(self.__arch).encode())
        hr.update(str(self.__system).encode())
        hr.update(str(self.__py_ver).encode())
        return hr.hexdigest()
|
import logging
import datetime
import json
logger = logging.getLogger(__name__)
from stix2patterns_translator.pattern_objects import ObservationExpression, ComparisonExpression, \
ComparisonExpressionOperators, ComparisonComparators, Pattern, \
CombinedComparisonExpression, CombinedObservationExpression, ObservationOperators
from stix2patterns_translator.errors import SearchFeatureNotSupportedError
from stix_shifter.src.transformers import TimestampToEpoch, ValueTransformer
def _fetch_network_protocol_mapping():
try:
map_file = open(
'stix_shifter/src/modules/qradar/json/network_protocol_map.json').read()
map_data = json.loads(map_file)
return map_data
except Exception as ex:
print('exception in reading mapping file:', ex)
return {}
class AqlQueryStringPatternTranslator:
    """Translate a parsed STIX pattern AST into a QRadar AQL query string.

    The translation happens eagerly in __init__; the result is available as
    ``self.translated`` with a ``SELECT * FROM events WHERE`` prefix whose
    ``*`` is substituted later (see translate_pattern below).
    """

    # STIX operators/comparators -> AQL tokens.
    comparator_lookup = {
        ComparisonExpressionOperators.And: "AND",
        ComparisonExpressionOperators.Or: "OR",
        ComparisonComparators.GreaterThan: ">",
        ComparisonComparators.GreaterThanOrEqual: ">=",
        ComparisonComparators.LessThan: "<",
        ComparisonComparators.LessThanOrEqual: "<=",
        ComparisonComparators.Equal: "=",
        ComparisonComparators.NotEqual: "!=",
        ComparisonComparators.Like: "LIKE",
        ComparisonComparators.In: "IN",
        ComparisonComparators.Matches: 'MATCHES',
        ObservationOperators.Or: 'OR',
        # Treat AND's as OR's -- Unsure how two ObsExps wouldn't cancel each other out.
        ObservationOperators.And: 'OR'
    }

    def __init__(self, pattern: Pattern, data_model_mapper):
        """Translate *pattern* immediately using *data_model_mapper* for
        STIX-object/field -> QRadar-field resolution."""
        self.dmm = data_model_mapper
        self.pattern = pattern
        # The '*' placeholder is replaced with the mapped selections later.
        self.select_prefix = 'SELECT * FROM events WHERE'
        self.translated = self.parse_expression(pattern)

    @staticmethod
    def _format_set(values) -> str:
        """Render a STIX set literal as an OR-joined, escaped AQL group."""
        gen = values.element_iterator()
        return "({})".format(' OR '.join([AqlQueryStringPatternTranslator._escape_value(value) for value in gen]))

    @staticmethod
    def _format_match(value) -> str:
        """Convert an anchored STIX regex into AQL MATCHES form.

        Leading '^' / trailing '$' anchors are stripped; unanchored ends are
        padded with '.*' so the AQL match behaves like a substring regex.
        """
        raw = AqlQueryStringPatternTranslator._escape_value(value)
        if raw[0] == "^":
            raw = raw[1:]
        else:
            raw = ".*" + raw
        if raw[-1] == "$":
            raw = raw[0:-1]
        else:
            raw = raw + ".*"
        return "\'{}\'".format(raw)

    @staticmethod
    def _format_equality(value) -> str:
        """Wrap an equality operand in single quotes."""
        return '\'{}\''.format(value)

    @staticmethod
    def _format_like(value) -> str:
        """Map SQL LIKE wildcards ('%', '_') to AQL wildcards ('*', '?')."""
        value = value.replace('%', '*')
        value = value.replace('_', '?')
        return AqlQueryStringPatternTranslator._escape_value(value)

    @staticmethod
    def _escape_value(value, comparator=None) -> str:
        """Backslash-escape characters AQL treats specially in string values.

        Non-string values are returned unchanged.
        """
        if isinstance(value, str):
            return '{}'.format(value.replace('\\', '\\\\').replace('\"', '\\"').replace('(', '\\(').replace(')', '\\)'))
        else:
            return value

    @staticmethod
    def _negate_comparison(comparison_string):
        """Wrap a rendered comparison in AQL NOT(...)."""
        return "NOT({})".format(comparison_string)

    def _parse_expression(self, expression) -> str:
        """Recursively render one AST node into an AQL fragment.

        Raises RuntimeError for unknown node types and KeyError for
        unsupported network protocol names.
        """
        if isinstance(expression, ComparisonExpression):  # Base Case
            # Resolve STIX Object Path to a field in the target Data Model
            stix_object, stix_field = expression.object_path.split(':')
            # Multiple QRadar fields may map to the same STIX Object
            mapped_fields_array = self.dmm.map_field(stix_object, stix_field)
            # Resolve the comparison symbol to use in the query string (usually just ':')
            comparator = self.comparator_lookup[expression.comparator]
            if stix_field == 'protocols[*]':
                # Protocol names must be converted to QRadar's numeric ids.
                map_data = _fetch_network_protocol_mapping()
                try:
                    expression.value = map_data[expression.value.lower()]
                except Exception as protocol_key:
                    raise KeyError(
                        "Network protocol {} is not supported.".format(protocol_key))
            elif stix_field == 'start' or stix_field == 'end':
                # Timestamps are compared as epoch values in AQL.
                transformer = TimestampToEpoch()
                expression.value = transformer.transform(expression.value)
            # Some values are formatted differently based on how they're being compared
            if expression.comparator == ComparisonComparators.Matches:  # needs forward slashes
                value = self._format_match(expression.value)
            # should be (x, y, z, ...)
            elif expression.comparator == ComparisonComparators.In:
                value = self._format_set(expression.value)
            elif expression.comparator == ComparisonComparators.Equal or expression.comparator == ComparisonComparators.NotEqual:
                # Should be in single-quotes
                value = self._format_equality(expression.value)
            # '%' -> '*' wildcard, '_' -> '?' single wildcard
            elif expression.comparator == ComparisonComparators.Like:
                value = self._format_like(expression.value)
            else:
                value = self._escape_value(expression.value)
            comparison_string = ""
            mapped_fields_count = len(mapped_fields_array)
            # One comparison per mapped field, OR-joined.
            for mapped_field in mapped_fields_array:
                comparison_string += "{mapped_field} {comparator} {value}".format(
                    mapped_field=mapped_field, comparator=comparator, value=value)
                if (mapped_fields_count > 1):
                    comparison_string += " OR "
                    mapped_fields_count -= 1
            if(len(mapped_fields_array) > 1):
                # More than one AQL field maps to the STIX attribute so group the ORs.
                grouped_comparison_string = "(" + comparison_string + ")"
                comparison_string = grouped_comparison_string
            if expression.comparator == ComparisonComparators.NotEqual:
                comparison_string = self._negate_comparison(comparison_string)
            if expression.negated:
                comparison_string = self._negate_comparison(comparison_string)
            return "{comparison}".format(comparison=comparison_string)
        elif isinstance(expression, CombinedComparisonExpression):
            query_string = "{} {} {}".format(self._parse_expression(expression.expr1),
                                             self.comparator_lookup[expression.operator],
                                             self._parse_expression(expression.expr2))
            return query_string
        elif isinstance(expression, ObservationExpression):
            return self._parse_expression(expression.comparison_expression)
        elif isinstance(expression, CombinedObservationExpression):
            operator = self.comparator_lookup[expression.operator]
            return "{expr1} {operator} {expr2}".format(expr1=self._parse_expression(expression.expr1),
                                                       operator=operator,
                                                       expr2=self._parse_expression(expression.expr2))
        elif isinstance(expression, Pattern):
            # Root node: prepend the SELECT prefix.
            return "{opening_select} {expr}".format(opening_select=self.select_prefix, expr=self._parse_expression(expression.expression))
        else:
            raise RuntimeError("Unknown Recursion Case for expression={}, type(expression)={}".format(
                expression, type(expression)))

    def parse_expression(self, pattern: Pattern):
        """Public entry point: translate a whole Pattern into AQL."""
        return self._parse_expression(pattern)
def translate_pattern(pattern: Pattern, data_model_mapping):
    """Translate a STIX pattern into a complete AQL query string.

    The translator emits 'SELECT * FROM events WHERE ...'; the first '*'
    is then swapped for the concrete field selections from the mapper.
    """
    translator = AqlQueryStringPatternTranslator(pattern, data_model_mapping)
    selections = translator.dmm.map_selections()
    return translator.translated.replace('*', selections, 1)
|
from . import abbr_patterns as ap
# Matches area/volume unit abbreviations: "ha" (hectare) and metres with an
# optional SI prefix and fer-/rúm- (square/cubic) marker or a superscript,
# e.g. "m²", "km2", "ferm", "rúmkm".
area_ptrn = r"\b(ha|(f(er)?|rúm)[pnµmcsdk]?m\b\.?)|[pnµmcsdk]?m[²2³3]"
def make_area_dict():
    """Build a regex -> replacement dict expanding Icelandic area/volume
    abbreviations ("ha", "m²", "rúmkm", ...) into fully spelled-out,
    correctly inflected words.

    The case/number of the expansion depends on the preceding word (lists
    from abbr_patterns: accusative/dative/genitive triggers) and on the
    final digit of the number (1 -> singular, otherwise plural).

    NOTE(review): the replacement backreferences (\g<10>..\g<16>) encode the
    group counts inside the ap.* alternations -- re-verify them whenever
    those patterns change.
    """
    # Hectare patterns, from most specific (case-governed) to bare-number.
    area_dict = {"((\W|^)(" + ap.accdatgen_words_comb + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*1|\d,\d*1))) ha\.?(\W|$)": "\g<1> hektara\g<14>",
                 "((\W|^)(" + ap.accgen_words + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*[02-9]|\d,\d*[02-9]))) ha\.?(\W|$)": "\g<1> hektara\g<12>",
                 "((\W|^)(" + ap.accgen_words + ") (((\d{1,2}\.)?(\d{3}\.?)*|\d+)(,\d+)?)?) " + ap.amounts + " ha\.?(\W|$)": "\g<1> \g<13> hektara\g<16>",
                 "((\W|^)(" + ap.dat_words + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*[02-9]|\d,\d*[02-9]))) ha\.?(\W|$)": "\g<1> hekturum\g<10>",
                 "((\W|^)(" + ap.dat_words + ") (((\d{1,2}\.)?(\d{3}\.?)*|\d+)(,\d+)?)?) " + ap.amounts + " ha\.?(\W|$)": "\g<1> \g<11> hekturum\g<14>",
                 "(1) ha\.?(\W|$)": "\g<1> hektari\g<2>",
                 "([02-9]|" + ap.amounts + ") ha\.?(\W|$)": "\g<1> hektarar \g<3>"}
    # Superscript/digit after the unit marks the dimension: ² -> fer(square),
    # ³ -> rúm (cubic).
    dimension_after = [("²", "fer"),
                       ("2", "fer"),
                       ("³", "rúm"),
                       ("3", "rúm")]
    # Prefix before the unit may also mark the dimension: f/fer, rúm.
    dimension_before = [("f", "fer"),
                        ("fer", "fer"),
                        ("rúm", "rúm")]
    # SI prefixes allowed in front of "m": (abbrev-letter pattern, full name).
    prefix_meter_dimension = [("", ""),
                              ("p", "píkó"),
                              ("n", "nanó"),
                              ("µ", "míkró"),
                              ("m", "milli"),
                              ("[cs]", "senti"),
                              ("d", "desi"),
                              ("k", "kíló")]
    # Variant 1: dimension written as a trailing superscript/digit (e.g. "km²").
    for letter, prefix in prefix_meter_dimension:
        for superscript, dimension in dimension_after:
            area_dict.update({"((\W|^)(" + ap.accdatgen_words_comb + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*1|\d,\d*1))) " + letter + "m" + superscript + "(\W|$)": "\g<1> " + dimension + prefix + "metra\g<14>"})
            area_dict.update({"((\W|^)(" + ap.accgen_words + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*[02-9]|\d,\d*[02-9]))) " + letter + "m" + superscript + "(\W|$)": "\g<1> " + dimension + prefix + "metra\g<12>"})
            area_dict.update({"((\W|^)(" + ap.accgen_words + ") (((\d{1,2}\.)?(\d{3}\.?)*|\d+)(,\d+)?)?) " + ap.amounts + " " + letter + "m" + superscript + "(\W|$)": "\g<1> \g<13> " + dimension + prefix + "metra\g<16>"})
            area_dict.update({"((\W|^)(" + ap.dat_words + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*[02-9]|\d,\d*[02-9]))) " + letter + "m" + superscript + "(\W|$)": "\g<1> " + dimension + prefix + "metrum\g<10>"})
            area_dict.update({"((\W|^)(" + ap.dat_words + ") (((\d{1,2}\.)?(\d{3}\.?)*|\d+)(,\d+)?)?) " + ap.amounts + " " + letter + "m" + superscript + "(\W|$)": "\g<1> \g<11> " + dimension + prefix + "metrum\g<14>"})
            area_dict.update({"(1 )" + letter + "m" + superscript + "(\W|$)": "\g<1>" + dimension + prefix + "metri\g<2>"})
            area_dict.update({"([02-9]|" + ap.amounts + ") " + letter + "m" + superscript + "(\W|$)": "\g<1> " + dimension + prefix + "metrar \g<3>"})
    # Variant 2: dimension written as a leading marker (e.g. "ferkm", "rúmm").
    for letter, prefix in prefix_meter_dimension:
        for preprefix, dimension in dimension_before:
            area_dict.update({"((\W|^)(" + ap.accdatgen_words_comb + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*1|\d,\d*1))) " + preprefix + letter + "m\.?(\W|$)": "\g<1> " + dimension + prefix + "metra\g<14>"})
            area_dict.update({"((\W|^)(" + ap.accgen_words + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*[02-9]|\d,\d*[02-9]))) " + preprefix + letter + "m\.?(\W|$)": "\g<1> " + dimension + prefix + "metra\g<12>"})
            area_dict.update({"((\W|^)(" + ap.accgen_words + ") (((\d{1,2}\.)?(\d{3}\.?)*|\d+)(,\d+)?)?) " + ap.amounts + " " + preprefix + letter + "m\.?(\W|$)": "\g<1> \g<13> " + dimension + prefix + "metra\g<16>"})
            area_dict.update({"((\W|^)(" + ap.dat_words + ") ((\d{1,2}\.)?(\d{3}\.?)*(\d*[02-9]|\d,\d*[02-9]))) " + preprefix + letter + "m\.?(\W|$)": "\g<1> " + dimension + prefix + "metrum\g<10>"})
            area_dict.update({"((\W|^)(" + ap.dat_words + ") (((\d{1,2}\.)?(\d{3}\.?)*|\d+)(,\d+)?)?) " + ap.amounts + " " + preprefix + letter + "m\.?(\W|$)": "\g<1> \g<11> " + dimension + prefix + "metrum\g<14>"})
            area_dict.update({"(1 )" + preprefix + letter + "m\.?(\W|$)": "\g<1>" + dimension + prefix + "metri\g<2>"})
            area_dict.update({"([02-9]|" + ap.amounts + ") " + preprefix + letter + "m\.?(\W|$)": "\g<1> " + dimension + prefix + "metrar \g<3>"})
    return area_dict
import os
import lib.warning as warning
from amino.socket import Callbacks
from lib.logger import log
from lib.obscene import Obscene
import time
import datetime
import random
class MessageHandler(Callbacks):
    """Amino chat callback handler.

    Routes incoming socket events to per-message-type handlers and
    implements the bot's canned replies, dice-roll commands, greeting
    messages and profanity warnings for the chats in ``selected_chats``.
    """

    def __init__(self, client, selected_chats):
        """
        Build the callback handler.
        This is meant to be subclassed, where desired methods would be redefined.
        client: Client to be used
        selected_chats: chat objects the bot is allowed to react in
        """
        self.client = client
        self.private_chat_for_spam = False
        # Socket event type -> handler.
        self.methods = {
            1000: self._resolve_chat_message
        }
        # "mediaType:contentType" -> chat-message handler.
        self.chat_methods = {
            "0:0": self.on_text_message,
            "0:100": self.on_image_message,
            "0:103": self.on_youtube_message,
            "2:110": self.on_voice_message,
            "3:113": self.on_sticker_message,
            "101:0": self.on_group_member_join,
            "102:0": self.on_group_member_leave,
            "103:0": self.on_chat_invite
        }
        self.selected_chats = selected_chats
        # Canned texts loaded once at startup (warning + three-part greeting).
        # Refactored: four identical open/read/close stanzas -> one helper;
        # the explicit close() calls inside the with-blocks were redundant.
        self.warning_text = self._read_text('/warning.txt')
        self.hi18chat_text = self._read_text('/data/txt/hi18chat.txt')
        self.hi18chat2_text = self._read_text('/data/txt/hi18chat2.txt')
        self.hi18chat3_text = self._read_text('/data/txt/hi18chat3.txt')

    @staticmethod
    def _read_text(relative_path):
        """Read a UTF-8 text file located relative to the working directory."""
        with open(os.getcwd() + relative_path, 'r', encoding='UTF-8') as text_file:
            return text_file.read()

    def on_text_message(self, data):
        """Handle a plain text chat message.

        Returns True when the bot replied, False for the bot's own messages,
        None when no trigger matched.
        """
        data = data['o']['chatMessage']
        user_id = data['author']['uid']
        # Kept even though currently unused: these reads also validate the payload.
        user_reputation = data['author']['reputation']
        user_nickname = data['author']['nickname']
        user_level = data['author']['level']
        thread_id = data['threadId']
        message_text = data['content']
        # Ignore the bot's own messages.
        if user_id == self.client.uid:
            return False
        if user_nickname == "Линчеватель":
            user_nickname = "Лянча"
        if user_nickname == "denvin":
            user_nickname = "Скунс"
        ob = Obscene()
        # Hoisted: was recomputed for every single check below.
        text = message_text.lower()
        for i in self.selected_chats:
            # Only react inside the configured chats.
            # BUG FIX: several checks below used `uid == thread_id and A or B`
            # chains; due to operator precedence the `or` branches fired even
            # for messages from non-selected chats. The guard fixes all of them.
            if i.uid != thread_id:
                continue
            if 'цаца шлюха' in text:
                log(str(datetime.datetime.now())+" Функция - цаца шлюха")
                i.send_text_message(user_nickname+", мамку твою раком трахала)")
                return True
            if 'цаца стерва' in text:
                log(str(datetime.datetime.now())+" Функция - цаца стерва")
                i.send_text_message(user_nickname+", в сраку твого батю перла,)")
                return True
            if 'цаца салам' in text:
                log(str(datetime.datetime.now())+" Функция - цаца салам")
                i.send_text_message(user_nickname+", аллейкум салам")
                return True
            if 'цаца привет' in text or 'цаца ку' in text or 'цаца здравствуй' in text:
                log(str(datetime.datetime.now())+" Функция - цаца привет")
                i.send_text_message(user_nickname+", салам")
                return True
            # Profanity filter: count a warning and post the warning template.
            if not ob.is_clear(message_text):
                user_warnings = warning.warning(user_id)
                log(f"Пользователь {user_nickname} ({user_warnings}) нарушает правила сообщества в чате {i.title}.")
                i.send_text_message(self.warning_text.replace('{name}', user_nickname).replace('{warnings}', str(user_warnings)))
                return True
            if ('линч это копенгаген' in text or 'линч це копенгаген' in text
                    or 'линчеватель це копенгаген' in text or 'линчеватель это копенгаген' in text):
                log(str(datetime.datetime.now())+" Функция - линч это")
                i.send_text_message(user_nickname+", <NAME>")
                return True
            if ('линч это дублин' in text or 'линч це дублин' in text
                    or 'линчеватель це дублин' in text or 'линчеватель это дублин' in text):
                log(str(datetime.datetime.now())+" Функция - линч это")
                i.send_text_message(user_nickname+", <NAME>")
                return True
            if ('линч это харьков' in text or 'линч це харьков' in text
                    or 'линчеватель це харьков' in text or 'линчеватель это харьков' in text):
                log(str(datetime.datetime.now())+" Функция - линч это")
                i.send_text_message(user_nickname+", да, одноветочный.")
                return True
            if 'люда з села' in text or 'люда з сила' in text:
                log(str(datetime.datetime.now())+" Функция - люда з села")
                i.send_text_message(user_nickname+", ага, із забитого нахуй.")
                return True
            if 'стелла собака' in text:
                log(str(datetime.datetime.now())+" Функция - стелла собака")
                i.send_text_message(user_nickname+", вроді гавкає.")
                return True
            if 'кусь токсик' in text:
                log(str(datetime.datetime.now())+" Функция - кусь токсик")
                i.send_text_message(user_nickname+", кто такой єтот ваш кусь?")
                return True
            if 'го гс' in text:
                log(str(datetime.datetime.now())+" Функция - го гс")
                i.send_text_message(user_nickname+", опять по бл**ям? Ви ж старі як дінозаври..")
                return True
            if 'кпоп норм' in text:
                log(str(datetime.datetime.now())+" Функция - кпоп норм")
                i.send_text_message(user_nickname+", а твоє єбало нє норм")
                return True
            if 'цаца крутая' in text or 'цаца классная' in text or 'цаца хорошая' in text:
                log(str(datetime.datetime.now())+" Функция - цаца крутая")
                i.send_text_message(user_nickname+", спасибо, а ты секс :3")
                return True
            if 'цаца бот' in text or 'цаца робот' in text or 'цаца компютер' in text:
                log(str(datetime.datetime.now())+" Функция - цаца бот")
                i.send_text_message(user_nickname+", я думала ти мені друг((")
                return True
            if 'рома бандера' in text or 'рома укроп' in text or 'рома хохол' in text:
                log(str(datetime.datetime.now())+" Функция - рома бандера")
                i.send_text_message(user_nickname+", <NAME>, а ти сосеш.")
                return True
            if 'давно тебя не было в уличных гонках' in text:
                log(str(datetime.datetime.now())+" Функция - уличные гонки")
                i.send_text_message(user_nickname+", так пизданула наче <NAME>.")
                return True
            if 'хохлушка' in text:
                log(str(datetime.datetime.now())+" Функция - хохлушка")
                i.send_text_message(user_nickname+", не хохлушка а багіня.")
                return True
            if 'вы все зёпы' in text:
                log(str(datetime.datetime.now())+" Функция - Вы все зёпы")
                i.send_text_message(user_nickname+", посмокчи мою чорну єлду.")
                return True
            if 'рома алкаш' in text:
                log(str(datetime.datetime.now())+" Функция - рома алкаш")
                i.send_text_message(user_nickname+", Рома професійний соміль'є.")
                return True
            if 'пососеш' in text:
                log(str(datetime.datetime.now())+" Функция - пососеш")
                i.send_text_message(user_nickname+", хто я?)))0)0")
                return True
            # Command: random number 0..100.
            if text.startswith('цаца ролл'):
                random.seed()
                rand = random.randint(0, 100)
                log(str(datetime.datetime.now())+" Функция - цаца ролл: "+str(rand))
                i.send_text_message(user_nickname+", твоё случайное число: "+str(rand))
                return True
            # Command: 50/50 success roll on the rest of the message.
            if text.startswith('цаца ду '):
                text_plus = message_text[8:]
                if random.randint(0, 100) > 49:
                    log(str(datetime.datetime.now())+" Функция - цаца ду: успешно")
                    i.send_text_message("[I]"+text_plus+"[Успешно]")
                else:
                    log(str(datetime.datetime.now())+" Функция - цаца ду: неуспешно")
                    i.send_text_message("[I]"+text_plus+"[Неуспешно]")
                return True
            # Same as above, but prefixed with the author's nickname.
            if text.startswith('цаца дия '):
                text_plus = message_text[9:]
                if random.randint(0, 100) > 49:
                    log(str(datetime.datetime.now())+" Функция - цаца дия: успешно")
                    i.send_text_message("[I]"+user_nickname+" "+text_plus+"[Успешно]")
                else:
                    log(str(datetime.datetime.now())+" Функция - цаца дия: неуспешно")
                    i.send_text_message("[I]"+user_nickname+" "+text_plus+"[Неуспешно]")
                return True
            if 'цаца ты кто' in text:
                log(str(datetime.datetime.now())+" Функция - цаца ты кто")
                i.send_text_message(user_nickname+", я тебе ничего не расскажу, а вдруг ты нацик?")
                return True
            # Catch-all for any other message addressed to the bot by name.
            if text.startswith('цаца'):
                log(str(datetime.datetime.now())+" Функция - цаца")
                i.send_text_message(user_nickname+", не трогай мене, животне вонюче. Я вам не нанімалась -_-")
                return True
            if 'стелла це машка' in text:
                log(str(datetime.datetime.now())+" Функция - стелла це машка")
                i.send_text_message(user_nickname+", Абсолютно с тобой сагласна, гдє подпісивать?")
                return True

    def on_group_member_join(self, data):
        """Greet a newly joined member with the three-part welcome text."""
        data = data['o']['chatMessage']
        user_id = data['author']['uid']
        user_nickname = data['author']['nickname']
        thread_id = data['threadId']
        # Ignore the bot's own join events.
        if user_id == self.client.uid:
            return False
        for i in self.selected_chats:
            if i.uid == thread_id:
                i.send_text_message(self.hi18chat_text.replace('{name}', user_nickname))
                i.send_text_message(self.hi18chat2_text)
                i.send_text_message(self.hi18chat3_text)
|
<filename>EU_Open_Spending_Visualization/data/datacleaning.py
import csv
import json
data = []          # accumulates the rows of the yearly file being processed
delimiter = ','    # output delimiter for the cleaned CSV files
# for each year the same procedure applies:
# remove rows with data that we will not use (for now)
# check each cell if it is empty or only a space > return "missing"
# check each cell if it is multiline, merge this into one line (since we do not have a solution at this time)
# save cleaned file
#############################################################################
# 2007: semicolon-separated source.
datafile = open("original/EC_2007_dirty.csv", "r")
csvdata = csv.reader(datafile, delimiter=';')
for row in csvdata:
    data.append(row)
# remove redundant data
# (deletions run highest-index-first so earlier deletes don't shift later targets)
for row in data:
    del row[12]
    del row[10]
    del row[9]
    del row[8]
    del row[7]
    del row[5]
    del row[3]
    del row[2]
# correct missing values
j = 0
for row in data:
    i = 0
    for column in row:
        # collapse multi-line cells into a single ' / '-joined line
        if '\n' in column:
            data[j][i] = ' / '.join(column.split('\n'))
        # flag empty / whitespace-only cells
        if column == '' or column == ' ':
            data[j][i] = 'missing'
        i += 1
    j += 1
# 'wb' mode: this script targets Python 2's csv module
with open('cleaned/EC_2007.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=delimiter, quoting=csv.QUOTE_MINIMAL)
    for row in data:
        writer.writerow(row)
datafile.close()
csvfile.close()  # NOTE(review): redundant -- the with-block already closed it
data = []
data = []
#############################################################################
datafile = open("original/EC_2008_dirty.csv", 'U')
csvdata = csv.reader(datafile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
for row in csvdata:
data.append(row)
# remove redundant data
for row in data:
del row[12]
del row[10]
del row[9]
del row[8]
del row[7]
del row[5]
del row[3]
del row[2]
# correct missing values
j = 0
for row in data:
i = 0
for column in row:
if '\n' in column:
data[j][i] = ' / '.join(column.split('\n'))
if column == '' or column == ' ':
data[j][i] = 'missing'
i += 1
j += 1
with open('cleaned/EC_2008.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=delimiter)
for row in data:
writer.writerow(row)
datafile.close()
csvfile.close()
data = []
#############################################################################
# 2009: like 2008 but column 7 is kept (one fewer deletion).
datafile = open("original/EC_2009_dirty.csv")
csvdata = csv.reader(datafile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
for row in csvdata:
    data.append(row)
# remove redundant data
for row in data:
    del row[12]
    del row[10]
    del row[9]
    del row[8]
    del row[5]
    del row[3]
    del row[2]
# correct missing values
j = 0
for row in data:
    i = 0
    for column in row:
        if '\n' in column:
            data[j][i] = ' / '.join(column.split('\n'))
        if column == '' or column == ' ':
            data[j][i] = 'missing'
        i += 1
    j += 1
with open('cleaned/EC_2009.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=delimiter)
    for row in data:
        writer.writerow(row)
datafile.close()
csvfile.close()  # NOTE(review): redundant -- the with-block already closed it
data = []
#############################################################################
# 2010: different layout (extra leading column, merged cells carried forward).
datafile = open("original/EC_2010_dirty.csv")
csvdata = csv.reader(datafile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
for row in csvdata:
    data.append(row)
# remove redundant data
# BUG FIX: these deletions previously ran once on the loop-leftover `row`
# instead of on every row -- compare the otherwise-identical 2011 section.
for row in data:
    del row[12]
    del row[11]
    del row[10]
    del row[7]
    del row[5]
    del row[4]
    del row[3]
    del row[0]
# correct missing values
j = 0
for row in data:
    i = 0
    # empty first cell means "same as previous row" (merged cell in the source)
    if row[0] == '' or row[0] == ' ':
        row[0] = data[j-1][0]
    # NOTE(review): checks column 6 but writes column 5 -- identical to the
    # 2011 section; confirm against the source layout.
    if row[6] == '' or row[6] == ' ':
        row[5] = row[4]
    for column in row:
        if '\n' in column:
            data[j][i] = ' / '.join(column.split('\n'))
        if column == '' or column == ' ' or column == '-':
            data[j][i] = 'missing'
        i += 1
    j += 1
for row in data:
    del row[4]
    del row[-1]
with open('cleaned/EC_2010.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=delimiter)
    for row in data:
        writer.writerow(row)
datafile.close()
data = []
#############################################################################
# 2011: same layout as 2010.
datafile = open("original/EC_2011_dirty.csv")
csvdata = csv.reader(datafile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
for row in csvdata:
    data.append(row)
# # remove redundant data
for row in data:
    del row[12]
    del row[11]
    del row[10]
    del row[7]
    del row[5]
    del row[4]
    del row[3]
    del row[0]
# correct missing values
j = 0
for row in data:
    i = 0
    # empty first cell means "same as previous row" (merged cell in the source)
    if row[0] == '' or row[0] == ' ':
        row[0] = data[j-1][0]
    # NOTE(review): checks column 6 but writes column 5 -- confirm against
    # the source layout.
    if row[6] == '' or row[6] == ' ':
        row[5] = row[4]
    for column in row:
        if '\n' in column:
            data[j][i] = ' / '.join(column.split('\n'))
        if column == '' or column == ' ' or column == '-':
            data[j][i] = 'missing'
        i += 1
    j += 1
for row in data:
    del row[4]
    del row[-1]
with open('cleaned/EC_2011.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=delimiter)
    for row in data:
        writer.writerow(row)
datafile.close()
csvfile.close()  # NOTE(review): redundant -- the with-block already closed it
data = []
#############################################################################
# 2012: one extra source column (13) plus a special case for the
# Brussels infrastructure office rows.
datafile = open("original/EC_2012_dirty.csv")
csvdata = csv.reader(datafile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
for row in csvdata:
    data.append(row)
# remove redundant data
for row in data:
    del row[13]
    del row[12]
    del row[11]
    del row[10]
    del row[7]
    del row[5]
    del row[4]
    del row[3]
    del row[0]
# correct missing values
j = 0
for row in data:
    i = 0
    # empty first cell means "same as previous row" (merged cell in the source)
    if row[0] == '' or row[0] == ' ':
        row[0] = data[j-1][0]
    if row[7] == "Office for Infrastructure and Logistics in Brussels" or row[5] == '' or row[5] == ' ':
        row[5] = row[4]
    for column in row:
        if '\n' in column:
            data[j][i] = ' / '.join(column.split('\n'))
        if column == '' or column == ' ' or column == '-':
            data[j][i] = 'missing'
        i += 1
    j += 1
for row in data:
    del row[4]
    del row[-1]
with open('cleaned/EC_2012.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=delimiter)
    for row in data:
        writer.writerow(row)
datafile.close()
csvfile.close()  # NOTE(review): redundant -- the with-block already closed it
data = []
############################################################################
# 2013: like 2012 but the Brussels-office label is also propagated from the
# previous row when the cell is blank.
datafile = open("original/EC_2013_dirty.csv")
csvdata = csv.reader(datafile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
for row in csvdata:
    data.append(row)
# remove redundant data
for row in data:
    del row[12]
    del row[11]
    del row[10]
    del row[7]
    del row[5]
    del row[4]
    del row[3]
    del row[0]
# correct missing values
j = 0
for row in data:
    i = 0
    # empty first cell means "same as previous row" (merged cell in the source)
    if row[0] == '' or row[0] == ' ':
        row[0] = data[j-1][0]
    if row[7] == "Office for Infrastructure and Logistics in Brussels" or ((row[7] == '' or row[7] == ' ') and data[j-1][7] == "Office for Infrastructure and Logistics in Brussels"):
        row[5] = row[4]
        row[7] = "Office for Infrastructure and Logistics in Brussels"
    for column in row:
        if '\n' in column:
            data[j][i] = ' / '.join(column.split('\n'))
        if column == '' or column == ' ' or column == '-':
            data[j][i] = 'missing'
        i += 1
    j += 1
for row in data:
    del row[4]
    del row[-1]
with open('cleaned/EC_2013.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=delimiter)
    for row in data:
        writer.writerow(row)
datafile.close()
csvfile.close()  # NOTE(review): redundant -- the with-block already closed it
data = []
"""
Tools for running CMake in setup phase
"""
__all__ = [
"CMakeExtension",
"CMakeBuild",
"find_package",
"WITH_CMAKE",
]
import argparse
import os
import subprocess
from tempfile import TemporaryDirectory
from setuptools import Extension
from setuptools.command.build_ext import build_ext
from .raiseif import raiseif
try:
    import cmake

    WITH_CMAKE = True
except ModuleNotFoundError:
    WITH_CMAKE = False

# Decorator that raises at call time when the cmake package is unavailable.
# Fix: the install hint named the wrong extra (`[pylint]`); the missing
# dependency here is cmake, so point users at the `[cmake]` extra.
mark = raiseif(not WITH_CMAKE, ImportError("Please install `lyncs_setuptools[cmake]`"))
class CMakeExtension(Extension):
    """Setuptools Extension describing a CMake-driven build.

    The declared sources are the project's CMakeLists.txt plus any files
    found in an optional `patches/` directory next to it, so setuptools
    rebuilds whenever one of them changes.
    """

    def __init__(self, name, source_dir=".", cmake_args=None, post_build=None):
        # An empty or None source_dir falls back to the current directory
        source_dir = source_dir or "."
        patches_dir = source_dir + "/patches"
        sources = [source_dir + "/CMakeLists.txt"]
        if os.path.exists(patches_dir):
            sources.extend(patches_dir + "/" + fname for fname in os.listdir(patches_dir))
        Extension.__init__(self, name, sources=sources)
        self.source_dir = os.path.abspath(source_dir)
        # Accept a single string, a list, or nothing for the extra cmake args
        if isinstance(cmake_args, str):
            self.cmake_args = [cmake_args]
        else:
            self.cmake_args = cmake_args or []
        # Optional callable invoked as post_build(build_cmd, ext) after building
        self.post_build = post_build
class CMakeBuild(build_ext):
    "Build phase of the CMakeExtension"

    def run(self):
        "build_ext function that manages the installation"
        # Fix: the original removed elements from self.extensions while
        # iterating over it directly, which skips the element that follows
        # each removal (adjacent CMakeExtensions were silently not built).
        # Iterate over a snapshot instead.
        cmake_exts = [ext for ext in self.extensions if isinstance(ext, CMakeExtension)]
        for ext in cmake_exts:
            self.build_extension(ext)
            self.extensions.remove(ext)
        # Delegate any remaining (non-CMake) extensions to setuptools
        if self.extensions:
            build_ext.run(self)

    def get_install_dir(self, ext):
        "Returns the installation directory (the module base directory)"
        return os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))

    def build_extension(self, ext):
        "Runs the CMake scripts in the build phase"
        # Fail early with a clear message if cmake is not on PATH
        try:
            subprocess.check_output(["cmake", "--version"])
        except OSError as err:
            raise OSError(
                "CMake must be installed to build the following extensions: " + ext.name
            ) from err
        cmake_args = ["-DEXTERNAL_INSTALL_LOCATION=" + self.get_install_dir(ext)]
        cmake_args += ext.cmake_args
        cfg = "Debug" if self.debug else "Release"
        build_args = ["--config", cfg]
        cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
        # Use all cores but one; os.cpu_count() may return None, which the
        # original `abs(os.cpu_count() - 1)` would crash on
        jobs = max(1, (os.cpu_count() or 2) - 1)
        build_args += ["--", "-j", str(jobs)]
        env = os.environ.copy()
        env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get("CXXFLAGS", ""), self.distribution.get_version()
        )
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        # Configure, then build; echo the combined output for the build log
        out = subprocess.check_output(
            ["cmake", ext.source_dir] + cmake_args, cwd=self.build_temp, env=env
        )
        out += subprocess.check_output(
            ["cmake", "--build", "."] + build_args, cwd=self.build_temp
        )
        print(out.decode())
        # Optional user hook run after a successful build
        if ext.post_build:
            ext.post_build(self, ext)
@mark
def get_version(verbose=False):
    """Returns CMake version"""
    if not verbose:
        # version of the bundled cmake python package (major.minor.patch)
        return ".".join(cmake.__version__.split(".")[:3])
    # otherwise ask the cmake executable itself and return the first
    # whitespace-separated token that starts with a digit
    output = subprocess.check_output(
        ["cmake", "--version"], stderr=subprocess.DEVNULL
    ).decode()
    version = next((tok for tok in output.split() if tok[0].isdigit()), None)
    if version is None:
        raise RuntimeError("Could not deduce version")
    return version
# CMakeLists.txt template: the package name is substituted into
# find_package(%s); every variable the package defines (i.e. any variable
# not present before the call) is reported as "-- VAR <name> = <value>",
# which find_package() below parses from cmake's output.
CMAKE_FIND = """
cmake_minimum_required(VERSION ${CMAKE_VERSION})
project(LYNCS)
get_cmake_property(_before VARIABLES)
find_package(%s)
get_cmake_property(_after VARIABLES)
foreach (_name ${_after})
if((NOT _name IN_LIST _before) AND (NOT _name STREQUAL "_before"))
message(STATUS "VAR ${_name} = ${${_name}}")
endif()
endforeach()
"""
@mark
def parse_value(val):
    "Parse a CMake value"
    # non-strings pass through untouched
    if not isinstance(val, str):
        return val
    # integers first: "123" -> 123
    try:
        return int(val)
    except ValueError:
        pass
    # CMake lists are ";"-separated (elements are left as raw strings)
    if ";" in val:
        return val.split(";")
    # booleans, case-insensitively
    lowered = val.lower()
    if lowered in ("true", "false"):
        return lowered == "true"
    return val
@mark
def find_package(name, clean=True):
    """
    Returns the output of find_package by CMake.
    If clean, returns post-processed values.
    Otherwise all the variables and values from CMake.
    """
    with TemporaryDirectory() as temp_dir:
        # write a throw-away CMakeLists that calls find_package(name)
        # and prints every variable the package defined
        with open(temp_dir + "/CMakeLists.txt", "w") as cmake_file:
            cmake_file.write(CMAKE_FIND % name)
        out = subprocess.check_output(
            ["cmake", "."], cwd=temp_dir, stderr=subprocess.DEVNULL
        )
        # keep only the "-- VAR <name> = <value>" report lines,
        # split into ["<name>", "=", <value tokens>...]
        lines = tuple(
            line.split()[2:]
            for line in out.decode().split("\n")
            if line.startswith("-- VAR")
        )
        assert all((len(line) >= 2 and "=" in line for line in lines))
        # rejoin the value tokens; a bare "<name> =" maps to None
        values = {line[0]: " ".join(line[2:]) if len(line) >= 3 else None for line in lines}
        if not clean:
            return values
        # keep only the <NAME>_* variables, strip the prefix,
        # lowercase the key and parse the value into python types
        return {
            key[len(name) + 1 :].lower(): parse_value(val)
            for key, val in values.items()
            if key.startswith(name + "_")
        }
@mark
def print_find_package():
    "Returns the values of find_package"
    parser = argparse.ArgumentParser(
        "Returns the variables defined by CMake find_package"
    )
    parser.add_argument("package", nargs=1, help="The package to find, e.g. MPI")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="prints all the values without cleaning them",
    )
    options = parser.parse_args()
    # any -v flag means raw (uncleaned) variables
    results = find_package(options.package[0], clean=options.verbose == 0)
    for key, value in results.items():
        print(f"{key}: {value}")
# CMakeLists.txt template: reports every variable defined in a bare
# project as "-- VAR <name> = <value>", parsed by get_variables() below.
CMAKE_VARS = """
cmake_minimum_required(VERSION ${CMAKE_VERSION})
project(LYNCS)
get_cmake_property(_vars VARIABLES)
foreach (_name ${_vars})
if(NOT _name STREQUAL "_vars")
message(STATUS "VAR ${_name} = ${${_name}}")
endif()
endforeach()
"""
@mark
def get_variables():
    """
    Returns all the variables CMake defines in a bare project,
    mapped to their parsed values.

    (The previous docstring was copy-pasted from find_package and
    described a `clean` parameter this function does not have.)
    """
    with TemporaryDirectory() as temp_dir:
        # write a throw-away CMakeLists that just dumps every variable
        with open(temp_dir + "/CMakeLists.txt", "w") as cmake_file:
            cmake_file.write(CMAKE_VARS)
        out = subprocess.check_output(
            ["cmake", "."], cwd=temp_dir, stderr=subprocess.DEVNULL
        )
        # keep only the "-- VAR <name> = <value>" report lines
        lines = tuple(
            line.split()[2:]
            for line in out.decode().split("\n")
            if line.startswith("-- VAR")
        )
        assert all((len(line) >= 2 and "=" in line for line in lines))
        # rejoin the value tokens; a bare "<name> =" maps to None
        values = {line[0]: " ".join(line[2:]) if len(line) >= 3 else None for line in lines}
        return {key: parse_value(val) for key, val in values.items()}
|
<reponame>baishancloud/mysql-devops
#!/usr/bin/env python2
# coding: utf-8
import copy
import unittest
from pykit import utfjson
from pykit import ututil
from pykit.ectypes import (
BlockDesc,
BlockExists,
BlockGroup,
BlockGroupID,
BlockID,
BlockNotFoundError,
BlockTypeNotSupportReplica,
BlockTypeNotSupported,
DriveID,
BlockIndex,
)
# shortcut to pykit's debug printer
dd = ututil.dd

# EC layout used across the tests: 4 data + 2 parity blocks per idc,
# 2 data idcs + 1 cross-idc parity idc, 'lrc' policy, and 3-way
# replication for the d0 data blocks
_ec_config = {
    'in_idc': [4, 2],
    'cross_idc': [2, 1],
    'ec_policy': 'lrc',
    'data_replica': 3
}

# the BlockGroup expected when constructing from _ec_config with no blocks
_empty_group = BlockGroup({
    'config': {
        'in_idc': [4, 2],
        'ec_policy': 'lrc',
        'cross_idc': [2, 1],
        'data_replica': 3
    },
    'blocks': {},
    'idcs': ['a', 'b', 'c'],
    'block_group_id': 'g000640000000123'
})
class TestBlockGroupID(unittest.TestCase):
    """Construction and string conversion of BlockGroupID."""

    def test_new(self):
        gid_str = 'g000640000000123'
        # from the (block count, sequence) components
        gid = BlockGroupID(64, 123)
        self.assertEqual(gid_str, str(gid))
        # from the string form
        gid = BlockGroupID(gid_str)
        self.assertEqual((64, 123), gid.as_tuple())
        # from another BlockGroupID
        gid = BlockGroupID(gid)
        self.assertEqual((64, 123), gid.as_tuple())

    def test_new_invalid(self):
        # wrong length must be rejected
        self.assertRaises(ValueError, BlockGroupID, 'g00064000000012345')

    def test_tostr(self):
        gid_str = 'g000640000000123'
        gid = BlockGroupID(gid_str)
        self.assertEqual(gid_str, str(gid))
        self.assertEqual(gid_str, '{0}'.format(gid))
        self.assertEqual("'g000640000000123'", repr(gid))
class TestBlockGroup(unittest.TestCase):
    """Behavioural tests for pykit.ectypes.BlockGroup.

    Block indexes are 4-digit strings: the first two digits select the
    idc, the last two the position inside the idc -- presumably per
    _ec_config's [4, 2] in-idc layout; confirm against pykit docs.
    """

    def setUp(self):
        # a d0 (data) block at index '0000' in idc 'a', reused by most tests
        self.foo_block = BlockDesc({
            'block_id': BlockID('d0', 'g000640000000123', '0000',
                                DriveID('idc000' 'c62d8736c7280002'), 1),
            'size': 1000,
            'range': ['0a', '0b'],
            'is_del': 0
        })

    def test_new(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        self.assertEqual(_empty_group, g)
        # test lacking of arg
        self.assertRaises(TypeError, BlockGroup, block_group_id='g000640000000123', idcs=[])
        self.assertRaises(TypeError, BlockGroup, block_group_id='g000640000000123', config=_ec_config)
        self.assertRaises(TypeError, BlockGroup, idcs=[], config=_ec_config)

    def test_json(self):
        # dump/load must round-trip to an equal group
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        rst = utfjson.dump(g)
        expected = ('{"config": {"in_idc": [4, 2], "ec_policy": "lrc", "cross_idc": [2, 1], '
                    '"data_replica": 3}, "blocks": {}, "idcs": ["a", "b", "c"], '
                    '"block_group_id": "g000640000000123"}')
        self.assertEqual(expected, rst)
        loaded = BlockGroup(utfjson.load(rst))
        self.assertEqual(g, loaded)

    def test_new_deref_config(self):
        # the group must deep-copy the config, not alias the caller's dict
        cnf = copy.deepcopy(_ec_config)
        b = BlockGroup(block_group_id='g000640000000123', config=cnf, idcs=['a', 'b', 'c'])
        a = copy.deepcopy(b['config'])
        b['config']['in_idc'] = [10, 11]
        self.assertNotEqual(a, b)
        a = copy.deepcopy(b['config'])
        b['config']['cross_idc'] = [10, 11]
        self.assertNotEqual(a, b)
        a = copy.deepcopy(b['config'])
        b['config']['ec_policy'] = 'foo'
        self.assertNotEqual(a, b)
        a = copy.deepcopy(b['config'])
        b['config']['data_replica'] = 100
        self.assertNotEqual(a, b)

    def test_get_block(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        # absent blocks return None unless raise_error is set
        block = g.get_block('0000')
        self.assertIsNone(block)
        block = g.get_block('9999')
        self.assertIsNone(block)
        with self.assertRaises(BlockNotFoundError):
            g.get_block('9999', raise_error=True)
        g.add_block(self.foo_block)
        block = g.get_block(self.foo_block['block_id'].block_index)
        self.assertDictEqual(self.foo_block, block)
        with self.assertRaises(BlockNotFoundError):
            g.get_block('0002', raise_error=True)
        # a full block id is not a valid block index
        with self.assertRaises(ValueError):
            g.get_block('d0g0006400000001230000c62d2')

    def test_mark_delete_block(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        g.add_block(self.foo_block)
        g.mark_delete_block('0000')
        # mark_delete only flips is_del; the block stays in the group
        block = g.get_block('0000')
        self.assertEqual(1, block['is_del'])
        self.assertRaises(BlockNotFoundError, g.mark_delete_block, '9999')

    def test_delete_block(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        self.assertIsNone(g.get_block('0000'))
        g.add_block(self.foo_block)
        self.assertIsNotNone(g.get_block('0000'))
        g.delete_block('0000')
        self.assertIsNone(g.get_block('0000'))
        # deleting an absent block is a no-op, not an error
        g.delete_block('0000')
        self.assertIsNone(g.get_block('0000'))

    def test_replace_block(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        # add_block returns the previous occupant (None on first add)
        prev = g.add_block(self.foo_block)
        self.assertIsNone(prev)
        block = g.get_block('0000')
        self.assertEqual(0, block['is_del'])
        prev = g.add_block(self.foo_block, replace=True)
        self.assertEqual(self.foo_block, prev)
        # without replace=True a second add raises
        self.assertRaises(BlockExists, g.add_block, self.foo_block)
        self.assertRaises(BlockExists, g.add_block, self.foo_block, replace=False)

    def test_get_free_block_index(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        g.add_block(self.foo_block)
        # '0000' is occupied by foo_block, so it is missing from the d0 list
        self.assertDictEqual({'a': ['0001', '0002', '0003'],
                              'b': ['0100', '0101', '0102', '0103']},
                             g.get_free_block_indexes('d0'))
        self.assertDictEqual({'a': ['0004', '0005'],
                              'b': ['0104', '0105']},
                             g.get_free_block_indexes('dp'))
        self.assertDictEqual({'c': ['0200', '0201', '0202', '0203'], },
                             g.get_free_block_indexes('x0'))
        self.assertDictEqual({'c': ['0204', '0205'], },
                             g.get_free_block_indexes('xp'))
        # get_all=True also lists idcs that have no slot for the type
        self.assertDictEqual(
            {
                'a': ['0001', '0002', '0003'],
                'b': ['0100', '0101', '0102', '0103'],
                'c': [],
            },
            g.get_free_block_indexes('d0', get_all=True))
        self.assertDictEqual(
            {
                'a': ['0004', '0005'],
                'b': ['0104', '0105'],
                'c': [],
            },
            g.get_free_block_indexes('dp', get_all=True))
        self.assertDictEqual(
            {
                'a': [],
                'b': [],
                'c': ['0200', '0201', '0202', '0203'],
            },
            g.get_free_block_indexes('x0', get_all=True))
        self.assertDictEqual(
            {
                'a': [],
                'b': [],
                'c': ['0204', '0205'],
            },
            g.get_free_block_indexes('xp', get_all=True))

    def test_get_block_type(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        self.assertEqual('d0', g.get_block_type('0000'))
        self.assertEqual('dp', g.get_block_type('0004'))
        self.assertEqual('d1', g.get_block_type('0006'))
        self.assertEqual('d0', g.get_block_type('0100'))
        self.assertEqual('dp', g.get_block_type('0104'))
        self.assertEqual('d1', g.get_block_type('0106'))
        self.assertEqual('x0', g.get_block_type('0200'))
        self.assertEqual('xp', g.get_block_type('0204'))
        # positions outside the configured layout are rejected
        self.assertRaises(BlockTypeNotSupported, g.get_block_type, '0299')
        self.assertRaises(BlockTypeNotSupported, g.get_block_type, '0900')

    def test_get_block_idc(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        # the idc is derived from the index prefix alone
        self.assertEqual('a', g.get_block_idc('0000'))
        self.assertEqual('b', g.get_block_idc('0100'))
        self.assertEqual('c', g.get_block_idc('0200'))
        # adding a concrete block does not change the mapping
        d0 = BlockDesc({
            'block_id': BlockID('d0', 'g000640000000123', '0000',
                                DriveID('idc000' 'c62d8736c7280002'), 1),
            'size': 1000,
            'range': ['0a', '0b'],
            'is_del': 0
        })
        g.add_block(d0)
        self.assertEqual('a', g.get_block_idc('0000'))

    def test_get_replica_index_not_include_me(self):
        g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)
        # the 3 replicas of a d0 block rotate through indexes 0000/0006/0010
        self.assertEqual(['0006', '0010'], g.get_replica_indexes('0000', include_me=False))
        self.assertEqual(['0000', '0010'], g.get_replica_indexes('0006', include_me=False))
        self.assertEqual(['0000', '0006'], g.get_replica_indexes('0010', include_me=False))
        # parity blocks have no replicas
        with self.assertRaises(BlockTypeNotSupportReplica):
            g.get_replica_indexes('0004', include_me=False)
        with self.assertRaises(BlockTypeNotSupportReplica):
            g.get_replica_indexes('0204', include_me=False)

    def test_classify_blocks(self):
        gid = 'g000640000000123'
        g = BlockGroup(block_group_id=gid, idcs=['a', 'b', 'c'], config=_ec_config)
        blks = g.classify_blocks(0, only_primary=True)
        self.assertEqual([], blks['ec'] + blks['replica'] + blks['mark_del'])
        base_blk = BlockDesc({
            'size': 1000,
            'range': ['0a', '0b'],
            'is_del': 0
        })
        ec_blk_idxes = ['0000', '0001']
        replica_blk_idxes = ['0002', '0008', '0012']
        mark_del_idxes = ['0003', '0004']
        for i, idx in enumerate(ec_blk_idxes + replica_blk_idxes + mark_del_idxes):
            typ = g.get_block_type(idx)
            blkid = BlockID(typ, gid, idx, DriveID('idc000' 'c62d8736c7280002'), i)
            blk = copy.deepcopy(base_blk)
            blk['block_id'] = blkid
            if idx in mark_del_idxes:
                blk['is_del'] = 1
            g.add_block(blk)
        for only_primary in (True, False):
            blks = g.classify_blocks(0, only_primary)
            blk_idxes = []
            for blk in blks['ec'] + blks['replica'] + blks['mark_del']:
                idx = BlockID(blk['block_id']).block_index
                blk_idxes.append(idx)
            expect_ids = copy.deepcopy(ec_blk_idxes)
            # '0004' is a parity index, so it must not appear in mark_del
            if only_primary is True:
                expect_ids += replica_blk_idxes[:1] + mark_del_idxes[:1]
            else:
                expect_ids += replica_blk_idxes + mark_del_idxes[:1]
            self.assertEqual(expect_ids, blk_idxes)

    def test_get_parities(self):
        gid = 'g000640000000123'
        g = BlockGroup(block_group_id=gid, idcs=['a', 'b', 'c'], config=_ec_config)
        parities = g.get_parities(idc_index=0)
        self.assertEqual([], parities)
        base_parity = BlockDesc({
            'size': 1000,
            'range': ['0a', '0b'],
            'is_del': 0
        })
        # '0004' and '0005' are the two in-idc parity slots of idc 0
        parity_idxes = ['0004', '0005']
        for i, idx in enumerate(parity_idxes):
            blkid = BlockID('dp', gid, idx, DriveID('idc000' 'c62d8736c7280002'), i)
            parity = copy.deepcopy(base_parity)
            parity['block_id'] = blkid
            g.add_block(parity)
        idxes = g.get_parity_indexes(idc_index=0)
        self.assertEqual(parity_idxes, idxes)
        parities = g.get_parities(idc_index=0)
        idxes = []
        for p in parities:
            idx = BlockID(p['block_id']).block_index
            idxes.append(idx)
        self.assertEqual(parity_idxes, idxes)

    def make_test_block_group(self, blk_idxes, config=None):
        # helper: build a group with one synthetic block per index,
        # placing each block on a drive in the idc its index implies
        gid = 'g000640000000123'
        base_blk = BlockDesc({
            'size': 1000,
            'range': ['0a', '0b'],
            'is_del': 0
        })
        if config is None:
            config = _ec_config
        num_idcs = sum(config['cross_idc'])
        idcs = ['idc' + (str(i).rjust(3, '0')) for i in range(num_idcs)]
        bg = BlockGroup(block_group_id=gid, idcs=idcs, config=config)
        for i, bi in enumerate(blk_idxes):
            bi = BlockIndex(bi)
            typ = bg.get_block_type(bi)
            drive_id = DriveID(idcs[int(bi[0])] + 'c62d8736c7280002')
            blkid = BlockID(typ, gid, bi, drive_id, i)
            blk = copy.deepcopy(base_blk)
            blk['block_id'] = blkid
            bg.add_block(blk)
        return bg

    def test_is_ec_block(self):
        idc_idx = 0
        ec_blk_idxes = ['0000', '0001', '0005']
        replica_blk_idxes = ['0002', '0008', '0012']
        bg = self.make_test_block_group(ec_blk_idxes + replica_blk_idxes)
        # a block id that is not in the group raises
        with self.assertRaises(BlockNotFoundError):
            gid = 'g000640000000123'
            bid = BlockID('dp', gid, '0001', DriveID('idc000' 'ab2d8736c7280002'), 0)
            bg.is_ec_block(bid)
        act_ec_blk_idxes = []
        nr_data, nr_parity = bg['config']['in_idc']
        for i in range(0, nr_data + nr_parity):
            bi = BlockIndex(idc_idx, i)
            blk = bg.get_block(bi)
            if blk is None:
                continue
            if bg.is_ec_block(blk['block_id']):
                act_ec_blk_idxes.append(bi)
        self.assertListEqual(ec_blk_idxes, act_ec_blk_idxes)

    def test_get_ec_blocks(self):
        idc_idx = 0
        ec_blk_idxes = ['0000', '0001']
        replica_blk_idxes = ['0002', '0008', '0012']
        bg = self.make_test_block_group(ec_blk_idxes + replica_blk_idxes)
        ec_blks = bg.indexes_to_blocks(ec_blk_idxes)
        act_ec_blks = bg.get_ec_blocks(idc_idx)
        self.assertListEqual(ec_blks, act_ec_blks)

    def test_get_ec_broken_blocks(self):
        idc_idx = 0
        ec_blk_idxes = ['0000', '0001', '0003']
        replica_blk_idxes = ['0002', '0008', '0012']
        bg = self.make_test_block_group(ec_blk_idxes + replica_blk_idxes)
        broken_blk_idxes = ec_blk_idxes[1:]
        broken_blks = bg.indexes_to_blocks(broken_blk_idxes)
        broken_ec_bids = [blk['block_id'] for blk in broken_blks]
        act_broken_blks = bg.get_ec_broken_blocks(idc_idx, broken_ec_bids)
        self.assertListEqual(broken_blks, act_broken_blks)

    def test_get_ec_block_ids(self):
        idc_idx = 0
        ec_blk_idxes = ['0000', '0001', '0003']
        replica_blk_idxes = ['0002', '0008', '0012']
        bg = self.make_test_block_group(ec_blk_idxes + replica_blk_idxes)
        ec_blks = bg.indexes_to_blocks(ec_blk_idxes)
        ec_bids = [blk['block_id'] for blk in ec_blks]
        act_ec_bids = bg.get_ec_block_ids(idc_idx)
        self.assertListEqual(ec_bids, act_ec_bids)

    def test_get_replica_blocks(self):
        ec_blk_idxes = ['0000', '0001', '0003']
        replica_blk_idxes = ['0002', '0008', '0012']
        bg = self.make_test_block_group(ec_blk_idxes + replica_blk_idxes)
        replica_blks = bg.indexes_to_blocks(replica_blk_idxes)
        for blk in replica_blks:
            bid = blk['block_id']
            act_replica_blks = bg.get_replica_blocks(bid)
            self.assertListEqual(replica_blks, act_replica_blks)
            # include_me=False must drop exactly the queried block
            _replica_blks = copy.deepcopy(replica_blks)
            _replica_blks.remove(blk)
            act_replica_blks = bg.get_replica_blocks(bid, include_me=False)
            self.assertListEqual(_replica_blks, act_replica_blks)

    def test_get_block_byid(self):
        blk_idxes = ['0000', '0001', '0002', '0003', '0008', '0012']
        bg = self.make_test_block_group(blk_idxes)
        blks = bg.indexes_to_blocks(blk_idxes)
        bids = [blk['block_id'] for blk in blks]
        act_blks = []
        for bid in bids:
            act_blks.append(bg.get_block_byid(bid))
        self.assertListEqual(blks, act_blks)
|
<filename>paprika/actions/tripolis/SendEmail.py
from paprika.actions.Actionable import Actionable
from paprika.repositories.ProcessActionPropertyRepository import ProcessActionPropertyRepository
from paprika.repositories.ProcessPropertyRepository import ProcessPropertyRepository
from paprika.repositories.ProcessRepository import ProcessRepository
from paprika.system.logger.Logger import Logger
from paprika.system.JsonExt import JsonExt
from paprika.services.RestRequest import RestRequest
from paprika.actions.ProcessException import ProcessException
from paprika.system.ExpressionParser import ExpressionParser
import json
class SendEmail(Actionable):
    """Action that sends an email through the Tripolis REST API.

    Reads its configuration from process-action properties, posts the
    assembled message, and stores the returned mailjob_id back on the
    process.  (Python 2 module: note the print statements.)
    """

    def __init__(self):
        Actionable.__init__(self)

    def execute(self, connector, process_action):
        logger = Logger(connector, self)
        job_name = process_action['job_name']
        process_repository = ProcessRepository(connector)
        process_property_repository = ProcessPropertyRepository(connector)
        process_action_property_repository = ProcessActionPropertyRepository(connector)
        # NOTE(review): bare debug prints left in; consider routing them
        # through the logger instead
        print json.dumps(process_action)
        # retrieve the file properties
        process = process_repository.find_by_id(process_action['pcs_id'])
        print json.dumps(process)
        # retrieve the payload if present
        payload = JsonExt.loads(process_property_repository.get_property(process, 'payload'))
        print json.dumps(payload)
        # per-action configuration for the Tripolis call
        auth_info = json.loads(process_action_property_repository.get_property(process_action, 'auth_info'))
        database = process_action_property_repository.get_property(process_action, 'database')
        workspace = process_action_property_repository.get_property(process_action, 'workspace')
        contact_group = process_action_property_repository.get_property(process_action, 'contact_group')
        group_type = process_action_property_repository.get_property(process_action, 'group_type')
        direct_email_type = process_action_property_repository.get_property(process_action, 'direct_email_type')
        direct_email = process_action_property_repository.get_property(process_action, 'direct_email')
        # we are going to parse this, like in the Call action
        # (ExpressionParser may reference local variables such as payload)
        key_fields = json.loads(process_action_property_repository.get_property(process_action, 'key_fields'))
        key_fields = ExpressionParser.parse(key_fields, locals())
        attribute_fields = json.loads(process_action_property_repository.get_property(process_action, 'attribute_fields'))
        attribute_fields = ExpressionParser.parse(attribute_fields, locals())
        # transport settings for the REST request
        headers = json.loads(process_action_property_repository.get_property(process_action, 'headers'))
        certificate = JsonExt.loads(process_action_property_repository.get_property(process_action, 'certificate'))
        proxies = JsonExt.loads(process_action_property_repository.get_property(process_action, 'proxies'))
        url = process_action_property_repository.get_property(process_action, 'url')
        message = {
            "auth_info": auth_info,
            "database": database,
            "workspace": workspace,
            "contact_group": contact_group,
            "group_type": group_type,
            "direct_email_type": direct_email_type,
            "direct_email": direct_email,
            "key_fields": key_fields,
            "attribute_fields": attribute_fields
        }
        logger.info(job_name, json.dumps(message))
        response = RestRequest.post(headers, url, message, certificate, proxies)
        logger.info(job_name, "status_code : " + str(response.status_code) + ", reason : " + response.reason + ", content : " + response.content)
        # any non-200 response aborts the process
        if response.status_code != 200:
            raise ProcessException("status_code : " + str(response.status_code) + ", reason : " + response.reason + ", content : " + response.content)
        # store the returned mailjob_id in the process.
        content = json.loads(response.content)
        mailjob_id = content['mailjob_id']
        process_property_repository.set_property(process, 'mailjob_id', mailjob_id)
|
<gh_stars>0
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.signal import find_peaks
import csv
import os
def find_vel(i,x_list,y_list):
    """Speed (displacement magnitude) between frame i-1 and frame i.

    Returns 0 when i is out of range or when either endpoint is the
    (0, 0) sentinel used for frames where the ball was not detected.
    """
    if i < 1 or i >= len(x_list):
        return 0
    if (x_list[i-1] == 0 and y_list[i-1] == 0) or (x_list[i] == 0 and y_list[i] == 0):
        return 0
    dx = x_list[i] - x_list[i-1]
    dy = y_list[i] - y_list[i-1]
    # the original wrapped the differences in abs() before squaring,
    # which is redundant: squaring already discards the sign
    return (dx**2 + dy**2)**0.5
def average_vel(i,x_list,y_list):
    """Mean of the non-zero frame velocities in the window [i-2, i+2]."""
    window = [find_vel(m, x_list, y_list) for m in range(i-2, i+3)]
    # zero velocities mark out-of-range or undetected frames: exclude them
    valid = [v for v in window if v != 0]
    if not valid:
        return 0
    return sum(valid) / len(valid)
def find_acc(i,x_list,y_list):
    """Second difference (per-axis acceleration) at frame i.

    Returns (0, 0) when i is out of range or when any of the three
    samples involved is the (0, 0) undetected-ball sentinel.
    """
    if i < 2 or i >= len(x_list):
        return 0,0
    samples = [(x_list[m], y_list[m]) for m in (i-2, i-1, i)]
    if any(px == 0 and py == 0 for px, py in samples):
        return 0,0
    acc_x = x_list[i] - 2*x_list[i-1] + x_list[i-2]
    acc_y = y_list[i] - 2*y_list[i-1] + y_list[i-2]
    return acc_x, acc_y
def average_acc(i,x_list,y_list):
    """Mean of |ax| and |ay| over frames i-2..i+2, skipping undefined ones."""
    abs_x = []
    abs_y = []
    for m in range(i-2, i+3):
        acc_x, acc_y = find_acc(m, x_list, y_list)
        # (0, 0) marks an undefined acceleration sample: skip it
        if acc_x == 0 and acc_y == 0:
            continue
        abs_x.append(abs(acc_x))
        abs_y.append(abs(acc_y))
    if not abs_x:
        return 0,0
    return sum(abs_x)/len(abs_x), sum(abs_y)/len(abs_y)
def plot(dir_path):
    """Render diagnostic plots for every rally CSV triple found in dir_path.

    For each set (1..3) and score pair, expects
    <set>_<A>_<B>_denoise.csv / _truth.csv / _predict.csv and writes a
    2x2 figure to <set>_<A>_<B>_plot.jpg.  Score pairs whose files are
    missing are skipped.
    """
    set_num = 1
    if dir_path[-1] != "/":
        dir_path += "/"
    while set_num <= 3:
        score_A = 0
        score_B = 0
        while score_A < 22:
            data_list = []  # NOTE(review): never used; candidate for removal
            # advance the score BEFORE building the filename, so the
            # enumeration starts at X_00_01 and rolls over after B == 21
            # -- TODO confirm this matches how the rally files are named
            if score_B == 21:
                score_A = score_A + 1
                score_B = 0
            else:
                score_B = score_B + 1
            rally_score = str(set_num)+"_"+(str(score_A)).zfill(2)+"_"+(str(score_B)).zfill(2)
            denoise_file = dir_path + rally_score + "_denoise.csv"
            out_file = dir_path + rally_score + "_truth.csv"
            predict_file = dir_path + rally_score + "_predict.csv"
            plot_file = dir_path + rally_score + "_plot.jpg"
            if os.path.isfile(denoise_file) == False or os.path.isfile(out_file) == False or os.path.isfile(predict_file) == False:
                continue
            # ground-truth trajectory and turning points
            df = pd.read_csv(out_file)
            x = df['X'].tolist()
            y = df['Y'].tolist()
            p = df['turning_point'].tolist()
            vis = df['Visibility'].tolist()  # NOTE(review): unused
            # keep only frames where the ball was detected ((0,0) = missing)
            x_out_line = []
            y_out_line = []
            z_out_line = []
            for i in range(len(y)):
                if x[i] != 0 or y[i] != 0:
                    x_out_line.append(x[i])
                    y_out_line.append(y[i])
                    z_out_line.append(i)
            # ground-truth turning points (red circles in the plots)
            result_x = []
            result_y = []
            result_z = []
            for i in range(len(p)):
                if p[i] == 1:
                    result_x.append(x[i])
                    result_y.append(y[i])
                    result_z.append(i)
            # predicted turning points (blue squares), positions taken
            # from the prediction file itself
            df = pd.read_csv(predict_file)
            x = df['X'].tolist()
            y = df['Y'].tolist()
            p = df['turning_point'].tolist()
            predict_x = []
            predict_y = []
            predict_z = []
            for i in range(len(p)):
                if p[i] == 1:
                    predict_x.append(x[i])
                    predict_y.append(y[i])
                    predict_z.append(i)
            # denoised trajectory: velocity curve and detected-only line
            df = pd.read_csv(denoise_file)
            x = df['X'].tolist()
            y = df['Y'].tolist()
            avg_vel = []
            for i in range(len(y)):
                avg_vel.append(find_vel(i,x,y))
            x_denoise_line = []
            y_denoise_line = []
            z_denoise_line = []
            for i in range(len(y)):
                if x[i] != 0 or y[i] != 0:
                    x_denoise_line.append(x[i])
                    y_denoise_line.append(y[i])
                    z_denoise_line.append(i)
            # peaks of the Y curve (computed but only used by the
            # commented-out 'g^' plot below)
            y_peaks,y_property = find_peaks(y_denoise_line,prominence=8,distance=5)
            y_peak = []
            z_peak = []
            for i in range(len(y_peaks)):
                y_peak.append(y_denoise_line[y_peaks[i]])
                z_peak.append(z_denoise_line[y_peaks[i]])
            # raw per-frame accelerations on the denoised trajectory
            ax = []
            ay = []
            z = []
            for i in range(len(x)):
                ax_temp,ay_temp = find_acc(i,x,y)
                if ax_temp != 0 or ay_temp != 0:
                    ax.append(ax_temp)
                    ay.append(ay_temp)
                    z.append(i)
            # accelerations at the ground-truth turning points
            df = pd.read_csv(out_file)
            p = df['turning_point'].tolist()
            result_ax = []
            result_ay = []
            result_az = []
            for i in range(len(p)):
                if p[i] == 1:
                    ax_temp,ay_temp = find_acc(i,x,y)
                    result_ax.append(ax_temp)
                    result_ay.append(ay_temp)
                    result_az.append(i)
            # accelerations at the predicted turning points
            df = pd.read_csv(predict_file)
            p = df['turning_point'].tolist()
            predict_ax = []
            predict_ay = []
            predict_az = []
            for i in range(len(p)):
                if p[i] == 1:
                    ax_temp,ay_temp = find_acc(i,x,y)
                    predict_ax.append(ax_temp)
                    predict_ay.append(ay_temp)
                    predict_az.append(i)
            # windowed-average accelerations over the whole rally
            avg_ax = []
            avg_ay = []
            avg_z = []
            for i in range(len(x)):
                avg_ax_temp,avg_ay_temp = average_acc(i,x,y)
                avg_ax.append(avg_ax_temp)
                avg_ay.append(avg_ay_temp)
                avg_z.append(i)
            # computed but unused except for debugging
            peaks, properties = find_peaks(avg_ay, prominence=3, distance=10, height=3)
            # print(peaks)
            # print(properties)
            df = pd.read_csv(out_file)
            p = df['turning_point'].tolist()
            result_avg_ax = []
            result_avg_ay = []
            result_avg_z = []
            for i in range(len(p)):
                if p[i] == 1:
                    avg_ax_temp,avg_ay_temp = average_acc(i,x,y)
                    result_avg_ax.append(avg_ax_temp)
                    result_avg_ay.append(avg_ay_temp)
                    result_avg_z.append(i)
            df = pd.read_csv(predict_file)
            p = df['turning_point'].tolist()
            predict_avg_ax = []
            predict_avg_ay = []
            predict_avg_z = []
            for i in range(len(p)):
                if p[i] == 1:
                    avg_ax_temp,avg_ay_temp = average_acc(i,x,y)
                    predict_avg_ax.append(avg_ax_temp)
                    predict_avg_ay.append(avg_ay_temp)
                    predict_avg_z.append(i)
            # 2x2 figure: position (top row) and avg acceleration (bottom row)
            fig = plt.figure(figsize=(12,8))
            # plt.subplot(2,2,1)
            # plt.plot(z,ay,'-')
            # plt.plot(result_az,result_ay,'ro')
            # plt.plot(predict_az,predict_ay,'bs')
            # plt.title("aY versus Frame")
            # plt.subplot(2,2,2)
            # plt.plot(z,ax,'-')
            # plt.plot(result_az,result_ax,'ro')
            # plt.plot(predict_az,predict_ax,'bs')
            # plt.title("aX versus Frame")
            plt.subplot(2,2,3)
            plt.plot(avg_z,avg_ay,'-')
            plt.plot(result_avg_z,result_avg_ay,'ro')
            plt.plot(predict_avg_z,predict_avg_ay,'bs')
            plt.title("Avg_aY versus Frame")
            plt.subplot(2,2,4)
            plt.plot(avg_z,avg_ax,'-')
            plt.plot(result_avg_z,result_avg_ax,'ro')
            plt.plot(predict_avg_z,predict_avg_ax,'bs')
            plt.title("Avg_aX versus Frame")
            # plt.subplot(2,2,1)
            # plt.plot(z_out_line,y_out_line,'-')
            # plt.plot(result_z,result_y,'ro')
            # plt.plot(predict_z,predict_y,'bs')
            # plt.title("Y versus Frame with tracknet")
            # plt.subplot(2,2,2)
            # plt.plot(z_out_line,x_out_line,'-')
            # plt.plot(result_z,result_x,'ro')
            # plt.plot(predict_z,predict_x,'bs')
            # plt.title("X versus Frame with tracknet")
            plt.subplot(2,2,1)
            plt.plot(z_denoise_line,y_denoise_line,'-')
            plt.plot(np.arange(len(avg_vel)),avg_vel,'--')
            plt.plot(result_z,result_y,'ro')
            plt.plot(predict_z,predict_y,'bs')
            # plt.plot(z_peak,y_peak,'g^')
            # vertical guides at the ground-truth turning frames
            for i in result_z:
                plt.axvline(x=i, ymin=0, ymax=1, color='bisque', linestyle='--')
            plt.title("Y versus Frame with denoise")
            plt.subplot(2,2,2)
            plt.plot(z_denoise_line,x_denoise_line,'-')
            plt.plot(result_z,result_x,'ro')
            plt.plot(predict_z,predict_x,'bs')
            for i in result_z:
                plt.axvline(x=i, ymin=0, ymax=1, color='bisque', linestyle='--')
            plt.title("X versus Frame with denoise")
            plt.savefig(plot_file)
            plt.close(fig)
            print("Save image as " + rally_score + "_plot.jpg")
        set_num += 1
|
# USAGE
# python neural_style_transfer_video.py
# during the process :
# press 'q' to quit
# press 'n' for next model
# press 'a' for auto models rotation switch (on/off)
# press 'l' to hide/display the legend
# press 's' to save the picture
# import the necessary packages
from helpers import SimpleDatasetRenamer
from helpers.pypeline import Step
from helpers.views import MailView
from imutils.video import VideoStream
from conf import config as conf
from imutils import paths
import itertools
import argparse
import imutils
import time
import cv2
import os
import sh
# grab the paths to all neural style transfer models in our 'models'
# directory, provided all models end with the '.t7' file extension
modelPaths = paths.list_files(conf.MODELS, validExts=(".t7",))
modelPaths = sorted(list(modelPaths))
# initialize the filename creator
sdr = SimpleDatasetRenamer()
# auto-rotation parameters initialization
auto = conf.AUTO_ROTATION_MODE
cnt = 0
# maps a boolean mode flag to its on-screen label
switch = {True:"on", False:"off"}
legend = conf.LIVE_LEGEND
email = conf.LIVE_EMAIL
# generate unique IDs for each of the model paths, then combine the
# two lists together
models = list(zip(range(0, len(modelPaths)), (modelPaths)))
# use the cycle function of itertools that can loop over all model
# paths, and then when the end is reached, restart again
modelIter = itertools.cycle(models)
(modelID, modelPath) = next(modelIter)
# load the neural style transfer model from disk
print("[INFO] loading style transfer model...")
net = cv2.dnn.readNetFromTorch(modelPath)
# initialize the video stream, then allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
print("[INFO] {}. {}".format(modelID + 1, modelPath))
while True:
# grab the frame from the threaded video stream
frame = vs.read()
# resize the frame to have a width of 600 pixels (while
# maintaining the aspect ratio), and then grab the image
# dimensions
copy = frame.copy()
frame = imutils.resize(frame, width=conf.LIVE_INPUT_WIDTH)
(h, w) = frame.shape[:2]
# construct a blob from the frame, set the input, and then perform a
# forward pass of the network
blob = cv2.dnn.blobFromImage(frame, 1.0, (w, h),
(103.939, 116.779, 123.680), swapRB=False, crop=False)
net.setInput(blob)
output = net.forward()
# reshape the output tensor, add back in the mean subtraction, and
# then swap the channel ordering
output = output.reshape((3, output.shape[2], output.shape[3]))
output[0] += 103.939
output[1] += 116.779
output[2] += 123.680
output /= 255.0
output = output.transpose(1, 2, 0)
# resize the output and draw the legend on it
output = imutils.resize(output, width=max(conf.LIVE_OUTPUT_WIDTH,
conf.LEGEND_WIDTH))
# check to see if we want to draw the legend
if legend:
output[0:conf.L_LEGEND_HEIGHT, 0:conf.LEGEND_WIDTH] = 0
cv2.putText(output, "Press 'n' for next model", (30, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
cv2.putText(output, "Press 'a' to switch {} auto-rotation".format(
switch[not auto]), (30, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
(255, 0, 0), 2)
cv2.putText(output, "Press 's' to save the picture", (30, 60),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
# show the original frame along with the output neural style
# transfer
cv2.imshow("Input", frame)
cv2.imshow("Output", output)
key = cv2.waitKey(1) & 0xFF
# check to see if we are in auto mode
if auto:
# if we reached the right number of frames, load the next model and then
# reset the frame counter
if cnt >= conf.AUTO_MAX_FRAME:
(modelID, modelPath) = next(modelIter)
print("[INFO] {}. {}".format(modelID + 1, modelPath))
net = cv2.dnn.readNetFromTorch(modelPath)
cnt = 0
# increment the number of frames on which we applied the same model
cnt += 1
# if the key 'a' (for "auto") is pressed, switch the auto-rotation mode
if key == ord("a"):
cnt = 0
auto = True if auto == False else False
print("[INFO] switching auto mode to: {}".format(switch[auto]))
# if the key 'l' (for "legend") is pressed, switch the display mode
if key == ord("l"):
legend = True if legend == False else False
print("[INFO] switching legend mode to: {}".format(switch[legend]))
# if the key 'e' (for "email") is pressed, switch the email enabled state
if key == ord("e"):
email = True if email == False else False
print("[INFO] switching email capture mode to: {}".format(switch[email]))
# if the `n` key is pressed (for "next"), load the next neural
# style transfer model
if key == ord("n"):
# grab the next nueral style transfer model model and load it
(modelID, modelPath) = next(modelIter)
print("[INFO] {}. {}".format(modelID + 1, modelPath))
net = cv2.dnn.readNetFromTorch(modelPath)
cnt = 0
# if the 's' key is pressed (for "save"), we want to save
# the image to disk
if key == ord("s"):
# check to see if the destination directories exists
# and if not, create it
if not os.path.exists(conf.ORIG_DIR):
os.makedirs(conf.ORIG_DIR)
if not os.path.exists(conf.DEEP_DIR):
os.makedirs(conf.DEEP_DIR)
# generate the paths to images
filename = str(sdr.id_generator(sequential=False)) + ".jpg"
origname = os.path.sep.join([conf.ORIG_DIR, filename])
deepname = os.path.sep.join([conf.DEEP_DIR, filename])
# copy the frame to disk and then create a pypeline Step which
# will launch the neural_style_transfer.py script as a shell
# command line so we give the required arguments for the script
cv2.imwrite(origname, copy)
transfer = Step("neural_style_transfer.py",
"apply neural style transfer to frame",
[["model", modelPath],
["image", origname],
["width", conf.NEURAL_INPUT_WIDTH],
["output-width", conf.NEURAL_OUTPUT_WIDTH],
["output-path", deepname]])
transfer.execute()
# initialize the output message
msg = "[INFO] frame saved as {}".format(filename)
# show a view to enter email information
if email:
interface = MailView()
interface.create()
interface.show()
# save the mapping between file <-> mail to disk
f = open("mails.csv", "a+")
f.write("{} {}\n".format(filename, interface.mail))
f.close()
msg += " for {}".format(interface.mail)
# print the output message about the filename and possibly the
# mapping with mail
print(msg)
# if the `q` (for "quit") key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
|
<filename>custom_widgets/navigation/navbutton/navbutton.py
from enum import Enum
from PySide2.QtGui import QPainter, QColor, QPixmap, QPen, QPolygon, QBrush
from PySide2.QtCore import QEnum, QSize, Qt, QRect, QPoint, QEvent
from PySide2.QtWidgets import QApplication, QPushButton
class NavButton(QPushButton):
    """
    Navigation button widget.

    Author: feiyangqingyun (QQ: 517216493) 2017-12-19
    Translator: sunchuquin (QQ: 1715216365) 2020-12-17

    Features:
    1. Configurable left/right/top/bottom padding around the text.
    2. Configurable text alignment.
    3. Configurable triangle indicator: visibility / edge length / position / color.
    4. Configurable icon: visibility / spacing / size / normal-, hover- and checked-state pixmaps.
    5. Configurable border line: visibility / width / spacing / position / color.
    6. Configurable normal / hover / checked background colors.
    7. Configurable normal / hover / checked text colors.
    8. Background may alternatively be filled with a brush instead of a plain color.
    """

    # Text alignment options (values mirror the corresponding Qt.AlignmentFlag values,
    # so they can be OR-ed with Qt flags in drawText()).
    @QEnum
    class TextAlign(Enum):
        TEXTALIGN_LEFT = 0x0001  # align left
        TEXTALIGN_RIGHT = 0x0002  # align right
        TEXTALIGN_TOP = 0x0020  # align top
        TEXTALIGN_BOTTOM = 0x0040  # align bottom
        TEXTALIGN_CENTER = 0x0004  # align horizontally centered

    # Edge on which the triangle indicator is drawn
    @QEnum
    class TrianglePosition(Enum):
        TRIANGLEPOSITION_LEFT = 0  # left edge
        TRIANGLEPOSITION_RIGHT = 1  # right edge
        TRIANGLEPOSITION_TOP = 2  # top edge
        TRIANGLEPOSITION_BOTTOM = 3  # bottom edge

    # Edge on which the border line is drawn
    @QEnum
    class LinePosition(Enum):
        LINEPOSITION_LEFT = 0  # left edge
        LINEPOSITION_RIGHT = 1  # right edge
        LINEPOSITION_TOP = 2  # top edge
        LINEPOSITION_BOTTOM = 3  # bottom edge

    # Edge on which the icon is drawn
    # NOTE(review): declared but not referenced by any visible method — drawIcon()
    # always paints on the left; presumably kept for API parity with the C++ original.
    @QEnum
    class IconPosition(Enum):
        ICONPOSITION_LEFT = 0  # left edge
        ICONPOSITION_RIGHT = 1  # right edge
        ICONPOSITION_TOP = 2  # top edge
        ICONPOSITION_BOTTOM = 3  # bottom edge

    def __init__(self, parent=None):
        super(NavButton, self).__init__(parent=parent)
        self.__paddingLeft: int = 20  # left padding of the text
        self.__paddingRight: int = 5  # right padding of the text
        self.__paddingTop: int = 5  # top padding of the text
        self.__paddingBottom: int = 5  # bottom padding of the text
        self.__textAlign: NavButton.TextAlign = NavButton.TextAlign.TEXTALIGN_LEFT  # text alignment
        self.__showTriangle: bool = False  # whether to show the triangle indicator
        self.__triangleLen: int = 5  # triangle edge length
        self.__trianglePosition: NavButton.TrianglePosition = NavButton.TrianglePosition.TRIANGLEPOSITION_RIGHT  # triangle position
        self.__triangleColor: QColor = QColor(255, 255, 255)  # triangle color
        self.__showIcon: bool = True  # whether to show the icon
        self.__iconSpace: int = 10  # icon spacing
        self.__iconSize: QSize = QSize(16, 16)  # icon size
        self.__iconNormal: QPixmap = QPixmap(0, 0)  # normal-state icon (null pixmap by default)
        self.__iconHover: QPixmap = QPixmap(0, 0)  # hover-state icon (null pixmap by default)
        self.__iconCheck: QPixmap = QPixmap(0, 0)  # checked-state icon (null pixmap by default)
        self.__showLine: bool = True  # whether to show the border line
        self.__lineSpace: int = 0  # line spacing
        self.__lineWidth: int = 5  # line width
        self.__linePosition: NavButton.LinePosition = NavButton.LinePosition.LINEPOSITION_LEFT  # line position
        self.__lineColor: QColor = QColor(0, 187, 158)  # line color
        self.__normalBgColor: QColor = QColor(230, 230, 230)  # normal background color
        self.__hoverBgColor: QColor = QColor(130, 130, 130)  # hover background color
        self.__checkBgColor: QColor = QColor(80, 80, 80)  # checked background color
        self.__normalTextColor: QColor = QColor(100, 100, 100)  # normal text color
        self.__hoverTextColor: QColor = QColor(255, 255, 255)  # hover text color
        self.__checkTextColor: QColor = QColor(255, 255, 255)  # checked text color
        self.__normalBgBrush: QBrush = Qt.NoBrush  # normal background brush (Qt.NoBrush = use color)
        self.__hoverBgBrush: QBrush = Qt.NoBrush  # hover background brush (Qt.NoBrush = use color)
        self.__checkBgBrush: QBrush = Qt.NoBrush  # checked background brush (Qt.NoBrush = use color)
        self.__hover: bool = False  # hover flag, maintained by enter/leaveEvent
        self.setCheckable(True)
        # Default text; the literal means "navigation button".
        self.setText("导航按钮")

    def enterEvent(self, event: QEvent) -> None:
        """Set the hover flag and repaint when the mouse enters the widget."""
        self.__hover = True
        self.update()

    def leaveEvent(self, event: QEvent) -> None:
        """Clear the hover flag and repaint when the mouse leaves the widget."""
        self.__hover = False
        self.update()

    def paintEvent(self, event: QEvent) -> None:
        """Paint the button: background, text, icon, border line and triangle, in that order."""
        # painting preparation: enable antialiasing
        painter: QPainter = QPainter(self)
        painter.setRenderHints(QPainter.Antialiasing | QPainter.TextAntialiasing)
        # draw the background
        self.drawBg(painter)
        # draw the text
        self.drawText(painter)
        # draw the icon
        self.drawIcon(painter)
        # draw the border line
        self.drawLine(painter)
        # draw the triangle indicator
        self.drawTriangle(painter)

    def drawBg(self, painter: QPainter) -> None:
        """Fill the background, leaving room for the border line on the configured edge."""
        painter.save()
        painter.setPen(Qt.NoPen)
        width: int = self.width()
        height: int = self.height()
        # Shrink the background rect on the side occupied by the border line.
        bgRect: QRect = QRect()
        if self.__linePosition == NavButton.LinePosition.LINEPOSITION_LEFT:
            bgRect = QRect(self.lineSpace, 0, width - self.__lineSpace, height)
        elif self.__linePosition == NavButton.LinePosition.LINEPOSITION_RIGHT:
            bgRect = QRect(0, 0, width - self.__lineSpace, height)
        elif self.__linePosition == NavButton.LinePosition.LINEPOSITION_TOP:
            bgRect = QRect(0, self.lineSpace, width, height - self.__lineSpace)
        elif self.__linePosition == NavButton.LinePosition.LINEPOSITION_BOTTOM:
            bgRect = QRect(0, 0, width, height - self.__lineSpace)
        # use the brush if one has been configured
        if self.isChecked():
            bgBrush: QBrush = self.__checkBgBrush
        elif self.__hover:
            bgBrush: QBrush = self.__hoverBgBrush
        else:
            bgBrush: QBrush = self.__normalBgBrush
        if bgBrush != Qt.NoBrush:
            painter.setBrush(bgBrush)
        else:
            # otherwise pick the background color matching the current state
            if self.isChecked():
                bgColor: QColor = self.__checkBgColor
            elif self.__hover:
                bgColor: QColor = self.__hoverBgColor
            else:
                bgColor: QColor = self.__normalBgColor
            painter.setBrush(bgColor)
        painter.drawRect(bgRect)
        painter.restore()

    def drawText(self, painter: QPainter) -> None:
        """Draw the button text inside the padded rect with the configured alignment."""
        painter.save()
        painter.setBrush(Qt.NoBrush)
        # pick the text color matching the current state
        if self.isChecked():
            textColor: QColor = self.__checkTextColor
        elif self.__hover:
            textColor: QColor = self.__hoverTextColor
        else:
            textColor: QColor = self.__normalTextColor
        textRect = QRect(self.__paddingLeft,
                         self.__paddingTop,
                         self.width() - self.__paddingLeft - self.__paddingRight,
                         self.height() - self.__paddingTop - self.__paddingBottom)
        painter.setPen(textColor)
        # TextAlign values mirror Qt flags, so OR-ing with Qt.AlignVCenter is valid.
        painter.drawText(textRect, self.__textAlign.value | Qt.AlignVCenter, self.text())
        painter.restore()

    def drawIcon(self, painter: QPainter) -> None:
        """Draw the state-dependent icon, vertically centered on the left side."""
        if not self.__showIcon:
            return
        painter.save()
        if self.isChecked():
            pix: QPixmap = self.__iconCheck
        elif self.__hover:
            pix: QPixmap = self.__iconHover
        else:
            pix: QPixmap = self.__iconNormal
        if not pix.isNull():
            # smoothly scale the icon, keeping its aspect ratio
            pix: QPixmap = pix.scaled(self.__iconSize, Qt.KeepAspectRatio, Qt.SmoothTransformation)
            painter.drawPixmap(self.__iconSpace, int((self.height() - self.__iconSize.height()) / 2), pix)
        painter.restore()

    def drawLine(self, painter: QPainter) -> None:
        """Draw the border line on the configured edge; only visible while checked."""
        if not self.__showLine:
            return
        if not self.isChecked():
            return
        painter.save()
        pen: QPen = QPen()
        pen.setWidth(self.__lineWidth)
        pen.setColor(self.__lineColor)
        painter.setPen(pen)
        # set the line endpoints according to the configured position
        pointStart: QPoint = QPoint()
        pointEnd: QPoint = QPoint()
        if self.__linePosition == NavButton.LinePosition.LINEPOSITION_LEFT:
            pointStart = QPoint(0, 0)
            pointEnd = QPoint(0, self.height())
        elif self.__linePosition == NavButton.LinePosition.LINEPOSITION_RIGHT:
            pointStart = QPoint(self.width(), 0)
            pointEnd = QPoint(self.width(), self.height())
        elif self.__linePosition == NavButton.LinePosition.LINEPOSITION_TOP:
            pointStart = QPoint(0, 0)
            pointEnd = QPoint(self.width(), 0)
        elif self.__linePosition == NavButton.LinePosition.LINEPOSITION_BOTTOM:
            pointStart = QPoint(0, self.height())
            pointEnd = QPoint(self.width(), self.height())
        painter.drawLine(pointStart, pointEnd)
        painter.restore()

    def drawTriangle(self, painter: QPainter) -> None:
        """Draw the triangle indicator centered on the configured edge."""
        if not self.__showTriangle:
            return
        # only shown while hovered or checked
        if (not self.__hover) and (not self.isChecked()):
            return
        painter.save()
        painter.setPen(Qt.NoPen)
        painter.setBrush(self.__triangleColor)
        # The three vertices are derived from the configured edge length,
        # centered on the middle of the chosen edge and pointing inwards.
        width: int = self.width()
        height: int = self.height()
        midWidth: int = width // 2
        midHeight: int = height // 2
        pts: QPolygon = QPolygon()
        if self.__trianglePosition == NavButton.TrianglePosition.TRIANGLEPOSITION_LEFT:
            pts.setPoints(3,
                          self.__triangleLen, midHeight,
                          0, midHeight - self.__triangleLen,
                          0, midHeight + self.__triangleLen)
        elif self.__trianglePosition == NavButton.TrianglePosition.TRIANGLEPOSITION_RIGHT:
            pts.setPoints(3,
                          width - self.__triangleLen, midHeight,
                          width, midHeight - self.__triangleLen,
                          width, midHeight + self.__triangleLen)
        elif self.__trianglePosition == NavButton.TrianglePosition.TRIANGLEPOSITION_TOP:
            pts.setPoints(3,
                          midWidth, self.__triangleLen,
                          midWidth - self.__triangleLen, 0,
                          midWidth + self.__triangleLen, 0)
        elif self.__trianglePosition == NavButton.TrianglePosition.TRIANGLEPOSITION_BOTTOM:
            pts.setPoints(3,
                          midWidth, height - self.__triangleLen,
                          midWidth - self.__triangleLen, height,
                          midWidth + self.__triangleLen, height)
        painter.drawPolygon(pts)
        painter.restore()

    def getPaddingLeft(self) -> int:
        """Get the left text padding."""
        return self.__paddingLeft

    def getPaddingRight(self) -> int:
        """Get the right text padding."""
        return self.__paddingRight

    def getPaddingTop(self) -> int:
        """Get the top text padding."""
        return self.__paddingTop

    def getPaddingBottom(self) -> int:
        """Get the bottom text padding."""
        return self.__paddingBottom

    def getTextAlign(self) -> TextAlign:
        """Get the text alignment."""
        return self.__textAlign

    def getShowTriangle(self) -> bool:
        """Get whether the triangle indicator is shown."""
        return self.__showTriangle

    def getTriangleLen(self) -> int:
        """Get the triangle edge length."""
        return self.__triangleLen

    def getTrianglePosition(self) -> TrianglePosition:
        """Get the triangle position."""
        return self.__trianglePosition

    def getTriangleColor(self) -> QColor:
        """Get the triangle color."""
        return self.__triangleColor

    def getShowIcon(self) -> bool:
        """Get whether the icon is shown."""
        return self.__showIcon

    def getIconSpace(self) -> int:
        """Get the icon spacing."""
        return self.__iconSpace

    def getIconSize(self) -> QSize:
        """Get the icon size."""
        return self.__iconSize

    def getIconNormal(self) -> QPixmap:
        """Get the normal-state icon."""
        return self.__iconNormal

    def getIconHover(self) -> QPixmap:
        """Get the hover-state icon."""
        return self.__iconHover

    def getIconCheck(self) -> QPixmap:
        """Get the checked-state icon."""
        return self.__iconCheck

    def getShowLine(self) -> bool:
        """Get whether the border line is shown."""
        return self.__showLine

    def getLineSpace(self) -> int:
        """Get the line spacing."""
        return self.__lineSpace

    def getLineWidth(self) -> int:
        """Get the line width."""
        return self.__lineWidth

    def getLinePosition(self) -> LinePosition:
        """Get the line position."""
        return self.__linePosition

    def getLineColor(self) -> QColor:
        """Get the line color."""
        return self.__lineColor

    def getNormalBgColor(self) -> QColor:
        """Get the normal background color."""
        return self.__normalBgColor

    def getHoverBgColor(self) -> QColor:
        """Get the hover background color."""
        return self.__hoverBgColor

    def getCheckBgColor(self) -> QColor:
        """Get the checked background color."""
        return self.__checkBgColor

    def getNormalTextColor(self) -> QColor:
        """Get the normal text color."""
        return self.__normalTextColor

    def getHoverTextColor(self) -> QColor:
        """Get the hover text color."""
        return self.__hoverTextColor

    def getCheckTextColor(self) -> QColor:
        """Get the checked text color."""
        return self.__checkTextColor

    def sizeHint(self) -> QSize:
        """Return the preferred widget size."""
        return QSize(100, 30)

    def minimumSizeHint(self) -> QSize:
        """Return the minimum widget size."""
        return QSize(20, 10)

    def setPaddingLeft(self, padding_left: int) -> None:
        """Set the left text padding."""
        if self.__paddingLeft != padding_left:
            self.__paddingLeft = padding_left
            self.update()

    def setPaddingRight(self, padding_right: int) -> None:
        """Set the right text padding."""
        if self.__paddingRight != padding_right:
            self.__paddingRight = padding_right
            self.update()

    def setPaddingTop(self, padding_top: int) -> None:
        """Set the top text padding."""
        if self.__paddingTop != padding_top:
            self.__paddingTop = padding_top
            self.update()

    def setPaddingBottom(self, padding_bottom: int) -> None:
        """Set the bottom text padding."""
        if self.__paddingBottom != padding_bottom:
            self.__paddingBottom = padding_bottom
            self.update()

    def setPadding(self, padding_left: int, padding_right: int, padding_top: int, padding_bottom: int) -> None:
        """Set all four text paddings at once."""
        self.__paddingLeft = padding_left
        self.__paddingRight = padding_right
        self.__paddingTop = padding_top
        self.__paddingBottom = padding_bottom
        self.update()

    def setTextAlign(self, text_align: TextAlign) -> None:
        """Set the text alignment."""
        if self.__textAlign != text_align:
            self.__textAlign = text_align
            self.update()

    def setShowTriangle(self, show_triangle: bool) -> None:
        """Set whether the triangle indicator is shown."""
        if self.__showTriangle != show_triangle:
            self.__showTriangle = show_triangle
            self.update()

    def setTriangleLen(self, triangle_len: int) -> None:
        """Set the triangle edge length."""
        if self.__triangleLen != triangle_len:
            self.__triangleLen = triangle_len
            self.update()

    def setTrianglePosition(self, triangle_position: TrianglePosition) -> None:
        """Set the triangle position."""
        if self.__trianglePosition != triangle_position:
            self.__trianglePosition = triangle_position
            self.update()

    def setTriangleColor(self, triangle_color: QColor) -> None:
        """Set the triangle color."""
        if self.__triangleColor != triangle_color:
            self.__triangleColor = triangle_color
            self.update()

    def setShowIcon(self, show_icon: bool) -> None:
        """Set whether the icon is shown."""
        if self.__showIcon != show_icon:
            self.__showIcon = show_icon
            self.update()

    def setIconSpace(self, icon_space: int) -> None:
        """Set the icon spacing."""
        if self.__iconSpace != icon_space:
            self.__iconSpace = icon_space
            self.update()

    def setIconSize(self, icon_size: QSize) -> None:
        """Set the icon size."""
        if self.__iconSize != icon_size:
            self.__iconSize = icon_size
            self.update()

    def setIconNormal(self, icon_normal: QPixmap) -> None:
        """Set the normal-state icon."""
        self.__iconNormal = icon_normal
        self.update()

    def setIconHover(self, icon_hover: QPixmap) -> None:
        """Set the hover-state icon."""
        self.__iconHover = icon_hover
        self.update()

    def setIconCheck(self, icon_check: QPixmap) -> None:
        """Set the checked-state icon."""
        self.__iconCheck = icon_check
        self.update()

    def setShowLine(self, show_line: bool) -> None:
        """Set whether the border line is shown."""
        if self.__showLine != show_line:
            self.__showLine = show_line
            self.update()

    def setLineSpace(self, line_space: int) -> None:
        """Set the line spacing."""
        if self.__lineSpace != line_space:
            self.__lineSpace = line_space
            self.update()

    def setLineWidth(self, line_width: int) -> None:
        """Set the line width."""
        if self.__lineWidth != line_width:
            self.__lineWidth = line_width
            self.update()

    def setLinePosition(self, line_position: LinePosition) -> None:
        """Set the line position."""
        if self.__linePosition != line_position:
            self.__linePosition = line_position
            self.update()

    def setLineColor(self, line_color: QColor) -> None:
        """Set the line color."""
        if self.__lineColor != line_color:
            self.__lineColor = line_color
            self.update()

    def setNormalBgColor(self, normal_bg_color: QColor) -> None:
        """Set the normal background color."""
        if self.__normalBgColor != normal_bg_color:
            self.__normalBgColor = normal_bg_color
            self.update()

    def setHoverBgColor(self, hover_bg_color: QColor) -> None:
        """Set the hover background color."""
        if self.__hoverBgColor != hover_bg_color:
            self.__hoverBgColor = hover_bg_color
            self.update()

    def setCheckBgColor(self, check_bg_color: QColor) -> None:
        """Set the checked background color."""
        if self.__checkBgColor != check_bg_color:
            self.__checkBgColor = check_bg_color
            self.update()

    def setNormalTextColor(self, normal_text_color: QColor) -> None:
        """Set the normal text color."""
        if self.__normalTextColor != normal_text_color:
            self.__normalTextColor = normal_text_color
            self.update()

    def setHoverTextColor(self, hover_text_color: QColor) -> None:
        """Set the hover text color."""
        if self.__hoverTextColor != hover_text_color:
            self.__hoverTextColor = hover_text_color
            self.update()

    def setCheckTextColor(self, check_text_color: QColor) -> None:
        """Set the checked text color."""
        if self.__checkTextColor != check_text_color:
            self.__checkTextColor = check_text_color
            self.update()

    def setNormalBgBrush(self, normal_bg_brush: QBrush) -> None:
        """Set the normal background brush."""
        if self.__normalBgBrush != normal_bg_brush:
            self.__normalBgBrush = normal_bg_brush
            self.update()

    def setHoverBgBrush(self, hover_bg_brush: QBrush) -> None:
        """Set the hover background brush."""
        if self.__hoverBgBrush != hover_bg_brush:
            self.__hoverBgBrush = hover_bg_brush
            self.update()

    def setCheckBgBrush(self, check_bg_brush: QBrush) -> None:
        """Set the checked background brush."""
        if self.__checkBgBrush != check_bg_brush:
            self.__checkBgBrush = check_bg_brush
            self.update()

    # Pythonic property aliases over the getter/setter pairs above.
    # NOTE(review): the three *BgBrush setters deliberately have no matching
    # getters/properties here, mirroring the visible API.
    paddingLeft: int = property(fget=getPaddingLeft, fset=setPaddingLeft, fdel=None, doc="")
    paddingRight: int = property(fget=getPaddingRight, fset=setPaddingRight, fdel=None, doc="")
    paddingTop: int = property(fget=getPaddingTop, fset=setPaddingTop, fdel=None, doc="")
    paddingBottom: int = property(fget=getPaddingBottom, fset=setPaddingBottom, fdel=None, doc="")
    textAlign: TextAlign = property(fget=getTextAlign, fset=setTextAlign, fdel=None, doc="")
    showTriangle: bool = property(fget=getShowTriangle, fset=setShowTriangle, fdel=None, doc="")
    triangleLen: int = property(fget=getTriangleLen, fset=setTriangleLen, fdel=None, doc="")
    trianglePosition: TrianglePosition = property(fget=getTrianglePosition, fset=setTrianglePosition, fdel=None, doc="")
    triangleColor: QColor = property(fget=getTriangleColor, fset=setTriangleColor, fdel=None, doc="")
    showIcon: bool = property(fget=getShowIcon, fset=setShowIcon, fdel=None, doc="")
    iconSpace: int = property(fget=getIconSpace, fset=setIconSpace, fdel=None, doc="")
    iconSize: QSize = property(fget=getIconSize, fset=setIconSize, fdel=None, doc="")
    iconNormal: QPixmap = property(fget=getIconNormal, fset=setIconNormal, fdel=None, doc="")
    iconHover: QPixmap = property(fget=getIconHover, fset=setIconHover, fdel=None, doc="")
    iconCheck: QPixmap = property(fget=getIconCheck, fset=setIconCheck, fdel=None, doc="")
    showLine: bool = property(fget=getShowLine, fset=setShowLine, fdel=None, doc="")
    lineSpace: int = property(fget=getLineSpace, fset=setLineSpace, fdel=None, doc="")
    lineWidth: int = property(fget=getLineWidth, fset=setLineWidth, fdel=None, doc="")
    linePosition: LinePosition = property(fget=getLinePosition, fset=setLinePosition, fdel=None, doc="")
    lineColor: QColor = property(fget=getLineColor, fset=setLineColor, fdel=None, doc="")
    normalBgColor: QColor = property(fget=getNormalBgColor, fset=setNormalBgColor, fdel=None, doc="")
    hoverBgColor: QColor = property(fget=getHoverBgColor, fset=setHoverBgColor, fdel=None, doc="")
    checkBgColor: QColor = property(fget=getCheckBgColor, fset=setCheckBgColor, fdel=None, doc="")
    normalTextColor: QColor = property(fget=getNormalTextColor, fset=setNormalTextColor, fdel=None, doc="")
    hoverTextColor: QColor = property(fget=getHoverTextColor, fset=setHoverTextColor, fdel=None, doc="")
    checkTextColor: QColor = property(fget=getCheckTextColor, fset=setCheckTextColor, fdel=None, doc="")
if __name__ == '__main__':
    import sys

    def _on_clicked(is_clicked: bool) -> None:
        """Echo the button's checked state to stdout on every click."""
        print(is_clicked)

    demo_app: QApplication = QApplication(sys.argv)
    demo_button: NavButton = NavButton()
    demo_button.clicked.connect(_on_clicked)
    demo_button.show()
    sys.exit(demo_app.exec_())
|
<filename>arosics/CoReg_local.py
# -*- coding: utf-8 -*-
# AROSICS - Automated and Robust Open-Source Image Co-Registration Software
#
# Copyright (C) 2017-2021
# - <NAME> (GFZ Potsdam, <EMAIL>)
# - Helmholtz Centre Potsdam - GFZ German Research Centre for Geosciences Potsdam,
# Germany (https://www.gfz-potsdam.de/)
#
# This software was developed within the context of the GeoMultiSens project funded
# by the German Federal Ministry of Education and Research
# (project grant code: 01 IS 14 010 A-C).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import os
from copy import copy
from typing import Tuple, Union, Optional
from collections import OrderedDict
# custom
from osgeo import gdal
try:
import pyfftw
except ImportError:
pyfftw = None
import numpy as np
from matplotlib import pyplot as plt # noqa F401
from geopandas import GeoDataFrame # noqa F401
from .Tie_Point_Grid import Tie_Point_Grid
from .CoReg import COREG
from .DeShifter import DESHIFTER
from .geometry import has_metaRotation, remove_metaRotation
from py_tools_ds.geo.coord_trafo import transform_any_prj, reproject_shapelyGeometry
from py_tools_ds.geo.map_info import geotransform2mapinfo
from geoarray import GeoArray
__author__ = '<NAME>'
class COREG_LOCAL(object):
"""
COREG_LOCAL applies the algorithm to detect spatial shifts to the whole overlap area of the input images.
    Spatial shifts are calculated for each point of a grid whose parameters can be adjusted using keyword
arguments. Shift correction performs a polynomial transformation using the calculated shifts of each point in the
grid as GCPs. Thus this class can be used to correct for locally varying geometric distortions of the target image.
See help(COREG_LOCAL) for documentation.
"""
    def __init__(self,
                 im_ref: Union[GeoArray, str],
                 im_tgt: Union[GeoArray, str],
                 grid_res: float,
                 max_points: int = None,
                 window_size: Tuple[int, int] = (256, 256),
                 path_out: str = None,
                 fmt_out: str = 'ENVI',
                 out_crea_options: list = None,
                 projectDir: str = None,
                 r_b4match: int = 1,
                 s_b4match: int = 1,
                 max_iter: int = 5,
                 max_shift: int = 5,
                 tieP_filter_level: int = 3,
                 min_reliability: float = 60,
                 rs_max_outlier: float = 10,
                 rs_tolerance: float = 2.5,
                 align_grids: bool = True,
                 match_gsd: bool = False,
                 out_gsd: float = None,
                 target_xyGrid=None,
                 resamp_alg_deshift: str = 'cubic',
                 resamp_alg_calc: str = 'cubic',
                 footprint_poly_ref: str = None,
                 footprint_poly_tgt: str = None,
                 data_corners_ref: list = None,
                 data_corners_tgt: list = None,
                 outFillVal: int = -9999,
                 nodata: Tuple[int, int] = (None, None),
                 calc_corners: bool = True,
                 binary_ws: bool = True,
                 force_quadratic_win: bool = True,
                 mask_baddata_ref: Union[GeoArray, str] = None,
                 mask_baddata_tgt: Union[GeoArray, str] = None,
                 CPUs: int = None,
                 progress: bool = True,
                 v: bool = False,
                 q: bool = False,
                 ignore_errors: bool = True
                 ) -> None:
        """
        Get an instance of COREG_LOCAL.

        :param im_ref:
            source path of reference image (any GDAL compatible image format is supported)
        :param im_tgt:
            source path of image to be shifted (any GDAL compatible image format is supported)
        :param grid_res:
            tie point grid resolution in pixels of the target image (x-direction)
        :param max_points:
            maximum number of points used to find coregistration tie points
            NOTE: Points are selected randomly from the given point grid (specified by 'grid_res').
            If the point grid does not provide enough points, all available points are chosen.
        :param window_size:
            custom matching window size [pixels] (default: (256,256))
        :param path_out:
            target path of the coregistered image
            - if None (default), no output is written to disk
            - if 'auto': /dir/of/im1/<im1>__shifted_to__<im0>.bsq
        :param fmt_out:
            raster file format for output file. ignored if path_out is None. Can be any GDAL compatible raster file
            format (e.g. 'ENVI', 'GTIFF'; default: ENVI). Refer to https://gdal.org/drivers/raster/index.html to get a
            full list of supported formats.
        :param out_crea_options:
            GDAL creation options for the output image, e.g. ["QUALITY=80", "REVERSIBLE=YES", "WRITE_METADATA=YES"]
        :param projectDir:
            name of a project directory where to store all the output results. If given, name is inserted into all
            automatically generated output paths.
        :param r_b4match:
            band of reference image to be used for matching (starts with 1; default: 1)
        :param s_b4match:
            band of shift image to be used for matching (starts with 1; default: 1)
        :param max_iter:
            maximum number of iterations for matching (default: 5)
        :param max_shift:
            maximum shift distance in reference image pixel units (default: 5 px)
        :param tieP_filter_level:
            filter tie points used for shift correction in different levels (default: 3).
            NOTE: lower levels are also included if a higher level is chosen
            - Level 0: no tie point filtering
            - Level 1: Reliability filtering
            - filter all tie points out that have a low reliability according to internal tests
            - Level 2: SSIM filtering
            - filters all tie points out where shift correction does not increase image similarity within
            matching window (measured by mean structural similarity index)
            - Level 3: RANSAC outlier detection
        :param min_reliability:
            Tie point filtering: minimum reliability threshold, below which tie points are marked as false-positives
            (default: 60%)
            - accepts values between 0% (no reliability) and 100 % (perfect reliability)
            HINT: decrease this value in case of poor signal-to-noise ratio of your input data
        :param rs_max_outlier:
            RANSAC tie point filtering: proportion of expected outliers (default: 10%)
        :param rs_tolerance:
            RANSAC tie point filtering: percentage tolerance for max_outlier_percentage (default: 2.5%)
        :param align_grids:
            True: align the input coordinate grid to the reference (does not affect the output pixel size as long as
            input and output pixel sizes are compatible (5:30 or 10:30 but not 4:30), default = True
        :param match_gsd:
            True: match the input pixel size to the reference pixel size,
            default = False
        :param out_gsd:
            output pixel size in units of the reference coordinate system (default = pixel size of the input array),
            given values are overridden by match_gsd=True
        :param target_xyGrid:
            a list with a target x-grid and a target y-grid like [[15,45], [15,45]]
            This overrides 'out_gsd', 'align_grids' and 'match_gsd'.
        :param resamp_alg_deshift:
            the resampling algorithm to be used for shift correction (if necessary)
            valid algorithms: nearest, bilinear, cubic, cubic_spline, lanczos, average, mode, max, min, med, q1, q3
            (default: cubic)
        :param resamp_alg_calc:
            the resampling algorithm to be used for all warping processes during calculation of spatial shifts
            valid algorithms: nearest, bilinear, cubic, cubic_spline, lanczos, average, mode, max, min, med, q1, q3
            (default: cubic (highly recommended))
        :param footprint_poly_ref:
            footprint polygon of the reference image (WKT string or shapely.geometry.Polygon),
            e.g. 'POLYGON ((299999 6000000, 299999 5890200, 409799 5890200, 409799 6000000, 299999 6000000))'
        :param footprint_poly_tgt:
            footprint polygon of the image to be shifted (WKT string or shapely.geometry.Polygon)
            e.g. 'POLYGON ((299999 6000000, 299999 5890200, 409799 5890200, 409799 6000000, 299999 6000000))'
        :param data_corners_ref:
            map coordinates of data corners within reference image. ignored if footprint_poly_ref is given.
        :param data_corners_tgt:
            map coordinates of data corners within image to be shifted. ignored if footprint_poly_tgt is given.
        :param outFillVal:
            if given the generated tie point grid is filled with this value in case no match could be found during
            co-registration (default: -9999)
        :param nodata:
            no data values for reference image and image to be shifted
        :param calc_corners:
            calculate true positions of the dataset corners in order to get a useful matching window position within
            the actual image overlap
            (default: True; deactivated if 'data_corners_im0' and 'data_corners_im1' are given)
        :param binary_ws:
            use binary X/Y dimensions for the matching window (default: True)
        :param force_quadratic_win:
            force a quadratic matching window (default: True)
        :param mask_baddata_ref:
            path to a 2D boolean mask file (or an instance of BadDataMask) for the reference image where all bad data
            pixels (e.g. clouds) are marked with True and the remaining pixels with False. Must have the same
            geographic extent and projection like 'im_ref'. The mask is used to check if the chosen matching window
            position is valid in the sense of useful data. Otherwise this window position is rejected.
        :param mask_baddata_tgt:
            path to a 2D boolean mask file (or an instance of BadDataMask) for the image to be shifted where all bad
            data pixels (e.g. clouds) are marked with True and the remaining pixels with False. Must have the same
            geographic extent and projection like 'im_ref'. The mask is used to check if the chosen matching window
            position is valid in the sense of useful data. Otherwise this window position is rejected.
        :param CPUs:
            number of CPUs to use during calculation of tie point grid (default: None, which means 'all CPUs available')
        :param progress:
            show progress bars (default: True)
        :param v:
            verbose mode (default: False)
        :param q:
            quiet mode (default: False)
        :param ignore_errors:
            Useful for batch processing. (default: True)
        """
        # assertions / input validation
        assert gdal.GetDriverByName(fmt_out), "'%s' is not a supported GDAL driver." % fmt_out
        if match_gsd and out_gsd:
            warnings.warn("'-out_gsd' is ignored because '-match_gsd' is set.\n")
        if out_gsd:
            assert isinstance(out_gsd, list) and len(out_gsd) == 2, 'out_gsd must be a list with two values.'

        # Snapshot all constructor arguments (must happen before any further locals are created).
        self.params = dict([x for x in locals().items() if x[0] != "self" and not x[0].startswith('__')])

        # NOTE: self.imref and self.im2shift are handled completely independent from self.COREG_obj.ref and
        # self.COREG_obj.shift. self.COREG_obj.ref and self.COREG_obj.shift are used for shift calculation and
        # correction is applied to self.im2shift.
        self.imref = GeoArray(im_ref, nodata=nodata[0], progress=progress, q=q)
        self.im2shift = GeoArray(im_tgt, nodata=nodata[1], progress=progress, q=q)
        self.path_out = path_out  # updated by self._set_outpathes
        self.fmt_out = fmt_out
        self.out_creaOpt = out_crea_options
        self._projectDir = projectDir
        self.grid_res = grid_res
        self.max_points = max_points
        self.window_size = window_size
        self.max_shift = max_shift
        self.max_iter = max_iter
        self.tieP_filter_level = tieP_filter_level
        self.min_reliability = min_reliability
        self.rs_max_outlier = rs_max_outlier
        self.rs_tolerance = rs_tolerance
        self.align_grids = align_grids
        self.match_gsd = match_gsd
        self.out_gsd = out_gsd
        self.target_xyGrid = target_xyGrid
        self.rspAlg_DS = resamp_alg_deshift  # TODO convert integers to strings
        self.rspAlg_calc = resamp_alg_calc
        self.calc_corners = calc_corners
        self.nodata = nodata
        self.outFillVal = outFillVal
        self.bin_ws = binary_ws
        self.force_quadratic_win = force_quadratic_win
        self.CPUs = CPUs
        self.path_verbose_out = ''  # TODO
        self.v = v
        self.q = q if not v else False  # overridden by v
        self.progress = progress if not q else False  # overridden by q
        self.ignErr = ignore_errors  # FIXME this is not yet implemented for COREG_LOCAL

        assert self.tieP_filter_level in range(4), 'Invalid tie point filter level.'
        assert isinstance(self.imref, GeoArray) and isinstance(self.im2shift, GeoArray), \
            'Something went wrong with the creation of GeoArray instances for reference or target image. The created ' \
            'instances do not seem to belong to the GeoArray class. If you are working in Jupyter Notebook, reset ' \
            'the kernel and try again.'

        # Borrow COREG's output path logic without inheriting from it.
        COREG.__dict__['_set_outpathes'](self, self.imref, self.im2shift)
        # make sure that the output directory of coregistered image is the project directory if a project directory is
        # given
        if path_out and projectDir and os.path.basename(self.path_out):
            self.path_out = os.path.join(self.projectDir, os.path.basename(self.path_out))
        gdal.AllRegister()

        # resample input data in case there is a metadata rotation (not handled by AROSICS)
        self._check_and_handle_metaRotation()

        try:
            # ignore_errors must be False because in case COREG init fails, coregistration for the whole scene fails
            self.COREG_obj = COREG(self.imref, self.im2shift,
                                   ws=window_size,
                                   footprint_poly_ref=footprint_poly_ref,
                                   footprint_poly_tgt=footprint_poly_tgt,
                                   data_corners_ref=data_corners_ref,
                                   data_corners_tgt=data_corners_tgt,
                                   resamp_alg_calc=self.rspAlg_calc,
                                   calc_corners=calc_corners,
                                   r_b4match=r_b4match,
                                   s_b4match=s_b4match,
                                   max_iter=max_iter,
                                   max_shift=max_shift,
                                   nodata=nodata,
                                   mask_baddata_ref=None,  # see below
                                   mask_baddata_tgt=None,
                                   CPUs=self.CPUs,
                                   force_quadratic_win=self.force_quadratic_win,
                                   binary_ws=self.bin_ws,
                                   progress=self.progress,
                                   v=v,
                                   q=q,
                                   ignore_errors=False)
        except Exception:
            warnings.warn('\nFirst attempt to check the functionality of co-registration failed. Check your '
                          'input data and parameters. The following error occurred:', stacklevel=3)
            raise

        if pyfftw:
            self.check_if_fftw_works()

        # add bad data mask
        # (mask is not added during initialization of COREG object in order to avoid bad data area errors there)
        if mask_baddata_ref is not None:
            self.COREG_obj.ref.mask_baddata = mask_baddata_ref
        if mask_baddata_tgt is not None:
            self.COREG_obj.shift.mask_baddata = mask_baddata_tgt

        self._tiepoint_grid = None  # set by self.tiepoint_grid
        self._CoRegPoints_table = None  # set by self.CoRegPoints_table
        self._coreg_info = None  # set by self.coreg_info
        self.deshift_results = None  # set by self.correct_shifts()
        self._success = None  # set by self.success property
def _check_and_handle_metaRotation(self):
"""Check if the provided input data have a metadata rotation and if yes, correct it AND equalize grids.
In case there is a rotation, the GDAL GeoTransform is not 0 at positions 2 or 4. So far, AROSICS does not
handle such rotations, so the resampling is needed to make things work. The pixel grid equalization is also
done here to avoid a double-resampling (grid would be equalized by COREG.equalize_pixGrids() otherwise).
"""
grid2use = 'ref' if self.im2shift.xgsd <= self.imref.xgsd else 'shift'
if has_metaRotation(self.imref) or has_metaRotation(self.im2shift):
msg = 'The %s image needs to be resampled because it has a row/column rotation in '\
'its map info which is not handled by AROSICS.'
if grid2use == 'ref':
if has_metaRotation(self.imref):
warnings.warn(msg % 'reference')
self.imref = remove_metaRotation(self.imref)
# resample target to reference image
if not self.q:
print('Adapting the target image pixel grid to the one of the reference image for shift detection.')
self.im2shift.reproject_to_new_grid(prototype=self.imref, CPUs=self.CPUs)
else:
# remove any metadata rotation (a rotation that only exists in the map info)
if has_metaRotation(self.im2shift):
warnings.warn(msg % 'target')
self.im2shift = remove_metaRotation(self.im2shift)
# resample reference to target image
print('Adapting the reference image pixel grid to the one of the target image for shift detection.')
self.imref.reproject_to_new_grid(prototype=self.im2shift, CPUs=self.CPUs)
def check_if_fftw_works(self) -> None:
"""Assign the attribute 'fftw_works' to self.COREG_obj by executing shift calculation once with muted output."""
# calculate global shift once in order to check is fftw works
try:
self.COREG_obj.q = True
self.COREG_obj.v = False
self.COREG_obj.calculate_spatial_shifts()
except RuntimeError:
if self.COREG_obj.fftw_works is not None:
pass
else:
warnings.warn('\nFirst attempt to check if functionality of co-registration failed. Check your '
'input data and parameters. The following error occurred:', stacklevel=3)
raise
self.COREG_obj.q = self.q
self.COREG_obj.v = self.v
@property
def projectDir(self) -> str:
if self._projectDir:
if len(os.path.split(self._projectDir)) == 1:
return os.path.abspath(os.path.join(os.path.curdir, self._projectDir))
else:
return os.path.abspath(self._projectDir)
else:
# return a project name that not already has a corresponding folder on disk
root_dir = os.path.dirname(self.im2shift.filePath) if self.im2shift.filePath else os.path.curdir
fold_name = 'UntitledProject_1'
while os.path.isdir(os.path.join(root_dir, fold_name)):
fold_name = '%s_%s' % (fold_name.split('_')[0], int(fold_name.split('_')[-1]) + 1)
self._projectDir = os.path.join(root_dir, fold_name)
return self._projectDir
@property
def tiepoint_grid(self) -> Tie_Point_Grid:
if self._tiepoint_grid:
return self._tiepoint_grid
else:
self.calculate_spatial_shifts()
return self._tiepoint_grid
@property
def CoRegPoints_table(self) -> GeoDataFrame:
"""Return a GeoDataFrame containing all the results from coregistration for all points in the tie point grid.
Columns of the GeoDataFrame: 'geometry','POINT_ID','X_IM','Y_IM','X_MAP','Y_MAP','X_WIN_SIZE', 'Y_WIN_SIZE',
'X_SHIFT_PX','Y_SHIFT_PX', 'X_SHIFT_M', 'Y_SHIFT_M', 'ABS_SHIFT' and 'ANGLE'
"""
return self.tiepoint_grid.CoRegPoints_table
@property
def success(self) -> bool:
self._success = self.tiepoint_grid.GCPList != []
return self._success
def calculate_spatial_shifts(self) -> None:
self._tiepoint_grid = \
Tie_Point_Grid(self.COREG_obj, self.grid_res,
max_points=self.max_points,
outFillVal=self.outFillVal,
resamp_alg_calc=self.rspAlg_calc,
tieP_filter_level=self.tieP_filter_level,
outlDetect_settings=dict(
min_reliability=self.min_reliability,
rs_max_outlier=self.rs_max_outlier,
rs_tolerance=self.rs_tolerance),
dir_out=self.projectDir,
CPUs=self.CPUs,
progress=self.progress,
v=self.v,
q=self.q)
self._tiepoint_grid.get_CoRegPoints_table()
if self.v:
print('Visualizing CoReg points grid...')
self.view_CoRegPoints(figsize=(10, 10))
def show_image_footprints(self):
"""Show a web map containing the calculated footprints and overlap area of the input images.
NOTE: This method is intended to be called from Jupyter Notebook.
"""
return self.COREG_obj.show_image_footprints()
def view_CoRegPoints(self,
shapes2plot: str = 'points',
attribute2plot: str = 'ABS_SHIFT',
cmap: plt.cm = None,
exclude_fillVals: bool = True,
backgroundIm: str = 'tgt',
hide_filtered: bool = True,
figsize: tuple = None,
figsize_multiplier: float = 1,
title: str = '',
vector_scale: float = 1.,
savefigPath: str = '',
savefigDPI: int = 96,
showFig: bool = True,
vmin: float = None,
vmax: float = None,
return_map: bool = False
) -> Optional[Tuple]:
"""
Show a map of the calculated tie point grid with the target image as background.
:param shapes2plot: 'points': plot points representing values of 'attribute2plot' onto the map
'vectors': plot shift vectors onto the map
:param attribute2plot: the attribute of the tie point grid to be shown (default: 'ABS_SHIFT')
:param cmap: a custom color map to be applied to the plotted grid points (default: 'RdYlGn_r')
:param exclude_fillVals: whether to exclude those points of the grid where spatial shift detection failed
:param backgroundIm: whether to use the target or the reference image as map background. Possible
options are 'ref' and 'tgt' (default: 'tgt')
:param hide_filtered: hide all points that have been filtered out according to tie point filter level
:param figsize: size of the figure to be viewed, e.g. (10, 10); automatically estimated if not given
:param figsize_multiplier: if given, the figure size is multiplied with this value
:param title: plot title
:param vector_scale: scale factor for shift vector length (default: 1 -> no scaling)
:param savefigPath: path where to save the figure
:param savefigDPI: DPI resolution of the output figure when saved to disk (default: 96)
:param showFig: whether to show or to hide the figure (default: True)
:param vmin: minimum value of 'attribute2plot' to be included in the figure
:param vmax: maximum value of 'attribute2plot' to be included in the figure
:param return_map: whether to return the figure and axis objects (default: False)
:return: tuple of figure and axis objects or None in case return_map is set to False
"""
from matplotlib import pyplot as plt # noqa
from matplotlib.offsetbox import AnchoredText
from cartopy.crs import PlateCarree
from mpl_toolkits.axes_grid1 import make_axes_locatable
# get background image (reference or target image)
if backgroundIm not in ['tgt', 'ref']:
raise ValueError('backgroundIm')
backgroundIm = self.im2shift if backgroundIm == 'tgt' else self.imref
# make sure the output figure has a reasonable size, also if figsize is not given
if not figsize:
r, c = backgroundIm.shape[:2]
figsize = (8, r / c * 8) if r > c else (c / r * 8, 8)
# apply figsize multiplier
if figsize_multiplier:
if figsize_multiplier < 0:
raise ValueError(figsize_multiplier, 'The figure size multiplier must be a positive finite number.')
figsize = (figsize[0] * figsize_multiplier, figsize[1] * figsize_multiplier)
# get a map showing the background image
fig, ax = backgroundIm.show_map(figsize=figsize,
nodataVal=self.nodata[1],
return_map=True,
band=self.COREG_obj.shift.band4match)
# set figure title
dict_attr_title = dict(
X_WIN_SIZE='size of the matching window in x-direction [pixels]',
Y_WIN_SIZE='size of the matching window in y-direction [pixels]',
X_SHIFT_PX='absolute shifts in x-direction [pixels]',
Y_SHIFT_PX='absolute shifts in y-direction [pixels]',
X_SHIFT_M='absolute shifts in x-direction [map units]',
Y_SHIFT_M='absolute shifts in y-direction [map units]',
ABS_SHIFT='absolute shift vector length [map units]',
ANGLE='shift vector direction [angle in degrees]',
SSIM_BEFORE='structural similarity index before co-registration',
SSIM_AFTER='structural similarity index after co-registration',
SSIM_IMPROVED='structural similarity index improvement through co-registration [yes/no]',
RELIABILITY='reliability of the computed shift vector'
)
if title:
ax.set_title(title)
elif attribute2plot in dict_attr_title:
ax.set_title(dict_attr_title[attribute2plot], pad=20)
elif attribute2plot in self.CoRegPoints_table.columns:
ax.set_title(attribute2plot)
else:
raise ValueError(attribute2plot, "Invalid value for 'attribute2plot'. Valid values are: %s."
% ", ".join(self.CoRegPoints_table.columns))
if not self.CoRegPoints_table.empty:
# get GeoDataFrame containing everything needed for plotting
outlierCols = [c for c in self.CoRegPoints_table.columns if 'OUTLIER' in c]
attr2include = ['geometry', attribute2plot] + outlierCols + ['X_SHIFT_M', 'Y_SHIFT_M']
GDF = self.CoRegPoints_table.loc[self.CoRegPoints_table.X_SHIFT_M != self.outFillVal, attr2include].copy()\
if exclude_fillVals else self.CoRegPoints_table.loc[:, attr2include]
# get LonLat coordinates for all points
XY = np.array([geom.coords.xy for geom in GDF.geometry]).reshape(-1, 2)
lon, lat = transform_any_prj(self.im2shift.projection, 4326, XY[:, 0], XY[:, 1])
GDF['Lon'], GDF['Lat'] = lon, lat
# get colors for all points
palette = cmap if cmap is not None else plt.cm.get_cmap('RdYlGn_r')
if cmap is None and attribute2plot == 'ANGLE':
import cmocean
palette = getattr(cmocean.cm, 'delta')
if hide_filtered:
if self.tieP_filter_level > 0:
GDF = GDF[GDF.L1_OUTLIER.__eq__(False)].copy()
if self.tieP_filter_level > 1:
GDF = GDF[GDF.L2_OUTLIER.__eq__(False)].copy()
if self.tieP_filter_level > 2:
GDF = GDF[GDF.L3_OUTLIER.__eq__(False)].copy()
else:
marker = 'o' if len(GDF) < 10000 else '.'
common_kw = dict(marker=marker, alpha=1.0, transform=PlateCarree())
if self.tieP_filter_level > 0:
# flag level 1 outliers
GDF_filt = GDF[GDF.L1_OUTLIER.__eq__(True)].copy()
ax.scatter(GDF_filt['Lon'], GDF_filt['Lat'], c='b', s=250, label='reliability',
**common_kw)
if self.tieP_filter_level > 1:
# flag level 2 outliers
GDF_filt = GDF[GDF.L2_OUTLIER.__eq__(True)].copy()
ax.scatter(GDF_filt['Lon'], GDF_filt['Lat'], c='r', s=150, label='SSIM',
**common_kw)
if self.tieP_filter_level > 2:
# flag level 3 outliers
GDF_filt = GDF[GDF.L3_OUTLIER.__eq__(True)].copy()
ax.scatter(GDF_filt['Lon'], GDF_filt['Lat'], c='y', s=250, label='RANSAC',
**common_kw)
if self.tieP_filter_level > 0:
ax.legend(loc=0, scatterpoints=1)
# plot all points or vectors on top
if not GDF.empty:
vmin_auto, vmax_auto = \
(np.percentile(GDF[attribute2plot], 0),
np.percentile(GDF[attribute2plot], 98)) \
if attribute2plot != 'ANGLE' else (0, 360)
vmin = vmin if vmin is not None else vmin_auto
vmax = vmax if vmax is not None else vmax_auto
if shapes2plot == 'vectors':
# plot shift vectors
# doc: https://matplotlib.org/devdocs/api/_as_gen/matplotlib.axes.Axes.quiver.html
mappable = ax.quiver(
GDF['Lon'].values, GDF['Lat'].values,
-GDF['X_SHIFT_M'].values,
-GDF['Y_SHIFT_M'].values, # invert absolute shifts to make arrows point to tgt
GDF[attribute2plot].clip(vmin, vmax), # sets the colors
scale=1200 / vector_scale, # larger values decrease the arrow length
width=.0015, # arrow width (in relation to plot width)
# linewidth=1, # maybe use this to mark outliers instead of scatter points
cmap=palette,
pivot='middle', # position the middle point of the arrows onto the tie point location
transform=PlateCarree()
)
elif shapes2plot == 'points':
# plot tie points
mappable = ax.scatter(
GDF['Lon'], GDF['Lat'],
c=GDF[attribute2plot],
lw=0,
cmap=palette,
marker='o' if len(GDF) < 10000 else '.',
s=50,
alpha=1.0,
vmin=vmin,
vmax=vmax,
transform=PlateCarree())
pass
else:
raise ValueError("The parameter 'shapes2plot' must be set to 'vectors' or 'points'. "
"Received %s." % shapes2plot)
# add colorbar
divider = make_axes_locatable(ax)
cax = divider.new_vertical(size="2%", pad=0.4, pack_start=True,
axes_class=plt.Axes # needed because ax is a GeoAxis instance
)
fig.add_axes(cax)
fig.colorbar(mappable, cax=cax, orientation="horizontal")
# hack to enlarge the figure on the top to avoid cutting off the title (everthing else has no effect)
divider.new_vertical(size="2%", pad=0.4, pack_start=False, axes_class=plt.Axes)
else:
msg = "The map does not contain any tie points \n" \
"because all the found tie points were flagged as false-positives."
ax.add_artist(AnchoredText(msg, loc='lower center', prop=dict(c='r')))
if not self.q:
warnings.warn(msg)
else:
msg = "The map does not contain any tie points because no tie points were found at all."
ax.add_artist(AnchoredText(msg, loc='lower center', prop=dict(c='r')))
if not self.q:
warnings.warn(msg)
# remove white space around the figure
plt.subplots_adjust(top=.95, bottom=.05, right=.95, left=.05)
if savefigPath:
fig.savefig(savefigPath, dpi=savefigDPI, pad_inches=0.1, bbox_inches='tight')
if return_map:
return fig, ax
if showFig and not self.q:
plt.show(block=True)
else:
plt.close(fig)
    def view_CoRegPoints_folium(self, attribute2plot: str = 'ABS_SHIFT'):
        """Show the tie point grid on an interactive folium web map (experimental).

        :param attribute2plot:  the tie point grid attribute to visualize (default: 'ABS_SHIFT')
        :return:  a folium.Map instance with the target image overlay, the tie points and the overlap polygon
        """
        warnings.warn(UserWarning('This function is still under construction and may not work as expected!'))
        assert self.CoRegPoints_table is not None, 'Calculate tie point grid first!'

        import folium
        import geojson
        from folium.raster_layers import ImageOverlay

        # compute the WGS84 bounding box / center of the target image for positioning the map
        lon_min, lat_min, lon_max, lat_max = \
            reproject_shapelyGeometry(self.im2shift.box.mapPoly, self.im2shift.projection, 4326).bounds
        center_lon, center_lat = (lon_min + lon_max) / 2, (lat_min + lat_max) / 2

        # get image to plot
        image2plot = self.im2shift[:, :, 0]  # FIXME hardcoded band

        from py_tools_ds.geo.raster.reproject import warp_ndarray
        image2plot, gt, prj = \
            warp_ndarray(image2plot, self.im2shift.geotransform, self.im2shift.projection,
                         in_nodata=self.nodata[1], out_nodata=self.nodata[1], out_XYdims=(1000, 1000), q=True,
                         out_prj='epsg:3857')  # image must be transformed into web mercator projection

        # create map
        map_osm = folium.Map(location=[center_lat, center_lon])  # ,zoom_start=3)
        # import matplotlib
        ImageOverlay(
            colormap=lambda x: (1, 0, 0, x),  # TODO a colormap must be given
            # colormap=matplotlib.cm.gray,  # does not work
            image=image2plot, bounds=[[lat_min, lon_min], [lat_max, lon_max]],
        ).add_to(map_osm)

        points_values = self.CoRegPoints_table[['geometry', attribute2plot]]
        # NOTE(review): this assigns the frame's own CRS back onto its geometry column — it looks like a
        # self-assignment / possible typo; confirm what the intended source CRS is here
        points_values.geometry.crs = points_values.crs
        folium.GeoJson(points_values).add_to(map_osm)

        # add overlap polygon
        overlapPoly = reproject_shapelyGeometry(self.COREG_obj.overlap_poly, self.im2shift.prj, 4326)
        gjs = geojson.Feature(geometry=overlapPoly, properties={})
        folium.GeoJson(gjs).add_to(map_osm)

        return map_osm
def _get_updated_map_info_meanShifts(self) -> list:
"""Return the updated map info of the target image, shifted on the basis of the mean X/Y shifts."""
original_map_info = geotransform2mapinfo(self.im2shift.gt, self.im2shift.prj)
updated_map_info = copy(original_map_info)
updated_map_info[3] = str(float(original_map_info[3]) + self.tiepoint_grid.mean_x_shift_map)
updated_map_info[4] = str(float(original_map_info[4]) + self.tiepoint_grid.mean_y_shift_map)
return updated_map_info
@property
def coreg_info(self) -> dict:
"""Return a dictionary containing everthing to correct the detected local displacements of the target image."""
if self._coreg_info:
return self._coreg_info
else:
if not self._tiepoint_grid:
self.calculate_spatial_shifts()
TPG = self._tiepoint_grid
self._coreg_info = {
'GCPList': TPG.GCPList,
'mean_shifts_px': {'x': TPG.mean_x_shift_px if TPG.GCPList else None,
'y': TPG.mean_y_shift_px if TPG.GCPList else None},
'mean_shifts_map': {'x': TPG.mean_x_shift_map if TPG.GCPList else None,
'y': TPG.mean_y_shift_map if TPG.GCPList else None},
'updated map info means': self._get_updated_map_info_meanShifts() if TPG.GCPList else None,
'original map info': geotransform2mapinfo(self.imref.gt, self.imref.prj),
'reference projection': self.imref.prj,
'reference geotransform': self.imref.gt,
'reference grid': [[self.imref.gt[0], self.imref.gt[0] + self.imref.gt[1]],
[self.imref.gt[3], self.imref.gt[3] + self.imref.gt[5]]],
'reference extent': {'cols': self.imref.xgsd, 'rows': self.imref.ygsd}, # FIXME not needed anymore
'success': self.success
}
return self.coreg_info
    def correct_shifts(self,
                       max_GCP_count: int = None,
                       cliptoextent: bool = False,
                       min_points_local_corr: int = 5
                       ) -> OrderedDict:
        """Perform a local shift correction using all points from the previously calculated tie point grid.

        NOTE: Only valid matches are used as GCP points.

        :param max_GCP_count:          maximum number of GCPs to use
        :param cliptoextent:           whether to clip the output image to its real extent
        :param min_points_local_corr:  number of valid tie points, below which a global shift correction is
                                       performed instead of a local correction (global X/Y shift is then computed as
                                       the mean shift of the remaining points)(default: 5 tie points)
        :return:  the deshift results as returned by DESHIFTER.correct_shifts(), or None (implicitly)
                  if no valid GCPs were found
        """
        # compute the tie point grid lazily if it has not been calculated yet
        if not self._tiepoint_grid:
            self.calculate_spatial_shifts()

        if self.tiepoint_grid.GCPList:
            if max_GCP_count:
                # truncation happens on the cached coreg_info dict, so later reads see the reduced list
                self.coreg_info['GCPList'] = self.coreg_info['GCPList'][:max_GCP_count]

            # make sure the correction is applied to the original target image
            im2shift = GeoArray(self.params['im_tgt'], nodata=self.nodata[1], progress=self.progress, q=self.q)

            if has_metaRotation(im2shift):
                # resample the target image because (so far) the computed shifts cannot be applied to a dataset with
                # a metadata rotation (GDAL GeoTransform not 0 at positions 2 and 4)
                im2shift = remove_metaRotation(im2shift)

            # apply the correction
            DS = DESHIFTER(im2shift, self.coreg_info,
                           path_out=self.path_out,
                           fmt_out=self.fmt_out,
                           out_crea_options=self.out_creaOpt,
                           align_grids=self.align_grids,
                           match_gsd=self.match_gsd,
                           out_gsd=self.out_gsd,
                           target_xyGrid=self.target_xyGrid,
                           min_points_local_corr=min_points_local_corr,
                           resamp_alg=self.rspAlg_DS,
                           cliptoextent=cliptoextent,
                           # clipextent=self.im2shift.box.boxMapYX,
                           progress=self.progress,
                           v=self.v,
                           q=self.q)

            self.deshift_results = DS.correct_shifts()
            return self.deshift_results

        else:
            # no valid GCPs -> warn (unless quiet) and fall through, implicitly returning None
            if not self.q:
                warnings.warn('Correction of geometric shifts failed because the input GCP list is empty!')
|
<filename>square/api/v1_items_api.py
# -*- coding: utf-8 -*-
from deprecation import deprecated
from square.api_helper import APIHelper
from square.http.api_response import ApiResponse
from square.api.base_api import BaseApi
from square.http.auth.o_auth_2 import OAuth2
class V1ItemsApi(BaseApi):
"""A Controller to access Endpoints in the square API."""
    def __init__(self, config, call_back=None):
        """Initialize the controller with the client configuration and an optional HTTP call-back."""
        super(V1ItemsApi, self).__init__(config, call_back)
@deprecated()
def list_categories(self,
location_id):
"""Does a GET request to /v1/{location_id}/categories.
Lists all the item categories for a given location.
Args:
location_id (string): The ID of the location to list categories
for.
Returns:
list of V1Category: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/categories'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_category(self,
location_id,
body):
"""Does a POST request to /v1/{location_id}/categories.
Creates an item category.
Args:
location_id (string): The ID of the location to create an item
for.
body (V1Category): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Category: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/categories'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_category(self,
location_id,
category_id):
"""Does a DELETE request to /v1/{location_id}/categories/{category_id}.
Deletes an existing item category.
__DeleteCategory__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeleteCategoryRequest` object
as documented below.
Args:
location_id (string): The ID of the item's associated location.
category_id (string): The ID of the category to delete.
Returns:
V1Category: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/categories/{category_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'category_id': category_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_category(self,
location_id,
category_id,
body):
"""Does a PUT request to /v1/{location_id}/categories/{category_id}.
Modifies the details of an existing item category.
Args:
location_id (string): The ID of the category's associated
location.
category_id (string): The ID of the category to edit.
body (V1Category): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Category: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/categories/{category_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'category_id': category_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def list_discounts(self,
location_id):
"""Does a GET request to /v1/{location_id}/discounts.
Lists all the discounts for a given location.
Args:
location_id (string): The ID of the location to list categories
for.
Returns:
list of V1Discount: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/discounts'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_discount(self,
location_id,
body):
"""Does a POST request to /v1/{location_id}/discounts.
Creates a discount.
Args:
location_id (string): The ID of the location to create an item
for.
body (V1Discount): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Discount: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/discounts'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_discount(self,
location_id,
discount_id):
"""Does a DELETE request to /v1/{location_id}/discounts/{discount_id}.
Deletes an existing discount.
__DeleteDiscount__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeleteDiscountRequest` object
as documented below.
Args:
location_id (string): The ID of the item's associated location.
discount_id (string): The ID of the discount to delete.
Returns:
V1Discount: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/discounts/{discount_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'discount_id': discount_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_discount(self,
location_id,
discount_id,
body):
"""Does a PUT request to /v1/{location_id}/discounts/{discount_id}.
Modifies the details of an existing discount.
Args:
location_id (string): The ID of the category's associated
location.
discount_id (string): The ID of the discount to edit.
body (V1Discount): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Discount: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/discounts/{discount_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'discount_id': discount_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def list_fees(self,
location_id):
"""Does a GET request to /v1/{location_id}/fees.
Lists all the fees (taxes) for a given location.
Args:
location_id (string): The ID of the location to list fees for.
Returns:
list of V1Fee: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/fees'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_fee(self,
location_id,
body):
"""Does a POST request to /v1/{location_id}/fees.
Creates a fee (tax).
Args:
location_id (string): The ID of the location to create a fee for.
body (V1Fee): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Fee: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/fees'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_fee(self,
location_id,
fee_id):
"""Does a DELETE request to /v1/{location_id}/fees/{fee_id}.
Deletes an existing fee (tax).
__DeleteFee__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeleteFeeRequest` object
as documented below.
Args:
location_id (string): The ID of the fee's associated location.
fee_id (string): The ID of the fee to delete.
Returns:
V1Fee: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/fees/{fee_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'fee_id': fee_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_fee(self,
location_id,
fee_id,
body):
"""Does a PUT request to /v1/{location_id}/fees/{fee_id}.
Modifies the details of an existing fee (tax).
Args:
location_id (string): The ID of the fee's associated location.
fee_id (string): The ID of the fee to edit.
body (V1Fee): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Fee: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/fees/{fee_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'fee_id': fee_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def list_inventory(self,
location_id,
limit=None,
batch_token=None):
"""Does a GET request to /v1/{location_id}/inventory.
Provides inventory information for all inventory-enabled item
variations.
Args:
location_id (string): The ID of the item's associated location.
limit (int, optional): The maximum number of inventory entries to
return in a single response. This value cannot exceed 1000.
batch_token (string, optional): A pagination cursor to retrieve
the next set of results for your original query to the
endpoint.
Returns:
list of V1InventoryEntry: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/inventory'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'limit': limit,
'batch_token': batch_token
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def adjust_inventory(self,
location_id,
variation_id,
body):
"""Does a POST request to /v1/{location_id}/inventory/{variation_id}.
Adjusts the current available inventory of an item variation.
Args:
location_id (string): The ID of the item's associated location.
variation_id (string): The ID of the variation to adjust inventory
information for.
body (V1AdjustInventoryRequest): An object containing the fields
to POST for the request. See the corresponding object
definition for field details.
Returns:
V1InventoryEntry: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/inventory/{variation_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'variation_id': variation_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def list_items(self,
location_id,
batch_token=None):
"""Does a GET request to /v1/{location_id}/items.
Provides summary information of all items for a given location.
Args:
location_id (string): The ID of the location to list items for.
batch_token (string, optional): A pagination cursor to retrieve
the next set of results for your original query to the
endpoint.
Returns:
list of V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'batch_token': batch_token
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_item(self,
location_id,
body):
"""Does a POST request to /v1/{location_id}/items.
Creates an item and at least one variation for it.
Item-related entities include fields you can use to associate them
with
entities in a non-Square system.
When you create an item-related entity, you can optionally specify
`id`.
This value must be unique among all IDs ever specified for the
account,
including those specified by other applications. You can never reuse
an
entity ID. If you do not specify an ID, Square generates one for the
entity.
Item variations have a `user_data` string that lets you associate
arbitrary
metadata with the variation. The string cannot exceed 255 characters.
Args:
location_id (string): The ID of the location to create an item
for.
body (V1Item): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_item(self,
location_id,
item_id):
"""Does a DELETE request to /v1/{location_id}/items/{item_id}.
Deletes an existing item and all item variations associated with it.
__DeleteItem__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeleteItemRequest` object
as documented below.
Args:
location_id (string): The ID of the item's associated location.
item_id (string): The ID of the item to modify.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def retrieve_item(self,
location_id,
item_id):
"""Does a GET request to /v1/{location_id}/items/{item_id}.
Provides the details for a single item, including associated modifier
lists and fees.
Args:
location_id (string): The ID of the item's associated location.
item_id (string): The item's ID.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_item(self,
location_id,
item_id,
body):
"""Does a PUT request to /v1/{location_id}/items/{item_id}.
Modifies the core details of an existing item.
Args:
location_id (string): The ID of the item's associated location.
item_id (string): The ID of the item to modify.
body (V1Item): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def remove_fee(self,
location_id,
item_id,
fee_id):
"""Does a DELETE request to /v1/{location_id}/items/{item_id}/fees/{fee_id}.
Removes a fee assocation from an item so the fee is no longer
automatically applied to the item in Square Point of Sale.
Args:
location_id (string): The ID of the fee's associated location.
item_id (string): The ID of the item to add the fee to.
fee_id (string): The ID of the fee to apply.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}/fees/{fee_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id,
'fee_id': fee_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def apply_fee(self,
location_id,
item_id,
fee_id):
"""Does a PUT request to /v1/{location_id}/items/{item_id}/fees/{fee_id}.
Associates a fee with an item so the fee is automatically applied to
the item in Square Point of Sale.
Args:
location_id (string): The ID of the fee's associated location.
item_id (string): The ID of the item to add the fee to.
fee_id (string): The ID of the fee to apply.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}/fees/{fee_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id,
'fee_id': fee_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def remove_modifier_list(self,
location_id,
modifier_list_id,
item_id):
"""Does a DELETE request to /v1/{location_id}/items/{item_id}/modifier-lists/{modifier_list_id}.
Removes a modifier list association from an item so the modifier
options from the list can no longer be applied to the item.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The ID of the modifier list to remove.
item_id (string): The ID of the item to remove the modifier list
from.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}/modifier-lists/{modifier_list_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id,
'item_id': item_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def apply_modifier_list(self,
location_id,
modifier_list_id,
item_id):
"""Does a PUT request to /v1/{location_id}/items/{item_id}/modifier-lists/{modifier_list_id}.
Associates a modifier list with an item so the associated modifier
options can be applied to the item.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The ID of the modifier list to apply.
item_id (string): The ID of the item to add the modifier list to.
Returns:
V1Item: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}/modifier-lists/{modifier_list_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id,
'item_id': item_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_variation(self,
location_id,
item_id,
body):
"""Does a POST request to /v1/{location_id}/items/{item_id}/variations.
Creates an item variation for an existing item.
Args:
location_id (string): The ID of the item's associated location.
item_id (string): The item's ID.
body (V1Variation): An object containing the fields to POST for
the request. See the corresponding object definition for
field details.
Returns:
V1Variation: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}/variations'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_variation(self,
location_id,
item_id,
variation_id):
"""Does a DELETE request to /v1/{location_id}/items/{item_id}/variations/{variation_id}.
Deletes an existing item variation from an item.
__DeleteVariation__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeleteVariationRequest` object
as documented below.
Args:
location_id (string): The ID of the item's associated location.
item_id (string): The ID of the item to delete.
variation_id (string): The ID of the variation to delete.
Returns:
V1Variation: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}/variations/{variation_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id,
'variation_id': variation_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_variation(self,
location_id,
item_id,
variation_id,
body):
"""Does a PUT request to /v1/{location_id}/items/{item_id}/variations/{variation_id}.
Modifies the details of an existing item variation.
Args:
location_id (string): The ID of the item's associated location.
item_id (string): The ID of the item to modify.
variation_id (string): The ID of the variation to modify.
body (V1Variation): An object containing the fields to POST for
the request. See the corresponding object definition for
field details.
Returns:
V1Variation: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/items/{item_id}/variations/{variation_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'item_id': item_id,
'variation_id': variation_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def list_modifier_lists(self,
location_id):
"""Does a GET request to /v1/{location_id}/modifier-lists.
Lists all the modifier lists for a given location.
Args:
location_id (string): The ID of the location to list modifier
lists for.
Returns:
list of V1ModifierList: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_modifier_list(self,
location_id,
body):
"""Does a POST request to /v1/{location_id}/modifier-lists.
Creates an item modifier list and at least 1 modifier option for it.
Args:
location_id (string): The ID of the location to create a modifier
list for.
body (V1ModifierList): An object containing the fields to POST for
the request. See the corresponding object definition for
field details.
Returns:
V1ModifierList: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_modifier_list(self,
location_id,
modifier_list_id):
"""Does a DELETE request to /v1/{location_id}/modifier-lists/{modifier_list_id}.
Deletes an existing item modifier list and all modifier options
associated with it.
__DeleteModifierList__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeleteModifierListRequest`
object
as documented below.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The ID of the modifier list to delete.
Returns:
V1ModifierList: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists/{modifier_list_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def retrieve_modifier_list(self,
location_id,
modifier_list_id):
"""Does a GET request to /v1/{location_id}/modifier-lists/{modifier_list_id}.
Provides the details for a single modifier list.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The modifier list's ID.
Returns:
V1ModifierList: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists/{modifier_list_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_modifier_list(self,
location_id,
modifier_list_id,
body):
"""Does a PUT request to /v1/{location_id}/modifier-lists/{modifier_list_id}.
Modifies the details of an existing item modifier list.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The ID of the modifier list to edit.
body (V1UpdateModifierListRequest): An object containing the
fields to POST for the request. See the corresponding object
definition for field details.
Returns:
V1ModifierList: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists/{modifier_list_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_modifier_option(self,
location_id,
modifier_list_id,
body):
"""Does a POST request to /v1/{location_id}/modifier-lists/{modifier_list_id}/modifier-options.
Creates an item modifier option and adds it to a modifier list.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The ID of the modifier list to edit.
body (V1ModifierOption): An object containing the fields to POST
for the request. See the corresponding object definition for
field details.
Returns:
V1ModifierOption: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists/{modifier_list_id}/modifier-options'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_modifier_option(self,
location_id,
modifier_list_id,
modifier_option_id):
"""Does a DELETE request to /v1/{location_id}/modifier-lists/{modifier_list_id}/modifier-options/{modifier_option_id}.
Deletes an existing item modifier option from a modifier list.
__DeleteModifierOption__ returns nothing on success but Connect
SDKs map the empty response to an empty
`V1DeleteModifierOptionRequest`
object.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The ID of the modifier list to delete.
modifier_option_id (string): The ID of the modifier list to edit.
Returns:
V1ModifierOption: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists/{modifier_list_id}/modifier-options/{modifier_option_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id,
'modifier_option_id': modifier_option_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_modifier_option(self,
location_id,
modifier_list_id,
modifier_option_id,
body):
"""Does a PUT request to /v1/{location_id}/modifier-lists/{modifier_list_id}/modifier-options/{modifier_option_id}.
Modifies the details of an existing item modifier option.
Args:
location_id (string): The ID of the item's associated location.
modifier_list_id (string): The ID of the modifier list to edit.
modifier_option_id (string): The ID of the modifier list to edit.
body (V1ModifierOption): An object containing the fields to POST
for the request. See the corresponding object definition for
field details.
Returns:
V1ModifierOption: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/modifier-lists/{modifier_list_id}/modifier-options/{modifier_option_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'modifier_list_id': modifier_list_id,
'modifier_option_id': modifier_option_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def list_pages(self,
location_id):
"""Does a GET request to /v1/{location_id}/pages.
Lists all Favorites pages (in Square Point of Sale) for a given
location.
Args:
location_id (string): The ID of the location to list Favorites
pages for.
Returns:
list of V1Page: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/pages'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def create_page(self,
location_id,
body):
"""Does a POST request to /v1/{location_id}/pages.
Creates a Favorites page in Square Point of Sale.
Args:
location_id (string): The ID of the location to create an item
for.
body (V1Page): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Page: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/pages'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_page(self,
location_id,
page_id):
"""Does a DELETE request to /v1/{location_id}/pages/{page_id}.
Deletes an existing Favorites page and all of its cells.
__DeletePage__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeletePageRequest` object.
Args:
location_id (string): The ID of the Favorites page's associated
location.
page_id (string): The ID of the page to delete.
Returns:
V1Page: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/pages/{page_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'page_id': page_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_page(self,
location_id,
page_id,
body):
"""Does a PUT request to /v1/{location_id}/pages/{page_id}.
Modifies the details of a Favorites page in Square Point of Sale.
Args:
location_id (string): The ID of the Favorites page's associated
location
page_id (string): The ID of the page to modify.
body (V1Page): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Page: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/pages/{page_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'page_id': page_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def delete_page_cell(self,
location_id,
page_id,
row=None,
column=None):
"""Does a DELETE request to /v1/{location_id}/pages/{page_id}/cells.
Deletes a cell from a Favorites page in Square Point of Sale.
__DeletePageCell__ returns nothing on success but Connect SDKs
map the empty response to an empty `V1DeletePageCellRequest` object
as documented below.
Args:
location_id (string): The ID of the Favorites page's associated
location.
page_id (string): The ID of the page to delete.
row (string, optional): The row of the cell to clear. Always an
integer between 0 and 4, inclusive. Row 0 is the top row.
column (string, optional): The column of the cell to clear. Always
an integer between 0 and 4, inclusive. Column 0 is the
leftmost column.
Returns:
V1Page: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/pages/{page_id}/cells'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'page_id': page_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'row': row,
'column': column
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.delete(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def update_page_cell(self,
location_id,
page_id,
body):
"""Does a PUT request to /v1/{location_id}/pages/{page_id}/cells.
Modifies a cell of a Favorites page in Square Point of Sale.
Args:
location_id (string): The ID of the Favorites page's associated
location.
page_id (string): The ID of the page the cell belongs to.
body (V1PageCell): An object containing the fields to POST for the
request. See the corresponding object definition for field
details.
Returns:
V1Page: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v1/{location_id}/pages/{page_id}/cells'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id,
'page_id': page_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
|
<reponame>robmarkcole/London-Air-Quality<gh_stars>1-10
from datetime import timedelta
import requests
from typing import List, Dict
AUTHORITIES = [
"<NAME>",
"Barnet",
"Bexley",
"Brent",
"Bromley",
"Camden",
"City of London",
"Croydon",
"Ealing",
"Enfield",
"Greenwich",
"Hackney",
"Hammersmith and Fulham",
"Haringey",
"Harrow",
"Havering",
"Hillingdon",
"Hounslow",
"Islington",
"Kensington and Chelsea",
"Kingston",
"Lambeth",
"Lewisham",
"Merton",
"Newham",
"Redbridge",
"Richmond",
"Southwark",
"Sutton",
"Tower Hamlets",
"Waltham Forest",
"Wandsworth",
"Westminster",
]
LAQ_HOURLY_URL = (
"http://api.erg.kcl.ac.uk/AirQuality/Hourly/MonitoringIndex/GroupName=London/Json"
)
TIMEOUT = 10
class LondonAirQualityException(Exception):
    """Raised when the London Air API returns an error status, times out,
    or cannot be reached."""
    pass
def request_data(url: str, timeout: int = TIMEOUT) -> Dict:
    """
    Request JSON data from *url* and return it as a dictionary.

    Args:
        url: The endpoint to fetch.
        timeout: Per-request timeout in seconds (defaults to TIMEOUT).

    Raises:
        LondonAirQualityException: on a non-200 status code, a request
            timeout, or a connection failure.
    """
    try:
        # Bug fix: honour the caller-supplied ``timeout`` argument —
        # previously the module-level TIMEOUT constant was always used,
        # making the parameter (and the timeout error message) misleading.
        response = requests.get(url, timeout=timeout)
        if response.status_code == 200:
            return response.json()
        else:
            raise LondonAirQualityException(
                f"Status code {response.status_code} returned from {url}"
            )
    except requests.exceptions.Timeout:
        raise LondonAirQualityException(
            f"Request timeout, current timeout is {timeout} seconds"
        )
    except requests.exceptions.ConnectionError as exc:
        raise LondonAirQualityException(f"Internet connection error: {exc}")
def parse_hourly_response(hourly_response: Dict) -> Dict:
    """
    Re-index the hourly API response by London borough.

    Boroughs whose data is missing or malformed map to an empty dict;
    a single-site borough (dict instead of list) is normalised to a list.
    """
    data = dict.fromkeys(AUTHORITIES)
    for authority in AUTHORITIES:
        try:
            entries = hourly_response["HourlyAirQualityIndex"]["LocalAuthority"]
            for entry in entries:
                if entry["@LocalAuthorityName"] != authority:
                    continue
                sites = entry["Site"]
                if isinstance(sites, dict):
                    sites = [sites]
                data[authority] = parse_site(sites)
        except Exception:
            # Any structural surprise in the payload leaves this borough empty.
            data[authority] = {}
    return data
def parse_species(species_data: List[Dict]) -> tuple:
    """Iterate over the species measured at a site.

    Species whose air-quality band is "No data" are skipped.

    Returns:
        A 2-tuple of (parsed species dicts, air-quality band strings).
        The previous ``-> List[Dict]`` annotation was wrong — the function
        has always returned two values.
    """
    parsed_species_data = []
    quality_list = []
    for species in species_data:
        if species["@AirQualityBand"] == "No data":
            continue
        species_dict = {
            "description": species["@SpeciesDescription"],
            "code": species["@SpeciesCode"],
            "quality": species["@AirQualityBand"],
            "index": species["@AirQualityIndex"],
        }
        species_dict["summary"] = (
            species_dict["code"] + " is " + species_dict["quality"]
        )
        parsed_species_data.append(species_dict)
        quality_list.append(species_dict["quality"])
    return parsed_species_data, quality_list
def parse_site(entry_sites_data: List[Dict]) -> List[Dict]:
    """Tidy the raw site records for one local authority.

    Normalises single-species payloads to a list, attaches parsed pollutant
    data, and derives the site's dominant air-quality band (most frequent
    band across its pollutants).
    """
    authority_data = []
    for site in entry_sites_data:
        record = {
            "updated": site["@BulletinDate"],
            "latitude": site["@Latitude"],
            "longitude": site["@Longitude"],
            "site_code": site["@SiteCode"],
            # Site names look like "Borough - Name"; keep the name part only.
            "site_name": site["@SiteName"].split("-")[-1].lstrip(),
            "site_type": site["@SiteType"],
        }
        raw_species = site["Species"]
        if isinstance(raw_species, dict):
            raw_species = [raw_species]
        pollutants, bands = parse_species(raw_species)
        if not pollutants:
            pollutants.append("no_species_data")
        record["pollutants"] = pollutants
        if bands:
            # Most common band wins as the site-level status.
            record["pollutants_status"] = max(set(bands), key=bands.count)
            record["number_of_pollutants"] = len(bands)
        else:
            record["pollutants_status"] = "no_species_data"
            record["number_of_pollutants"] = 0
        authority_data.append(record)
    return authority_data
def get_hourly_data_flat(hourly_data: Dict) -> List[Dict]:
    """
    Flatten borough-indexed hourly data into a single list of pollutant
    records, each annotated in place with its borough and site metadata.

    Records that are not dicts (the "no_species_data" placeholder string)
    or whose site lacks the expected metadata keys are skipped.
    """
    all_data = []
    for authority, sites in hourly_data.items():
        for site in sites:
            for pollutant in site["pollutants"]:
                try:
                    pollutant["borough"] = authority
                    pollutant["site_code"] = site["site_code"]
                    pollutant["site_name"] = site["site_name"]
                    pollutant["latitude"] = site["latitude"]
                    pollutant["longitude"] = site["longitude"]
                    pollutant["updated"] = site["updated"]
                    all_data.append(pollutant)
                except (KeyError, TypeError):
                    # Was a bare ``except: pass``, which hid real bugs.
                    # TypeError covers the "no_species_data" placeholder
                    # string; KeyError covers missing site metadata.
                    continue
    return all_data
|
<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by <NAME> (http://www.dwerg.net/).
Maintained for a few years by <NAME> (http://www.freewisdom.org).
Currently maintained by <NAME> (https://github.com/waylan),
<NAME> (https://github.com/mitya57) and <NAME> (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 <NAME> (v. 0.2-1.6b)
Copyright 2004 <NAME> (the original version)
License: BSD (see LICENSE.md for details).
"""
from setuptools import setup
from markdown import __version__, __version_info__
# Get development Status for classifiers
# Maps the release stage (the 4th element of __version_info__) to the
# matching PyPI "Development Status" trove-classifier suffix. Note that
# "rc" deliberately maps to the same classifier as "beta".
dev_status_map = {
    'dev': '2 - Pre-Alpha',
    'alpha': '3 - Alpha',
    'beta': '4 - Beta',
    'rc': '4 - Beta',
    'final': '5 - Production/Stable'
}
# Fails loudly (KeyError) if the version stage is not a known key above.
DEVSTATUS = dev_status_map[__version_info__[3]]
# The command line script name. Currently set to "markdown_py" so as not to
# conflict with the perl implementation (which uses "markdown").
SCRIPT_NAME = 'markdown_py'
long_description = '''
This is a Python implementation of John Gruber's Markdown_.
It is almost completely compliant with the reference implementation,
though there are a few known issues. See Features_ for information
on what exactly is supported and what is not. Additional features are
supported by the `Available Extensions`_.
.. _Markdown: http://daringfireball.net/projects/markdown/
.. _Features: https://Python-Markdown.github.io#features
.. _`Available Extensions`: https://Python-Markdown.github.io/extensions/
Support
=======
You may ask for help and discuss various other issues on the
`mailing list`_ and report bugs on the `bug tracker`_.
.. _`mailing list`: http://lists.sourceforge.net/lists/listinfo/python-markdown-discuss
.. _`bug tracker`: http://github.com/Python-Markdown/markdown/issues
'''
# Package metadata and entry points for the Markdown distribution.
setup(
    name='Markdown',
    version=__version__,
    url='https://Python-Markdown.github.io/',
    # NOTE(review): this URL names a wheel under a "packages/source" path —
    # looks stale; verify against the project's actual PyPI layout.
    download_url='http://pypi.python.org/packages/source/M/Markdown/Markdown-%s-py2.py3-none-any.whl' % __version__,
    description='Python implementation of Markdown.',
    long_description=long_description,
    author='<NAME>, <NAME> and <NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    license='BSD License',
    packages=['markdown', 'markdown.extensions'],
    python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
    install_requires=['setuptools >= 36'],
    extras_require={
        'testing': [
            'coverage',
            'pyyaml',
        ],
    },
    entry_points={
        # Console script defined above; runs markdown.__main__:run.
        'console_scripts': [
            '%s = markdown.__main__:run' % SCRIPT_NAME,
        ],
        # Register the built in extensions
        'markdown.extensions': [
            'abbr = markdown.extensions.abbr:AbbrExtension',
            'admonition = markdown.extensions.admonition:AdmonitionExtension',
            'attr_list = markdown.extensions.attr_list:AttrListExtension',
            'codehilite = markdown.extensions.codehilite:CodeHiliteExtension',
            'def_list = markdown.extensions.def_list:DefListExtension',
            'extra = markdown.extensions.extra:ExtraExtension',
            'fenced_code = markdown.extensions.fenced_code:FencedCodeExtension',
            'footnotes = markdown.extensions.footnotes:FootnoteExtension',
            'meta = markdown.extensions.meta:MetaExtension',
            'nl2br = markdown.extensions.nl2br:Nl2BrExtension',
            'sane_lists = markdown.extensions.sane_lists:SaneListExtension',
            'smarty = markdown.extensions.smarty:SmartyExtension',
            'tables = markdown.extensions.tables:TableExtension',
            'toc = markdown.extensions.toc:TocExtension',
            'wikilinks = markdown.extensions.wikilinks:WikiLinkExtension',
            'legacy_attrs = markdown.extensions.legacy_attrs:LegacyAttrExtension',
            'legacy_em = markdown.extensions.legacy_em:LegacyEmExtension',
        ]
    },
    # DEVSTATUS is derived from __version_info__ above.
    classifiers=[
        'Development Status :: %s' % DEVSTATUS,
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Communications :: Email :: Filters',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Software Development :: Documentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Filters',
        'Topic :: Text Processing :: Markup :: HTML'
    ]
)
|
<reponame>DrackThor/artifactory-cleanup
import importlib
import logging
import sys
from datetime import timedelta, date
import requests
from hurry.filesize import size
from plumbum import cli
from prettytable import PrettyTable
from requests.auth import HTTPBasicAuth
from artifactory_cleanup.context_managers import get_context_managers
from artifactory_cleanup.rules.base import CleanupPolicy
from artifactory_cleanup.rules.delete import delete_empty_folder
requests.packages.urllib3.disable_warnings()
def init_logging():
    """Configure root logging: DEBUG level, thread/module layout, stdout."""
    fmt = "%(thread)5s %(module)-20s %(levelname)-8s %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=fmt, stream=sys.stdout)
class ArtifactoryCleanup(cli.Application):
    """CLI application that applies CleanupPolicy rules to an Artifactory
    server, optionally deleting the matched artifacts and printing a
    per-policy summary table."""

    # Credentials and server location; may come from the environment.
    _user = cli.SwitchAttr(
        ["--user"],
        help="Login to access to the artifactory",
        mandatory=True,
        envname="ARTIFACTORY_USER",
    )
    _password = cli.SwitchAttr(
        ["--password"],
        help="Password to access to the artifactory",
        mandatory=True,
        envname="ARTIFACTORY_PASSWORD",
    )
    # Optional substring filter for selecting a single policy by name.
    _policy_name = cli.SwitchAttr(
        ["--policy-name"], help="Name for a rule", mandatory=False
    )
    # Python module (``.py`` optional) exposing a module-level RULES list.
    _config = cli.SwitchAttr(
        ["--config"], help="Name of config with list of policies", mandatory=False
    )
    _artifactory_server = cli.SwitchAttr(
        ["--artifactory-server"],
        help="URL to artifactory, e.g: https://arti.example.com/artifactory",
        mandatory=True,
        envname="ARTIFACTORY_SERVER",
    )
    # Without --destroy the run is a dry run (verbose mode).
    _destroy = cli.Flag("--destroy", help="Remove artifacts", mandatory=False)
    _debug = cli.Flag(
        "--debug",
        help="Only print artifacts that can be deleted with the specified cleaning policies and rules",
        mandatory=False,
    )
    _remove_empty_folder = cli.Flag(
        "--remove-empty-folder", help="Cleaning up empty folders in local repositories"
    )
    # Shifts "today" forward to preview what a future run would delete;
    # mutually exclusive with --destroy by design.
    _days_in_future = cli.SwitchAttr(
        "--days-in-future",
        help="Simulate future behaviour",
        mandatory=False,
        excludes=["--destroy"],
    )

    def _destroy_or_verbose(self):
        """Print a banner announcing whether this run deletes or only reports."""
        if self._destroy:
            print("*" * 80)
            print("Delete MODE")
        else:
            print("*" * 80)
            print("Verbose MODE")

    def main(self):
        """Load the cleanup policies, run each one, and print a summary table."""
        # remove trailing slash
        self._artifactory_server = self._artifactory_server.rstrip("/")
        if self._remove_empty_folder:
            # Built-in single policy: purge empty folders in local repos.
            rules = [
                CleanupPolicy(
                    "Cleaning up empty folders in local repositories",
                    delete_empty_folder(),
                )
            ]
        else:
            try:
                # Import the user's config module from the CWD and read RULES.
                self._config = self._config.replace(".py", "")
                sys.path.append(".")
                rules = getattr(importlib.import_module(self._config), "RULES")
            except ImportError as error:
                print("Error: {}".format(error))
                # NOTE(review): builtin ``exit`` — ``sys.exit(1)`` would be
                # the conventional choice (``exit`` may be absent without
                # the site module); confirm before changing.
                exit(1)
        self._destroy_or_verbose()
        if self._days_in_future:
            # Pretend today is N days ahead to preview future deletions.
            self._today = date.today() + timedelta(days=int(self._days_in_future))
            print(f"Simulating cleanup actions that will occur on {self._today}")
        else:
            self._today = date.today()
        artifactory_session = requests.Session()
        artifactory_session.auth = HTTPBasicAuth(self._user, self._password)
        # Validate that all rules is CleanupPolicy
        for cleanup_rule in rules:
            if not isinstance(cleanup_rule, CleanupPolicy):
                sys.exit(
                    "Rule '{}' is not CleanupPolicy, check this please".format(
                        cleanup_rule
                    )
                )
        if self._policy_name:
            # Substring match against policy names; empty result is fatal.
            rules = [rule for rule in rules if self._policy_name in rule.name]
            if not rules:
                sys.exit("Rule with name '{}' does not found".format(self._policy_name))
        table = PrettyTable()
        table.field_names = ["Cleanup Policy", "Files count", "Size"]
        table.align["Cleanup Policy"] = "l"
        total_size = 0
        ctx_mgr_block, ctx_mgr_test = get_context_managers()
        for cleanup_rule in rules:  # type: CleanupPolicy
            with ctx_mgr_block(cleanup_rule.name):
                cleanup_rule.init(
                    artifactory_session, self._artifactory_server, self._today
                )
                # prepare
                with ctx_mgr_block("AQL filter"):
                    cleanup_rule.aql_filter()
                # Get artifacts
                with ctx_mgr_block("Get artifacts"):
                    print("*" * 80)
                    print("AQL Query:")
                    print(cleanup_rule.aql_text)
                    print("*" * 80)
                    artifacts = cleanup_rule.get_artifacts()
                print("Found {} artifacts".format(len(artifacts)))
                # Filter
                with ctx_mgr_block("Filter results"):
                    artifacts_to_remove = cleanup_rule.filter(artifacts)
                    print(
                        "Found {} artifacts AFTER filtering".format(
                            len(artifacts_to_remove)
                        )
                    )
                # Delete or debug
                for artifact in artifacts_to_remove:
                    # test name for teamcity
                    repo_underscore = (
                        artifact["repo"].replace(".", "_").replace("/", "_")
                    )
                    path_underscore = (
                        artifact["path"].replace(".", "_").replace("/", "_")
                    )
                    name_underscore = (
                        artifact["name"].replace(".", "_").replace("/", "_")
                    )
                    test_name = "cleanup.{}.{}_{}".format(
                        repo_underscore, path_underscore, name_underscore
                    )
                    with ctx_mgr_test(test_name):
                        # Only deletes when --destroy was given.
                        cleanup_rule.delete(artifact, destroy=self._destroy)
            # Info
            count_artifacts = len(artifacts_to_remove)
            print("Deleted artifacts count: {}".format(count_artifacts))
            try:
                artifacts_size = sum([x["size"] for x in artifacts_to_remove])
                total_size += artifacts_size
                artifacts_size = size(artifacts_size)
                print("Summary size: {}".format(artifacts_size))
                table.add_row([cleanup_rule.name, count_artifacts, artifacts_size])
            except KeyError:
                # Some AQL results omit "size"; skip the size summary then.
                print("Summary size not defined")
            print()
        table.add_row(["", "", ""])
        table.add_row(["Total size: {}".format(size(total_size)), "", ""])
        print(table)
if __name__ == "__main__":
init_logging()
ArtifactoryCleanup.run()
|
<reponame>simonchuth/patentAI
import datetime
from tqdm import tqdm
import pickle
import requests
import PyPDF2
import io
from os import listdir
from os.path import join
def generate_datelist(numdays=3650, date_list=None):
    """Return dates formatted for the IPOS API (YYYY-MM-DD).

    When *date_list* is supplied it is returned unchanged; otherwise the
    last *numdays* days (counting back from today) are generated and
    weekend days are dropped.
    """
    if date_list is not None:
        return date_list
    today = datetime.datetime.today()
    candidates = (today - datetime.timedelta(days=offset)
                  for offset in range(numdays))
    return [day.strftime('%Y-%m-%d') for day in candidates if day.weekday() < 5]
def fetch_patent_url(date_list):
    """Query the IPOS API for each lodgement date and collect the URLs of
    patent specification documents (description-with-claims, or the full
    specification of a grant) filed by UK/US/SG applicants."""
    base_url = ('https://api.data.gov.sg/v1/technology/ipos/patents'
                '?lodgement_date=')
    wanted = ('Description (with claims)', 'Full Specification (Grant)')
    target_doc = []
    for date in tqdm(date_list):
        result = requests.get(base_url + date).json()
        if 'message' in result:  # The API returned an error for this date.
            continue
        for app in result['items']:
            if not english_origin(app):
                continue
            for document in app['documents']:
                if document['docType']['description'] in wanted:
                    target_doc.append(document['url'])
    return target_doc
def english_origin(app):
    """True if the application's first applicant is incorporated or
    resident in the UK, US, or SG; False on any missing/malformed data."""
    try:
        code = app['applicant'][0]['countryOfIncorporationOrResidence']['code']
    except Exception:
        # Best effort: treat unreadable applicant records as non-English.
        return False
    return code in ('UK', 'US', 'SG')
def load_web_pdf(url):
    """Download a PDF from *url* and return its per-page extracted text."""
    response = requests.get(url)
    with io.BytesIO(response.content) as buffer:
        reader = PyPDF2.PdfFileReader(buffer)
        return [reader.getPage(page).extractText()
                for page in range(reader.getNumPages())]
def extract_intro(txt):
    """Join the first two pages, which hold the patent's introduction."""
    return ' '.join(txt[0:2])
def extract_claim_text(txt):
    """Split page texts into (claim pages, preceding text pages).

    The claims section starts at the first page containing the literal
    'CLAIMS' or the phrase 'what is claimed is' (case-insensitive).
    Raises IndexError when no such page exists.
    """
    matches = [index for index, page in enumerate(txt)
               if 'CLAIMS' in page or 'what is claimed is' in page.lower()]
    start = matches[0]
    return txt[start:], txt[:start]
def field_keyword_filter(intro, keywords=('bio', 'pharm', 'medic')):
    """Return True if any keyword occurs (case-insensitively) in *intro*.

    The default is now a tuple rather than a list: mutable default
    arguments are shared across calls and are a classic Python pitfall.
    Callers may still pass any iterable of strings.
    """
    lowered = intro.lower()
    return any(word in lowered for word in keywords)
def main_extraction(target_doc,
                    L=None,
                    checkpoint=None,
                    keywords=['bio', 'pharm', 'medic']):
    """Download each document URL, extract its intro/claims, and keep the
    documents whose intro matches *keywords*.

    Args:
        target_doc: iterable of PDF URLs to process.
        L: when None, the result is returned (single-process use); when a
            (multiprocessing manager) list, the result is appended to it
            instead and progress is printed.
        checkpoint: optional directory; when given (with L), each chunk's
            output is also pickled as '<n>_checkpoint.pkl'.
        keywords: substrings matched case-insensitively against the intro.
            NOTE(review): mutable default argument — harmless here since it
            is never mutated, but a tuple would be safer.

    Returns:
        [data, failed_extract_text] when L is None, where data entries are
        [intro, claim_pages, text_pages, full_text]; otherwise None.
    """
    data = []
    failed_extract_text = []
    for url in tqdm(target_doc):
        # Load PDF
        try:
            txt = load_web_pdf(url)
        except Exception:
            # Download/parse failures are silently skipped by design.
            continue
        # Extract text
        try:
            intro = extract_intro(txt)
            claim_pages, text_pages = extract_claim_text(txt)
            if field_keyword_filter(intro, keywords):
                data.append([intro, claim_pages, text_pages, txt])
        except Exception:
            # Keep the raw text of documents we could not segment.
            failed_extract_text.append(txt)
    output = [data, failed_extract_text]
    if L is None:
        return output  # Normal usage
    else:
        L.append(output)  # Multiprocessing
        if checkpoint is not None:
            # Persist this chunk so a crashed run can be resumed.
            filename = join(checkpoint, str(len(L))) + '_checkpoint.pkl'
            with open(filename, 'wb') as file:
                pickle.dump(output, file)
        print(f'Completed {len(L)} chunks')
def combine_mp_chunks(pkl_path):
    """Load a pickled list of (data, failed, access_problem) chunks and
    return the per-application data flattened into a single list.

    Bug fix: *pkl_path* was previously overwritten with a hard-coded
    'pkl_files/ipos_extracted.pkl', silently ignoring the caller's
    argument; the parameter is now honoured.
    """
    with open(pkl_path, 'rb') as file:
        combined_output = pickle.load(file)
    data_list = [data for data, failed_extract_text, access_problem in
                 combined_output]
    data_combined = [app for data in data_list for app in data]
    return data_combined
def combine_checkpoint_file(folder_path):
    """Collect the data portion (first element) of every
    '*_checkpoint.pkl' file found in *folder_path*."""
    output_combined = []
    for filename in listdir(folder_path):
        if not filename.endswith('_checkpoint.pkl'):
            continue
        with open(join(folder_path, filename), 'rb') as pklfile:
            chunk = pickle.load(pklfile)[0]
        output_combined.append(chunk)
    return output_combined
def extract_app(output_list):
    """Flatten a list of output chunks into one flat list of applications."""
    return [app for chunk in output_list for app in chunk]
|
import re
import options
from utils import basetypes
import sys
import collections
from enum import IntEnum
from exprs import exprs
from exprs import exprtypes
import math
import heapq
import functools
from core import grammars
from enumerators import enumerators
from parsers import parser
from semantics import semantics_core
from semantics import semantics_types
from core import synthesis_context
import random
import pickle
import itertools
from phogs.phog_utils import *
from benchmarks import *
# Sentinel score used to effectively reject over-complicated tcond programs.
max_score = 9999999.0
# write_instrs = [Tcond.WRITE_VALUE, Tcond.WRITE_TYPE]
# we do not use write_type for now
write_instrs = [Tcond.WRITE_VALUE]
# Instructions a tcond "chunk" is allowed to start with.
begin_instrs = [Tcond.UP, Tcond.LEFT, Tcond.PREV_DFS]
# Full alphabet of AST-navigation instructions.
move_instrs = [Tcond.UP, Tcond.DOWN_FIRST, Tcond.DOWN_LAST, Tcond.LEFT, Tcond.RIGHT, Tcond.PREV_DFS]
# parameter for penalizing too complicated tcond programs
# lambda_penalize = 0.05
# NOTE: the defaults below are overwritten by param_setting() from CLI args.
# maximum #. of iteration for the total training
max_iter = 10
# maximum #. of iteration for program generator
max_iter_gen = 100
# maximum #. of iteration for data sampling
max_iter_sample = 100
# maximum size of candidate program/data list
pool_size = 10
# k-fold cross validation
k_fold = 2
# perform witten-bell interpolation?
do_wb = True
# perform data sampling?
do_sample = True
# learn PHOG on a decomposed grammar?
eu = False
# maximum program size when randomly generating programs.
max_prog_size = 30
# thresholds for penalizing long instructions
wb_threshold = 300000
write_num_threshold = 5
# lambda candidates
lambda_candidates = [1.0, 2.0, 3.0, 4.0, 5.0]
# lambda_candidates = [10.0, 20.0, 30.0, 40.0, 50.0]
# alpha value for stupid backoff
alpha = 0.1
# pre-compute information about exprs
# (expr2history, expr2cache) pair produced by get_exprs_info(); read by get_data_expr().
exprs_info = None
n_cores = 4
# expression string -> expression object; presumably filled by a caller — TODO confirm.
exprstr2expr = {}
def get_func_exprs_grammars(benchmark_files):
    """Parse SyGuS benchmark files and return (rettype2fun_exprs, grammars).

    rettype2fun_exprs maps (return_type, eusolver_flag) to the set of
    define-fun body expressions (with formal parameters substituted in);
    grammars is the set of explicit (non-default) grammars attached to the
    files' synth-fun / synth-inv declarations.
    """
    global eu
    # Grammars
    # NOTE(review): this local set shadows the imported `grammars` module
    # inside this function — intentional here, but confusing.
    grammars = set([])
    rettype2fun_exprs = {}
    # for eusolver
    ite_related_macros = []
    for benchmark_file in benchmark_files:
        print('Loading : ', benchmark_file)
        file_sexp = parser.sexpFromFile(benchmark_file)
        if file_sexp is None:
            continue
        # Build a synthesis context mirroring the parser's expectations.
        core_instantiator = semantics_core.CoreInstantiator()
        theory_instantiators = [parser.get_theory_instantiator(theory) for theory in parser._known_theories]
        macro_instantiator = semantics_core.MacroInstantiator()
        uf_instantiator = semantics_core.UninterpretedFunctionInstantiator()
        synth_instantiator = semantics_core.SynthFunctionInstantiator()
        syn_ctx = synthesis_context.SynthesisContext(
            core_instantiator,
            *theory_instantiators,
            macro_instantiator,
            uf_instantiator,
            synth_instantiator)
        syn_ctx.set_macro_instantiator(macro_instantiator)
        defs, _ = parser.filter_sexp_for('define-fun', file_sexp)
        if defs is None: defs = []
        for [name, args_data, ret_type_data, interpretation] in defs:
            # When eu (eusolver mode) is set, only the eusolver variant is built.
            for eusolver in ([True] if eu else [False]):
                ((arg_vars, arg_types, arg_var_map), return_type) = parser._process_function_defintion(args_data, ret_type_data)
                expr = parser.sexp_to_expr(interpretation, syn_ctx, arg_var_map)
                macro_func = semantics_types.MacroFunction(name, len(arg_vars), tuple(arg_types), return_type, expr,
                                                           arg_vars)
                # for eusolver (recording macro functions of which definition include ite)
                if eusolver:
                    app = exprs.find_application(expr, 'ite')
                    if app is not None: ite_related_macros.append(name)
                macro_instantiator.add_function(name, macro_func)
                # Replace argument variables with formal parameters; eusolver
                # mode uses random parameter positions — TODO confirm why.
                i = 0
                subs_pairs = []
                for (var_expr, ty) in zip(arg_vars, arg_types):
                    i = random.randint(1,100000) if eu else i
                    param_expr = exprs.FormalParameterExpression(None,ty,i)
                    subs_pairs.append((var_expr, param_expr))
                    i += 1
                expr = exprs.substitute_all(expr, subs_pairs)
                # resolve macro functions involving ite (for enumeration of pred exprs (eusolver))
                if eusolver:
                    for fname in ite_related_macros:
                        app = exprs.find_application(expr, fname)
                        if app is None: continue
                        expr = macro_instantiator.instantiate_macro(expr, fname)
                if (return_type, eusolver) not in rettype2fun_exprs:
                    rettype2fun_exprs[(return_type, eusolver)] = set([])
                rettype2fun_exprs[(return_type, eusolver)].add(expr)
        @static_var("cnt", 0)
        def rename(synth_funs_data):
            # Give every synth-fun a unique auxiliary name across files.
            for synth_fun_data in synth_funs_data:
                # to avoid duplicated names
                synth_fun_data[0] = "__aux_name__" + benchmark_file + str(rename.cnt)
                rename.cnt += 1
        # collect grammars
        synth_funs_data, _ = parser.filter_sexp_for('synth-fun', file_sexp)
        if len(synth_funs_data) == 0:
            # Fall back to invariant-synthesis declarations.
            synth_funs_data, _ = parser.filter_sexp_for('synth-inv', file_sexp)
            rename(synth_funs_data)
            synth_funs_grammar_data = parser.process_synth_invs(synth_funs_data, synth_instantiator, syn_ctx)
        else:
            rename(synth_funs_data)
            synth_funs_grammar_data = parser.process_synth_funcs(synth_funs_data, synth_instantiator, syn_ctx)
        for synth_fun, arg_vars, grammar_data in synth_funs_grammar_data:
            if grammar_data != 'Default grammar':
                grammar = parser.sexp_to_grammar(arg_vars, grammar_data, synth_fun, syn_ctx)
                grammars.add(grammar)
    return rettype2fun_exprs, grammars
class Mode(IntEnum):
    """Mutation modes shared by mutate_prog() and data_sampling()."""
    DELETE = 0
    MODIFY = 1
    ADD = 2
def prog_to_str(prog):
    """Render a tcond instruction sequence as space-separated integers."""
    return ' '.join(str(int(instr)) for instr in prog)
# @static_var("cache", {})
# def get_data_expr(expr, prog):
# key = (expr, tuple(prog))
# if key in get_data_expr.cache:
# return get_data_expr.cache[key]
#
# result = []
# banned = set()
# stack = [(expr, [])]
# while len(stack) > 0:
# v, addr = stack.pop(0)
# if exprs.is_function_expression(v):
# for i,child in enumerate(v.children):
# new_addr = list(addr)
# new_addr.append(i)
# banned.add(tuple(new_addr))
# stack.append((child,new_addr))
#
# stack = [(expr, [])]
# while len(stack) > 0:
# v, addr = stack.pop(0)
# banned.discard(tuple(addr))
# _,ctxt = get_ctxt(expr, addr, prog, banned)
# term_symb = fetchop(v)
# cond = ','.join(ctxt)
# result.append((cond, term_symb))
# if exprs.is_function_expression(v):
# added = []
# for i,child in enumerate(v.children):
# new_addr = list(addr)
# new_addr.append(i)
# added.append((child, new_addr))
# stack[0:0] = added
#
# get_data_expr.cache[key] = result
# return result
def get_data_expr(expr, prog):
    """Run tcond program *prog* over each history entry of *expr* and collect
    (context-string, fired-rule) training pairs.

    Relies on the precomputed global `exprs_info` from get_exprs_info().
    NOTE(review): expr2history/expr2cache are keyed by expression *strings*
    (see get_exprs_info), so `expr` here is presumably a string — confirm
    against callers.
    """
    global exprs_info
    (expr2history, expr2cache) = exprs_info
    assert (expr in expr2history and expr in expr2cache)
    result = []
    # NOTE: the loop variable deliberately rebinds `expr` to each partial
    # expression recorded in the history.
    for (expr, target_addr) in expr2history[expr]:
        # move_cache : addr x instr -> addr'
        # write_cache : addr -> topsymb, toptype
        (move_cache, write_cache) = expr2cache[expr]
        curr_addr = target_addr
        ctxt = []
        # here, term_symb stands for a fired rule.
        term_symb, _ = write_cache[target_addr]
        for instr in prog:
            if instr in write_instrs:
                if curr_addr != target_addr:
                    symb_at_curr_addr, type_at_curr_addr = write_cache[curr_addr]
                    if instr == Tcond.WRITE_VALUE:
                        ctxt.append(symb_at_curr_addr)
                    else:
                        ctxt.append(type_at_curr_addr)
                else:
                    # Writing at the target node itself contributes epsilon.
                    ctxt.append('_')
            else:
                curr_addr = move_cache[(curr_addr, instr)]
        cond = ','.join(ctxt)
        result.append((cond, term_symb))
    return result
def compute_score_with_cache(mle, instrs, expr):
    """Negative log2-likelihood of *expr* under *mle* for tcond program *instrs*."""
    total = 0.0
    for cond, term_symb in get_data_expr(expr, instrs):
        prob = mle.get(cond, {}).get(term_symb, 0.001)
        # Clamp non-positive probabilities to 0.001 before taking the log.
        total += -1.0 * math.log2(prob if prob > 0.0 else 0.001)
    return total
def get_data_exprs(exprs, prog):
    """Concatenate get_data_expr() results for every expression in *exprs*."""
    result = []
    for expr in exprs:
        result.extend(get_data_expr(expr, prog))
    return result
def get_exprs_info(expr_str_set):
    """Precompute per-expression navigation caches for fast tcond evaluation.

    Returns (expr2history, expr2cache), both keyed by expression strings:
    expr2history maps an expression to its derivation history of
    (partial-expression-string, target_addr) pairs; expr2cache maps a partial
    expression to (move_cache, write_cache) where move_cache : (addr, instr)
    -> addr' and write_cache : addr -> (top symbol, top type).
    """
    global exprstr2expr
    expr_set = [exprstr2expr[exprstr] for exprstr in expr_str_set]
    def get_all_addrs(expr):
        # BFS over the AST collecting every node address (tuple of child indices).
        addrs = set()
        addrs.add(())
        stack = [(expr, [])]
        while len(stack) > 0:
            v, addr = stack.pop(0)
            if exprs.is_function_expression(v):
                for i, child in enumerate(v.children):
                    new_addr = list(addr)
                    new_addr.append(i)
                    addrs.add(tuple(new_addr))
                    stack.append((child, new_addr))
        return addrs
    expr2history = {}
    for expr in expr_set:
        history = get_history(expr)
        expr2history[expr] = history
    # for expr,history in expr2history.items():
    #     print('expr : ', exprs.expression_to_string(expr))
    #     for e, addr in history:
    #         print('\t', exprs.expression_to_string(e), '\t', fetchop(fetch_prod(e, addr)))
    expr2cache = {}
    for _,history in expr2history.items():
        for (expr, target_addr) in history:
            addrs = get_all_addrs(expr)
            write_cache = {}
            move_cache = {}
            for addr in addrs:
                # Precompute every single-step move from this address.
                for instr in move_instrs:
                    new_addr, _ = get_ctxt(expr, addr, [instr], training=True)
                    move_cache[(addr,instr)] = new_addr
                _, ctxt = get_ctxt(expr, addr, [Tcond.WRITE_VALUE], training=True)
                value = ctxt[0]
                # we do not use types for now
                # _, ctxt = get_ctxt(expr, addr, [Tcond.WRITE_TYPE], training=True)
                type = ctxt[0]
                # store value, type at addr into write_cache
                write_cache[addr] = (value, type)
            expr2cache[exprs.expression_to_string(expr)] = (move_cache, write_cache)
    # print(move_cache)
    # print(write_cache)
    # transform history into expr_string -> history_only_with_exprstrings
    expr2history_ = {}
    for (expr, history) in expr2history.items():
        history_ = []
        # NOTE(review): the inner loop rebinds `expr`, so the dict key below
        # uses the *last* history entry's expression. This is only correct if
        # the final history entry is the full expression — TODO confirm
        # against get_history().
        for (expr, target_addr) in history:
            history_.append((exprs.expression_to_string(expr), target_addr))
        history = history_
        expr2history_[exprs.expression_to_string(expr)] = history
    expr2history = expr2history_
    return (expr2history, expr2cache)
# prog : Tcond.enum list
def mutate_prog(prog):
    """Randomly mutate a tcond program and return the new instruction list.

    A program is viewed as a sequence of "chunks", each ending in a write
    instruction. One chunk is picked at random and then one of three
    mutations is applied: DELETE an instruction (or the whole chunk),
    MODIFY one instruction, or ADD an instruction/chunk. DELETE is only
    available when more than one chunk exists.
    """
    flatten = lambda l: [item for sublist in l for item in sublist]
    def get_chunks(prog):
        # Split prog into sublists, each terminated by a write instruction.
        result = []
        chunk = []
        for instr in prog:
            chunk.append(instr)
            if instr in write_instrs:
                result.append(list(chunk))
                chunk.clear()
        return result
    # select target chunk
    chunks = get_chunks(prog)
    target_chunk_index = random.randint(0, len(chunks) - 1)
    mode = random.randint(Mode.DELETE, Mode.ADD) if len(chunks) > 1 else random.randint(Mode.MODIFY, Mode.ADD)
    target_chunk = chunks[target_chunk_index]
    # Every chunk has at least a begin move and a trailing write.
    assert (len(target_chunk) >= 2)
    if mode == Mode.DELETE:
        # if target_chunk == [some move; write], remove the chunk
        if len(target_chunk) == 2:
            assert(len(chunks) > 1)
            return flatten([chunk for i,chunk in enumerate(chunks) if i != target_chunk_index])
        else:
            # target_chunk == [(up | left | prevdfs) ; ... ; write]. do not delete the first and the last.
            remove_index = random.randint(1, len(target_chunk) - 2)
            target_chunk = [instr for i,instr in enumerate(target_chunk) if i != remove_index]
            chunks[target_chunk_index] = target_chunk
            return flatten(chunks)
    elif mode == Mode.MODIFY:
        # if target_chunk == [some move; write], modify the first one. it should be (up | left | prevdfs)
        if len(target_chunk) == 2:
            target_chunk[0] = random.choice(begin_instrs)
            return flatten(chunks)
        else:
            # target_chunk == [(up | left | prevdfs) ; ... ; write]. do not modify the first and the last.
            modify_index = random.randint(1, len(target_chunk) - 2)
            target_chunk[modify_index] = random.choice(move_instrs)
            chunks[target_chunk_index] = target_chunk
            return flatten(chunks)
    else: # ADD
        if random.randint(0,1) == 0: # add instr
            target_chunk.insert(0, random.choice(begin_instrs))
            chunks[target_chunk_index] = target_chunk
            return flatten(chunks)
        else: # add chunk
            result = list(prog)
            result.append(random.choice(begin_instrs))
            result.append(random.choice(write_instrs))
            return result
def r_regent(data_set, prog, lambda_penalize):
    """Average PHOG score of *data_set* under *prog*, plus a write-count penalty.

    Called only during the training phase. Programs with more than
    write_num_threshold write instructions receive the prohibitive max_score.
    """
    global write_num_threshold
    mle = get_mle(prog, data_set, training=True)
    total = sum(compute_score_with_cache(mle, prog, expr) for expr in data_set)
    n_writes = sum(1 for instr in prog if instr in write_instrs)
    penalty = lambda_penalize * n_writes if n_writes <= write_num_threshold else max_score
    return total / len(data_set) + penalty
# computing cross entropy
# k = min(k_fold, len(data_set))
#
# def chunks(l, n):
# """Yield successive n-sized chunks from l."""
# for i in range(0, len(l), n):
# yield l[i:i + n]
#
# l_ent_sum = 0.0
# (part_size, rem) = divmod(len(data_set), k)
# # print([exprs.expression_to_string(e) for e in data_set])
# part_num = 0
# for partition in chunks(list(data_set), part_size):
# test_set = set(partition)
# train_set = list(data_set - test_set)
# # print([exprs.expression_to_string(e) for e in train_set])
# mle = get_mle(prog, train_set, training=True)
# # h_e = get_data_exprs(test_set, prog)
#
# for expr in test_set:
# l_ent_sum += compute_score_with_cache(mle, prog, expr)
# part_num += 1
#
# # for (cond, term_symb) in h_e:
# # prob = mle.get(cond, {}).get(term_symb, 0.001)
# # l_e = -1.0 * math.log2(prob) if prob > 0.0 else 10.0
# # l_ent_sum += l_e
#
# regularize = lambda_penalize * len(prog)
#
#
# return l_ent_sum / part_num + regularize
def get_mle_reg(prog, exprs):
    """Plain maximum-likelihood estimate: cond -> {rule -> P(rule | cond)}.

    Every observed (cond, rule) pair contributes one count; unseen pairs get
    probability zero (for every cond/rule combination seen anywhere).
    """
    data = get_data_exprs(exprs, prog)
    conds = set()
    rules = set()
    cond_num = {}
    condrule_num = {}
    for cond, rule in data:
        conds.add(cond)
        rules.add(rule)
        cond_num[cond] = cond_num.get(cond, 0) + 1
        condrule_num[(cond, rule)] = condrule_num.get((cond, rule), 0) + 1
    mle = {cond: {} for cond in conds}
    for cond in conds:
        denom = cond_num.get(cond, 0)
        for rule in rules:
            mle[cond][rule] = condrule_num.get((cond, rule), 0) / denom
    return mle
# cond : string list, rule : string, sentences : list of string list, all_vocabs : string set
# https://dash.harvard.edu/bitstream/handle/1/25104739/tr-10-98.pdf?sequence=1
@static_var("memo", {})
def wb(cond, rule, all_vocabs, unigram, T, N):
    """Witten-Bell interpolated probability P(rule | cond), memoized.

    cond: list of context symbols; rule: predicted symbol.
    T[prefix]: number of distinct continuations observed after prefix;
    N[sentence]: raw count of the sentence. Recurses on shortened contexts
    down to the unigram base case.
    """
    cond_str = ','.join(cond)
    sentence_str = cond_str + ',' + rule
    if sentence_str in wb.memo:
        return wb.memo[sentence_str]
    # unigram
    if len(cond) == 0:
        return unigram[rule]
    else:
        nom = N[sentence_str] + T[cond_str] * wb(cond[1:], rule, all_vocabs, unigram, T, N)
        denom = T[cond_str]
        # Bug fix: the original reused `sentence_str` as the loop variable
        # here, so the memo entry below was stored under the *last vocab's*
        # key instead of the queried (cond, rule) key, poisoning the cache.
        for vocab in all_vocabs:
            denom = denom + N[cond_str + ',' + vocab]
        if denom > 0.0:
            result = nom / denom
            wb.memo[sentence_str] = result
            return result
        else:
            return 0
def get_mle_wb(prog, exprs):
    """Witten-Bell-smoothed MLE table for tcond program *prog* over *exprs*.

    Falls back to get_mle_reg() when the full cond x rule table would exceed
    wb_threshold entries. Returns mle : cond_str -> {vocab -> probability}.
    """
    all_vocabs = get_mle.all_vocabs
    # length of context
    cond_len = len([instr for instr in prog if instr in write_instrs])
    # give up if too many entries are expected.
    n_mle_entries = len(all_vocabs) ** (cond_len + 1)
    print('# mle entries : ', n_mle_entries)
    if (n_mle_entries > wb_threshold):
        print('gave up interpolation due to too many possibilities : ', n_mle_entries)
        return get_mle_reg(prog, exprs)
    data = get_data_exprs(exprs, prog)
    # sentences : list of string list
    sentences = []
    for (cond, rule) in data:
        cond = cond.split(',')
        cond.append(rule)
        sentences.append(cond)
    # compute unigram : vocab -> R
    def compute_unigram(sentences):
        # Witten-Bell unigram: unseen vocabs share T/(N+T) mass uniformly.
        unigram = {}
        used_vocabs = set([])
        T = len(all_vocabs)
        N = 0
        for sentence in sentences:
            N += len(sentence)
            for vocab in sentence:
                used_vocabs.add(vocab)
        never_used_vocabs = all_vocabs - used_vocabs
        Z = len(never_used_vocabs)
        for vocab in never_used_vocabs:
            unigram[vocab] = T / ((N + T) * Z)
        for vocab in used_vocabs:
            cnt = 0
            for sentence in sentences:
                cnt += len([v for v in sentence if v == vocab])
            unigram[vocab] = cnt / (N + T)
        return unigram
    unigram = compute_unigram(sentences)
    # pre-compute
    # T : prefix -> |{prefix . c | c \in all_vocabs}|
    # N : sentence -> # of appearance of the sentence
    T = {}
    N = {}
    # sentences : list of string
    # sentences_wo_dup : set of string
    sentences = [','.join(sentence) for sentence in sentences]
    sentences_wo_dup = set(sentences)
    # all possible contexts
    # NOTE(review): counts below use substring membership (`in`), which can
    # match across symbol boundaries — presumably acceptable here; confirm.
    for cond in itertools.product(all_vocabs, repeat=cond_len):
        cond = list(cond)
        for i in range(0, len(cond)):
            # all sub contexts
            subcond = cond[i:]
            subcond_str = ','.join(subcond)
            T[subcond_str] = len([sentence for sentence in sentences_wo_dup if subcond_str in sentence])
            for vocab in all_vocabs:
                sen = subcond_str + ',' + vocab
                N[sen] = len([sentence for sentence in sentences if sen in sentence])
    # compute mle
    mle = {}
    for cond in itertools.product(all_vocabs, repeat=cond_len):
        cond = list(cond)
        cond_str = ','.join(cond)
        mle[cond_str] = {}
        for vocab in all_vocabs:
            mle[cond_str][vocab] = wb(cond, vocab, all_vocabs, unigram, T, N)
    return mle
def get_mle_backoff(prog, exprs):
    """Stupid-backoff-smoothed MLE table (factor `alpha`) for *prog* over *exprs*.

    Falls back to get_mle_reg() when the full table would exceed wb_threshold
    entries. Returns mle : cond_str -> {vocab -> score}; note stupid backoff
    yields relative scores, not a normalized distribution.
    """
    global alpha
    all_vocabs = get_mle.all_vocabs
    # length of context
    cond_len = len([instr for instr in prog if instr in write_instrs])
    # give up if too many entries are expected.
    n_mle_entries = len(all_vocabs) ** (cond_len + 1)
    print('# mle entries : ', n_mle_entries)
    if (n_mle_entries > wb_threshold):
        print('gave up interpolation due to too many possibilities : ', n_mle_entries)
        return get_mle_reg(prog, exprs)
    data = get_data_exprs(exprs, prog)
    # sentences : list of string list
    sentences = []
    for (cond, rule) in data:
        cond = cond.split(',')
        cond.append(rule)
        sentences.append(cond)
    print('# sentences : ', len(sentences))
    # compute unigram : vocab -> R
    def compute_unigram(sentences):
        # Witten-Bell-style unigram; unseen vocabs share T/(N+T) mass uniformly.
        unigram = {}
        used_vocabs = set([])
        T = len(all_vocabs)
        N = 0
        for sentence in sentences:
            N += len(sentence)
            for vocab in sentence:
                used_vocabs.add(vocab)
        never_used_vocabs = all_vocabs - used_vocabs
        Z = len(never_used_vocabs)
        for vocab in never_used_vocabs:
            unigram[vocab] = T / ((N + T) * Z)
        for vocab in used_vocabs:
            cnt = 0
            for sentence in sentences:
                cnt += len([v for v in sentence if v == vocab])
            unigram[vocab] = cnt / (N + T)
        return unigram
    unigram = compute_unigram(sentences)
    # pre-compute
    # N : sentence -> # of appearance of the sentence
    N = {}
    # sentences : list of string
    sentence_strs = [','.join(sentence) for sentence in sentences]
    # init
    # NOTE(review): counts use str.count (substring occurrences), which can
    # match across symbol boundaries — presumably acceptable here; confirm.
    for vocab in all_vocabs:
        sum = 0
        for sentence in sentence_strs:
            sum += sentence.count(vocab)
        N[vocab] = sum
    # all possible contexts
    for cond in itertools.product(all_vocabs, repeat=cond_len):
        for i in range(0, len(cond)):
            # all sub contexts
            subcond = cond[i:]
            subcond_str = ','.join(subcond)
            for vocab in all_vocabs:
                sen = subcond_str + ',' + vocab
                sum = 0
                for sentence in sentence_strs:
                    sum += sentence.count(sen)
                # sum += len([m.start() for m in re.finditer('(?=%s)' % sen, sentence)])
                N[sen] = sum
                # N[sen] = len([sentence for sentence in sentences if sen in sentence])
    S = {}
    # cond : string list
    def get_S(cond, vocab):
        # Stupid backoff: use raw ratio when the full context was seen,
        # otherwise recurse on the shortened context scaled by alpha.
        if len(cond) == 0: return unigram[vocab]
        cond_str = ','.join(cond)
        sen = cond_str + ',' + vocab
        if sen in S:
            return S[sen]
        elif sen in N and N[sen] > 0:
            result = N[sen] / N[cond_str]
            if result > 1 :
                print('weird : %d %d\n' % (N[sen], N[cond_str]))
                assert(False)
            S[sen] = result
            return result
        else:
            result = alpha * get_S(cond[1:], vocab)
            S[sen] = result
            return result
    mle = {}
    for cond in itertools.product(all_vocabs, repeat=cond_len):
        cond_str = ','.join(cond)
        mle[cond_str] = {}
        for vocab in all_vocabs:
            # print(cond_str, ' ', vocab)
            # '_' (epsilon) never needs a prediction score.
            if vocab != '_':
                mle[cond_str][vocab] = get_S(cond, vocab)
    return mle
def remove_zero_probs(mle):
    """Delete zero-probability entries from *mle* in place.

    mle: cond -> {symbol -> prob}, or None (then nothing happens).
    Fix: identity comparison `is None` instead of `== None`.
    """
    if mle is None:
        return
    # Collect first, then delete, to avoid mutating dicts while iterating.
    zero_entries = [(ctxt, topsymb)
                    for ctxt, topsymb_to_prob in mle.items()
                    for topsymb, prob in topsymb_to_prob.items()
                    if prob == 0.0]
    for (ctxt, topsymb) in zero_entries:
        del mle[ctxt][topsymb]
def get_mle(prog, exprs, training=True):
    """Estimate rule probabilities; backoff smoothing applies only outside training."""
    if do_wb and not training:
        mle = get_mle_backoff(prog, exprs)
    else:
        mle = get_mle_reg(prog, exprs)
    remove_zero_probs(mle)
    return mle
def data_sampling(progs, exprs, size, rDp, lambda_penalize):
    """Hill-climb a representative training subset of *exprs* of roughly *size*.

    progs: candidate tcond programs; rDp: prog-string -> score on the full
    set. Returns the subset whose worst-case score gap versus the full set
    (over all progs) is smallest after max_iter_sample mutation rounds.
    """
    def get_repr(Q, d):
        # Largest deviation between full-set score and subset score over Q.
        max_repr = 0.0
        for p in Q:
            max_repr = max(max_repr, math.fabs(rDp[prog_to_str(p)] - r_regent(d, p, lambda_penalize)))
        return max_repr
    def mutate_data(data_set):
        new_data_set = set(data_set)
        # Only allow DELETE once the subset has exceeded the requested size.
        mode = random.randint(0, 2) if len(new_data_set) > size else random.randint(1, 2)
        # random.sample() rejects sets since Python 3.11; pick from a list copy.
        if mode == Mode.DELETE:
            new_data_set.remove(random.choice(list(new_data_set)))
        elif mode == Mode.MODIFY:
            new_data_set.remove(random.choice(list(new_data_set)))
            new_data_set.add(random.choice(list(exprs)))
        else:  # ADD
            new_data_set.add(random.choice(list(exprs)))
        score = get_repr(progs, new_data_set)
        # Bug fix: return the mutated set. The original returned the input
        # `data_set`, silently discarding the mutation it had just scored.
        return new_data_set, score
    def pick_random_data(exprs, size):
        result = []
        for _ in range(0, pool_size):
            data = set([])
            for _ in range(0, size):
                data.add(random.choice(list(exprs)))
            result.append((data, get_repr(progs, data)))
        return result
    # pool : (expr list * score) list
    # generating init_pool
    pool = pick_random_data(exprs, size)
    iter = 0
    while iter < max_iter_sample:
        iter += 1
        # mutate
        mutated_pool = [mutate_data(data) for (data, _) in pool]
        # sort ascending by deviation: lower is better
        new_pool = pool + mutated_pool
        new_pool.sort(key=lambda x: x[1])
        # keep the best pool_size candidates
        pool.clear()
        for i in range(0, len(new_pool)):
            if i < pool_size: pool.append(new_pool[i])
    return pool[0][0]
def prog_gen(data_set, progs, lambda_penalize):
    """Evolve a tcond program minimizing r_regent over *data_set*.

    progs: (prog, score) seed pairs added to the random initial pool.
    Returns the best program found after max_iter_gen mutation rounds.
    """
    def mutate_prog_sub(prog):
        new_prog = mutate_prog(prog)
        new_score = r_regent(data_set, new_prog, lambda_penalize)
        return new_prog, new_score
    def pick_random_prog(size):
        # Generate a random well-formed program: begin move, then alternating
        # moves/writes, always ending with a write instruction.
        class State(IntEnum):
            MOVE = 1
            WRITE = 2
        result = []
        size -= 1
        result.append(random.choice([Tcond.UP, Tcond.LEFT, Tcond.PREV_DFS]))
        state = State.MOVE
        while size > 1:
            size -= 1
            if state == State.MOVE:
                instr = random.choice(move_instrs)
            else:
                instr = random.choice(write_instrs)
            result.append(instr)
            if state == State.WRITE or size == 2:
                state = State.MOVE
            else:
                state = random.choice([State.MOVE, State.WRITE])
        result.append(random.choice(write_instrs))
        return result
    def pick_random_progs():
        global max_prog_size
        result = []
        for i in range(0, pool_size):
            prog = pick_random_prog(random.randint(2,max_prog_size))
            result.append((prog, r_regent(data_set, prog, lambda_penalize)))
        return result
    # pool : (prog * score) list
    # generating init_pool
    pool = pick_random_progs() + progs
    iter = 0
    while iter < max_iter_gen:
        iter += 1
        # mutate
        mutated_pool = [mutate_prog_sub(prog) for (prog, _) in pool]
        # sort
        new_pool = pool + mutated_pool
        new_pool.sort(key=lambda x: x[1])
        # drop
        pool.clear()
        # for i in range(0, len(new_pool)):
        #     if i < pool_size: pool.append(new_pool[i])
        # Stochastic pruning: better-ranked candidates are more likely kept,
        # so up to pool_size survivors are sampled with rank-biased probability.
        n_inserted = 0
        for i in range(0, len(new_pool)):
            if n_inserted < pool_size and random.random() < (len(new_pool) - i) / len(new_pool):
                n_inserted += 1
                pool.append(new_pool[i])
    return pool[0][0]
# exprs : expr set
def train_model(exprs, lambda_penalize, silent=False):
    """Train a PHOG tcond program on the expression set *exprs*.

    Iteratively grows a sampled data set (when do_sample is on) and calls
    prog_gen to evolve candidate programs; returns the final program (last
    one when sampling, otherwise the lowest-scoring one seen).
    """
    global do_sample
    if not silent: print('Training with %d expressions...' % (len(exprs)))
    assert (k_fold > 1)
    # init
    random.seed()
    iter = 0
    # default prog
    p = [Tcond.UP, random.choice(write_instrs)]
    progs = []
    if do_sample:
        # NOTE(review): random.sample() on a set raises TypeError from
        # Python 3.11 — presumably this ran on an older interpreter; confirm.
        data_set = set(random.sample(exprs, min(len(exprs), k_fold)))
    else:
        data_set = exprs
    rDp = {}
    while iter < max_iter:
        if not silent: print(iter, '-th iteration')
        iter += 1
        if iter > 1:
            # Grow the sampled data set by one expression per iteration.
            if len(data_set) < len(exprs) - 1 and do_sample:
                data_set = data_sampling(progs, exprs, len(data_set) + 1, rDp, lambda_penalize)
            else: data_set = exprs
        # print('generated data set : ', len(data_set))
        progs_so_far = []
        for p in progs:
            p_with_score = (p, r_regent(data_set, p, lambda_penalize) if do_sample else rDp[prog_to_str(p)])
            progs_so_far.append(p_with_score)
        p = prog_gen(data_set, progs_so_far, lambda_penalize)
        # Score the new program on the *full* expression set.
        rDp[prog_to_str(p)] = r_regent(exprs, p, lambda_penalize)
        if not silent: print('generated prog : %s (%.2f)' % (prog_to_str(p), rDp[prog_to_str(p)]))
        if p not in progs:
            progs.append(p)
    # if data sampling was done, the last one is the best
    if do_sample:
        final_prog = p
    else: # else, pick the best among the generated ones
        final_prog = min(progs + [p], key=(lambda p: rDp[prog_to_str(p)]))
    return final_prog
import re
def print_mle(f, mle):
    """Write positive mle entries to *f* as 'cond symbol prob*1000' lines."""
    for cond, rule_probs in mle.items():
        for symb, prob in rule_probs.items():
            if not prob > 0.0:
                continue
            f.write('%s %s %d\n' % (cond, symb, int(prob * 1000.0)))
def get_all_vocabs(grammars):
    """Collect every production head symbol across *grammars*, plus epsilon '_'."""
    # '_' : epsilon
    vocabs = set(['_'])
    for grammar in grammars:
        # Nonterminal names are irrelevant here; only productions matter.
        for rule in grammar.rules.values():
            for prod in rule:
                vocabs.add(fetchop_rewrite(prod))
                # vocabs.add(fetchtype_rewrite(prod))
    return vocabs
def param_setting(args):
    """Copy tuning parameters from parsed CLI *args* into module globals,
    then echo the effective settings."""
    # (arg attribute name, module global name)
    # lambda_penalize intentionally not wired up (kept disabled as before).
    _PARAM_MAP = [
        ('alpha', 'alpha'),
        ('max_iter', 'max_iter'),
        ('max_iter_gen', 'max_iter_gen'),
        ('max_iter_sample', 'max_iter_sample'),
        ('pool_size', 'pool_size'),
        ('max_size', 'max_prog_size'),
        ('k_fold', 'k_fold'),
        ('do_wb', 'do_wb'),
        ('do_sample', 'do_sample'),
        ('eu', 'eu'),
    ]
    for attr, global_name in _PARAM_MAP:
        globals()[global_name] = getattr(args, attr)
    print('Settings : ')
    # print('\t lambda_penalize : ', lambda_penalize)
    print('\t max_iter : ', max_iter)
    print('\t max_iter_gen : ', max_iter_gen)
    print('\t max_iter_sample : ', max_iter_sample)
    print('\t pool_size : ', pool_size)
    print('\t k_fold : ', k_fold)
    print('\t do_wb : ', do_wb)
    print('\t do_sample : ', do_sample)
    print('\t eu : ', eu)
# collect term and pred exprs for eusolver
def get_all_atomic_exprs(fun_exprs):
    """Split *fun_exprs* into atomic term and predicate expressions (for eusolver).

    Terms are the non-boolean leaves/subtrees once 'ite' nodes are recursed
    through; predicates are the 'ite' conditions, each wrapped in a dummy
    predicate macro. Returns (term_exprs, pred_exprs) as sets.
    """
    def is_bool_expr(expr):
        return isinstance(exprs.get_expression_type(expr), exprtypes._BoolType)
    def get_all_atomic_term_expr(expr):
        # Descend through ite nodes; everything non-boolean elsewhere is a term.
        result = set([])
        # macro functions involving ite have been resolved.
        app = exprs.find_application(expr, 'ite')
        if app is not None:
            for child in expr.children:
                result.update(get_all_atomic_term_expr(child))
            return result
        else:
            if not is_bool_expr(expr): result.add(expr)
            return result
        # if exprs.is_function_expression(expr):
        #     include_bool = False
        #     for child in get_all_exprs(expr):
        #         include_bool = include_bool or is_bool_expr(child)
        #     if include_bool:
        #         for child in expr.children:
        #             result.update(get_all_atomic_term_expr(child))
        #         return result
        #     else:
        #         result.add(expr)
        #         return result
        # else:
        #     if not is_bool_expr(expr):
        #         result.add(expr)
        #     return result
    def get_all_atomic_pred_expr(expr):
        # Collect ite conditions (child 0) at every ite node.
        result = set([])
        app = exprs.find_application(expr, 'ite')
        if app is not None:
            result.add(expr.children[0])
            for child in expr.children:
                result.update(get_all_atomic_pred_expr(child))
            return result
        else:
            return result
        # if is_bool_expr(expr):
        #     result.add(expr)
        #     return result
        # else:
        #     if exprs.is_function_expression(expr):
        #         include_bool = False
        #         for child in get_all_exprs(expr):
        #             include_bool = include_bool or is_bool_expr(child)
        #         if include_bool:
        #             for child in expr.children:
        #                 result.update(get_all_atomic_pred_expr(child))
        #             return result
        #         else:
        #             return result
        #     else:
        #         return result
    def add_dummy_pred(expr):
        # Wrap a predicate in an identity macro named dummy_pred_name
        # (presumably defined in phog_utils — TODO confirm).
        arg_var = exprs.VariableExpression(exprs.VariableInfo(exprtypes.BoolType(), 'd', 0))
        dummy_macro_func = semantics_types.MacroFunction(dummy_pred_name, 1, (exprtypes.BoolType(),),
                                                         exprtypes.BoolType(), arg_var, [arg_var])
        expr = exprs.FunctionExpression(dummy_macro_func, (expr,))
        return expr
    result_termexprs = set([])
    result_predexprs = set([])
    for fun_expr in fun_exprs:
        result_termexprs.update(get_all_atomic_term_expr(fun_expr))
        result_predexprs.update(get_all_atomic_pred_expr(fun_expr))
    # transform pred exprs for EUSolver (encosing with 'dummy_pred_id')
    result_predexprs = set([add_dummy_pred(e) for e in result_predexprs])
    return (result_termexprs, result_predexprs)
def get_ngram_instr(ngram):
    """Fixed tcond program for an n-gram PHOG: *ngram* PREV_DFS moves, one write."""
    return [Tcond.PREV_DFS] * ngram + [Tcond.WRITE_VALUE]
def lambda_selector(data):
    """Pick the best penalty weight from lambda_candidates via stratified
    k-fold cross-validation over the expression strings in *data*.

    Strata come from KMeans clustering on pairwise Levenshtein similarity.
    Returns the lambda with the lowest average held-out score.
    """
    global n_cores
    global lambda_candidates
    import numpy as np
    import sklearn.cluster
    import distance
    print('lambda selection...')
    data = list(data)
    k = min(k_fold, len(data))
    lambda_vals = lambda_candidates
    lambda2scores = {}
    for lambda_val in lambda_vals:
        lambda2scores[lambda_val] = []
    def clustering(expr_strs, k):
        # Cluster expression strings by (negated) edit distance.
        # NOTE(review): KMeans on a similarity matrix treats rows as feature
        # vectors, and n_jobs/precompute_distances were removed in newer
        # scikit-learn releases — confirm the pinned sklearn version.
        words = np.asarray(expr_strs) # So that indexing with a list will work
        lev_similarity = -1 * np.array([[distance.levenshtein(w1, w2) for w1 in words] for w2 in words])
        affprop = sklearn.cluster.KMeans(n_clusters=k, n_jobs=n_cores, precompute_distances=True, max_iter=1000, n_init=100)
        affprop.fit(lev_similarity)
        ind2words = {}
        for i, ind in enumerate(affprop.labels_):
            if ind in ind2words:
                ind2words[ind].append(expr_strs[i])
            else:
                ind2words[ind] = [expr_strs[i]]
        return ind2words
    # cross-validation
    def get_partitions(progs, k_fold):
        # Plain contiguous k-fold split (last fold absorbs the remainder).
        subset_size = int(len(progs) / k_fold)
        result = []
        for i in range(k_fold):
            if i == k_fold - 1:
                testing_this_round = progs[i * subset_size:]
            else:
                testing_this_round = progs[i * subset_size:][:subset_size]
            training_this_round = list(set(progs) - set(testing_this_round))
            # training_this_round = solution_files[:i * subset_size] + solution_files[(i + 1) * subset_size:]
            print('testing : ', testing_this_round)
            print('training : ', training_this_round)
            result.append((testing_this_round, training_this_round))
        return result
    def get_stratified_partitions(progs, k):
        # Interleave cluster members round-robin so folds mix all strata.
        # NOTE(review): clusters[i] assumes every label 0..k_fold-1 exists and
        # that emptied clusters are handled; an empty cluster index would
        # raise KeyError — confirm on small inputs.
        global k_fold
        clusters = clustering(progs, k)
        ordered_progs = []
        cycler = itertools.cycle(range(0, k_fold))
        while len(ordered_progs) != len(progs):
            i = next(cycler)
            if len(clusters[i]) > 0:
                prog = random.choice(clusters[i])
                clusters[i].remove(prog)
                ordered_progs.append(prog)
        return k, get_partitions(ordered_progs, k)
    # def get_stratified_partitions(sols, k):
    #     result = {}
    #     clusters = {}
    #     while k > 0:
    #         print('Computing clusters for stratified %d cross-validation...' % k)
    #         clusters = clustering(sols, k)
    #         cluster_sizes = [len(sols) for _, sols in clusters.items()]
    #         print('Cluster sizes : ', cluster_sizes)
    #         if len([n for n in cluster_sizes if n >= k]) == k: break
    #         print('Outliers : ', [sols for _, sols in clusters.items() if len(sols) <= k])
    #         k -= 1
    #     if k < 2:
    #         print('Data is so highly biased that stratified cross-validation is not feasible.')
    #         assert False
    #
    #     for _, sols in clusters.items():
    #         subset_size = int(len(sols) / k)
    #         for i in range(k):
    #             testing_this_round = sols[i * subset_size:][:subset_size]
    #             training_this_round = sols[:i * subset_size] + sols[(i + 1) * subset_size:]
    #             result_test, result_training = result.get(i, ([], []))
    #             result_test.extend(testing_this_round)
    #             result_training.extend(training_this_round)
    #             result[i] = (result_test, result_training)
    #     return k, result
    k, stratified_cv_result = get_stratified_partitions(data, k)
    for lambda_val in lambda_vals:
        print('lambda : %.2f' % lambda_val)
        for i in range(0, k):
            (test_set, train_set) = stratified_cv_result[i]
            # print([exprs.expression_to_string(e) for e in test_set])
            # print([exprs.expression_to_string(e) for e in train_set])
            prog = train_model(train_set, lambda_val, silent=True)
            mle = get_mle(prog, train_set, training=True)
            for expr in test_set:
                l_ent_sum = compute_score_with_cache(mle, prog, expr)
                lambda2scores[lambda_val].append(l_ent_sum)
        print('avg score : %.2f' % (sum(lambda2scores[lambda_val]) / len(lambda2scores[lambda_val])))
    best_lambda = min(lambda2scores.keys(), key=(lambda key: sum(lambda2scores[key]) / len(lambda2scores[key])))
    print('lambda chosen : ', best_lambda)
    return best_lambda
if __name__ == "__main__":
    # Entry point: trains PHOG models from SyGuS benchmark files (each file is
    # the original benchmark plus its solution) and dumps the learned MLEs
    # either as text or as a pickle, depending on -text.
    import pickle
    import sys
    import argparse
    # expression trees and the training recursion can be deep; raise the limit up front
    sys.setrecursionlimit(10000)
    argparser = argparse.ArgumentParser(description='Train PHOG model')
    argparser.add_argument('-lambda_penalize', type=float, default=0.0)
    argparser.add_argument('-alpha', type=float, default=0.4)
    argparser.add_argument('-max_iter', type=int, default=10)
    argparser.add_argument('-max_iter_gen', type=int, default=20)
    argparser.add_argument('-max_iter_sample', type=int, default=20)
    argparser.add_argument('-pool_size', type=int, default=10)
    argparser.add_argument('-max_size', type=int, default=30)
    argparser.add_argument('-k_fold', type=int, default=4)
    argparser.add_argument('-ngram', type=int, default=0)
    argparser.add_argument('-do_wb', action='store_true')
    argparser.add_argument('-do_sample', action='store_true')
    argparser.add_argument('-text', action='store_true')
    argparser.add_argument('-eu', action='store_true')
    argparser.add_argument('-out', type=str, default='mle')
    argparser.add_argument('bench', nargs=argparse.REMAINDER)
    # no arguments at all: print usage and quit instead of training on nothing
    if len(sys.argv) < 2:
        argparser.print_usage()
        exit(0)
    args = argparser.parse_args()
    param_setting(args)
    output_file = args.out
    benchmark_files = args.bench
    ngram = args.ngram
    text_out = args.text
    # benchmark_file format : original benchmark_file + its solution
    rettype2fun_exprs, grammars = get_func_exprs_grammars(benchmark_files)
    # print([exprs.expression_to_string(e) for e in fun_exprs])
    # collect vocab: '_' is always present as the out-of-vocabulary placeholder
    all_vocabs = set(['_'])
    for _, fun_exprs in rettype2fun_exprs.items():
        if len(grammars) == 0:
            # no grammar available: derive the vocabulary from the operators
            # that actually appear in the solution expressions
            for fun_expr in fun_exprs:
                sub_exprs = exprs.get_all_exprs(fun_expr)
                for expr in sub_exprs:
                    all_vocabs.add(fetchop(expr))
                    # all_vocabs.add(fetchtype(expr))
        else:
            all_vocabs.update(get_all_vocabs(grammars))
    print('# of vocabs : ', len(all_vocabs))
    # print([vocab for vocab in all_vocabs])
    get_mle.all_vocabs = all_vocabs
    # setting maximum number of writes
    all_vocabs = get_mle.all_vocabs
    # NOTE(review): threshold formula presumably bounds the context table size
    # (wb_threshold and max_prog_size are module-level settings) — verify
    write_num_threshold = math.log2(wb_threshold) / math.log2(len(all_vocabs)) - 1 if args.do_wb else max_prog_size / 2
    print('PHOG instrs with |Write| > %.2f will be ignored.' % write_num_threshold)
    # for eusolver
    rettype2mle = {}
    for ret_type_eusolver, fun_exprs in rettype2fun_exprs.items():
        (ret_type, eusolver) = ret_type_eusolver
        print('Training on exprs returning %s %s' % (str(ret_type), '(for eusolver)' if eusolver else ''))
        # eusolver mode trains separate models for terms and predicates
        (term_exprs, pred_exprs) = get_all_atomic_exprs(fun_exprs) if eusolver else (fun_exprs, set([]))
        print('Collected term exprs for training : ', len(term_exprs))
        # for expr in term_exprs:
        #     print(exprs.expression_to_string(expr))
        print('Collected pred exprs for training : ', len(pred_exprs))
        # for expr in pred_exprs:
        #     print(exprs.expression_to_string(expr))
        # collecting pre-computed information to expedite learning
        total_exprs = list(term_exprs) + list(pred_exprs)
        for e in total_exprs:
            estr = exprs.expression_to_string(e)
            exprstr2expr[estr] = e
        exprs_info = get_exprs_info(list(exprstr2expr.keys()))
        # freeze exprs to strings (training works on string keys from here on)
        term_exprs_ = [exprs.expression_to_string(e) for e in term_exprs]
        term_exprs = term_exprs_
        pred_exprs_ = [exprs.expression_to_string(e) for e in pred_exprs]
        pred_exprs = pred_exprs_
        print('learn PHOG for term exprs')
        if len(term_exprs) == 0:
            term_prog = None
            term_mle = None
        else:
            if ngram == 0:
                # lambda_penalize == 0.0 means "not given": pick it by cross-validation
                if args.lambda_penalize == 0.0:
                    lambda_penalize = lambda_selector(term_exprs)
                else:
                    lambda_penalize = args.lambda_penalize
                term_prog = train_model(term_exprs, lambda_penalize)
                # term_prog = [Tcond.LEFT, Tcond.WRITE, Tcond.UP, Tcond.WRITE]
                # term_prog = [Tcond.WRITE]
            else: # n PrevDFS seq
                term_prog = get_ngram_instr(ngram)
            print(prog_to_str(term_prog))
            term_mle = get_mle(term_prog, term_exprs, training=False)
            # print probabilities of training data
            # for term_expr in term_exprs:
            #     print('%s : %.2f' % (term_expr, compute_score_with_cache(term_mle, term_prog, term_expr)))
            # print_mle(term_mle)
        print('learn PHOG for pred exprs')
        if len(pred_exprs) == 0:
            pred_prog = None
            pred_mle = None
        else:
            if ngram == 0:
                if args.lambda_penalize == 0.0:
                    lambda_penalize = lambda_selector(pred_exprs)
                else:
                    lambda_penalize = args.lambda_penalize
                pred_prog = train_model(pred_exprs, lambda_penalize)
            else: # n PrevDFS seq
                pred_prog = get_ngram_instr(ngram)
            print(prog_to_str(pred_prog))
            pred_mle = get_mle(pred_prog, pred_exprs, training=False)
            # print_mle(pred_mle)
        # cleaning up mles
        remove_zero_probs(term_mle)
        remove_zero_probs(pred_mle)
        # store the mles for dumping
        rettype2mle[(str(ret_type), eusolver)] = ((term_prog, term_mle), (pred_prog, pred_mle))
    if text_out:
        # text output: only non-eusolver models are written (see XXX below)
        with open(output_file, 'w') as f:
            for (ret_type_str, eusolver), ((term_prog, term_mle), (pred_prog, pred_mle)) in rettype2mle.items():
                # XXX : for stochastic solver
                if not eusolver:
                    # f.write('%s %s\n' % (ret_type_str, '(eusolver)' if eusolver else ''))
                    f.write('%s\n' % prog_to_str(term_prog))
                    print_mle(f, term_mle)
    else:
        with open(output_file, 'wb') as f:
            pickle.dump(rettype2mle, f)
|
import ckan.model as model
from ckan.tests import *
from ckan.lib.base import *
import ckan.authz as authz
from test_edit_authz import check_and_set_checkbox
class TestPackageEditAuthz(TestController):
@classmethod
def setup_class(self):
# for the authorization editing tests we set up test data so:
# three users, madeup-sysadmin , madeup-administrator, and madeup-another
# one authzgroup
# two packages test6 and test6a, m-a is admin on both
model.repo.init_db()
model.repo.new_revision()
self.sysadmin = 'madeup-sysadmin'
sysadmin_user = model.User(name=unicode(self.sysadmin))
self.admin = 'madeup-administrator'
admin_user = model.User(name=unicode(self.admin))
self.another = u'madeup-another'
another_user = model.User(name=unicode(self.another))
self.authzgroup = u'madeup-authzgroup'
authzgroup = model.AuthorizationGroup(name=unicode(self.authzgroup))
for obj in sysadmin_user, admin_user, another_user, authzgroup:
model.Session.add(obj)
model.add_user_to_role(sysadmin_user, model.Role.ADMIN, model.System())
model.repo.new_revision()
self.pkgname = u'test6'
self.pkgname2 = u'test6a'
pkg = model.Package(name=self.pkgname)
pkg2 = model.Package(name=self.pkgname2)
model.Session.add(pkg)
model.Session.add(pkg2)
admin_user = model.User.by_name(unicode(self.admin))
assert admin_user
model.setup_default_user_roles(pkg, admins=[admin_user])
model.setup_default_user_roles(pkg2, admins=[admin_user])
model.repo.commit_and_remove()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
def test_0_nonadmin_cannot_edit_authz(self):
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, status=[302, 401])
res = res.follow()
assert res.request.url.startswith('/user/login')
def test_1_admin_has_access(self):
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':
self.admin})
def test_1_sysadmin_has_access(self):
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':
self.sysadmin})
def test_2_read_ok(self):
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':
self.admin})
assert self.pkgname in res
# all the package\'s users and roles should appear in tables
assert '<tr' in res
for (user,role) in self.package_roles():
assert user in res
assert role in res
def package_roles(self):
pkg = model.Package.by_name(self.pkgname)
list = [ (r.user.name, r.role) for r in pkg.roles if r.user]
list.extend([(r.authorized_group.name, r.role) for r in pkg.roles if r.authorized_group])
return list
def assert_package_roles_to_be(self, roles_list):
prs=self.package_roles()
ok = ( len(prs) == len(roles_list) )
for r in roles_list:
if not r in prs:
ok = False
if not ok:
print "expected roles: ", roles_list
print "actual roles: ", prs
assert False, "roles not as expected"
def change_roles(self, user):
# load authz page
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':user})
assert self.pkgname in res
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
#admin makes visitor an editor and logged in an admin
form = res.forms['theform']
check_and_set_checkbox(form, u'visitor', u'editor', False, True)
check_and_set_checkbox(form, u'logged_in', u'admin', False, True)
check_and_set_checkbox(form, u'logged_in', u'reader', True, False)
res = form.submit('save', extra_environ={'REMOTE_USER': user})
# ensure db was changed
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('visitor', 'editor'),
('visitor', 'reader'),
('logged_in', 'admin')])
# ensure rerender of form is changed
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':user})
assert self.pkgname in res
# check that the checkbox states are what we think they should be
# and put things back how they were.
form = res.forms['theform']
check_and_set_checkbox(form, u'visitor', u'reader', True, True)
check_and_set_checkbox(form, u'logged_in', u'admin', True, False)
check_and_set_checkbox(form, u'visitor', u'editor', True, False)
check_and_set_checkbox(form, u'logged_in', u'reader', False, True)
res = form.submit('save', extra_environ={'REMOTE_USER': user})
# ensure db was changed
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
def test_3_admin_changes_role(self):
self.change_roles(self.admin)
def test_3_sysadmin_changes_role(self):
self.change_roles(self.sysadmin)
def delete_role_as(self,user):
# get the authz page, check that visitor's in there
# remove visitor's role on the package
# re-get the page and make sure that visitor's not in there at all
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':user})
assert self.pkgname in res
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
assert 'visitor' in res
assert 'madeup-administrator' in res
assert 'logged_in' in res
#admin removes visitor's only role
form = res.forms['theform']
check_and_set_checkbox(form, u'visitor', u'reader', True, False)
res = form.submit('save', extra_environ={'REMOTE_USER': user})
# ensure db was changed
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('logged_in', 'reader')])
# ensure rerender of form is changed
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':user})
assert self.pkgname in res
assert 'visitor' not in res
assert 'madeup-administrator' in res
assert 'logged_in' in res
# check that the checkbox states are what we think they should be
form = res.forms['theform']
check_and_set_checkbox(form, u'logged_in', u'reader', True, True)
check_and_set_checkbox(form, u'madeup-administrator', u'admin', True, True)
# now we should add visitor back in, let's make him a reader
form = res.forms['addform']
form.fields['new_user_name'][0].value='visitor'
checkbox = [x for x in form.fields['reader'] \
if x.__class__.__name__ == 'Checkbox'][0]
# check it's currently unticked
assert checkbox.checked == False
# tick it and submit
checkbox.checked=True
res = form.submit('add', extra_environ={'REMOTE_USER':user})
assert "User role(s) added" in res, "don't see flash message"
# check that the page contains strings for everyone
assert 'visitor' in res
assert 'madeup-administrator' in res
assert 'logged_in' in res
# check that the roles in the db are back to normal
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
def test_4_admin_deletes_role(self):
self.delete_role_as(self.admin)
def test_4_sysadmin_deletes_role(self):
self.delete_role_as(self.sysadmin)
def test_5_add_change_delete_authzgroup(self):
user=self.admin
# get the authz page, check that authzgroup isn't in there
offset = url_for(controller='package', action='authz', id=self.pkgname)
res = self.app.get(offset, extra_environ={'REMOTE_USER':user})
assert self.pkgname in res
# check the state of the database
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
# and that corresponding user strings are in the authz page
assert 'visitor' in res
assert 'madeup-administrator' in res
assert 'logged_in' in res
assert 'madeup-authzgroup' not in res
# add madeup-authzgroup as an admin
form = res.forms['authzgroup_addform']
form.fields['new_user_name'][0].value='madeup-authzgroup'
checkbox = [x for x in form.fields['admin'] \
if x.__class__.__name__ == 'Checkbox'][0]
# check the checkbox is currently unticked
assert checkbox.checked == False
# tick it and submit
checkbox.checked=True
res = form.submit('authz_add', extra_environ={'REMOTE_USER':user})
assert "User role(s) added" in res, "don't see flash message"
# examine the new page for user names/authzgroup names
assert 'visitor' in res
assert 'madeup-administrator' in res
assert 'logged_in' in res
assert 'madeup-authzgroup' in res
# and ensure that the database has changed as expected
self.assert_package_roles_to_be([
('madeup-authzgroup', 'admin'),
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
# check that the checkbox states are what we think they should be
# and change madeup-authzgroup from admin to editor
form = res.forms['authzgroup_form']
check_and_set_checkbox(form, u'madeup-authzgroup', u'editor', False, True)
check_and_set_checkbox(form, u'madeup-authzgroup', u'admin', True, False)
res = form.submit('authz_save', extra_environ={'REMOTE_USER': user})
#check database has changed.
self.assert_package_roles_to_be([
('madeup-authzgroup', 'editor'),
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
# now remove madeup-authzgroup entirely
form = res.forms['authzgroup_form']
check_and_set_checkbox(form, u'madeup-authzgroup', u'editor', True, False)
check_and_set_checkbox(form, u'madeup-authzgroup', u'admin', False, False)
res = form.submit('authz_save', extra_environ={'REMOTE_USER': user})
#check database is back to normal
self.assert_package_roles_to_be([
('madeup-administrator', 'admin'),
('visitor', 'reader'),
('logged_in', 'reader')])
# and that page contains only the expected strings
assert 'visitor' in res
assert 'madeup-administrator' in res
assert 'logged_in' in res
assert 'madeup-authzgroup' not in res
|
import random
from typing import Type, Dict, Tuple
import cv2
import numpy as np
import pytest
from albumentations import (
RandomCrop,
PadIfNeeded,
VerticalFlip,
HorizontalFlip,
Flip,
Transpose,
RandomRotate90,
Rotate,
ShiftScaleRotate,
CenterCrop,
OpticalDistortion,
GridDistortion,
ElasticTransform,
RandomGridShuffle,
ToGray,
RandomGamma,
ImageCompression,
HueSaturationValue,
RGBShift,
Blur,
MotionBlur,
MedianBlur,
GaussianBlur,
GaussNoise,
CLAHE,
ChannelShuffle,
InvertImg,
IAAEmboss,
IAASuperpixels,
IAASharpen,
IAAAdditiveGaussianNoise,
IAAPiecewiseAffine,
IAAPerspective,
Cutout,
CoarseDropout,
Normalize,
ToFloat,
FromFloat,
RandomBrightnessContrast,
RandomSnow,
RandomRain,
RandomFog,
RandomSunFlare,
RandomCropNearBBox,
RandomShadow,
RandomSizedCrop,
RandomResizedCrop,
ChannelDropout,
ISONoise,
Solarize,
Posterize,
Equalize,
CropNonEmptyMaskIfExists,
LongestMaxSize,
Downscale,
MultiplicativeNoise,
GridDropout,
ColorJitter,
FDA,
HistogramMatching,
Perspective,
Sharpen,
)
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [ImageCompression, {}],
        [HueSaturationValue, {}],
        [RGBShift, {}],
        [RandomBrightnessContrast, {}],
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {}],
        [GaussianBlur, {}],
        [GaussNoise, {}],
        [CLAHE, {}],
        [ChannelShuffle, {}],
        [InvertImg, {}],
        [RandomGamma, {}],
        [ToGray, {}],
        [Cutout, {}],
        [CoarseDropout, {}],
        [GaussNoise, {}],
        [RandomSnow, {}],
        [RandomRain, {}],
        [RandomFog, {}],
        [RandomSunFlare, {}],
        [RandomShadow, {}],
        [ChannelDropout, {}],
        [ISONoise, {}],
        [Solarize, {}],
        [Posterize, {}],
        [Equalize, {}],
        [Downscale, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [ColorJitter, {}],
        [
            HistogramMatching,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [
            FDA,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [Sharpen, {}],
    ],
)
def test_image_only_augmentations(augmentation_cls, params, image, mask):
    """Image-only transforms must keep uint8 dtypes and must not alter the mask."""
    transform = augmentation_cls(p=1, **params)
    result = transform(image=image, mask=mask)
    assert result["image"].dtype == np.uint8
    assert result["mask"].dtype == np.uint8
    assert np.array_equal(result["mask"], mask)
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [HueSaturationValue, {}],
        [RGBShift, {}],
        [RandomBrightnessContrast, {}],
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {"blur_limit": (3, 5)}],
        [GaussianBlur, {}],
        [GaussNoise, {}],
        [ChannelShuffle, {}],
        [InvertImg, {}],
        [RandomGamma, {}],
        [ImageCompression, {}],
        [ToGray, {}],
        [Cutout, {}],
        [CoarseDropout, {}],
        [GaussNoise, {}],
        [RandomSnow, {}],
        [RandomRain, {}],
        [RandomFog, {}],
        [RandomSunFlare, {}],
        [RandomShadow, {}],
        [ChannelDropout, {}],
        [Solarize, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [ColorJitter, {}],
        [
            HistogramMatching,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [
            FDA,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [Sharpen, {}],
    ],
)
def test_image_only_augmentations_with_float_values(augmentation_cls, params, float_image, mask):
    """Image-only transforms on float32 input keep dtypes and leave the mask untouched."""
    transform = augmentation_cls(p=1, **params)
    result = transform(image=float_image, mask=mask)
    assert result["image"].dtype == np.float32
    assert result["mask"].dtype == np.uint8
    assert np.array_equal(result["mask"], mask)
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [PadIfNeeded, {}],
        [VerticalFlip, {}],
        [HorizontalFlip, {}],
        [Flip, {}],
        [Transpose, {}],
        [RandomRotate90, {}],
        [Rotate, {}],
        [CoarseDropout, {"fill_value": 0, "mask_fill_value": 0}],
        [ShiftScaleRotate, {}],
        [OpticalDistortion, {}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [CenterCrop, {"height": 10, "width": 10}],
        [RandomCrop, {"height": 10, "width": 10}],
        [CropNonEmptyMaskIfExists, {"height": 10, "width": 10}],
        [RandomResizedCrop, {"height": 10, "width": 10}],
        [RandomSizedCrop, {"min_max_height": (4, 8), "height": 10, "width": 10}],
        [ISONoise, {}],
        [RandomGridShuffle, {}],
        [GridDropout, {}],
        [Perspective, {}],
    ],
)
def test_dual_augmentations(augmentation_cls, params, image, mask):
    """Dual transforms must return uint8 for both the image and the mask."""
    transform = augmentation_cls(p=1, **params)
    result = transform(image=image, mask=mask)
    assert result["image"].dtype == np.uint8
    assert result["mask"].dtype == np.uint8
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [PadIfNeeded, {}],
        [VerticalFlip, {}],
        [HorizontalFlip, {}],
        [Flip, {}],
        [Transpose, {}],
        [RandomRotate90, {}],
        [Rotate, {}],
        [ShiftScaleRotate, {}],
        [OpticalDistortion, {}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [CenterCrop, {"height": 10, "width": 10}],
        [RandomCrop, {"height": 10, "width": 10}],
        [CropNonEmptyMaskIfExists, {"height": 10, "width": 10}],
        [RandomResizedCrop, {"height": 10, "width": 10}],
        [RandomSizedCrop, {"min_max_height": (4, 8), "height": 10, "width": 10}],
        [RandomGridShuffle, {}],
        [GridDropout, {}],
        [Perspective, {}],
    ],
)
def test_dual_augmentations_with_float_values(augmentation_cls, params, float_image, mask):
    """Dual transforms on a float32 image keep float32 image and uint8 mask dtypes."""
    transform = augmentation_cls(p=1, **params)
    result = transform(image=float_image, mask=mask)
    assert result["image"].dtype == np.float32
    assert result["mask"].dtype == np.uint8
@pytest.mark.parametrize("augmentation_cls", [IAAEmboss, IAASuperpixels, IAASharpen, IAAAdditiveGaussianNoise])
def test_imgaug_image_only_augmentations(augmentation_cls, image, mask):
    """imgaug image-only transforms keep uint8 dtypes and leave the mask untouched."""
    transform = augmentation_cls(p=1)
    result = transform(image=image, mask=mask)
    assert result["image"].dtype == np.uint8
    assert result["mask"].dtype == np.uint8
    assert np.array_equal(result["mask"], mask)
@pytest.mark.parametrize("augmentation_cls", [IAAPiecewiseAffine, IAAPerspective])
def test_imgaug_dual_augmentations(augmentation_cls, image, mask):
    """imgaug dual transforms must return uint8 image and mask."""
    transform = augmentation_cls(p=1)
    result = transform(image=image, mask=mask)
    assert result["image"].dtype == np.uint8
    assert result["mask"].dtype == np.uint8
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [Cutout, {}],
        [ImageCompression, {}],
        [HueSaturationValue, {}],
        [RGBShift, {}],
        [RandomBrightnessContrast, {}],
        [RandomBrightnessContrast, {}],
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {}],
        [GaussianBlur, {}],
        [GaussNoise, {}],
        [CLAHE, {}],
        [ChannelShuffle, {}],
        [InvertImg, {}],
        [RandomGamma, {}],
        [ToGray, {}],
        [Cutout, {}],
        [CoarseDropout, {}],
        [PadIfNeeded, {}],
        [VerticalFlip, {}],
        [HorizontalFlip, {}],
        [Flip, {}],
        [Transpose, {}],
        [RandomRotate90, {}],
        [Rotate, {}],
        [ShiftScaleRotate, {}],
        [OpticalDistortion, {}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [CenterCrop, {"height": 10, "width": 10}],
        [RandomCrop, {"height": 10, "width": 10}],
        [CropNonEmptyMaskIfExists, {"height": 10, "width": 10}],
        [RandomResizedCrop, {"height": 10, "width": 10}],
        [RandomSizedCrop, {"min_max_height": (4, 8), "height": 10, "width": 10}],
        [Normalize, {}],
        [GaussNoise, {}],
        [ToFloat, {}],
        [FromFloat, {}],
        [RandomSnow, {}],
        [RandomRain, {}],
        [RandomFog, {}],
        [RandomSunFlare, {}],
        [RandomShadow, {}],
        [ChannelDropout, {}],
        [ISONoise, {}],
        [RandomGridShuffle, {}],
        [Solarize, {}],
        [Posterize, {}],
        [Equalize, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [ColorJitter, {}],
        [
            HistogramMatching,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [
            FDA,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [Perspective, {}],
        [Sharpen, {}],
    ],
)
def test_augmentations_wont_change_input(augmentation_cls, params, image, mask):
    """Transforms must never mutate the input image or mask in place."""
    original_image = image.copy()
    original_mask = mask.copy()
    transform = augmentation_cls(p=1, **params)
    transform(image=image, mask=mask)
    assert np.array_equal(image, original_image)
    assert np.array_equal(mask, original_mask)
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [Cutout, {}],
        [CoarseDropout, {}],
        [HueSaturationValue, {}],
        [RGBShift, {}],
        [RandomBrightnessContrast, {}],
        [RandomBrightnessContrast, {}],
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {"blur_limit": (3, 5)}],
        [GaussianBlur, {}],
        [GaussNoise, {}],
        [ChannelShuffle, {}],
        [InvertImg, {}],
        [RandomGamma, {}],
        [ToGray, {}],
        [PadIfNeeded, {}],
        [VerticalFlip, {}],
        [HorizontalFlip, {}],
        [Flip, {}],
        [Transpose, {}],
        [RandomRotate90, {}],
        [Rotate, {}],
        [ShiftScaleRotate, {}],
        [OpticalDistortion, {}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [CenterCrop, {"height": 10, "width": 10}],
        [RandomCrop, {"height": 10, "width": 10}],
        [RandomResizedCrop, {"height": 10, "width": 10}],
        [RandomSizedCrop, {"min_max_height": (4, 8), "height": 10, "width": 10}],
        [Normalize, {}],
        [GaussNoise, {}],
        [ToFloat, {}],
        [FromFloat, {}],
        [RandomSnow, {}],
        [RandomRain, {}],
        [RandomFog, {}],
        [RandomSunFlare, {}],
        [RandomShadow, {}],
        [ChannelDropout, {}],
        [RandomGridShuffle, {}],
        [Solarize, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [ColorJitter, {}],
        [
            HistogramMatching,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [
            FDA,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [Perspective, {}],
        [Sharpen, {}],
    ],
)
def test_augmentations_wont_change_float_input(augmentation_cls, params, float_image):
    """Transforms must never mutate a float32 input image in place."""
    snapshot = float_image.copy()
    transform = augmentation_cls(p=1, **params)
    transform(image=float_image)
    assert np.array_equal(float_image, snapshot)
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [Cutout, {}],
        [CoarseDropout, {}],
        [ImageCompression, {}],
        [RandomBrightnessContrast, {}],
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {}],
        [GaussianBlur, {}],
        [GaussNoise, {}],
        [InvertImg, {}],
        [RandomGamma, {}],
        [VerticalFlip, {}],
        [HorizontalFlip, {}],
        [Flip, {}],
        [Transpose, {}],
        [RandomRotate90, {}],
        [Rotate, {}],
        [OpticalDistortion, {}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [GaussNoise, {}],
        [ToFloat, {}],
        [FromFloat, {}],
        [RandomGridShuffle, {}],
        [Solarize, {}],
        [Posterize, {}],
        [Equalize, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [HueSaturationValue, {}],
        [ColorJitter, {}],
        [
            HistogramMatching,
            {"reference_images": [np.random.randint(0, 256, [100, 100], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [FDA, {"reference_images": [np.random.randint(0, 256, [100, 100], dtype=np.uint8)], "read_fn": lambda x: x}],
        [Perspective, {}],
        [Sharpen, {}],
    ],
)
def test_augmentations_wont_change_shape_grayscale(augmentation_cls, params, image, mask):
    """Transforms preserve the shape of grayscale inputs, with and without a channel axis."""
    transform = augmentation_cls(p=1, **params)
    # Test for grayscale image (no channel axis); the fixture args are replaced on purpose.
    gray_image = np.zeros((224, 224), dtype=np.uint8)
    gray_mask = np.zeros((224, 224))
    result = transform(image=gray_image, mask=gray_mask)
    assert np.array_equal(gray_image.shape, result["image"].shape)
    assert np.array_equal(gray_mask.shape, result["mask"].shape)
    # Test for grayscale image with a dummy single-channel axis
    image_1ch = np.zeros((224, 224, 1), dtype=np.uint8)
    mask_1ch = np.zeros((224, 224, 1))
    result = transform(image=image_1ch, mask=mask_1ch)
    assert np.array_equal(image_1ch.shape, result["image"].shape)
    assert np.array_equal(mask_1ch.shape, result["mask"].shape)
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [Cutout, {}],
        [CoarseDropout, {}],
        [ImageCompression, {}],
        [HueSaturationValue, {}],
        [RGBShift, {}],
        [RandomBrightnessContrast, {}],
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {}],
        [GaussianBlur, {}],
        [GaussNoise, {}],
        [CLAHE, {}],
        [ChannelShuffle, {}],
        [InvertImg, {}],
        [RandomGamma, {}],
        [ToGray, {}],
        [VerticalFlip, {}],
        [HorizontalFlip, {}],
        [Flip, {}],
        [Transpose, {}],
        [RandomRotate90, {}],
        [Rotate, {}],
        [OpticalDistortion, {}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [Normalize, {}],
        [GaussNoise, {}],
        [ToFloat, {}],
        [FromFloat, {}],
        [RandomSnow, {}],
        [RandomRain, {}],
        [RandomFog, {}],
        [RandomSunFlare, {}],
        [RandomShadow, {}],
        [ChannelDropout, {}],
        [ISONoise, {}],
        [RandomGridShuffle, {}],
        [Solarize, {}],
        [Posterize, {}],
        [Equalize, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [ColorJitter, {}],
        [
            HistogramMatching,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [
            FDA,
            {"reference_images": [np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)], "read_fn": lambda x: x},
        ],
        [Perspective, {}],
        [Sharpen, {}],
    ],
)
def test_augmentations_wont_change_shape_rgb(augmentation_cls, params, image, mask):
    """Transforms preserve the shape of 3-channel (RGB) inputs."""
    transform = augmentation_cls(p=1, **params)
    # Test for RGB image; the fixture args are replaced on purpose.
    image_3ch = np.zeros((224, 224, 3), dtype=np.uint8)
    mask_3ch = np.zeros((224, 224, 3))
    result = transform(image=image_3ch, mask=mask_3ch)
    assert np.array_equal(image_3ch.shape, result["image"].shape)
    assert np.array_equal(mask_3ch.shape, result["mask"].shape)
@pytest.mark.parametrize(["augmentation_cls", "params"], [[RandomCropNearBBox, {"max_part_shift": 0.15}]])
def test_image_only_crop_around_bbox_augmentation(augmentation_cls, params, image, mask):
    """RandomCropNearBBox accepts a cropping_bbox target and returns a uint8 image."""
    transform = augmentation_cls(p=1, **params)
    payload = {"image": image, "cropping_bbox": [-59, 77, 177, 231]}
    result = transform(**payload)
    assert result["image"].dtype == np.uint8
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [
            PadIfNeeded,
            {"min_height": 514, "min_width": 514, "border_mode": cv2.BORDER_CONSTANT, "value": 100, "mask_value": 1},
        ],
        [Rotate, {"border_mode": cv2.BORDER_CONSTANT, "value": 100, "mask_value": 1}],
        [ShiftScaleRotate, {"border_mode": cv2.BORDER_CONSTANT, "value": 100, "mask_value": 1}],
        [OpticalDistortion, {"border_mode": cv2.BORDER_CONSTANT, "value": 100, "mask_value": 1}],
        [ElasticTransform, {"border_mode": cv2.BORDER_CONSTANT, "value": 100, "mask_value": 1}],
        [GridDistortion, {"border_mode": cv2.BORDER_CONSTANT, "value": 100, "mask_value": 1}],
    ],
)
def test_mask_fill_value(augmentation_cls, params):
    """Border fill values must propagate: padded/warped areas get `value` in the
    image and `mask_value` in the mask, so constant inputs stay constant."""
    random.seed(42)  # fixed seed so the random geometric transforms are reproducible
    aug = augmentation_cls(p=1, **params)
    # renamed from `input`, which shadowed the builtin
    data = {"image": np.zeros((512, 512), dtype=np.uint8) + 100, "mask": np.ones((512, 512))}
    output = aug(**data)
    assert (output["image"] == 100).all()
    assert (output["mask"] == 1).all()
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {}],
        [GaussianBlur, {}],
        [GaussNoise, {}],
        [RandomSizedCrop, {"min_max_height": (384, 512), "height": 512, "width": 512}],
        [ShiftScaleRotate, {}],
        [PadIfNeeded, {"min_height": 514, "min_width": 516}],
        [LongestMaxSize, {"max_size": 256}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [RandomBrightnessContrast, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [Perspective, {}],
    ],
)
def test_multichannel_image_augmentations(augmentation_cls, params):
    """Transforms must accept a 6-channel image and keep dtype and channel count."""
    multichannel = np.zeros((512, 512, 6), dtype=np.uint8)
    transform = augmentation_cls(p=1, **params)
    result = transform(image=multichannel)
    assert result["image"].dtype == np.uint8
    assert result["image"].shape[2] == 6
@pytest.mark.parametrize(
    ["augmentation_cls", "params"],
    [
        [Blur, {}],
        [MotionBlur, {}],
        [MedianBlur, {"blur_limit": [7, 7]}],
        [GaussianBlur, {"blur_limit": [7, 7]}],
        [GaussNoise, {}],
        [RandomSizedCrop, {"min_max_height": (384, 512), "height": 512, "width": 512}],
        [ShiftScaleRotate, {}],
        [PadIfNeeded, {"min_height": 514, "min_width": 516}],
        [LongestMaxSize, {"max_size": 256}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [RandomBrightnessContrast, {}],
        [MultiplicativeNoise, {}],
        [GridDropout, {}],
        [Perspective, {}],
    ],
)
def test_multichannel_image_augmentations_diff_channels(augmentation_cls, params):
    """Transforms must work for any channel count from 3 to 12 inclusive."""
    for channel_count in range(3, 13):
        multichannel = np.zeros((512, 512, channel_count), dtype=np.uint8)
        transform = augmentation_cls(p=1, **params)
        result = transform(image=multichannel)
        assert result["image"].dtype == np.uint8
        assert result["image"].shape[2] == channel_count
@pytest.mark.parametrize(
    ["augmentation_cls", "params", "image_shape"],
    [
        [PadIfNeeded, {"min_height": 514, "min_width": 516}, (300, 200)],
        [PadIfNeeded, {"min_height": 514, "min_width": 516}, (512, 516)],
        [PadIfNeeded, {"min_height": 514, "min_width": 516}, (600, 600)],
        [
            PadIfNeeded,
            {"min_height": None, "min_width": None, "pad_height_divisor": 128, "pad_width_divisor": 128},
            (300, 200),
        ],
        [
            PadIfNeeded,
            {"min_height": None, "min_width": None, "pad_height_divisor": 72, "pad_width_divisor": 128},
            (72, 128),
        ],
        [
            PadIfNeeded,
            {"min_height": None, "min_width": None, "pad_height_divisor": 72, "pad_width_divisor": 128},
            (15, 15),
        ],
        [
            PadIfNeeded,
            {"min_height": None, "min_width": None, "pad_height_divisor": 72, "pad_width_divisor": 128},
            (144, 256),
        ],
        [
            PadIfNeeded,
            {"min_height": None, "min_width": None, "pad_height_divisor": 72, "pad_width_divisor": 128},
            (200, 300),
        ],
        [PadIfNeeded, {"min_height": 512, "min_width": None, "pad_width_divisor": 128}, (300, 200)],
        [PadIfNeeded, {"min_height": None, "min_width": 512, "pad_height_divisor": 128}, (300, 200)],
    ],
)
def test_pad_if_needed(augmentation_cls: Type[PadIfNeeded], params: Dict, image_shape: Tuple[int, int]):
    """PadIfNeeded must satisfy whichever of the min-size / divisor constraints are set,
    without ever shrinking the image or padding by more than one divisor step."""
    source = np.zeros(image_shape)
    transform = augmentation_cls(**params)
    padded = transform(image=source)["image"]
    if transform.min_width is not None:
        assert padded.shape[1] >= transform.min_width
    if transform.min_height is not None:
        assert padded.shape[0] >= transform.min_height
    if transform.pad_width_divisor is not None:
        assert padded.shape[1] % transform.pad_width_divisor == 0
        assert padded.shape[1] >= source.shape[1]
        assert padded.shape[1] - source.shape[1] <= transform.pad_width_divisor
    if transform.pad_height_divisor is not None:
        assert padded.shape[0] % transform.pad_height_divisor == 0
        assert padded.shape[0] >= source.shape[0]
        assert padded.shape[0] - source.shape[0] <= transform.pad_height_divisor
|
<gh_stars>0
'''
File to store the Crystal class
Attributes
- molecules; list; list of all of the molecule objects in the crystal
Methods
- add_molecule(Molecule); return None; appends a Molecule object to molecules list
- add_molecules(list); return None; iterates through list of Molecule objects and appends them to molecules list
- centre_of_geometry(); return np.array; returns the coordinates of the centre of geometry of the crystal as a numpy array
- get_intermolecular_interactions(); return list; returns a list of Interaction objects for each pair of atoms that
do not share a parent molecule in the crystal
- get_centroid_displacements(basis=None); return; (description incomplete in source — presumably returns the displacement
  vectors of the molecule centroids, optionally expressed in the supplied basis; verify against the implementation)
- get_central_molecule(); return Molecule; returns the molecule that is closest to the centre of geometry of the crystal
- get_molecule_centroids(); return list; returns a list of np.array objects of the coordinates of the centre of geometry
for each molecule in the crystal
- get_unique_dimers(); return list; returns a list of Molecule objects containing the unique dimers in the crystal
- get_molecule_atom_distances(mol1_index,mol2_index); return list; returns list of intermolecular atomic distances between two
molecules in the crystal
- get_molecule_atom_vdw_distances(mol1_index,mol2_index); return list; returns list of intermolecular atomic distances minus
the sum of their vdw_radii between two molecules in the crystal
- to_nx_graph(by='all'/'molecular_centroids'); return Networkx graph object; returns an nx graph object of the crystal.
If by ='all' the full list of atoms are the nodes and the covalent and intermolecular_bonds are the edges
If by='molecular_centroids' the molecular centroids are the nodes and the edges are the set of intermolecular interactions between
two molecules. default = 'all'
'''
from .Atom import Atom
from .Bond import Bond
from .Molecule import Molecule, Acene
from .Interaction import *
from .Geometry import *
import numpy as np
import os
import shutil
from openbabel import openbabel
import pandas as pd
def calc_lstsq_displacement(disp, vectors):
    """Express *disp* in the (possibly non-orthogonal) basis given by *vectors*.

    Solves ``vectors.T @ x = disp`` in the least-squares sense.

    Parameters
    ----------
    disp : np.ndarray
        Displacement vector of length d.
    vectors : np.ndarray
        Array of basis vectors, one per row (shape (k, d)).

    Returns
    -------
    np.ndarray
        Least-squares coefficients x (length k).
    """
    # The original accumulated the single solution into a throwaway list
    # (xs.append(x); return np.array(xs[0])); return it directly instead.
    coeff_matrix = vectors.T
    solution, _, _, _ = np.linalg.lstsq(coeff_matrix, disp, rcond=-1)
    return solution
class Crystal():
    '''A molecular crystal: a collection of Molecule objects.

    Attributes
    ----------
    molecules : list
        Molecule objects making up the crystal.
    dimers : list
        Scratch list for dimer results (kept for backward compatibility).
    '''

    def __init__(self, molecules=None):
        # BUG FIX: the original used a mutable default argument
        # (molecules=[]), so every Crystal() created without arguments
        # shared one and the same list.
        self.molecules = [] if molecules is None else molecules
        self.dimers = []

    def add_molecule(self, molecule):
        '''Append a single Molecule object to the crystal.'''
        self.molecules.append(molecule)

    def add_molecules(self, molecules):
        '''Append each Molecule in *molecules* to the crystal.'''
        for molecule in molecules:
            self.add_molecule(molecule)

    def centre_of_geometry(self):
        '''Return the mean of the molecular centroids as an np.array.'''
        return np.mean(self.get_molecule_centroids(), axis=0)

    def to_xyz(self, filename):
        '''Write every atom in the crystal to ``<filename>.xyz``.

        The first line holds the atom count; the second lists each element
        symbol with its multiplicity; one ``symbol x y z`` line per atom
        follows.
        '''
        atoms = [atom for molecule in self.molecules for atom in molecule.atoms]
        atom_symbols = np.array([atom.symbol for atom in atoms])
        unique_atoms = np.unique(atom_symbols)
        atom_count_dict = {}
        for unique_atom in unique_atoms:
            atom_count_dict[unique_atom] = np.sum(np.isin(atom_symbols, unique_atom))
        # BUG FIX: the original wrote to a hard-coded '(unknown).xyz' and
        # ignored the *filename* argument entirely.
        with open(f'{filename}.xyz', 'w') as file:
            file.write(f'{len(atom_symbols)}\n')
            for key in atom_count_dict.keys():
                file.write(f'{key}{atom_count_dict[key]} ')
            file.write('\n')
            for atom in atoms:
                coords = atom.coordinates
                file.write(f'{atom.symbol} {coords[0]} {coords[1]} {coords[2]}\n')
        return None

    def get_central_molecule(self, return_idx=False):
        '''Return the molecule closest to the crystal centre of geometry.

        If *return_idx* is True, return ``(molecule, index)`` instead.
        '''
        crystal_cog = np.array(self.centre_of_geometry())
        mol_cogs = np.array(self.get_molecule_centroids())
        # np.argmin replaces the original np.where(d == min(d)) dance.
        distances = np.linalg.norm(mol_cogs - crystal_cog, axis=1)
        idx = int(np.argmin(distances))
        if return_idx:
            return self.molecules[idx], idx
        else:
            return self.molecules[idx]

    def get_molecule_centroids(self):
        '''Return a list of centre-of-geometry arrays, one per molecule.'''
        return [molecule.centre_of_geometry() for molecule in self.molecules]

    def _unique_dimer_pairs(self):
        '''Yield ``(i, j, dimer)`` for every unique close-contact pair.

        Shared filtering logic for the three dimer methods below (it was
        triplicated in the original). A pair is skipped when its centroid
        distance (rounded to 3 d.p.) was already seen, or when both the
        centroid distance and the minimum interatomic distance exceed 5.
        '''
        com_distances = []
        for i, mol1 in enumerate(self.molecules):
            for j, mol2 in enumerate(self.molecules[i+1:], i+1):
                displacement = mol2.centre_of_geometry() - mol1.centre_of_geometry()
                distance = np.round(np.sqrt(displacement.dot(displacement)), 3)
                atom_distances = self.get_molecule_atom_distances(i, j)
                if distance in com_distances:
                    continue
                if (distance > 5) and (np.min(atom_distances) > 5):
                    continue
                com_distances.append(distance)
                yield i, j, Molecule(atoms=mol1.atoms + mol2.atoms,
                                     bonds=mol1.bonds + mol2.bonds)

    def unique_dimers_to_xyz(self):
        '''Write each unique dimer to ``mol<i>_mol<j>_dimer.xyz``.'''
        for i, j, dimer in self._unique_dimer_pairs():
            dimer.to_xyz(f'mol{i}_mol{j}_dimer')

    def get_unique_dimers(self):
        '''Return a list of Molecule objects for the unique dimers.'''
        return [dimer for _, _, dimer in self._unique_dimer_pairs()]

    def unique_dimers_to_mol(self):
        '''Write each unique dimer as a .mol file via openbabel.

        Works inside a temporary directory, copies the resulting .mol files
        and a ``names.txt`` index into the parent directory, then cleans up.
        '''
        os.mkdir('./tempdir')
        os.chdir('./tempdir')
        file = open('names.txt', 'w')
        for i, j, dimer in self._unique_dimer_pairs():
            dimer.to_xyz(f'mol{i}_mol{j}_dimer')
            obConversion = openbabel.OBConversion()
            obConversion.SetInAndOutFormats("xyz", "mol")
            mol = openbabel.OBMol()
            obConversion.ReadFile(mol, f'mol{i}_mol{j}_dimer.xyz')
            mol.AddHydrogens()
            obConversion.WriteFile(mol, f'mol{i}_mol{j}_dimer.mol')
            shutil.copy(f'mol{i}_mol{j}_dimer.mol', f'../mol{i}_mol{j}_dimer.mol')
            os.remove(f'mol{i}_mol{j}_dimer.xyz')
            os.remove(f'mol{i}_mol{j}_dimer.mol')
            file.write(f'mol{i}_mol{j}_dimer\n')
        file.close()
        shutil.copy('names.txt', '../names.txt')
        os.remove('names.txt')
        os.chdir('..')
        os.rmdir('tempdir')

    def get_molecule_atom_distances(self, mol1_index, mol2_index):
        '''Return all pairwise atomic distances (rounded to 3 d.p.) between
        the atoms of molecule *mol1_index* and molecule *mol2_index*.'''
        distances = []
        for atom1 in self.molecules[mol1_index].atoms:
            for atom2 in self.molecules[mol2_index].atoms:
                disp = atom2.coordinates - atom1.coordinates
                distances.append(np.round(np.sqrt(disp.dot(disp)), 3))
        return distances

    def get_molecule_atom_vdw_distances(self, mol1_index, mol2_index):
        '''Same as get_molecule_atom_distances, but with the sum of the two
        atoms' van der Waals radii subtracted from each distance.'''
        distances = []
        for atom1 in self.molecules[mol1_index].atoms:
            for atom2 in self.molecules[mol2_index].atoms:
                disp = atom2.coordinates - atom1.coordinates
                dist = np.round(np.sqrt(disp.dot(disp)), 3)
                dist -= (atom1.vdw_radius + atom2.vdw_radius)
                distances.append(dist)
        return distances
|
try:
import mysql.connector
from mysql.connector import Error
from zcrmsdk.src.com.zoho.api.authenticator.store.token_store import TokenStore
from zcrmsdk.src.com.zoho.api.authenticator.oauth_token import OAuthToken
from zcrmsdk.src.com.zoho.crm.api.util.constants import Constants
from zcrmsdk.src.com.zoho.crm.api.exception.sdk_exception import SDKException
except Exception as e:
import mysql.connector
from mysql.connector import Error
from .token_store import TokenStore
from ..oauth_token import OAuthToken
from ....crm.api.util.constants import Constants
from zcrmsdk.src.com.zoho.crm.api.exception.sdk_exception import SDKException
class DBStore(TokenStore):
    """
    This class stores user token details in a MySQL database.
    """

    def __init__(self, host=Constants.MYSQL_HOST, database_name=Constants.MYSQL_DATABASE_NAME,
                 user_name=Constants.MYSQL_USER_NAME, password="", port_number=Constants.MYSQL_PORT_NUMBER,
                 table_name=Constants.MYSQL_TABLE_NAME):
        """
        Creates a DBStore class instance with the specified parameters.

        Parameters:
            host (str) : A string containing the DataBase host name. Default value is localhost
            database_name (str) : A string containing the DataBase name. Default value is zohooauth
            user_name (str) : A string containing the DataBase user name. Default value is root
            password (str) : A string containing the DataBase password. Default value is an empty string
            port_number (str) : A string containing the DataBase port number. Default value is 3306
            table_name (str) : A string containing the DataBase table name. Default value is oauthtoken
        """
        self.__host = host
        self.__database_name = database_name
        self.__user_name = user_name
        self.__password = password
        self.__port_number = port_number
        self.__table_name = table_name

    def get_host(self):
        """
        This is a getter method to get __host.

        Returns:
            string: A string representing __host
        """
        return self.__host

    def get_database_name(self):
        """
        This is a getter method to get __database_name.

        Returns:
            string: A string representing __database_name
        """
        return self.__database_name

    def get_user_name(self):
        """
        This is a getter method to get __user_name.

        Returns:
            string: A string representing __user_name
        """
        return self.__user_name

    def get_password(self):
        """
        This is a getter method to get __password.

        Returns:
            string: A string representing __password
        """
        return self.__password

    def get_port_number(self):
        """
        This is a getter method to get __port_number.

        Returns:
            string: A string representing __port_number
        """
        return self.__port_number

    def get_table_name(self):
        """
        This is a getter method to get __table_name.

        Returns:
            string: A string representing __table_name
        """
        return self.__table_name

    def get_token(self, user, token):
        """
        Fetch the row matching *user* and *token* and populate *token* from it.

        Returns the populated OAuthToken, or None when *token* is not an
        OAuthToken or when no matching row exists.
        Raises SDKException when the database operation fails.
        """
        cursor = None
        try:
            # BUG FIX: the original passed a '<PASSWORD>' placeholder here
            # instead of the stored password (a syntax error as written).
            connection = mysql.connector.connect(host=self.__host, database=self.__database_name,
                                                 user=self.__user_name, password=self.__password,
                                                 port=self.__port_number)
            try:
                if isinstance(token, OAuthToken):
                    cursor = connection.cursor()
                    query = self.construct_dbquery(user.get_email(), token, False)
                    cursor.execute(query)
                    result = cursor.fetchone()
                    if result is not None:
                        oauthtoken = token
                        oauthtoken.set_id(result[0])
                        oauthtoken.set_user_mail(result[1])
                        oauthtoken.set_client_id(result[2])
                        oauthtoken.set_client_secret(result[3])
                        oauthtoken.set_refresh_token(result[4])
                        oauthtoken.set_access_token(result[5])
                        oauthtoken.set_grant_token(result[6])
                        oauthtoken.set_expires_in(str(result[7]))
                        oauthtoken.set_redirect_url(result[8])
                        return oauthtoken
            except Error as ex:
                raise ex
            finally:
                if cursor is not None:
                    cursor.close()
                if connection is not None:
                    connection.close()
        except Error as ex:
            raise SDKException(code=Constants.TOKEN_STORE, message=Constants.GET_TOKEN_DB_ERROR, cause=ex)

    def save_token(self, user, token):
        """
        Insert *token* for *user*, deleting any previous matching row first.

        Raises SDKException when the database operation fails.
        """
        cursor = None
        try:
            connection = mysql.connector.connect(host=self.__host, database=self.__database_name,
                                                 user=self.__user_name, password=self.__password,
                                                 port=self.__port_number)
            try:
                if isinstance(token, OAuthToken):
                    token.set_user_mail(user.get_email())
                    # Remove any stale row for this user/client before inserting.
                    self.delete_token(token)
                    cursor = connection.cursor()
                    query = "insert into " + self.__table_name + " (id,user_mail,client_id,client_secret,refresh_token,access_token,grant_token,expiry_time,redirect_url) values (%s,%s,%s,%s,%s,%s,%s,%s,%s);"
                    val = (token.get_id(), user.get_email(), token.get_client_id(), token.get_client_secret(), token.get_refresh_token(), token.get_access_token(), token.get_grant_token(), token.get_expires_in(), token.get_redirect_url())
                    cursor.execute(query, val)
                    connection.commit()
            except Error as ex:
                raise ex
            finally:
                if cursor is not None:
                    cursor.close()
                if connection is not None:
                    connection.close()
        except Error as ex:
            raise SDKException(code=Constants.TOKEN_STORE, message=Constants.SAVE_TOKEN_DB_ERROR, cause=ex)

    def delete_token(self, token):
        """
        Delete the row matching *token* (by user mail, client id and
        grant/refresh token).

        Raises SDKException when the database operation fails.
        """
        cursor = None
        try:
            connection = mysql.connector.connect(host=self.__host, database=self.__database_name,
                                                 user=self.__user_name, password=self.__password,
                                                 port=self.__port_number)
            try:
                if isinstance(token, OAuthToken):
                    cursor = connection.cursor()
                    query = self.construct_dbquery(token.get_user_mail(), token, True)
                    cursor.execute(query)
                    connection.commit()
            except Error as ex:
                raise ex
            finally:
                if cursor is not None:
                    cursor.close()
                if connection is not None:
                    connection.close()
        except Error as ex:
            raise SDKException(code=Constants.TOKEN_STORE, message=Constants.DELETE_TOKEN_DB_ERROR, cause=ex)

    def get_tokens(self):
        """
        Return a list of OAuthToken objects for every row in the table.

        Raises SDKException when the database operation fails.
        """
        cursor = None
        try:
            connection = mysql.connector.connect(host=self.__host, database=self.__database_name,
                                                 user=self.__user_name, password=self.__password,
                                                 port=self.__port_number)
            tokens = []
            try:
                cursor = connection.cursor()
                query = 'select * from ' + self.__table_name + ";"
                cursor.execute(query)
                results = cursor.fetchall()
                for result in results:
                    token = OAuthToken(client_id=result[2], client_secret=result[3], refresh_token=result[4], grant_token=result[6])
                    token.set_id(result[0])
                    token.set_user_mail(result[1])
                    token.set_access_token(result[5])
                    token.set_expires_in(str(result[7]))
                    token.set_redirect_url(result[8])
                    tokens.append(token)
                return tokens
            except Error as ex:
                raise ex
            finally:
                if cursor is not None:
                    cursor.close()
                if connection is not None:
                    connection.close()
        except Error as ex:
            raise SDKException(code=Constants.TOKEN_STORE, message=Constants.GET_TOKENS_DB_ERROR, cause=ex)

    def delete_tokens(self):
        """
        Delete every row in the token table.

        Raises SDKException when the database operation fails.
        """
        cursor = None
        try:
            connection = mysql.connector.connect(host=self.__host, database=self.__database_name,
                                                 user=self.__user_name, password=self.__password,
                                                 port=self.__port_number)
            try:
                cursor = connection.cursor()
                query = 'delete from ' + self.__table_name + ";"
                cursor.execute(query)
                connection.commit()
            except Error as ex:
                raise ex
            finally:
                if cursor is not None:
                    cursor.close()
                if connection is not None:
                    connection.close()
        except Error as ex:
            raise SDKException(code=Constants.TOKEN_STORE, message=Constants.DELETE_TOKENS_DB_ERROR, cause=ex)

    def get_token_by_id(self, id, token):
        """
        Populate *token* from the row whose primary key equals *id*.

        Returns the populated OAuthToken, or None when *token* is not an
        OAuthToken or when no row matches.
        Raises SDKException when the database operation fails.
        """
        cursor = None
        try:
            connection = mysql.connector.connect(host=self.__host, database=self.__database_name,
                                                 user=self.__user_name, password=self.__password,
                                                 port=self.__port_number)
            try:
                if isinstance(token, OAuthToken):
                    # SECURITY NOTE: *id* is interpolated directly into the SQL
                    # string; a parameterized query ("where id=%s") would be
                    # safer if ids can ever come from untrusted input.
                    query = "select * from " + self.__table_name + " where id='" + id + "'"
                    oauthtoken = token
                    cursor = connection.cursor()
                    cursor.execute(query)
                    results = cursor.fetchall()
                    for result in results:
                        if result[0] == id:
                            oauthtoken.set_id(result[0])
                            oauthtoken.set_user_mail(result[1])
                            oauthtoken.set_client_id(result[2])
                            oauthtoken.set_client_secret(result[3])
                            oauthtoken.set_refresh_token(result[4])
                            oauthtoken.set_access_token(result[5])
                            oauthtoken.set_grant_token(result[6])
                            oauthtoken.set_expires_in(str(result[7]))
                            oauthtoken.set_redirect_url(result[8])
                            return oauthtoken
            except Error as ex:
                raise ex
            finally:
                if cursor is not None:
                    cursor.close()
                if connection is not None:
                    connection.close()
        except Error as ex:
            raise SDKException(code=Constants.TOKEN_STORE, message=Constants.GET_TOKEN_BY_ID_DB_ERROR, cause=ex)

    def construct_dbquery(self, email, token, is_delete):
        """
        Build the SELECT or DELETE statement matching *token* for *email*.

        Matches on user_mail, client_id and either grant_token (when set)
        or refresh_token. Raises SDKException when *email* is None.
        """
        if email is None:
            raise SDKException(Constants.USER_MAIL_NULL_ERROR, Constants.USER_MAIL_NULL_ERROR_MESSAGE)
        # SECURITY NOTE: values are interpolated directly into the SQL string;
        # callers supply SDK-managed values, but parameterized queries would
        # be safer if any of these can come from untrusted input.
        query = "delete from " if is_delete is True else "select * from "
        query += self.__table_name + " where user_mail ='" + email + "' and client_id='" + token.get_client_id() + "' and "
        if token.get_grant_token() is not None:
            query += "grant_token='" + token.get_grant_token() + "'"
        else:
            query += "refresh_token='" + token.get_refresh_token() + "'"
        return query
|
from PIL import Image
from collections import namedtuple
from datetime import datetime, timedelta
from io import BytesIO
import json
import logging
import os
# logging
logger = logging.getLogger('epaper')
class EPaper:
    '''Manages data that is pulled from SITE_ARCHIVE to enable selection of a specific
    publication.'''

    def __init__(self, publisher=None, app_config=None):
        # app config
        self.publisher = publisher
        self.app_config = app_config
        # dict of publication labels and codes
        self.publications = dict([
            ('The Times of India', 'TOI'),
        ])
        # dict of edition labels and codes
        self.editions = dict([
            ('Mumbai', 'BOM'),
        ])
        # selectable dates for epaper download (the last 7 days, newest first)
        self.available_dates = [
            (datetime.today().date() - timedelta(i)).strftime('%Y%m%d') for i in range(1, 8)
        ]
        # publication code and label after selection
        self.selected_publication = ('', '')  # (label, code)
        # edition code and label after selection
        self.selected_edition = ('', '')  # (label, code)
        # epaper date selection, default: today's datetime object
        self.selected_date = datetime.today()
        # download area
        self.download_path = ''
        # table of contents for the selected publication: a dict
        self.toc_dict = dict()
        # number of pages in selected epaper
        self.num_pages = 0
        # array index of page being viewed
        self.selected_page = 0
        # page data
        # urls: dict of lists where each key points to [url, filename, exists]
        # see implementation in scraper.build_page_urls()
        self.Page = namedtuple(
            'Page', ['number', 'title', 'urls'])
        self.pages = []
        # publications available in cache
        # each element of list is a tuple(pub_code, edition_code, date_str)
        self.on_disk_pubs = []

    def get_page_image_from_disk(self, page_index, image_type='thumbnail'):
        '''Read and return the page image from disk for *page_index*.

        Returns a PIL Image, or None when there are no pages, the file is
        missing, or reading fails.
        '''
        if len(self.pages) > 0:
            page = self.pages[page_index]
            try:
                filename = page.urls[image_type][1]
                if os.path.exists(filename):
                    with open(filename, 'rb') as fd:
                        # BUG FIX: PIL's `Image` is a module, not a callable;
                        # the original `Image(BytesIO(...))` raised TypeError.
                        return Image.open(BytesIO(fd.read()))
                else:
                    return None
            except IOError as e:
                # BUG FIX: the original logged a literal '{fname}' (missing
                # f-prefix and an undefined name).
                logger.error('EPaper: error reading page %s image: %s', page_index, e)
                return None

    def save_codes_to_config(self):
        '''Persist the selected publication/edition codes via app_config.

        No-op when no publisher is set.
        '''
        pub_code = self.selected_publication[1]
        edition_code = self.selected_edition[1]
        if self.publisher:
            self.app_config.config[self.publisher]['selected_pub_code'] = pub_code
            self.app_config.config[self.publisher]['selected_edition_code'] = edition_code
            self.app_config.save()

    def create_download_dir(self):
        '''Given publication date and pub_code and edition_code, create disk cache path.'''
        pub_code = self.selected_publication[1]
        edition_code = self.selected_edition[1]
        date = self.selected_date
        if (date is not None) and \
           (pub_code is not None) and \
           (pub_code != '') and \
           (edition_code is not None) and \
           (edition_code != ''):
            self.download_path = os.path.join(
                self.app_config.config['App']['cache_dir'],
                pub_code,
                edition_code,
                str(date.date())  # YYYY-MM-DD
            )
            os.makedirs(self.download_path, exist_ok=True)

    def save_page_metadata(self):
        '''Save self.pages after first initial download, so any subsequent redownloads
        can restart from this db than re-requesting all data again. This should also
        help manage planned sync feature.

        NOTE: namedtuples serialize as plain JSON arrays; load_pub returns
        them as lists, not Page tuples.
        '''
        if len(self.pages) > 0:
            filename = os.path.join(self.download_path, 'page_metadata.json')
            with open(filename, 'w') as fd:
                fd.write(json.dumps(self.pages))

    def find_on_disk_pubs(self):
        '''Find previously downloaded publications within cache directory.

        Returns a list of (pub_code, edition_code, date_str) tuples.
        '''
        cache_dir = self.app_config.config['App']['cache_dir']
        # Split on os.sep so the path components come out correctly on all
        # platforms (the original split on '/' only, breaking on Windows).
        return [tuple(os.path.normpath(dirpath).split(os.sep)[-3:])
                for dirpath, dirs, files in os.walk(cache_dir)
                if 'toc.json' in files]

    def load_pub(self, pub_code=None, edition_code=None, date_str=None):
        '''Load self.pages data from json dump in disk cache.

        Returns (toc, metadata) when both cache files exist, otherwise None.
        '''
        cache_dir = self.app_config.config['App']['cache_dir']
        download_path = os.path.join(
            cache_dir, pub_code, edition_code, date_str)
        toc_filename = os.path.join(download_path, 'toc.json')
        metadata_filename = os.path.join(download_path, 'page_metadata.json')
        if os.path.exists(toc_filename) and \
           os.path.exists(metadata_filename):
            with open(toc_filename, 'r') as fd:
                toc = json.load(fd)
            with open(metadata_filename, 'r') as fd:
                metadata = json.load(fd)
            return (toc, metadata)
|
import json
import yaml
import pprint
import unittest
import orthauth as oa
from orthauth import exceptions as exc
from .common import test_folder
class TestFormats(unittest.TestCase):
    """Round-trip the auth config through yaml, python and json formats."""

    def _config(self, name):
        """Build an AuthConfig from a file inside the shared test folder."""
        return oa.AuthConfig(test_folder / name)

    def _do_test(self, auth):
        """Shared assertion: the leaked api key resolves correctly."""
        secret = auth.get('full-complexity-example')
        assert secret == 'oh-no-my-api-key-is-on-github-!', 'derp'

    def test_runtime(self):
        """Exercise AuthConfig.runtimeConfig with several blob combinations."""
        template = self._config('auth-config-1.yaml')
        patched_blob = template.load()
        # FIXME user-config-path ??? more uniform with other behavior
        patched_blob['config-search-paths'] = [
            test_folder / patched_blob['config-search-paths'][0]]

        source = self._config('auth-config-1.yaml')
        ablob = source.load()
        ublob = source.user_config.load()
        with open(test_folder / 'secrets-test-1.yaml', 'rt') as f:  # please never do this irl
            sblob = yaml.safe_load(f)

        # an unpatched blob keeps its relative search path and must fail
        try:
            oa.AuthConfig.runtimeConfig(ablob)
        except exc.NoBasePathError:
            pass
        else:
            assert False, 'should have failed due to non-relative-path'

        runtime_auth = oa.AuthConfig.runtimeConfig(patched_blob)
        assert runtime_auth.get('default-example') == 42, 'deep thought required'

        runtime_auth = oa.AuthConfig.runtimeConfig(ablob, ublob)
        assert runtime_auth.get('test-after-init') == 'after-init', 'failure'

        runtime_auth = oa.AuthConfig.runtimeConfig(ablob, ublob, sblob)
        assert runtime_auth.get('oh-nose-her-api-keys') == 'DO NOT WANT', '( ͡° ͜ʖ ͡°)'

    def _convert_and_check(self, path, dump):
        """Convert the yaml fixture with *dump*, test it, always clean up."""
        try:
            with open(test_folder / 'auth-config-1.yaml', 'rt') as src, open(path, 'wt') as dst:
                dump(yaml.safe_load(src), dst)
            self._do_test(self._config(path.name))
        finally:
            if path.exists():
                path.unlink()

    def test_yaml(self):
        self._do_test(self._config('auth-config-1.yaml'))

    def test_python(self):
        self._convert_and_check(test_folder / 'auth-config-1.py',
                                lambda d, o: o.write(pprint.pformat(d)))

    def test_json(self):
        self._convert_and_check(test_folder / 'auth-config-1.json',
                                lambda d, o: json.dump(d, o))
class TestEmptyAuthConfig(unittest.TestCase):
    """An empty auth-config file of any format must raise EmptyConfigError."""

    _config = TestFormats._config  # reuse the loader helper

    def _do_test(self, suffix):
        path = (test_folder / 'auth-config-empty').with_suffix(suffix)
        try:
            with open(path, 'wt'):
                pass  # create an empty file of the requested format
            try:
                self._config(path.name)
            except exc.EmptyConfigError:
                pass
            else:
                assert False, 'should have failed'
        finally:
            if path.exists():
                path.unlink()

    def test_json(self):
        self._do_test('.json')

    def test_python(self):
        self._do_test('.py')

    def test_yaml(self):
        self._do_test('.yaml')
class TestEmptyUserConfig(TestEmptyAuthConfig):
    """An empty user-config file must also raise EmptyConfigError."""

    def _do_test(self, suffix):
        path = (test_folder / 'user-config-empty').with_suffix(suffix)
        ablob = self._config('auth-config-1.yaml').load()
        ablob['config-search-paths'] = [path]
        try:
            with open(path, 'wt'):
                pass  # create an empty user config of the requested format
            try:
                oa.AuthConfig.runtimeConfig(ablob)
                # auth.get('default-example')  # needed if we don't call
                # self.load() in __new__
            except exc.EmptyConfigError:
                pass
            else:
                assert False, 'should have failed due to empty user config'
        finally:
            if path.exists():
                path.unlink()
|
#! /usr/bin/env python
'''
Brian2 setup script
'''
import io
import sys
import os
import platform
from pkg_resources import parse_version
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from distutils.errors import CompileError, DistutilsPlatformError
# Minimum Cython version able to regenerate the spike-queue extension.
REQUIRED_CYTHON_VERSION = '0.29'
try:
    import Cython
    # Treat a too-old Cython the same as a missing one.
    if parse_version(Cython.__version__) < parse_version(REQUIRED_CYTHON_VERSION):
        raise ImportError('Cython version %s is too old' % Cython.__version__)
    from Cython.Build import cythonize
    cython_available = True
except ImportError:
    # Cython absent or too old: fall back to the pre-generated C++ source.
    cython_available = False
def has_option(name):
    """Return True when ``--<name>`` appears on the command line (consuming
    it from sys.argv) or when the NAME environment variable (dashes mapped
    to underscores, upper-cased) is set to 'true'."""
    flag = '--%s' % name
    if flag in sys.argv:
        sys.argv.remove(flag)
        return True
    # allow passing all cmd line options also as environment variables
    env_val = os.getenv(name.upper().replace('-', '_'), 'false').lower()
    return env_val == 'true'
WITH_CYTHON = has_option('with-cython')
FAIL_ON_ERROR = has_option('fail-on-error')

pyx_fname = os.path.join('brian2', 'synapses', 'cythonspikequeue.pyx')
cpp_fname = os.path.join('brian2', 'synapses', 'cythonspikequeue.cpp')

# Decide which spike-queue source to compile: regenerate from the .pyx with
# Cython when requested (or when the shipped .cpp is missing), otherwise use
# the pre-generated .cpp. fname is set to None when compilation is impossible.
if WITH_CYTHON or not os.path.exists(cpp_fname):
    fname = pyx_fname
    if not cython_available:
        # Typo fix: 'necesary' -> 'necessary' (now consistent with the
        # messages in the pyx-missing branch below).
        if FAIL_ON_ERROR and WITH_CYTHON:
            raise RuntimeError('Compilation with Cython requested/necessary but '
                               'Cython >= %s is not available.' % REQUIRED_CYTHON_VERSION)
        else:
            sys.stderr.write('Compilation with Cython requested/necessary but '
                             'Cython >= %s is not available.\n' % REQUIRED_CYTHON_VERSION)
            fname = None
    if not os.path.exists(pyx_fname):
        if FAIL_ON_ERROR and WITH_CYTHON:
            raise RuntimeError(('Compilation with Cython requested/necessary but '
                                'Cython source file %s does not exist') % pyx_fname)
        else:
            sys.stderr.write(('Compilation with Cython requested/necessary but '
                              'Cython source file %s does not exist\n') % pyx_fname)
            fname = None
else:
    fname = cpp_fname

if fname is not None:
    if (platform.system() == 'Linux' and
            platform.architecture()[0] == '32bit' and
            platform.machine() == 'x86_64'):
        # We are cross-compiling (most likely to build a 32Bit package for conda
        # on travis), set paths and flags for 32Bit explicitly
        print('Configuring compilation for cross-compilation to 32 Bit')
        extensions = [Extension("brian2.synapses.cythonspikequeue",
                                [fname],
                                include_dirs=[],  # numpy include dir will be added later
                                library_dirs=['/lib32', '/usr/lib32'],
                                extra_compile_args=['-m32'],
                                extra_link_args=['-m32'])]
    else:
        extensions = [Extension("brian2.synapses.cythonspikequeue",
                                [fname],
                                include_dirs=[])]  # numpy include dir will be added later
    if fname == pyx_fname:
        extensions = cythonize(extensions)
else:
    extensions = []
class optional_build_ext(build_ext):
    '''
    This class allows the building of C extensions to fail and still continue
    with the building process. This ensures that installation never fails, even
    on systems without a C compiler, for example.
    If brian is installed in an environment where building C extensions
    *should* work, use the "--fail-on-error" option or set the environment
    variable FAIL_ON_ERROR to true.
    '''
    def build_extension(self, ext):
        """Build *ext*, adding the numpy include dir; on compiler failure,
        either re-raise (FAIL_ON_ERROR) or warn and continue."""
        # Import numpy lazily here — presumably so that setup.py can be
        # parsed before numpy is installed (TODO confirm).
        import numpy
        numpy_incl = numpy.get_include()
        if hasattr(ext, 'include_dirs') and not numpy_incl in ext.include_dirs:
            ext.include_dirs.append(numpy_incl)
        try:
            build_ext.build_extension(self, ext)
        except (CompileError, DistutilsPlatformError) as ex:
            # Compilation failed: abort when requested, otherwise warn loudly
            # and let the install proceed with the pure-Python fallback.
            if FAIL_ON_ERROR:
                raise ex
            else:
                error_msg = ('Building %s failed (see error message(s) '
                             'above) -- pure Python version will be used '
                             'instead.') % ext.name
                sys.stderr.write('*' * len(error_msg) + '\n' +
                                 error_msg + '\n' +
                                 '*' * len(error_msg) + '\n')
# Use readme file as long description
with io.open(os.path.join(os.path.dirname(__file__), 'README.rst'),
             encoding='utf-8') as f:
    long_description = f.read()

# Package metadata and build configuration for Brian2.
setup(name='Brian2',
      version='2.4.1+git',
      packages=find_packages(),
      package_data={# include template files
                    'brian2.codegen.runtime.numpy_rt': ['templates/*.py_'],
                    'brian2.codegen.runtime.cython_rt': ['templates/*.pyx'],
                    'brian2.codegen.runtime.GSLcython_rt': ['templates/*.pyx'],
                    'brian2.devices.cpp_standalone': ['templates/*.cpp',
                                                      'templates/*.h',
                                                      'templates/makefile',
                                                      'templates/win_makefile',
                                                      'templates_GSL/*.cpp',
                                                      'brianlib/*.cpp',
                                                      'brianlib/*.h'],
                    # include test template files
                    'brian2.tests.test_templates.fake_package_1': ['templates/*.txt'],
                    'brian2.tests.test_templates.fake_package_2': ['templates/*.txt'],
                    # Include RALLPACK test data, external code, and pytest config
                    'brian2.tests': ['rallpack_data/README',
                                     'rallpack_data/ref_*',
                                     'func_def_cpp.cpp',
                                     'func_def_cpp.h',
                                     'func_def_cython.pyx',
                                     'func_def_cython.pxd',
                                     'pytest.ini'],
                    # include C++/Cython version of spike queue
                    'brian2.synapses': ['cspikequeue.cpp',
                                        'cythonspikequeue.pyx',
                                        'stdint_compat.h'],
                    # include randomkit
                    'brian2.random': ['randomkit/randomkit.c',
                                      'randomkit/randomkit.h'],
                    # include default_preferences file
                    'brian2': ['default_preferences']
                    },
      install_requires=['numpy>=1.15',
                        'cython>=0.29',
                        'sympy>=1.2',
                        'pyparsing',
                        'jinja2>=2.7',
                        'py-cpuinfo;platform_system=="Windows"',
                        'setuptools>=24.2'
                        ],
      setup_requires=['numpy>=1.10',
                      'setuptools>=24.2'
                      ],
      # fall back gracefully when the C extension cannot be built (see above)
      cmdclass={'build_ext': optional_build_ext},
      provides=['brian2'],
      extras_require={'test': ['pytest',
                               'pytest-xdist>=1.22.3'],
                      'docs': ['sphinx>=1.8',
                               'ipython>=5']},
      # NOTE(review): setuptools >= 58 removed the use_2to3 keyword entirely;
      # this argument will raise there — consider dropping it.
      use_2to3=False,
      zip_safe=False,
      ext_modules=extensions,
      url='http://www.briansimulator.org/',
      description='A clock-driven simulator for spiking neural networks',
      long_description=long_description,
      long_description_content_type='text/x-rst',
      author='<NAME>, <NAME>, <NAME>',
      author_email='<EMAIL>',
      keywords='computational neuroscience simulation',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'Topic :: Scientific/Engineering :: Bio-Informatics'
      ],
      python_requires='>=3.6'
      )
|
<filename>gluoncv/utils/metrics/tracking.py
""" SiamRPN metrics """
import numpy as np
from colorama import Style, Fore
def overlap_ratio(rect1, rect2):
    """Row-wise IoU between two arrays of rects.

    Parameters
    ----------
    rect1 : nd.array
        2d array of N x [x,y,w,h]
    rect2 : nd.array
        2d array of N x [x,y,w,h]

    Return
    ----------
    IOU per row, clipped to [0, 1]
    """
    x1, y1, w1, h1 = rect1[:, 0], rect1[:, 1], rect1[:, 2], rect1[:, 3]
    x2, y2, w2, h2 = rect2[:, 0], rect2[:, 1], rect2[:, 2], rect2[:, 3]
    # overlap extents along each axis (zero when the boxes are disjoint)
    inter_w = np.maximum(0, np.minimum(x1 + w1, x2 + w2) - np.maximum(x1, x2))
    inter_h = np.maximum(0, np.minimum(y1 + h1, y2 + h2) - np.maximum(y1, y2))
    intersect = inter_w * inter_h
    union = w1 * h1 + w2 * h2 - intersect
    return np.clip(intersect / union, 0, 1)
def success_overlap(gt_bb, result_bb, n_frame):
    """Success curve: fraction of frames whose IoU exceeds each threshold
    in [0, 1] (step 0.05).

    Parameters
    ----------
    result_bb : nd.array
        2d array of N x [x,y,w,h]
    n_frame : int
        frame number

    Return
    ----------
    success score per threshold
    """
    thresholds_overlap = np.arange(0, 1.05, 0.05)
    # frames with a degenerate ground-truth box keep iou = -1 and never count
    iou = np.full(len(gt_bb), -1.0)
    valid = np.sum(gt_bb[:, 2:] > 0, axis=1) == 2
    iou[valid] = overlap_ratio(gt_bb[valid], result_bb[valid])
    return np.array([np.sum(iou > threshold) / float(n_frame)
                     for threshold in thresholds_overlap])
def success_error(gt_center, result_center, thresholds, n_frame):
    """Precision curve: fraction of frames whose centre error is within
    each threshold.

    Parameters
    ----------
    gt_center : np.ndarray
        2d array of N x [x,y] centre points
    result_center : np.ndarray
        2d array of N x [x,y] centre points
    thresholds : float
        error float
    n_frame : int
        frame number

    Return
    ----------
    success_error score per threshold
    """
    # frames with non-positive ground-truth centres keep dist = -1
    dist = np.full(len(gt_center), -1.0)
    valid = np.sum(gt_center > 0, axis=1) == 2
    diffs = gt_center[valid] - result_center[valid]
    dist[valid] = np.sqrt(np.sum(np.power(diffs, 2), axis=1))
    return np.array([np.sum(dist <= threshold) / float(n_frame)
                     for threshold in thresholds])
class OPEBenchmark:
"""
SiamRPN OPEBenchmark have eval_success, precision to select.
eval_success is distance between the center point of the predicted position
precision is Compute overlap ratio between two rects through thresholds_overlap
Parameters
----------
dataset
dataset Benchmark
"""
    def __init__(self, dataset):
        """Store the benchmark *dataset* consumed by the eval_* methods."""
        self.dataset = dataset
def convert_bb_to_center(self, bboxes):
return np.array([(bboxes[:, 0] + (bboxes[:, 2] - 1) / 2),
(bboxes[:, 1] + (bboxes[:, 3] - 1) / 2)]).T
def convert_bb_to_norm_center(self, bboxes, gt_wh):
return self.convert_bb_to_center(bboxes) / (gt_wh+1e-16)
    def eval_success(self, eval_trackers=None):
        """Compute the per-video success-overlap curve for each tracker.

        Parameters
        ----------
        eval_trackers: list of tracker name or single tracker name
            Defaults to all trackers known to the dataset.

        Return
        ----------
        return: dict of results, {tracker_name: {video_name: success array}}
        """
        if eval_trackers is None:
            eval_trackers = self.dataset.tracker_names
        if isinstance(eval_trackers, str):
            eval_trackers = [eval_trackers]
        success_ret = {}
        for tracker_name in eval_trackers:
            success_ret_ = {}
            for video in self.dataset:
                gt_traj = np.array(video.gt_traj)
                # load the prediction from disk unless it is already cached
                if tracker_name not in video.pred_trajs:
                    tracker_traj = video.load_tracker(self.dataset.tracker_path,
                                                      tracker_name, False)
                    tracker_traj = np.array(tracker_traj)
                else:
                    tracker_traj = np.array(video.pred_trajs[tracker_name])
                n_frame = len(gt_traj)
                if hasattr(video, 'absent'):
                    # NOTE(review): keeps frames where absent == 1 — looks
                    # inverted at first glance; confirm the flag's meaning.
                    gt_traj = gt_traj[video.absent == 1]
                    tracker_traj = tracker_traj[video.absent == 1]
                success_ret_[video.name] = success_overlap(gt_traj, tracker_traj, n_frame)
            success_ret[tracker_name] = success_ret_
        return success_ret
    def eval_precision(self, eval_trackers=None):
        """Compute the per-video centre-error precision curve for each tracker
        (pixel thresholds 0..50).

        Parameters
        ----------
        eval_trackers: list of tracker name or single tracker name
            Defaults to all trackers known to the dataset.

        Return
        ----------
        return: dict of results, {tracker_name: {video_name: precision array}}
        """
        if eval_trackers is None:
            eval_trackers = self.dataset.tracker_names
        if isinstance(eval_trackers, str):
            eval_trackers = [eval_trackers]
        precision_ret = {}
        for tracker_name in eval_trackers:
            precision_ret_ = {}
            for video in self.dataset:
                gt_traj = np.array(video.gt_traj)
                # load the prediction from disk unless it is already cached
                if tracker_name not in video.pred_trajs:
                    tracker_traj = video.load_tracker(self.dataset.tracker_path,
                                                      tracker_name, False)
                    tracker_traj = np.array(tracker_traj)
                else:
                    tracker_traj = np.array(video.pred_trajs[tracker_name])
                n_frame = len(gt_traj)
                if hasattr(video, 'absent'):
                    # NOTE(review): keeps frames where absent == 1 — confirm
                    # the flag's meaning (same filter as eval_success).
                    gt_traj = gt_traj[video.absent == 1]
                    tracker_traj = tracker_traj[video.absent == 1]
                gt_center = self.convert_bb_to_center(gt_traj)
                tracker_center = self.convert_bb_to_center(tracker_traj)
                thresholds = np.arange(0, 51, 1)
                precision_ret_[video.name] = success_error(gt_center, tracker_center,
                                                           thresholds, n_frame)
            precision_ret[tracker_name] = precision_ret_
        return precision_ret
def eval_norm_precision(self, eval_trackers=None):
    """Evaluate normalized center-location precision for the given trackers.

    Center distances are normalized by the ground-truth box size before
    thresholding.

    Parameters
    ----------
    eval_trackers: a single tracker name or a list of names; defaults to
        every tracker known to the dataset.

    Returns
    -------
    dict mapping tracker name -> {video name -> normalized precision curve}
    """
    if eval_trackers is None:
        eval_trackers = self.dataset.tracker_names
    if isinstance(eval_trackers, str):
        eval_trackers = [eval_trackers]

    norm_precision_ret = {}
    for name in eval_trackers:
        per_video = {}
        for video in self.dataset:
            gt = np.array(video.gt_traj)
            if name in video.pred_trajs:
                pred = np.array(video.pred_trajs[name])
            else:
                pred = np.array(video.load_tracker(
                    self.dataset.tracker_path, name, False))
            n_frame = len(gt)
            if hasattr(video, 'absent'):
                gt = gt[video.absent == 1]
                pred = pred[video.absent == 1]
            # centers scaled by the ground-truth width/height columns
            gt_center_norm = self.convert_bb_to_norm_center(gt, gt[:, 2:4])
            pred_center_norm = self.convert_bb_to_norm_center(pred, gt[:, 2:4])
            # normalized thresholds 0.00 .. 0.50
            thresholds = np.arange(0, 51, 1) / 100
            per_video[video.name] = success_error(gt_center_norm,
                                                  pred_center_norm,
                                                  thresholds, n_frame)
        norm_precision_ret[name] = per_video
    return norm_precision_ret
def show_result(self, success_ret, precision_ret=None,
        norm_precision_ret=None, show_video_level=False, helight_threshold=0.6):
    """pretty print result

    Parameters
    ----------
    success_ret: returned dict from function eval
    precision_ret: optional dict from eval_precision
    norm_precision_ret: optional dict from eval_norm_precision
    show_video_level: also print a per-video table (only when < 10 trackers)
    helight_threshold: per-video scores below this are printed in red
        (NOTE(review): "helight" looks like a typo for "highlight"; kept
        because renaming the keyword would break callers)
    """
    # sort trackers by mean AUC and keep the top 20 for the summary table
    tracker_auc = {}
    for tracker_name in success_ret.keys():
        auc = np.mean(list(success_ret[tracker_name].values()))
        tracker_auc[tracker_name] = auc
    tracker_auc_ = sorted(tracker_auc.items(),
                          key=lambda x: x[1],
                          reverse=True)[:20]
    tracker_names = [x[0] for x in tracker_auc_]
    tracker_name_len = max((max([len(x) for x in success_ret.keys()])+2), 12)
    header = ("|{:^"+str(tracker_name_len)+"}|{:^9}|{:^16}|{:^11}|").format("Tracker name",
            "Success",
            "Norm Precision",
            "Precision")
    formatter = "|{:^"+str(tracker_name_len)+"}|{:^9.3f}|{:^16.3f}|{:^11.3f}|"
    print('-'*len(header))
    print(header)
    print('-'*len(header))
    for tracker_name in tracker_names:
        # success = np.mean(list(success_ret[tracker_name].values()))
        success = tracker_auc[tracker_name]
        # index 20 picks the 20-pixel (or 0.20 normalized) threshold column
        if precision_ret is not None:
            precision = np.mean(list(precision_ret[tracker_name].values()), axis=0)[20]
        else:
            precision = 0
        if norm_precision_ret is not None:
            norm_precision = np.mean(list(norm_precision_ret[tracker_name].values()),
                    axis=0)[20]
        else:
            norm_precision = 0
        print(formatter.format(tracker_name, success, norm_precision, precision))
    print('-'*len(header))
    # optional per-video breakdown, only readable for small tracker counts
    if show_video_level and len(success_ret) < 10 \
            and precision_ret is not None \
            and len(precision_ret) < 10:
        print("\n\n")
        header1 = "|{:^21}|".format("Tracker name")
        header2 = "|{:^21}|".format("Video name")
        for tracker_name in success_ret.keys():
            # col_len = max(20, len(tracker_name))
            header1 += ("{:^21}|").format(tracker_name)
            header2 += "{:^9}|{:^11}|".format("success", "precision")
        print('-'*len(header1))
        print(header1)
        print('-'*len(header1))
        print(header2)
        print('-'*len(header1))
        # NOTE(review): tracker_name here is whatever key the loop above
        # ended on; this assumes every tracker covers the same video set.
        videos = list(success_ret[tracker_name].keys())
        for video in videos:
            row = "|{:^21}|".format(video)
            for tracker_name in success_ret.keys():
                success = np.mean(success_ret[tracker_name][video])
                precision = np.mean(precision_ret[tracker_name][video])
                success_str = "{:^9.3f}".format(success)
                # Fore/Style presumably come from colorama imported at file
                # top (not visible here) -- TODO confirm
                if success < helight_threshold:
                    row += f'{Fore.RED}{success_str}{Style.RESET_ALL}|'
                else:
                    row += success_str+'|'
                precision_str = "{:^11.3f}".format(precision)
                if precision < helight_threshold:
                    row += f'{Fore.RED}{precision_str}{Style.RESET_ALL}|'
                else:
                    row += precision_str+'|'
            print(row)
        print('-'*len(header1))
|
<reponame>dperl-sol/cctbx_project
from __future__ import absolute_import, division, print_function
import iotbx.pdb
from libtbx.str_utils import split_keeping_spaces
import sys
import six
# Translation table from PDB-reference RNA/DNA atom names to monomer-library
# names, keyed on the whitespace-stripped reference name.
trans_dict = {}
for k,v in six.iteritems(iotbx.pdb.rna_dna_atom_names_reference_to_mon_lib_translation_dict):
  trans_dict[k.strip()] = v
trans_dict["H2'"] = "H2*"  # special case not present in the reference dict
def trans_field(flds, i):
  """Replace flds[i] with its monomer-library atom name, shrinking the
  following whitespace field so the fixed columns stay aligned."""
  old_name = flds[i]
  new_name = trans_dict[old_name]
  flds[i] = new_name
  if i + 1 < len(flds):
    pad = len(flds[i+1])
    growth = len(new_name) - len(old_name)
    # there must remain at least one space between fields
    assert pad > growth
    flds[i+1] = " " * (pad - growth)
def iter_until_loop(lines):
  """Yield lines up to (but not including) the next comment ("#...") or
  "loop_" terminator; the terminator itself is printed and iteration stops."""
  for line in lines:
    terminator = line.startswith("#") or line == "loop_"
    if not terminator:
      yield line
      continue
    print(line)
    return
def rename_generic(lines, len_flds, i_list):
  """Translate the atom-name fields at positions *i_list* on every line
  until the next loop_/comment terminator, echoing each rewritten line."""
  for line in iter_until_loop(lines):
    fields = split_keeping_spaces(line)
    assert len(fields) == len_flds
    for idx in i_list:
      trans_field(fields, idx)
    print("".join(fields))
def rename_atom(lines):
  # _chem_comp_atom loop: atom id in field 3
  rename_generic(lines, 10, [3])
def rename_tree(lines):
  # _chem_comp_tree loop: translate the atom ids in fields 3,5,7,9 unless
  # they are structural keywords rather than atom names
  for line in iter_until_loop(lines):
    flds = split_keeping_spaces(line)
    assert len(flds) == 10
    for i in [3, 5, 7, 9]:
      if (flds[i] not in ["n/a", "START", "ADD", "END", "."]):
        trans_field(flds, i)
    print("".join(flds))
def rename_bond(lines):
  # _chem_comp_bond loop: two atom ids
  rename_generic(lines, 12, [3, 5])
def rename_angle(lines):
  # _chem_comp_angle loop: three atom ids
  rename_generic(lines, 12, [3, 5, 7])
def rename_tor(lines):
  # _chem_comp_tor loop: four atom ids
  rename_generic(lines, 18, [5, 7, 9, 11])
def rename_chir(lines):
  # _chem_comp_chir loop: four atom ids
  rename_generic(lines, 14, [5, 7, 9, 11])
def rename_plan(lines):
  # _chem_comp_plane_atom loop: one atom id
  rename_generic(lines, 8, [5])
def rename_link_bond(lines):
  # _chem_link_bond loop: two atom ids
  rename_generic(lines, 16, [5, 9])
def rename_link_angle(lines):
  # _chem_link_angle loop: three atom ids
  rename_generic(lines, 18, [5, 9, 13])
def rename_link_tor(lines):
  # _chem_link_tor loop: four atom ids
  rename_generic(lines, 26, [7, 11, 15, 19])
def run(args):
  """Rewrite a monomer-library CIF file (given as the single command-line
  argument), translating RNA/DNA atom names in each known loop; output is
  written to stdout.

  :param args: list with exactly one element, the input file path
  """
  assert len(args) == 1
  # Read the whole file up front inside a context manager so the handle is
  # closed deterministically (the original bare open() leaked it to the GC).
  with open(args[0]) as f:
    lines = iter(f.read().splitlines())
  for line in lines:
    print(line)
    # each sentinel below is the last header tag of its loop; the matching
    # rename_* consumes the data rows that follow
    if (line == "_chem_comp_atom.partial_charge"):
      rename_atom(lines)
    elif (line == "_chem_comp_tree.connect_type"):
      rename_tree(lines)
    elif (line == "_chem_comp_bond.value_dist_esd"):
      rename_bond(lines)
    elif (line == "_chem_comp_angle.value_angle_esd"):
      rename_angle(lines)
    elif (line == "_chem_comp_tor.period"):
      rename_tor(lines)
    elif (line == "_chem_comp_chir.volume_sign"):
      rename_chir(lines)
    elif (line == "_chem_comp_plane_atom.dist_esd"):
      rename_plan(lines)
    #
    elif (line == "_chem_link_bond.value_dist_esd"):
      rename_link_bond(lines)
    elif (line == "_chem_link_angle.value_angle_esd"):
      rename_link_angle(lines)
    elif (line == "_chem_link_tor.period"):
      rename_link_tor(lines)
    elif (line == "_chem_link_chir.volume_sign"):
      raise RuntimeError("Not implemented.")
    elif (line == "_chem_link_plane.dist_esd"):
      raise RuntimeError("Not implemented.")
if (__name__ == "__main__"):
  run(args=sys.argv[1:])
|
<filename>vespa/analysis/block_raw.py
# Python modules
# 3rd party modules
from xml.etree.cElementTree import Element
# Our modules
import vespa.analysis.chain_raw as chain_raw
import vespa.analysis.block as block
import vespa.common.mrs_data_raw as mrs_data_raw
import vespa.common.util.xml_ as util_xml
from vespa.common.constants import Deflate
class _Settings(object):
    """
    Settings object contains the parameter inputs used for processing in the
    Chain object in this Block. Having a separate object helps to delineate
    inputs/outputs and to simplify load/save of preset values.

    This object can also save/recall these values to/from an XML node.
    """
    XML_VERSION = "1.0.0"

    def __init__(self, attributes=None):
        """ Currently there are no input parameters set in this object. """
        pass

    def deflate(self, flavor=Deflate.ETREE):
        """Serialize to an ElementTree node or a plain dict."""
        if flavor == Deflate.ETREE:
            return Element("settings", {"version" : self.XML_VERSION})
        elif flavor == Deflate.DICTIONARY:
            return dict(self.__dict__)

    def inflate(self, source):
        """Restore state from an ElementTree node or a dict-like object."""
        if hasattr(source, "makeelement"):
            # Quacks like an ElementTree.Element; nothing to restore yet
            pass
        elif hasattr(source, "keys"):
            # Quacks like a dict: copy over only known attributes
            for key, value in list(source.items()):
                if hasattr(self, key):
                    setattr(self, key, value)
class BlockRaw(block.Block, mrs_data_raw.DataRaw):
    """
    Building block to hold the state of a step in an MRS processing chain.
    Includes the functionality to save/recall this object to/from an XML node.

    Raw Blocks hold data loaded from file. They don't have 'inputs' for a
    Chain object. They do have the attributes inherited from DataRaw.

    For some subclasses, one or more DataRaws objects can be held in a Block
    object, such as the On/Off/Add/Diff objects of an Edited data file.
    """
    XML_VERSION = "1.0.0"

    def __init__(self, attributes=None):
        block.Block.__init__(self, attributes)
        mrs_data_raw.DataRaw.__init__(self, attributes)
        # processing parameters
        self.set = _Settings(attributes)

    ##### Standard Methods and Properties #####################################

    @property
    def dims(self):
        """Data dimensions in a list, read only."""
        return list(self.data.shape[::-1]) if self.data is not None else None

    def __str__(self):
        lines = mrs_data_raw.DataRaw.__str__(self).split('\n')
        # Replace the heading line
        lines[0] = "------- {0} Object -------".format(self.__class__.__name__)
        lines.append("No printable data ")
        return '\n'.join(lines)

    def create_chain(self, dataset):
        """Attach the Chain object that performs processing for this block."""
        self.chain = chain_raw.ChainRaw(dataset, self)

    def concatenate(self, new):
        # This is a method from DataRaw that's not supported here.
        raise NotImplementedError

    def deflate(self, flavor=Deflate.ETREE):
        """Serialize this block to an ElementTree node; DICTIONARY flavor is
        deliberately unsupported."""
        if flavor == Deflate.ETREE:
            # Call base class - then update for subclass
            e = mrs_data_raw.DataRaw.deflate(self, flavor)
            e.tag = "block_raw"
            e.set("version", self.XML_VERSION)
            # Now I deflate the attribs that are specific to this class
            e.append(self.set.deflate())
            return e
        elif flavor == Deflate.DICTIONARY:
            raise NotImplementedError

    def inflate(self, source):
        """Restore state from an ElementTree node or a dict-like object."""
        # Make my base class do its inflate work
        mrs_data_raw.DataRaw.inflate(self, source)
        # Now I inflate the attribs that are specific to this class
        if hasattr(source, "makeelement"):
            # Quacks like an ElementTree.Element
            self.set = util_xml.find_settings(source, "block_raw_settings")
            self.set = _Settings(self.set)
        elif hasattr(source, "keys"):
            # Quacks like a dict
            for key in list(source.keys()):
                if key == "set":
                    setattr(self, key, source[key])
|
<reponame>paulhoule/tentacruel
# pylint: disable=missing-docstring
import datetime
import json
import os
import re
import sys
from email.utils import parsedate_to_datetime, format_datetime
from logging import getLogger
from shutil import copyfile
from uuid import uuid4, NAMESPACE_URL, uuid5
from hashlib import sha384
from math import floor
from operator import itemgetter
from pathlib import Path
from typing import Dict
import imageio
import numpy as np
from aiohttp import ClientSession, ClientResponseError, ClientError
from arango.database import Database
from bs4 import BeautifulSoup
from jinja2 import Environment, PackageLoader, select_autoescape
from metar.Metar import Metar
from tentacruel.time import from_zulu_string, to_zulu_string, utcnow
# Jinja2 environment rendering the HTML templates shipped in
# tentacruel/nws/jj2, with autoescaping for html/xml output.
JINJA = Environment(
    loader=PackageLoader('tentacruel.nws', 'jj2'),
    autoescape=select_autoescape(['html', 'xml'])
)
LOGGER = getLogger(__package__)
# 600 x 576
# to install ffmepg: imageio.plugins.ffmpeg.download()
# Registry of filename -> timestamp parsers, populated by the @register
# decorator below and looked up via pattern["date_fn"].
DATE_FUNCTIONS = {}
def register(function):
    """Decorator that records *function* in DATE_FUNCTIONS under its own
    name and returns it unchanged."""
    name = function.__name__
    DATE_FUNCTIONS[name] = function
    return function
def wind_alpha(angle: float):
    """Return the 8-point compass name ("N", "NE", ...) for a wind
    direction in degrees, or "variable" when the direction is None."""
    if angle is None:
        return "variable"
    compass = ["N", "NE", "E", "SE", "S", "SW", "W", "NW"]
    # shift by half a sector so each name is centered on its heading
    sector = int((angle + 22.5) / 45.0) % 8
    return compass[sector]
RE_DURATION = re.compile(r"(\d+) (days|seconds|microseconds|milliseconds|minutes|hours|weeks)")
def parse_duration(duration):
    """Parse strings like "2 weeks" or "30 minutes" into a timedelta.

    :raises ValueError: when the string is not "<integer> <unit>"
    """
    match = RE_DURATION.fullmatch(duration)
    if match is None:
        raise ValueError(f"duration {duration} is not an integer followed by a time unit")
    unit, amount = match[2], int(match[1])
    return datetime.timedelta(**{unit: amount})
async def afetch(session: ClientSession, url: str):
    """
    Asynchronous fetch. Do a GET request, return text, properly shutdown

    :param session: ClientSession object for aiohttp connection
    :param url: The URL we want
    :return: tuple of (response body as text, response headers); raises
        aiohttp.ClientResponseError for HTTP error statuses
    """
    async with session.get(url) as response:
        response.raise_for_status()
        return (await response.text(), response.headers)
async def ahead(session: ClientSession, url: str):
    """
    Asynchronous head request.

    :param session: aiohttp ClientSession
    :param url: The URL we want
    :return: the response headers (no body is transferred for HEAD)
    """
    async with session.head(url) as response:
        return response.headers
class NotModified(Exception):
    """Raised by bfetch when a conditional GET answers 304 Not Modified."""
    pass
async def bfetch(session: ClientSession, url: str, request_headers=None):
    """
    Asynchronous binary fetch. Do a GET request, return binary data, properly shutdown

    :param session: ClientSession object for aiohttp connection
    :param url: The URL we want
    :param request_headers: optional extra request headers (e.g.
        If-Modified-Since for conditional fetches)
    :return: tuple of (response body as bytes, response headers)
    :raises NotModified: when the server answers 304
    """
    kwargs = {}
    if request_headers:
        kwargs["headers"] = request_headers
    async with session.get(url, **kwargs) as response:
        if response.status >= 400:
            #
            # 404 errors appear to happen regularly. I don't want these resulting in a
            # cron job email, so I suppress the log message.
            #
            action = LOGGER.debug if response.status == 404 else LOGGER.error
            action("Got status %s for GET %s", response.status, url)
        response.raise_for_status()
        if response.status == 304:
            raise NotModified()
        return (await response.read(), response.headers)
class NoVideoFrames(ValueError):
    """Raised by RadarFetch._make_video when no recent frames are available."""
    pass
class RadarFetch:
    """Fetches NWS radar imagery and forecast data, caches it on disk and in
    ArangoDB, and renders videos, stills and HTML pages from it."""

    def __init__(self, config: Dict, adb: Database):
        """
        :param config:
           Configuration dictionary. The first key is "paths" and represents paths,
           the second is "products" which represents products that this system can
           generate. This is not the general 'tentacruel' configuration dictionary.
        :param adb: ArangoDB database handle used for caching and metadata
        """
        self._source_base = config["paths"]["source_base"]
        self._cache = Path.home() / "radar"
        self._patterns = config["products"]
        self._output = Path(config["paths"]["output"])
        self._adb = adb
        self._resources = {}
        # navigation entries: template file name -> page title
        self._pages = {
            pattern["template"] : pattern["title"] for pattern in self._patterns
        }
        self._pages["forecast.html"] = "Short-term forecast"

    async def fetch_forecast_text(self, session: ClientSession):
        """Fetch the gridpoint forecast JSON, honoring the ArangoDB cache and
        falling back to stale cached content on server errors."""
        url = "https://api.weather.gov/gridpoints/BGM/47,66/forecast"
        _key = str(uuid5(NAMESPACE_URL, url))
        cached = self._adb.collection("cache").get(_key)
        if cached:
            old_expires = from_zulu_string(cached["expires"])
            if old_expires > utcnow():
                LOGGER.debug("Fetched url %s out of arangodb", url)
                return cached["content"]
        try:
            (content, headers) = await afetch(session, url)
        except ClientError as that:
            status = getattr(that, "status", "Unknown")
            # server-side failures are expected occasionally; log softer
            if status in [500, 503]:
                logger = LOGGER.warning
            else:
                logger = LOGGER.error
            logger("Attempt to fetch url %s failed with %s status code.", url, status)
            if cached:
                logger("Falling back on cached content from arangodb for url %s", url)
                return cached["content"]
            LOGGER.error("Could not find url %s in cache", url)
            raise
        result = json.loads(content)
        if "expires" in headers:
            expires = parsedate_to_datetime(headers["expires"])
            document = {
                "_key": _key,
                "url": url,
                "expires": to_zulu_string(expires),
                "content": result
            }
            self._adb.aql.execute("""
                UPSERT {_key: @key} INSERT @document REPLACE @document IN cache
            """, bind_vars={"key": _key, "document": document})
        return result

    async def fetch_motd(self, session: ClientSession):
        """Download the forecaster's message-of-the-day image if it changed,
        storing it under its SHA-384 digest and recording metadata in the
        snapshots collection.  Returns the path of the current image."""
        # pylint: disable = too-many-locals
        url = "https://www.weather.gov/images/bgm/finalMOD.jpg"
        target_dir = self._cache / "bgm" / "finalMod"
        target_dir.mkdir(parents=True, exist_ok=True)
        request_headers = {}
        # most recent snapshot we know about, if any
        cursor = self._adb.aql.execute("""
            for row in snapshots
                filter row.url==@url
                sort row.last_modified desc
                limit 1
                return row
        """, bind_vars={"url": url})
        try:
            last = next(cursor)
            http_modified = format_datetime(from_zulu_string(last["last_modified"]), usegmt=True)
            #
            # note here we are testing the file in storage no matter if it is the latest or not
            #
            old_hexdigest = last["sha384"]
            filename = target_dir / f"{old_hexdigest}.jpg"
            if filename.exists():
                observed = sha384(filename.read_bytes()).hexdigest()
                if observed == old_hexdigest:
                    # only send a conditional GET if our copy is intact
                    request_headers["If-Modified-Since"] = http_modified
                    LOGGER.debug("Found file %s with correct SHA384 digest", filename)
                else:
                    LOGGER.error("Found file %s did not match expected SHA384 digest", filename)
                    filename.unlink()
            else:
                LOGGER.error("Didn't find file %s although file was in database", filename)
        except StopIteration:
            old_hexdigest = None
        try:
            (content, headers) = await bfetch(session, url, request_headers=request_headers)
        except NotModified:
            # NOTE(review): a 304 can only occur when If-Modified-Since was
            # sent above, which implies `filename` is bound -- confirm
            LOGGER.debug("Recieved 304 Not Modified for url %s", url)
            return filename
        LOGGER.debug("Downloaded %d bytes from url %s", len(content), url)
        hexdigest = sha384(content).hexdigest()
        last_modified = to_zulu_string(parsedate_to_datetime(headers['Last-Modified']))
        LOGGER.debug("SHA384 digest: %s", hexdigest)
        LOGGER.debug("Last modified date: %s ", last_modified)
        if hexdigest != old_hexdigest:
            self._adb.insert_document("snapshots", {
                "_key": str(uuid4()),
                "url": url,
                "last_modified": last_modified,
                "content_length": len(content),
                "sha384": hexdigest
            })
        filename = target_dir / f"{hexdigest}.jpg"
        filename.write_bytes(content)
        return filename

    def fetch_wx(self):
        """Return the most recent METAR observation row from ArangoDB."""
        cursor = self._adb.aql.execute("""
            for row in metar
                sort row.time desc
                limit 1
                return row
        """)
        return next(cursor)

    def copy_template(self, pattern, failed=False, **kwargs):
        """Render pattern["template"] with Jinja2 into the output directory."""
        template_name = pattern["template"]
        destination = pattern["template"]
        kwargs["template_name"] = template_name
        kwargs["pages"] = self._pages
        kwargs["failed"] = failed
        if kwargs.get("failed"):
            # the failure page embeds a fallback radar html snippet instead
            kwargs["radar_html"] = pattern["radar_html"]
        template = JINJA.get_template(template_name)
        index_out = self._output / destination
        index_out.write_text(template.render(**kwargs), encoding="utf-8")

    async def refresh(self):
        """Fetch forecast, MOTD and all product imagery in one HTTP session."""
        async with ClientSession() as session:
            self._resources["forecast"] = await self.fetch_forecast_text(session)
            self._resources["motd"] = await self.fetch_motd(session)
            for pattern in self._patterns:
                await self._refresh(session, pattern)

    async def _refresh(self, session, pattern: dict):
        """Crawl the upstream directory listing for *pattern* and download
        any frame files not already cached."""
        # pylint: disable = too-many-locals
        await self._fetch_overlays(session, pattern)
        product_dir = "/".join(pattern["pattern"].split("/")[:-1])
        # last path component of the pattern is a regex over file names
        regex = re.compile(pattern["pattern"].split("/")[-1])
        target_dir = self._cache / product_dir
        target_dir.mkdir(parents=True, exist_ok=True)
        url_directory = self._source_base + product_dir + "/"
        LOGGER.debug("Checking %s", url_directory)
        (target, *_) = await afetch(session, url_directory)
        soup = BeautifulSoup(target, features="lxml")
        links = soup.find_all("a")
        crawl = []
        for link in links:
            href = link["href"]
            if regex.match(href):
                crawl.append(href)
        for href in crawl:
            target_file = self._cache / product_dir / href
            source_url = url_directory + href
            try:
                await self._fetch_file(session, source_url, target_file)
            except ClientResponseError as response_error:
                # 404 errors happen sporadically; possibly the file got deleted on the server after
                # we did a GET for the index. Pretend that 404s didn't happen
                if response_error.status != 404:
                    raise

    async def _fetch_overlays(self, session, pattern: dict):
        """Download any overlay images named by the pattern (cached on disk)."""
        if "overlays" in pattern:
            for overlay in pattern["overlays"]:
                source_url = self._source_base + overlay
                target_file = self._cache / overlay
                await self._fetch_file(session, source_url, target_file)

    async def _fetch_file(self, session, source_url, target_file: Path):
        """Download *source_url* to *target_file* unless it already exists."""
        if target_file.exists():
            LOGGER.debug(
                "File %s already exists -- no need to download %s",
                target_file, source_url)
            return
        (gif_data, _) = await bfetch(session, source_url)
        target_file.parent.mkdir(parents=True, exist_ok=True)
        with open(target_file, "wb") as that:
            that.write(gif_data)

    def make_video(self):
        """Render video, still and page for every configured product."""
        arguments = self.wx_arguments()
        for pattern in self._patterns:
            try:
                last_date = self._make_video(pattern)
                self._make_still(pattern)
                self.copy_template(pattern, last_date=last_date.isoformat(), **arguments)
            except NoVideoFrames:
                # render the page in "failed" mode rather than leaving it stale
                self.copy_template(pattern, failed=True, **arguments)

    def wx_arguments(self):
        """
        Compute arguments to include in template to show current weather information
        :return: dict of template variables describing the latest observation
        """
        latest_wx = self.fetch_wx() # pylint: disable = invalid-name
        parsed_wx = Metar(latest_wx["code"])
        present_weather = parsed_wx.present_weather()
        if not present_weather:
            present_weather = "no precipitation"
        arguments = {
            "radar_id": "BGM",
            "latest_wx": {
                "wx_time": latest_wx.get("time"),
                "location": "Ithaca Airport",
                "temp": latest_wx.get("temp"),
                "dewpt": latest_wx.get("dewpt"),
                "humidity": latest_wx.get("humidity"),
                "wind_speed": latest_wx.get("wind_speed"),
                "wind_alpha": wind_alpha(latest_wx.get("wind_dir")),
                "wind_dir": latest_wx.get("wind_dir"),
                "pressure": latest_wx.get("pressure"),
                "sky": latest_wx.get("sky"),
                "present_weather": present_weather
            }
        }
        return arguments

    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-branches
    def _make_video(self, pattern):
        """Assemble cached frames for *pattern* into an MP4, pruning frames
        older than the retention window.

        :return: timestamp of the last frame used
        :raises NoVideoFrames: when no frame falls inside the time window
        """
        LOGGER.info("Creating video %s", pattern['video'])
        start = datetime.datetime.now()
        infiles = self._lookup_matching(pattern)
        if infiles:
            LOGGER.debug("Found %d image files matching pattern %s", len(infiles), pattern)
        else:
            LOGGER.error("No image files found matching pattern %s", pattern)
        # timestamp parser registered via @register, selected by config
        date_fn = DATE_FUNCTIONS[pattern["date_fn"]]
        dated = [{"path": file, "timestamp": date_fn(file.name)} for file in infiles]
        dated.sort(key=itemgetter("timestamp"))
        now = datetime.datetime.now(datetime.timezone.utc)
        window = parse_duration(pattern.get("window", "1 days"))
        retain = parse_duration(pattern.get("retain", "2 weeks"))
        dated = [{**row, "age": now - row["timestamp"]} for row in dated if row["timestamp"]]
        video_frames = [row for row in dated if row["age"] < window]
        ancient = [row for row in dated if row["age"] > retain]
        if not video_frames:
            for row in dated[-25:]:
                LOGGER.debug(row)
            raise NoVideoFrames(f"I can't make a video {pattern['video']} without any frames")
        for row in ancient:
            try:
                LOGGER.debug("Removing old file %s", row["path"])
                row["path"].unlink()
            except OSError:
                LOGGER.warning("Exception removing %s", row['path'])
        self._output.mkdir(parents=True, exist_ok=True)
        movie_out = str(self._output / pattern["video"])
        # write to a temp name, swap in atomically-ish at the end
        movie_temp = movie_out[:-4] + "-temp.mp4"
        overlays = self._load_overlays(pattern)
        overlays = self._merge_overlays(overlays)
        LOGGER.debug("Preparing to write movie to %s", movie_temp)
        good_frames = 0
        with imageio.get_writer(
                movie_temp,
                mode='I', fps=10) as writer:
            for item in video_frames:
                file = item["path"]
                try:
                    LOGGER.debug("Composing frame from file %s", file)
                    content = self._compose_frame(file, overlays)
                except Exception: # pylint: disable=broad-except
                    #
                    # PIL throws fairly random errors when parsing a corrupt file, so I
                    # catch Exception to get them all
                    #
                    LOGGER.info("Could not read image from %s deleting", file, exc_info=True)
                    try:
                        file.unlink()
                    # on Windows the file might not have been released by imageio and we might
                    # not be able to delete it
                    except PermissionError:
                        pass
                    continue
                if len(content.shape) == 2:
                    # expand grayscale into a 4-channel stack for the writer
                    content = np.moveaxis(
                        np.array([
                            content, content, content, np.zeros_like(content)
                        ]), 0, -1)
                # the image should be divisible for 16x16 macroblocks; crop away the from the left
                # and the top because my judgement is that for the northeast case this is best.
                (width, height, _) = content.shape
                legal_width = 16 * floor(width / 16)
                legal_height = 16 * floor(height / 16)
                cropped = content[-legal_width:, -legal_height:]
                writer.append_data(cropped)
                good_frames += 1
        if good_frames:
            if not sys.platform.startswith('linux'):
                # non-POSIX rename will not overwrite an existing target
                if os.path.exists(movie_out):
                    os.unlink(movie_out)
            os.rename(movie_temp, movie_out)
            end = datetime.datetime.now()
            LOGGER.info("Completed video %s in time %s", pattern['video'], end - start)
            return date_fn(video_frames[-1]["path"].name)
        raise ValueError("Found no valid video frames to make movie")

    def _lookup_matching(self, pattern):
        """Return the sorted cached files whose extension matches the pattern."""
        product_dir = "/".join(pattern["pattern"].split("/")[:-1])
        src = self._cache / product_dir
        ext = pattern["pattern"][-3:]
        infiles = sorted(src.glob(f"*.{ext}"))
        return infiles

    def _make_still(self, pattern):
        """Write the most recent frame as a still PNG, if configured."""
        if "still" not in pattern:
            return
        last_shot = self._lookup_matching(pattern)[-1]
        overlays = self._load_overlays(pattern)
        content = self._compose_frame(last_shot, overlays)
        imageio.imwrite(self._output / pattern["still"], content, "PNG-FI")

    def _load_overlays(self, pattern):
        """Load each configured overlay image as an (rgb, alpha) pair."""
        output = []
        if "overlays" in pattern:
            for file_name in pattern["overlays"]:
                overlay = load_masked_image(self._cache / file_name)
                output.append(overlay)
        return output

    # pylint: disable=no-self-use
    def _merge_overlays(self, overlays):
        """Pre-composite all overlays into one so each video frame needs only
        a single composite pass."""
        if len(overlays) < 2:
            return overlays
        output = overlays[0]
        for overlay in overlays[1:]:
            output = fast_composite(output, overlay)
        return [output]

    def _compose_frame(self, path, overlays):
        """Load a frame, force a black background, then stack the overlays;
        returns a uint8 rgb array."""
        content = load_masked_image(self._cache / path)
        content = black_background(content)
        for overlay in overlays:
            content = fast_composite(content, overlay)
        return content[0].astype(np.uint8)

    def make_forecast(self):
        """Render the textual forecast page plus the MOTD image."""
        arguments = self.wx_arguments()
        arguments["forecast"] = self._resources["forecast"]
        if "properties" not in arguments["forecast"]:
            LOGGER.error(
                "No 'properties' found in forecast data:\n%s",
                json.dumps(arguments["forecast"], indent=2)
            )
        motd = self._resources["motd"]
        copyfile(motd, self._output / "motd.jpg")
        pattern = {
            "template" : "forecast.html"
        }
        self.copy_template(pattern, motd=motd, **arguments)
def load_masked_image(path):
    """Read an image file and return an (rgb, alpha) pair of numpy arrays.

    Grayscale images are broadcast to three channels with a mask that is
    True wherever the pixel is non-zero; RGBA images are split into their
    color plane and a boolean alpha plane.
    """
    img = imageio.imread(path)
    if img.ndim == 2:
        color = np.broadcast_to(np.expand_dims(img, axis=-1), img.shape + (3,))
        mask = np.expand_dims(img.astype(bool), axis=-1)
    else:
        color = img[:, :, 0:3]
        mask = img[:, :, 3:].astype(bool)
    return (color, mask)
def fast_composite(below, above):
    """Composite two (rgb, boolean-alpha) images: *above* wins wherever its
    mask is set; the union of both masks becomes the result alpha."""
    above_rgb, above_mask = above
    below_rgb, below_mask = below
    rgb = above_rgb * above_mask + below_rgb * (1 - above_mask)
    return (rgb, above_mask | below_mask)
def black_background(above):
    """Zero out the color wherever the mask is unset, forcing a fully white
    opaque frame to all-black; the returned alpha is all-True."""
    rgb, mask = above
    if (rgb == 255).all() and mask.all():
        # completely white and opaque: treat as an empty (black) frame
        return (rgb * 0, mask | True)
    return (rgb * mask, mask | True)
def floatify(img):
    """Convert a byte image (0-255) to float64 values in [0.0, 1.0]."""
    as_float = img.astype("f8")
    return as_float / 255.0
def byteify(img):
    """Convert a float image to bytes, clipping to [0, 1] first."""
    bounded = np.clip(img, 0.0, 1.0)
    return (bounded * 255.0).astype('B')
def alpha_composite(below, above):
    """Standard float "over" compositing of two RGBA images in [0, 1];
    NaNs from fully-transparent pixels are flushed to zero."""
    below_rgb, below_a = below[:, :, 0:3], below[:, :, 3:]
    above_rgb, above_a = above[:, :, 0:3], above[:, :, 3:]
    out_a = above_a + below_a * (1.0 - above_a)
    blended = above_rgb * above_a + below_rgb * below_a * (1.0 - above_a)
    out_rgb = np.nan_to_num(blended / out_a, copy=False)
    return np.concatenate((out_rgb, out_a), axis=2)
|
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
from typing import NamedTuple
import numpy as np
from caffe2.python import core
class OutputTransformerNet(NamedTuple):
    """Pair of caffe2 nets produced by an output transformer: `net` runs per
    request; `init_net` fills the constant blobs `net` depends on."""
    net: core.Net
    init_net: core.Net
class OutputTransformerBase(object, metaclass=abc.ABCMeta):
    """Base for helpers that append output-formatting operators to a caffe2
    predictor, exporting blobs under the "output/..." naming convention."""

    @abc.abstractmethod
    def create_net(self, original_output) -> OutputTransformerNet:
        """Build and return the (net, init_net) pair for this transformer."""
        pass

    def create_const(
        self, init_net, name, value, shape=None, dtype=core.DataType.FLOAT
    ):
        """Add a constant blob filled with *value* to *init_net*, mark it as
        an external output, and return its blob reference."""
        shape = shape or []
        blob = init_net.NextScopedBlob(name)
        if not isinstance(value, list):
            # GivenTensorFill expects a list of values
            value = [value]
        init_net.GivenTensorFill([], blob, shape=shape, values=value, dtype=dtype)
        init_net.AddExternalOutput(blob)
        return blob

    def get_batch_size_blob(self, net, data):
        """Return a 1-element blob holding the first (batch) dimension of
        *data*, computed at run time."""
        data_shape = net.Shape(data, net.NextScopedBlob("data_shape"))
        batch_size = net.Slice(
            data_shape, net.NextScopedBlob("batch_size"), starts=[0], ends=[1]
        )
        return batch_size

    def create_action_name_blob(self, init_net, action_names):
        """Constant string blob holding one entry per action name."""
        return self.create_const(
            init_net,
            "action_names",
            action_names,
            shape=[len(action_names)],
            dtype=core.DataType.STRING,
        )

    def export_q_values(self, net, q_values, action_names, action_name_blob):
        """Export *q_values* in the string-weighted multi-categorical feature
        layout: per-row lengths and keys, plus per-action names and values."""
        batch_size = self.get_batch_size_blob(net, q_values)
        # one categorical feature per example
        feature_lengths_blob = core.BlobReference(
            "output/string_weighted_multi_categorical_features.lengths"
        )
        net.ConstantFill(
            batch_size,
            feature_lengths_blob,
            value=1,
            dtype=core.DataType.INT32,
            input_as_shape=1,
        )
        feature_keys_blob = core.BlobReference(
            "output/string_weighted_multi_categorical_features.keys"
        )
        net.ConstantFill(
            batch_size,
            feature_keys_blob,
            value=0,
            dtype=core.DataType.INT64,
            input_as_shape=1,
        )
        # each example carries len(action_names) (name, q) pairs
        values_lengths_blob = core.BlobReference(
            "output/string_weighted_multi_categorical_features.values.lengths"
        )
        net.ConstantFill(
            batch_size,
            values_lengths_blob,
            value=len(action_names),
            dtype=core.DataType.INT32,
            input_as_shape=1,
        )
        values_keys_blob = core.BlobReference(
            "output/string_weighted_multi_categorical_features.values.keys"
        )
        # repeat the action-name vector once per example
        net.Tile([action_name_blob, batch_size], values_keys_blob, axis=0)
        values_values_blob = core.BlobReference(
            "output/string_weighted_multi_categorical_features.values.values"
        )
        net.FlattenToVec(q_values, values_values_blob)
        net.AddExternalOutput(
            feature_lengths_blob,
            feature_keys_blob,
            values_lengths_blob,
            values_keys_blob,
            values_values_blob,
        )
class DiscreteActionOutputTransformer(OutputTransformerBase):
    """Exports a discrete-action Q-network's output: the per-action Q values
    plus both a greedy (argmax) and a softmax-sampled action choice."""

    def __init__(self, action_names, temperature=1.0):
        # temperature scales the softmax used for the sampled action
        self.action_names = action_names
        self.temperature = temperature

    def create_net(self, original_output):
        net = core.Net("output_transformer")
        init_net = core.Net("output_transformer_init")
        action_name_blob = self.create_action_name_blob(init_net, self.action_names)
        q_values = original_output.q_values()
        self.export_q_values(net, q_values, self.action_names, action_name_blob)
        # greedy action: index of the maximum Q value per example
        max_q_idx = net.ArgMax(q_values, net.NextScopedBlob("max_q_idx"), keepdims=0)
        max_q_idx = net.Cast(
            max_q_idx, net.NextScopedBlob("max_q_idx_int"), to=core.DataType.INT32
        )
        # stochastic action: sample from temperature-scaled softmax
        temperature = self.create_const(init_net, "temperature", self.temperature)
        tempered_q_values = net.Div(
            [q_values, temperature],
            net.NextScopedBlob("tempered_q_values"),
            broadcast=1,
        )
        softmax_values = net.Softmax(tempered_q_values, net.NextScopedBlob("softmax"))
        softmax_act_idx = net.WeightedSample(
            [softmax_values], net.NextScopedBlob("softmax_act_idx")
        )
        # pack [greedy, sampled] side by side, then flatten to a vector
        action_indices, _ = net.Concat(
            [max_q_idx, softmax_act_idx],
            [
                net.NextScopedBlob("action_indices"),
                net.NextScopedBlob("action_indices_spilt_info"),
            ],
            axis=1,
            add_axis=1,
        )
        flatten_action_indices = net.FlattenToVec(
            action_indices, net.NextScopedBlob("flatten_action_indices")
        )
        lengths = core.BlobReference(
            "output/string_single_categorical_features.lengths"
        )
        keys = core.BlobReference("output/string_single_categorical_features.keys")
        values = core.BlobReference("output/string_single_categorical_features.values")
        # map the chosen indices back to action-name strings
        net.Gather([action_name_blob, flatten_action_indices], values)
        # two entries (greedy + sampled) per example
        net.ConstantFill([max_q_idx], lengths, value=2, dtype=core.DataType.INT32)
        action_keys = self.create_const(
            init_net, "action_keys", value=[0, 1], shape=[2], dtype=core.DataType.INT64
        )
        # NOTE(review): Shape's second argument is normally an output blob
        # name; passing the literal 1 here looks suspicious -- confirm
        # against the caffe2 operator API.
        batch_size = net.Shape(max_q_idx, 1)
        net.Tile([action_keys, batch_size], keys, axis=0)
        net.AddExternalOutput(lengths, keys, values)
        return OutputTransformerNet(net=net, init_net=init_net)
class ParametricActionOutputTransformer(OutputTransformerBase):
    """Exports a parametric-action model's single scalar Q value under the
    fixed action name "Q"."""

    def __init__(self):
        self.action_names = ["Q"]

    def create_net(self, original_output):
        net = core.Net("output_transformer")
        init_net = core.Net("output_transformer_init")
        action_name_blob = self.create_action_name_blob(init_net, self.action_names)
        q_value = original_output.q_value()
        self.export_q_values(net, q_value, self.action_names, action_name_blob)
        return OutputTransformerNet(net=net, init_net=init_net)
class ActorOutputTransformer(OutputTransformerBase):
    """Exports a continuous-action actor's output as "output/float_features"
    blobs, rescaling each action dimension from the training range to the
    serving range."""

    def __init__(
        self,
        action_feature_ids,
        serving_max_scale,
        serving_min_scale,
        training_max_scale=None,
        training_min_scale=None,
    ):
        """
        :param action_feature_ids: feature id for each action dimension
        :param serving_max_scale: per-dimension upper bound expected at serving
        :param serving_min_scale: per-dimension lower bound expected at serving
        :param training_max_scale: per-dimension upper bound of the trained
            policy's output; defaults to 1.0 - 1e-6 for every dimension
        :param training_min_scale: per-dimension lower bound of the trained
            policy's output; defaults to -1.0 + 1e-6 for every dimension
        """
        self.action_feature_ids = action_feature_ids
        # `np.float` was a deprecated alias for the builtin `float`
        # (removed in NumPy 1.24); `float` yields the same float64 dtype.
        self.serving_max_scale = np.array(serving_max_scale, dtype=float)
        self.serving_min_scale = np.array(serving_min_scale, dtype=float)
        self.training_max_scale = np.array(
            training_max_scale or [1.0 - 1e-6] * len(action_feature_ids), dtype=float
        )
        self.training_min_scale = np.array(
            training_min_scale or [-1.0 + 1e-6] * len(action_feature_ids),
            dtype=float,
        )

    def create_net(self, original_output):
        """Build nets that rescale the actor's action blob and export it in
        the float-features layout (lengths / keys / values)."""
        net = core.Net("output_transformer")
        init_net = core.Net("output_transformer_init")
        action = original_output.action()
        batch_size = self.get_batch_size_blob(net, action)
        # every example has len(action_feature_ids) float features
        action_dims = self.create_const(
            init_net,
            "action_dims",
            len(self.action_feature_ids),
            shape=[1],
            dtype=core.DataType.INT32,
        )
        lengths = core.BlobReference("output/float_features.lengths")
        net.Tile([action_dims, batch_size], lengths, axis=0)
        net.AddExternalOutput(lengths)
        action_feature_ids = self.create_const(
            init_net,
            "action_feature_ids",
            self.action_feature_ids,
            shape=[len(self.action_feature_ids)],
            dtype=core.DataType.INT64,
        )
        keys = core.BlobReference("output/float_features.keys")
        net.Tile([action_feature_ids, batch_size], keys, axis=0)
        net.AddExternalOutput(keys)
        values = core.BlobReference("output/float_features.values")
        # Shifting action to [training_max - training_min, 0]
        training_min_scale = self.create_const(
            init_net,
            "training_min_scale",
            self.training_min_scale.tolist(),
            shape=[len(self.training_min_scale)],
        )
        shifted_action = net.Sub([action, training_min_scale], 1, broadcast=1)
        # Scaling action by (serving_max - serving_min) / (training_max - training_min)
        scaling_factor = (self.serving_max_scale - self.serving_min_scale) / (
            self.training_max_scale - self.training_min_scale
        )
        scaling_factor_blob = self.create_const(
            init_net,
            "scaling_factor",
            scaling_factor.tolist(),
            shape=[len(scaling_factor)],
        )
        scaled_shifted_action = net.Mul(
            [shifted_action, scaling_factor_blob], 1, broadcast=1
        )
        # Shifting action to [serving_max, serving_min]
        serving_min_scale = self.create_const(
            init_net,
            "serving_min_scale",
            self.serving_min_scale.tolist(),
            shape=[len(self.serving_min_scale)],
        )
        scaled_action = net.Add(
            [scaled_shifted_action, serving_min_scale], 1, broadcast=1
        )
        # Now, we can flatten and return
        net.FlattenToVec(scaled_action, values)
        net.AddExternalOutput(values)
        return OutputTransformerNet(net=net, init_net=init_net)
|
from src.tasks.visualization import Graph
from src.tasks.pdf_to_txt import PdfToTxt
from src.tasks.merge_relations import MergeRelation
from src.tasks.graph_visualization import GraphVisualization
from nltk import word_tokenize
from src.BioBERT_NER_RE import ner_lib,re_lib
import glob,os,re,time
import pandas as pd
def get_abbreviations_lookup():
    """Build a dict mapping each medical abbreviation to its expansion.

    Reads the tab-separated Common_Abbreviations.tsv file; later duplicate
    abbreviations overwrite earlier ones, as in a plain dict assignment.
    """
    frame = pd.read_csv('./data/medical_abbreviations/Common_Abbreviations.tsv', sep='\t')
    return dict(zip(frame['Abbreviation'], frame['Stands for']))
def get_entities_relations(sents, infer=False):
    """Run NER and then RE over *sents* and return the annotated sentences.

    Parameters
    ----------
    sents : sentences to analyse (a single sentence string when infer=True).
    infer : use the single-sentence inference entry point of the NER library.

    Returns
    -------
    list of dict, each with the annotated 'sent' plus a 'relation' key added;
    or, when the NER layer returns a 2-element result, its second element
    (presumably an error/no-entity message -- TODO confirm against ner_lib).
    """
    ner_time_start = time.time()
    if infer:
        sents_with_entities = ner_lib.infer_on_sentence(sents)
    else:
        sents_with_entities = ner_lib.get_annotated_sents_with_entities(sents)
    if len(sents_with_entities) == 2:
        return sents_with_entities[1]
    print("the time for running NER:", time.time() - ner_time_start)
    sents_for_test = []
    for sent_with_entity in sents_with_entities:
        annotated_sent = sent_with_entity['sent']
        print(annotated_sent)
        print(type(annotated_sent))
        # Non-greedy .*? so that multiple [D]...[/D] / [C]...[/C] spans in a
        # single sentence are each replaced individually; the original greedy
        # .* collapsed everything between the first and last marker.
        sent = re.sub(r'\[D\].*?\[/D\]', 'DISEASE', annotated_sent)
        sent_for_test = re.sub(r'\[C\].*?\[/C\]', 'CHEMICAL', sent)
        sents_for_test.append(sent_for_test)
    re_time_start = time.time()
    predicted_relations = re_lib.get_predicted_relation(sents_for_test)
    print("the time for running RE:", time.time() - re_time_start)
    # attach one predicted relation per annotated sentence, in order
    for i, sent_with_entity in enumerate(sents_with_entities):
        sent_with_entity['relation'] = predicted_relations[i]
    return sents_with_entities
def replace_abbreviations(sents, lookup=None):
    """Expand known medical abbreviations in each sentence.

    Parameters
    ----------
    sents : iterable of str
        Sentences to process.
    lookup : dict, optional
        Abbreviation -> expansion map. Defaults to the module-level
        ``abbreviations_lookup`` built in ``__main__`` (kept for backward
        compatibility); passing it explicitly removes the dependency on
        that global.

    Returns
    -------
    list of str
        Sentences re-joined with single spaces, with every token found in
        the lookup replaced by its lower-cased expansion.
    """
    if lookup is None:
        lookup = abbreviations_lookup
    new_sents = []
    for sent in sents:
        new_tokens = []
        for token in word_tokenize(sent):
            if token in lookup:
                print(token)
                new_tokens.append(lookup[token].lower())
            else:
                new_tokens.append(token)
        new_sents.append(" ".join(new_tokens))
    return new_sents
if __name__ == "__main__":
abbreviations_lookup = get_abbreviations_lookup()
merged = MergeRelation()
for file in glob.glob('data/PDF_FOR_PARSE/*.pdf'):
csv_file = file.replace('.pdf', '.csv')
if os.path.exists(csv_file):
df = pd.read_csv(csv_file)
print(csv_file,len(df))
merged.get_df(df)
else:
file_name = os.path.basename(file)
sents_path = file.replace('.pdf', '_sents.csv')
graph = Graph(file_name)
input_sents = []
if os.path.exists(sents_path):
df_sents = pd.read_csv(sents_path,index_col=0)
for idx, row in df_sents.iterrows():
input_sents.append(row[0])
else:
Pdf2txt = PdfToTxt(file,is_pdf=True)
input_sents = Pdf2txt.get_processed_sents()
input_sents = replace_abbreviations(input_sents)
sents_df = pd.DataFrame(input_sents)
sents_df.to_csv(sents_path)
graph.add_edges(get_entities_relations(input_sents))
graph.get_df()
csv_path = file.replace('.pdf', '.csv')
graph.kg_df.to_csv(csv_path)
merged.get_df(graph.kg_df)
print("merged.kg_df",len(merged.kg_df))
merged.vote_relations()
merged.show_graph(merged.kg_df)
merged.kg_df.to_csv('./results/relation_result/DF_full_without_lemm.csv',index=False)
merged.voted_by_pdfs.to_csv('./results/relation_result/DF_vote_by_pdfs.csv',index=False)
merged.voted_by_sents.to_csv('./results/relation_result/DF_vote_by_sentences.csv',index=False)
merged.vote_relations.to_csv('./results/relation_result/vote_relations.csv',index=False)
merged.un_vote_relations.to_csv('./results/relation_result/un_vote_relations.csv', index=False)
merged.show_voted_by_pdf()
merged.show_voted_by_sents()
GraphVisual = GraphVisualization(merged.G)
while True:
request = input("please input relation or entity or infer or exit:\n")
if request.lower()=='relation':
relation_name = input(
"Type one realtion type (treat or side_effect or contraindication) you are interested in:\n")
GraphVisual.show_gragh_by_edge(relation_name)
elif request.lower()=='entity':
node = input(
"Type the node or nodes you are interested in, input nodes with comma to separate them:\n")
if node.find(',') != -1:
node = node.split(',')
node = list(node)
GraphVisual.node_adj_and_shown(node)
else:
GraphVisual.node_adj_and_shown(node)
elif request.lower()=='infer':
sent = input("input a sentence for infer:\n")
print(get_entities_relations(sent,infer=True))
else:
break
|
# Repository: xdzkl/deep-learning-with-python-notebooks
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 15:52:14 2019
@author: Administrator
"""
# 导入imdb数据集,imdb数据集有5万条来自网络电影数据库的评论,电影评论转换成了一系列数字,每个数字代表字典汇总的一个单词,下载后放到~/.keras/datasets/目录下,即可正常运行。)中找到下载,下载后放到~/.keras/datasets/目录下,即可正常运行。
from tensorflow.keras.datasets import imdb
# 加载数据集,num_words意味着只保留训练集中最常出现的10000的单词,不经常出现的单词被抛弃,最终所有评论的维度保持相同,变量train_data,test_data是电影评论的列表,每条评论由数字(对应单词在词典中出现的位置下标)列表组成。train_labels,test_labels是0,1列表,0负面评论,1表示正面评论。
(train_data,train_labels),(test_data,test_labels) = imdb.load_data(num_words=10000)
# train_data的大小是(25000,0),test_data的大小是(250000,)
# test_labels的大小是(25000,0),test_labels的大小是(25000,)
train_data[0][:10]
#[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65]
train_labels[:10]
# array([1, 0, 0, 1, 0, 0, 1, 0, 1, 0], dtype=int64)
max([max(sequence) for sequence in train_data])
#9999,这里是和num_words相对应
# 获得imdb中,单词和数字的对应表,形如下面:
# {a:68893,own:70879}
word_index = imdb.get_word_index()
# 将单词和数字的对应表的键值反转,并最终保存为字典,结果形如下面:
# {34071:'fawn',52006:'tsukino',···}
reverse_word_index = dict([(value,key) for (key,value) in word_index.items()])
# 这里含义是找出train_data[0]中数字列表,然后从reverse_word_index中找出对应的value
# 并使用空格连接起来
# 字典中的get方法语法是dict.get(key,default=None),这里'?'就是默认值
# 这里-3的含义是,因为0,1,2,是为padding(填充),start of sequence(序列开始),unknown(未知词)分别保留的索引。
decoded_review = ' '.join([reverse_word_index.get(i-3,'?') for i in train_data[0]])
decoded_review
# 形如下面
#? was not for it's self joke professional disappointment see already pretending their staged a every so found of his movies
import numpy as np
def vectorize_sequence (sequences,dimension = 10000):
    """Multi-hot encode integer sequences into a (len(sequences), dimension) matrix.

    Row i contains 1.0 at every column index that appears in sequences[i]
    and 0.0 everywhere else.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # fancy indexing sets all listed columns of this row at once
        encoded[row, word_indices] = 1
    return encoded
# Vectorize the training data: shape (25000, 10000)
x_train = vectorize_sequence(train_data)
# shape (25000, 10000)
x_test = vectorize_sequence(test_data)
x_train[0]
#array([0., 1., 1., ..., 0., 0., 0.])
# np.asarray avoids copying when the input is already an ndarray (np.array
# would copy by default); astype converts the labels to float32.
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# Import the models module
from tensorflow.keras import models
# Import the layers module
from tensorflow.keras import layers
# Build a Sequential model: a plain linear stack of layers. Details:
#https://keras-cn.readthedocs.io/en/latest/getting_started/sequential_model/
model = models.Sequential()
# Input dim (10000,), output dim (16,), ReLU activation
model.add(layers.Dense(16,activation='relu',input_shape=(10000,)))
# Input dim (16,), output dim (16,), ReLU activation
model.add(layers.Dense(16,activation='relu'))
# Input dim (16,), output dim (1,), sigmoid activation
model.add(layers.Dense(1,activation='sigmoid'))
model.summary()
#Model: "sequential_2"
#_________________________________________________________________
#Layer (type)                 Output Shape              Param #
#=================================================================
#dense_6 (Dense)              (None, 16)                160016
#_________________________________________________________________
#dense_7 (Dense)              (None, 16)                272
#_________________________________________________________________
#dense_8 (Dense)              (None, 1)                 17
#=================================================================
#Total params: 160,305
#Trainable params: 160,305
#Non-trainable params: 0
#_________________________________________________________________
# compile() configures the learning process: optimizer, loss function and
# the list of metrics to report.
model.compile(optimizer="rmsprop",
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Import the optimizers module
from tensorflow.keras import optimizers
# Same optimizer, configured explicitly with a learning rate of 0.001.
# NOTE: the keyword is learning_rate; the legacy lr= alias was removed in
# recent TensorFlow/Keras releases.
model.compile(optimizer = optimizers.RMSprop(learning_rate=0.001),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Import loss-function and metric modules
from tensorflow.keras import losses
from tensorflow.keras import metrics
# Binary cross-entropy: for models that output probabilities, cross-entropy
# is usually the best choice -- it measures the distance between the true
# distribution and the predictions.
model.compile(optimizer = optimizers.RMSprop(learning_rate=0.001),
              loss = losses.binary_crossentropy,
              metrics = [metrics.binary_accuracy])
# Hold out the first 10,000 training samples as a validation set
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# Train for 20 epochs in mini-batches of 512 samples, monitoring loss and
# accuracy on the 10,000 held-out samples passed via validation_data.
# fit() returns a History object whose .history attribute is a dict holding
# all per-epoch training/validation measurements.
history = model.fit(partial_x_train,partial_y_train,
                    epochs =20,batch_size = 512,validation_data=(x_val,y_val))
history_dict = history.history
history_dict.keys()
#dict_keys(['loss', 'binary_accuracy', 'val_loss', 'val_binary_accuracy'])
# Each key maps to a list with one entry per epoch (20 here)
import matplotlib.pyplot as plt
acc = history.history['binary_accuracy']
val_acc = history.history['val_binary_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(acc)+1)
plt.plot(epochs,loss,'bo',label = 'Training loss')
plt.plot(epochs,val_loss,'b',label='Validation loss')
plt.title('training and validation loss')
plt.xlabel('epochs')
plt.ylabel('loss')
# Add the legend
plt.legend()
plt.show()
# clf() clears the current figure
plt.clf()
acc_value = history_dict['binary_accuracy']
val_acc_value = history_dict['val_binary_accuracy']
plt.plot(epochs,acc,'bo',label='training acc')
plt.plot(epochs,val_acc,'b',label='validation acc')
plt.title('trainging and validation accuracy')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()
# Retrain from scratch for only 4 epochs (the validation curves above show
# overfitting after roughly 4 epochs), then evaluate on the test set.
model = models.Sequential()
model.add(layers.Dense(16,activation='relu',input_shape=(10000,)))
model.add(layers.Dense(16,activation='relu'))
model.add(layers.Dense(1,activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss = 'binary_crossentropy',
              metrics=['accuracy'])
model.fit(x_train,y_train,epochs=4,batch_size = 512)
# evaluate() returns the loss value and metric values in test mode
result = model.evaluate(x_test,y_test)
result
model.predict(x_test)
# Repository: TMillross/green_curriculum
"""
Created on Sun Nov 26 12:54:22 2017
@author: tom
Performs a keyword-based analysis of curriculum data from studiegids
Built for TU Delft to analyse the sustainability content of the education
With minor modifications, will work for any data with similar format
Outputs:
A ranking of all courses, based on the frequency of relevant keywords in the
free-text fields. Saved to a .txt file in the results directory
Instructions:
todo
"""
# import modules
# general
import sys
import os
import logging
import subprocess
# project specific
import nltk
""" Run these 2 lines separately to download the necessary stopwords data from this library
import nltk
nltk.download('stopwords')
"""
# Order of function definitions:
# support/helper
# data import
# data cleaning
# semantic analysis
# results calculation
# main body execution code at the end, includes saving the results
def file_folder_specs(root, uni):
    """ Get file and folder structure - the place to change
    folder name and structure information.

    Missing folders are created on the fly; if the university data folder
    itself had to be created, the script exits so the user can add data.

    Parameters
    ----------
    root : str
        Project root directory that hosts data/ and results/.
    uni : str
        University name, used as a sub-folder for data and results.

    Returns
    -------
    dict
        File and folder specs ('root', 'unidata', 'keyworddata', 'results')
    """
    files_folders = {
        'root': root,
        'unidata': os.path.abspath(
            os.path.join(root, 'data', uni)),
        'keyworddata': os.path.abspath(
            os.path.join(root, 'data', 'keywords')),
        'results': os.path.abspath(
            os.path.join(root, 'results', uni))
    }
    # Create any missing folder (loop replaces the copy-pasted checks;
    # 'unidata' is checked first so the exit below happens before other
    # folders matter).
    for key in ('unidata', 'keyworddata', 'results'):
        if not os.path.exists(files_folders[key]):
            os.makedirs(files_folders[key])
            if key == 'unidata':
                # if this folder was missing, the data cannot be present:
                # stop the script and tell the user where to put the data
                sys.exit("Folder created: " + files_folders['unidata'] + " \nPlease add your course data here and run again.")
    return files_folders
def _start_logger(logfile='log.txt', filemode='w', detail=False):
log = logging.getLogger()
log.setLevel(logging.DEBUG)
loghandler = logging.FileHandler(logfile, filemode)
loghandler.setLevel(logging.DEBUG)
if detail:
timeformat = logging.Formatter("%(asctime)s %(msecs)d - %(levelname)s - %(module)s.%(funcName)s(%(lineno)d) -"
" %(message)s [%(processName)s(%(process)d) %(threadName)s(%(thread)d)]",
datefmt='%Y%m%d %H%M%S')
else:
timeformat = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s", datefmt='%Y%m%d %H%M%S')
loghandler.setFormatter(timeformat)
log.addHandler(loghandler)
return loghandler
def _stop_logger(handler):
handler.flush()
handler.close()
log = logging.getLogger()
log.removeHandler(handler)
# data import
def import_study_gids(ff, course_catalog_fn):
    """Read the course catalogue spreadsheet into a list of rows.

    Parameters
    ----------
    ff : dict
        Folder specs from file_folder_specs(); 'unidata' is used here.
    course_catalog_fn : str
        Spreadsheet file name inside the university data folder.

    Returns
    -------
    (list, list)
        The course rows and the (popped) header row.
    """
    import pyexcel
    # os.path.join keeps the path portable; the original hard-coded '\\',
    # which only works on Windows.
    filepath = os.path.join(ff['unidata'], course_catalog_fn)
    # Get an array from the spreadsheet-based data
    try:
        course_catalog = pyexcel.get_array(file_name=filepath, encoding='utf-8')
    except Exception:
        # narrow from a bare except, which would also swallow
        # SystemExit/KeyboardInterrupt
        sys.exit("import failed from spreadsheet filepath: " + filepath)
    headers = course_catalog.pop(0)
    return course_catalog, headers
def import_keywords(ff, keywords_fn):
    """Read one keyword per line from the keyword file.

    Keywords are stripped of surrounding whitespace and lower-cased.
    Uses a context manager so the file is always closed, and a portable
    os.path.join instead of the original Windows-only '\\' separator.
    """
    filepath = os.path.join(ff['keyworddata'], keywords_fn)
    with open(filepath, 'r') as fhand:
        return [line.strip().lower() for line in fhand]
def convert_common_course_names():
    # todo
    """ Some courses have their name changed from year to year
    But it can be useful during analysis/viz to treat these as if they were the same course
    This function returns a mapping from known variant names to their common name
    To save and report in the results
    """
    # A dict literal keeps the known renames in one place; a delimited
    # external text file would be the easiest format for future additions.
    return {
        "Advanced Course on LCA": "LCA",
        "Advanced Course on LCA: Theory to Practice": "LCA",
        "LCA Practice & Reporting": "LCA",
    }
# data cleaning
def clean_text(text, stopwords):
    """ Converts free-text with punctuation, numbers, capital letters etc.
    into a list of words
    without any of the punctuation and other 'noise'
    and excludes the pre-determined 'stopwords'
    """
    # drop every digit character
    text = ''.join(ch for ch in text if not ch.isdigit())
    # collapse extra whitespace (split + join) and lower-case
    text = ' '.join(text.lower().split())
    # forward slash '/' often used to mean 'or'
    text = text.replace('/', ' ')
    # strip punctuation in a single translate pass
    import string
    text = text.translate(text.maketrans('', '', string.punctuation))
    # drop stop words and return the remaining tokens
    return [word for word in text.split() if word not in stopwords]
def stem_words(words):
    """Return the Snowball (English) stem of every word in *words*."""
    import snowballstemmer as ss
    stemmer = ss.stemmer('english')
    # stemmer = ss.stemmer('dutch')
    return [stemmer.stemWord(word) for word in words]
# semantic analysis
def get_word_frequency(words):
    """Count occurrences of every word (blacklisting approach: all words
    are counted, not just keywords).

    Returns a Counter (a dict subclass, so callers using .get()/.values()
    and sorted(histogram, key=histogram.get) keep working) mapping
    word -> occurrence count.
    """
    from collections import Counter
    # Counter replaces the hand-rolled counts.get(word, 0) + 1 loop
    return Counter(words)
def get_keyword_frequency(words, keywords):
    """Count only occurrences of the pre-defined *keywords* in *words*
    (whitelisting approach).

    Returns a Counter (dict subclass) mapping keyword -> occurrence count;
    keywords never seen in *words* are absent.
    """
    from collections import Counter
    # set() gives O(1) membership tests instead of O(len(keywords)) per word
    keyword_set = set(keywords)
    return Counter(word for word in words if word in keyword_set)
# calculate results
def calculate_metrics(keyword_frequency, word_frequency):
    # to do: extend to include a score for the relevance of each keyword
    # so a broad word like 'energy' can score less than a more specific one such as 'renewable'
    # this would also require a change to the way keywords are entered and imported
    """Per-course word statistics.

    Returns a dict mapping course_code to the tuple
    (word_count, keyword_count, keyword_ratio, unique_keyword_count).
    """
    metrics = {}
    for course_code, histogram in keyword_frequency.items():
        total_words = sum(word_frequency[course_code].values())
        total_keywords = sum(histogram.values())
        distinct_keywords = len(histogram)
        # courses with empty text should be filtered out before this point,
        # but guard against division by zero anyway
        ratio = total_keywords / total_words if total_words != 0 else 0
        metrics[course_code] = (total_words, total_keywords, ratio, distinct_keywords)
    return metrics
def main():
    """Run the keyword-based curriculum analysis end to end.

    Loads settings, imports course and keyword data, filters courses,
    counts (key)word frequencies per course, and writes a ranked results
    table to a timestamped .txt file.

    Returns
    -------
    dict
        locals() of this function, so the caller can inspect intermediate
        results interactively.
    """
    # When run, the script starts from here
    # SETTINGS - edit for your own requirements and config.
    print("--Loading setings--")
    # root directory must host the data, this script, and will soon hold the results too.
    # if this line does not correctly identify the root, enter it manually below instead.
    root = os.getcwd()
    # root = 'C:/code/green_curriculum/'
    # enter the name of your university: used for finding the data on disk and storing results
    uni = "delft"
    course_catalog_fn = 'studiegids_1718.xls'
    # course_catalog_fn = 'studiegids.xls'
    unique_course_identifier_header = 'COURSE_ID'
    keywords_fn = 'sustainability_keywords.txt'
    # keywords_fn = 'waste_keywords.txt'
    # The Excel data export provided by TU Delft included many columns with headers
    # The headers containing free text we want to analyse are listed here.
    # Edit if the free-text headers are different for your university
    free_text_headers = ['SUMMARY', 'COURSECONTENS', 'COURSECONTENSMORE', 'STUDYGOALS',
                         'STUDYGOALSMORE', 'EDUCATIONMETHOD', 'LITRATURE', 'PRACTICALGUIDE',
                         'BOOKS', 'READER', 'ASSESMENT', 'SPECIALINFORMATION', 'REMARKS']
    # these columns are used for filtering and later reporting of results
    ects_points_header = 'ECTS_POINTS'
    faculty_code_header = 'BUREAU_ID'
    program_code_header = 'EDUCATION_CODE'
    language_header = 'COURSELANGUAGE'
    # set filter conditions
    # studiegids has 2 language settings - we are using only the export of the English version
    # some of the courses taught in Dutch(Nederlands) also include English course descriptions,
    # but the data for these is filled in poorly and hence excluded from our language list."""
    language_include = ['English', 'Engels', 'Engels, Nederlands', 'Nederlands (op verzoek Engels)']  # , 'Nederlands']
    # ignore courses with less ECTS than this value. Set to 0 to include all.
    ects_min = 1
    # enter faculty or program codes to restrict results to specific faculties or programs.
    # all_faculties_delft = ['', 'CiTG', 'LR', 'TNW', 'BK', 'Extern', '3mE', 'EWI', 'TBM', 'IO', 'UD']
    faculty_include = []
    program_include = []
    # number of most-common keywords to print to the console per course
    words_to_show = 5
    # READ DATA
    print("--Importing data--")
    ff = file_folder_specs(root, uni)
    # words to exclude from analysis
    # stopwords are the ones we do not want to analyse, e.g. 'the', 'and', 'but'
    stopwords = nltk.corpus.stopwords.words('english')
    # some words which occur frequently in studiegids but contain no useful info are missing from the stopwords
    # we add these dataset-specific stopwords in to our list manually
    custom_stopwords = ['will', 'refer', 'part', 'description',
                        'see', 'can', 'course', 'students', 'assignment', 'o',
                        'us', 'also', 'lecture', 'main', 'module', 'exam',
                        'work', 'week', 'brightspace', 'blackboard']
    stopwords += custom_stopwords
    # read data from Excel file using the pyexcel library
    course_catalog, headers = import_study_gids(ff, course_catalog_fn)
    print("Courses loaded:", len(course_catalog), "\nTotal headers/columns:", len(headers))
    # read keywords from .txt file
    keywords = import_keywords(ff, keywords_fn)
    print("Keywords imported:", len(keywords))
    # stem the keywords and remove duplicates
    # word stemming can be learned about here: https://en.wikipedia.org/wiki/Stemming
    keyword_stems = list(set(stem_words(keywords)))
    print("Distinct keywords after stemming:", len(keyword_stems))
    # Filtering of courses:
    # for each filter, get the indices of courses which pass that filter
    # if the filter conditions are left empty, all courses pass the filter due to "or" condition
    idx_ects_min = [i for i, item in enumerate(course_catalog) if item[headers.index(ects_points_header)] >= ects_min]
    idx_faculty_include = [i for i, item in enumerate(course_catalog) if
                           item[headers.index(faculty_code_header)] in faculty_include or faculty_include == []]
    idx_program_include = [i for i, item in enumerate(course_catalog) if
                           item[headers.index(program_code_header)] in program_include or program_include == []]
    idx_language_include = [i for i, item in enumerate(course_catalog) if
                           item[headers.index(language_header)] in language_include or language_include == []]
    # combine all filters using sets, to leave only courses which pass all filters
    idx = list(set(idx_ects_min) & set(idx_faculty_include) & set(idx_program_include) & set(
        idx_language_include))  # & set(idx_contact_exclude))
    # apply filter to select courses from imported catalog
    courses_to_assess = [course_catalog[i] for i in idx]
    print("Courses that pass all filters:", len(courses_to_assess))
    print("--Analysis of key words starting. May take minutes--")
    # get indices of these headers for later use
    wanted_header_indices = [headers.index(wanted_header) for wanted_header in free_text_headers]
    # empty objects for use within loop
    courses_no_words = []
    word_frequency = {}
    keyword_frequency = {}
    all_clean_words = []
    all_word_stems = []
    course_metadata = {}
    for i, course in enumerate(courses_to_assess):
        free_text = ''
        clean_words = ''
        course_id = course[headers.index(unique_course_identifier_header)]
        # construct a long string with all free text from chosen columns
        # NOTE(review): the join already covers every wanted header, so this
        # loop recomputes the identical string once per header (and shadows
        # j); a single assignment would suffice.
        for j in wanted_header_indices:
            free_text = ' '.join([course[int(j)] for j in wanted_header_indices])
        if free_text == '':
            # stop analysis for any courses that have no words that can be analysed
            courses_no_words.append(course_id)
            continue
        else:
            clean_words = clean_text(free_text, stopwords)
            word_stems = stem_words(clean_words)
            # word_frequency takes no account of the keywords, but instead counts all
            word_frequency[course_id] = get_word_frequency(word_stems)
            keyword_frequency[course_id] = get_keyword_frequency(word_stems, keyword_stems)
            # store metadata on relevant courses for easy later access
            course_metadata[course_id] = (
                course[headers.index('COURSE_CODE')].strip(), course[headers.index('YEAR_LABEL')],
                course[headers.index('COURSE_TITLE')].strip(), course[headers.index('EDUCATION_CODE')],
                course[headers.index('BUREAU_ID')]
            )
            # combine all words found in the free-text fields into a list
            all_clean_words += clean_words
            all_word_stems += word_stems
    # uncomment to view all original cleaned words and their stemmed forms:
    # [print(word, stem) for word, stem in sorted(zip(all_clean_words, all_word_stems))]
    unique_word_stems = set(all_word_stems)
    print("Courses with no free text found:", len(courses_no_words), ". Therefore, text of", len(keyword_frequency),
          "courses used for keyword analysis.")
    print("This text corpus contains", len(all_clean_words), "total words, including", len(unique_word_stems),
          "unique word stems.")
    # get metrics on the words and keywords, for ranking and display
    word_metrics = calculate_metrics(keyword_frequency, word_frequency)
    # FORMAT RESULTS
    # order the list from highest scoring to least, for display
    word_metrics = sorted(word_metrics.items(), key=lambda x: x[1][2], reverse=True)
    # print and save metrics and common keywords for each course in a table structure
    import datetime
    datestring = datetime.datetime.now().strftime("%Y-%m-%d_%H%M")
    results_fn = datestring + '.txt'  # +faculty_include[0]?
    ff = file_folder_specs(root, uni)
    # NOTE(review): Windows-only '\\' path separator; os.path.join would be portable
    results_file = ff['results'] + '\\' + results_fn
    fout = open(results_file, 'w')
    header = "CourseCode\tYear\tCourseTitle\tProgramCode\tFaculty\tUniqueKeywords\tKeywords\tWords\tPercent"
    fout.write(header)
    # print("\nShowing most common", words_to_show, "keyword stems each for courses with over",
    #       keyword_percent_threshold, "% keyword prevalence.")
    # print(40 * "--")
    # print(header)
    # print(40 * "--")
    for course_id, stats in word_metrics:
        histogram = keyword_frequency[course_id]
        keyword_percent = round(stats[2] * 100, 1)
        # only show courses that have a sufficient number of keywords
        word_count = stats[0]
        keyword_count = stats[1]
        unqiue_keyword_count = stats[3]
        # frequent_keywords = sorted(histogram, key=histogram.get, reverse=True)[:words_to_show]
        # keywords_string = [k, '\t' for k in frequent_keywords]
        course_code = str(course_metadata[course_id][0])
        course_year = str(course_metadata[course_id][1])
        course_title = str(course_metadata[course_id][2])
        program_code = str(course_metadata[course_id][3])
        faculty = str(course_metadata[course_id][4])
        line = course_code + '\t' + course_year + '\t' + course_title + '\t' + program_code + '\t' + faculty + '\t' + \
               str(unqiue_keyword_count) + '\t' + str(keyword_count) + '\t' + str(word_count) + '\t' + \
               str(keyword_percent)  # + '\t' + str(frequent_keywords)
        fout.write('\n' + line)
    fout.close()
    keywords_not_in_dataset = set(keyword_stems) - set(all_word_stems)
    print("Imported keywords not present in dataset:\n", keywords_not_in_dataset)
    # uncomment these lines to also show the words frequently occuring that are not in the keyword list
    """print('\nMost frequent of all words:')
    for course_code, histogram in word_frequency.items():
        total_words = word_count[course_code]
        frequent_words = sorted(histogram, key=histogram.get, reverse=True)[:words_to_show]
        print(course_code, ':', frequent_words, ', total words =', total_words)
    """
    return locals()
if __name__ == "__main__":
# The main routine gets only started if the script is run directly.
# It only includes the logging boilerplate and a top level try-except for catching and logging all exceptions.
# START LOGGING
if not os.path.exists('./log'):
os.makedirs('./log')
log_summary = _start_logger(logfile='./log/process.log')
# log_detail = _start_logger(logfile = './log/process_details.log', detail = True)
logging.info('Start logging of {}'.format(__file__))
try:
logging.info("Current git commit: %s",
subprocess.check_output(["git", "log", "--pretty=format:%H", "-n1"]).decode("utf-8"))
except:
logging.warning('Running without version control')
# MAIN PROGRAM
try:
# The following update your local namespace with the variables from main()
locals().update(main())
# if you don't want the script to pollute your namespace use
# results = main()
# which gives you all variables from main in a dict called 'results'
except Exception as exc:
logging.exception(exc)
raise
finally:
# STOP LOGGER - clean
_stop_logger(log_summary)
# _stop_logger(log_detail)
|
#!/usr/bin/env python
# Sensor daemon: records rain events (via a GPIO interrupt) and 1-wire
# temperature readings (via owserver) into an InfluxDB database.
import sys, time, os
import logging
import RPi.GPIO as GPIO
import pyownet
from influxdb import InfluxDBClient
# change this to the pin used to monitor the rain sensor (BCM numbering, see GPIO.setmode below)
rain_sensor_pin = 5
# database engine host
host = os.getenv('INFLUXDB_HOST', 'localhost')
# database engine port (InfluxDB HTTP API default)
port = 8086
# database engine user
user = os.getenv('INFLUXDB_USER', 'admin')
# database engine password
# NOTE(review): '<PASSWORD>' looks like a scrubbed placeholder default --
# set INFLUXDB_PASSWD in the environment for real deployments.
password = os.getenv('INFLUXDB_PASSWD', '<PASSWORD>')
# database to save all our logging to
db_name = os.getenv('INFLUXDB_DB', 'sensors')
# log everything at DEBUG level to a file, overwriting on each start
logging.basicConfig( filename="/var/log/sensor-service.log",
                     filemode='w',
                     level=logging.DEBUG,
                     format= '%(asctime)s - %(levelname)s - %(message)s',
                     )
# set up logging to console
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
logger=logging.getLogger(__name__)
logging.info('Starting sensor daemon.')
# setup raspberry pi pins (BCM numbering; rain pin is an input with pull-up)
GPIO.setmode(GPIO.BCM)
GPIO.setup(rain_sensor_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# setup influx database engine connection and sensor database
db = InfluxDBClient(host, port, user, password, db_name)
# function to be called when it's raining
def rain_trigger_callback(channel):
    """GPIO edge callback: log a rain event and store one reading in InfluxDB.

    *channel* is the GPIO pin number supplied by RPi.GPIO; it is unused
    because only a single rain sensor is registered.
    """
    logger.info('The Rain Keeps Pouring Down. %s', time.strftime('%a, %d %b %Y %H:%M:%S %Z(%z)'))
    # One point per trigger; the database adds the timestamp automatically.
    rain_point = {
        "measurement": "rain",
        "tags": {},
        "fields": {"value": 1.0},
    }
    try:
        db.write_points([rain_point])
    except Exception as err:
        logger.error('Failed to log rainsensor reading to database. %s', err, exc_info=True)
# register callback for rising-edge interrupt on rain sensor pin, and add a debounce time of 1000 ms.
GPIO.add_event_detect(rain_sensor_pin, GPIO.RISING, callback=rain_trigger_callback, bouncetime=1000)
try:
    # setup owserver(1-wire) connection
    owproxy = pyownet.protocol.proxy(host=os.getenv('OWSERVER_HOST', 'localhost'), port=4304, flags=0, persistent=False, verbose=False)
    # get list of all available 1-wire sensors, but only the sensors that start with family name = 10 (DS1820)
    owdevices = [d for d in owproxy.dir() if '10.' in d]
    logger.info('Found owdevices: %s', str(owdevices))
    # loop until program exits
    while True:
        # loop over the 1-wire devices found at startup
        for id in owdevices:
            # remove '/' in 1-wire device names, we don't need them
            id = id.replace('/', '')
            # read temperature from one sensor, convert reading to a float-value
            temp = float(owproxy.read('/{0}/temperature'.format(id)))
            logger.info('Read temperature: {0} from device: {1}'.format(temp, id))
            # setup values to save to the database. database will add timestamp automatically
            json_body = [
            {
                "measurement": "temperature",
                "tags": {
                    "sensor": id
                },
                "fields": {
                    "value": temp
                }
            }]
            # save to database; a database failure must not kill the daemon
            try:
                db.write_points(json_body)
            except Exception as err:
                logger.error('Failed to log temperature readings to database. %s', err, exc_info=True)
        # wait 600 seconds(10 minutes) before we read sensors again
        time.sleep(600)
# NOTE(review): GPIO.cleanup() only runs for these two exception types;
# any other exception leaves the pins configured -- a finally block would
# guarantee cleanup.
except pyownet.protocol.Error as err:
    logger.error('Failed to read from Owserver. %s', err, exc_info=True)
    GPIO.cleanup()
except KeyboardInterrupt:
    GPIO.cleanup()
|
import matplotlib.pyplot as plt
import numpy as np
# Displays the probability that the current trajectory matches the stored trajectores at every instant in time.
def plot_distribution(dof_names, mean, upper_bound, lower_bound):
    """Plots a given probability distribution.

    One subplot per degree of freedom, at most four per figure; each shows
    the mean curve with a shaded band from lower_bound to upper_bound over
    a normalized [0, 1] domain.
    """
    per_figure = np.min([4, mean.shape[0]])
    # same normalized time axis for every degree of freedom
    domain = np.linspace(0, 1, mean.shape[1])
    for dof in range(mean.shape[0]):
        slot = dof % per_figure
        if slot == 0:
            fig = plt.figure()
        axis = plt.subplot(per_figure, 1, slot + 1)
        axis.fill_between(domain, upper_bound[dof], lower_bound[dof], color = '#ccf5ff')
        axis.plot(domain, mean[dof], color = '#000000')
        axis.set_title('Trajectory distribution for degree ' + dof_names[dof])
        fig.tight_layout()
    plt.show(block = False)
def plot_trajectory(dof_names, trajectory, observed_trajectory, mean_trajectory = None):
    """Plots a given trajectory.

    :param dof_names: per-DOF names used in subplot titles
    :param trajectory: inferred trajectory, one row per DOF
    :param observed_trajectory: observed trajectory, one row per DOF
    :param mean_trajectory: optional mean trajectory, one row per DOF

    Figure 1 plots the first two DOFs against each other (assumes rows 0 and 1
    are x/y coordinates — TODO confirm); figure 2 plots every DOF against a
    normalized [0, 1] time axis. Blocks until the windows are closed.
    """
    fig = plt.figure()
    plt.plot(trajectory[0], trajectory[1])
    plt.plot(observed_trajectory[0], observed_trajectory[1])
    if(mean_trajectory is not None):
        plt.plot(mean_trajectory[0], mean_trajectory[1])
    fig.suptitle('Probable trajectory')
    # Second figure: one subplot per DOF over normalized time.
    fig = plt.figure()
    for index, degree in enumerate(trajectory):
        new_plot = plt.subplot(len(trajectory), 1, index + 1)
        # Each series gets its own domain since lengths may differ.
        domain = np.linspace(0, 1, len(trajectory[index]))
        new_plot.plot(domain, trajectory[index], label = "Inferred")
        domain = np.linspace(0, 1, len(observed_trajectory[index]))
        new_plot.plot(domain, observed_trajectory[index], label = "Observed")
        if(mean_trajectory is not None):
            domain = np.linspace(0, 1, len(mean_trajectory[index]))
            new_plot.plot(domain, mean_trajectory[index], label = "Mean")
        new_plot.set_title('Trajectory for degree ' + dof_names[index])
        new_plot.legend()
    plt.show()
def plot_partial_trajectory(trajectory, partial_observed_trajectory, mean_trajectory = None):
    """Plots a trajectory and a partially observed trajectory.

    :param trajectory: inferred trajectory; rows 0/1 are plotted as x/y
    :param partial_observed_trajectory: observed prefix of the trajectory,
        same row layout; its column count is shown as "Observed samples"
    :param mean_trajectory: optional mean trajectory for reference

    Blocks until the window is closed.
    """
    fig = plt.figure()
    # Observed prefix drawn thick and solid; inferred continuation dashed.
    plt.plot(partial_observed_trajectory[0], partial_observed_trajectory[1], color = "#6ba3ff", label = "Observed", linewidth = 3.0)
    plt.plot(trajectory[0], trajectory[1], "--", color = "#ff6a6a", label = "Inferred", linewidth = 2.0)
    if(mean_trajectory is not None):
        plt.plot(mean_trajectory[0], mean_trajectory[1], color = "#85d87f", label = "Mean")
    fig.suptitle('Probable trajectory')
    plt.legend()
    # Annotate how many samples were observed, in axes coordinates.
    plt.text(0.01, 0.7, "Observed samples: " + str(partial_observed_trajectory.shape[1]), transform = fig.axes[0].transAxes)
    plt.show()
def plot_approximation(dof_names, trajectory, approx_trajectory, approx_trajectory_deriv):
    """Plots a trajectory and its approximation.

    :param dof_names: per-DOF names used in subplot titles
    :param trajectory: original data, one row per DOF
    :param approx_trajectory: approximated data, one row per DOF (may have a
        different sample count than `trajectory`)
    :param approx_trajectory_deriv: derivative of the approximation, one row per DOF

    One figure per DOF with three stacked subplots: original, approximation,
    and approximation derivative. Blocks until the windows are closed.
    """
    # Separate domains: original and approximation may differ in length.
    domain = np.linspace(0, 1, len(trajectory[0]))
    approx_domain = np.linspace(0, 1, len(approx_trajectory[0]))
    for dof in range(len(trajectory)):
        plt.figure()
        new_plot = plt.subplot(3, 1, 1)
        new_plot.plot(domain, trajectory[dof])
        new_plot.set_title('Original ' + dof_names[dof] + ' Data')
        new_plot = plt.subplot(3, 1, 2)
        # Only the position component of the state is plotted here.
        new_plot.plot(approx_domain, approx_trajectory[dof])
        new_plot.set_title('Approximated ' + dof_names[dof] + ' Data')
        new_plot = plt.subplot(3, 1, 3)
        # Only the position component of the state derivative is plotted here.
        new_plot.plot(approx_domain, approx_trajectory_deriv[dof])
        new_plot.set_title('Approximated ' + dof_names[dof] + ' Derivative')
    plt.show()
def plot_weights(weight_matrix):
    """Render a weight matrix as a grayscale image with a colorbar.

    :param weight_matrix: 2-D array of weights

    Blocks until the window is closed.
    """
    plt.figure()
    plt.imshow(weight_matrix, cmap = "gray", interpolation = "none")
    plt.colorbar()
    plt.show()
|
"""
The :mod:`pyfan.devel.flog.logsupport` initiates logging and set logging options, output log path
points.
This is imported into other programs as *import pyfan.devel.flog.logsupport as pyfan_logsup*
Includes method :func:`log_vig_start`, :func:`log_format`
"""
import logging
import pyfan.util.path.getfiles as pyfan_getfiles
import pyfan.util.timer.timer as pyfan_timer
import numpy as np
def log_vig_start(spt_root, main_folder_name, file_name='fs_gen_combo_type',
                  sub_folder_name=None, subsub_folder_name=None,
                  it_time_format=8, log_level=logging.WARNING,
                  **kwargs):
    """Initialize logging to a file and return the log file's full path.

    Builds the log directory from `spt_root` and `main_folder_name` (plus the
    optional sub/subsub folders), optionally suffixes `file_name` with a
    formatted timestamp, and configures the root logger to write there.

    Parameters
    ----------
    spt_root : str
        folder root to log file.
    main_folder_name : str
        main folder, appended to `spt_root`.
    file_name : str
        file name for the log file, without suffix.
    sub_folder_name : str, optional
        possible subfolder name. This is double pound vig level.
    subsub_folder_name : str, optional
        possible subsub folder name; try not to go lower than this level.
        This is triple pound vig level.
    it_time_format : int
        time-suffix format selector; 0 disables the time suffix entirely.
    log_level : int
        logging level to report: CRITICAL 50 ERROR 40 WARNING 30 INFO 20 DEBUG 10 NOTSET 0.
    **kwargs
        forwarded to :func:`log_format`.

    Returns
    -------
    str
        the full path to the log file.

    Examples
    --------
    >>> log_vig_start(spt_root = 'C:/Users/fan/',
    ...               main_folder_name='logvig', sub_folder_name='parameters',
    ...               subsub_folder_name='combo_type',
    ...               file_name='fs_gen_combo_type',
    ...               it_time_format=8, log_level=logging.INFO)
    C:\\Users\\fan\\logvig\\parameters\\combo_type\\fs_gen_combo_type_20201030.log.py
    """
    # Folder that will contain the log file.
    spt_log = pyfan_getfiles.gen_path(spt_root, main_folder_name, sub_folder_name, subsub_folder_name)
    # File name, optionally suffixed with a formatted timestamp.
    snm_log = file_name if it_time_format == 0 else \
        '{}_{}'.format(file_name, str(pyfan_timer.getDateTime(it_time_format)))
    # Full path including the log-file suffix.
    spn_log = pyfan_getfiles.gen_path_file(spt_log, snm_log, st_file_type='log')
    # Point the root logger at the file, overwriting output from previous runs.
    logging.basicConfig(filename=spn_log, filemode='w', level=log_level, format=log_format(**kwargs))
    return spn_log
def log_format(bl_set_print_opt=True, it_print_opt=1):
    """Return the logging format string, optionally tuning numpy printing.

    Called by :func:`log_vig_start`; parameters arrive there via *kwargs*.

    Parameters
    ----------
    bl_set_print_opt : bool, optional
        Whether to also set numpy table printing options (column width,
        decimals, truncation threshold).
    it_print_opt : int, optional
        Which numpy printing preset to apply; only ``1`` is defined.

    Returns
    -------
    str
        format string for the logging config.

    Examples
    --------
    >>> log_format(bl_set_print_opt = True, it_print_opt = 1)
    '%(filename)s - %(funcName)s - %(lineno)d - %(asctime)s - %(levelname)s %(message)s'
    """
    # Apply the numpy preset only when requested and recognized.
    if bl_set_print_opt and it_print_opt == 1:
        np.set_printoptions(precision=2, linewidth=100, suppress=True, threshold=3000)
    return '%(filename)s - %(funcName)s - %(lineno)d - %(asctime)s - %(levelname)s %(message)s'
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
def get_config_schema():
    """Build the aksetup ConfigSchema describing PyOpenCL's build options.

    On macOS, auto-detects an SDK sysroot for the current OS version and
    builds fat (i386 + x86_64) binaries, linking OpenCL as a framework;
    elsewhere, links against the system OpenCL library.
    """
    from aksetup_helper import ConfigSchema, Option, \
        IncludeDir, LibraryDir, Libraries, BoostLibraries, \
        Switch, StringListOption, make_boost_base_options
    import sys
    if 'darwin' in sys.platform:
        import platform
        osx_ver, _, _ = platform.mac_ver()
        # Keep only major.minor, e.g. '10.9'.
        osx_ver = '.'.join(osx_ver.split('.')[:2])
        # Candidate SDK locations: Xcode bundle first, then legacy /Developer.
        sysroot_paths = [
            "/Applications/Xcode.app/Contents/Developer/Platforms/"
            "MacOSX.platform/Developer/SDKs/MacOSX%s.sdk" % osx_ver,
            "/Developer/SDKs/MacOSX%s.sdk" % osx_ver
        ]
        default_libs = []
        default_cxxflags = ['-arch', 'i386', '-arch', 'x86_64']
        from os.path import isdir
        # Use the first SDK that exists.
        for srp in sysroot_paths:
            if isdir(srp):
                default_cxxflags.extend(['-isysroot', srp])
                break
        default_ldflags = default_cxxflags[:] + ["-Wl,-framework,OpenCL"]
    else:
        default_libs = ["OpenCL"]
        default_cxxflags = []
        default_ldflags = []
    return ConfigSchema(make_boost_base_options() + [
        BoostLibraries("python"),
        Switch("USE_SHIPPED_BOOST", True, "Use included Boost library"),
        Switch("CL_TRACE", False, "Enable OpenCL API tracing"),
        Switch("CL_ENABLE_GL", False, "Enable OpenCL<->OpenGL interoperability"),
        Switch("CL_ENABLE_DEVICE_FISSION", True,
               "Enable device fission extension, if present"),
        Option("CL_PRETEND_VERSION", None,
               "Dotted CL version (e.g. 1.2) which you'd like to use."),
        IncludeDir("CL", []),
        LibraryDir("CL", []),
        Libraries("CL", default_libs),
        StringListOption("CXXFLAGS", default_cxxflags,
                         help="Any extra C++ compiler options to include"),
        StringListOption("LDFLAGS", default_ldflags,
                         help="Any extra linker options to include"),
    ])
def main():
    """Configure and run the PyOpenCL build/installation.

    Reads the aksetup site configuration, assembles compiler/linker settings
    for the ``_cl`` and ``_pvt_struct`` extensions, warns about
    optional-dependency and platform pitfalls (missing Mako, Nvidia CUDA with
    device fission), and finally calls setup().
    """
    from aksetup_helper import (hack_distutils, get_config, setup,
                                NumpyExtension, set_up_shipped_boost_if_requested,
                                check_git_submodules)
    check_git_submodules()
    hack_distutils()
    conf = get_config(get_config_schema(),
                      warn_about_no_config=False)
    # Optionally build the bundled Boost; returns extra sources and defines.
    EXTRA_OBJECTS, EXTRA_DEFINES = \
        set_up_shipped_boost_if_requested("pyopencl", conf)
    LIBRARY_DIRS = conf["BOOST_LIB_DIR"]
    LIBRARIES = conf["BOOST_PYTHON_LIBNAME"]
    EXTRA_INCLUDE_DIRS = []
    EXTRA_DEFINES["PYGPU_PACKAGE"] = "pyopencl"
    EXTRA_DEFINES["PYGPU_PYOPENCL"] = "1"
    if conf["CL_TRACE"]:
        EXTRA_DEFINES["PYOPENCL_TRACE"] = 1
    INCLUDE_DIRS = conf["BOOST_INC_DIR"] + conf["CL_INC_DIR"]
    if conf["CL_ENABLE_GL"]:
        EXTRA_DEFINES["HAVE_GL"] = 1
    if conf["CL_ENABLE_DEVICE_FISSION"]:
        EXTRA_DEFINES["PYOPENCL_USE_DEVICE_FISSION"] = 1
    if conf["CL_PRETEND_VERSION"]:
        try:
            major, minor = [int(x) for x in conf["CL_PRETEND_VERSION"].split(".")]
            # Encode "M.N" into the numeric form expected by the C++ wrapper.
            EXTRA_DEFINES["PYOPENCL_PRETEND_CL_VERSION"] = \
                0x1000*major + 0x10 * minor
        except:
            # NOTE(review): bare except — re-raises, but catching ValueError
            # explicitly would be clearer and not mask unrelated errors.
            print("CL_PRETEND_VERSION must be of the form M.N, "
                  "with two integers M and N")
            raise
    # Read the package version without importing the (not-yet-built) package.
    ver_dic = {}
    version_file = open("pyopencl/version.py")
    try:
        version_file_contents = version_file.read()
    finally:
        version_file.close()
    exec(compile(version_file_contents, "pyopencl/version.py", 'exec'), ver_dic)
    SEPARATOR = "-"*75
    # Python 3 needs the 2to3-aware build_py; fall back for Python 2.
    try:
        from distutils.command.build_py import build_py_2to3 as build_py
    except ImportError:
        # 2.x
        from distutils.command.build_py import build_py
    # Mako is optional; warn (with a delay) instead of failing when absent.
    try:
        import mako  # noqa
    except ImportError:
        print(SEPARATOR)
        print("Mako is not installed.")
        print(SEPARATOR)
        print("That is not a problem, as most of PyOpenCL will be just fine ")
        print("without it.Some higher-level parts of pyopencl (such as ")
        print("pyopencl.reduction) will not function without the templating engine ")
        print("Mako [1] being installed. If you would like this functionality to ")
        print("work, you might want to install Mako after you finish ")
        print("installing PyOpenCL.")
        print("")
        print("[1] http://www.makotemplates.org/")
        print(SEPARATOR)
        print("Hit Ctrl-C now if you'd like to think about the situation.")
        print(SEPARATOR)
        from aksetup_helper import count_down_delay
        count_down_delay(delay=5)
    # Heuristic: include paths mentioning "nv"/"cuda" suggest Nvidia's CL
    # headers, whose CUDA 3.2 release breaks with device fission enabled.
    might_be_cuda = False
    for inc_dir in conf["CL_INC_DIR"]:
        inc_dir = inc_dir.lower()
        if "nv" in inc_dir or "cuda" in inc_dir:
            might_be_cuda = True
    if might_be_cuda and conf["CL_ENABLE_DEVICE_FISSION"]:
        print(SEPARATOR)
        print("You might be compiling against Nvidia CUDA with device "
              "fission enabled.")
        print(SEPARATOR)
        print("That is not a problem on CUDA 4.0 and newer. If you are "
              "using CUDA 3.2,")
        print("your build will break, because Nvidia shipped a broken CL header in")
        print("in your version. The fix is to set CL_ENABLE_DEVICE_FISSION to False")
        print("in your PyOpenCL configuration.")
        print(SEPARATOR)
        print("Hit Ctrl-C now if you'd like to think about the situation.")
        print(SEPARATOR)
        from aksetup_helper import count_down_delay
        count_down_delay(delay=5)
    import sys
    # Pick the struct-packing wrapper matching the Python major version.
    if sys.version_info >= (3,):
        pvt_struct_source = "src/wrapper/_pvt_struct_v3.cpp"
    else:
        pvt_struct_source = "src/wrapper/_pvt_struct_v2.cpp"
    setup(name="pyopencl",
          # metadata
          version=ver_dic["VERSION_TEXT"],
          description="Python wrapper for OpenCL",
          long_description=open("README.rst", "rt").read(),
          author="<NAME>",
          author_email="<EMAIL>",
          license="MIT",
          url="http://mathema.tician.de/software/pyopencl",
          classifiers=[
              'Environment :: Console',
              'Development Status :: 5 - Production/Stable',
              'Intended Audience :: Developers',
              'Intended Audience :: Other Audience',
              'Intended Audience :: Science/Research',
              'License :: OSI Approved :: MIT License',
              'Natural Language :: English',
              'Programming Language :: C++',
              'Programming Language :: Python',
              'Programming Language :: Python :: 2',
              'Programming Language :: Python :: 2.4',
              'Programming Language :: Python :: 2.5',
              'Programming Language :: Python :: 2.6',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3',
              'Programming Language :: Python :: 3.2',
              'Programming Language :: Python :: 3.3',
              'Topic :: Scientific/Engineering',
              'Topic :: Scientific/Engineering :: Mathematics',
              'Topic :: Scientific/Engineering :: Physics',
          ],
          # build info
          packages=["pyopencl", "pyopencl.characterize", "pyopencl.compyte"],
          setup_requires=[
              "numpy",
          ],
          install_requires=[
              "pytools>=2014.2",
              "pytest>=2",
              "decorator>=3.2.0",
              "appdirs>=1.4.0",
              # "Mako>=0.3.6",
          ],
          ext_package="pyopencl",
          ext_modules=[
              NumpyExtension("_cl",
                             [
                                 "src/wrapper/wrap_cl.cpp",
                                 "src/wrapper/wrap_cl_part_1.cpp",
                                 "src/wrapper/wrap_cl_part_2.cpp",
                                 "src/wrapper/wrap_constants.cpp",
                                 "src/wrapper/wrap_mempool.cpp",
                                 "src/wrapper/bitlog.cpp",
                             ]+EXTRA_OBJECTS,
                             include_dirs=INCLUDE_DIRS + EXTRA_INCLUDE_DIRS,
                             library_dirs=LIBRARY_DIRS + conf["CL_LIB_DIR"],
                             libraries=LIBRARIES + conf["CL_LIBNAME"],
                             define_macros=list(EXTRA_DEFINES.items()),
                             extra_compile_args=conf["CXXFLAGS"],
                             extra_link_args=conf["LDFLAGS"],
                             ),
              NumpyExtension("_pvt_struct",
                             [pvt_struct_source],
                             extra_compile_args=conf["CXXFLAGS"],
                             extra_link_args=conf["LDFLAGS"],
                             ),
          ],
          include_package_data=True,
          package_data={
              "pyopencl": [
                  "cl/*.cl",
                  "cl/*.h",
              ]
          },
          # 2to3 invocation
          cmdclass={'build_py': build_py},
          zip_safe=False)
# Script entry point: run the build/installation when executed directly.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------\n
# Original Authors: BARC Project, Berkeley MPC Laboratory -> https://github.com/MPC-Berkeley/barc
# Modified by: <NAME>, Graduate Student, Clemson University
# Date Created: 20/5/2016, Last Modified: 20/5/2016 \n
# --------------------------------------------------------------------------------------\n
import rospy
import time
import math
from barc.msg import ECU_raw
from geometry_msgs.msg import Vector3
from simulator.msg import Z_DynBkMdl, eZ_DynBkMdl
from numpy import sin, cos, tan, arctan, array, dot, pi
from numpy import sign, argmin, sqrt, zeros
from system_models_simulator import f_KinBkMdl, f_DynBkMdl, ef_KinBkMdl, ef_DynBkMdl
# input variables
d_f = 0  # steering angle input (presumably rad — confirm ECU convention)
FxR = 0  # longitudinal force input at the rear wheels
a = 0  # NOTE(review): not referenced in the visible code
# raw measurement variables
yaw_prev = 0
(roll, pitch, yaw, a_x, a_y, a_z, w_x, w_y, w_z) = zeros(9)
# from encoder
v_x_enc = 0
t0 = time.time()  # start time of the node
n_FL = 0 # counts in the front left tire
n_FR = 0 # counts in the front right tire
n_FL_prev = 0
n_FR_prev = 0
r_tire = 0.04 # radius from tire center to perimeter along magnets [m]
dx_qrt = 2.0*pi*r_tire/4.0 # distance along quarter tire edge
m = 1.98 # mass of the car
# default target position, overwritten by target_callback
x_target = 10
y_target = 10
# for ECU
read_ECU0 = False
read_veh2 = False
# for veh2
x2 = 0
y2 = 0
# target command update
def target_callback(data):
    """Store the latest target position from an incoming Vector3 message."""
    global x_target, y_target
    x_target, y_target = data.x, data.y
# ecu1 command update
def ecu1_callback(data):
    """Store the latest ECU command for vehicle 1.

    data.motor is a commanded acceleration; it is scaled by the vehicle mass
    `m` to obtain the longitudinal force FxR. data.servo is the steering angle.
    """
    global FxR, d_f
    FxR = m * data.motor
    d_f = data.servo
# ecu2 command update
def ecu2_callback(data):
    """Flag that a command from vehicle 2's ECU has been received.

    The message content is ignored; only its arrival is recorded.
    """
    global read_veh2
    read_veh2 = True
# position of veh2
def veh2_pos_callback(data):
    """Record the most recently published position of vehicle 2."""
    global x2, y2
    x2, y2 = data.x, data.y
# state estimation node
def vehicle_simulator():
    """Simulate vehicle 1's motion and publish its state at 50 Hz.

    Subscribes to ECU commands ('ecu_cmd_1'), vehicle-2 traffic ('ecu_2',
    'z_vhcl_2') and the target position ('target'). Integrates a dynamic
    bicycle model when moving (|v_x| > 0.05) and a kinematic bicycle model
    otherwise; publishes pose/velocity on 'z_vhcl_1' and speed on 'v_sim_1'.
    The vehicle is commanded to a full stop within 3 m of the target.
    """
    global d_f, FxR, x_target, y_target, read_ECU0
    # initialize node
    rospy.init_node('vehicle_simulator', anonymous=True)
    # topic subscriptions / publications
    rospy.Subscriber('ecu_cmd_1', ECU_raw, ecu1_callback)
    rospy.Subscriber('ecu_2', ECU_raw, ecu2_callback)
    rospy.Subscriber('z_vhcl_2', Z_DynBkMdl, veh2_pos_callback)
    rospy.Subscriber('target', Vector3, target_callback)
    state_pub = rospy.Publisher('z_vhcl_1', Z_DynBkMdl, queue_size = 10)
    state_pub2 = rospy.Publisher('v_sim_1', Vector3, queue_size = 10)
    #state_pub3 = rospy.Publisher('steering_sim', Vector3, queue_size = 10)
    #state_error_frame_pub = rospy.Publisher('ez_vhcl', eZ_DynBkMdl, queue_size = 10)
    # get vehicle dimension parameters
    L_a = rospy.get_param("L_a") # distance from CoG to front axel
    L_b = rospy.get_param("L_b") # distance from CoG to rear axel
    # NOTE(review): this local m shadows the module-level m used by ecu1_callback
    m = rospy.get_param("m") # mass of vehicle
    I_z = rospy.get_param("I_z") # moment of inertia about z-axis
    vhMdl = (L_a, L_b, m, I_z)
    # get tire model (identical coefficients are used front and rear)
    B = rospy.get_param("tire_model/B")
    C = rospy.get_param("tire_model/C")
    mu = rospy.get_param("tire_model/mu")
    trMdl = ([B,C,mu],[B,C,mu])
    # get external force model
    a0 = rospy.get_param("air_drag_coeff")
    Ff = rospy.get_param("friction")
    F_ext = (a0, Ff)
    # set node rate
    loop_rate = 50
    dt = 1.0 / loop_rate
    rate = rospy.Rate(loop_rate)
    # NOTE(review): local t0 shadows the module-level t0 and is unused below
    t0 = time.time()
    # set initial conditions
    x = 0
    y = 0
    psi = 0
    v_x = 0
    v_y = 0
    r = 0
    d_f_sens = 0
    s = 0
    ey = 0
    epsi = 0
    while not rospy.is_shutdown():
        # side-slip angle from the kinematic relation
        # NOTE(review): bta appears unused in the visible code
        bta = arctan( L_a / (L_a + L_b) * tan(d_f) )
        d_target = math.sqrt((x_target-x)**2 + (y_target-y)**2)
        # stop the vehicle once within 3 m of the target
        if d_target<3:
            (FxR, d_f) = (0, 0)
            (v_x, v_y) = (0, 0)
        #if not read_ECU0:
        #    d_f_prev = d_f
        #    FxR_prev = FxR
        #    read_ECU0 = True
        #d_f_diff = d_f_prev-d_f
        #d_f_prev = d_f
        # FxR_diff = FxR_prev-FxR
        #FxR_prev = FxR
        #if d_f_diff == 0 and FxR_diff == 0:
        #    d_f = 0
        #    FxR = 0
        #to ensure the veh1 starts after veh2 opt completes (Uncomment if you want to disconnect veh2 communication)
        #if not read_veh2:
        #    d_f = 0
        #    FxR = 0
        #dis = math.sqrt((x2-x)**2 + (y2-y)**2)
        #if dis > 10:
        #    d_f = 0
        #    FxR = 0
        if abs(v_x) > 0.05:
            # moving: integrate the dynamic bicycle model in both the global
            # (x, y, psi) and the error (s, ey, epsi) frames
            z = (x, y, psi, v_x, v_y, r)
            ze = (s, ey, epsi, v_x, v_y, r)
            u = (d_f, FxR)
            d_f_sens = d_f
            (x, y, psi, v_x, v_y, r) = f_DynBkMdl(z, u, vhMdl, trMdl, F_ext, dt)
            (s, ey, epsi, v_x, v_y, r) = ef_DynBkMdl(ze, u, vhMdl, trMdl, F_ext, dt)
            v = math.sqrt(v_x**2 + v_y**2)
        else:
            # near standstill: fall back to the kinematic bicycle model
            z = (x, y, psi, v_x)
            ze = (s, ey, epsi, v_x)
            u = (d_f, FxR)
            (x, y, psi, v_x) = f_KinBkMdl(z,u, (L_a, L_b), F_ext, dt)
            (s, ey, epsi, v_x) = ef_KinBkMdl(ze,u, (L_a, L_b), F_ext, dt)
            v_y = 0
            r = 0
            v = v_x
        # publish information
        #if v_x == 0:
        #    v_x = 0.0001
        state_pub.publish( Z_DynBkMdl(x, y, psi, v_x, v_y, r) )
        state_pub2.publish( Vector3(v, 0, 0) )
        #state_pub3.publish( Vector3(d_f_sens, 0, 0) )
        #state_error_frame_pub.publish( eZ_DynBkMdl(s, ey, epsi, v_x, v_y, r) )
        # wait
        rate.sleep()
# Run the simulator node; ROSInterruptException is raised on shutdown
# and is safe to swallow here.
if __name__ == '__main__':
    try:
        vehicle_simulator()
    except rospy.ROSInterruptException:
        pass
|
<filename>python/lib/packet/signed_util.py
# Copyright 2018 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`signed_util` --- Utility for signed control payloads
=========================================
"""
# SCION
from lib.errors import SCIONKeyError, SCIONParseError, SCIONVerificationError
from lib.packet.cert_mgmt import CertChainReply, TRCReply
from lib.packet.ctrl_pld import CtrlPayload, SignedCtrlPayload
from lib.packet.proto_sign import ProtoSign, ProtoSignType, DefaultSignSrc
from lib.packet.scion_addr import ISD_AS
from lib.trust_store import TrustStore
def create_sign(ia: ISD_AS, chain_ver: int, trc_ver: int) -> ProtoSign:
    """
    Create ProtoSign for the specified values with ed25519 as the signing algorithm.
    :param ISD_AS ia: ISD-AS of the signing AS.
    :param int chain_ver: Version of the certificate authenticating the signing key.
    :param int trc_ver: Version of the TRC authenticating the certificate chain.
    :returns: The sign object
    :rtype: ProtoSign
    """
    # Pack the sign source (who signed, with which chain/TRC versions) into the sign.
    sign = DefaultSignSrc.from_values(ia, chain_ver, trc_ver)
    return ProtoSign.from_values(ProtoSignType.ED25519, sign.pack())
class Signer(object):
    """Creates signed control payloads from a fixed sign template and key."""

    def __init__(self, sign: ProtoSign, key: bytes) -> None:
        """
        :param ProtoSign sign: ProtoSign attached (as a copy) to every signed payload.
        :param bytes key: Signing key authenticated by `sign`.
        """
        self._sign = sign
        self._key = key

    def sign(self, pld: CtrlPayload) -> SignedCtrlPayload:
        """
        Create a signed version of the supplied control payload.

        :param CtrlPayload pld: The control payload to be signed
        :returns: the signed control payload
        :rtype: SignedCtrlPayload
        :raises: ProtoSignError
        """
        packed_pld = pld.proto().to_bytes_packed()
        signed_pld = SignedCtrlPayload.from_values(packed_pld, self._sign.copy())
        signed_pld.sign(self._key)
        return signed_pld
class Verifier(object):
    """
    Basic verifier to verify signed control payloads.
    """
    def __init__(self, trust_store: TrustStore) -> None:
        """
        :param TrustStore trust_store: The trust store used to fetch the trust objects.
        """
        self._trust_store = trust_store

    def verify(self, spld: SignedCtrlPayload) -> bool:
        """
        Verify checks if the signed control payload can be verified.
        If not, an error is raised.
        :param SignedCtrlPayload spld: the signed control payload to be verified.
        :returns: whether the verification was successful.
        :rtype: bool
        :raises: SCIONVerificationError
        """
        try:
            cpld = spld.pld()
        except SCIONParseError as e:
            raise SCIONVerificationError(
                "Unable to unpack control payload. Error: %s" % e) from None
        # Some payload types are exempt from verification (see ignore_sign).
        if self.ignore_sign(cpld):
            return True
        try:
            vkey = self.get_verifying_key(spld.psign)
        except (SCIONKeyError, SCIONParseError, SCIONVerificationError) as e:
            raise SCIONVerificationError("Unable to fetch verifying key. Error: %s" % e) from None
        return spld.verify(vkey)

    def ignore_sign(self, cpld: CtrlPayload) -> bool:
        """
        Check if the signature shall be ignored for this type of control payload.
        CertChainReply and TRCReply are ignored to avoid dependency cycles.
        :param CtrlPayload cpld: The control payload.
        :returns: whether the signature shall be ignored.
        :rtype: bool
        """
        if type(cpld.union.union) in (CertChainReply, TRCReply,):
            return True
        return False

    def get_verifying_key(self, sign: ProtoSign) -> bytes:
        """
        Parses the sign src and fetches the authenticated verifying key.
        In case the certificate chain or TRC are not present, a SCIONKeyError is thrown.
        In case the certificate chain or TRC are not valid anymore, a
        SCIONVerificationError is thrown.
        :param ProtoSign sign: The sign of the signed control payload to be verified.
        :returns: the verifying key
        :rtype: bytes
        :raises SCIONSignSrcError, SCIONKeyError, SCIONVerificationError
        """
        # Unsigned payloads verify against an empty key.
        if sign.p.type == ProtoSignType.NONE:
            return bytes(0)
        src = DefaultSignSrc(sign.p.src)
        chain = self._trust_store.get_cert(src.ia, src.chain_ver)
        if not chain:
            raise SCIONKeyError("Chain (%sv%s) not found" % (src.ia, src.chain_ver)) from None
        trc = self._trust_store.get_trc(src.ia[0], src.trc_ver)
        if not trc:
            raise SCIONKeyError("TRC (%sv%s) not found" % (src.ia[0], src.trc_ver)) from None
        # The TRC must still be active relative to the newest local TRC.
        max_trc = self._trust_store.get_trc(src.ia[0])
        trc.check_active(max_trc)
        # Verify the chain against the TRC before trusting its key.
        chain.verify(chain.as_cert.subject, trc)
        return chain.as_cert.subject_sig_key
|
<reponame>nikifkon/ChatApp<filename>backend/socket_chat/tests/group_consumer/conftest.py
from datetime import datetime
import pytest
from django.contrib.auth import get_user_model
from channels.testing import WebsocketCommunicator
from backend.groups.models import ChatGroup, GroupMessage
User = get_user_model()
@pytest.fixture
def group_create_request(get_yml_dataset) -> dict:
    """Group-creation request payload loaded from this directory's YAML dataset."""
    root_dataset = get_yml_dataset(__file__)
    request_data = root_dataset["group_create_request"]
    return request_data
@pytest.fixture
def group_join_request(get_yml_dataset) -> dict:
    """Group-join request payload loaded from this directory's YAML dataset."""
    root_dataset = get_yml_dataset(__file__)
    request_data = root_dataset["group_join_request"]
    return request_data
@pytest.fixture
async def group(group_create_request: dict, ok_status: str, group_join_request: dict,
                auth_com: WebsocketCommunicator, another_auth_com: WebsocketCommunicator) -> ChatGroup:
    """Create a group over the websocket as the first user, have the second
    user join it, and return the persisted ChatGroup instance."""
    await auth_com.send_json_to(group_create_request)
    response = await auth_com.receive_json_from()
    assert ok_status == response["status"], response["data"]
    group_id = response["data"]["id"]
    # Target the join request at the group that was just created.
    group_join_request["data"]["id"] = group_id
    await another_auth_com.send_json_to(group_join_request)
    another_response = await another_auth_com.receive_json_from()
    assert ok_status == another_response["status"], another_response["data"]
    return ChatGroup.objects.get(id=group_id)
@pytest.fixture
def group_messages() -> list:
    """Expected message list for the group; empty by default."""
    return []
@pytest.fixture
def group_members(user_serialized_data, another_user_serialized_data):
    """Expected members payload for the group fixture: both users joined today.

    Role codes "A" and "S" presumably mean admin and subscriber — confirm
    against the group membership model.
    """
    return [
        {
            "person": user_serialized_data,
            "date_joined": str(datetime.date(datetime.today())),
            "role": "A"
        },
        {
            "person": another_user_serialized_data,
            "date_joined": str(datetime.date(datetime.today())),
            "role": "S"
        }
    ]
@pytest.fixture
def group_last_message(group_messages: list) -> dict:
    """Expected `last_message` payload: derived from the newest message, or an
    empty placeholder (no "date" key) when the group has no messages."""
    if not group_messages:
        return {
            "sender": None,
            "text": ""
        }
    newest = group_messages[-1]
    return {
        "sender": newest["sender"]["id"],
        "text": newest["text"],
        "date": newest["date"]
    }
@pytest.fixture
def unread_count() -> int:
    """Expected number of unread messages; zero by default."""
    return 0
@pytest.fixture
def group_img(group: ChatGroup) -> str:
    """URL of the group's image, or None when no image has been uploaded."""
    return group.img.url if group.img.name != "" else None
@pytest.fixture
def group_data(group: ChatGroup, group_img: str, group_messages: list,
               group_members: list, group_last_message: dict, unread_count: int) -> dict:
    """Full serialized representation expected for the `group` fixture."""
    return {
        "id": group.id,
        "name": group.name,
        "slug": group.slug,
        "img": group_img,
        "description": group.description,
        "messages": group_messages,
        "members": group_members,
        "unread_count": unread_count,
        "last_message": group_last_message
    }
@pytest.fixture
def message_text(get_yml_dataset) -> str:
    """Message body text taken from the YAML dataset."""
    root_dataset = get_yml_dataset(__file__)
    return root_dataset["group_message_data"]["text"]
@pytest.fixture
def group_message_send_request(get_yml_dataset, group: ChatGroup, message_text: str) -> dict:
    """Message-send request payload, targeted at the `group` fixture.

    NOTE(review): the dataset key is spelled "messsage_send_request" (triple s);
    it must match the YAML file, so do not "fix" the spelling here alone.
    """
    root_dataset = get_yml_dataset(__file__)
    request_data = root_dataset["messsage_send_request"]
    request_data["data"]["id"] = group.id
    request_data["data"]["text"] = message_text
    return request_data
@pytest.fixture
def group_message_sender(user: User) -> dict:
    """Serialized sender payload expected for messages sent by `user`."""
    return {
        "id": user.id,
        "username": user.username,
        "avatar": user.avatar.url
    }
@pytest.fixture
async def group_message(auth_com: WebsocketCommunicator, another_auth_com: WebsocketCommunicator,
                        group_message_send_request: dict, ok_status: str) -> GroupMessage:
    """Send a message to the group as the first user, check both members
    receive an OK event, and return the persisted GroupMessage."""
    await auth_com.send_json_to(group_message_send_request)
    response = await auth_com.receive_json_from()
    assert response["status"] == ok_status, response["data"]
    # The other group member must receive the broadcast as well.
    another_response = await another_auth_com.receive_json_from()
    assert another_response["status"] == ok_status, another_response["data"]
    return GroupMessage.objects.get(id=response["data"]["id"])
@pytest.fixture
def unread() -> bool:
    """Expected `unread` flag for a message; False by default."""
    return False
@pytest.fixture
def stared() -> bool:
    """Expected `stared` flag for a message; False by default."""
    return False
@pytest.fixture
def group_message_data(group_message: GroupMessage, group_message_sender: dict,
                       message_text: str, unread: bool, stared: bool,
                       group: ChatGroup) -> dict:
    """Full serialized representation expected for the `group_message` fixture.

    NOTE(review): the date is formatted at minute resolution from the current
    UTC time, so comparisons may be flaky across a minute boundary — confirm
    this matches the server's serialization.
    """
    return {
        "id": group_message.id,
        "sender": group_message_sender,
        "chat_id": group.id,
        "text": message_text,
        "unread": unread,
        "stared": stared,
        "date": datetime.utcnow().strftime("%Y-%m-%dT%H:%MZ")
    }
|
"""
Helper functions
"""
import os
import time
from os.path import join
from pathlib import Path
from typing import Callable
from colorama import Fore, Style
from pytorch_lightning import seed_everything
from torch import Tensor
from torch.nn import Module
# Default verbosity threshold for log(); 3 = print everything up to level 3.
VERBOSITY = 3
# Whether log() prefixes messages with a timestamp by default.
TIMESTAMPED = True
# data/ and logs/ directories two levels above this file.
DATA_DIR = join(Path(os.path.dirname(os.path.abspath(__file__))).parents[1], 'data')
LOG_DIR = join(Path(os.path.dirname(os.path.abspath(__file__))).parents[1], 'logs')
# Accepted aliases for the supported tensor-network decompositions.
TENSOR_NET_TYPES = ['cp', 'cpd', 'canonical', 'tucker', 'train', 'tensor-train', 'tt']
# Hex color palettes (presumably KU Leuven branding — confirm), 4- and 8-step.
KUL_PAL = ['#FF7251', '#C58B85', '#8CA5B8', '#52BEEC']
KUL_PAL2 = ['#FF7251', '#E67D67', '#CE887D','#B59393','#9C9DAA','#83A8C0','#6BB3D6','#52BEEC']
# Set parameters
def set_params(verbosity: int = None, timestamped: bool = None, data_dir: str = None, log_dir: str = None):
    """Override the module-level configuration globals.

    Arguments left as None keep their current values. Directory paths are
    normalized to absolute paths, and the (possibly updated) data/log
    directories are created if they do not exist yet.

    :param verbosity: log level threshold (1 = only most important, 3 = everything)
    :param timestamped: whether log lines are prefixed with a timestamp
    :param data_dir: path to the data directory
    :param log_dir: path to the log directory
    :return: /
    """
    global VERBOSITY
    global TIMESTAMPED
    global DATA_DIR
    global LOG_DIR
    # Use `is not None` so verbosity=0 (silence) is applied rather than ignored.
    VERBOSITY = verbosity if verbosity is not None else VERBOSITY
    TIMESTAMPED = timestamped if timestamped is not None else TIMESTAMPED
    DATA_DIR = data_dir if data_dir else DATA_DIR
    LOG_DIR = log_dir if log_dir else LOG_DIR
    DATA_DIR = os.path.abspath(DATA_DIR)
    LOG_DIR = os.path.abspath(LOG_DIR)
    # Create the directories AFTER applying the overrides. Previously set_dir
    # ran first, creating the old default paths instead of the requested ones.
    set_dir(DATA_DIR, LOG_DIR)
def hi(title=None, **params):
    """
    Say hello. (It's stupid, I know.)
    If there's anything to initialize, do so here.

    :param title: optional title to print (boxed, in blue) under the banner
    :param params: optional configuration overrides forwarded to set_params()
    """
    print("\n")
    print(Fore.BLUE, end='')
    print(" ___ _ ___ _ _ _")
    print(" | \ ___ _ __ _ __ ___| |/ __| /_\ | \| |__ _ ___ _ _")
    print(" | |) / _ \ '_ \ '_ \/ -_) | (_ |/ _ \| .` / _` / -_) '_|")
    print(" |___/\___/ .__/ .__/\___|_|\___/_/ \_\_|\_\__, \___|_|")
    print(" |_| |_| |___/", end='')
    print(Style.RESET_ALL)
    print()
    if title:
        log(title, title=True, color='blue')
    # Set params on request, then report the effective configuration.
    if params:
        set_params(**params)
        log(f"VERBOSITY is set to {VERBOSITY}", verbosity=1, timestamped=False, color='green')
        log(f"TIMESTAMPED is set to {TIMESTAMPED}", verbosity=1, timestamped=False, color='green')
        log(f"DATA_DIR is now set to {os.path.abspath(DATA_DIR)}", timestamped=False, verbosity=1, color='green')
        log(f"LOG_DIR is set to {os.path.abspath(LOG_DIR)}", timestamped=False, verbosity=1, color='green')
        print()
    # Ensure the data/log directories exist.
    if not os.path.exists(DATA_DIR) or not os.path.exists(LOG_DIR):
        set_dir(DATA_DIR, LOG_DIR)
    # Fix the global RNG seed for reproducibility.
    seed_everything(616)
# Expand on what happens to input when sent through layer
def whatsgoingon(layer: Callable, input: Tensor):
    """
    Processes input through layer, and prints the effect on the dimensionality.

    :param layer: callable to apply (a plain function or a torch Module)
    :param input: tensor fed through `layer`
    :return: the layer's output
    """
    # Generate output
    output = layer(input)
    # Log the effect. Fall back to the class name for callables without a
    # __name__ attribute (e.g. nn.Module instances), which previously raised
    # AttributeError here.
    name = getattr(layer, '__name__', type(layer).__name__)
    log(f'{name}: {input.shape} --> {output.shape}')
    return output
# Fancy print
def log(*message, verbosity=3, sep="", timestamped=None, title=False, color=None):
    """
    Print wrapper that adds timestamp, and can be used to toggle levels of logging info.

    :param message: message part(s) to print
    :param verbosity: importance of message: level 1 = top importance, level 3 = lowest importance
    :param sep: separator passed to print between message parts
    :param timestamped: include timestamp at start of log (None = module default TIMESTAMPED)
    :param title: toggle whether this is a title or not (titles are boxed and always shown)
    :param color: text color name ('red', 'blue', 'green', 'yellow', 'magenta', 'cyan')
    :return: /
    """
    # Map color names to colorama codes; unknown names are passed through unchanged.
    color_dict = {
        'red': Fore.RED,
        'blue': Fore.BLUE,
        'green': Fore.GREEN,
        'yellow': Fore.YELLOW,
        'magenta': Fore.MAGENTA,
        'cyan': Fore.CYAN,
    }
    if color and color in color_dict:
        color = color_dict[color]
    # Titles always get shown
    verbosity = 1 if title else verbosity
    # Print if log level is sufficient
    if verbosity <= VERBOSITY:
        # Print title inside a '#' box
        if title:
            # Box width = total printed length of all message parts.
            # (Fix: `len(*message)` raised TypeError whenever title=True was
            # used with more than one message part.)
            n = len(''.join(str(m) for m in message))
            if color:
                print(color, end='')
            print('\n' + (n + 4) * '#')
            print('# ', *message, ' #', sep='')
            print((n + 4) * '#' + '\n' + Style.RESET_ALL)
        # Print regular
        else:
            ts = timestamped if timestamped is not None else TIMESTAMPED
            t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            if color:
                print(color, end='')
            print((str(t) + (" - " if sep == "" else "-")) if ts else "", *message, Style.RESET_ALL, sep=sep)
    return
def time_it(f: Callable):
    """
    Timer decorator: shows how long execution of function took.

    :param f: function to measure
    :return: wrapped function that logs its elapsed time
    """
    def timed(*args, **kwargs):
        start = time.time()
        result = f(*args, **kwargs)
        elapsed = round(time.time() - start, 3)
        log("'", f.__name__, "' took ", elapsed, " seconds to complete.", sep="")
        return result
    return timed
def set_dir(*dirs):
    """
    If folder doesn't exist, make it.

    :param dirs: directories to check/create
    :return: /
    """
    for path in dirs:
        if os.path.exists(path):
            log("\'{}\' folder accounted for.".format(path), verbosity=3)
        else:
            os.makedirs(path)
            log("WARNING: Data directory <{dir}> did not exist yet, and was created.".format(dir=path), verbosity=1)
def count_params(module: Module, format=None, precision=3):
    """Count total parameters in a module/model.

    :param module: torch module whose parameters are counted
    :param format: None returns the raw int; 'k'/'M'/'G' return scaled strings;
        'base' returns an unscaled string
    :param precision: decimals used when rounding scaled counts
    """
    total = sum(param.numel() for param in module.parameters())
    scale = {'k': 1000, 'M': 1000000, 'G': 1000000000}
    if format in scale:
        return f'{round(total / scale[format], precision)}{format} params'
    if format == 'base':
        return f'{round(total, precision)} params'
    return total
# Manual smoke test: print the banner and confirm configuration output.
if __name__ == '__main__':
    hi('Test!')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File coupling.py created on 20:47 2018/1/1
@author: <NAME>
@version: 1.0
"""
import matplotlib.cm
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import time
import logging
from interfaces import *
from tree_search import *
from lispy import standard_env, eval, parse
class Workflow(object):
    """Workflow: Put things together.

    Glues the learning pipeline together: a game simulator, self-play trace
    generation, IRL feature-weight fitting, value-network (NN) training, and
    a small Scheme (lispy) environment exposing these operations as commands.
    """

    def __init__(self, simulator: BaseSimulator, s: Type[State], a: Type[Action], test_policy=None, gamma=0.99):
        """Wire up the simulator, state/action types, IRL and the value net.

        :param simulator: simulator used for IRL/NN bookkeeping
        :param s: concrete State class (provides initial state and features)
        :param a: concrete Action class; a(0) is used as the "zero" action
        :param test_policy: optional fixed opponent used by _judge_policy
        :param gamma: discount factor forwarded to IRL
        """
        self.logger = logging.getLogger()
        self.simulator = simulator
        # Separate simulator instance for playing games, so its step counters
        # stay independent of the training simulator's counters.
        self.game_simulator = type(simulator)()
        self.initial_state = s.get_initial_state()
        self.zero_action = a(0)
        self.type_state = s
        self.type_action = a
        self.test_policy = test_policy
        # self.game = Game()
        self.IRL = IRL(simulator, self.zero_action, gamma=gamma)
        # Network input width is derived from the state feature vector length.
        input_dim = len(self.initial_state.feature_func())
        hidden_units = [64, 128, 128]
        batch_size = 128
        epochs = 1
        self.NN = NN(input_dim, hidden_units, batch_size, epochs)
        # scheme env: expose workflow operations as Scheme procedures.
        self.global_env = standard_env()
        self.global_env.update({
            'state': self.type_state,
            'action': self.type_action,
            'repeat': self._repeat_game_with_policy,
            'train!': lambda traces: self._train_nn(traces, self._train_feature_weight_with_traces(traces)),
            'save_model!': lambda: self.NN.save(),
            'load_model!': lambda: self.NN.load(),
            'explore': Exploration,
            'random_policy': Exploration(ZeroPolicy(self.type_action), epsilon=1),
            'tree_search_policy': MinimaxSearch(self.type_action, self.simulator, self.NN),
            'test_policy': self.test_policy,
            'get_weight': lambda: self.IRL.coef,
            'draw': self._draw
        })

    def command(self, cmd: str):
        """Parse and evaluate a Scheme command string in the workflow env.

        Numeric results are printed directly; other results print their type
        and value. The raw result is returned either way.
        """
        print('Executing:', cmd)
        result = eval(parse(cmd), env=self.global_env)
        if result is not None:
            if isinstance(result, int) or isinstance(result, float):
                print('Result:', result)
            else:
                print('Result:', type(result))
                print(result)
        return result

    def flow(self, _policy1, _policy2):
        """ this method is deprecated and preserved for debug use. """
        # One full debug iteration: generate traces, fit IRL weights, train
        # the net, then evaluate successively stronger tree-search policies.
        traces = self._repeat_game_with_policy(100, _policy1, _policy2)
        w = self._train_feature_weight_with_traces(traces)
        self.logger.info(w)
        print(w)
        print(w.shape)
        self._train_nn(traces, w)
        print('Testing 100 with trained policy and random policy')
        policy0 = BaseTreeSearch(self.type_action, self.simulator, self.NN)
        self._repeat_game_with_policy(20, policy0, _policy1)
        #print('Testing 100 with trained policy and trained policy')
        #policy0 = BaseTreeSearch(self.type_action, self.simulator, self.NN)
        # note the policy is stateless
        #self._repeat_game_with_policy(100, policy0, policy0)
        print('Testing 100 with trained policy and trained policy with exploration')
        policy01 = BaseTreeSearch(self.type_action, self.simulator, self.NN)
        policy01_exp = Exploration(policy01, epsilon=0.3)
        # note the policy is stateless
        self.traces = self._repeat_game_with_policy(20, policy01, policy01_exp)
        self._judge_policy(policy01, tag='v0.1', n=1)
        print('use replay of version 0.1 to train weight')
        w2 = self._train_feature_weight_with_traces(self.traces)
        print(w2)
        print(np.linalg.norm(w-w2))
        print('use replay of version 0.1 and weight to train nn')
        self._train_nn(self.traces, w2)
        print('Testing 100 with trained policy and trained policy with exploration')
        policy02 = BaseTreeSearch(self.type_action, self.simulator, self.NN)
        policy02_exp = Exploration(policy02, epsilon=0.3)
        self.traces = self._repeat_game_with_policy(100, policy02, policy02_exp)
        self._judge_policy(policy02, tag='v0.2', n=1)
        # hint: LoosedTrace(traces[2], wf.simulator).show(wf.IRL.coef, wf.NN)

    def _repeat_game_with_policy(self, n, policy1, policy2):
        """Play n games of policy1 vs policy2 and collect their traces.

        Prints a progress bar at every ~20% of the run plus final win/tie/lose
        stats, and returns the list of game traces.
        """
        self.game = Game(policy1, policy2, max_step=300)
        self.game_simulator.reset_cnt()
        total, win, tie, lose = 0, 0, 0, 0
        traces = []
        lose_idx = []       # indices of lost games, printed for debugging
        avg_turns = []      # turns per game since the last progress report
        ts = time.time()
        for e in range(n):
            self.game.reset()
            trace = self.game.start(self.game_simulator)
            # Reward of the final state decides win/tie/lose for player 1.
            reward = trace.final_state().reward()
            total += 1
            win += 1 if reward > 0 else 0
            tie += 1 if reward == 0 else 0
            lose += 1 if reward < 0 else 0
            avg_turns.append(trace.step)
            # Some simulators track a running score board — feed it if present.
            if 'update_score' in dir(self.game_simulator):
                self.game_simulator.update_score(win, tie, lose)
            if reward < 0:
                lose_idx.append(e)
            traces.append(trace)
            # Progress report at each 20% chunk of the run.
            if n >= 5 and e % (n//5) == (n//5-1):
                turns = np.mean(avg_turns)
                avg_turns = []
                print("[%-10s] %d%% - W/T/L: %d/%d/%d - Avg Turns: %.1f" % ('=' * (10 * (e + 1) // n), (100 * (e + 1) // n), win, tie, lose, turns))
        print()
        t = time.time() - ts
        self.logger.info('W/T/L: %d/%d/%d - Time: %.1f s'%(win, tie, lose, t))
        print('W/T/L: %d/%d/%d - Time: %.1f s'%(win, tie, lose, t))
        print('Lose idx:', lose_idx)
        step_cnt, act_cnt = self.simulator.reset_cnt()
        print('step_cnt: %d, act_cnt: %d' % (step_cnt, act_cnt))
        return traces

    def _train_feature_weight_with_traces(self, traces):
        """Fit and return the IRL feature-weight vector from game traces."""
        self.IRL.feed(traces)  # the weight of each trace is same here
        # Maybe you can use cross validation to choose hyper param c
        param = self.IRL.train_w(hyper_param_c=1e-1)
        return param

    def _train_nn(self, traces, weight):
        """Train the value network on (feature, reward) pairs from traces.

        When the net is already usable, traces "fixed" by the net itself are
        added as extra training data. Ends with a label-vs-prediction density
        scatter plot for a visual sanity check.
        """
        # get the training data of nn
        # For computation efficiency, process traces as LoosedTrace first
        lt_traces = [LoosedTrace(trace, self.simulator) for trace in traces]
        dd1 = [self.IRL._split_branch(trace, weight) for trace in lt_traces]
        if self.NN.ready():
            from collections import Counter
            cnt = Counter()
            traces_s = [trace.fix_auto(self.simulator, self.NN, target=1, arg=cnt) for trace in lt_traces]
            fixed_traces = [item for sublist in traces_s for item in sublist]  # type: List[List[State]] # flatten array
            dd2 = []
            for trace in fixed_traces:
                self.IRL.process_trace_to_vector(trace, player=1, ret=dd2)
            print('Find %d fixed traces %s data [in coupling.py - Line 158]' % (len(fixed_traces), len(dd2)))
            print(cnt.most_common())
        else:
            dd2 = []
        dd = [item for sublist in dd1 for item in sublist]  # type: List[Tuple[State, np.ndarray, float]]
        dd = dd + dd2
        data = [(sf, r) for s, sf, r in dd]  # type: List[Tuple[np.ndarray, float]]
        size = len(data)
        print('Total %d training data [in coupling.py - Line 164]' % size)
        X, y = [list(t) for t in zip(*data)]
        X = np.array(X)
        y = np.array(y).transpose()
        self.NN.build()
        self.NN.train(X, y)
        y_pred = np.ravel(self.NN.predict(X))
        self.y = y
        self.y_pred = y_pred
        # sample data: print 30 random examples sorted by label
        indexes = random.sample(range(size), k=30)
        indexes.sort(key=lambda x: y[x])
        for ind in indexes:
            st = dd[ind][0]
            print('%s label: %+.4f pred: %+.4f diff: %.2f' % (st, y[ind], y_pred[ind], abs(y[ind]-y_pred[ind])))
        print('END of data sample')
        # Calculate the point density (capped at 10000 points for KDE cost)
        xy = np.vstack([y[:10000], y_pred[:10000]])
        z0 = gaussian_kde(xy)(xy)
        # Sort the points by density, so that the densest points are plotted last
        idx = z0.argsort()
        x0, y0, z0 = y[idx], y_pred[idx], z0[idx]
        fig, ax = plt.subplots()
        ax.scatter(x0, y0, c=z0, s=50, edgecolor='')
        plt.axis('equal')
        plt.show()

    def _draw(self, trace, weight):
        """ Input trace, use NN to predict result"""
        LoosedTrace(trace, self.simulator).show(weight, self.NN)
        dd = self.IRL._split_branch(trace, weight)
        data = [(sf, r) for s, sf, r in dd]
        X, y = [list(t) for t in zip(*data)]
        X = np.array(X)
        y = np.array(y).transpose()
        y_pred = np.ravel(self.NN.predict(X))
        # Calculate the point density
        xy = np.vstack([y, y_pred])
        z0 = gaussian_kde(xy)(xy)
        # Sort the points by density, so that the densest points are plotted last
        idx = z0.argsort()
        x0, y0, z0 = y[idx], y_pred[idx], z0[idx]
        fig, ax = plt.subplots()
        ax.scatter(x0, y0, c=z0, s=50, edgecolor='')
        plt.axis('equal')
        plt.show()

    def _judge_policy(self, policy_to_be_judged, tag='', n=100):
        """Benchmark a policy with n games against the fixed test policy.

        No-op when no test policy was configured; traces are kept on
        self.judge_traces for later inspection.
        """
        if self.test_policy is None:
            return
        print('Judging %d with tag = %s' % (n, tag))
        self.judge_traces = self._repeat_game_with_policy(n, policy_to_be_judged, self.test_policy)
|
<gh_stars>0
from numpy.lib.function_base import _piecewise_dispatcher
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#define a class Team, which takes team name and performes the analysis
class Team():
    """Analyses for one NBA team: win/loss ratios across schedule splits.

    Relies on the module-level `data` DataFrame (loaded from nba_test.csv);
    each category column of a team row holds a 'wins-losses' record string.
    """

    def __init__(self, team_name):
        self.team_name = team_name
        # Row of the global DataFrame belonging to this team.
        self.team_row = data.loc[data['Team'] == self.team_name]
        print(self.team_row)
        # Column index -> human-readable category name.
        self.dictionary = {1: 'Overall', 2: 'Home', 3: 'Road', 4: 'E', 5: 'W', 6: 'A', 7: 'C', 8: 'SE', 9: 'NW', 10: 'P', 11: 'SW', 12: 'Pre', 13: 'Post', 14: '≤3', 15: '≥10', 16: 'Oct', 17: 'Nov', 18: 'Dec', 19: 'Jan', 20: 'Feb', 21: 'Mar', 22: 'Jul', 23: 'Aug'}
        self.user_input = None
        self.user_option = None
        self.data_visual = Show_data()

    def menu(self):
        """Interactive loop: ask for a metric and dispatch to the analyses."""
        while True:
            print('What metrix do you want to see? \n 1. Win/loss ratio \n 2. Division comparison \n 3. Monthly graph \n 4. Conference statistics \n 5. Game result predictor \n 6. All-Star analysis \n 7. Game Margin \n\n OR type Q to quit \n')
            self.user_option = input("What option are you choosing? ")
            if self.user_option == '1':
                print('What metrix do you want to see? \n 1. Overall win ratio \n 2. Home win ratio \n 3. Road win ratio \n\n OR press any button to go back to the main menu!\n')
                self.user_input = input('Input a number between 1 and 3: ')
                try:
                    self.user_input = int(self.user_input)
                    if self.user_input < 1 or self.user_input > 3:
                        print('This is not a valid option!')
                    else:
                        # printing=True announces the ratio before the pie chart.
                        # (The original call passed no second argument at all and
                        # crashed with TypeError.)
                        a = self.win_loss_ratio(self.user_input, True)
                        self.data_visual.piechart_win_loss_ratio(a[0], a[1])
                except (TypeError, ValueError):
                    print('Not a valid option. Choose again')
                    continue
            elif self.user_option == '2':
                self.divison_comparison()
                continue
            elif self.user_option == '3':
                a = self.monthly_win_loss()
                self.data_visual.linechart(a)
                continue
            elif self.user_option == '4':
                self.conference_comparison()
                continue
            elif self.user_option == '5':
                pass  # game result predictor not implemented yet
            elif self.user_option == '6':
                self.all_star()
                continue
            elif self.user_option == '7':
                self.game_margin()
                continue
            elif self.user_option.lower() == 'q':
                # Menu advertises 'Q'; accept either case (original matched only 'q').
                quit()
            else:
                print("\nThis is not a valid option! Please try again!")
                continue

    def win_loss_ratio(self, category, printing=False):
        """Return (wins, losses, ratio%) for one schedule category.

        :param category: column index into the team row (see self.dictionary)
        :param printing: when True, also print a human-readable summary.
            Defaults to False so internal callers stay quiet. (The original
            had no default, so every one-argument call raised TypeError.)
        """
        # get only the row of the chosen team
        category_value = self.team_row.iat[0, category]
        wins = int(category_value.split('-')[0])
        loses = int(category_value.split('-')[1])
        win_loss_ratio = round((wins / (wins + loses) * 100), 2)
        if printing:
            print(f'The {self.dictionary[category]} win/loss ratio of {self.team_name} is {win_loss_ratio} % !')
        # return wins, losses and the percentage
        return wins, loses, win_loss_ratio

    def monthly_win_loss(self):
        """Return monthly win/loss ratios (Oct..Mar, Jul, Aug) as a tuple."""
        # Columns 16-23 are the month categories (see self.dictionary).
        return tuple(self.win_loss_ratio(month)[2] for month in range(16, 24))

    def _summary(self, first_col, last_col, names):
        """Collect (ratios, total games, best name, worst name) for a column range.

        Shared helper for the division/conference/all-star/margin analyses;
        the original duplicated this block in each method and called
        win_loss_ratio twice per column just to get the game totals.
        """
        ratios = []
        games = []
        for col in range(first_col, last_col):
            wins, loses, ratio = self.win_loss_ratio(col)
            ratios.append(ratio)
            games.append(wins + loses)
        best = names[ratios.index(max(ratios))]
        worst = names[ratios.index(min(ratios))]
        return ratios, games, best, worst

    def divison_comparison(self):
        """Compare the team's win/loss ratio across the six divisions."""
        # Official division names, in column order 6..11.
        names = ["Atlantic", "Central", "South-East", "North-West", "Pacific", "South-West"]
        ratios, games, best, worst = self._summary(6, 12, names)
        print("")
        print("The highest win/loss ratio was", max(ratios), "%", f"and it was in the {best} division")
        print("")
        print("The lowest win/loss ratio was", min(ratios), "%", f"and it was in the {worst} division")
        print("")
        # Use the instance attribute (the original read the global team_name).
        print(f"The {self.team_name} played a total of :")
        print("")
        for name, count in zip(names, games):
            print('\t' + f"{count} games in the {name} division")
        print("")

    def conference_comparison(self):
        """Compare the team's win/loss ratio across the two conferences."""
        names = ["East", "West"]  # columns 4..5
        ratios, games, best, worst = self._summary(4, 6, names)
        print("")
        print("The highest win/loss ratio was", max(ratios), "%", f"and it was in the {best} conference")
        print("")
        print("The lowest win/loss ratio was", min(ratios), "%", f"and it was in the {worst} conference")
        print("")
        print(f"The {self.team_name} played a total of :")
        print("")
        for name, count in zip(names, games):
            print('\t' + f"{count} games in the {name} conference")
        print("")

    def all_star(self):
        """Compare win/loss ratios before and after the All-Star game."""
        names = ["Pre All-Star Game", "Post All-Star Game"]  # columns 12..13
        ratios, games, best, worst = self._summary(12, 14, names)
        print("")
        print("The highest win/loss ratio was", max(ratios), "%", f"and it was {best}")
        print("")
        print("The lowest win/loss ratio was", min(ratios), "%", f"and it was {worst}")
        print("")
        print(f"The {self.team_name} played a total of :")
        print("")
        for name, count in zip(names, games):
            print('\t' + f"{count} games {name}")
        print("")

    def game_margin(self):
        """Compare win/loss ratios in close (≤3) vs blow-out (≥10) games."""
        names = ["≤3 points game margin", "≥ 10 points game margin"]  # columns 14..15
        ratios, games, best, worst = self._summary(14, 16, names)
        print("")
        print("The highest win/loss ratio was", max(ratios), "%", f"and it was in the {best} category")
        print("")
        print("The lowest win/loss ratio was", min(ratios), "%", f"and it was in the {worst} category")
        print("")
        print(f"The {self.team_name} played a total of :")
        print("")
        for name, count in zip(names, games):
            print('\t' + f"{count} games with {name}")
        print("")
class Show_data():
    """Render the team statistics as matplotlib charts."""

    def __init__(self):
        pass

    def piechart_win_loss_ratio(self, wins, loses):
        """Pie chart of wins vs losses; the figure is also saved to plots.png."""
        slice_labels = 'Wins', 'Losses'
        slice_sizes = [wins, loses]
        figure1, axes1 = plt.subplots()
        axes1.pie(slice_sizes,
                  explode=(0.1, 0),                    # pull the Wins slice out slightly
                  labels=slice_labels,
                  autopct='%1.1f%%',
                  startangle=90,
                  colors=['#e38d8d', '#95b7ed'])
        axes1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
        axes1.set_title('')
        plt.savefig('plots.png')
        plt.show()
        plt.close(1)

    def linechart(self, y):
        """Line chart of the monthly win/loss ratios."""
        positions = [10, 20, 30, 40, 50, 60, 70, 80]
        month_names = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Jul', 'Aug']
        plt.figure()
        plt.xticks(positions, month_names)
        plt.plot(positions, y)
        plt.show()
        plt.close()
# Load the season table; each category cell holds a 'wins-losses' record string.
data = pd.read_csv("nba_test.csv")
# Fill missing categories with a '1-100' record so the ratio math never divides
# by zero (e.g. some teams have no Jul/Aug games). NOTE(review): this makes
# absent categories look like a ~1% ratio instead of marking them as missing.
data = data.fillna('1-100')
#This is to test and should be in main
#print(data)
#print(data.Team)
# Interactive entry point: pick a team, then run the analysis menu loop.
team_name = input("Team name: ")
my_team = Team(team_name)
a = my_team.menu()
|
import tkinter as tk
import tkinter.messagebox
from enums import CamperType, CampRegion
import csv, os, classBooking, random
import tkBooking
from datetime import date
class AdvisorWindow:
    """Advisor UI: pick a van type, region and camp, then create a booking."""

    def __init__(self, root):
        self.window = tk.Toplevel(root)
        self.window.grab_set()  # modal: keep input focused on this window
        label1 = tk.Label(self.window, text="Welcome To Solent Camper!")
        label1.pack(side=tk.TOP, pady=(20, 0))
        # Fixed visible typo ("Advisr" -> "Advisor").
        label2 = tk.Label(self.window, text="As Advisor, You Can Search For Camping Sites and Vans..")
        label2.pack(side=tk.TOP, pady=(5, 0))
        # Tk variables backing the three selection widgets.
        self.regionName = tk.StringVar()
        self.regionName.set(CampRegion.RegionA.name)
        self.vanType = tk.StringVar()
        self.vanType.set(CamperType.Small.name)
        self.campName = tk.StringVar()
        self.campName.set("")
        self.campList = []  # rows from camps.csv for the currently selected region
        labelvan = tk.Label(self.window, text="Van Type")
        labelvan.pack(side=tk.TOP, pady=(5, 0))
        optionVan = tk.OptionMenu(self.window, self.vanType, *[e.name for e in CamperType])
        optionVan.pack(side=tk.TOP)
        labelregion = tk.Label(self.window, text="Region")
        labelregion.pack(side=tk.TOP, pady=(5, 0))
        optionRegion = tk.OptionMenu(self.window, self.regionName, *[e.name for e in CampRegion], command=self.campRegionSelected)
        optionRegion.pack(side=tk.TOP)
        labelcamp = tk.Label(self.window, text="Camp Name")
        labelcamp.pack(side=tk.TOP, pady=(5, 0))
        # Camp dropdown and Book button are packed later, once a region is chosen.
        self.optionCamp = tk.OptionMenu(self.window, self.campName, self.campList)
        self.bookButton = tk.Button(self.window, text="Book", command=self.saveBooking)
        self.window.title('Advisor - Solent Campers')
        self.window.geometry("400x350+400+100")

    def campRegionSelected(self, region):
        """Load the camps of the chosen region from data/camps.csv into the UI.

        :param region: region enum *name* as displayed in the dropdown
        """
        # Map the displayed enum name back to its numeric value.
        for e in CampRegion:
            if e.name == region:
                region = e.value
                break
        if not os.path.isfile('data/camps.csv'):
            tkinter.messagebox.showerror(master=self.window, title="Sorry", message="Data Files Do Not Exist or are unreadable. Contact Administrator")
            return
        camps = []
        # Context manager guarantees the handle is closed even if a row is
        # malformed (the original left the file open on an exception).
        with open('data/camps.csv', 'r') as f:
            reader = csv.reader(f)
            header = next(reader)  # skip the CSV header row
            for row in reader:
                if int(row[2]) == region:
                    camps.append(row)
        if camps:
            self.campList = camps
            self.campName.set(camps[0][1])
            self.displayCampName()
        else:
            tkinter.messagebox.showerror(master=self.window, title="Sorry", message="No Camping Site Added in this region. Contact Administrator.")
            return

    def displayCampName(self):
        """Rebuild the camp dropdown from self.campList and show the Book button."""
        menu = self.optionCamp["menu"]
        menu.delete(0, "end")
        for value in self.campList:
            # Default argument binds the camp name now (avoids the
            # late-binding-closure pitfall in the loop).
            menu.add_command(label=value[1], command=lambda v=value[1]: self.campName.set(v))
        self.optionCamp.pack(side=tk.TOP, pady=(0, 30))
        self.bookButton.pack(side=tk.TOP)

    def saveBooking(self):
        """Persist a new booking and open the confirmation window."""
        bookingID = random.randint(100, 999)
        newBooking = classBooking.Booking(bookingID, self.campName.get(), self.vanType.get(), self.regionName.get(), date.today())
        newBooking.writeBookingData()
        bookingWindow = tkBooking.BookingWindow(self.window, [bookingID, self.campName.get(), self.vanType.get(), self.regionName.get(), date.today()])
        bookingWindow.window.mainloop()
#!/usr/bin/env python3
import sys, math, numpy, os.path, collections, argparse
def parse_args(arglist):
    """Build the CLI parser, parse arglist and return the namespace.

    A trailing '.txt' is stripped from the positional path so it can be used
    as a prefix for the coverage and output file names.
    """
    parser = argparse.ArgumentParser()
    # (flags, keyword arguments) table, in the order options appear in --help.
    option_table = [
        (("path",), dict(help="Path/prefix for SNP file, and maybe coverage and output files")),
        (("--coverfile",), dict(help="Coverage file, if different from <path>_cover.txt")),
        (("--outpath",), dict(help="path/prefix for output files, if different from input")),
        (("--scales",), dict(help="number of length scales to use for windows", type=int, default=16)),
        (("--ratio",), dict(help="Ratio between successive length scales. Must be integer", type=int, default=2)),
        (("--baselength",), dict(help="Smallest length scale", type=int, default=80)),
        (("--min_coverage",), dict(help="Minimum coverage for a window to be counted", type=float, default=0.8)),
        (("--sample_gaps",), dict(help="For counts, up- or downsample windows to the same coverage", choices=['up', 'down', 'no'], default='down')),
        (("--stat",), dict(help="Statistic to calculate (tbl, individual tips, folded sfs component)", default='tbl')),
        (("--input_form",), dict(help="Format of input", choices=['msmc', 'windows', 'reverse_windows'], default='msmc')),
        (("--final_windows",), dict(help="Print out the longest windows", action='store_true')),
        (("--seed",), dict(help="Seed for random sampling of windows", type=int)),
    ]
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    parsed = parser.parse_args(arglist)
    if parsed.path.endswith('.txt'):
        parsed.path = parsed.path[:-4]
    return parsed
def merge(windows, num):
    """Sum consecutive groups of `num` windows into single coarser windows.

    Windows that don't fit an exact multiple of `num` are dropped from
    whichever end has the lower coverage.
    :param windows: list of numpy arrays [snp_count, covered_bases]
    :param num: merge factor (ratio between successive length scales)
    """
    if num % 1:
        sys.exit('Ratio of succesive window sizes must be an integer')
    # Trim until the length divides evenly.
    # to squeeze the most out of data, better to do chromosome arms separately,
    # so that centromeric region also gets dropped
    while len(windows) % num:
        if windows[0][1] < windows[-1][1]:
            del windows[0]
        else:
            del windows[-1]
    return [numpy.sum(windows[start:start + num], axis=0)
            for start in range(0, len(windows), num)]
def snpdist(windows, L, args, rng=numpy.random):
    """Build a {snp_count: n_windows} histogram at one length scale L.

    Windows below args.min_coverage * L covered bases are skipped. Depending
    on args.sample_gaps the remaining windows are up-sampled (binomial
    fill-in for the missing bases) or down-sampled (hypergeometric draw to
    the minimum coverage) so counted windows represent comparable spans.
    """
    dist = {}
    for win in windows:
        snps, covered = win[0], win[1]
        if covered < args.min_coverage * L:
            continue  # too many gaps — window not sequenced well enough
        if args.sample_gaps == 'up' and covered < L and snps:
            # upsample: add expected SNPs for the uncovered bases
            count = snps + rng.binomial(L - covered, snps / L)
        elif args.sample_gaps == 'down' and covered > math.ceil(args.min_coverage * L) and snps:
            # downsample to exactly the minimum acceptable coverage
            count = rng.hypergeometric(snps, covered - snps, math.ceil(args.min_coverage * L))
        else:
            # record window as-is
            count = snps
        dist[count] = dist.get(count, 0) + 1
    return dist
def initwindows(SNPs, basecoverage, args):
    """Bin SNP positions into base-length windows paired with their coverage.

    :param SNPs: 1-based positions of polymorphic sites
    :param basecoverage: covered-base count per window, one entry per window
    :param args: needs args.baselength (smallest window length L0)
    :return: list of numpy arrays [snp_count, covered_bases], one per window
    """
    L0 = args.baselength
    # initialize windows. numpy.histogram needs len(basecoverage)+1 bin edges
    # to produce len(basecoverage) counts; the original stop of len*L0+1
    # omitted the final edge, so zip() silently dropped the last window.
    edges = list(range(1, len(basecoverage)*L0+2, L0))
    windowcounts = numpy.histogram(SNPs, bins=edges)[0]
    return [numpy.array(x) for x in zip(windowcounts, basecoverage)]
if __name__ == '__main__':
    args = parse_args(sys.argv[1:])

    # Resolve the coverage and output names from the CLI options.
    if args.coverfile:
        covername = args.coverfile
    else:
        covername = args.path + '_cover.txt'
    if args.outpath:
        outname = args.outpath
    else:
        outname = args.path

    # RNG for up/down-sampling. Created for every input form: the original
    # only defined it in the 'msmc' branch, so 'windows'/'reverse_windows'
    # input crashed with NameError when snpdist was called.
    prng = numpy.random.RandomState(args.seed)

    if args.input_form == 'msmc':
        with open(args.path+'.txt', 'r') as infile:
            samplesize = len(next(infile).split()[-1]) # might need this if we're calculating SFS or individuals' singletons
        with open(args.path+'.txt', 'r') as infile:
            if args.stat == 'tbl':
                # list of positions of every polymorphic site
                SNPs = [int(line.split()[1]) for line in infile]
            elif args.stat == 'tbl_alleles':
                # Only refuse when sampling is actually requested: the original
                # tested the always-truthy string args.sample_gaps ('no' is
                # truthy too) and therefore exited unconditionally.
                if args.sample_gaps != 'no':
                    sys.exit('Up/down-sampling only work with number of segregating sites, not number of alleles')
                # list of positions of every polymorphic site, with multiplicity = num alleles - 1
                SNPs = [int(line.split()[1]) for line in infile for _ in range(1, len(set(line.split()[-1])))]
            elif args.stat == 'indiv_tips':
                if args.final_windows:
                    print("No final_windows option yet for indiv_singletons", file=sys.stderr)
                # break out singletons in each individual separately
                SNPs = {i: [] for i in range(0, samplesize//2)}
                for line in infile:
                    alleles = line.split()[-1]
                    for allele, num in collections.Counter(alleles).items():
                        if num == 1:
                            SNPs[alleles.find(allele)//2].append(int(line.split()[1]))
            else: #guess that we're trying to use sites with allele in n copies for some n
                try:
                    copies = int(args.stat)
                except ValueError:
                    sys.exit("Not sure what statistic to calculate")
                if copies not in range(1, samplesize):
                    sys.exit("SFS component out of range")
                # list of positions of every site (without multiplicity) where an allele has n copies
                SNPs = [int(line.split()[1]) for line in infile if copies in collections.Counter(line.split()[-1]).values()]
        with open(covername, 'r') as coverfile:
            basecoverage = [int(x) for x in coverfile]
    elif args.input_form == 'windows':
        with open(args.path+'.txt', 'r') as infile:
            windows = [numpy.array([int(x) for x in line.split()[:2]]) for line in infile]
    elif args.input_form == 'reverse_windows':
        with open(args.path+'.txt', 'r') as infile:
            windows = [numpy.array([int(x) for x in reversed(line.split()[:2])]) for line in infile]

    #count diversity in windows:
    if args.stat == 'indiv_tips':
        windowsums = {indiv: [] for indiv in SNPs.keys()}
    else:
        windowsums = []
    with open(outname+'.log', 'w') as outfile:
        for fold in range(args.scales):
            print('Calculating stats for lengthscale '+str(fold), file=outfile)
            L = args.baselength * pow(args.ratio, fold)
            if args.stat == 'indiv_tips':
                try:
                    windows = {indiv: merge(indivwindows, args.ratio) for indiv, indivwindows in windows.items()}
                except NameError:
                    # first scale: windows don't exist yet, build them from SNP lists
                    windows = {indiv: initwindows(indivSNPs, basecoverage, args) for indiv, indivSNPs in SNPs.items()}
                # Iterate a snapshot: entries are deleted in the loop body, and
                # mutating a dict during items() iteration raises RuntimeError.
                for indiv, indivwindows in list(windows.items()):
                    if len(indivwindows) < 2:
                        print('Reached chromosome length at length scale {} for individual {}'.format(fold, indiv), file=outfile)
                        del windows[indiv]
                    else:
                        windowsums[indiv].append(snpdist(indivwindows, L, args, prng))
                if not windows:
                    break
            else:
                try:
                    windows = merge(windows, args.ratio)
                except NameError:
                    windows = initwindows(SNPs, basecoverage, args)
                if len(windows) < 2:
                    print('Reached chromosome length at length scale ' + str(fold), file=outfile)
                    break
                windowsums.append(snpdist(windows, L, args, prng))

    # Write one counts file per individual, or a single combined one.
    if args.stat == 'indiv_tips':
        for indiv, indivwindows in windowsums.items():
            with open(outname+'{}_counts.txt'.format(indiv), 'w') as outfile:
                for scale in indivwindows:
                    print(', '.join(' '.join(str(x) for x in pair) for pair in sorted(scale.items())), file=outfile)
    else:
        with open(outname+'_counts.txt', 'w') as outfile:
            for scale in windowsums:
                print(', '.join(' '.join(str(x) for x in pair) for pair in sorted(scale.items())), file=outfile)
    if args.final_windows:
        with open(outname+'_windows'+str(fold)+'.txt', 'w') as outfile:
            print('\n'.join((' '.join(str(x) for x in window)) for window in windows), file=outfile)
|
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_daq as daq
from dash.dependencies import Input, Output, State
from datetime import date, datetime, timedelta
import plotly.express as px
import pandas as pd
import requests
import base64
from utility.utils import (logger, prediction_endpoint, TODAY,
correction_endpoint, translation_endpoint,
get_tweets_by_topic_endpoint)
from utility.params import topic
# from utility.functions import get_data
from main import app
from wordcloud import WordCloud, STOPWORDS
# Dashboard layout: page header plus two tabs.
#  - "Topic": aggregate analytics for every tweet on a topic within a date
#    range (tweet-count LED, word cloud, sentiment bar/pie charts, polarity
#    and subjectivity histograms and box plots) — filled by update_figure.
#  - "Tweet": free-text input run through correction / translation /
#    sentiment prediction, with two output gauges — filled by update_tweet.
layout = html.Div([
    html.H1(children='ANALYTICS DASHBOARD'),
    dcc.Tabs([
        dcc.Tab(label='Topic', children=[
            # Header row: description plus column titles for the inputs below.
            html.Div([
                html.Div('Analyzie data by topic.', style={'width': '40%', 'display': 'inline-block'}),
                html.Div('Topic', style={'width': '20%', 'display': 'inline-block'}),
                html.Div('Range time', style={'width': '20%', 'display': 'inline-block'}),
            ]),
            # Input row: topic text box, date-range picker and submit button.
            html.Div([
                html.Div('', style={'width': '40%', 'display': 'inline-block'}),
                dbc.Input(id='topic-input-analytics', type='text', value=topic, style={'width': '20%', 'display': 'inline-block'}),
                # html.Div('', style={'width': '10%', 'display': 'inline-block'}),
                # dbc.Input(id='date_range_analytics', type='number', value=RANGE_TIME, style={'width': '20%', 'display': 'inline-block'})
                dcc.DatePickerRange(
                    id='range-time-input',
                    display_format='YYYY-MM-DD',
                    start_date_placeholder_text="Start Period",
                    end_date_placeholder_text="End Period",
                    initial_visible_month=date(TODAY.year, TODAY.month, 1),
                    min_date_allowed=date(2021, 9, 1),
                    max_date_allowed=TODAY,
                    minimum_nights=0
                ),
                dbc.Button('Submit', id='submit-topic-button',
                           outline=False, color="primary", n_clicks=0, className="mr-1"),
            ]),
            # Tweet counter (LED display) next to the word-cloud image.
            html.Div([
                # html.H2('Tweets count:'),
                # html.Div('-', id='tweets-count-id'),
                daq.LEDDisplay(
                    id='tweets-count-id',
                    label=dict(
                        label="Tweets count",
                        style={'width': '100'}
                    ),
                    value='0',
                    size=150
                    # color="#FF5E5E"
                )
            ], style={'width': '40%', 'display': 'inline-block'}),
            html.Div([
                html.Br(),
                # dcc.Markdown("", id='world-cloud-img', style={'width': '100%'})
                html.Img(src="", id='world-cloud-img', width='100%')
            ], style={'width': '60%', 'display': 'inline-block'}),
            # html.Div([], style={'width': '40%', 'display': 'inline-block'}),
            # Two-column grid of sentiment / polarity / subjectivity figures.
            html.Div([
                dcc.Graph(id='barplot-sentence-count'),
            ], style={'width': '50%', 'display': 'inline-block'}),
            html.Div([
                dcc.Graph(id='piechart-sentence-count'),
            ], style={'width': '50%', 'display': 'inline-block'}),
            html.Div([
                dcc.Graph(
                    id='hist-polarity-topic'
                )], style={'width': '50%', 'display': 'inline-block'}),
            html.Div([
                dcc.Graph(
                    id='hist-subjectivity-topic'
                )], style={'width': '50%', 'display': 'inline-block'}),
            html.Div([
                dcc.Graph(
                    id='box-polarity-topic'
                )], style={'width': '50%', 'display': 'inline-block'}),
            html.Div([
                dcc.Graph(
                    id='box-subjectivity-topic'
                )], style={'width': '50%', 'display': 'inline-block'}),
        ]),
        dcc.Tab(label='Tweet', children=[
            # Left column: tweet input and the corrected/translated texts.
            html.Div([
                html.Div('Insert your tweet:'),
                # dbc.Input(id='topic_input_analytics_2', type='text', placeholder='Insert Here...'),
                dbc.Textarea(
                    id='textarea-state',
                    placeholder='Put your tweet here...',
                    bs_size="lg",
                    style={'width': '100%', 'height': 100},
                ),
                dbc.Button('Submit', id='textarea-state-button',
                           outline=False, color="primary", n_clicks=0, className="mr-1"),
                html.Div(style={'width': '100%', 'height': 50}),
                html.Div('Correct tweet:'),
                html.Div('-', id='correct-tweet'),
                html.Div(style={'width': '100%', 'height': 50}),
                html.Div('Translation:'),
                html.Div('-', id='translated-tweet'),
                html.Div(style={'width': '100%', 'height': 50}),
            ], style={'width': '50%', 'display': 'inline-block'}),
            # Right column: gauges for the single-tweet sentiment prediction.
            html.Div([
                daq.Gauge(
                    id='gauge-analytics-1',
                    value=-1,
                    label='Polarity',
                    max=1,
                    min=-1,
                ),
                daq.Gauge(
                    id='gauge-analytics-2',
                    value=0,
                    label='Subjectivity',
                    max=1,
                    min=0,
                )
            ], style={'width': '50%', 'display': 'inline-block'}),
        ])
    ])
])
# =============================================================================
# CALLBACK
# =============================================================================
@app.callback(
    [Output('correct-tweet', 'children'),
     Output('translated-tweet', 'children'),
     Output('gauge-analytics-1', 'value'),
     Output('gauge-analytics-2', 'value'),],
    Input('textarea-state-button', 'n_clicks'),
    State('textarea-state', 'value')
)
def update_tweet(n, text):
    """Run the submitted tweet through the correction, translation and
    sentiment-prediction services and feed the four output components.

    Does nothing until the button has been clicked at least once; service
    failures are logged and leave the outputs unchanged.
    """
    if n > 0:
        try:
            logger.info(f'INPUT TEXT: {text}, TYPE: {type(text)}')
            # Step 1: spelling/typo correction.
            response = requests.get(correction_endpoint, params={'tweet': text})
            correct_text = response.json()['correct_tweet']
            logger.info(f'CLEAN TEXT: {correct_text}')
            # Step 2: translate the corrected text.
            response = requests.get(translation_endpoint, params={'tweet': correct_text})
            translated_text = response.json()['translated_tweet']
            logger.info(f'TRANSLATED TEXT: {translated_text}')
            # Step 3: sentiment prediction on the corrected text.
            response = requests.get(prediction_endpoint, params={'tweet': correct_text, 'topic': 'None'})
            prediction = response.json()
            logger.info(f'RESPONSE: {prediction}')
            return (correct_text, translated_text,
                    prediction['polarity'], prediction['subjectivity'])
        except Exception as ex:
            logger.exception(ex)
@app.callback(
    [Output('tweets-count-id', 'value'),
     Output('barplot-sentence-count', 'figure'),
     Output('piechart-sentence-count', 'figure'),
     Output('world-cloud-img', 'src'),
     Output('hist-polarity-topic', 'figure'),
     Output('hist-subjectivity-topic', 'figure'),
     Output('box-polarity-topic', 'figure'),
     Output('box-subjectivity-topic', 'figure'),
     ],
    [Input('submit-topic-button', 'n_clicks')],
    [State('topic-input-analytics', 'value'),
     State('range-time-input', 'start_date'),
     State('range-time-input', 'end_date')]
)
def update_figure(n, topic, start_date, end_date):
    """Fetch tweets for `topic` between start_date and end_date and build the
    analytics dashboard outputs: tweet count, sentiment bar/pie charts, a
    word-cloud image (as a base64 data URI) and polarity/subjectivity
    histograms and box plots."""
    # BUG FIX: n_clicks is None before the first click; `None > 0` raises
    # TypeError, so test truthiness instead.
    if n:
        try:
            # Shift end_date one day forward so the chosen end day is inclusive.
            end_date = (datetime.strptime(end_date, '%Y-%m-%d') + timedelta(days=1)).strftime('%Y-%m-%d')
            payload = dict(topic=topic, start_date=start_date, end_date=end_date)
            logger.info(f'GTBT PAYLOAD: {payload}')
            out = requests.get(get_tweets_by_topic_endpoint, params=payload).json()
            data, columns = out['tweets'], out['columns']
            df = pd.DataFrame(data, columns=columns)
            logger.info(df.head())
            tweets_count = str(len(df))
            # Per-sentiment counts for the bar/pie charts.
            df_mod = df.copy().groupby(['sentence']).agg(count=('sentence', 'count')).reset_index()
            logger.info(df_mod.head())
            # SENTIMENT BARPLOT
            fig = px.bar(df_mod, x='sentence',
                         y='count',
                         color='sentence',
                         barmode='group',
                         title='Sentence Barplot',
                         color_discrete_map={'positive': 'lightcyan',
                                             'negative': 'cyan',
                                             'neutral': 'royalblue'})
            fig.update_layout({
                'plot_bgcolor': 'rgba(0, 0, 0, 0)'
            })
            # SENTIMENT PIECHART
            fig1 = px.pie(df_mod,
                          names='sentence',
                          values='count',
                          title='Sentence Piechart',
                          color='sentence',
                          # color_discrete_sequence=px.colors.sequential.RdBu)
                          color_discrete_map={'positive': 'lightcyan',
                                              'negative': 'cyan',
                                              'neutral': 'royalblue'})
            # BUG FIX: this second update_layout used to be applied to `fig`
            # again, leaving the pie chart without the transparent background.
            fig1.update_layout({
                'plot_bgcolor': 'rgba(0, 0, 0, 0)'
            })
            # WORD CLOUD
            stopwords = set(STOPWORDS)
            wordcloud = WordCloud(max_font_size=50,
                                  max_words=100,
                                  stopwords=stopwords,
                                  background_color="white").generate(" ".join(df['tweet']))
            wordcloud.to_file("word_cloud_image.png")
            # Use a context manager so the file handle is not leaked, and
            # decode the base64 bytes directly instead of slicing the repr.
            with open("word_cloud_image.png", 'rb') as img_file:
                encoded_image = base64.b64encode(img_file.read())
            img = encoded_image.decode('ascii')
            fig2 = 'data:image/png;base64,{}'.format(img)
            # POLARITY HISTOGRAM
            fig3 = px.histogram(df, x='polarity')
            fig3.update_layout({
                'plot_bgcolor': 'rgba(0, 0, 0, 0)'
            })
            # SUBJECTIVITY HISTOGRAM
            fig4 = px.histogram(df, x='subjectivity')
            fig4.update_layout({
                'plot_bgcolor': 'rgba(0, 0, 0, 0)'
            })
            # POLARITY BOX PLOT
            fig5 = px.box(df, x='polarity', title="Polarity Boxplot", points="all")
            fig5.update_layout({
                'plot_bgcolor': 'rgba(0, 0, 0, 0)'
            })
            # SUBJECTIVITY BOX PLOT
            fig6 = px.box(df, x='subjectivity', title="Subjectivity Boxplot", points="all")
            fig6.update_layout({
                'plot_bgcolor': 'rgba(0, 0, 0, 0)'
            })
            return tweets_count, fig, fig1, fig2, fig3, fig4, fig5, fig6
        except Exception as ex:
            logger.exception(ex)
<reponame>codacy-badger/prototorch
"""ProtoTorch GLVQ example using 2D Iris data."""
import numpy as np
import torch
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from prototorch.functions.distances import euclidean_distance
from prototorch.modules.losses import GLVQLoss
from prototorch.modules.prototypes import Prototypes1D
# Prepare and preprocess the data
scaler = StandardScaler()
# BUG FIX: `load_iris(True)` passed `return_X_y` positionally; scikit-learn
# made that argument keyword-only (positional use deprecated in 0.23 and
# removed in 1.2), so spell it out.
x_train, y_train = load_iris(return_X_y=True)
# Keep only sepal length and petal length -> 2D data for visualization.
x_train = x_train[:, [0, 2]]
scaler.fit(x_train)
x_train = scaler.transform(x_train)
# Define the GLVQ model
class Model(torch.nn.Module):
    """GLVQ model: one zero-initialized 2D prototype per Iris class."""

    def __init__(self, **kwargs):
        super().__init__()
        self.p1 = Prototypes1D(
            input_dim=2,
            prototypes_per_class=1,
            nclasses=3,
            prototype_initializer='zeros',
        )

    def forward(self, x):
        """Return distances from `x` to all prototypes plus prototype labels."""
        prototypes = self.p1.prototypes
        labels = self.p1.prototype_labels
        distances = euclidean_distance(x, prototypes)
        return distances, labels
# Build the GLVQ model
model = Model()
# Optimize using SGD optimizer from `torch.optim`
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = GLVQLoss(squashing='sigmoid_beta', beta=10)
# Training loop: one full-batch gradient step per epoch, then redraw the
# live visualization of data, prototypes and decision regions.
fig = plt.figure('Prototype Visualization')
for epoch in range(70):
    # Compute loss.
    distances, plabels = model(torch.tensor(x_train))
    loss = criterion([distances, plabels], torch.tensor(y_train))
    print(f'Epoch: {epoch + 1:03d} Loss: {loss.item():02.02f}')
    # Take a gradient descent step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Get the prototypes from the model
    protos = model.p1.prototypes.data.numpy()
    # Visualize the data and the prototypes
    ax = fig.gca()
    ax.cla()
    cmap = 'viridis'
    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor='k')
    ax.scatter(protos[:, 0],
               protos[:, 1],
               c=plabels,
               cmap=cmap,
               edgecolor='k',
               marker='D',
               s=50)
    # Paint decision regions over a padded bounding box of data + prototypes,
    # sampled at `resolution` points per unit.
    border = 1
    resolution = 50
    x = np.vstack((x_train, protos))
    x_min, x_max = x[:, 0].min(), x[:, 0].max()
    y_min, y_max = x[:, 1].min(), x[:, 1].max()
    x_min, x_max = x_min - border, x_max + border
    y_min, y_max = y_min - border, y_max + border
    try:
        xx, yy = np.meshgrid(np.arange(x_min, x_max, 1.0 / resolution),
                             np.arange(y_min, y_max, 1.0 / resolution))
    except ValueError as ve:
        print(ve)
        raise ValueError(f'x_min: {x_min}, x_max: {x_max}. '
                         f'x_min - x_max is {x_max - x_min}.')
    except MemoryError as me:
        print(me)
        raise ValueError('Too many points. ' 'Try reducing the resolution.')
    # Classify every mesh point by its nearest prototype (argmin of distance).
    mesh_input = np.c_[xx.ravel(), yy.ravel()]
    torch_input = torch.from_numpy(mesh_input)
    d = model(torch_input)[0]
    y_pred = np.argmin(d.detach().numpy(), axis=1)
    y_pred = y_pred.reshape(xx.shape)
    # Plot voronoi regions
    ax.contourf(xx, yy, y_pred, cmap=cmap, alpha=0.35)
    ax.set_xlim(left=x_min + 0, right=x_max - 0)
    ax.set_ylim(bottom=y_min + 0, top=y_max - 0)
    plt.pause(0.1)
|
<reponame>HungYangChang/HAET-2021-competition
import torch
import sys
import os
from utils import *
import os.path
import torchvision
total_class = 10
# load data
def Data_load(root='./data'):
    """Download CIFAR10 into `root` and return raw arrays and targets.

    Returns a dict with 'train' and 'valid' keys, each mapping to
    {'data': ndarray, 'targets': list}.
    """
    train_set = torchvision.datasets.CIFAR10(root=root, train=True, download=True)
    valid_set = torchvision.datasets.CIFAR10(root=root, train=False, download=True)
    return {
        'train': {'data': train_set.data, 'targets': train_set.targets},
        'valid': {'data': valid_set.data, 'targets': valid_set.targets},
    }
data_sampled = Data_load('./data')
# calculate mean and std of data (per RGB channel, over all train pixels)
# NOTE(review): `np`, `partial`, `GhostBatchNorm` and `nn` are not imported
# in this file directly -- presumably supplied by `from utils import *`;
# confirm against utils.py.
data_mean = np.mean(data_sampled['train']['data'], axis=(0,1,2))
data_std = np.std(data_sampled['train']['data'], axis=(0,1,2))
print (data_mean,data_std)
# Pre-bound layer factories used by conv_bn below.
batch_norm = partial(GhostBatchNorm, num_splits=4, weight_freeze=True)
relu = partial(nn.CELU, alpha=0.3)
def conv_bn(c_in, c_out, pool=None):
    """Describe a conv -> (optional pool) -> bn -> relu block as a dict.

    The dict insertion order is the layer order; `pool`, when given, is
    placed between the convolution and the batch norm.
    """
    layers = {
        'conv': nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False),
    }
    if pool:
        layers['pool'] = pool
    layers['bn'] = batch_norm(c_out)
    layers['relu'] = relu()
    return layers
print('Downloading datasets')
dataset = map_nested(torch.tensor, data_sampled)
## if training sample = 5000, the following setting gives best result (87%acc)
epochs, ema_epochs = 60, 10
# Warm up to lr=1.0 over 12 epochs, then decay to 1e-4 before the EMA phase.
lr_schedule = PiecewiseLinear([0, 12, epochs-ema_epochs], [0, 1.0, 1e-4])
batch_size = 128
float_size = torch.float16
# data_augmentation
train_transforms = [Crop(32, 32), FlipLR()]
loss = label_smoothing_loss(0.2)
print('Starting timer')
timer = Timer(synch=torch.cuda.synchronize)
print('Preprocessing training data')
dataset = map_nested(to(device), dataset)
T = lambda x: torch.tensor(x, dtype=float_size, device=device)
# Normalise with the channel stats computed above and switch to NCHW layout.
transforms = [
    to(dtype=float_size),
    partial(normalise, mean=T(data_mean), std=T(data_std)),
    partial(transpose, source='NHWC', target='NCHW'),
]
train_set = preprocess(dataset['train'], transforms + [partial(pad, border=4)])
print(f'Finished in {timer():.2} seconds')
print(train_set['data'].shape[0], ' train imgs')
# create network (fp16 weights; helpers come from utils)
model = Network(net(weight=1/16, conv_bn=conv_bn, prep=conv_bn, total_class=total_class)).to(device).half()
train_batches = GPUBatches(batch_size=batch_size, transforms=train_transforms, dataset=train_set, shuffle=True, drop_last=False, max_options=200)
# Separate optimizers: biases get a larger lr and smaller weight decay.
is_bias = group_by_key(('bias' in k, v) for k, v in trainable_params(model).items())
opts = [
    SGD(is_bias[False], {'lr': (lambda step: lr_schedule(step/len(train_batches))/batch_size), 'weight_decay': Const(5e-4*batch_size), 'momentum': Const(0.9)}),
    SGD(is_bias[True], {'lr': (lambda step: lr_schedule(step/len(train_batches))*(64/batch_size)), 'weight_decay': Const(5e-4*batch_size/64), 'momentum': Const(0.9)})
]
## training: forward/backward/step plus an EMA copy updated every 5 steps
logs_train, state = Table(), {MODEL: model, VALID_MODEL: copy.deepcopy(model), LOSS: loss, OPTS: opts}
default_train_steps = (forward(training_mode=True), log_activations(('loss', 'acc')), backward(), opt_steps)
for epoch in range(epochs):
    logs_train.append(union({'epoch': epoch+1}, train_epoch_new(state, timer, train_batches,
                     train_steps=(*default_train_steps, update_ema(momentum=0.99, update_freq=5))
                     )))
## save network
net_save = 'HAET_model.pt'
state = {'net': model.state_dict()}
torch.save(state, net_save)
print ("Save network!")
|
<gh_stars>1-10
import pe
from random import randint, choice
import pytest
import os
import itertools
import inspect
from pe_core import pe_core_genesis2
import glob
import fault
# PECore uses it's own tester rather than a functional tester because the pe.py
# functional model doesn't match the new garnet functional model interface.
# Once we have a new functional model, we can migrate this tester to use the
# Testers in common.testers
class PECoreTester(fault.Tester):
    """fault.Tester with PE-core specific reset and configure helpers."""

    def reset(self):
        """Pulse rst_n high-low-high (active-low reset), evaluating each level."""
        for level in (1, 0, 1):
            self.poke(self.circuit.rst_n, level)
            self.eval()

    def configure(self, lut_code, cfg_d, debug_trig=0, debug_trig_p=0):
        """Program the LUT code, opcode word and debug triggers over the cfg bus."""
        self.poke(self.circuit.cfg_en, 1)
        # TODO: Get these addresses from the PECore generator
        # TODO: Why did we set the 0xF0-0xF5 addresses to 0?
        writes = (
            (lut_code, 0x00),
            (0, 0xF0),
            (0, 0xF1),
            (0, 0xF3),
            (0, 0xF4),
            (0, 0xF5),
            (cfg_d, 0xFF),
            (debug_trig, 0xE0),
            (debug_trig_p, 0xE1),
        )
        for value, address in writes:
            self.poke(self.circuit.cfg_d, value)
            self.poke(self.circuit.cfg_a, address)
            self.step(2)
        self.poke(self.circuit.cfg_en, 0)
        self.step(2)
# Generate the PE
pe_core = pe_core_genesis2.pe_core_wrapper.generator()()
# Compile once at import time so every test shares the same verilator build.
_tester = PECoreTester(pe_core, pe_core.clk)
_tester.compile(target='verilator', directory="test_pe_core/build",
                include_directories=["../../genesis_verif"],
                magma_output="verilog",
                flags=['-Wno-fatal'])
@pytest.fixture(scope="module")
def tester():
    """Module-scoped fixture exposing the pre-compiled PECoreTester.

    BUG FIX: the original declared `def tester(scope="module")`, which makes
    pytest treat `scope` as a fixture *request parameter* instead of setting
    the fixture's scope; the scope argument belongs in the decorator.
    """
    return _tester
def teardown_module():
    """Remove genesis2 collateral files produced by the PE generator."""
    # Cleanup PE genesis2 collateral
    # for item in glob.glob('genesis_*'):
    #     os.system(f"rm -r {item}")
    # The originals were f-strings with no placeholders (lint F541); use a
    # loop over plain filenames instead.
    for leftover in ("PEtest_pe", "PECOMPtest_pe_comp_unq1",
                     "REGMODEtest_opt_reg", "REGMODEtest_opt_reg_file"):
        os.system(f"rm {leftover}")
ops, signed_ops = [], []
for name, op in inspect.getmembers(pe, inspect.isfunction):
    # Every function in pe is an op; those taking a `signed` parameter are
    # additionally collected as signed ops.
    ops.append(name)
    if "signed" in inspect.signature(op).parameters:
        signed_ops.append(name)
def pytest_generate_tests(metafunc):
    """Parametrize tests by fixture name.

    Any test declaring one of the fixture names below receives the matching
    parameter sweep; 'strategy' additionally honours the --longrun option to
    enable the exhaustive sweep.
    """
    if 'op' in metafunc.fixturenames:
        metafunc.parametrize("op", ops)
    if 'signed_op' in metafunc.fixturenames:
        metafunc.parametrize("signed_op", signed_ops)
        metafunc.parametrize("signed", [True, False])
    if 'const_value' in metafunc.fixturenames:
        metafunc.parametrize("const_value", range(16))
    if 'signed' in metafunc.fixturenames:
        metafunc.parametrize("signed", [True, False])
    if 'strategy' in metafunc.fixturenames:
        # Exhaustive sweep only under --longrun; random keeps normal runs fast.
        if metafunc.config.option.longrun:
            metafunc.parametrize("strategy", ["complete", "random"])
        else:
            metafunc.parametrize("strategy", ["random"])
    if 'flag_sel' in metafunc.fixturenames:
        metafunc.parametrize("flag_sel", range(0, 16))
    if 'lut_code' in metafunc.fixturenames:
        metafunc.parametrize("lut_code", range(0, 16))
    if 'random_op' in metafunc.fixturenames:
        metafunc.parametrize("random_op", [choice(ops)])
    if 'input_modes' in metafunc.fixturenames:
        # 5 inputs x 4 register modes each -> 4**5 combinations.
        input_modes = itertools.product(*(range(0, 4) for _ in range(5)))
        metafunc.parametrize("input_modes", input_modes)
    if 'irq_en_0' in metafunc.fixturenames:
        metafunc.parametrize("irq_en_0", [True, False])
    if 'irq_en_1' in metafunc.fixturenames:
        metafunc.parametrize("irq_en_1", [True, False])
    if 'debug_trig' in metafunc.fixturenames:
        metafunc.parametrize("debug_trig", [randint(0, (1 << 16) - 1)])
    if 'debug_trig_p' in metafunc.fixturenames:
        metafunc.parametrize("debug_trig_p", [randint(0, 1)])
def get_iter(strategy, signed):
    """Return an iterable of (data0, data1, bit0, bit1, bit2) stimulus tuples.

    Strategies:
        "complete"     -- exhaustive sweep of 4-bit data crossed with all bits.
        "random"       -- 16 random 16-bit data samples with random bits.
        "lut_complete" -- exhaustive sweep of {0,1} (or {-1,0,1} when signed)
                          data crossed with all bits.
    `signed` shifts the data ranges to be (roughly) symmetric around zero.

    Raises:
        NotImplementedError: for any other strategy name.
    """
    # BUG FIX: the original compared strings with `is`, which tests object
    # identity and only works by CPython interning accident (SyntaxWarning on
    # modern Pythons); use `==` for value comparison.
    if strategy == "complete":
        width = 4
        N = 1 << width
        data_range = range(0, N) if not signed else range(- N // 2, N // 2)
        return itertools.product(
            data_range,   # data0
            data_range,   # data1
            range(0, 2),  # bit0
            range(0, 2),  # bit1
            range(0, 2)   # bit2
        )
    elif strategy == "random":
        n = 16
        width = 16
        N = 1 << width
        return [
            (randint(0, N - 1) if not signed else
             randint(- N // 2, N // 2 - 1),  # data0
             randint(0, N - 1) if not signed else
             randint(- N // 2, N // 2 - 1),  # data1
             randint(0, 1),  # bit0
             randint(0, 1),  # bit1
             randint(0, 1))  # bit2
            for _ in range(n)
        ]
    elif strategy == "lut_complete":
        data_range = range(0, 2) if not signed else range(-1, 2)
        return itertools.product(
            data_range,   # data0
            data_range,   # data1
            range(0, 2),  # bit0
            range(0, 2),  # bit1
            range(0, 2)   # bit2
        )
    raise NotImplementedError(strategy)
def run_test(tester, functional_model, strategy, signed, lut_code,
             cfg_d, debug_trig=0, debug_trig_p=0, with_clk=False):
    """Drive the PE core with stimulus from get_iter and compare every
    output (res, res_p, irq) against the functional model.

    with_clk=False checks combinational behaviour via eval(); with_clk=True
    steps the clock twice per input vector so registered input modes are
    exercised, evaluating the functional model on each half-step.
    """
    tester.clear()
    pe_core = tester.circuit
    tester.poke(pe_core.clk_en, 1)
    tester.reset()
    _iter = get_iter(strategy, signed)
    tester.configure(lut_code, cfg_d, debug_trig, debug_trig_p)
    print("---FLAG---")
    print(functional_model.flag_sel)
    print("---")
    for data0, data1, bit0, bit1, bit2 in _iter:
        tester.poke(pe_core.data0, data0)
        tester.poke(pe_core.data1, data1)
        tester.poke(pe_core.bit0, bit0)
        tester.poke(pe_core.bit1, bit1)
        tester.poke(pe_core.bit2, bit2)
        print(data0);
        print(data1);
        print(bit0)
        print(bit1)
        print(bit2)
        print("---")
        if not with_clk:
            tester.eval()
            res, res_p, irq = functional_model(data0=data0, data1=data1,
                                               bit0=bit0, bit1=bit1, bit2=bit2)
            # print("-res_p-")
            tester.expect(pe_core.res, res)
            tester.expect(pe_core.res_p, res_p)
            tester.expect(pe_core.irq, irq)
            tester.eval()
            # print(res_p)
            # print("---")
        else:
            for i in range(2):
                tester.step()
                res, res_p, irq = functional_model(data0=data0, data1=data1,
                                                   bit0=bit0, bit1=bit1,
                                                   bit2=bit2, clk=i, clk_en=1)
                tester.expect(pe_core.res, res)
                tester.expect(pe_core.res_p, res_p)
                # print(res_p)
                # print("---")
                tester.expect(pe_core.irq, irq)
    # Actions above are recorded; this replays them against the verilator build.
    tester.run(target='verilator')
@pytest.mark.parametrize('random_flag', [randint(0, 15)])
@pytest.mark.parametrize('random_signed', [randint(0, 1)])
@pytest.mark.parametrize('random_lut_code', [randint(0, 1)])
@pytest.mark.parametrize('random_irq_en', [(randint(0, 1), randint(0, 1))])
@pytest.mark.parametrize('random_debug_trig', [(randint(0, (1 << 16) - 1),
                                                randint(0, 1))])
def test_op_random_quick(op, random_flag, random_signed, tester,
                         random_lut_code, random_irq_en, random_debug_trig):
    """Smoke-test every op once with randomized flag/lut/irq/debug settings."""
    irq_en_0, irq_en_1 = random_irq_en
    debug_trig, debug_trig_p = random_debug_trig
    signed = random_signed
    flag_sel = random_flag
    lut_code = random_lut_code
    if flag_sel in (0x4, 0x5, 0x6, 0x7, 0xA, 0xB, 0xC, 0xD):
        # Flag modes involving N or V are only defined for signed arithmetic.
        signed = True
    if op == "abs":
        # abs is only defined in signed mode.
        signed = True
    args = [signed] if op in signed_ops else []
    functional_model = getattr(pe, op)(*args)
    functional_model = functional_model.flag(flag_sel).lut(lut_code)
    functional_model = functional_model.irq_en(irq_en_0, irq_en_1)
    functional_model = functional_model.debug_trig(debug_trig)
    functional_model = functional_model.debug_trig_p(debug_trig_p)
    functional_model = functional_model.signed(signed)
    cfg_d = functional_model.instruction
    run_test(tester, functional_model, "random", signed, lut_code, cfg_d,
             debug_trig, debug_trig_p)
def test_op(strategy, op, flag_sel, signed, tester):
    """Run `op` under `flag_sel` with the given stimulus strategy."""
    skip = (
        flag_sel == 0xE  # LUT output mode, covered separately by test_lut
        # Flag modes involving N or V are only defined for signed arithmetic.
        or (flag_sel in (0x4, 0x5, 0x6, 0x7, 0xA, 0xB, 0xC, 0xD) and not signed)
        or (op == "abs" and not signed)  # abs only defined in signed mode
    )
    if skip:
        return
    lut_code = 0x00
    model_args = [signed] if op in signed_ops else []
    functional_model = getattr(pe, op)(*model_args)
    functional_model = functional_model.flag(flag_sel).lut(lut_code).signed(signed)
    cfg_d = functional_model.instruction
    run_test(tester, functional_model, strategy, signed, lut_code, cfg_d)
@pytest.mark.longrun
def test_input_modes(signed, input_modes, tester):
    """Exercise all register input-mode combinations on `add` with a random
    flag and LUT code, stepping the clock so registered modes take effect."""
    op = "add"
    lut_code = randint(0, 15)
    flag_sel = randint(0, 15)
    if not signed:
        # Redraw until the flag does not involve V (no overflow flag unsigned).
        while flag_sel in (0x6, 0x7, 0xA, 0xB, 0xC, 0xD):
            flag_sel = randint(0, 15)
    print(f"flag_sel={flag_sel}")
    data0_mode, data1_mode, bit0_mode, bit1_mode, bit2_mode = input_modes
    irq_en = 0
    acc_en = 0
    functional_model = getattr(pe, op)().flag(flag_sel).lut(lut_code).signed(signed)
    registers = (functional_model.rega, functional_model.regb,
                 functional_model.regd, functional_model.rege,
                 functional_model.regf)
    for register, mode in zip(registers, input_modes):
        register(mode)
    cfg_d = functional_model.instruction
    run_test(tester, functional_model, "random", signed, lut_code, cfg_d,
             with_clk=True)
def test_lut(signed, lut_code, tester):
    """Exhaustively verify the LUT predicate output for each 4-bit lut_code.

    flag_sel 0xE routes the LUT to the predicate output, so the arithmetic
    op is irrelevant here; a fixed `add` keeps the test deterministic.
    Cleanup: removed unused locals (`*_mode`, `irq_en`, `acc_en`) that were
    assigned but never read.
    """
    op = "add"
    flag_sel = 0xE  # Lut output
    functional_model = getattr(pe, op)().flag(flag_sel)\
                                        .lut(lut_code)\
                                        .signed(signed)
    cfg_d = functional_model.instruction
    run_test(tester, functional_model, "lut_complete", signed, lut_code,
             cfg_d)
def test_irq(strategy, irq_en_0, irq_en_1, debug_trig, debug_trig_p, signed,
             tester):
    """Check irq generation for `add` under the Z flag with debug triggers."""
    op = "add"
    flag_sel = 0x0  # Z
    lut_code = 0x0
    acc_en = 0
    functional_model = getattr(pe, op)()
    functional_model = functional_model.flag(flag_sel).lut(lut_code)
    functional_model = functional_model.irq_en(irq_en_0, irq_en_1)
    functional_model = functional_model.debug_trig(debug_trig)
    functional_model = functional_model.debug_trig_p(debug_trig_p)
    functional_model = functional_model.signed(signed)
    cfg_d = functional_model.instruction
    run_test(tester, functional_model, strategy, signed, lut_code, cfg_d,
             debug_trig, debug_trig_p)
|
<filename>examples/dry_bf_bubble.py
from gusto import *
from firedrake import (IntervalMesh, ExtrudedMesh,
SpatialCoordinate, conditional, cos, pi, sqrt,
TestFunction, dx, TrialFunction, Constant, Function,
LinearVariationalProblem, LinearVariationalSolver, DirichletBC,
FunctionSpace, BrokenElement, VectorFunctionSpace)
from firedrake.slope_limiter.vertex_based_limiter import VertexBasedLimiter
import sys
dt = 1.0
# Coarser/shorter run for CI; full resolution otherwise.
if '--running-tests' in sys.argv:
    tmax = 10.
    deltax = 1000.
else:
    deltax = 100.
    tmax = 1000.
if '--recovered' in sys.argv:
    recovered = True
else:
    recovered = False
if '--limit' in sys.argv:
    limit = True
else:
    limit = False
# make mesh: 1D interval extruded vertically into a 2D column domain
L = 10000.
H = 10000.
nlayers = int(H/deltax)
ncolumns = int(L/deltax)
m = IntervalMesh(ncolumns, L)
mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)
# options
diffusion = False
# recovered scheme uses lowest-order spaces (degree 0)
degree = 0 if recovered else 1
fieldlist = ['u', 'rho', 'theta']
timestepping = TimesteppingParameters(dt=dt, maxk=4, maxi=1)
dirname = 'dry_bf_bubble'
if recovered:
    dirname += '_recovered'
if limit:
    dirname += '_limit'
output = OutputParameters(dirname=dirname,
                          dumpfreq=20,
                          dumplist=['u'],
                          perturbation_fields=['theta'],
                          log_level='INFO')
params = CompressibleParameters()
diagnostics = Diagnostics(*fieldlist)
diagnostic_fields = []
state = State(mesh, vertical_degree=degree, horizontal_degree=degree,
              family="CG",
              timestepping=timestepping,
              output=output,
              parameters=params,
              diagnostics=diagnostics,
              fieldlist=fieldlist,
              diagnostic_fields=diagnostic_fields,
              u_bc_ids=[1, 2])
# Initial conditions
u0 = state.fields("u")
rho0 = state.fields("rho")
theta0 = state.fields("theta")
# spaces
Vu = u0.function_space()
Vt = theta0.function_space()
Vr = rho0.function_space()
x, z = SpatialCoordinate(mesh)
# Define constant theta_e and water_t
Tsurf = 300.0
theta_b = Function(Vt).interpolate(Constant(Tsurf))
# Calculate hydrostatic fields
compressible_hydrostatic_balance(state, theta_b, rho0, solve_for_rho=True)
# make mean fields
rho_b = Function(Vr).assign(rho0)
# define perturbation: cosine-squared bump of amplitude Tdash inside radius rc
xc = L / 2
zc = 2000.
rc = 2000.
Tdash = 2.0
r = sqrt((x - xc) ** 2 + (z - zc) ** 2)
theta_pert = Function(Vt).interpolate(conditional(r > rc,
                                                  0.0,
                                                  Tdash * (cos(pi * r / (2.0 * rc))) ** 2))
# define initial theta
theta0.assign(theta_b * (theta_pert / 300.0 + 1.0))
# find perturbed rho by projecting rho_b * theta_b / theta0 into Vr
gamma = TestFunction(Vr)
rho_trial = TrialFunction(Vr)
lhs = gamma * rho_trial * dx
rhs = gamma * (rho_b * theta_b / theta0) * dx
rho_problem = LinearVariationalProblem(lhs, rhs, rho0)
rho_solver = LinearVariationalSolver(rho_problem)
rho_solver.solve()
# initialise fields
state.initialise([('u', u0),
                  ('rho', rho0),
                  ('theta', theta0)])
state.set_reference_profiles([('rho', rho_b),
                              ('theta', theta_b)])
# Set up advection schemes
if recovered:
    # Recovered scheme: advect in DG1/vector-DG1 embedding spaces, recovering
    # higher-order fields from the lowest-order prognostics.
    VDG1 = state.spaces("DG1")
    VCG1 = FunctionSpace(mesh, "CG", 1)
    Vt_brok = FunctionSpace(mesh, BrokenElement(Vt.ufl_element()))
    Vu_DG1 = VectorFunctionSpace(mesh, VDG1.ufl_element())
    Vu_CG1 = VectorFunctionSpace(mesh, "CG", 1)
    Vu_brok = FunctionSpace(mesh, BrokenElement(Vu.ufl_element()))
    u_opts = RecoveredOptions(embedding_space=Vu_DG1,
                              recovered_space=Vu_CG1,
                              broken_space=Vu_brok,
                              boundary_method=Boundary_Method.dynamics)
    rho_opts = RecoveredOptions(embedding_space=VDG1,
                                recovered_space=VCG1,
                                broken_space=Vr,
                                boundary_method=Boundary_Method.dynamics)
    theta_opts = RecoveredOptions(embedding_space=VDG1,
                                  recovered_space=VCG1,
                                  broken_space=Vt_brok)
    ueqn = EmbeddedDGAdvection(state, Vu, equation_form="advective", options=u_opts)
    rhoeqn = EmbeddedDGAdvection(state, Vr, equation_form="continuity", options=rho_opts)
    thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form="advective", options=theta_opts)
else:
    ueqn = EulerPoincare(state, Vu)
    rhoeqn = AdvectionEquation(state, Vr, equation_form="continuity")
    thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form="advective", options=EmbeddedDGOptions())
# set up limiter (applied to theta advection only)
if limit:
    if recovered:
        limiter = VertexBasedLimiter(VDG1)
    else:
        limiter = ThetaLimiter(Vt)
else:
    limiter = None
advected_fields = [('rho', SSPRK3(state, rho0, rhoeqn)),
                   ('theta', SSPRK3(state, theta0, thetaeqn, limiter=limiter))]
if recovered:
    advected_fields.append(('u', SSPRK3(state, u0, ueqn)))
else:
    advected_fields.append(('u', ThetaMethod(state, u0, ueqn)))
# Set up linear solver
linear_solver = CompressibleSolver(state)
# Set up forcing
if recovered:
    compressible_forcing = CompressibleForcing(state, euler_poincare=False)
else:
    compressible_forcing = CompressibleForcing(state)
# diffusion (disabled by default; see `diffusion` flag above)
bcs = [DirichletBC(Vu, 0.0, "bottom"),
       DirichletBC(Vu, 0.0, "top")]
diffused_fields = []
if diffusion:
    diffused_fields.append(('u', InteriorPenalty(state, Vu, kappa=Constant(60.),
                                                 mu=Constant(10./deltax), bcs=bcs)))
# build time stepper and run to tmax
stepper = CrankNicolson(state, advected_fields, linear_solver,
                        compressible_forcing,
                        diffused_fields=diffused_fields)
stepper.run(t=0, tmax=tmax)
|
<reponame>alfredoosauce/quant-trading
# coding: utf-8
# In[1]:
# i call it oil money
# cuz its a statistical arbitrage on crude benchmark and petrocurrency
# the inspiration came from an article i read
# it suggested to trade on petrocurrency when the oil price went uprising
# plus overall volatility for forex market was low
# the first thing is to build up a model to explore the causality
# we split the historical datasets into two parts
# one for the model estimation, the other for the model validation
# we do a regression on estimation horizon
# we use linear regression to make a prediction on ideal price level
# we set up thresholds based on the standard deviation of residual
# take one deviation above as the upper threshold
# if the currency price breaches the upper threshold
# take a short position as it is assumed to revert to its 'normal' price range soon
# vice versa
# so its kinda like bollinger bands
# however, our regression is based on statistics
# we still need to consider fundamental influence
# what if the market condition has changed
# in that case our model wont work any more
# well,all models lose their creditability over the time
# denote the price deviating two sigmas away from predicted value as model failure
# which we should revert our positions
# e.g. the price is two sigmas above our predicted value
# we change our short to long since the market has changed its sentiment
# there is probably hidden information in the uprising price
# lets follow the trend and see where it ends
# this idea sounds very silly
# nobody actually does it or not that i know of
# i just wanted to see if the idea would work
# perhaps the idea would bring a huge loss
# nonetheless, it turns out to be a big surprise!
# first, we choose our currency norwegian krone
# norway is one of the largest oil producing countries with floating fx regime
# other oil producing countries such as saudi, iran, venezuela have their fx pegged to usd
# russia is supposed to be a good training set
# nevertheless, russia gets sanctioned by uncle sam a lot
# we would see this in the next script
# https://github.com/je-suis-tm/quant-trading/blob/master/Oil%20Money%20project/Oil%20Money%20RUB.py
# after targetting at norwegian krone, we have to choose a currency to evaluate nok
# take a look at norway's biggest trading partners
# we should include us dollar, euro and uk sterling as well as brent crude price in our model
# in addition, the base currency would be japanese yen
# cuz its not a big trading partner with norway
# which implies it doesnt have much correlation with nok
# preparation is done, lets get started!
import matplotlib.pyplot as plt
import statsmodels.api as sm
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.linear_model import ElasticNetCV as en
from statsmodels.tsa.stattools import adfuller as adf
import os
os.chdir("/Users/15128/quant-trading/Oil Money project/data")
# In[2]:
df = pd.read_csv('brent crude nokjpy.csv')
# Use the first column (dates) as a DatetimeIndex, then drop the original.
df.set_index(pd.to_datetime(df[list(df.columns)[0]]), inplace=True)
del df[list(df.columns)[0]]
# In[3]:
# identification
# first of first, using scatter plot to visualize the correlation
# lets denote data from 2013-4-25 to 2017-4-25 as estimation horizon/training set
# lets denote data from 2017-4-25 to 2018-4-25 as validation horizon/testing set
ax = plt.figure(figsize=(10, 5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.scatter(df['brent'][df.index < '2017-04-25'], df['nok'][df.index < '2017-04-25'], s=1, c='#5f0f4e')
plt.title('NOK Brent Correlation')
plt.xlabel('Brent in JPY')
plt.ylabel('NOKJPY')
plt.show()
# if we run a covariance matrix on nok and brent, we got
np.corrcoef(df['nok'],df['brent'])
# array([[1.        , 0.89681228],[0.89681228, 1.        ]])
# dual axis plot
def dual_axis_plot(xaxis, data1, data2, fst_color='r',
                   sec_color='b', fig_size=(10, 5),
                   x_label='', y_label1='', y_label2='',
                   legend1='', legend2='', grid=False, title=''):
    """Plot two series against one shared x-axis on twin y-axes.

    data1 is drawn on the left axis in fst_color, data2 on the right axis
    in sec_color; labels, legends, grid and title are all optional.
    """
    figure = plt.figure(figsize=fig_size)
    left_axis = figure.add_subplot(111)
    left_axis.set_xlabel(x_label)
    left_axis.set_ylabel(y_label1, color=fst_color)
    left_axis.plot(xaxis, data1, color=fst_color, label=legend1)
    left_axis.tick_params(axis='y', labelcolor=fst_color)
    left_axis.yaxis.labelpad = 15
    plt.legend(loc=3)
    right_axis = left_axis.twinx()
    right_axis.set_ylabel(y_label2, color=sec_color, rotation=270)
    right_axis.plot(xaxis, data2, color=sec_color, label=legend2)
    right_axis.tick_params(axis='y', labelcolor=sec_color)
    right_axis.yaxis.labelpad = 15
    figure.tight_layout()
    plt.legend(loc=4)
    plt.grid(grid)
    plt.title(title)
    plt.show()
# nok vs ir
dual_axis_plot(df.index, df['nok'], df['interest rate'],
               fst_color='#34262b', sec_color='#cb2800',
               fig_size=(10, 5), x_label='Date',
               y_label1='NOKJPY', y_label2='Norges Bank Interest Rate%',
               legend1='NOKJPY', legend2='Interest Rate',
               grid=False, title='NOK vs Interest Rate')
# nok vs brent
dual_axis_plot(df.index, df['nok'], df['brent'],
               fst_color='#4f2d20', sec_color='#3feee6',
               fig_size=(10, 5), x_label='Date',
               y_label1='NOKJPY', y_label2='Brent in JPY',
               legend1='NOKJPY', legend2='Brent',
               grid=False, title='NOK vs Brent')
# nok vs gdp
# cuz gdp is released quarterly
# we need to convert nok into quarterly data as well
ind = df['gdp yoy'].dropna().index
dual_axis_plot(df.loc[ind].index,
               df['nok'].loc[ind],
               df['gdp yoy'].dropna(),
               fst_color='#116466', sec_color='#ff652f',
               fig_size=(10, 5), x_label='Date',
               y_label1='NOKJPY', y_label2='Norway GDP YoY %',
               legend1='NOKJPY', legend2='GDP',
               grid=False, title='NOK vs GDP')
# Now we do our linear regression on the training horizon
# (regressors: usd, gbp, eur, brent; response: nok)
x0 = pd.concat([df['usd'], df['gbp'], df['eur'], df['brent']], axis=1)
x1 = sm.add_constant(x0)
x = x1[x1.index < '2017-04-25']
y = df['nok'][df.index < '2017-04-25']
model = sm.OLS(y, x).fit()
print(model.summary(), '\n')
# In[4]:
# from the summary u can tell there is multicollinearity
# the condition number is skyrocketing
# alternatively, i can use elastic net regression to achieve the convergence
# check the link below for more details
# https://github.com/je-suis-tm/machine-learning/blob/master/coordinate%20descent%20for%20elastic%20net.ipynb
m = en(alphas=[0.0001, 0.0005, 0.001, 0.01, 0.1, 1, 10],
       l1_ratio=[.01, .1, .5, .9, .99], max_iter=5000).fit(x0[x0.index < '2017-04-25'], y)
print(m.intercept_, m.coef_)
# elastic net estimation results:
# 3.79776228406 [ 0.00388958  0.01992038  0.02823187  0.00050092]
# In[5]:
# calculate the fitted value of nok (coef order matches x0: usd, gbp, eur, brent)
df['sk_fit'] = (df['usd'] * m.coef_[0] + df['gbp'] * m.coef_[1] +
                df['eur'] * m.coef_[2] + df['brent'] * m.coef_[3] + m.intercept_)
# In[6]:
# getting the residual
df['sk_residual'] = df['nok'] - df['sk_fit']
# one can always argue what if we eliminate some regressors
# in econometrics, if adding extra variables do not decrease adjusted r squared
# or worsen AIC, BIC
# we should include more information as long as it makes sense
# In[7]:
# lets generate signals based on the elastic net
# we set one sigma of the residual as thresholds
# two sigmas of the residual as stop orders
# which is common practise in statistics
# NOTE: sigma is computed on the TRAINING horizon only, then applied out of sample.
upper = np.std(df['sk_residual'][df.index < '2017-04-25'])
lower = -upper
signals = pd.concat([df[i] for i in ['nok', 'usd', 'eur', 'gbp', 'brent', 'sk_fit', 'sk_residual']],
                    axis=1)[df.index >= '2017-04-25']
signals['fitted'] = signals['sk_fit']
del signals['sk_fit']
signals['upper'] = signals['fitted'] + upper
signals['lower'] = signals['fitted'] + lower
signals['stop profit'] = signals['fitted'] + 2 * upper
signals['stop loss'] = signals['fitted'] + 2 * lower
signals['signals'] = 0
# In[8]:
# while doing a traversal
# we apply the rules mentioned before
# if actual price goes beyond upper threshold
# we take a short and bet on its reversion process
# vice versa
# we use cumsum to make sure our signals only get generated
# for the first time conditions are met
# when actual price hits the stop order boundary
# we revert our positions
# u may wonder whats next for breaking the boundary
# well, we stop the signal generation algorithm
# we need to recalibrate our model or use other trend following strategies
index = list(signals.columns).index('signals')
for j in range(len(signals)):
    if signals['nok'].iloc[j] > signals['upper'].iloc[j]:
        signals.iloc[j, index] = -1
    if signals['nok'].iloc[j] < signals['lower'].iloc[j]:
        signals.iloc[j, index] = 1
    signals['cumsum'] = signals['signals'].cumsum()
    # cap net exposure at one unit in either direction
    if signals['cumsum'].iloc[j] > 1 or signals['cumsum'].iloc[j] < -1:
        signals.iloc[j, index] = 0
    # two-sigma breach: flip to net +1 (stop profit) / -1 (stop loss) and halt
    if signals['nok'].iloc[j] > signals['stop profit'].iloc[j]:
        signals['cumsum'] = signals['signals'].cumsum()
        signals.iloc[j, index] = -signals['cumsum'].iloc[j] + 1
        signals['cumsum'] = signals['signals'].cumsum()
        break
    if signals['nok'].iloc[j] < signals['stop loss'].iloc[j]:
        signals['cumsum'] = signals['signals'].cumsum()
        signals.iloc[j, index] = -signals['cumsum'].iloc[j] - 1
        signals['cumsum'] = signals['signals'].cumsum()
        break
# In[9]:
# next, we plot the usual positions as the first figure
ax = plt.figure(figsize=(10, 5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
signals['nok'].plot(label='NOKJPY', c='#594f4f', alpha=0.5)
# long entries as green triangles, short entries as red triangles
ax.plot(signals.loc[signals['signals'] > 0].index,
        signals['nok'][signals['signals'] > 0],
        lw=0, marker='^', c='#83af9b', label='LONG', markersize=10)
ax.plot(signals.loc[signals['signals'] < 0].index,
        signals['nok'][signals['signals'] < 0],
        lw=0, marker='v', c='#fe4365', label='SHORT', markersize=10)
ax.plot(pd.to_datetime('2017-12-20'),
        signals['nok'].loc['2017-12-20'],
        lw=0, marker='*', c='#f9d423', markersize=15, alpha=0.8,
        label='Potential Exit Point of Momentum Trading')
plt.axvline('2017/11/15', linestyle=':', c='k', label='Exit')
plt.legend()
plt.title('NOKJPY Positions')
plt.ylabel('NOKJPY')
plt.xlabel('Date')
plt.show()
# In[10]:
# Second figure: fitted value vs actual price, with the 1-sigma band
# (entry thresholds) and the 2-sigma band (stop orders).
# After 2017/11/15 nokjpy went skyrocketing -- is that a model
# identification problem, or did the fundamentals of nok/oil change?
ax = plt.figure(figsize=(10, 5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
signals['fitted'].plot(lw=2.5, label='Fitted', c='w', alpha=0.6)
signals['nok'].plot(lw=2, label='Actual', c='#04060f', alpha=0.8)
# shaded entry band (1 sigma)
ax.fill_between(signals.index, signals['upper'],
                signals['lower'], alpha=0.2, label='1 Sigma', color='#2a3457')
# shaded stop-order band (2 sigma)
ax.fill_between(signals.index, signals['stop profit'],
                signals['stop loss'], alpha=0.1, label='2 Sigma', color='#720017')
plt.legend(loc='best')
plt.title('Fitted vs Actual')
plt.ylabel('NOKJPY')
plt.xlabel('Date')
plt.show()
# In[11]:
# Decomposing nokjpy into a long-term trend plus a short-term random
# process, brent crude clearly dominates the short-term component.  What
# changed the long-term trend?  Possible candidates:
#  * Saudi Arabia and Iran endorsed an extension of production caps
#  * a new US administration favoring a weaker dollar, pushing oil up
# In[12]:
# Normalize all prices to a base of 100: NOK visibly tracks EUR.  The
# economics: Norway is in the EEA and its economy leans heavily on the EU.
ax = plt.figure(figsize=(10, 5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# each series rebased so its first observation equals 100
(df['nok'] / df['nok'][0] * 100).plot(c='#ff8c94', label='Norwegian Krone', alpha=0.9)
(df['usd'] / df['usd'][0] * 100).plot(c='#9de0ad', label='US Dollar', alpha=0.9)
(df['eur'] / df['eur'][0] * 100).plot(c='#45ada8', label='Euro', alpha=0.9)
(df['gbp'] / df['gbp'][0] * 100).plot(c='#f8b195', label='UK Sterling', alpha=0.9)
(df['brent'] / df['brent'][0] * 100).plot(c='#6c5b7c', label='<NAME>', alpha=0.5)
plt.legend(loc='best')
plt.ylabel('Normalized Price by 100')
plt.xlabel('Date')
plt.title('Trend')
plt.show()
# In[13]:
# Visual correlation is not proof -- run a cointegration test.
# Academically the multivariate Johansen test would be appropriate, but
# statsmodels did not ship one when this was written, so fall back to
# the Engle-Granger two-step method (details in the pair-trading script):
# https://github.com/je-suis-tm/quant-trading/blob/master/Pair%20trading%20backtest.py
# NOTE(review): `y` is defined earlier in the script (out of view here);
# presumably the NOK series over the same pre-2017-04-25 window -- confirm.
x2 = df['eur'][df.index < '2017-04-25']
x3 = sm.add_constant(x2)
model = sm.OLS(y, x3).fit()
ero = model.resid
# step two: ADF test on the OLS residual
print(adf(ero))
print(model.summary())
# (-2.5593457642922992, 0.10169409761939013, 0, 1030,
# {'1%': -3.4367147300588341, '5%': -2.8643501440982058, '10%': -2.5682662399849185}, -1904.8360920752475)
# 0.731199409071
# The ADF statistic on the residual does not even reach the 90%
# confidence level, so no cointegration can be concluded.  Still, the
# visualization suggests NOK and EUR are somewhat correlated, and the
# R-squared says EUR explains about 73% of NOK's variance.
# In[14]:
# P&L analysis: fixed trade size of 100 units on 2000 of initial capital.
capital0 = 2000
positions = 100
portfolio = pd.DataFrame(index=signals.index)
# mark-to-market value of the currently open position
portfolio['holding'] = signals['nok'] * signals['cumsum'] * positions
# cash decreases on buys and increases on sells
portfolio['cash'] = capital0 - (signals['nok'] * signals['signals'] * positions).cumsum()
portfolio['total asset'] = portfolio['holding'] + portfolio['cash']
portfolio['signals'] = signals['signals']
# In[15]:
# focus on Q4 2017, the window where the trades happen
portfolio = portfolio[portfolio.index > '2017-10-01']
portfolio = portfolio[portfolio.index < '2018-01-01']
# In[16]:
# Plot total asset value over time with trade markers, plus a +/- one
# standard-deviation band over the post-exit window.
ax = plt.figure(figsize=(10, 5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
portfolio['total asset'].plot(c='#594f4f', alpha=0.5, label='Total Asset')
ax.plot(portfolio.loc[portfolio['signals'] > 0].index, portfolio['total asset'][portfolio['signals'] > 0],
        lw=0, marker='^', c='#2a3457', label='LONG', markersize=10, alpha=0.5)
ax.plot(portfolio.loc[portfolio['signals'] < 0].index, portfolio['total asset'][portfolio['signals'] < 0],
        lw=0, marker='v', c='#720017', label='The Big Short', markersize=15, alpha=0.5)
# one-sigma band over the 2017-11-20 .. 2017-12-20 window
ax.fill_between(portfolio['2017-11-20':'2017-12-20'].index,
                (portfolio['total asset'] + np.std(portfolio['total asset']))['2017-11-20':'2017-12-20'],
                (portfolio['total asset'] - np.std(portfolio['total asset']))['2017-11-20':'2017-12-20'],
                alpha=0.2, color='#547980')
plt.text(pd.to_datetime('2017-12-20'),
         (portfolio['total asset'] + np.std(portfolio['total asset'])).loc['2017-12-20'],
         'What if we use MACD here?')
plt.axvline('2017/11/15', linestyle=':', label='Exit', c='#ff847c')
plt.legend()
plt.title('Portfolio Performance')
plt.ylabel('Asset Value')
plt.xlabel('Date')
plt.show()
# Takeaway: even when the model predicts well, oscillation between the
# thresholds is hard to monetize; the breakout beyond the stop boundary
# is the most profitable move.  That suggests the model works better as
# a trend-following entry indicator than as statistical arbitrage.
# In[17]:
# Trend-following variant ("oil money v2").  The actual strategy lives
# in a separate script; this file only runs it for analytics:
# https://github.com/je-suis-tm/quant-trading/blob/master/Oil%20Money%20project/Oil%20Money%20Trading%20backtest.py
import oil_money_trading_backtest as om
# generate signals, monitor portfolio performance,
# plot positions and total asset
signals = om.signal_generation(dataset, 'brent', 'nok', om.oil_money)
p = om.portfolio(signals, 'nok')
om.plot(signals, 'nok')
om.profit(p, 'nok')
# A ~2% return barely beats a 0.75% risk-free deposit, so grid-search
# the holding period and the stop loss/profit point.
# NOTE: this double loop is very slow -- expect a long wait.
dic = {}
for holdingt in range(5, 20):
    for stopp in np.arange(0.3, 1.1, 0.05):
        signals = om.signal_generation(dataset, 'brent', 'nok', om.oil_money,
                                       holding_threshold=holdingt,
                                       stop=stopp)
        p = om.portfolio(signals, 'nok')
        # total return over the backtest for this (holding, stop) pair
        dic[holdingt, stopp] = p['asset'].iloc[-1] / p['asset'].iloc[0] - 1
profile = pd.DataFrame({'params': list(dic.keys()), 'return': list(dic.values())})
# In[18]:
# Distribution of return across the parameter grid: the average sits
# around 2%, with extremes near -6% and +6%.  We care about the largest
# achievable positive return, not the average.
ax = plt.figure(figsize=(10, 5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# convert to percentage points before binning
profile['return'].apply(lambda x: x * 100).hist(histtype='bar',
                                                color='#f09e8c',
                                                width=0.45, bins=20)
plt.title('Distribution of Return on NOK Trading')
plt.grid(False)
plt.ylabel('Frequency')
plt.xlabel('Return (%)')
plt.show()
# In[19]:
# Heatmap of return under the different parameter pairs, to locate the
# optimum.  First pivot the (holding period, stop point) -> return
# records into a matrix: columns = stop points, index = holding periods.
matrix = pd.DataFrame(columns=
                      [round(i, 2) for i in np.arange(0.3, 1.1, 0.05)])
matrix['index'] = np.arange(5, 20)
matrix.set_index('index', inplace=True)
for i, j in profile['params']:
    # store the return (in %) at cell (holding period, stop point)
    matrix.at[i, round(j, 2)] = \
        profile['return'][profile['params'] == (i, j)].item() * 100
# seaborn's heatmap needs numeric dtypes
for i in matrix.columns:
    matrix[i] = matrix[i].apply(float)
# plotting
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
sns.heatmap(matrix, cmap='gist_heat_r', square=True,
            xticklabels=3, yticklabels=3)
ax.collections[0].colorbar.set_label('Return(%) \n',
                                     rotation=270)
plt.xlabel('\nStop Loss/Profit (points)')
plt.ylabel('Position Holding Period (days)\n')
plt.title('Profit Heatmap\n', fontsize=10)
plt.style.use('default')
# Conclusion: the return is largely insensitive to the stop point but
# correlated with the length of the holding period; about 9 trading
# days looks ideal, with the stop point anywhere from 0.6 to 1.05.
|
# This file is part of the History Store (histore).
#
# Copyright (C) 2018-2021 New York University.
#
# The History Store (histore) is released under the Revised BSD License. See
# file LICENSE for full license details.
"""unit tests for snapshot descriptors and snapshot descriptor listings."""
import pytest
from histore.archive.snapshot import Snapshot, SnapshotListing
from histore.document.base import InputDescriptor
import histore.util as util
def test_append_snapshots():
    """Test appending new snapshot descriptors to a snapshot listing."""
    listing = SnapshotListing()
    # A fresh listing has no snapshots and therefore no last snapshot.
    assert listing.is_empty()
    assert listing.last_snapshot() is None
    # Append the first snapshot (version 0).
    listing = listing.append(listing.next_version())
    assert not listing.is_empty()
    assert listing.last_snapshot() is not None
    first = listing.last_snapshot()
    assert first.version == 0
    assert str(first).startswith('<Snapshot')
    # Append a second snapshot carrying a description and an action.
    listing = listing.append(
        version=listing.next_version(),
        descriptor=InputDescriptor(
            description='some text',
            action={'command': 'X'}
        )
    )
    second = listing.last_snapshot()
    assert second.version == 1
    assert second.description == 'some text'
    assert second.action == {'command': 'X'}
    # The listing now contains exactly versions 0 and 1.
    assert len(listing) == 2
    assert listing.has_version(0)
    assert listing.has_version(1)
    assert not listing.has_version(2)
def test_create_snapshot_descriptor():
    """Test creating instances of the snapshot descriptor class."""
    # Minimal constructor: transaction time defaults to the creation
    # time, description to the empty string and action to None.
    snap = Snapshot(version=0, valid_time=util.to_datetime('2020-05-01'))
    assert snap.version == 0
    assert snap.valid_time == util.to_datetime('2020-05-01')
    assert snap.transaction_time is not None
    assert snap.transaction_time == snap.created_at
    assert snap.description == ''
    assert snap.action is None
    # Full constructor with explicit transaction time and description.
    snap = Snapshot(
        version=0,
        valid_time=util.to_datetime('2020-05-01'),
        transaction_time=util.to_datetime('2020-04-01'),
        description='some text'
    )
    assert snap.valid_time == util.to_datetime('2020-05-01')
    assert snap.transaction_time == util.to_datetime('2020-04-01')
    assert snap.description == 'some text'
def test_snapshot_listing():
    """Test creating a listing of snapshot descriptors."""
    s1 = Snapshot(version=0, valid_time=util.to_datetime('2020-05-01'))
    s2 = Snapshot(version=1, valid_time=util.to_datetime('2020-05-02'))
    s3 = Snapshot(version=2, valid_time=util.to_datetime('2020-05-03'))
    listing = SnapshotListing(snapshots=[s1, s2, s3])
    # Get snapshots by identifier.  Bug fix: iterate over all three
    # versions -- the previous range(2) left version 2 unchecked.
    for version in range(3):
        assert listing[version].version == version
        assert listing.get(version).version == version
    versions = list()
    for s in listing:
        versions.append(s.version)
    assert versions == [0, 1, 2]
    # Time-based lookup: before the first valid time there is no
    # snapshot; otherwise the latest snapshot at or before the given
    # time is returned.
    s = listing.at_time(util.to_datetime('2020-04-01'))
    assert s is None
    s = listing.at_time(util.to_datetime('2020-05-01T08:00:00'))
    assert s.version == 0
    s = listing.at_time(util.to_datetime('2020-05-10'))
    assert s.version == 2
    # Error when accessing snapshot with unknown identifier.
    with pytest.raises(KeyError):
        listing[4]
    # Error when adding snapshot with invalid version number.
    with pytest.raises(ValueError):
        listing.append(version=100)
    # Empty listing returns None for any time
    assert SnapshotListing().at_time(util.to_datetime('2020-04-01')) is None
    # Error case for snapshots with invalid 'valid_time' order
    with pytest.raises(ValueError):
        SnapshotListing(snapshots=[s1, s3, s2])
    s1 = Snapshot(version=0, valid_time=util.to_datetime('2020-05-01'))
    s2 = Snapshot(version=1, valid_time=util.to_datetime('2020-05-03'))
    s3 = Snapshot(version=2, valid_time=util.to_datetime('2020-05-02'))
    # Error case for snapshots with invalid 'valid_time'
    with pytest.raises(ValueError):
        SnapshotListing(snapshots=[s1, s2, s3])
def test_snapshot_rollback():
    """Test rollback for a snapshot listing."""
    listing = SnapshotListing()
    # Build a listing with three consecutive versions (0, 1, 2).
    for _ in range(3):
        listing = listing.append(listing.next_version())
    assert len(listing) == 3
    # Rolling back to a version drops every later snapshot.
    listing = listing.rollback(1)
    assert len(listing) == 2
    listing = listing.rollback(0)
    assert len(listing) == 1
|
# All Rights Reserved.
# Copyright 2013 SolidFire Inc
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import json
import math
import re
import socket
import string
import time
import warnings
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
import requests
from requests.packages.urllib3 import exceptions
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume.targets import iscsi as iscsi_driver
from cinder.volume import volume_types
from cinder.volume import volume_utils
# module-level logger
LOG = logging.getLogger(__name__)
# SolidFire-specific configuration options, registered below under the
# shared backend configuration group.
sf_opts = [
    cfg.BoolOpt('sf_emulate_512',
                default=True,
                help='Set 512 byte emulation on volume creation; '),
    cfg.BoolOpt('sf_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),
    cfg.StrOpt('sf_account_prefix',
               help='Create SolidFire accounts with this prefix. Any string '
                    'can be used here, but the string \"hostname\" is special '
                    'and will create a prefix using the cinder node hostname '
                    '(previous default behavior). The default is NO prefix.'),
    cfg.StrOpt('sf_volume_prefix',
               default='UUID-',
               help='Create SolidFire volumes with this prefix. Volume names '
                    'are of the form <sf_volume_prefix><cinder-volume-id>. '
                    'The default is to use a prefix of \'UUID-\'.'),
    cfg.StrOpt('sf_svip',
               help='Overrides default cluster SVIP with the one specified. '
                    'This is required or deployments that have implemented '
                    'the use of VLANs for iSCSI networks in their cloud.'),
    cfg.PortOpt('sf_api_port',
                default=443,
                help='SolidFire API port. Useful if the device api is behind '
                     'a proxy on a different port.'),
    cfg.BoolOpt('sf_enable_vag',
                default=False,
                help='Utilize volume access groups on a per-tenant basis.'),
    cfg.StrOpt('sf_provisioning_calc',
               default='maxProvisionedSpace',
               choices=['maxProvisionedSpace', 'usedSpace'],
               help='Change how SolidFire reports used space and '
                    'provisioning calculations. If this parameter is set to '
                    '\'usedSpace\', the driver will report correct '
                    'values as expected by Cinder '
                    'thin provisioning.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP)
# SolidFire API Error Constants
xExceededLimit = 'xExceededLimit'
xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup'
xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist'
xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup'
class DuplicateSfVolumeNames(exception.Duplicate):
    """More than one SolidFire volume carries the same name."""
    message = _("Detected more than one volume with name %(vol_name)s")


class SolidFireAPIException(exception.VolumeBackendAPIException):
    """Generic failure response from the SolidFire API."""
    message = _("Bad response from SolidFire API")


class SolidFireDriverException(exception.VolumeDriverException):
    """Generic SolidFire driver-side failure."""
    message = _("SolidFire Cinder Driver exception")


class SolidFireAPIDataException(SolidFireAPIException):
    """The SolidFire API returned malformed or unexpected data."""
    message = _("Error in SolidFire API response: data=%(data)s")


class SolidFireAccountNotFound(SolidFireDriverException):
    """The requested account does not exist on the SolidFire cluster."""
    message = _("Unable to locate account %(account_name)s on "
                "Solidfire device")


class SolidFireRetryableException(exception.VolumeBackendAPIException):
    """A transient SolidFire error that callers may safely retry."""
    message = _("Retryable SolidFire Exception encountered")


class SolidFireReplicationPairingError(exception.VolumeBackendAPIException):
    """Replication pairing with a remote cluster/volume failed."""
    message = _("Error on SF Keys")
def retry(exc_tuple, tries=5, delay=1, backoff=2):
    """Retry decorator for SolidFire API calls.

    Retries the wrapped function whenever it raises one of the
    exceptions in ``exc_tuple``, sleeping ``delay`` seconds between
    attempts and multiplying the delay by ``backoff`` after each
    failure.  Once ``tries`` attempts have failed, a
    :class:`SolidFireAPIException` is raised.

    Bug fix: the previous loop (``while _tries > 1``) performed only
    ``tries - 1`` calls before giving up; the wrapper now makes exactly
    ``tries`` attempts.
    """
    def retry_dec(f):
        @six.wraps(f)
        def func_retry(*args, **kwargs):
            _tries, _delay = tries, delay
            while _tries > 0:
                try:
                    return f(*args, **kwargs)
                except exc_tuple:
                    _tries -= 1
                    if _tries > 0:
                        # back off before the next attempt
                        time.sleep(_delay)
                        _delay *= backoff
                        LOG.debug('Retrying %(args)s, %(tries)s attempts '
                                  'remaining...',
                                  {'args': args, 'tries': _tries})
            # NOTE(jdg): Don't log the params passed here
            # some cmds like createAccount will have sensitive
            # info in the params, grab only the second tuple
            # which should be the Method
            msg = (_('Retry count exceeded for command: %s') %
                   (args[1],))
            LOG.error(msg)
            raise SolidFireAPIException(message=msg)
        return func_retry
    return retry_dec
def locked_image_id_operation(f, external=False):
    """Lock decorator keyed on the id of the ``image_meta`` argument.

    Serializes calls to the wrapped method that operate on the same
    image, using a lock named ``<driver_prefix>-<image_id>``.
    """
    def wrapped(inst, *args, **kwargs):
        call_args = inspect.getcallargs(f, inst, *args, **kwargs)
        image_meta = call_args.get('image_meta')
        if not image_meta:
            err_msg = _('The decorated method must accept image_meta.')
            raise exception.VolumeBackendAPIException(data=err_msg)
        image_id = image_meta['id']

        @utils.synchronized('%s-%s' % (inst.driver_prefix, image_id),
                            external=external)
        def locked_call():
            return f(inst, *args, **kwargs)
        return locked_call()
    return wrapped
def locked_source_id_operation(f, external=False):
    """Lock decorator keyed on the id of the ``source`` argument.

    Serializes calls to the wrapped method that operate on the same
    source volume, using a lock named ``<driver_prefix>-<source_id>``.
    """
    def wrapped(inst, *args, **kwargs):
        call_args = inspect.getcallargs(f, inst, *args, **kwargs)
        source = call_args.get('source', None)
        # require a source argument carrying a (truthy) id
        if not (source and source.get('id', None)):
            err_msg = _('The decorated method must accept src_uuid.')
            raise exception.VolumeBackendAPIException(message=err_msg)
        source_id = source['id']

        @utils.synchronized('%s-%s' % (inst.driver_prefix, source_id),
                            external=external)
        def locked_call():
            return f(inst, *args, **kwargs)
        return locked_call()
    return wrapped
@interface.volumedriver
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.

.. code-block:: default

  Version history:
    1.0 - Initial driver
    1.1 - Refactor, clone support, qos by type and minor bug fixes
    1.2 - Add xfr and retype support
    1.2.1 - Add export/import support
    1.2.2 - Catch VolumeNotFound on accept xfr
    2.0.0 - Move from httplib to requests
    2.0.1 - Implement SolidFire Snapshots
    2.0.2 - Implement secondary account
    2.0.3 - Implement cluster pairing
    2.0.4 - Implement volume replication
    2.0.5 - Try and deal with the stupid retry/clear issues from objects
            and tflow
    2.0.6 - Add a lock decorator around the clone_image method
    2.0.7 - Add scaled IOPS
    2.0.8 - Add active status filter to get volume ops
    2.0.9 - Always purge on delete volume
    2.0.10 - Add response to debug on retryable errors
    2.0.11 - Add ability to failback replicating volumes
    2.0.12 - Fix bug #1744005
    2.0.14 - Fix bug #1782588 qos settings on extend
    2.0.15 - Fix bug #1834013 NetApp SolidFire replication errors
    2.0.16 - Add options for replication mode (Async, Sync and
             SnapshotsOnly)
"""
VERSION = '2.0.16'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_SolidFire_CI"
# prefix used when building per-resource lock names
driver_prefix = 'solidfire'
# Named QoS presets selectable via volume-type extra specs.
sf_qos_dict = {'slow': {'minIOPS': 100,
                        'maxIOPS': 200,
                        'burstIOPS': 200},
               'medium': {'minIOPS': 200,
                          'maxIOPS': 400,
                          'burstIOPS': 400},
               'fast': {'minIOPS': 500,
                        'maxIOPS': 1000,
                        'burstIOPS': 1000},
               'performant': {'minIOPS': 2000,
                              'maxIOPS': 4000,
                              'burstIOPS': 4000},
               'off': None}
# individually settable QoS keys, plus their scaled-IOPS variants
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
sf_scale_qos_keys = ['scaledIOPS', 'scaleMin', 'scaleMax', 'scaleBurst']
# hard IOPS limits accepted by the cluster
sf_iops_lim_min = {'minIOPS': 100, 'maxIOPS': 100, 'burstIOPS': 100}
sf_iops_lim_max = {'minIOPS': 15000,
                   'maxIOPS': 200000,
                   'burstIOPS': 200000}
cluster_stats = {}
# exceptions that trigger the @retry decorator on API requests
retry_exc_tuple = (SolidFireRetryableException,
                   requests.exceptions.ConnectionError)
# SolidFire error names that are safe to retry
retryable_errors = ['xDBVersionMismatch',
                    'xMaxSnapshotsPerVolumeExceeded',
                    'xMaxClonesPerVolumeExceeded',
                    'xMaxSnapshotsPerNodeExceeded',
                    'xMaxClonesPerNodeExceeded',
                    'xSliceNotRegistered',
                    'xNotReadyForIO']
def __init__(self, *args, **kwargs):
    """Initialize the driver and resolve the active cluster.

    Reads the backend configuration, creates the iSCSI target driver,
    validates replication settings and then establishes the cluster
    reference -- either the configured primary cluster, or, when
    ``active_backend_id`` indicates a failed-over state, the configured
    replication target.
    """
    super(SolidFireDriver, self).__init__(*args, **kwargs)
    # non-None when the driver was started in a failed-over state
    self.failed_over_id = kwargs.get('active_backend_id', None)
    self.replication_status = kwargs.get('replication_status', "na")
    self.configuration.append_config_values(sf_opts)
    self.template_account_id = None
    self.max_volumes_per_account = 1990
    self.volume_map = {}
    self.cluster_pairs = []
    self.replication_enabled = False
    self.failed_over = False
    self.verify_ssl = self.configuration.driver_ssl_cert_verify
    self.target_driver = SolidFireISCSI(solidfire_driver=self,
                                        configuration=self.configuration)
    self._check_replication_configs()
    # If we're failed over, we need to parse things out and set the active
    # cluster appropriately
    if self.failed_over_id:
        LOG.info("Running on failed-over mode. "
                 "Active backend-id: %s", self.failed_over_id)
        repl_target = self.configuration.get('replication_device', [])
        if not repl_target:
            # failed over but no target configured: cannot continue
            LOG.error('Failed to initialize SolidFire driver to '
                      'a remote cluster specified at id: %s',
                      self.failed_over_id)
            raise SolidFireDriverException
        remote_endpoint = self._build_repl_endpoint_info(
            **repl_target[0])
        self.active_cluster = self._create_cluster_reference(
            remote_endpoint)
        # When in failed-over state, we have only endpoint info from the
        # primary cluster.
        self.primary_cluster = {"endpoint": self._build_endpoint_info()}
        self.failed_over = True
    else:
        self.primary_cluster = self._create_cluster_reference()
        self.active_cluster = self.primary_cluster
        if self.configuration.replication_device:
            self._set_cluster_pairs()
    LOG.debug("Active cluster: %s", self.active_cluster)
    # NOTE(jdg): This works even in a failed over state, because what we
    # do is use self.active_cluster in issue_api_request so by default we
    # always use the currently active cluster, override that by providing
    # an endpoint to issue_api_request if needed
    try:
        self._update_cluster_status()
    except SolidFireAPIException:
        pass
@classmethod
def get_driver_options(cls):
    """Return the full set of config options this driver understands."""
    # driver-specific options plus the inherited SAN/replication ones
    inherited = ('san_ip', 'san_login', 'san_password',
                 'driver_ssl_cert_verify', 'replication_device',
                 'reserved_percentage', 'max_over_subscription_ratio')
    return sf_opts + cls._get_oslo_driver_opts(*inherited)
def _init_vendor_properties(self):
    """Expose SolidFire vendor-unique volume-type properties.

    Returns a tuple of (properties dict, vendor prefix).
    """
    props = {}
    # single vendor property: the desired replication mode
    self._set_property(
        props,
        "solidfire:replication_mode",
        "Replication mode",
        _("Specifies replication mode."),
        "string",
        enum=["Async", "Sync", "SnapshotsOnly"])
    return props, 'solidfire'
def __getattr__(self, attr):
    """Delegate unknown attribute lookups to the iSCSI target driver."""
    delegate = self.target_driver
    if not hasattr(delegate, attr):
        raise NotImplementedError(_('Attribute: %s not found.') % attr)
    return getattr(delegate, attr)
def _get_remote_info_by_id(self, backend_id):
    """Return cluster info for the replication device with ``backend_id``.

    Scans every configured replication device; returns None when no
    device matches.  (If several match, the last one wins, matching the
    original scan-without-break behavior.)
    """
    match = None
    for device in self.configuration.get('replication_device', []):
        if device.get('backend_id', None) != backend_id:
            continue
        endpoint = self._build_endpoint_info(**device)
        match = self._get_cluster_info(endpoint)
        match['endpoint'] = endpoint
        # default the storage VIP to the cluster-reported one, with
        # the standard iSCSI port appended
        if not match['endpoint']['svip']:
            match['endpoint']['svip'] = match['svip'] + ':3260'
    return match
def _create_remote_pairing(self, remote_device):
    """Pair the active cluster with ``remote_device`` for replication.

    Starts pairing on the active cluster and completes it against the
    remote endpoint.  On success the pair id is stored in
    ``remote_device['clusterPairID']`` and returned.

    Bug fix: when the remote reports ``xPairingAlreadyExists`` the
    previous code fell through and referenced ``pair_id`` before
    assignment (UnboundLocalError).  ``pair_id`` is now pre-initialized
    and only logged/stored when a new pairing was actually created.

    :returns: the new cluster pair id, or None when the pairing
        already existed.
    """
    pair_id = None
    try:
        pairing_info = self._issue_api_request('StartClusterPairing',
                                               {}, version='8.0')['result']
        pair_id = self._issue_api_request(
            'CompleteClusterPairing',
            {'clusterPairingKey': pairing_info['clusterPairingKey']},
            version='8.0',
            endpoint=remote_device['endpoint'])['result']['clusterPairID']
    except SolidFireAPIException as ex:
        if 'xPairingAlreadyExists' in ex.msg:
            LOG.debug('Pairing already exists during init.')
        else:
            with excutils.save_and_reraise_exception():
                LOG.error('Cluster pairing failed: %s', ex.msg)
    if pair_id is not None:
        LOG.debug('Initialized Cluster pair with ID: %s', pair_id)
        remote_device['clusterPairID'] = pair_id
    return pair_id
def _get_cluster_info(self, remote_endpoint):
    """Fetch clusterInfo from ``remote_endpoint``; log and re-raise on failure."""
    try:
        response = self._issue_api_request(
            'GetClusterInfo', {},
            endpoint=remote_endpoint)['result']['clusterInfo']
    except SolidFireAPIException:
        msg = _("Replication device is unreachable!")
        LOG.exception(msg)
        raise
    return response
def _check_replication_configs(self):
    """Validate the configured replication devices, if any.

    :raises SolidFireDriverException: when more than one target is
        configured, or when the target MVIP equals the source MVIP.
    """
    devices = self.configuration.replication_device
    if not devices:
        return
    # We only support one replication target. Checking if the user is
    # trying to add more than one;
    if len(devices) > 1:
        msg = _("SolidFire driver only supports one replication target "
                "device.")
        LOG.error(msg)
        raise SolidFireDriverException(msg)
    target = devices[0]
    # Check if the user is not using the same MVIP as source
    # and replication target.
    if target['mvip'] == self.configuration.san_ip:
        msg = _("Source mvip cannot be the same "
                "as the replication target.")
        LOG.error(msg)
        raise SolidFireDriverException(msg)
def _set_cluster_pairs(self):
    """Discover or create the replication cluster pairing.

    Looks for an existing pairing with the configured replication
    target; when none exists (and the target is not the active cluster
    itself), a new pairing is created.  On return the remote cluster
    reference is appended to ``self.cluster_pairs`` and replication is
    marked enabled.
    """
    repl_configs = self.configuration.replication_device[0]
    existing_pairs = self._issue_api_request(
        'ListClusterPairs',
        {},
        version='8.0')['result']['clusterPairs']
    LOG.debug("Existing cluster pairs: %s", existing_pairs)
    remote_pair = {}
    remote_endpoint = self._build_repl_endpoint_info(**repl_configs)
    remote_info = self._create_cluster_reference(remote_endpoint)
    remote_info['backend_id'] = repl_configs['backend_id']
    # reuse an existing pairing whose MVIP matches the configured target
    for ep in existing_pairs:
        if repl_configs['mvip'] == ep['mvip']:
            remote_pair = ep
            LOG.debug("Found remote pair: %s", remote_pair)
            remote_info['clusterPairID'] = ep['clusterPairID']
            break
    if (not remote_pair and
            remote_info['mvip'] != self.active_cluster['mvip']):
        LOG.debug("Setting up new cluster pairs.")
        # NOTE(jdg): create_remote_pairing sets the
        # clusterPairID in remote_info for us
        self._create_remote_pairing(remote_info)
    self.cluster_pairs.append(remote_info)
    LOG.debug("Available cluster pairs: %s", self.cluster_pairs)
    self.replication_enabled = True
def _create_cluster_reference(self, endpoint=None):
    """Build a dict describing a cluster: info, API version and SVIP.

    When ``endpoint`` is not supplied, the primary cluster endpoint
    from the configuration is used.
    """
    ref = {}
    ref['endpoint'] = endpoint if endpoint else self._build_endpoint_info()
    cluster_info = (self._issue_api_request(
        'GetClusterInfo', {}, endpoint=ref['endpoint'])
        ['result']['clusterInfo'])
    ref.update(cluster_info)
    # Handy extra: the Element API version reported by the cluster.
    ref['clusterAPIVersion'] = (
        self._issue_api_request('GetClusterVersionInfo',
                                {}, endpoint=ref['endpoint'])
        ['result']['clusterAPIVersion'])
    # NOTE(sfernand): If a custom svip is configured, we update the
    # default storage ip to the configuration value.
    # Otherwise, we update endpoint info with the default storage ip
    # retrieved from GetClusterInfo API call.
    svip = ref['endpoint'].get('svip') or ref['svip']
    if ':' not in svip:
        svip += ':3260'
    ref['svip'] = svip
    ref['endpoint']['svip'] = svip
    return ref
def _set_active_cluster(self, endpoint=None):
    """Refresh ``self.active_cluster`` from the given (or default) endpoint."""
    self.active_cluster['endpoint'] = (endpoint if endpoint
                                       else self._build_endpoint_info())
    cluster_info = self._issue_api_request(
        'GetClusterInfo', {})['result']['clusterInfo']
    self.active_cluster.update(cluster_info)
    # Handy extra: the Element API version reported by the cluster.
    self.active_cluster['clusterAPIVersion'] = (
        self._issue_api_request('GetClusterVersionInfo',
                                {})['result']['clusterAPIVersion'])
    # honor a custom SVIP when one is configured
    if self.configuration.get('sf_svip', None):
        self.active_cluster['svip'] = self.configuration.get('sf_svip')
def _create_provider_id_string(self,
                               resource_id,
                               account_or_vol_id):
    """Return the provider id string: '<id> <account-or-parent> <cluster uuid>'.

    NOTE(jdg): the same format is used for volumes and snapshots, but
    snapshots have no account id, so the parent volume id takes its
    place.
    """
    parts = (resource_id, account_or_vol_id, self.active_cluster['uuid'])
    return '%s %s %s' % parts
def _init_snapshot_mappings(self, srefs):
    """Compute provider_id updates for existing Cinder snapshots.

    Matches each Cinder snapshot in ``srefs`` against the cluster's
    snapshot list by name and returns a list of
    ``{'id': ..., 'provider_id': ...}`` dicts for every snapshot whose
    stored provider_id is stale or missing.
    """
    updates = []
    sf_snaps = self._issue_api_request(
        'ListSnapshots', {}, version='6.0')['result']['snapshots']
    for s in srefs:
        # cluster-side snapshot name is '<sf_volume_prefix><cinder id>'
        seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id'])
        sfsnap = next(
            (ss for ss in sf_snaps if ss['name'] == seek_name), None)
        if sfsnap:
            id_string = self._create_provider_id_string(
                sfsnap['snapshotID'],
                sfsnap['volumeID'])
            # only emit an update when the stored value differs
            if s.get('provider_id') != id_string:
                updates.append(
                    {'id': s['id'],
                     'provider_id': id_string})
    return updates
def _init_volume_mappings(self, vrefs):
    """Compute provider_id updates for existing Cinder volumes.

    Matches each Cinder volume in ``vrefs`` against the cluster's
    active volumes by name and returns a list of
    ``{'id': ..., 'provider_id': ...}`` dicts.
    """
    updates = []
    sf_vols = self._issue_api_request('ListActiveVolumes',
                                      {})['result']['volumes']
    self.volume_map = {}
    for v in vrefs:
        # cluster-side volume name is '<sf_volume_prefix><cinder id>'
        seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id'])
        sfvol = next(
            (sv for sv in sf_vols if sv['name'] == seek_name), None)
        if sfvol:
            # NOTE(review): provider_id is a '<vid> <aid> <uuid>' string
            # while sfvol['volumeID'] is an integer, so this comparison
            # looks always-true and an update is emitted for every
            # matched volume -- confirm whether the intent was to
            # compare against the composed provider-id string instead.
            if v.get('provider_id', 'nil') != sfvol['volumeID']:
                updates.append(
                    {'id': v['id'],
                     'provider_id': self._create_provider_id_string(
                         sfvol['volumeID'], sfvol['accountID'])})
    return updates
def update_provider_info(self, vrefs, snaprefs):
    """Recompute provider_id updates for existing volumes and snapshots."""
    return (self._init_volume_mappings(vrefs),
            self._init_snapshot_mappings(snaprefs))
def _build_repl_endpoint_info(self, **repl_device):
    """Assemble the endpoint dict for a configured replication device."""
    mvip = repl_device.get('mvip')
    port = repl_device.get('port', 443)
    return {
        'mvip': mvip,
        'login': repl_device.get('login'),
        'passwd': repl_device.get('password'),
        'port': port,
        'url': 'https://%s:%s' % (mvip, port),
        'svip': repl_device.get('svip')
    }
def _build_endpoint_info(self, **kwargs):
    """Build endpoint connection info, defaulting to the primary cluster.

    NOTE(jdg): We default to the primary cluster config settings but
    always check to see if desired settings were passed in to handle
    things like replication targets with unique settings.
    """
    conf = self.configuration
    endpoint = {
        'mvip': kwargs.get('mvip', conf.san_ip),
        'login': kwargs.get('login', conf.san_login),
        'passwd': kwargs.get('password', conf.san_password),
        'port': kwargs.get('port', conf.sf_api_port),
        'svip': kwargs.get('svip', conf.sf_svip),
    }
    endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'],
                                         endpoint['port'])
    # fall back to the backend_id as the MVIP when none was supplied
    if not endpoint.get('mvip', None) and kwargs.get('backend_id', None):
        endpoint['mvip'] = kwargs.get('backend_id')
    return endpoint
@retry(retry_exc_tuple, tries=6)
def _issue_api_request(self, method, params, version='1.0', endpoint=None):
    """POST a JSON-RPC request to the SolidFire API.

    Sends ``{'method': method, 'params': params}`` to the given (or
    active-cluster) endpoint.  Errors named in ``retryable_errors``
    raise :class:`SolidFireRetryableException`, which the ``@retry``
    decorator converts into repeated attempts; ``xInvalidPairingKey``
    raises :class:`SolidFireReplicationPairingError`; any other error
    raises :class:`SolidFireAPIException`.
    """
    if params is None:
        params = {}
    if endpoint is None:
        endpoint = self.active_cluster['endpoint']
    payload = {'method': method, 'params': params}
    url = '%s/json-rpc/%s/' % (endpoint['url'], version)
    # suppress urllib3's warning when SSL verification is disabled
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
        req = requests.post(url,
                            data=json.dumps(payload),
                            auth=(endpoint['login'], endpoint['passwd']),
                            verify=self.verify_ssl,
                            timeout=30)
    response = req.json()
    req.close()
    # transient cluster errors: let @retry run the request again
    if (('error' in response) and
            (response['error']['name'] in self.retryable_errors)):
        msg = ('Retryable error (%s) encountered during '
               'SolidFire API call.' % response['error']['name'])
        LOG.debug(msg)
        LOG.debug("API response: %s", response)
        raise SolidFireRetryableException(message=msg)
    # pairing-key failures get their own exception type
    if (('error' in response) and
            response['error']['name'] == 'xInvalidPairingKey'):
        LOG.debug("Error on volume pairing!")
        raise SolidFireReplicationPairingError
    # anything else that reports an error is fatal
    if 'error' in response:
        msg = _('API response: %s') % response
        raise SolidFireAPIException(msg)
    return response
def _get_volumes_by_sfaccount(self, account_id, endpoint=None):
"""Get all volumes on cluster for specified account."""
params = {'accountID': account_id}
return self._issue_api_request(
'ListVolumesForAccount',
params,
endpoint=endpoint)['result']['volumes']
def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None,
endpoint=None):
# ListVolumesForAccount gives both Active and Deleted
# we require the solidfire accountID, uuid of volume
# is optional
vols = self._get_volumes_by_sfaccount(sf_account_id, endpoint=endpoint)
if cinder_uuid:
vlist = [v for v in vols if
cinder_uuid in v['name']]
else:
vlist = [v for v in vols]
vlist = sorted(vlist, key=lambda k: k['volumeID'])
return vlist
    def _get_sfvol_by_cinder_vref(self, vref):
        """Locate the SF volume object backing a cinder volume reference.

        Tries the provider_id first ('<volID> <acctID> <clusterID>') via
        a targeted ListVolumes call, then falls back to scanning every SF
        account mapped to the volume's tenant.

        :param vref: cinder volume object.
        :returns: the SF volume dict, or None when nothing matches.
        """
        # sfvols is one or more element objects returned from a list call
        # sfvol is the single volume object that will be returned or it will
        # be None
        sfvols = None
        sfvol = None
        provider_id = vref.get('provider_id', None)
        if provider_id:
            try:
                sf_vid, sf_aid, sf_cluster_id = provider_id.split(' ')
            except ValueError:
                LOG.warning("Invalid provider_id entry for volume: %s",
                            vref.id)
            else:
                # So there shouldn't be any clusters out in the field that are
                # running Element < 8.0, but just in case; we'll to a try
                # block here and fall back to the old methods just to be safe
                try:
                    sfvol = self._issue_api_request(
                        'ListVolumes',
                        {'startVolumeID': sf_vid,
                         'limit': 1},
                        version='8.0')['result']['volumes'][0]
                    # Bug 1782373 validate the list returned has what we asked
                    # for, check if there was no match
                    if sfvol['volumeID'] != int(sf_vid):
                        sfvol = None
                except Exception:
                    # Deliberate best-effort: any failure here simply
                    # triggers the per-account fallback below.
                    pass
        if not sfvol:
            LOG.info("Failed to find volume by provider_id, "
                     "attempting ListForAccount")
            for account in self._get_sfaccounts_for_tenant(vref.project_id):
                sfvols = self._issue_api_request(
                    'ListVolumesForAccount',
                    {'accountID': account['accountID']})['result']['volumes']
                # Bug 1782373 match single vref.id encase no provider as the
                # above call will return a list for the account
                for sfv in sfvols:
                    if sfv['attributes'].get('uuid', None) == vref.id:
                        sfvol = sfv
                        break
        return sfvol
def _get_sfaccount_by_name(self, sf_account_name, endpoint=None):
"""Get SolidFire account object by name."""
sfaccount = None
params = {'username': sf_account_name}
try:
data = self._issue_api_request('GetAccountByName',
params,
endpoint=endpoint)
if 'result' in data and 'account' in data['result']:
LOG.debug('Found solidfire account: %s', sf_account_name)
sfaccount = data['result']['account']
except SolidFireAPIException as ex:
if 'xUnknownAccount' in ex.msg:
return sfaccount
else:
raise
return sfaccount
def _get_sf_account_name(self, project_id):
"""Build the SolidFire account name to use."""
prefix = self.configuration.sf_account_prefix or ''
if prefix == 'hostname':
prefix = socket.gethostname()
return '%s%s%s' % (prefix, '-' if prefix else '', project_id)
def _get_sfaccount(self, project_id):
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
raise SolidFireAccountNotFound(
account_name=sf_account_name)
return sfaccount
def _create_sfaccount(self, sf_account_name, endpoint=None):
"""Create account on SolidFire device if it doesn't already exist.
We're first going to check if the account already exists, if it does
just return it. If not, then create it.
"""
sfaccount = self._get_sfaccount_by_name(sf_account_name,
endpoint=endpoint)
if sfaccount is None:
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
self._issue_api_request('AddAccount', params,
endpoint=endpoint)
sfaccount = self._get_sfaccount_by_name(sf_account_name,
endpoint=endpoint)
return sfaccount
    def _generate_random_string(self, length):
        """Generates random_string to use for CHAP password.

        Delegates to cinder's volume_utils.generate_password; the
        character pool is uppercase ASCII letters plus digits.

        :param length: number of characters in the returned secret.
        """
        return volume_utils.generate_password(
            length=length,
            symbolgroups=(string.ascii_uppercase + string.digits))
def _build_connection_info(self, sfaccount, vol, endpoint=None):
"""Gets the connection info for specified account and volume."""
if endpoint:
iscsi_portal = endpoint['svip']
else:
iscsi_portal = self.active_cluster['svip']
if ':' not in iscsi_portal:
iscsi_portal += ':3260'
chap_secret = sfaccount['targetSecret']
vol_id = vol['volumeID']
iqn = vol['iqn']
conn_info = {
# NOTE(john-griffith): SF volumes are always at lun 0
'provider_location': ('%s %s %s' % (iscsi_portal, iqn, 0)),
'provider_auth': ('CHAP %s %s' % (sfaccount['username'],
chap_secret))
}
if not self.configuration.sf_emulate_512:
conn_info['provider_geometry'] = ('%s %s' % (4096, 4096))
conn_info['provider_id'] = (
self._create_provider_id_string(vol_id, sfaccount['accountID']))
return conn_info
    def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None):
        """Wait for a volume to appear, then build its connection info.

        Re-lists the account's volumes until sf_volume_id shows up, for
        at most 600 iterations. NOTE(review): there is no sleep between
        iterations, so this is a busy-wait -- confirm whether a delay was
        intended.

        :raises exception.VolumeNotFound: if the volume never appears.
        """
        volume = None
        iteration_count = 0
        while not volume and iteration_count < 600:
            volume_list = self._get_volumes_by_sfaccount(
                sfaccount['accountID'], endpoint=endpoint)
            for v in volume_list:
                if v['volumeID'] == sf_volume_id:
                    volume = v
                    break
            iteration_count += 1
        if not volume:
            LOG.error('Failed to retrieve volume SolidFire-'
                      'ID: %s in get_by_account!', sf_volume_id)
            raise exception.VolumeNotFound(volume_id=sf_volume_id)
        model_update = self._build_connection_info(sfaccount, volume,
                                                   endpoint=endpoint)
        return model_update
    def _snapshot_discovery(self, src_uuid, params, vref):
        """Resolve a clone-source uuid into CloneVolume parameters.

        Checks SF snapshots first; when no snapshot matches, falls back
        to treating src_uuid as a volume (a true clone).

        :returns: (params, is_clone, sf_vol) -- is_clone is True only on
            the volume fallback, and sf_vol is that source volume (None
            on the snapshot path).
        :raises exception.VolumeNotFound: when neither matches.
        """
        # NOTE(jdg): First check the SF snapshots
        # if we don't find a snap by the given name, just move on to check
        # volumes. This may be a running system that was updated from
        # before we did snapshots, so need to check both
        is_clone = False
        sf_vol = None
        snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid)
        snaps = self._get_sf_snapshots()
        snap = next((s for s in snaps if s["name"] == snap_name), None)
        if snap:
            params['snapshotID'] = int(snap['snapshotID'])
            params['volumeID'] = int(snap['volumeID'])
            params['newSize'] = int(vref['size'] * units.Gi)
        else:
            sf_vol = self._get_sf_volume(src_uuid)
            if sf_vol is None:
                raise exception.VolumeNotFound(volume_id=src_uuid)
            params['volumeID'] = int(sf_vol['volumeID'])
            params['newSize'] = int(vref['size'] * units.Gi)
            is_clone = True
        return params, is_clone, sf_vol
    def _do_clone_volume(self, src_uuid,
                         vref, sf_src_snap=None):
        """Create a clone of an existing volume or snapshot.

        :param src_uuid: cinder uuid of the clone source.
        :param vref: destination cinder volume reference.
        :param sf_src_snap: optional SF snapshot dict to clone from,
            bypassing snapshot/volume discovery.
        :returns: (last API response, SF account, model update) tuple.
        :raises SolidFireAPIException: on unexpected API payloads.
        """
        LOG.debug("Creating cloned volume from vol %(src)s to %(dst)s.",
                  {'src': src_uuid, 'dst': vref.id})
        sf_account = self._get_create_account(vref['project_id'])
        params = {'name': '%(prefix)s%(id)s' %
                  {'prefix': self.configuration.sf_volume_prefix,
                   'id': vref['id']},
                  'newAccountID': sf_account['accountID']}
        is_clone = False
        if sf_src_snap:
            # In some scenarios we are passed the snapshot information that we
            # are supposed to clone.
            params['snapshotID'] = sf_src_snap['snapshotID']
            params['volumeID'] = sf_src_snap['volumeID']
            params['newSize'] = int(vref['size'] * units.Gi)
        else:
            params, is_clone, sf_src_vol = self._snapshot_discovery(
                src_uuid, params, vref)
        data = self._issue_api_request('CloneVolume', params, version='6.0')
        if (('result' not in data) or ('volumeID' not in data['result'])):
            msg = _("API response: %s") % data
            raise SolidFireAPIException(msg)
        sf_cloned_id = data['result']['volumeID']
        # NOTE(jdg): all attributes are copied via clone, need to do an update
        # to set any that were provided
        params = self._get_default_volume_params(vref, is_clone=is_clone)
        params['volumeID'] = sf_cloned_id
        data = self._issue_api_request('ModifyVolume', params)
        model_update = self._get_model_info(sf_account, sf_cloned_id)
        if model_update is None:
            mesg = _('Failed to get model update from clone')
            raise SolidFireAPIException(mesg)
        rep_settings = self._retrieve_replication_settings(vref)
        if self.replication_enabled and rep_settings:
            try:
                vref['volumeID'] = sf_cloned_id
                rep_updates = self._replicate_volume(
                    vref, params, sf_account, rep_settings)
                model_update.update(rep_updates)
            except SolidFireDriverException:
                # Replication setup failed: roll back the clone on the
                # source cluster, then re-raise.
                with excutils.save_and_reraise_exception():
                    self._issue_api_request('DeleteVolume',
                                            {'volumeID': sf_cloned_id})
                    self._issue_api_request('PurgeDeletedVolume',
                                            {'volumeID': sf_cloned_id})
        # Increment the usage count, just for data collection
        # We're only doing this for clones, not create_from snaps
        if is_clone:
            data = self._update_attributes(sf_src_vol)
        return (data, sf_account, model_update)
def _update_attributes(self, sf_vol):
cloned_count = sf_vol['attributes'].get('cloned_count', 0)
cloned_count += 1
attributes = sf_vol['attributes']
attributes['cloned_count'] = cloned_count
params = {'volumeID': int(sf_vol['volumeID'])}
params['attributes'] = attributes
return self._issue_api_request('ModifyVolume', params)
def _do_volume_create(self, sf_account, params, endpoint=None):
params['accountID'] = sf_account['accountID']
sf_volid = self._issue_api_request(
'CreateVolume', params, endpoint=endpoint)['result']['volumeID']
return self._get_model_info(sf_account, sf_volid, endpoint=endpoint)
def _do_snapshot_create(self, params):
model_update = {}
snapshot_id = self._issue_api_request(
'CreateSnapshot', params, version='6.0')['result']['snapshotID']
snaps = self._get_sf_snapshots()
snap = (
next((s for s in snaps if int(s["snapshotID"]) ==
int(snapshot_id)), None))
model_update['provider_id'] = (
self._create_provider_id_string(snap['snapshotID'],
snap['volumeID']))
return model_update
def _set_qos_presets(self, volume):
qos = {}
valid_presets = self.sf_qos_dict.keys()
# First look to see if they included a preset
presets = [i.value for i in volume.get('volume_metadata')
if i.key == 'sf-qos' and i.value in valid_presets]
if len(presets) > 0:
if len(presets) > 1:
LOG.warning('More than one valid preset was '
'detected, using %s', presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
for i in volume.get('volume_metadata'):
if i.key in self.sf_qos_keys:
qos[i.key] = int(i.value)
return qos
def _extract_sf_attributes_from_extra_specs(self, type_id):
# This will do a 1:1 copy of the extra spec keys that
# include the SolidFire delimeter into a Volume attribute
# K/V pair
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
sf_keys = []
for key, value in specs.items():
if "SFAttribute:" in key:
fields = key.split(':')
sf_keys.append({fields[1]: value})
return sf_keys
    def _set_qos_by_volume_type(self, ctxt, type_id, vol_size):
        """Compute QoS settings for a volume type, scaled and capped.

        QoS-specs associations take precedence over extra-specs. Keys of
        the form 'prefix:key' are stripped to their bare key. When a
        'scaledIOPS' key is present, min/max/burst are increased by
        scaleMin/scaleMax/scaleBurst per GiB above 1. Results are clamped
        to the driver's IOPS limits.

        :param ctxt: request context (admin context is used for qos
            specs lookups regardless).
        :param type_id: volume type id.
        :param vol_size: volume size in GiB, used for scaled QoS.
        :raises exception.InvalidQoSSpecs: unless
            minIOPS <= maxIOPS <= burstIOPS after scaling.
        """
        qos = {}
        scale_qos = {}
        volume_type = volume_types.get_volume_type(ctxt, type_id)
        qos_specs_id = volume_type.get('qos_specs_id')
        specs = volume_type.get('extra_specs')
        # NOTE(jdg): We prefer the qos_specs association
        # and over-ride any existing
        # extra-specs settings if present
        if qos_specs_id is not None:
            # Policy changes require admin context to get QoS specs
            # at the object layer (base:get_by_id), we can either
            # explicitly promote here, or pass in a context of None
            # and let the qos_specs api get an admin context for us
            # personally I prefer explicit, so here ya go.
            admin_ctxt = context.get_admin_context()
            kvs = qos_specs.get_qos_specs(admin_ctxt, qos_specs_id)['specs']
        else:
            kvs = specs
        for key, value in kvs.items():
            if ':' in key:
                fields = key.split(':')
                key = fields[1]
            if key in self.sf_qos_keys:
                qos[key] = int(value)
            if key in self.sf_scale_qos_keys:
                scale_qos[key] = value
        # look for the 'scaledIOPS' key and scale QoS if set
        if 'scaledIOPS' in scale_qos:
            scale_qos.pop('scaledIOPS')
            for key, value in scale_qos.items():
                if key == 'scaleMin':
                    qos['minIOPS'] = (qos['minIOPS'] +
                                      (int(value) * (vol_size - 1)))
                elif key == 'scaleMax':
                    qos['maxIOPS'] = (qos['maxIOPS'] +
                                      (int(value) * (vol_size - 1)))
                elif key == 'scaleBurst':
                    qos['burstIOPS'] = (qos['burstIOPS'] +
                                        (int(value) * (vol_size - 1)))
        # Cap the IOPS values at their limits
        capped = False
        for key, value in qos.items():
            if value > self.sf_iops_lim_max[key]:
                qos[key] = self.sf_iops_lim_max[key]
                capped = True
            if value < self.sf_iops_lim_min[key]:
                qos[key] = self.sf_iops_lim_min[key]
                capped = True
        if capped:
            LOG.debug("A SolidFire QoS value was capped at the defined limits")
        # Check that minIOPS <= maxIOPS <= burstIOPS
        if (qos.get('minIOPS', 0) > qos.get('maxIOPS', 0) or
                qos.get('maxIOPS', 0) > qos.get('burstIOPS', 0)):
            msg = (_("Scaled QoS error. Must be minIOPS <= maxIOPS <= "
                     "burstIOPS. Currently: Min: %(min)s, Max: "
                     "%(max)s, Burst: %(burst)s.") %
                   {"min": qos['minIOPS'],
                    "max": qos['maxIOPS'],
                    "burst": qos['burstIOPS']})
            raise exception.InvalidQoSSpecs(reason=msg)
        return qos
    def _get_sf_volume(self, uuid, params=None, endpoint=None):
        """Find the SF volume whose name or uuid attribute matches.

        With params, searches active volumes of one account
        (ListVolumesForAccount); otherwise scans every active volume on
        the cluster.

        :param uuid: cinder uuid to match against the volume name or its
            'uuid' attribute.
        :param params: optional ListVolumesForAccount parameters.
        :returns: the matching volume dict, or None when nothing matches
            (a delete cleanup may legitimately find nothing).
        :raises DuplicateSfVolumeNames: when several volumes match.
        """
        if params:
            vols = [v for v in self._issue_api_request(
                'ListVolumesForAccount',
                params)['result']['volumes'] if v['status'] == "active"]
        else:
            vols = self._issue_api_request(
                'ListActiveVolumes', params,
                endpoint=endpoint)['result']['volumes']
        found_count = 0
        sf_volref = None
        for v in vols:
            # NOTE(jdg): In the case of "name" we can't
            # update that on manage/import, so we use
            # the uuid attribute
            meta = v.get('attributes')
            alt_id = ''
            if meta:
                alt_id = meta.get('uuid', '')
            if uuid in v['name'] or uuid in alt_id:
                found_count += 1
                sf_volref = v
                LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
                          "to cinder ID %(uuid)s.",
                          {'volume_id': v['volumeID'], 'uuid': uuid})
        if found_count == 0:
            # NOTE(jdg): Previously we would raise here, but there are cases
            # where this might be a cleanup for a failed delete.
            # Until we get better states we'll just log an error
            LOG.error("Volume %s, not found on SF Cluster.", uuid)
        if found_count > 1:
            LOG.error("Found %(count)s volumes mapped to id: %(uuid)s.",
                      {'count': found_count,
                       'uuid': uuid})
            raise DuplicateSfVolumeNames(vol_name=uuid)
        return sf_volref
def _get_sf_snapshots(self, sf_volid=None):
params = {}
if sf_volid:
params = {'volumeID': sf_volid}
return self._issue_api_request(
'ListSnapshots', params, version='6.0')['result']['snapshots']
def _get_sfaccounts_for_tenant(self, cinder_project_id, endpoint=None):
accounts = self._issue_api_request(
'ListAccounts', {}, endpoint=endpoint)['result']['accounts']
# Note(jdg): On SF we map account-name to OpenStack's tenant ID
# we use tenantID in here to get secondaries that might exist
# Also: we expect this to be sorted, so we get the primary first
# in the list
return sorted([acc for acc in accounts if
cinder_project_id in acc['username']],
key=lambda k: k['accountID'])
def _get_all_active_volumes(self, cinder_uuid=None):
params = {}
volumes = self._issue_api_request('ListActiveVolumes',
params)['result']['volumes']
if cinder_uuid:
vols = ([v for v in volumes if
cinder_uuid in v.name])
else:
vols = [v for v in volumes]
return vols
def _get_all_deleted_volumes(self, cinder_uuid=None):
params = {}
vols = self._issue_api_request('ListDeletedVolumes',
params)['result']['volumes']
if cinder_uuid:
deleted_vols = ([v for v in vols if
cinder_uuid in v['name']])
else:
deleted_vols = [v for v in vols]
return deleted_vols
def _get_account_create_availability(self, accounts, endpoint=None):
# we'll check both the primary and the secondary
# if it exists and return whichever one has count
# available.
for acc in accounts:
if len(self._get_volumes_for_account(
acc['accountID'],
endpoint=endpoint)) < self.max_volumes_per_account:
return acc
if len(accounts) == 1:
sfaccount = self._create_sfaccount(accounts[0]['username'] + '_',
endpoint=endpoint)
return sfaccount
return None
def _get_create_account(self, proj_id, endpoint=None):
# Retrieve SolidFire accountID to be used for creating volumes.
sf_accounts = self._get_sfaccounts_for_tenant(
proj_id, endpoint=endpoint)
if not sf_accounts:
sf_account_name = self._get_sf_account_name(proj_id)
sf_account = self._create_sfaccount(
sf_account_name, endpoint=endpoint)
else:
# Check availability for creates
sf_account = self._get_account_create_availability(
sf_accounts, endpoint=endpoint)
if not sf_account:
msg = _('Volumes/account exceeded on both primary and '
'secondary SolidFire accounts.')
raise SolidFireDriverException(msg)
return sf_account
def _create_vag(self, iqn, vol_id=None):
"""Create a volume access group(vag).
Returns the vag_id.
"""
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
params = {'name': vag_name,
'initiators': [iqn],
'volumes': [vol_id],
'attributes': {'openstack': True}}
try:
result = self._issue_api_request('CreateVolumeAccessGroup',
params,
version='7.0')
return result['result']['volumeAccessGroupID']
except SolidFireAPIException as error:
if xExceededLimit in error.msg:
if iqn in error.msg:
# Initiator double registered.
return self._safe_create_vag(iqn, vol_id)
else:
# VAG limit reached. Purge and start over.
self._purge_vags()
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _safe_create_vag(self, iqn, vol_id=None):
# Potential race condition with simultaneous volume attaches to the
# same host. To help avoid this, VAG creation makes a best attempt at
# finding and using an existing VAG.
vags = self._get_vags_by_name(iqn)
if vags:
# Filter through the vags and find the one with matching initiator
vag = next((v for v in vags if iqn in v['initiators']), None)
if vag:
return vag['volumeAccessGroupID']
else:
# No matches, use the first result, add initiator IQN.
vag_id = vags[0]['volumeAccessGroupID']
return self._add_initiator_to_vag(iqn, vag_id)
return self._create_vag(iqn, vol_id)
def _base_get_vags(self):
params = {}
vags = self._issue_api_request(
'ListVolumeAccessGroups',
params,
version='7.0')['result']['volumeAccessGroups']
return vags
def _get_vags_by_name(self, iqn):
"""Retrieve SolidFire volume access group objects by name.
Returns an array of vags with a matching name value.
Returns an empty array if there are no matches.
"""
vags = self._base_get_vags()
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
matching_vags = [vag for vag in vags if vag['name'] == vag_name]
return matching_vags
def _get_vags_by_volume(self, vol_id):
params = {"volumeID": vol_id}
vags = self._issue_api_request(
'GetVolumeStats',
params)['result']['volumeStats']['volumeAccessGroups']
return vags
def _add_initiator_to_vag(self, iqn, vag_id):
# Added a vag_id return as there is a chance that we might have to
# create a new VAG if our target VAG is deleted underneath us.
params = {"initiators": [iqn],
"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('AddInitiatorsToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
# No locking means sometimes a VAG can be removed by a parallel
# volume detach against the same host.
return self._safe_create_vag(iqn)
else:
raise
def _add_volume_to_vag(self, vol_id, iqn, vag_id):
# Added a vag_id return to be consistent with add_initiator_to_vag. It
# isn't necessary but may be helpful in the future.
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('AddVolumesToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _remove_volume_from_vag(self, vol_id, vag_id):
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('RemoveVolumesFromVolumeAccessGroup',
params,
version='7.0')
except SolidFireAPIException as error:
if xNotInVolumeAccessGroup in error.msg:
pass
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
pass
else:
raise
def _remove_volume_from_vags(self, vol_id):
# Due to all sorts of uncertainty around multiattach, on volume
# deletion we make a best attempt at removing the vol_id from VAGs.
vags = self._get_vags_by_volume(vol_id)
for vag in vags:
self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID'])
def _remove_vag(self, vag_id):
params = {"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('DeleteVolumeAccessGroup',
params,
version='7.0')
except SolidFireAPIException as error:
if xVolumeAccessGroupIDDoesNotExist not in error.msg:
raise
def _purge_vags(self, limit=10):
# Purge up to limit number of VAGs that have no active volumes,
# initiators, and an OpenStack attribute. Purge oldest VAGs first.
vags = self._base_get_vags()
targets = [v for v in vags if v['volumes'] == [] and
v['initiators'] == [] and
v['deletedVolumes'] == [] and
v['attributes'].get('openstack')]
sorted_targets = sorted(targets,
key=lambda k: k['volumeAccessGroupID'])
for vag in sorted_targets[:limit]:
self._remove_vag(vag['volumeAccessGroupID'])
    @locked_image_id_operation
    def clone_image(self, context,
                    volume, image_location,
                    image_meta, image_service):
        """Clone an existing image volume.

        Only proceeds when the image is public or owned by the volume's
        tenant; otherwise, or when no image-volume exists to clone from,
        returns (None, False) so cinder falls back to a normal create.

        :returns: (model_update, True) on success, (None, False)
            otherwise.
        """
        public = False
        # NOTE(jdg): Glance V2 moved from is_public to visibility
        # so we check both, as we don't necessarily know or want
        # to care which we're using.  Will need to look at
        # future handling of things like shared and community
        # but for now, it's owner or public and that's it
        visibility = image_meta.get('visibility', None)
        if visibility and visibility == 'public':
            public = True
        elif image_meta.get('is_public', False):
            public = True
        else:
            if image_meta['owner'] == volume['project_id']:
                public = True
        if not public:
            LOG.warning("Requested image is not "
                        "accessible by current Tenant.")
            return None, False
        # If we don't have the image-volume to clone from return failure
        # cinder driver will then create source for clone first
        try:
            (data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
                                                             volume)
        except exception.VolumeNotFound:
            return None, False
        return model, True
# extended_size > 0 when we are extending a volume
def _retrieve_qos_setting(self, volume, extended_size=0):
qos = {}
if (self.configuration.sf_allow_tenant_qos and
volume.get('volume_metadata')is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id,
extended_size if extended_size
> 0 else volume.get('size'))
return qos
    def _get_default_volume_params(self, volume, is_clone=False):
        """Build the standard CreateVolume/ModifyVolume parameter dict.

        Resolves the SF account and QoS for the volume, stamps identifying
        attributes (uuid, creation time, display name, SFAttribute extra
        specs) and sizes the volume in bytes.

        :param volume: cinder volume object.
        :param is_clone: recorded in the volume's attributes.
        :returns: params dict ready for the SolidFire API.
        """
        sf_account = self._get_create_account(volume.project_id)
        qos = self._retrieve_qos_setting(volume)
        create_time = volume.created_at.isoformat()
        attributes = {
            'uuid': volume.id,
            'is_clone': is_clone,
            'created_at': create_time,
            'cinder-name': volume.get('display_name', "")
        }
        # Copy any 'SFAttribute:<name>' extra specs from the volume type.
        if volume.volume_type_id:
            for attr in self._extract_sf_attributes_from_extra_specs(
                    volume.volume_type_id):
                for k, v in attr.items():
                    attributes[k] = v
        vol_name = '%s%s' % (self.configuration.sf_volume_prefix, volume.id)
        params = {'name': vol_name,
                  'accountID': sf_account['accountID'],
                  'sliceCount': 1,
                  'totalSize': int(volume.size * units.Gi),
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}
        return params
    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported.  Note that the new volume is immediately
        ready for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        :returns: model update dict (connection info, provider_id and,
            when replication applies, replication_status).
        """
        sf_account = self._get_create_account(volume['project_id'])
        params = self._get_default_volume_params(volume)
        # NOTE(jdg): Check if we're a migration tgt, if so
        # use the old volume-id here for the SF Name
        migration_status = volume.get('migration_status', None)
        if migration_status and 'target' in migration_status:
            k, v = migration_status.split(':')
            vname = '%s%s' % (self.configuration.sf_volume_prefix, v)
            params['name'] = vname
            params['attributes']['migration_uuid'] = volume['id']
            params['attributes']['uuid'] = v
        model_update = self._do_volume_create(sf_account, params)
        try:
            rep_settings = self._retrieve_replication_settings(volume)
            if self.replication_enabled and rep_settings:
                # provider_id is '<volID> <acctID> <clusterID>'.
                volume['volumeID'] = (
                    int(model_update['provider_id'].split()[0]))
                rep_updates = self._replicate_volume(volume, params,
                                                     sf_account, rep_settings)
                if rep_updates:
                    model_update.update(rep_updates)
        except SolidFireAPIException:
            # NOTE(jdg): Something went wrong after the source create, due to
            # the way TFLOW works and it's insistence on retrying the same
            # command over and over coupled with the fact that the introduction
            # of objects now sets host to None on failures we'll end up with an
            # orphaned volume on the backend for every one of these segments
            # that fail, for n-retries.  Sad Sad Panda!!  We'll just do it
            # ourselves until we can get a general fix in Cinder further up the
            # line
            with excutils.save_and_reraise_exception():
                sf_volid = int(model_update['provider_id'].split()[0])
                self._issue_api_request('DeleteVolume', {'volumeID': sf_volid})
                self._issue_api_request('PurgeDeletedVolume',
                                        {'volumeID': sf_volid})
        return model_update
    def _retrieve_replication_settings(self, volume):
        """Return replication options for the volume's type.

        NOTE(review): the "Async" string is only returned for volumes
        without a volume_type_id; callers otherwise receive the dict from
        _set_rep_by_volume_type ({} when replication is disabled).
        Confirm callers handle both forms -- they appear to use it only
        in truthiness checks and 'rep_type' lookups.
        """
        rep_data = "Async"
        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            rep_data = self._set_rep_by_volume_type(ctxt, type_id)
        return rep_data
def _set_rep_by_volume_type(self, ctxt, type_id):
rep_modes = ['Async', 'Sync', 'SnapshotsOnly']
rep_opts = {}
type_ref = volume_types.get_volume_type(ctxt, type_id)
specs = type_ref.get('extra_specs')
if specs.get('replication_enabled', "") == "<is> True":
if specs.get('solidfire:replication_mode') in rep_modes:
rep_opts['rep_type'] = specs.get('solidfire:replication_mode')
else:
rep_opts['rep_type'] = 'Async'
return rep_opts
    def _replicate_volume(self, volume, params,
                          parent_sfaccount, rep_info):
        """Set up replication of a volume to the remote cluster.

        Creates (or reuses) the tenant account on the target, creates a
        matching volume there in replicationTarget mode, then pairs the
        source and target volumes. Pairing is retried because clusters
        sometimes reject freshly generated pairing keys.

        :param volume: source volume; must carry 'volumeID'.
        :param params: creation params reused for the remote volume.
        :param parent_sfaccount: source account supplying CHAP secrets.
        :param rep_info: dict with 'rep_type' (pairing mode).
        :returns: dict with 'replication_status' set to enabled.
        """
        updates = {}
        rep_success_status = fields.ReplicationStatus.ENABLED
        # NOTE(erlon): Right now we only support 1 remote target so, we always
        # get cluster_pairs[0]
        tgt_endpoint = self.cluster_pairs[0]['endpoint']
        LOG.debug("Replicating volume on remote cluster: %(tgt)s\n params: "
                  "%(params)s", {'tgt': tgt_endpoint, 'params': params})
        params['username'] = self._get_sf_account_name(volume['project_id'])
        try:
            params['initiatorSecret'] = parent_sfaccount['initiatorSecret']
            params['targetSecret'] = parent_sfaccount['targetSecret']
            self._issue_api_request(
                'AddAccount',
                params,
                endpoint=tgt_endpoint)['result']['accountID']
        except SolidFireAPIException as ex:
            # Account already existing on the target is fine.
            if 'xDuplicateUsername' not in ex.msg:
                raise
        remote_account = (
            self._get_sfaccount_by_name(params['username'],
                                        endpoint=tgt_endpoint))
        # Create the volume on the remote cluster w/same params as original
        params['accountID'] = remote_account['accountID']
        LOG.debug("Create remote volume on: %(endpoint)s with account: "
                  "%(account)s",
                  {'endpoint': tgt_endpoint['url'], 'account': remote_account})
        model_update = self._do_volume_create(
            remote_account, params, endpoint=tgt_endpoint)
        tgt_sfid = int(model_update['provider_id'].split()[0])
        params = {'volumeID': tgt_sfid, 'access': 'replicationTarget'}
        self._issue_api_request('ModifyVolume',
                                params,
                                '8.0',
                                endpoint=tgt_endpoint)
        # NOTE(erlon): For some reason the SF cluster randomly fail the
        # replication of volumes. The generated keys are deemed invalid by the
        # target backend. When that happens, we re-start the volume pairing
        # process.
        @retry(SolidFireReplicationPairingError, tries=6)
        def _pair_volumes():
            # Enable volume pairing
            LOG.debug("Start volume pairing on volume ID: %s",
                      volume['volumeID'])
            # Make sure we split any pair the volume have
            params = {'volumeID': volume['volumeID'],
                      'mode': rep_info['rep_type']}
            self._issue_api_request('RemoveVolumePair', params, '8.0')
            rep_key = self._issue_api_request(
                'StartVolumePairing', params,
                '8.0')['result']['volumePairingKey']
            params = {'volumeID': tgt_sfid,
                      'volumePairingKey': rep_key}
            LOG.debug("Sending issue CompleteVolumePairing request on remote: "
                      "%(endpoint)s, %(parameters)s",
                      {'endpoint': tgt_endpoint['url'], 'parameters': params})
            self._issue_api_request('CompleteVolumePairing',
                                    params,
                                    '8.0',
                                    endpoint=tgt_endpoint)
        try:
            _pair_volumes()
        except SolidFireAPIException:
            # Pairing failed even after retries: remove the target-side
            # volume before re-raising so nothing is orphaned remotely.
            with excutils.save_and_reraise_exception():
                params = {'volumeID': tgt_sfid}
                LOG.debug("Error pairing volume on remote cluster. Rolling "
                          "back and deleting volume %(vol)s at cluster "
                          "%(cluster)s.",
                          {'vol': tgt_sfid, 'cluster': tgt_endpoint})
                self._issue_api_request('DeleteVolume', params,
                                        endpoint=tgt_endpoint)
                self._issue_api_request('PurgeDeletedVolume', params,
                                        endpoint=tgt_endpoint)
        updates['replication_status'] = rep_success_status
        LOG.debug("Completed volume pairing.")
        return updates
    def _disable_replication(self, volume):
        """Tear down replication for a volume and delete its remote copy.

        Removes the pair on both clusters, then deletes and purges the
        target-side volume.

        NOTE(review): the guard fires whenever the pair count differs
        from 1 (including >1), though the warning text only mentions the
        no-pairs case -- confirm multiple pairs cannot occur here.

        :returns: model update with replication_status disabled.
        """
        updates = {}
        tgt_endpoint = self.cluster_pairs[0]['endpoint']
        sfvol = self._get_sfvol_by_cinder_vref(volume)
        if len(sfvol['volumePairs']) != 1:
            LOG.warning("Trying to disable replication on volume %s but "
                        "volume does not have pairs.", volume.id)
            updates['replication_status'] = fields.ReplicationStatus.DISABLED
            return updates
        params = {'volumeID': sfvol['volumeID']}
        self._issue_api_request('RemoveVolumePair', params, '8.0')
        remote_sfid = sfvol['volumePairs'][0]['remoteVolumeID']
        params = {'volumeID': remote_sfid}
        self._issue_api_request('RemoveVolumePair',
                                params, '8.0', endpoint=tgt_endpoint)
        self._issue_api_request('DeleteVolume', params,
                                endpoint=tgt_endpoint)
        self._issue_api_request('PurgeDeletedVolume', params,
                                endpoint=tgt_endpoint)
        updates['replication_status'] = fields.ReplicationStatus.DISABLED
        return updates
    @locked_source_id_operation
    def create_cloned_volume(self, volume, source):
        """Create a clone of an existing volume.

        Serialized per source id by the decorator. Delegates to
        _do_clone_volume and returns only the model update portion.
        """
        (_data, _sfaccount, model) = self._do_clone_volume(
            source['id'],
            volume)

        return model
    def delete_volume(self, volume):
        """Delete SolidFire Volume from device.

        SolidFire allows multiple volumes with same name,
        volumeID is what's guaranteed unique.

        Any replicated copies on paired clusters are deleted first, the
        volume is removed from VAGs when multiattach, and finally the
        volume is deleted and purged. A volume that is not found is only
        logged, not raised, to allow cleanup of failed deletes.
        """
        sf_vol = self._get_sfvol_by_cinder_vref(volume)
        if sf_vol is not None:
            for vp in sf_vol.get('volumePairs', []):
                LOG.debug("Deleting paired volume on remote cluster...")
                pair_id = vp['clusterPairID']
                for cluster in self.cluster_pairs:
                    if cluster['clusterPairID'] == pair_id:
                        params = {'volumeID': vp['remoteVolumeID']}
                        LOG.debug("Issue Delete request on cluster: "
                                  "%(remote)s with params: %(parameters)s",
                                  {'remote': cluster['endpoint']['url'],
                                   'parameters': params})
                        self._issue_api_request('DeleteVolume', params,
                                                endpoint=cluster['endpoint'])
                        self._issue_api_request('PurgeDeletedVolume', params,
                                                endpoint=cluster['endpoint'])
            # The multiattach volumes are only removed from the VAG on
            # deletion.
            if volume.get('multiattach'):
                self._remove_volume_from_vags(sf_vol['volumeID'])
            if sf_vol['status'] == 'active':
                params = {'volumeID': sf_vol['volumeID']}
                self._issue_api_request('DeleteVolume', params)
                self._issue_api_request('PurgeDeletedVolume', params)
        else:
            LOG.error("Volume ID %s was not found on "
                      "the SolidFire Cluster while attempting "
                      "delete_volume operation!", volume['id'])
    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot from the SolidFire cluster.

        Searches every SF account mapped to the snapshot's tenant for the
        owning volume, then deletes the snapshot by name. If nothing
        matches, only a warning is logged (pre-snapshot "old style"
        clones leave nothing to delete here).
        """
        sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
                                 snapshot['id'])
        accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
        snap = None
        for acct in accounts:
            params = {'accountID': acct['accountID']}
            sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
            if sf_vol:
                sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
                snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
                            None)
                if snap:
                    params = {'snapshotID': snap['snapshotID']}
                    self._issue_api_request('DeleteSnapshot',
                                            params,
                                            version='6.0')
                    return
        LOG.warning(
            "Snapshot %s not found, old style clones may not be deleted.",
            snapshot.id)
def create_snapshot(self, snapshot):
    """Create a snapshot of a volume on the SolidFire cluster.

    :param snapshot: Cinder snapshot; ``project_id`` selects the tenant
                     account and ``volume_id`` the source volume.
    :returns: model update from the snapshot-create helper.
    :raises SolidFireAPIException: if no account exists for the tenant.
    :raises VolumeNotFound: if the source volume is not on the cluster.
    """
    sfaccount = self._get_sfaccount(snapshot['project_id'])
    if sfaccount is None:
        LOG.error("Account for Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "create_snapshot operation!", snapshot['volume_id'])
        # BUG FIX: previously we only logged here and then dereferenced
        # the None account below, crashing with a TypeError. Fail
        # explicitly instead, mirroring the unmanage() error path.
        raise SolidFireAPIException(_("Failed to find account "
                                      "for volume."))
    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(snapshot['volume_id'], params)

    if sf_vol is None:
        raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
    params = {'volumeID': sf_vol['volumeID'],
              'name': '%s%s' % (self.configuration.sf_volume_prefix,
                                snapshot['id'])}

    # Replicated volumes get their snapshots mirrored to the remote too.
    rep_settings = self._retrieve_replication_settings(snapshot.volume)
    if self.replication_enabled and rep_settings:
        params['enableRemoteReplication'] = True

    return self._do_snapshot_create(params)
@locked_source_id_operation
def create_volume_from_snapshot(self, volume, source):
    """Create a volume from the specified snapshot."""
    if source.get('group_snapshot_id'):
        # We're creating a volume from a snapshot that resulted from a
        # consistency group snapshot. Because of the way that SolidFire
        # creates cgsnaps, we have to search for the correct snapshot.
        group_snapshot_id = source.get('group_snapshot_id')
        snapshot_id = source.get('volume_id')
        sf_name = self.configuration.sf_volume_prefix + group_snapshot_id
        sf_group_snap = self._get_group_snapshot_by_name(sf_name)
        return self._create_clone_from_sf_snapshot(snapshot_id,
                                                   group_snapshot_id,
                                                   sf_group_snap,
                                                   volume)

    # Plain (non-group) snapshot: a straight clone of the source.
    (_data, _sfaccount, model) = self._do_clone_volume(
        source['id'],
        volume)

    return model
# Consistency group helpers
def _sf_create_group_snapshot(self, name, sf_volumes):
    """Snapshot all given SolidFire volumes under a single group name."""
    params = {
        'name': name,
        'volumes': [vol['volumeID'] for vol in sf_volumes],
    }
    response = self._issue_api_request('CreateGroupSnapshot',
                                       params,
                                       version='7.0')
    return response['result']
def _group_snapshot_creator(self, gsnap_name, src_vol_ids):
    """Create a SolidFire group snapshot from OpenStack volume UUIDs.

    Each UUID is mapped to a backend volume by its prefixed name; all
    UUIDs must resolve or the operation is aborted.
    """
    wanted_names = [self.configuration.sf_volume_prefix + uuid
                    for uuid in src_vol_ids]
    matched = [sf_vol for sf_vol in self._get_all_active_volumes()
               if sf_vol['name'] in wanted_names]

    if len(src_vol_ids) != len(matched):
        msg = (_("Retrieved a different amount of SolidFire volumes for "
                 "the provided Cinder volumes. Retrieved: %(ret)s "
                 "Desired: %(des)s") % {"ret": len(matched),
                                        "des": len(src_vol_ids)})
        raise SolidFireDriverException(msg)

    return self._sf_create_group_snapshot(gsnap_name, matched)
def _create_temp_group_snapshot(self, source_cg, source_vols):
    """Take a temporary group snapshot used to clone a new CG.

    Returns the generated snapshot name; the caller is responsible for
    deleting it afterwards.
    """
    gsnap_name = "%s%s-tmp" % (self.configuration.sf_volume_prefix,
                               source_cg['id'])
    member_ids = [vol['id'] for vol in source_vols]
    self._group_snapshot_creator(gsnap_name, member_ids)
    return gsnap_name
def _list_group_snapshots(self):
    """Return every group snapshot known to the cluster."""
    reply = self._issue_api_request('ListGroupSnapshots', {},
                                    version='7.0')
    return reply['result']['groupSnapshots']
def _get_group_snapshot_by_name(self, name):
    """Find a group snapshot by exact name; None if there is no match."""
    for candidate in self._list_group_snapshots():
        if candidate['name'] == name:
            return candidate
    return None
def _delete_group_snapshot(self, gsnapid):
    """Remove the group snapshot with the given SolidFire ID."""
    self._issue_api_request('DeleteGroupSnapshot',
                            {'groupSnapshotID': gsnapid},
                            version='7.0')
def _delete_cgsnapshot_by_name(self, snap_name):
    """Look up a group snapshot by name and delete it."""
    found = self._get_group_snapshot_by_name(snap_name)
    if not found:
        msg = _("Failed to find group snapshot named: %s") % snap_name
        raise SolidFireDriverException(msg)
    self._delete_group_snapshot(found['groupSnapshotID'])
def _find_linked_snapshot(self, target_uuid, group_snap):
    """Pick the member of a group snapshot that backs one source volume.

    Every member of a SolidFire group snapshot shares the group's name,
    so the only way to identify the right member is to match its
    volumeID against the SolidFire volume backing the Cinder source.
    """
    source_vol = self._get_sf_volume(target_uuid)
    for member in group_snap['members']:
        if member['volumeID'] == source_vol['volumeID']:
            return member
    return None
def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid,
                                   sf_group_snap, vol):
    """Clone a new volume off the correct member of a group snapshot."""
    backing_snap = self._find_linked_snapshot(target_uuid, sf_group_snap)
    _data, _sfaccount, model = self._do_clone_volume(src_uuid, vol,
                                                     backing_snap)
    model.update({'id': vol['id'], 'status': 'available'})
    return model
def _map_sf_volumes(self, cinder_volumes, endpoint=None):
    """Get a list of SolidFire volumes.

    Creates a list of SolidFire volumes based
    on matching a list of cinder volume ID's,
    also adds an 'cinder_id' key to match cinder.
    """
    vols = self._issue_api_request(
        'ListActiveVolumes', {},
        endpoint=endpoint)['result']['volumes']
    # FIXME(erlon): When we fetch only for the volume name, we miss
    # volumes that where brought to Cinder via cinder-manage.
    # NOTE: this is a substring match (Cinder UUID contained in the SF
    # volume name), relying on the prefixed naming convention.
    vlist = (
        [sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in
         sfvol['name']])
    for v in vlist:
        # Strip the configured prefix to recover the Cinder UUID.
        v['cinder_id'] = v['name'].split(
            self.configuration.sf_volume_prefix)[1]
    return vlist
# Generic Volume Groups.
def create_group(self, ctxt, group):
    """Create a group; only CG-snapshot-typed groups are supported.

    SolidFire has no native volume-group object, so for consistency
    groups this is a no-op that simply reports success. Anything else
    is left to the generic implementation.
    """
    if not volume_utils.is_group_a_cg_snapshot_type(group):
        # Blatantly ripping off this pattern from other drivers.
        raise NotImplementedError()
    return {'status': fields.GroupStatus.AVAILABLE}
def create_group_from_src(self, ctxt, group, volumes, group_snapshots=None,
                          snapshots=None, source_group=None,
                          source_vols=None):
    """Create a group from a source; a pass-through for CG-typed groups."""
    if not volume_utils.is_group_a_cg_snapshot_type(group):
        # Default implementation handles other scenarios.
        raise NotImplementedError()
    return self._create_consistencygroup_from_src(
        ctxt, group, volumes, group_snapshots,
        snapshots, source_group, source_vols)
def create_group_snapshot(self, ctxt, group_snapshot, snapshots):
    """Snapshot a group via the legacy consistency-group path."""
    if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
        # Default implementation handles other scenarios.
        raise NotImplementedError()
    return self._create_cgsnapshot(ctxt, group_snapshot, snapshots)
def delete_group(self, ctxt, group, volumes):
    """Delete a CG-typed group by removing its member volumes.

    SolidFire tracks no group object, but the member volumes are real
    and must be removed.
    """
    if not volume_utils.is_group_a_cg_snapshot_type(group):
        # Default implementation handles other scenarios.
        raise NotImplementedError()
    return self._delete_consistencygroup(ctxt, group, volumes)
def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None):
    """Update group membership; effectively a no-op for CG-typed groups."""
    if not volume_utils.is_group_a_cg_snapshot_type(group):
        # Default implementation handles other scenarios.
        raise NotImplementedError()
    return self._update_consistencygroup(ctxt, group,
                                         add_volumes, remove_volumes)
def _create_consistencygroup_from_src(self, ctxt, group, volumes,
                                      cgsnapshot, snapshots,
                                      source_cg, source_vols):
    """Create a CG's volumes from either a cgsnapshot or a source CG.

    Exactly one of the pairs (cgsnapshot, snapshots) or
    (source_cg, source_vols) is expected to be truthy; if neither is,
    this implicitly returns None.
    """
    if cgsnapshot and snapshots:
        sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
        sf_group_snap = self._get_group_snapshot_by_name(sf_name)

        # Go about creating volumes from provided snaps.
        vol_models = []
        for vol, snap in zip(volumes, snapshots):
            vol_models.append(self._create_clone_from_sf_snapshot(
                snap['volume_id'],
                snap['id'],
                sf_group_snap,
                vol))
        return ({'status': fields.GroupStatus.AVAILABLE},
                vol_models)

    elif source_cg and source_vols:
        # Create temporary group snapshot.
        gsnap_name = self._create_temp_group_snapshot(source_cg,
                                                      source_vols)
        try:
            sf_group_snap = self._get_group_snapshot_by_name(gsnap_name)
            # For each temporary snapshot clone the volume.
            vol_models = []
            for vol in volumes:
                vol_models.append(self._create_clone_from_sf_snapshot(
                    vol['source_volid'],
                    vol['source_volid'],
                    sf_group_snap,
                    vol))
        finally:
            # Always drop the temporary snapshot, even if cloning failed.
            self._delete_cgsnapshot_by_name(gsnap_name)
        return {'status': fields.GroupStatus.AVAILABLE}, vol_models
def _create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
    """Take one SolidFire group snapshot covering all member volumes.

    :raises SolidFireDriverException: if any member volume of the
        cgsnapshot cannot be found among the cluster's active volumes.
    """
    vol_ids = [snapshot['volume_id'] for snapshot in snapshots]
    vol_names = [self.configuration.sf_volume_prefix + vol_id
                 for vol_id in vol_ids]
    active_sf_vols = self._get_all_active_volumes()
    target_vols = [vol for vol in active_sf_vols
                   if vol['name'] in vol_names]

    # Require an exact mapping: a partial group snapshot would not be
    # consistent across the group.
    if len(snapshots) != len(target_vols):
        msg = (_("Retrieved a different amount of SolidFire volumes for "
                 "the provided Cinder snapshots. Retrieved: %(ret)s "
                 "Desired: %(des)s") % {"ret": len(target_vols),
                                        "des": len(snapshots)})
        raise SolidFireDriverException(msg)

    snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
    self._sf_create_group_snapshot(snap_name, target_vols)
    return None, None
def _update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
# Similar to create_consistencygroup, SolidFire's lack of a consistency
# group object means there is nothing to update on the cluster.
return None, None, None
def _delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
    """Delete the backend group snapshot behind a cgsnapshot."""
    prefixed_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
    self._delete_cgsnapshot_by_name(prefixed_name)
    return None, None
def delete_group_snapshot(self, context, group_snapshot, snapshots):
    """Delete a group snapshot via the legacy cgsnapshot path."""
    if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
        # Default implementation handles other scenarios.
        raise NotImplementedError()
    return self._delete_cgsnapshot(context, group_snapshot, snapshots)
def _delete_consistencygroup(self, ctxt, group, volumes):
    """Delete each member volume; the group itself has no backend object."""
    # TODO(chris_morrell): exception handling and return correctly updated
    # volume_models.
    for member in volumes:
        self.delete_volume(member)
    return None, None
def get_volume_stats(self, refresh=False):
    """Return cluster statistics (mostly cluster-wide, not per-volume).

    When ``refresh`` is True the cached stats are re-fetched first;
    a backend API failure leaves the cached stats in place.
    """
    if refresh:
        try:
            self._update_cluster_status()
        except SolidFireAPIException:
            # Serve the cached stats when the cluster is unreachable.
            pass
    LOG.debug("SolidFire cluster_stats: %s", self.cluster_stats)
    return self.cluster_stats
def extend_volume(self, volume, new_size):
    """Extend an existing volume.

    Resizes the SolidFire volume to ``new_size`` GiB and re-applies QoS
    for the new size; with replication active, the single remote pair
    is resized too.

    :raises VolumeNotFound: if the volume is missing, or if replication
        is enabled but exactly one remote pair cannot be located.
    """
    sfaccount = self._get_sfaccount(volume['project_id'])
    params = {'accountID': sfaccount['accountID']}

    sf_vol = self._get_sf_volume(volume['id'], params)

    if sf_vol is None:
        LOG.error("Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "extend_volume operation!", volume['id'])
        raise exception.VolumeNotFound(volume_id=volume['id'])

    # QoS may be size-dependent, so recompute it for the new size.
    qos = self._retrieve_qos_setting(volume, new_size)
    params = {
        'volumeID': sf_vol['volumeID'],
        'totalSize': int(new_size * units.Gi),
        'qos': qos
    }
    self._issue_api_request('ModifyVolume',
                            params, version='5.0')

    rep_settings = self._retrieve_replication_settings(volume)
    if self.replication_enabled and rep_settings:
        # Only a single replication pair is supported.
        if len(sf_vol['volumePairs']) != 1:
            LOG.error("Can't find remote pair while extending the "
                      "volume or multiple replication pairs found!")
            raise exception.VolumeNotFound(volume_id=volume['id'])

        tgt_endpoint = self.cluster_pairs[0]['endpoint']
        target_vol_id = sf_vol['volumePairs'][0]['remoteVolumeID']
        params2 = params.copy()
        params2['volumeID'] = target_vol_id
        self._issue_api_request('ModifyVolume',
                                params2, version='5.0',
                                endpoint=tgt_endpoint)
def _get_provisioned_capacity_iops(self):
    """Sum provisioned bytes and minimum IOPS over all cluster volumes."""
    response = self._issue_api_request('ListVolumes', {}, version='8.0')
    volumes = response['result']['volumes']
    LOG.debug("%s volumes present in cluster", len(volumes))

    total_bytes = sum(vol['totalSize'] for vol in volumes)
    total_min_iops = sum(vol['qos']['minIOPS'] for vol in volumes)
    return total_bytes, total_min_iops
def _update_cluster_status(self):
    """Retrieve status info for the Cluster.

    Populates ``self.cluster_stats`` with capability flags and, when
    the cluster is reachable, capacity/IOPS figures derived from
    GetClusterCapacity.
    """
    params = {}
    data = {}
    backend_name = self.configuration.safe_get('volume_backend_name')
    data["volume_backend_name"] = backend_name or self.__class__.__name__
    data["vendor_name"] = 'SolidFire Inc'
    data["driver_version"] = self.VERSION
    data["storage_protocol"] = 'iSCSI'
    data['consistencygroup_support'] = True
    data['consistent_group_snapshot_enabled'] = True
    data['replication_enabled'] = self.replication_enabled
    if self.replication_enabled:
        data['replication'] = 'enabled'
    data['active_cluster_mvip'] = self.active_cluster['mvip']
    data['reserved_percentage'] = self.configuration.reserved_percentage
    data['QoS_support'] = True
    data['multiattach'] = True

    try:
        results = self._issue_api_request('GetClusterCapacity', params,
                                          version='8.0')
    except SolidFireAPIException:
        # Publish zeroed capacity rather than stale numbers when the
        # cluster cannot be reached.
        data['total_capacity_gb'] = 0
        data['free_capacity_gb'] = 0
        self.cluster_stats = data
        return

    results = results['result']['clusterCapacity']
    prov_cap, prov_iops = self._get_provisioned_capacity_iops()

    if self.configuration.sf_provisioning_calc == 'usedSpace':
        # Thin-provisioning accounting: capacity based on physical
        # used space plus an operator-set oversubscription ratio.
        free_capacity = (
            results['maxUsedSpace'] - results['usedSpace'])
        data['total_capacity_gb'] = results['maxUsedSpace'] / units.Gi
        data['thin_provisioning_support'] = True
        data['provisioned_capacity_gb'] = prov_cap / units.Gi
        data['max_over_subscription_ratio'] = (
            self.configuration.max_over_subscription_ratio
        )
    else:
        free_capacity = (
            results['maxProvisionedSpace'] - results['usedSpace'])
        data['total_capacity_gb'] = (
            results['maxProvisionedSpace'] / units.Gi)

    data['free_capacity_gb'] = float(free_capacity / units.Gi)

    # Guard against division by zero on an empty/new cluster by
    # reporting 100% efficiency.
    if (results['uniqueBlocksUsedSpace'] == 0 or
            results['uniqueBlocks'] == 0 or
            results['zeroBlocks'] == 0):
        data['compression_percent'] = 100
        # NOTE(review): key spelling 'deduplicaton_percent' (sic) is
        # kept as-is for compatibility with existing stats consumers.
        data['deduplicaton_percent'] = 100
        data['thin_provision_percent'] = 100
    else:
        data['compression_percent'] = (
            (float(results['uniqueBlocks'] * 4096) /
             results['uniqueBlocksUsedSpace']) * 100)
        data['deduplicaton_percent'] = (
            float(results['nonZeroBlocks'] /
                  results['uniqueBlocks']) * 100)
        data['thin_provision_percent'] = (
            (float(results['nonZeroBlocks'] + results['zeroBlocks']) /
             results['nonZeroBlocks']) * 100)

    data['provisioned_iops'] = prov_iops
    data['current_iops'] = results['currentIOPS']
    data['average_iops'] = results['averageIOPS']
    data['max_iops'] = results['maxIOPS']
    data['peak_iops'] = results['peakIOPS']

    data['shared_targets'] = False
    self.cluster_stats = data
def initialize_connection(self, volume, connector):
    """Initialize the connection and return connection info.

    Optionally checks and utilizes volume access groups.
    """
    conn = self._sf_initialize_connection(volume, connector)
    # Advertise discard (UNMAP/TRIM) support to the attaching host.
    conn['data']['discard'] = True
    return conn
def attach_volume(self, context, volume,
                  instance_uuid, host_name,
                  mountpoint):
    """Record attach metadata on the SolidFire volume's attributes.

    No export work happens here; only attach_time and attached_to are
    stamped so the backend reflects the Cinder attachment.

    :raises VolumeNotFound: if the volume is not on the cluster.
    """
    sfaccount = self._get_sfaccount(volume['project_id'])
    params = {'accountID': sfaccount['accountID']}

    sf_vol = self._get_sf_volume(volume['id'], params)
    if sf_vol is None:
        LOG.error("Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "attach_volume operation!", volume['id'])
        raise exception.VolumeNotFound(volume_id=volume['id'])

    attributes = sf_vol['attributes']
    attributes['attach_time'] = volume.get('attach_time', None)
    attributes['attached_to'] = instance_uuid
    params = {
        'volumeID': sf_vol['volumeID'],
        'attributes': attributes
    }

    self._issue_api_request('ModifyVolume', params)
def terminate_connection(self, volume, properties, force):
    """Tear down a connection; delegates to the iSCSI helper."""
    return self._sf_terminate_connection(volume, properties, force)
def detach_volume(self, context, volume, attachment=None):
    """Clear attach metadata from the SolidFire volume's attributes.

    Counterpart of attach_volume: resets attach_time and attached_to.

    :raises VolumeNotFound: if the volume is not on the cluster.
    """
    sfaccount = self._get_sfaccount(volume['project_id'])
    params = {'accountID': sfaccount['accountID']}

    sf_vol = self._get_sf_volume(volume['id'], params)
    if sf_vol is None:
        LOG.error("Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "detach_volume operation!", volume['id'])
        raise exception.VolumeNotFound(volume_id=volume['id'])

    attributes = sf_vol['attributes']
    attributes['attach_time'] = None
    attributes['attached_to'] = None
    params = {
        'volumeID': sf_vol['volumeID'],
        'attributes': attributes
    }

    self._issue_api_request('ModifyVolume', params)
def accept_transfer(self, context, volume,
                    new_user, new_project):
    """Reassign a volume to another tenant's SolidFire account.

    :raises VolumeNotFound: if the volume is not on the cluster.
    """
    sfaccount = self._get_sfaccount(volume['project_id'])
    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(volume['id'], params)
    if sf_vol is None:
        LOG.error("Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "accept_transfer operation!", volume['id'])
        raise exception.VolumeNotFound(volume_id=volume['id'])
    if new_project != volume['project_id']:
        # do a create_sfaccount here as this tenant
        # may not exist on the cluster yet
        sfaccount = self._get_create_account(new_project)

    params = {
        'volumeID': sf_vol['volumeID'],
        'accountID': sfaccount['accountID']
    }
    self._issue_api_request('ModifyVolume',
                            params, version='5.0')

    volume['project_id'] = new_project
    volume['user_id'] = new_user
    return self.target_driver.ensure_export(context, volume, None)
def retype(self, ctxt, volume, new_type, diff, host):
    """Convert the volume to be of the new type.

    Returns a boolean indicating whether the retype occurred and a dict
    with the updates on the volume.

    :param ctxt: Context
    :param volume: A dictionary describing the volume to migrate
    :param new_type: A dictionary describing the volume type to convert to
    :param diff: A dictionary with the difference between the two types
    :param host: A dictionary describing the host to migrate to, where
                 host['host'] is its name, and host['capabilities'] is a
                 dictionary of its reported capabilities (Not Used).
    """
    model_update = {}

    LOG.debug("Retyping volume %(vol)s to new type %(type)s",
              {'vol': volume.id, 'type': new_type})

    sfaccount = self._get_sfaccount(volume['project_id'])
    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(volume['id'], params)

    if sf_vol is None:
        raise exception.VolumeNotFound(volume_id=volume['id'])

    if self.replication_enabled:
        ctxt = context.get_admin_context()
        # Compare the replication intent of the old and new volume
        # types and pair/unpair only when it actually changes.
        src_rep_type = self._set_rep_by_volume_type(
            ctxt, volume.volume_type_id)
        dst_rep_type = self._set_rep_by_volume_type(ctxt, new_type['id'])

        if src_rep_type != dst_rep_type:
            if dst_rep_type:
                rep_settings = self._retrieve_replication_settings(volume)
                rep_params = self._get_default_volume_params(volume)
                volume['volumeID'] = (
                    int(volume.provider_id.split()[0]))
                rep_updates = self._replicate_volume(volume, rep_params,
                                                     sfaccount,
                                                     rep_settings)
            else:
                rep_updates = self._disable_replication(volume)

            if rep_updates:
                model_update.update(rep_updates)

    attributes = sf_vol['attributes']
    attributes['retyped_at'] = timeutils.utcnow().isoformat()
    params = {'volumeID': sf_vol['volumeID'], 'attributes': attributes}
    qos = self._set_qos_by_volume_type(ctxt, new_type['id'],
                                       volume.get('size'))

    if qos:
        params['qos'] = qos

    self._issue_api_request('ModifyVolume', params)
    return True, model_update
def manage_existing(self, volume, external_ref):
    """Manages an existing SolidFire Volume (import to Cinder).

    Renames the Volume to match the expected name for the volume.
    Also need to consider things like QoS, Emulation, account/tenant and
    replication settings.
    """
    sfid = external_ref.get('source-id', None)
    sfname = external_ref.get('name', None)

    LOG.debug("Managing volume %(id)s to ref %(ref)s",
              {'id': volume.id, 'ref': external_ref})
    if sfid is None:
        raise SolidFireAPIException(_("Manage existing volume "
                                      "requires 'source-id'."))

    # First get the volume on the SF cluster (MUST be active)
    params = {'startVolumeID': sfid,
              'limit': 1}
    vols = self._issue_api_request(
        'ListActiveVolumes', params)['result']['volumes']
    # NOTE(review): assumes the listing returned at least one entry and
    # that the first entry is the requested volume — confirm against
    # manage_existing_get_size, which validates len(vols) == 1.
    sf_ref = vols[0]
    sfaccount = self._get_create_account(volume['project_id'])

    import_time = volume['created_at'].isoformat()
    # Tag the SF volume with Cinder bookkeeping, preserving its original
    # name in 'old_name' for reference.
    attributes = {'uuid': volume['id'],
                  'is_clone': 'False',
                  'os_imported_at': import_time,
                  'old_name': sfname}

    params = self._get_default_volume_params(volume)
    params['volumeID'] = sf_ref['volumeID']
    params['attributes'] = attributes
    # Managing must not resize the existing volume.
    params.pop('totalSize')
    self._issue_api_request('ModifyVolume',
                            params, version='5.0')

    try:
        rep_updates = {}
        rep_settings = self._retrieve_replication_settings(volume)
        if self.replication_enabled and rep_settings:
            if len(sf_ref['volumePairs']) != 0:
                msg = _("Not possible to manage a volume with "
                        "replicated pair! Please split the volume pairs.")
                LOG.error(msg)
                raise SolidFireDriverException(msg)
            else:
                params = self._get_default_volume_params(volume)
                params['volumeID'] = sf_ref['volumeID']
                volume['volumeID'] = sf_ref['volumeID']
                params['totalSize'] = sf_ref['totalSize']
                rep_updates = self._replicate_volume(
                    volume, params, sfaccount, rep_settings)
    except Exception:
        with excutils.save_and_reraise_exception():
            # When the replication fails in mid process, we need to
            # set the volume properties the way it was before.
            LOG.error("Error trying to replicate volume %s",
                      volume.id)
            params = {'volumeID': sf_ref['volumeID']}
            params['attributes'] = sf_ref['attributes']
            self._issue_api_request('ModifyVolume',
                                    params, version='5.0')

    model_update = self._get_model_info(sfaccount, sf_ref['volumeID'])
    model_update.update(rep_updates)

    return model_update
def manage_existing_get_size(self, volume, external_ref):
    """Return size of an existing LV for manage_existing.

    existing_ref is a dictionary of the form:
    {'source-id': <id of the volume on the SF Cluster>}

    :returns: volume size in whole GiB, rounded up.
    :raises SolidFireAPIException: if 'source-id' is missing.
    :raises SolidFireDriverException: if the id is not on the backend.
    """
    sfid = external_ref.get('source-id', None)
    if sfid is None:
        # BUG FIX: the ref key actually checked is 'source-id'; the old
        # message (and docstring) incorrectly told users to supply a
        # different key. Now consistent with manage_existing().
        raise SolidFireAPIException(_("Manage existing get size "
                                      "requires 'source-id'."))

    params = {'startVolumeID': int(sfid),
              'limit': 1}
    vols = self._issue_api_request(
        'ListActiveVolumes', params)['result']['volumes']
    if len(vols) != 1:
        msg = _("Provided volume id does not exist on SolidFire backend.")
        raise SolidFireDriverException(msg)

    # SolidFire reports bytes; Cinder wants whole GiB, rounded up.
    return int(math.ceil(float(vols[0]['totalSize']) / units.Gi))
def unmanage(self, volume):
    """Mark SolidFire Volume as unmanaged (export from Cinder)."""
    sfaccount = self._get_sfaccount(volume['project_id'])
    if sfaccount is None:
        LOG.error("Account for Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "unmanage operation!", volume['id'])
        raise SolidFireAPIException(_("Failed to find account "
                                      "for volume."))

    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(volume['id'], params)
    if sf_vol is None:
        raise exception.VolumeNotFound(volume_id=volume['id'])

    export_time = timeutils.utcnow().isoformat()
    attributes = sf_vol['attributes']
    # Record when the volume left Cinder's control; the volume itself
    # stays intact on the cluster.
    attributes['os_exported_at'] = export_time
    params = {'volumeID': int(sf_vol['volumeID']),
              'attributes': attributes}

    self._issue_api_request('ModifyVolume',
                            params, version='5.0')
def _failover_volume(self, tgt_vol, tgt_cluster, src_vol=None):
    """Modify remote volume to R/W mode."""
    if src_vol:
        # Put the src in tgt mode assuming it's still available
        # catch the exception if the cluster isn't available and
        # continue on
        params = {'volumeID': src_vol['volumeID'],
                  'access': 'replicationTarget'}
        try:
            self._issue_api_request('ModifyVolume', params)
        except SolidFireAPIException:
            # FIXME: demotion is best-effort — the primary may be down,
            # in which case we proceed with promoting the target only.
            pass

    # Now call out to the remote and make the tgt our new src
    params = {'volumeID': tgt_vol['volumeID'],
              'access': 'readWrite'}
    self._issue_api_request('ModifyVolume', params,
                            endpoint=tgt_cluster['endpoint'])
def failover_host(self, context, volumes, secondary_id=None, groups=None):
    """Failover to replication target.

    In order to do failback, you MUST specify the original/default
    cluster using secondary_id option. You can do this simply by
    specifying: `secondary_id=default`

    :returns: tuple of (active cluster id or None, per-volume update
        dicts, empty group updates list).
    """
    remote = None
    failback = False
    volume_updates = []

    LOG.info("Failing over. Secondary ID is: %s",
             secondary_id)

    # NOTE(erlon): For now we only support one replication target device.
    # So, there are two cases we have to deal with here:
    #   1. Caller specified a backend target to fail-over to (this must be
    #     the backend_id as defined in replication_device. Any other values
    #     will raise an error. If the user does not specify anything, we
    #     also fall in this case.
    #   2. Caller wants to failback and therefore sets backend_id=default.
    secondary_id = secondary_id.lower() if secondary_id else None
    if secondary_id == "default" and not self.failed_over:
        msg = _("SolidFire driver received failover_host "
                "specifying failback to default, the "
                "host however is not in `failed_over` "
                "state.")
        raise exception.InvalidReplicationTarget(msg)
    elif secondary_id == "default" and self.failed_over:
        remote = self.primary_cluster
        failback = True
    else:
        repl_configs = self.configuration.replication_device[0]
        if secondary_id and repl_configs['backend_id'] != secondary_id:
            msg = _("Replication id (%s) does not match the configured "
                    "one in cinder.conf.") % secondary_id
            raise exception.InvalidReplicationTarget(msg)
        remote = self.cluster_pairs[0]

    if not remote or not self.replication_enabled:
        LOG.error("SolidFire driver received failover_host "
                  "request, however replication is NOT "
                  "enabled, or there are no available "
                  "targets to fail-over to.")
        raise exception.UnableToFailOver(reason=_("Failover requested "
                                                  "on non replicated "
                                                  "backend."))

    target_vols = self._map_sf_volumes(volumes,
                                       endpoint=remote['endpoint'])
    LOG.debug("Mapped target_vols: %s", target_vols)

    primary_vols = None
    try:
        primary_vols = self._map_sf_volumes(volumes)
        LOG.debug("Mapped Primary_vols: %s", target_vols)
    except SolidFireAPIException:
        # API Request failed on source. Failover/failback will skip next
        # calls to it.
        pass

    for v in volumes:
        if v['status'] == "error":
            LOG.debug("Skipping operation for Volume %s as it is "
                      "on error state.", v['id'])
            continue

        target_vlist = [sfv for sfv in target_vols
                        if sfv['cinder_id'] == v['id']]

        if len(target_vlist) > 0:
            target_vol = target_vlist[0]

            if primary_vols:
                vols = [sfv for sfv in primary_vols
                        if sfv['cinder_id'] == v['id']]

                if not vols:
                    LOG.error("SolidFire driver cannot proceed. "
                              "Could not find volume %s in "
                              "back-end storage.", v['id'])
                    raise exception.UnableToFailOver(
                        reason=_("Cannot find cinder volume in "
                                 "back-end storage."))

                # Have at least one cinder volume in storage
                primary_vol = vols[0]
            else:
                primary_vol = None

            LOG.debug('Failing-over volume %s, target vol %s, '
                      'primary vol %s', v, target_vol, primary_vol)

            try:
                self._failover_volume(target_vol, remote, primary_vol)

                sf_account = self._get_create_account(
                    v.project_id, endpoint=remote['endpoint'])
                conn_info = self._build_connection_info(
                    sf_account, target_vol, endpoint=remote['endpoint'])

                # volume status defaults to failed-over
                replication_status = 'failed-over'
                # in case of a failback, volume status must be reset to its
                # original state
                if failback:
                    replication_status = 'enabled'

                vol_updates = {
                    'volume_id': v['id'],
                    'updates': {
                        'replication_status': replication_status
                    }
                }
                vol_updates['updates'].update(conn_info)
                volume_updates.append(vol_updates)
                LOG.debug("Updates for volume: %(id)s %(updates)s",
                          {'id': v.id, 'updates': vol_updates})

            except Exception as e:
                volume_updates.append({'volume_id': v['id'],
                                       'updates': {'status': 'error', }})
                if failback:
                    LOG.error("Error trying to failback volume %s", v.id)
                else:
                    LOG.error("Error trying to failover volume %s", v.id)
                # Older exception classes exposed .message; fall back to
                # the exception object itself otherwise.
                msg = e.message if hasattr(e, 'message') else e
                LOG.exception(msg)
        else:
            volume_updates.append({'volume_id': v['id'],
                                   'updates': {'status': 'error', }})

    # FIXME(jdg): This introduces a problem for us, up until now our driver
    # has been pretty much stateless and has allowed customers to run
    # active/active HA c-vol services with SolidFire. The introduction of
    # the active_cluster and failed_over attributes is going to break that
    # but for now that's going to be the trade off of using replication
    if failback:
        active_cluster_id = None
        self.failed_over = False
    else:
        active_cluster_id = remote['backend_id']
        self.failed_over = True
    self.active_cluster = remote

    return active_cluster_id, volume_updates, []
def freeze_backend(self, context):
    """Freeze backend notification; nothing to do on SolidFire."""
    pass
def thaw_backend(self, context):
    """Thaw backend notification; nothing to do on SolidFire."""
    pass
def revert_to_snapshot(self, context, volume, snapshot):
    """Revert a volume to a given snapshot.

    :raises VolumeNotFound: if the volume is not on the cluster.
    :raises VolumeSnapshotNotFound: if the snapshot is not on the
        cluster.
    """
    sfaccount = self._get_sfaccount(volume.project_id)
    params = {'accountID': sfaccount['accountID']}

    sf_vol = self._get_sf_volume(volume.id, params)
    if sf_vol is None:
        LOG.error("Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "revert_to_snapshot operation!", volume.id)
        raise exception.VolumeNotFound(volume_id=volume['id'])

    params['volumeID'] = sf_vol['volumeID']

    # Snapshots are matched by the prefixed Cinder snapshot UUID.
    sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
                             snapshot.id)
    sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
    snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
                None)
    if not snap:
        LOG.error("Snapshot ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "revert_to_snapshot operation!", snapshot.id)
        raise exception.VolumeSnapshotNotFound(volume_id=volume.id)

    params['snapshotID'] = snap['snapshotID']
    # Do not keep a backup of the pre-revert state on the cluster.
    params['saveCurrentState'] = 'false'

    self._issue_api_request('RollbackToSnapshot',
                            params,
                            version='6.0')
class SolidFireISCSI(iscsi_driver.SanISCSITarget):
    """iSCSI target helper that delegates to the owning SolidFire driver."""

    def __init__(self, *args, **kwargs):
        super(SolidFireISCSI, self).__init__(*args, **kwargs)
        # Back-reference to the SolidFire driver instance that owns us.
        self.sf_driver = kwargs.get('solidfire_driver')

    def __getattr__(self, attr):
        # Proxy unknown attribute lookups to the driver so helpers such
        # as _get_sfaccount resolve transparently.
        if hasattr(self.sf_driver, attr):
            return getattr(self.sf_driver, attr)
        else:
            msg = _('Attribute: %s not found.') % attr
            raise NotImplementedError(msg)

    def _do_iscsi_export(self, volume):
        # CHAP credentials come from the tenant's SolidFire account.
        sfaccount = self._get_sfaccount(volume['project_id'])
        model_update = {}
        model_update['provider_auth'] = ('CHAP %s %s'
                                         % (sfaccount['username'],
                                            sfaccount['targetSecret']))

        return model_update

    def create_export(self, context, volume, volume_path):
        return self._do_iscsi_export(volume)

    def ensure_export(self, context, volume, volume_path):
        try:
            return self._do_iscsi_export(volume)
        except SolidFireAPIException:
            # Best-effort during ensure; the caller tolerates None.
            return None

    # Following are abc's that we make sure are caught and
    # paid attention to. In our case we don't use them
    # so just stub them out here.
    def remove_export(self, context, volume):
        pass

    def terminate_connection(self, volume, connector, **kwargs):
        pass

    def _sf_initialize_connection(self, volume, connector):
        """Initialize the connection and return connection info.

        Optionally checks and utilizes volume access groups.
        """
        if self.configuration.sf_enable_vag:
            iqn = connector['initiator']
            provider_id = volume['provider_id']
            vol_id = int(provider_id.split()[0])

            # safe_create_vag may opt to reuse vs create a vag, so we need to
            # add our vol_id.
            vag_id = self._safe_create_vag(iqn, vol_id)
            self._add_volume_to_vag(vol_id, iqn, vag_id)

        # Continue along with default behavior
        return super(SolidFireISCSI, self).initialize_connection(volume,
                                                                 connector)

    def _sf_terminate_connection(self, volume, properties, force):
        """Terminate the volume connection.

        Optionally remove volume from volume access group.
        If the VAG is empty then the VAG is also removed.
        """
        if self.configuration.sf_enable_vag:
            provider_id = volume['provider_id']
            vol_id = int(provider_id.split()[0])

            if properties:
                iqn = properties['initiator']
                vag = self._get_vags_by_name(iqn)

                if vag and not volume['multiattach']:
                    # Multiattach causes problems with removing volumes from
                    # VAGs.
                    # Compromise solution for now is to remove multiattach
                    # volumes from VAGs during volume deletion.
                    vag = vag[0]
                    vag_id = vag['volumeAccessGroupID']

                    # Remove the whole VAG when this was its last volume,
                    # otherwise just detach this volume from it.
                    if [vol_id] == vag['volumes']:
                        self._remove_vag(vag_id)
                    elif vol_id in vag['volumes']:
                        self._remove_volume_from_vag(vol_id, vag_id)
            else:
                # No initiator info: scrub the volume from every VAG.
                self._remove_volume_from_vags(vol_id)

        return super(SolidFireISCSI, self).terminate_connection(volume,
                                                                properties,
                                                                force=force)
# ---------------------------------------------------------------------------
# End of SolidFire driver module. The following is a separate Django models
# module (oil-and-rope character sheets).
# ---------------------------------------------------------------------------
import random
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext as _
class SheetHeader(models.Model):
    """
    Top-level record tying a named character sheet to its owner.

    Parameters
    ----------
    name: :class:`str`
        Display name of the sheet.
    character_info: :class:`CharacterInfo`
        One-to-one link to the character's descriptive data.
    user: :class:`User`
        Owner of the sheet.
    """

    name = models.CharField(_("Name"), max_length=50)
    # Game
    # NOTE(review): a `game` relation appears planned (see comment above)
    # but is not implemented yet — confirm before relying on it.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("User"), on_delete=models.CASCADE)
    character_info = models.OneToOneField("sheet.CharacterInfo", verbose_name=_("Character Info"),
                                          on_delete=models.CASCADE)

    class Meta:
        verbose_name = _("Sheet Header")
        verbose_name_plural = _("Sheet Headers")

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("sheetheader_detail", kwargs={"pk": self.pk})
class CharacterInfo(models.Model):
    """
    Descriptive data for a character. The exact fields depend on the game; at
    the moment the layout is static, pending a more flexible design.

    Parameters
    ----------
    name: :class:`str`
    age: :class:`int`
    height: :class:`float`
        This should be either in m or ft.
    weight: :class:`float`
    hair_color: :class:`str`
    eye_color: :class:`str`
    height_measurement_system: :class:`select`
    weight_measurement_system: :class:`select`
    """
    name = models.CharField(_("Name"), max_length=50)
    age = models.IntegerField(_("Age"))
    height = models.DecimalField(_("Height"), max_digits=5, decimal_places=2, null=True, blank=True)
    weight = models.DecimalField(_("Weight"), max_digits=5, decimal_places=2, null=True, blank=True)
    hair_color = models.CharField(_("Hair Color"), max_length=30, null=True, blank=True)
    eye_color = models.CharField(_("Eye Color"), max_length=30, null=True, blank=True)
    # Measurement-system codes shared by the height and weight fields.
    METRIC = 0
    US = 1
    MEASUREMENT_SYSTEM = (
        (METRIC, _('Metric')),
        (US, _('US Standard')),
    )
    height_measurement_system = models.PositiveSmallIntegerField(_("Height Measurement System"),
                                                                 choices=MEASUREMENT_SYSTEM, default=0)
    # NOTE(review): this is a model *field* named like a class constant; the
    # docstring calls it weight_measurement_system. Renaming would require a
    # migration and code changes, so it is only flagged here.
    WEIGHT_MEASUREMENT_SYSTEM = models.PositiveSmallIntegerField(_("Weight Measurement System"),
                                                                 choices=MEASUREMENT_SYSTEM, default=0)
    class Meta:
        verbose_name = _("Character Info")
        verbose_name_plural = _("Character Infos")
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # NOTE(review): URL name "CharacterInfo_detail" is capitalized unlike
        # the sibling models' lowercase url names — confirm against urls.py.
        return reverse("CharacterInfo_detail", kwargs={"pk": self.pk})
class SheetDetail(models.Model):
    """
    In this model all the attribute skills and rolls are described.

    Parameters
    ----------
    name: :class:`str`
    start_value: :class:`int`
        Ex: the start value of str is 10 in pathfinder, but 0
    rollable: :class:`bool`
        Whether ``roll`` actually rolls dice or just returns a flat value.
    dice_class: :class:`int`
        Number of faces of the die rolled (e.g. 20 for a d20).
    dice_number: :class:`int`
        How many dice are rolled and summed.
    inherited_bonus: :class:`SheetDetail`
        In this case, we'll take the bonus of another detail (example: perception uses wisdom)
    misc_bonus: :class:`int`
    extra_bonus_1: :class:`int`
    extra_bonus_2: :class:`int`
    sheet: :class:`Sheet`
    """
    name = models.CharField(_("Name"), max_length=50)
    start_value = models.SmallIntegerField(_("Start Value"), default=0)
    rollable = models.BooleanField(_("Rollable"), default=True)
    D3 = 0
    D4 = 1
    D6 = 2
    D8 = 3
    D10 = 4
    D12 = 5
    D16 = 6
    D20 = 7
    D100 = 8
    DICES = (
        (D3, _("D3")),
        (D4, _("D4")),
        (D6, _("D6")),
        (D8, _("D8")),
        (D10, _("D10")),
        (D12, _("D12")),
        (D16, _("D16")),
        (D20, _("D20")),
        (D100, _("D100")),
    )
    # NOTE(review): `roll` below treats dice_class as the face count (and the
    # default is 20 faces), but the DICES choices store small enum codes
    # (D20 == 7). Selecting a DICES choice would make rolls use the code, not
    # the face count — confirm which representation is intended before wiring
    # `choices=DICES` to this field.
    dice_class = models.PositiveSmallIntegerField(_("Dice Class"), default=20)
    dice_number = models.PositiveSmallIntegerField(_("Dice Number"), default=1)
    # Inherited bonus (planned: take the bonus of another SheetDetail)
    misc_bonus = models.SmallIntegerField(_("Miscelaneous Bonus"), default=0)
    extra_bonus_1 = models.SmallIntegerField(_("Extra Bonus 1"), default=0)
    extra_bonus_2 = models.SmallIntegerField(_("Extra Bonus 2"), default=0)
    sheet = models.ForeignKey("sheet.SheetHeader", verbose_name=_("Sheet Header"),
                              on_delete=models.CASCADE, related_name="sheet_details")
    @property
    def get_total_bonus(self):
        """Sum of all flat bonuses applied to this detail."""
        return self.misc_bonus + self.extra_bonus_1 + self.extra_bonus_2
    @property
    def roll(self):
        """Roll ``dice_number`` dice plus start value and bonuses, or return
        the flat ``start_value + bonuses`` when the detail is not rollable."""
        if self.rollable:
            random.seed()
            result = self.start_value
            for _die in range(self.dice_number):
                # BUGFIX: randrange(1, n) excludes n, so the die could never
                # show its highest face; randint(1, n) is inclusive on both
                # ends.
                result += random.randint(1, self.dice_class)
            return result + self.get_total_bonus
        else:
            return self.start_value + self.get_total_bonus
    class Meta:
        verbose_name = _("Sheet Detail")
        verbose_name_plural = _("Sheet Details")
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse("sheetdetail_detail", kwargs={"pk": self.pk})
|
import pickle
from typing import Tuple, Optional, Any, Dict
import torch
import torch.nn as nn
from torch.autograd.function import once_differentiable
from hivemind.proto import runtime_pb2, runtime_pb2_grpc as runtime_grpc
from hivemind.utils import nested_flatten, nested_pack, nested_compare, Endpoint
from hivemind.utils.grpc import serialize_torch_tensor, deserialize_torch_tensor, ChannelCache
# Zero-element tensor with requires_grad=True: passed as the first argument of
# _RemoteModuleCall.apply so autograd tracks the call even when no real input
# requires grad.
DUMMY = torch.empty(0, requires_grad=True)  # dummy tensor that triggers autograd in RemoteExpert
def _get_expert_stub(endpoint: Endpoint, *extra_options: Tuple[str, Any]):
    """ Create a gRPC stub to access remote expert or use previously created stub from a process-wide cache """
    # Unlimited message sizes in both directions; callers may append options.
    base_options: Tuple[Tuple[str, Any], ...] = (
        ('grpc.max_send_message_length', -1),
        ('grpc.max_receive_message_length', -1),
    )
    return ChannelCache.get_stub(endpoint, runtime_grpc.ConnectionHandlerStub, aio=False,
                                 options=base_options + extra_options)
class RemoteExpert(nn.Module):
    """
    A simple module that runs forward/backward of an expert hosted on a remote machine.
    Works seamlessly with pytorch autograd. (this is essentially a simple RPC function)

    Warning: RemoteExpert currently assumes that you provide it with correct input shapes.
    Sending wrong input shapes can cause RemoteExpert to freeze indefinitely due to error in runtime.

    :param uid: unique expert identifier
    :param endpoint: network endpoint of a server that services that expert, e.g. "201.123.321.99:1337" or "[::]:8080"
    """
    def __init__(self, uid, endpoint: Endpoint):
        super().__init__()
        self.uid, self.endpoint = uid, endpoint
        self._info = None  # lazily fetched expert metadata; see the `info` property

    @property
    def stub(self):
        """ gRPC stub for this expert's server, served from the process-wide channel cache. """
        return _get_expert_stub(self.endpoint)

    def forward(self, *args, **kwargs):
        """ Call RemoteExpert for the specified inputs and return its output(s). Compatible with pytorch.autograd. """
        expected_keys = self.info['keyword_names']
        assert len(kwargs) == len(expected_keys), f"Keyword args should be {expected_keys}"
        # Note: we put keyword arguments in the same order as on a server to prevent f(a=1, b=2) != f(b=2, a=1) errors
        ordered_kwargs = {key: kwargs[key] for key in expected_keys}
        forward_inputs = (args, ordered_kwargs)
        if not nested_compare(forward_inputs, self.info['forward_schema']):
            raise TypeError(f"Inputs do not match expert input schema. Did you pass the right number of parameters?")
        # Note: we send DUMMY to prevent torch from excluding expert from backward if no other inputs require grad
        flat_outputs = _RemoteModuleCall.apply(DUMMY, self.uid, self.stub, self.info, *nested_flatten(forward_inputs))
        return nested_pack(flat_outputs, structure=self.info['outputs_schema'])

    @property
    def info(self):
        """ Expert metadata (schemas, keyword names), fetched from the server once and cached. """
        if self._info is None:
            response = self.stub.info(runtime_pb2.ExpertUID(uid=self.uid))
            self._info = pickle.loads(response.serialized_info)
        return self._info

    def extra_repr(self):
        return f"uid={self.uid}, endpoint={self.endpoint}"
class _RemoteModuleCall(torch.autograd.Function):
    """ Internal autograd-friendly call of a remote module. For applications, use RemoteExpert instead. """
    @staticmethod
    def forward(ctx, dummy: torch.Tensor, uid: str, stub: runtime_grpc.ConnectionHandlerStub,
                info: Dict[str, Any], *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
        # Note: *inputs are flattened input tensors that follow the expert's info['input_schema']
        # detach to avoid pickling the computation graph
        inputs = tuple(tensor.cpu().detach() for tensor in inputs)
        # Stash everything backward() will need on the autograd context.
        ctx.uid, ctx.stub, ctx.info = uid, stub, info
        ctx.save_for_backward(*inputs)
        # Serialize each tensor with the compression scheme declared for it
        # in the forward schema (zip pairs tensors with schema leaves).
        serialized_tensors = [serialize_torch_tensor(inp, proto.compression)
                              for inp, proto in zip(inputs, nested_flatten(info["forward_schema"]))]
        outputs = stub.forward(
            runtime_pb2.ExpertRequest(uid=ctx.uid, tensors=serialized_tensors))
        deserialized_outputs = [deserialize_torch_tensor(tensor) for tensor in outputs.tensors]
        return tuple(deserialized_outputs)

    @staticmethod
    @once_differentiable
    def backward(ctx, *grad_outputs) -> Tuple[Optional[torch.Tensor], ...]:
        # The server's backward RPC expects (saved forward inputs, grad_outputs),
        # serialized in the order (forward_schema, outputs_schema).
        grad_outputs_cpu = tuple(tensor.cpu() for tensor in grad_outputs)
        inputs_and_grad_outputs = tuple(nested_flatten((ctx.saved_tensors, grad_outputs_cpu)))
        backward_schema = tuple(nested_flatten((ctx.info["forward_schema"], ctx.info["outputs_schema"])))
        serialized_tensors = [serialize_torch_tensor(tensor, proto.compression)
                              for tensor, proto in zip(inputs_and_grad_outputs, backward_schema)]
        grad_inputs = ctx.stub.backward(runtime_pb2.ExpertRequest(uid=ctx.uid, tensors=serialized_tensors))
        deserialized_grad_inputs = [deserialize_torch_tensor(tensor) for tensor in grad_inputs.tensors]
        # First four slots correspond to (dummy, uid, stub, info); the rest
        # map one-to-one onto *inputs.
        return (DUMMY, None, None, None, *deserialized_grad_inputs)
|
<reponame>james-guevara/synthdnm
import pandas as pd
from sklearn.externals import joblib
from Backend import get_path
import os,sys
import numpy as np
def classify_dataframe(df, clf, ofh, pyDNM_header=False, mode="a", keep_fp=True):
    """Score candidate de novo variants and write them to a TSV file.

    Parameters
    ----------
    df : pandas.DataFrame
        Candidate variants; columns 12:36 must hold the numeric features the
        classifier was trained on.
    clf : object
        Fitted classifier exposing ``predict`` and ``predict_proba``.
    ofh : str
        Path of the tab-separated output file.
    pyDNM_header : bool
        Whether to write a header row.
    mode : str
        File-open mode ("w" for the first chunk, "a" for subsequent ones).
    keep_fp : bool
        When False, rows predicted as false positives (pred != 1) are dropped
        before writing.

    Returns
    -------
    int or None
        0 when the cleaned dataframe is empty (nothing written); else None.
    """
    # Silence SettingWithCopyWarning for the column assignments below.
    pd.options.mode.chained_assignment = None
    feature_cols = list(df.columns[12:36])
    # Rows with inf/NaN feature values cannot be scored; drop them.
    df = df.replace([np.inf, -np.inf], np.nan)
    df = df.dropna(axis=0, subset=feature_cols)
    if df.empty:
        return 0
    X = df[feature_cols].values
    df["pred"] = clf.predict(X)
    # Probability of the positive (true de novo) class.
    df["prob"] = clf.predict_proba(X)[:, 1]
    if not keep_fp:
        df = df.loc[df["pred"] == 1]
    df.to_csv(ofh, sep="\t", header=pyDNM_header, index=False, mode=mode)
def get_sex(fam_fh):
    """Read a PLINK-style .fam file and map each individual to a sex code.

    Parameters
    ----------
    fam_fh : str
        Path to a tab-separated .fam file (column 2 = individual ID,
        column 5 = sex code; both kept as strings).

    Returns
    -------
    pandas.DataFrame
        Columns ``["sex", "iid"]`` with a default integer index, one row per
        individual (later duplicate IDs overwrite earlier ones).
    """
    fam_dict = {}
    # Context manager guarantees the file handle is closed (the original
    # implementation leaked it).
    with open(fam_fh, "r") as fam:
        for line in fam:
            linesplit = line.rstrip().split("\t")
            iid = linesplit[1]
            sex = linesplit[4]
            fam_dict[iid] = sex
    df = pd.Series(fam_dict).to_frame("sex")
    df["iid"] = df.index
    # Replace the iid index with a default RangeIndex in one step.
    df.reset_index(drop=True, inplace=True)
    return df
def classify(ofh_tmp=None,ofh=None,keep_fp=None,pseud=None,vcf=None,make_bed=True,make_vcf=True,fam_fh=None):
    """Classify candidate de novo variants and write scored output.

    Splits the input calls by chromosome type, offspring sex and
    pseudoautosomal-region (PAR) status, scores each subset with the matching
    classifier, concatenates the results into ``ofh``, and optionally writes a
    BED file.

    Parameters
    ----------
    ofh_tmp : str
        Path of the raw pydnm feature table (tab-separated).
    ofh : str
        Path of the final scored output file (overwritten).
    keep_fp : bool
        NOTE(review): accepted but never forwarded to classify_dataframe —
        confirm whether false-positive filtering should be applied here.
    pseud : dict
        Pseudoautosomal intervals, e.g. {"chrX": [(s1, e1), (s2, e2)],
        "chrY": [(s1, e1), (s2, e2)]}.
    vcf, make_vcf :
        NOTE(review): unused in this function.
    make_bed : bool
        When True, also write ``ofh + ".bed"`` via make_output_bed.
    fam_fh : str
        Path to the .fam pedigree file used to look up each sample's sex.
    """
    ofh_new = ofh
    # fam
    #fam_fh = "/home/a1lian/reach_ssc1-4.fam"
    df_fam = get_sex(fam_fh)
    # Unpack the two PAR intervals for each sex chromosome.
    pseud_chrX = pseud["chrX"]
    pseud_chrX_interval_one = pseud_chrX[0]
    pseud_chrX_interval_two = pseud_chrX[1]
    pseud_chrY = pseud["chrY"]
    pseud_chrY_interval_one = pseud_chrY[0]
    pseud_chrY_interval_two = pseud_chrY[1]
    # Get classifiers
    # OLD CLFS:
    snv_clf = get_path()+'/pydnm.snv.clf.joblib'
    indels_clf = get_path()+'/pydnm.indels.clf.joblib'
    # NEW CLFS:
    # snv_clf = get_path()+'/ssc1-1-snp-clf-2020-08-09.joblib'
    # indels_clf = get_path()+'/ssc1-1-indel-clf-2020-08-09.joblib'
    snv_chrX_clf=get_path()+'/chrX_training_snps.joblib'
    snv_chrY_clf= get_path()+'/chrY_training_snps.joblib'
    indels_chrX_chrY_clf= get_path()+'/chrX_chrY_training_indels.joblib'
    # NOTE(review): only the autosomal SNV classifier's existence is checked;
    # a missing chrX/chrY/indel model would fail below with a raw IOError.
    if not os.path.isfile(snv_clf):
        sys.stderr.write('FATAL ERROR: {} CLASSIFIER NOT FOUND\n'.format(snv_clf))
        sys.exit(1)
    clf = joblib.load(snv_clf)
    clf_indels = joblib.load(indels_clf)
    clf_chrX_snps = joblib.load(snv_chrX_clf)
    clf_chrY_snps = joblib.load(snv_chrY_clf)
    clf_chrX_chrY_indels = joblib.load(indels_chrX_chrY_clf)
    # Make dataframe from input pydnm file
    df = pd.read_csv(ofh_tmp,sep="\t",dtype={"chrom": str})
    # Filter original dataframe
    # df = df.loc[(df["offspring_gt"] != "0/0")]
    # Merge per-sample sex codes onto the calls.
    df['iid']=df['iid'].astype(str)
    df_fam['iid']=df_fam['iid'].astype(str)
    df = pd.merge(df, df_fam, on="iid")
    # Drop unplaced GL* contigs and normalize chromosome names to "chr*".
    # NOTE(review): "GL*" is interpreted as a regex ('G' followed by zero or
    # more 'L'), so this drops any chrom containing a 'G' — confirm intent.
    df=df[~df["chrom"].str.contains("GL*")]
    df["chrom"]=df["chrom"].astype(str)
    df["chrom"] = df["chrom"].apply(lambda s: "chr" + s if not s.startswith("chr") else s)
    # Partition the calls: autosomes, female X, male non-PAR X/Y (hemizygous
    # 1/1 calls) and male PAR X/Y (diploid 0/1 calls); SNVs vs indels by
    # ref/alt allele length.
    df_autosomal_SNV = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)]
    df_autosomal_indel = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))]
    df_female_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)]
    df_female_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))]
    df_male_nonPAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))]
    df_male_nonPAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1')&(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))]
    df_male_nonPAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))]
    df_male_nonPAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))]
    df_male_PAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))]
    df_male_PAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))]
    df_male_PAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))]
    df_male_PAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))]
    # First chunk truncates the file ("w"); all later chunks append ("a",
    # the classify_dataframe default). No header row is written here — column
    # names are re-applied after the re-read below.
    classify_dataframe(df_autosomal_SNV,clf,ofh_new,False,"w")
    classify_dataframe(df_autosomal_indel,clf_indels,ofh_new)
    classify_dataframe(df_female_X_SNV,clf,ofh_new)
    classify_dataframe(df_female_X_indel,clf_indels,ofh_new)
    classify_dataframe(df_male_nonPAR_X_SNV,clf_chrX_snps,ofh_new)
    classify_dataframe(df_male_nonPAR_Y_SNV,clf_chrY_snps,ofh_new)
    classify_dataframe(df_male_nonPAR_X_indel,clf_chrX_chrY_indels,ofh_new)
    classify_dataframe(df_male_nonPAR_Y_indel,clf_chrX_chrY_indels,ofh_new)
    classify_dataframe(df_male_PAR_X_SNV,clf,ofh_new)
    classify_dataframe(df_male_PAR_Y_SNV,clf,ofh_new)
    classify_dataframe(df_male_PAR_X_indel,clf_indels,ofh_new)
    classify_dataframe(df_male_PAR_Y_indel,clf_indels,ofh_new)
    # Re-read the concatenated headerless output and attach column names.
    df = pd.read_csv(ofh_new,sep="\t",header=None)
    df.columns=['chrom','pos','id','ref','alt','iid','offspring_gt','father_gt','mother_gt','nalt','filter','qual','parent_ar_max','parent_ar_min','offspring_ar','parent_dp_max','parent_dp_min','offspring_dp','parent_dnm_pl_max','parent_dnm_pl_min','parent_inh_pl_max','parent_inh_pl_min','offspring_dnm_pl','offspring_inh_pl','parent_gq_max','parent_gq_min','offspring_gq','VQSLOD','ClippingRankSum','BaseQRankSum','FS','SOR','MQ','MQRankSum','QD','ReadPosRankSum','sex','pred','prob']
    with open(ofh_new,'w') as f:
        df.to_csv(f, sep="\t", index=False)
    if make_bed:
        ofb = make_output_bed(ofh_new)
def make_output_bed(ofh_new):
    """Convert the classified de novo calls file into a BED file.

    Parameters
    ----------
    ofh_new : str
        Path to the tab-separated calls file produced by classify(); the
        first line is a header and is skipped. Each record needs columns
        chrom, pos, id, ref, alt, iid (0-5) plus pred and prob as the last
        two columns.

    Returns
    -------
    str
        Path of the BED file written (``ofh_new + ".bed"``), with columns
        chrom, 0-based start, end, and a colon-joined ID field.
    """
    ofb = ofh_new + ".bed"
    # Context managers guarantee both handles are closed (the original code
    # leaked them), and the dead `dnm_bed` accumulator has been removed.
    with open(ofh_new, "r") as f, open(ofb, "w") as fout:
        f.readline()  # skip the header line
        for line in f:
            linesplit = line.rstrip().split("\t")
            chrom = linesplit[0]
            pos = linesplit[1]
            ref = linesplit[3]
            alt = linesplit[4]
            iid = linesplit[5]
            pred = linesplit[-2]
            prob = linesplit[-1]
            pos_0 = str(int(pos) - 1)  # BED intervals are 0-based, half-open
            if len(ref) == 1 and len(alt) == 1:
                pos_1 = pos  # SNV: interval covers the single reference base
            else:
                # Indel: interval spans the full reference allele.
                pos_1 = str(int(pos) + len(ref) - 1)
            ID_col = "{}:{}:{}:{}:{}:{}:{}".format(chrom, pos, ref, alt, iid, pred, prob)
            fout.write("{}\t{}\t{}\t{}\n".format(chrom, pos_0, pos_1, ID_col))
    return ofb
|
#! /usr/bin/env python
"""Tests of finite differencing module."""
from __future__ import division
import sys
import unittest
from indiff import FiniteDiff, FwdDiff, BwdDiff, CenDiff
import numpy as np
import xarray as xr
from . import InfiniteDiffTestCase
class DiffSharedTests(object):
    """Tests shared by every finite-difference class."""

    def test_slice_arr_dim(self):
        trunc = slice(1, -2)
        data = self.ones
        sliced = self._DIFF_CLS(data, self.dim)._slice_arr_dim(trunc, data)
        self.assertDatasetIdentical(sliced, data[{self.dim: trunc}])

    def test_reverse_dim(self):
        coord_vals = np.arange(self.array_len)
        arr = xr.DataArray(coord_vals, dims=[self.dim],
                           coords={self.dim: coord_vals})
        reversed_arr = self._DIFF_CLS(arr, self.dim)._reverse_dim(arr)
        expected = xr.DataArray(coord_vals[::-1], dims=[self.dim],
                                coords={self.dim: coord_vals[::-1]})
        self.assertDatasetIdentical(reversed_arr, expected)
class FiniteDiffTestCase(InfiniteDiffTestCase):
    """Base fixture: FiniteDiff over an all-ones array with unit spacing."""
    _DIFF_CLS = FiniteDiff
    def setUp(self):
        super(FiniteDiffTestCase, self).setUp()
        self.spacing = 1
        self.arr = self.ones
        self.diff_obj = self._DIFF_CLS(self.arr, self.dim,
                                       spacing=self.spacing)
        # One diff object per truncated ones-array from the parent fixture.
        self.fd_ones_trunc = [self._DIFF_CLS(self.ones_trunc[n], self.dim)
                              for n in range(self.array_len)]
class TestFiniteDiff(DiffSharedTests, FiniteDiffTestCase):
    """Run the shared diff tests against the FiniteDiff base class."""
    pass
class FwdDiffTestCase(FiniteDiffTestCase):
    """Fixture for forward-difference tests."""
    _DIFF_CLS = FwdDiff
    # BwdDiffTestCase flips this flag so TestFwdDiff logic is reusable.
    _IS_BWD = False
    def setUp(self):
        super(FwdDiffTestCase, self).setUp()
class TestFwdDiff(FwdDiffTestCase):
    """Behavioral tests for one-sided (forward/backward) differencing."""

    def test_diff_output_coords(self):
        # Differencing with spacing k drops k points from one edge.
        for step in range(self.array_len - 1):
            result = self._DIFF_CLS(self.ones, self.dim,
                                    spacing=step + 1).diff()
            if self._IS_BWD:
                kept = slice(step + 1, None)
            else:
                kept = slice(0, -(step + 1))
            self.assertCoordsIdentical(result, self.ones[{self.dim: kept}])

    def test_diff_zero_slope_varied_arr_len(self):
        for idx, ones_arr in enumerate(self.ones_trunc[:-2]):
            result = self._DIFF_CLS(ones_arr, self.dim).diff()
            self.assertDatasetIdentical(result, self.zeros_trunc[idx + 1])

    def test_diff_zero_slope_varied_spacing(self):
        for idx in range(len(self.ones_trunc) - 1):
            result = self._DIFF_CLS(self.ones, self.dim, spacing=idx + 1).diff()
            self.assertDatasetIdentical(result, self.zeros_trunc[idx])

    def test_diff_const_slope_varied_arr_len(self):
        for idx, arange_arr in enumerate(self.arange_trunc[:-2]):
            result = self._DIFF_CLS(arange_arr, self.dim).diff()
            self.assertDatasetIdentical(result, self.ones_trunc[idx + 1])

    def test_diff_const_slope_varied_spacing(self):
        for idx, ones_arr in enumerate(self.ones_trunc[:-1]):
            result = self._DIFF_CLS(self.arange, self.dim, spacing=idx + 1).diff()
            self.assertDatasetIdentical(result, (idx + 1) * ones_arr)

    def _compar_to_diff(self, arr):
        # xarray's own diff() labels results on the upper/lower edge.
        edge_label = 'upper' if self._IS_BWD else 'lower'
        result = self._DIFF_CLS(arr, self.dim).diff()
        expected = arr.diff(self.dim, n=1, label=edge_label)
        self.assertDatasetIdentical(result, expected)

    def test_diff_misc_slopes(self):
        for arr in [self.ones, self.zeros, self.arange, self.random]:
            self._compar_to_diff(arr)
class BwdDiffTestCase(FwdDiffTestCase):
    """Fixture for backward differencing: truncated arrays drop the left edge."""
    _DIFF_CLS = BwdDiff
    _IS_BWD = True

    def setUp(self):
        super(BwdDiffTestCase, self).setUp()
        # Backward diffs lose points at the *start* of the dimension, so
        # rebuild each truncation family accordingly.
        for base in ('zeros', 'ones', 'arange', 'random'):
            truncated = [getattr(self, base).isel(**{self.dim: slice(i + 1, None)})
                         for i in range(self.array_len)]
            setattr(self, base + '_trunc', truncated)
class TestBwdDiff(TestFwdDiff, BwdDiffTestCase):
    """Re-run the forward-diff test suite against BwdDiff (via _IS_BWD)."""
    pass
class CenDiffTestCase(FiniteDiffTestCase):
    """Fixture for centered differencing: truncate both edges symmetrically."""
    _DIFF_CLS = CenDiff

    def setUp(self):
        super(CenDiffTestCase, self).setUp()
        # Centered diffs lose points from *both* edges, so only half as many
        # truncation levels exist.
        n_levels = self.array_len // 2 - 1
        for base in ('zeros', 'ones', 'arange', 'random'):
            truncated = [getattr(self, base).isel(**{self.dim: slice(i + 1, -(i + 1))})
                         for i in range(n_levels)]
            setattr(self, base + '_trunc', truncated)
class TestCenDiff(DiffSharedTests, CenDiffTestCase):
    """Behavioral tests for centered differencing."""
    def test_diff_output_coords(self):
        # Centered diff with spacing k drops k points from *both* edges.
        for n in range(self.array_len // 2 - 1):
            actual = self._DIFF_CLS(self.ones, self.dim,
                                    spacing=n+1).diff()
            trunc = slice(n+1, -(n+1))
            desired = self.ones[{self.dim: trunc}]
            self.assertCoordsIdentical(actual, desired)
    def test_diff_zero_slope_varied_arr_len(self):
        for n, ones in enumerate(self.ones_trunc[:-2]):
            actual = self._DIFF_CLS(ones, self.dim).diff()
            desired = self.zeros_trunc[n+1]
            self.assertDatasetIdentical(actual, desired)
    def test_diff_zero_slope_varied_spacing(self):
        for n, ones in enumerate(self.ones_trunc[:-1]):
            actual = self._DIFF_CLS(self.ones, self.dim, spacing=n+1).diff()
            desired = self.zeros_trunc[n]
            self.assertDatasetIdentical(actual, desired)
    def test_diff_const_slope_varied_arr_len(self):
        # A centered difference of arange spans two grid points, hence the
        # factor of 2 relative to the one-sided tests.
        for n, arange in enumerate(self.arange_trunc[:-2]):
            actual = self._DIFF_CLS(arange, self.dim).diff()
            desired = 2*self.ones_trunc[n+1]
            self.assertDatasetIdentical(actual, desired)
    def test_diff_const_slope_varied_spacing(self):
        for n, ones in enumerate(self.ones_trunc[:-1]):
            actual = self._DIFF_CLS(self.arange, self.dim, spacing=n+1).diff()
            desired = 2*(n+1)*ones
            self.assertDatasetIdentical(actual, desired)
    def test_diff_fill_edge(self):
        # Each fill_edge mode keeps a different subset of edge coordinates.
        fills = [False, 'left', 'right', 'both', True]
        truncs = [slice(1, -1), slice(0, -1), slice(1, None),
                  slice(None, None), slice(None, None)]
        for fill, trunc in zip(fills, truncs):
            actual = self._DIFF_CLS(self.arange, self.dim, spacing=1,
                                    fill_edge=fill).diff()
            desired = self.arange[{self.dim: trunc}]
            self.assertCoordsIdentical(actual, desired)
    def _compar_to_diff(self, arr):
        # Reference implementation: x[i+1] - x[i-1] via two shifted slices.
        actual = self._DIFF_CLS(arr, self.dim).diff()
        desired_values = (arr.isel(**{self.dim: slice(2, None)}).values -
                          arr.isel(**{self.dim: slice(None, -2)})).values
        desired = xr.DataArray(desired_values, dims=actual.dims,
                               coords=actual.coords)
        self.assertDatasetIdentical(actual, desired)
    def test_diff(self):
        for arr in [self.ones, self.zeros, self.arange, self.random]:
            self._compar_to_diff(arr)
if __name__ == '__main__':
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
# TODO: more of private utility methods
# TODO: OneSidedDiff class
# TODO: non-default coord values (shouldn't affect diffs on arrays)
|
<reponame>ubirch/elevate-research
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import pytz
from datetime import datetime
import sensor as sensor
# Timezone applied when converting raw timestamps for plotting.
USED_TIMEZONE = pytz.timezone('Europe/Berlin')#pytz.utc
def convert_timestamps(timestamps_in):
    """Convert a sequence of POSIX timestamps into timezone-aware datetimes.

    NOTE(review): callers below pass the interpolated microsecond counters
    from the log file, not epoch seconds — confirm the intended unit.
    """
    return [datetime.fromtimestamp(ts,USED_TIMEZONE) for ts in timestamps_in]
# Input log file: each line is "seconds,microseconds,<up to 33 samples>".
FILE = "rawData/log_raw_00.dat.8"
timestamp_s_raw = []   # seconds column, one entry per line
timestamp_us_raw = []  # microseconds column, one entry per line
accel_raw = []         # flattened acceleration samples from all lines
timestamp_us = []      # per-sample timestamps, interpolated within each line
timecount = []         # line counter, one entry per accepted line
timedelta = []         # time difference between consecutive line timestamps
# read the input file and store the values into buffers
with open (FILE, 'r') as f:
    count = 0
    buffer_pre = 0
    buffer_post = 0
    while True:
        count += 1
        # Get next line from file
        line = f.readline().rstrip('\n').rstrip(',')
        # if line is empty
        # end of file is reached
        if not line:
            break
        # Cap the split so a line yields at most 2 timestamps + 33 samples.
        line_list = line.split(",",maxsplit=34)
        if len(line_list) >2:
            timestamp_s_raw.append(int(line_list[0]))
            timestamp_us_raw.append(int(line_list[1]))
            buffer_post = timestamp_us_raw[-1] # take the last value
            for i in range(2,len(line_list)):
                accel_raw.append(float(line_list[i]))
                # Linearly interpolate a per-sample timestamp between the
                # previous and current line timestamps, assuming 32 samples
                # per line.
                timestamp_us.append(int((buffer_post -buffer_pre) / 32.0 * (i-2) + buffer_pre))
            timecount.append(count)
            timedelta.append(buffer_post - buffer_pre)
            buffer_pre = buffer_post # stores the value for the next round
    # print("Line{}: {}".format(count, line.strip()))
# print("timestamp_s_raw ({}) = {}".format(len(timestamp_s_raw), timestamp_s_raw))
# print("timestamp_us_raw ({}) = {}".format(len(timestamp_us_raw), timestamp_us_raw))
# print("timestamp_us ({}) = {}".format(len(timestamp_us), timestamp_us))
# print("accel_raw ({}) = {}".format(len(accel_raw),accel_raw))
# print(timestamp)
# convert to numpy
# NOTE(review): the [:len(...)] slices below are no-ops (full-length copies).
_ts = np.array(timestamp_us[:len(timestamp_us)])
_td = np.array(timedelta[:len(timedelta)])
_ar = np.array(accel_raw[:len(accel_raw)])
# time = np.array(timecount[:len(timecount)])
time = convert_timestamps(_ts)
# plot results: upper subplot shows the raw acceleration trace
# plt.plot(time, _ts, '-', label='X')
plt.subplot(211)
plt.plot(time, _ar, '-', label='delta')
plt.xlabel('Time [ms]')
plt.ylabel('Acceleration [G]')
plt.grid()
plt.legend()
# plt.show()
# now do the filtering
x1_points = accel_raw
#######################################
# get the sensor filters and variables
# NOTE(review): this rebinds the imported module name `sensor` to an
# instance; any later `sensor.<module attr>` access would break.
sensor = sensor.MovementSensor()
# One sub-list per axis (x, y, z) for each filter stage's output.
accel_smooth = []
accel_filtered = []
accel_filtered_smooth = []
speed = []
speed_smooth = []
speed_filtered = []
speed_filtered_smooth = []
for j in range(3):
    accel_smooth.append([])
    accel_filtered.append([])
    accel_filtered_smooth.append([])
    speed.append([])
    speed_smooth.append([])
    speed_filtered.append([])
    speed_filtered_smooth.append([])
#########################################
# forward the data to the filters in 32-sample chunks; the same chunk is fed
# to all three axes.
# NOTE(review): assumes len(x1_points) is a multiple of 32 — a short final
# chunk would make the fixed range(32) reads below fail; confirm input shape.
for x in range(0, len(x1_points), 32):
    sensor.write_sensor_values(x1_points[x+0:x+32], x1_points[x+0:x+32], x1_points[x+0:x+32])
    sensor.calc_speed()
    for j in range(3):
        for i in range(32):
            accel_smooth[j].append(sensor.accel_smooth[i][j])
            accel_filtered[j].append(sensor.accel_filtered[i][j])
            accel_filtered_smooth[j].append(sensor.accel_filtered_smooth[i][j])
            speed[j].append(sensor.speed[i][j])
            speed_smooth[j].append(sensor.speed_smooth[i][j])
            speed_filtered[j].append(sensor.speed_filtered[i][j])
            speed_filtered_smooth[j].append(sensor.speed_filtered_smooth[i][j])
# convert to numpy
# NOTE(review): these slices are bounded by list length, so on the 3-element
# axis lists they are no-ops (full copies of all three axes).
_as = np.array(accel_smooth[:len(x1_points)])
_af = np.array(accel_filtered[:len(x1_points)])
_afs = np.array(accel_filtered_smooth[:len(x1_points)])
_s = np.array(speed[:len(x1_points)])
_ss = np.array(speed_smooth[:len(x1_points)])
_sf = np.array(speed_filtered[:len(x1_points)])
_sfs = np.array(speed_filtered_smooth[:len(x1_points)])
# convert to numpy arrays
# time = convert_timestamps(time)
x = np.array(x1_points)
# plot results: lower subplot compares raw data with each filter stage
# (index 0 = x axis).
plt.subplot(212)
plt.plot(time, x, '-', label='X')
plt.plot(time, _as[0], '-', label='AS')
# plt.plot(time, _af[0], '-', label='AF')
plt.plot(time, _afs[0], '-', label='AFS')
plt.plot(time, _s[0], '-', label='S')
plt.plot(time, _ss[0], '-', label='SS')
plt.plot(time, _sf[0], '-', label='SF')
plt.plot(time, _sfs[0], '-', label='SFS')
plt.xlabel('Time [s]')
plt.ylabel('Acceleration [mG]')
plt.grid()
plt.legend()
plt.show()
|
<reponame>ArenaNetworks/dto-digitalmarketplace-api
from datetime import date, timedelta
import pytest
import pendulum
from app.api.services import suppliers
from app.models import Supplier, User, db, utcnow
from tests.app.helpers import BaseApplicationTest
class TestSuppliersService(BaseApplicationTest):
    def setup(self):
        # Delegate to the shared test harness (app/db fixtures).
        super(TestSuppliersService, self).setup()
    @pytest.fixture()
    def supplier(self, app, request):
        """Create one Supplier with expiring documents and licences.

        Tests may override the labourHire block via indirect parametrization.
        """
        params = request.param if hasattr(request, 'param') else {}
        # Everything expires 10 days from today unless overridden.
        expiry_date = date.today() + timedelta(days=10)
        expiry = '{}-{}-{}'.format(expiry_date.year, expiry_date.month, expiry_date.day)
        labourHire = (
            params['labourHire'] if 'labourHire' in params else
            {
                'vic': {
                    'expiry': expiry,
                    'licenceNumber': 'V123456'
                },
                'qld': {
                    'expiry': expiry,
                    'licenceNumber': 'Q123456'
                },
                'sa': {
                    'expiry': expiry,
                    'licenceNumber': 'S123456'
                }
            }
        )
        with app.app_context():
            # One seller with three insurance documents plus the labour hire
            # licences built above.
            db.session.add(Supplier(id=1, code=1, name='Seller 1', data={
                'contact_email': '<EMAIL>',
                'email': '<EMAIL>',
                'documents': {
                    'indemnity': {
                        'expiry': expiry
                    },
                    'liability': {
                        'expiry': expiry
                    },
                    'workers': {
                        'expiry': expiry
                    }
                },
                'labourHire': labourHire
            }))
            yield db.session.query(Supplier).first()
    @pytest.fixture()
    def users(self, app):
        """Create three users for supplier 1: two active, one inactive."""
        with app.app_context():
            db.session.add(
                User(
                    id=1,
                    name='User 1',
                    password='<PASSWORD>',
                    email_address='<EMAIL>',
                    active=True,
                    role='supplier',
                    supplier_code=1,
                    password_changed_at=utcnow()
                )
            )
            db.session.add(
                User(
                    id=2,
                    name='<NAME>',
                    password='<PASSWORD>',
                    email_address='<EMAIL>',
                    active=True,
                    role='supplier',
                    supplier_code=1,
                    password_changed_at=utcnow()
                )
            )
            # Inactive user — tests check whether its email is still included.
            db.session.add(
                User(
                    id=3,
                    name='<NAME>',
                    password='<PASSWORD>',
                    email_address='<EMAIL>',
                    active=False,
                    role='supplier',
                    supplier_code=1,
                    password_changed_at=utcnow()
                )
            )
            yield db.session.query(User).all()
def test_get_expired_documents_returns_empty_array_when_no_expired_documents(self, supplier, users):
suppliers_with_expired_documents = suppliers.get_suppliers_with_expiring_documents(days=2)
assert len(suppliers_with_expired_documents) == 0
def test_get_expired_documents_returns_indemnity_liability_and_workers(self, supplier, users):
expiry_date = date.today() + timedelta(days=10)
expiry = '{}-{}-{}'.format(expiry_date.year, expiry_date.month, expiry_date.day)
suppliers_with_expired_documents = suppliers.get_suppliers_with_expiring_documents(days=10)
assert {
'expiry': expiry,
'type': 'indemnity'
} in suppliers_with_expired_documents[0]['documents']
assert {
'expiry': expiry,
'type': 'liability'
} in suppliers_with_expired_documents[0]['documents']
assert {
'expiry': expiry,
'type': 'workers'
} in suppliers_with_expired_documents[0]['documents']
def test_get_expired_documents_returns_all_supplier_email_addresses(self, supplier, users):
expiry_date = date.today() + timedelta(days=10)
expiry = '{}-{}-{}'.format(expiry_date.year, expiry_date.month, expiry_date.day)
suppliers_with_expired_documents = suppliers.get_suppliers_with_expiring_documents(days=10)
email_addresses = suppliers_with_expired_documents[0]['email_addresses']
assert len(email_addresses) == 4
assert '<EMAIL>' in email_addresses
assert '<EMAIL>' in email_addresses
assert '<EMAIL>' in email_addresses
assert '<EMAIL>' in email_addresses
def test_get_expired_documents_removes_duplicate_email_addresses(self, supplier, users):
expiry_date = date.today() + timedelta(days=10)
expiry = '{}-{}-{}'.format(expiry_date.year, expiry_date.month, expiry_date.day)
supplier.data['contact_email'] = '<EMAIL>'
supplier.data['email'] = '<EMAIL>'
suppliers_with_expired_documents = suppliers.get_suppliers_with_expiring_documents(days=10)
email_addresses = suppliers_with_expired_documents[0]['email_addresses']
assert len(email_addresses) == 2
assert '<EMAIL>' in email_addresses
assert '<EMAIL>' in email_addresses
def test_get_expired_licences_returns_empty_array_when_no_expired_licences(self, supplier, users):
suppliers_with_expired_licences = suppliers.get_suppliers_with_expiring_labour_hire_licences(days=2)
assert len(suppliers_with_expired_licences) == 0
def test_get_expired_licences_returns_vic_and_qld(self, supplier, users):
expiry_date = date.today() + timedelta(days=10)
expiry = '{}-{}-{}'.format(expiry_date.year, expiry_date.month, expiry_date.day)
suppliers_with_expired_licences = suppliers.get_suppliers_with_expiring_labour_hire_licences(days=10)
assert len(suppliers_with_expired_licences[0]['labour_hire_licences']) == 2
    # Override the supplier fixture so only a vic licence exists.
    @pytest.mark.parametrize(
        'supplier', [
            {
                'labourHire': {
                    'vic': {
                        'expiry': pendulum.today(tz='Australia/Sydney').add(days=10).format('%Y-%m-%d'),
                        'licenceNumber': 'V123456'
                    }
                }
            }
        ], indirect=True
    )
    def test_get_expired_licences_returns_vic_only(self, supplier, users):
        """With a single vic licence, only that licence is reported."""
        expiry = pendulum.today(tz='Australia/Sydney').add(days=10).format('%Y-%m-%d')
        suppliers_with_expired_licences = suppliers.get_suppliers_with_expiring_labour_hire_licences(days=10)
        assert suppliers_with_expired_licences[0]['labour_hire_licences'] == [
            {
                'expiry': expiry,
                'state': 'vic',
                'licenceNumber': 'V123456'
            }
        ]
# Fixture has both an SA and a VIC licence expiring in 10 days; only VIC
# is expected in the output (SA is ignored by the query under test).
@pytest.mark.parametrize(
    'supplier', [
        {
            'labourHire': {
                'sa': {
                    'expiry': pendulum.today(tz='Australia/Sydney').add(days=10).format('%Y-%m-%d'),
                    'licenceNumber': 'S123456'
                },
                'vic': {
                    'expiry': pendulum.today(tz='Australia/Sydney').add(days=10).format('%Y-%m-%d'),
                    'licenceNumber': 'V123456'
                }
            }
        }
    ], indirect=True
)
def test_ignore_sa_expired_licences_and_return_vic_only(self, supplier, users):
    """SA licences are excluded; only the VIC licence is returned."""
    expiry = pendulum.today(tz='Australia/Sydney').add(days=10).format('%Y-%m-%d')
    suppliers_with_expired_licences = suppliers.get_suppliers_with_expiring_labour_hire_licences(days=10)
    assert suppliers_with_expired_licences[0]['labour_hire_licences'] == [
        {
            'expiry': expiry,
            'state': 'vic',
            'licenceNumber': 'V123456'
        }
    ]
def test_get_expired_licences_returns_all_supplier_email_addresses(self, supplier, users):
    """Every supplier contact/user email is collected for expiring licences.

    Removed the unused expiry_date/expiry locals (dead code).
    """
    suppliers_with_expired_licences = suppliers.get_suppliers_with_expiring_labour_hire_licences(days=10)
    email_addresses = suppliers_with_expired_licences[0]['email_addresses']
    assert len(email_addresses) == 4
    assert '<EMAIL>' in email_addresses
    assert '<EMAIL>' in email_addresses
    assert '<EMAIL>' in email_addresses
    assert '<EMAIL>' in email_addresses
def test_get_expired_licences_removes_duplicate_email_addresses(self, supplier, users):
    """Duplicate contact addresses are de-duplicated in the result.

    Removed the unused expiry_date/expiry locals (dead code).
    """
    supplier.data['contact_email'] = '<EMAIL>'
    supplier.data['email'] = '<EMAIL>'
    suppliers_with_expired_licences = suppliers.get_suppliers_with_expiring_labour_hire_licences(days=10)
    email_addresses = suppliers_with_expired_licences[0]['email_addresses']
    assert len(email_addresses) == 2
    assert '<EMAIL>' in email_addresses
    assert '<EMAIL>' in email_addresses
|
<filename>code/linear_bias.py
import numpy as np
import os
import pyccl as ccl
import h5py
import pandas as pd
from scipy.interpolate import CubicSpline
from scipy.interpolate import interp1d
from astropy.cosmology import LambdaCDM
def shear_extractor(zmin=1.0, incomp=True, shape_noise=0.3):
    """Extract the shear signal and its covariance for one tomographic bin.

    Args:
        zmin: lower edge of the tomographic bin; the upper edge is zmin + 0.1.
        incomp: if True, read the "incomplete_" flavour of the data file.
        shape_noise: per-galaxy shape noise used for the Poisson noise term.

    Returns:
        dict with keys "theta", "xi" and "total_cov", all restricted to
        angular scales above the linear-scale cut from load_nz().

    Fixes: the HDF5 file handle was previously left open (resource leak);
    removed the unused snr diagnostics that were computed and discarded.
    """
    zmax = zmin + 0.1
    fname = "gtheta_zmin_{0:.1f}_zmax_{1:.1f}.hdf5".format(zmin, zmax)
    if incomp:
        fname = "incomplete_" + fname
    # context manager so the HDF5 handle is closed even on error
    with h5py.File(fname, "r") as shear_data:
        theta = shear_data["theta"][:]
        xi = -1.*shear_data["xi"][:]
        # jackknife covariance without shot noise
        jk_cov = shear_data["cov"][:]
        # number of galaxy pairs per angular bin
        npairs = shear_data["npairs"][:]
    # Poisson (shape) noise contribution
    shot_cov = np.diag(shape_noise**2./npairs)
    # mask the covariance and the data using the estimated linear-scale cut
    _, _, _, theta_min = load_nz(zmin, incomp)
    ind = np.where(theta > theta_min)[0][0]
    theta_lin = theta[ind:]
    xi_lin = xi[ind:]
    shot_cov_lin = shot_cov[ind:, ind:]
    jk_cov_lin = jk_cov[ind:, ind:]
    # debias the jackknife covariance (Hartlap-style factor);
    # assumes 100 jackknife samples -- TODO confirm
    fctr = (100 - 1.)/(100 - len(xi_lin) - 2.)
    jk_cov_lin = fctr * jk_cov_lin
    return {"theta": theta_lin,
            "xi": xi_lin,
            "total_cov": jk_cov_lin + shot_cov_lin}
def z_angular_mpc(zmean):
    """Return the minimum angular scale (arcmin) used to discard
    nonlinear scales, i.e. 12000 comoving kpc at redshift *zmean*."""
    fiducial = LambdaCDM(H0 = 67, Om0 = 0.319, Ode0 = 0.681, Ob0 = 0.049)
    arcsec_per_kpc = fiducial.arcsec_per_kpc_comoving(zmean).value
    # 12000 kpc converted from arcsec to arcmin
    return arcsec_per_kpc * 12000./60
def load_nz(zmin, incomp):
    """Load the lens/source redshift distributions for one tomographic bin.

    Returns (z, gal_nz, shear_nz, theta_min), where theta_min is the
    linear-scale angular cut evaluated at the mean lens redshift.
    """
    fname = "nz_zmin_{0:.1f}_zmax_{1:.1f}_incomp_{2:s}.csv".format(zmin, zmin+0.1, str(incomp))
    nz_df = pd.read_csv(os.path.join('data', fname))
    z = nz_df["z"]
    gal_nz = nz_df["gal_nz"]
    shear_nz = nz_df["shear_nz"]
    # weighted mean redshift of the lens sample
    zmean = np.sum(z * gal_nz) / np.sum(gal_nz)
    return z, gal_nz, shear_nz, z_angular_mpc(zmean)
class cosmo_model:
    """Holds an LCDM parameter set and builds the matching pyccl Cosmology."""
    def __init__(self, Omega_m = .319, Omega_b = .049, h = .67, sigma8 = .83, n_s = .96):
        '''
        initializing a default LCDM model with the parameters taken from the flagship database
        '''
        self.Omega_m = Omega_m
        self.Omega_b = Omega_b
        # BUG FIX: h was accepted but never stored, so model() raised
        # AttributeError unless a subclass assigned self.h itself.
        self.h = h
        self.sigma8 = sigma8
        self.n_s = n_s
    def model(self):
        """Return the pyccl Cosmology built from the stored parameters.

        NOTE(review): arguments are passed positionally; confirm that the
        installed pyccl version's positional order matches
        (Omega_m, Omega_b, h, sigma8, n_s).
        """
        return ccl.Cosmology(self.Omega_m, self.Omega_b, self.h, self.sigma8, self.n_s)
class linear_model(cosmo_model):
    """Predicts the galaxy-galaxy lensing signal for a constant linear bias.

    Calling the instance with a bias value returns the model tangential
    shear evaluated at the bin's (masked) angular scales.
    """
    def __init__(self, zmin, lmin, lmax, nl, incomp, cosmo_dict):
        self.zmin = zmin
        # multipole grid used for the angular power spectrum
        self.ell = np.linspace(lmin, lmax, nl)
        self.incomp = incomp
        self.theta = shear_extractor(zmin = zmin,
                                     incomp = incomp,
                                     shape_noise = 0.3)["theta"]
        self.z, self.gal_nz, self.shear_nz, self.theta_min = load_nz(zmin, incomp)
        print("minimum possible angular scale is ", self.theta_min)
        # NOTE(review): shear_extractor already restricts theta to
        # theta > theta_min, so this second cut appears redundant -- confirm.
        self.theta = self.theta[self.theta > self.theta_min]
        self.Omega_m = cosmo_dict["Omega_m"]
        self.Omega_b = cosmo_dict["Omega_b"]
        self.h = cosmo_dict["h"]
        self.sigma8 = cosmo_dict["sigma8"]
        self.n_s = cosmo_dict["n_s"]
        super().__init__(self.Omega_m, self.Omega_b, self.h, self.sigma8, self.n_s)
        # caches the pyccl Cosmology; note this rebinds the attribute name
        # "model" from the inherited method to its result
        self.model = super().model()
    def __call__(self, theta):
        # theta here is the (scalar) linear bias parameter, not an angle
        return self._sum_stat(theta)
    def _sum_stat(self, theta):
        """Compute the model tangential shear for bias value *theta*."""
        # lens tracer with a redshift-constant bias theta
        lens = ccl.NumberCountsTracer(self.model,
                                      dndz = (self.z, self.gal_nz),
                                      has_rsd = True,
                                      bias = (self.z, theta*np.ones(len(self.z))))
        source = ccl.WeakLensingTracer(self.model,
                                       dndz=(self.z, self.shear_nz),
                                       has_shear=True,
                                       ia_bias=None)
        cl_gm = ccl.angular_cl(self.model, lens, source, self.ell)
        # self.theta is in arcmin; ccl.correlation expects degrees, hence /60
        xi = ccl.correlation(self.model, self.ell, cl_gm,
                             self.theta/60, corr_type = 'gl',
                             method = 'Bessel')
        return xi
if __name__ == '__main__':
    # Smoke test: predict the GGL signal for the 0.9 < z < 1.0 bin with unit bias.
    # NOTE(review): Omega_b here is 0.04 while the class default is 0.049 --
    # confirm which value is intended.
    cosmo_dict = {"Omega_m" : 0.319, "Omega_b" : 0.04, "sigma8" : 0.83, "h" : 0.67, "n_s" : 0.96}
    lm = linear_model(0.9, 10, 1000, 1000, False, cosmo_dict)
    print("predicted GGL signal is = ", lm(1.0))
|
<gh_stars>0
import tempfile
import os
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from rest_framework import status
from recipe.serialisers import RecipeSerializer, RecipeDetailSerializer
from PIL import Image
RECIPE_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
    """Return the URL used to upload an image for *recipe_id*."""
    upload_url = reverse('recipe:recipe-upload-image', args=[recipe_id])
    return upload_url
def sample_tag(user, name='Main course'):
    """Create and return a sample tag owned by *user*."""
    return Tag.objects.create(name=name, user=user)
def sample_ingredient(user, name='Carrot'):
    """Create and return a sample ingredient owned by *user*."""
    return Ingredient.objects.create(name=name, user=user)
def detail_url(recipe_id):
    """Return the detail-view URL for *recipe_id*."""
    url = reverse('recipe:recipe-detail', args=[recipe_id])
    return url
def sample_recipe(user, **params):
    """Create and return a sample recipe; *params* override the defaults."""
    recipe_fields = {
        'title': 'paneer tikka',
        'time_minute': 10,
        'price': 5.00,
    }
    recipe_fields.update(params)
    return Recipe.objects.create(user=user, **recipe_fields)
# public test
class PublicRecipeTests(TestCase):
    """Recipe API tests for unauthenticated requests."""

    def setUp(self):
        self.client = APIClient()

    def test_retrieve_recipe(self):
        """Listing recipes without logging in is rejected."""
        response = self.client.get(RECIPE_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeTests(TestCase):
    """Recipe API tests for an authenticated user."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            email='<EMAIL>', password='<PASSWORD>')
        self.client.force_authenticate(self.user)

    def test_retrieve_recipe(self):
        """All recipes are returned, newest first."""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPE_URL)
        recipe = Recipe.objects.all().order_by('-id')
        serialiser = RecipeSerializer(recipe, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serialiser.data)

    def test_user_recipe(self):
        """Only the authenticated user's recipes are returned."""
        usernew = get_user_model().objects.create_user(
            '<EMAIL>',
            'somenewpasword'
        )
        sample_recipe(user=self.user)
        sample_recipe(usernew)
        res = self.client.get(RECIPE_URL)
        recipe = Recipe.objects.filter(user=self.user)
        serialiser = RecipeSerializer(recipe, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serialiser.data)
        # BUG FIX: assertTrue(len(res.data), 1) always passed because the
        # second argument is the failure *message*; assertEqual was intended.
        self.assertEqual(len(res.data), 1)

    def test_recipe_detail(self):
        """The detail endpoint returns the serialized recipe with its
        tags and ingredients."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        res = self.client.get(url)
        serialiser = RecipeDetailSerializer(recipe)
        self.assertEqual(res.data, serialiser.data)
class ImageUploadTest(TestCase):
    """Image upload and recipe filtering tests (authenticated)."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            'testpass'
        )
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)

    def tearDown(self):
        # remove the uploaded image file after each test
        self.recipe.image.delete()

    def test_image_upload(self):
        """A valid JPEG upload succeeds and the file lands on disk."""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            test_image = Image.new('RGB', (10, 10))
            test_image.save(ntf, format='JPEG')
            ntf.seek(0)  # rewind so the upload reads from the start
            res = self.client.post(url, {'image': ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))

    def test_upload_image_bad_request(self):
        """Test uploading an invalid image."""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_by_tags(self):
        """Recipes can be filtered by a comma-separated list of tag ids."""
        recipe1 = sample_recipe(user=self.user, title='Paneer tikka')
        recipe2 = sample_recipe(user=self.user, title='Paneer masala')
        tag1 = sample_tag(user=self.user, name='vegan')
        tag2 = sample_tag(user=self.user, name='vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title="chicken")
        # fixed misspelled local 'paylaod'
        payload = {'tags': f'{tag1.id},{tag2.id}'}
        res = self.client.get(RECIPE_URL, payload)
        serialiser1 = RecipeSerializer(recipe1)
        serialiser2 = RecipeSerializer(recipe2)
        serialiser3 = RecipeSerializer(recipe3)
        self.assertIn(serialiser1.data, res.data)
        self.assertIn(serialiser2.data, res.data)
        self.assertNotIn(serialiser3.data, res.data)

    def test_filter_by_ingredients(self):
        """Recipes can be filtered by a comma-separated list of ingredient ids."""
        recipe1 = sample_recipe(user=self.user, title='Paneer tikka')
        recipe2 = sample_recipe(user=self.user, title='Paneer masala')
        ingredient1 = sample_ingredient(user=self.user, name='soda')
        ingredient2 = sample_ingredient(user=self.user, name='pickle')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title="mashrooms")
        # fixed misspelled local 'paylaod'
        payload = {'ingredients': f'{ingredient1.id},{ingredient2.id}'}
        res = self.client.get(RECIPE_URL, payload)
        serialiser1 = RecipeSerializer(recipe1)
        serialiser2 = RecipeSerializer(recipe2)
        serialiser3 = RecipeSerializer(recipe3)
        self.assertIn(serialiser1.data, res.data)
        self.assertIn(serialiser2.data, res.data)
        self.assertNotIn(serialiser3.data, res.data)
|
import io
import extractseqs
import inserthdr
last_output_text = """\
# LAST version 833
#
# a=7 b=1 A=7 B=1 e=34 d=-1 x=33 y=9 z=33 D=1e+06 E=22.3617
# R=01 u=2 s=2 S=0 M=0 T=0 m=10 l=1 n=10 k=1 w=1000 t=0.910239 j=3 Q=0
# /work/04658/jklynch/ohana/last/HOT_genes
# Reference sequences=42682828 normal letters=22359609694
# lambda=1.09602 K=0.335388
#
# A C G T
# A 1 -1 -1 -1
# C -1 1 -1 -1
# G -1 -1 1 -1
# T -1 -1 -1 1
#
# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score, query length, subject length
# batch 0
A\tHOT234_1_0200m_c10096_4\t100.00\t147\t0\t0\t1\t147\t400\t546\t8.6e-68\t234\t147\t546
A\tHOT234_1_0200m_rep_c55158_2\t100.00\t147\t0\t0\t1\t147\t400\t546\t8.6e-68\t234\t147\t546
A\tHOT238_1c_0200m_c3_1\t100.00\t147\t0\t0\t1\t147\t1\t147\t8.6e-68\t234\t147\t147
A\tHOT238_1c_0200m_rep_c260499_1\t100.00\t147\t0\t0\t1\t147\t1\t147\t8.6e-68\t234\t147\t147"""
fasta_text = """\
>A
GTGC
>B
ATGC
>C
ATGG"""
def test_get_last_hits():
    """With no row limit, every hit is grouped under its reference sample."""
    hits = extractseqs.get_last_reference_hits(
        last_output_file=io.StringIO(last_output_text),
        last_output_row_limit=None)
    assert len(hits) == 2
    assert len(hits['HOT234_1_0200m']) == 2
    assert 'HOT234_1_0200m_rep_c55158_2' in hits['HOT234_1_0200m']
    assert 'HOT234_1_0200m_c10096_4' in hits['HOT234_1_0200m']
    assert len(hits['HOT238_1c_0200m']) == 2
    assert 'HOT238_1c_0200m_c3_1' in hits['HOT238_1c_0200m']
    assert 'HOT238_1c_0200m_rep_c260499_1' in hits['HOT238_1c_0200m']
def test_get_last_hits__limit():
    """A row limit of 3 truncates the second sample to a single hit."""
    hits = extractseqs.get_last_reference_hits(
        last_output_file=io.StringIO(last_output_text),
        last_output_row_limit=3)
    assert len(hits) == 2
    assert len(hits['HOT234_1_0200m']) == 2
    assert 'HOT234_1_0200m_rep_c55158_2' in hits['HOT234_1_0200m']
    assert 'HOT234_1_0200m_c10096_4' in hits['HOT234_1_0200m']
    assert len(hits['HOT238_1c_0200m']) == 1
    assert 'HOT238_1c_0200m_c3_1' in hits['HOT238_1c_0200m']
def test_find_sequences():
    """Matches come back in fasta-file order regardless of the query order."""
    matches = list(extractseqs.find_sequences(
        ['C', 'B', 'A'], fasta_file=io.StringIO(fasta_text)))
    assert [m.id for m in matches] == ['A', 'B', 'C']
def test_find_sequences__first():
    """A query matching the first FASTA record is found."""
    matches = list(extractseqs.find_sequences(['A'], fasta_file=io.StringIO(fasta_text)))
    assert [m.id for m in matches] == ['A']
def test_find_sequences__middle():
    """A query matching a middle FASTA record is found."""
    matches = list(extractseqs.find_sequences(['B'], fasta_file=io.StringIO(fasta_text)))
    assert [m.id for m in matches] == ['B']
def test_find_sequences__last():
    """A query matching the last FASTA record is found."""
    matches = list(extractseqs.find_sequences(['C'], fasta_file=io.StringIO(fasta_text)))
    assert [m.id for m in matches] == ['C']
def test_parse_last_output_filename__contigs():
    """Input name and sequence type are split out of a '-contigs' path."""
    name, seq_type = extractseqs.parse_muscope_last_output_filename('/some/dir/test.fa-contigs.tab')
    assert (name, seq_type) == ('test.fa', 'contigs')
def test_parse_last_output_filename__genes():
    """Input name and sequence type are split out of a '-genes' path."""
    name, seq_type = extractseqs.parse_muscope_last_output_filename('/some/dir/test.fa-genes.tab')
    assert (name, seq_type) == ('test.fa', 'genes')
def test_parse_last_output_filename__proteins():
    """Input name and sequence type are split out of a '-proteins' path."""
    name, seq_type = extractseqs.parse_muscope_last_output_filename('/some/dir/test.fa-proteins.tab')
    assert (name, seq_type) == ('test.fa', 'proteins')
def test_insrthdr():
    """inserthdr turns the '# Fields:' comment into a real header row.

    Removed the leftover debug print() of the whole output buffer.
    """
    input_file = io.StringIO(last_output_text)
    output_file = io.StringIO()
    inserthdr.inserthdr(input_file, output_file)
    output_lines = output_file.getvalue().splitlines()
    # line 17 (index 16) of the rewritten output is the column-header row
    assert output_lines[16].startswith('query id')
|
<gh_stars>10-100
import os
import subprocess
import numpy as np
import skimage.io
from datasets.base import BaseDataset
from utils.boxes import generate_anchors
class KITTI(BaseDataset):
    """KITTI 2D object-detection dataset wrapper (Car/Pedestrian/Cyclist)."""
    def __init__(self, phase, cfg):
        super(KITTI, self).__init__(phase, cfg)
        # (height, width), both dividable by 16 so the 16x-downsampled
        # feature map aligns with the anchor grid below
        self.input_size = (384, 1248)
        self.class_names = ('Car', 'Pedestrian', 'Cyclist')
        # per-channel RGB statistics used for input normalization
        self.rgb_mean = np.array([93.877, 98.801, 95.923], dtype=np.float32).reshape(1, 1, 3)
        self.rgb_std = np.array([78.782, 80.130, 81.200], dtype=np.float32).reshape(1, 1, 3)
        self.num_classes = len(self.class_names)
        # class name -> integer id
        self.class_ids_dict = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_names)}
        self.data_dir = os.path.join(cfg.data_dir, 'kitti')
        self.sample_ids, self.sample_set_path = self.get_sample_ids()
        self.grid_size = tuple(x // 16 for x in self.input_size)  # anchors grid
        # anchor seed boxes -- presumably (width, height) in input pixels;
        # TODO confirm against generate_anchors
        self.anchors_seed = np.array([[34, 30], [75, 45], [38, 90],
                                      [127, 68], [80, 174], [196, 97],
                                      [194, 178], [283, 156], [381, 185]], dtype=np.float32)
        self.anchors = generate_anchors(self.grid_size, self.input_size, self.anchors_seed)
        self.anchors_per_grid = self.anchors_seed.shape[0]
        self.num_anchors = self.anchors.shape[0]
        self.results_dir = os.path.join(cfg.save_dir, 'results')
    def get_sample_ids(self):
        """Read the image-set file for the current phase.
        Returns (tuple of sample-id strings, path of the image-set file).
        NOTE(review): sample_set_name is None for any phase other than
        train/val/trainval, which makes os.path.join raise -- confirm that
        phase is validated upstream.
        """
        sample_set_name = 'train.txt' if self.phase == 'train' \
            else 'val.txt' if self.phase == 'val' \
            else 'trainval.txt' if self.phase == 'trainval' \
            else None
        sample_ids_path = os.path.join(self.data_dir, 'image_sets', sample_set_name)
        with open(sample_ids_path, 'r') as fp:
            sample_ids = fp.readlines()
        sample_ids = tuple(x.strip() for x in sample_ids)
        return sample_ids, sample_ids_path
    def load_image(self, index):
        """Load the image for sample *index* as float32; returns (image, image_id)."""
        image_id = self.sample_ids[index]
        image_path = os.path.join(self.data_dir, 'training/image_2', image_id + '.png')
        image = skimage.io.imread(image_path).astype(np.float32)
        return image, image_id
    def load_annotations(self, index):
        """Parse the KITTI label file for sample *index*.
        Returns (class_ids int16 array, boxes float32 array); labels whose
        class is not in class_names are skipped. Columns 4:8 of a KITTI
        label line are the 2D bbox (left, top, right, bottom).
        """
        ann_id = self.sample_ids[index]
        ann_path = os.path.join(self.data_dir, 'training/label_2', ann_id + '.txt')
        with open(ann_path, 'r') as fp:
            annotations = fp.readlines()
        annotations = [ann.strip().split(' ') for ann in annotations]
        class_ids, boxes = [], []
        for ann in annotations:
            if ann[0] not in self.class_names:
                continue
            class_ids.append(self.class_ids_dict[ann[0]])
            boxes.append([float(x) for x in ann[4:8]])
        class_ids = np.array(class_ids, dtype=np.int16)
        boxes = np.array(boxes, dtype=np.float32)
        return class_ids, boxes
    # ========================================
    # evaluation
    # ========================================
    def save_results(self, results):
        """Write one KITTI-format detection .txt per image into results/data.
        Images with no detections get an empty file so the evaluator sees
        every sample.
        """
        txt_dir = os.path.join(self.results_dir, 'data')
        os.makedirs(txt_dir, exist_ok=True)
        for res in results:
            txt_path = os.path.join(txt_dir, res['image_meta']['image_id'] + '.txt')
            if 'class_ids' not in res:
                with open(txt_path, 'w') as fp:
                    fp.write('')
                continue
            num_boxes = len(res['class_ids'])
            with open(txt_path, 'w') as fp:
                for i in range(num_boxes):
                    class_name = self.class_names[res['class_ids'][i]].lower()
                    score = res['scores'][i]
                    bbox = res['boxes'][i, :]
                    # KITTI result line: type, truncation, occlusion, alpha,
                    # bbox(4), dims/loc/rot placeholders, score
                    line = '{} -1 -1 0 {:.2f} {:.2f} {:.2f} {:.2f} 0 0 0 0 0 0 0 {:.3f}\n'.format(
                        class_name, *bbox, score)
                    fp.write(line)
    def evaluate(self):
        """Run the compiled KITTI C++ evaluator over the saved results.
        Returns a dict of per-class easy/moderate/hard APs plus their mean.
        """
        kitti_eval_tool_path = os.path.join(self.cfg.root_dir, 'src/utils/kitti-eval/cpp/evaluate_object')
        cmd = '{} {} {} {} {}'.format(kitti_eval_tool_path,
                                      os.path.join(self.data_dir, 'training'),
                                      self.sample_set_path,
                                      self.results_dir,
                                      len(self.sample_ids))
        # NOTE(review): return code is captured but not checked
        status = subprocess.call(cmd, shell=True)
        aps = {}
        for class_name in self.class_names:
            map_path = os.path.join(self.results_dir, 'stats_{}_ap.txt'.format(class_name.lower()))
            if os.path.exists(map_path):
                with open(map_path, 'r') as f:
                    lines = f.readlines()
                _aps = [float(line.split('=')[1].strip()) for line in lines]
            else:
                # evaluator produced no stats for this class
                _aps = [0., 0., 0.]
            aps[class_name + '_easy'] = _aps[0]
            aps[class_name + '_moderate'] = _aps[1]
            aps[class_name + '_hard'] = _aps[2]
        # mean over the nine per-class/difficulty entries (computed before
        # the 'mAP' key is inserted)
        aps['mAP'] = sum(aps.values()) / len(aps)
        return aps
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental framework for generic TensorBoard data providers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
import numpy as np
@six.add_metaclass(abc.ABCMeta)
class DataProvider(object):
"""Interface for reading TensorBoard scalar, tensor, and blob data.
These APIs are under development and subject to change. For instance,
providers may be asked to implement more filtering mechanisms, such as
downsampling strategies or domain restriction by step or wall time.
The data provider interface specifies three *data classes*: scalars,
tensors, and blob sequences. All data is stored in *time series* for
one of these data classes. A time series is identified by run name and
tag name (each a non-empty text string), as well as an experiment ID
and plugin name (see below). Points in a time series are uniquely
indexed by *step*, an arbitrary non-negative integer. Each point in a
time series also has an associated wall time, plus its actual value,
which is drawn from the corresponding data class.
Each point in a scalar time series contains a single scalar value, as
a 64-bit floating point number. Scalars are "privileged" rather than
being subsumed under tensors because there are useful operations on
scalars that don't make sense in the general tensor case: e.g., "list
all scalar time series with tag name `accuracy` whose exponentially
weighted moving average is at least 0.999".
Each point in a tensor time series contains a tensor of arbitrary
dtype (including byte strings and text strings) and shape (including
rank-0 tensors, a.k.a. scalars). Each tensor is expected to be
"reasonably small" to accommodate common database cell size limits.
For instance, a histogram with a bounded number of buckets (say, 30)
occupies about 500 bytes, and a PR curve with a bounded number of
thresholds (say, 201) occupies about 5000 bytes. These are both well
within typical database tolerances (Google Cloud Spanner: 10 MiB;
MySQL: 64 KiB), and would be appropriate to store as tensors. By
contrast, image, audio, or model graph data may easily be multiple
megabytes in size, and so should be stored as blobs instead. The
tensors at each step in a time series need not have the same dtype or
shape.
Each point in a blob sequence time series contains an ordered sequence
of zero or more blobs, which are arbitrary data with no tensor
structure. These might represent PNG-encoded image data, protobuf wire
encodings of TensorFlow graphs, or PLY-format 3D mesh data, for some
examples. This data class provides blob *sequences* rather than just
blobs because it's common to want to take multiple homogeneous samples
of a given time series: say, "show me the bounding box classifications
for 3 random inputs from this batch". A single blob can of course be
represented as a blob sequence that always has exactly one element.
When reading time series, *downsampling* refers to selecting a
subset of the points in each time series. Downsampling only occurs
across the step axis (rather than, e.g., the blobs in a single blob
sequence datum), and occurs individually within each time series.
When downsampling, the latest datum should always be included in the
sample, so that clients have a view of metrics that is maximally up
to date. Implementations may choose to force the first (oldest)
datum to be included in each sample as well, but this is not
required; clients should not make assumptions either way. The
remainder of the points in the sample should be selected uniformly
at random from available points. Downsampling should be
deterministic within a time series. It is also useful for the
downsampling behavior to depend only on the set of step values
within a time series, such that two "parallel" time series with data
at exactly the same steps also retain the same steps after
downsampling.
Every time series belongs to a specific experiment and is owned by a
specific plugin. (Thus, the "primary key" for a time series has four
components: experiment, plugin, run, tag.) The experiment ID is an
arbitrary URL-safe non-empty text string, whose interpretation is at
the discretion of the data provider. As a special case, the empty
string as an experiment ID denotes that no experiment was given. Data
providers may or may not fully support an empty experiment ID. The
plugin name should correspond to the `plugin_data.plugin_name` field
of the `SummaryMetadata` proto passed to `tf.summary.write`.
All methods on this class take a `RequestContext` parameter as the
first positional argument. This argument is temporarily optional to
facilitate migration, but will be required in the future.
Unless otherwise noted, any methods on this class may raise errors
defined in `tensorboard.errors`, like `tensorboard.errors.NotFoundError`.
"""
def data_location(self, ctx=None, *, experiment_id):
"""Render a human-readable description of the data source.
For instance, this might return a path to a directory on disk.
The default implementation always returns the empty string.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
Returns:
A string, which may be empty.
"""
return ""
def experiment_metadata(self, ctx=None, *, experiment_id):
"""Retrieve metadata of a given experiment.
The metadata may include fields such as name and description
of the experiment, as well as a timestamp for the experiment.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of the experiment in question.
Returns:
If the metadata does not exist, `None`.
Otherwise, an `ExperimentMetadata` object containing metadata about
the experiment.
"""
return None
def list_plugins(self, ctx=None, *, experiment_id):
"""List all plugins that own data in a given experiment.
This should be the set of all plugin names `p` such that calling
`list_scalars`, `list_tensors`, or `list_blob_sequences` for the
given `experiment_id` and plugin name `p` gives a non-empty
result.
This operation is optional, but may later become required.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
Returns:
A collection of strings representing plugin names, or `None`
if this operation is not supported by this data provider.
"""
return None
@abc.abstractmethod
def list_runs(self, ctx=None, *, experiment_id):
"""List all runs within an experiment.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
Returns:
A collection of `Run` values.
Raises:
tensorboard.errors.PublicError: See `DataProvider` class docstring.
"""
pass
@abc.abstractmethod
def list_scalars(
self, ctx=None, *, experiment_id, plugin_name, run_tag_filter=None
):
"""List metadata about scalar time series.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
plugin_name: String name of the TensorBoard plugin that created
the data to be queried. Required.
run_tag_filter: Optional `RunTagFilter` value. If omitted, all
runs and tags will be included.
The result will only contain keys for run-tag combinations that
actually exist, which may not include all entries in the
`run_tag_filter`.
Returns:
A nested map `d` such that `d[run][tag]` is a `ScalarTimeSeries`
value.
Raises:
tensorboard.errors.PublicError: See `DataProvider` class docstring.
"""
pass
@abc.abstractmethod
def read_scalars(
self,
ctx=None,
*,
experiment_id,
plugin_name,
downsample=None,
run_tag_filter=None
):
"""Read values from scalar time series.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
plugin_name: String name of the TensorBoard plugin that created
the data to be queried. Required.
downsample: Integer number of steps to which to downsample the
results (e.g., `1000`). See `DataProvider` class docstring
for details about this parameter. Required.
run_tag_filter: Optional `RunTagFilter` value. If provided, a time
series will only be included in the result if its run and tag
both pass this filter. If `None`, all time series will be
included.
The result will only contain keys for run-tag combinations that
actually exist, which may not include all entries in the
`run_tag_filter`.
Returns:
A nested map `d` such that `d[run][tag]` is a list of
`ScalarDatum` values sorted by step.
Raises:
tensorboard.errors.PublicError: See `DataProvider` class docstring.
"""
pass
def list_tensors(
self, ctx=None, *, experiment_id, plugin_name, run_tag_filter=None
):
"""List metadata about tensor time series.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
plugin_name: String name of the TensorBoard plugin that created
the data to be queried. Required.
run_tag_filter: Optional `RunTagFilter` value. If omitted, all
runs and tags will be included.
The result will only contain keys for run-tag combinations that
actually exist, which may not include all entries in the
`run_tag_filter`.
Returns:
A nested map `d` such that `d[run][tag]` is a `TensorTimeSeries`
value.
Raises:
tensorboard.errors.PublicError: See `DataProvider` class docstring.
"""
pass
def read_tensors(
self,
ctx=None,
*,
experiment_id,
plugin_name,
downsample=None,
run_tag_filter=None
):
"""Read values from tensor time series.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
plugin_name: String name of the TensorBoard plugin that created
the data to be queried. Required.
downsample: Integer number of steps to which to downsample the
results (e.g., `1000`). See `DataProvider` class docstring
for details about this parameter. Required.
run_tag_filter: Optional `RunTagFilter` value. If provided, a time
series will only be included in the result if its run and tag
both pass this filter. If `None`, all time series will be
included.
The result will only contain keys for run-tag combinations that
actually exist, which may not include all entries in the
`run_tag_filter`.
Returns:
A nested map `d` such that `d[run][tag]` is a list of
`TensorDatum` values sorted by step.
Raises:
tensorboard.errors.PublicError: See `DataProvider` class docstring.
"""
pass
def list_blob_sequences(
self, ctx=None, *, experiment_id, plugin_name, run_tag_filter=None
):
"""List metadata about blob sequence time series.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
plugin_name: String name of the TensorBoard plugin that created the data
to be queried. Required.
run_tag_filter: Optional `RunTagFilter` value. If omitted, all runs and
tags will be included. The result will only contain keys for run-tag
combinations that actually exist, which may not include all entries in
the `run_tag_filter`.
Returns:
A nested map `d` such that `d[run][tag]` is a `BlobSequenceTimeSeries`
value.
Raises:
tensorboard.errors.PublicError: See `DataProvider` class docstring.
"""
pass
def read_blob_sequences(
self,
ctx=None,
*,
experiment_id,
plugin_name,
downsample=None,
run_tag_filter=None
):
"""Read values from blob sequence time series.
Args:
ctx: A TensorBoard `RequestContext` value.
experiment_id: ID of enclosing experiment.
plugin_name: String name of the TensorBoard plugin that created the data
to be queried. Required.
downsample: Integer number of steps to which to downsample the
results (e.g., `1000`). See `DataProvider` class docstring
for details about this parameter. Required.
run_tag_filter: Optional `RunTagFilter` value. If provided, a time series
will only be included in the result if its run and tag both pass this
filter. If `None`, all time series will be included. The result will
only contain keys for run-tag combinations that actually exist, which
may not include all entries in the `run_tag_filter`.
Returns:
A nested map `d` such that `d[run][tag]` is a list of
`BlobSequenceDatum` values sorted by step.
Raises:
tensorboard.errors.PublicError: See `DataProvider` class docstring.
"""
pass
    def read_blob(self, ctx=None, *, blob_key):
        """Read data for a single blob.
        Args:
          ctx: A TensorBoard `RequestContext` value.
          blob_key: A key identifying the desired blob, as provided by
            `read_blob_sequences(...)`.
        Returns:
          Raw binary data as `bytes`.
        Raises:
          tensorboard.errors.PublicError: See `DataProvider` class docstring.
        """
        # Abstract stub: concrete data-provider subclasses must override this.
        pass
class ExperimentMetadata(object):
    """Metadata about an experiment.
    Attributes:
      experiment_name: A user-facing name for the experiment (as a `str`).
      experiment_description: A user-facing description for the experiment
        (as a `str`).
      creation_time: A timestamp for the creation of the experiment, as `float`
        seconds since the epoch.
    """
    def __init__(self, experiment_name, experiment_description, creation_time):
        self._experiment_name = experiment_name
        self._experiment_description = experiment_description
        self._creation_time = creation_time
    @property
    def experiment_name(self):
        return self._experiment_name
    @property
    def experiment_description(self):
        return self._experiment_description
    @property
    def creation_time(self):
        return self._creation_time
    # Value semantics, consistent with the sibling value classes in this
    # module (`Run`, `ScalarDatum`, `BlobReference`, ...), which all define
    # `__eq__`/`__hash__`/`__repr__`; this class previously defined none.
    def __eq__(self, other):
        if not isinstance(other, ExperimentMetadata):
            return False
        if self._experiment_name != other._experiment_name:
            return False
        if self._experiment_description != other._experiment_description:
            return False
        if self._creation_time != other._creation_time:
            return False
        return True
    def __hash__(self):
        return hash(
            (
                self._experiment_name,
                self._experiment_description,
                self._creation_time,
            )
        )
    def __repr__(self):
        return "ExperimentMetadata(%s)" % ", ".join(
            (
                "experiment_name=%r" % (self._experiment_name,),
                "experiment_description=%r" % (self._experiment_description,),
                "creation_time=%r" % (self._creation_time,),
            )
        )
class Run(object):
    """Metadata about a run.
    Attributes:
      run_id: A unique opaque string identifier for this run.
      run_name: A user-facing name for this run (as a `str`).
      start_time: The wall time of the earliest recorded event in this
        run, as `float` seconds since epoch, or `None` if this run has no
        recorded events.
    """
    __slots__ = ("_run_id", "_run_name", "_start_time")
    def __init__(self, run_id, run_name, start_time):
        self._run_id = run_id
        self._run_name = run_name
        self._start_time = start_time
    @property
    def run_id(self):
        """Opaque unique identifier for this run."""
        return self._run_id
    @property
    def run_name(self):
        """User-facing run name."""
        return self._run_name
    @property
    def start_time(self):
        """Earliest event wall time, or `None` for an empty run."""
        return self._start_time
    def __eq__(self, other):
        # Value equality over all three fields.
        if not isinstance(other, Run):
            return False
        return (
            self._run_id == other._run_id
            and self._run_name == other._run_name
            and self._start_time == other._start_time
        )
    def __hash__(self):
        return hash((self._run_id, self._run_name, self._start_time))
    def __repr__(self):
        return "Run(run_id={!r}, run_name={!r}, start_time={!r})".format(
            self._run_id, self._run_name, self._start_time
        )
class _TimeSeries(object):
"""Metadata about time series data for a particular run and tag.
Superclass of `ScalarTimeSeries`, `TensorTimeSeries`, and
`BlobSequenceTimeSeries`.
"""
__slots__ = (
"_max_step",
"_max_wall_time",
"_plugin_content",
"_description",
"_display_name",
)
def __init__(
self,
*,
max_step,
max_wall_time,
plugin_content,
description,
display_name
):
self._max_step = max_step
self._max_wall_time = max_wall_time
self._plugin_content = plugin_content
self._description = description
self._display_name = display_name
@property
def max_step(self):
return self._max_step
@property
def max_wall_time(self):
return self._max_wall_time
@property
def plugin_content(self):
return self._plugin_content
@property
def description(self):
return self._description
@property
def display_name(self):
return self._display_name
class ScalarTimeSeries(_TimeSeries):
    """Metadata about a scalar time series for a particular run and tag.
    Attributes:
      max_step: The largest step value of any datum in this scalar time series; a
        nonnegative integer.
      max_wall_time: The largest wall time of any datum in this time series, as
        `float` seconds since epoch.
      plugin_content: A bytestring of arbitrary plugin-specific metadata for this
        time series, as provided to `tf.summary.write` in the
        `plugin_data.content` field of the `metadata` argument.
      description: An optional long-form Markdown description, as a `str` that is
        empty if no description was specified.
      display_name: An optional long-form Markdown description, as a `str` that is
        empty if no description was specified. Deprecated; may be removed soon.
    """
    def __eq__(self, other):
        # Value equality over all five metadata fields.
        if not isinstance(other, ScalarTimeSeries):
            return False
        return (
            self._max_step == other._max_step
            and self._max_wall_time == other._max_wall_time
            and self._plugin_content == other._plugin_content
            and self._description == other._description
            and self._display_name == other._display_name
        )
    def __hash__(self):
        return hash(
            (
                self._max_step,
                self._max_wall_time,
                self._plugin_content,
                self._description,
                self._display_name,
            )
        )
    def __repr__(self):
        parts = [
            "max_step={!r}".format(self._max_step),
            "max_wall_time={!r}".format(self._max_wall_time),
            "plugin_content={!r}".format(self._plugin_content),
            "description={!r}".format(self._description),
            "display_name={!r}".format(self._display_name),
        ]
        return "ScalarTimeSeries({})".format(", ".join(parts))
class ScalarDatum(object):
    """A single datum in a scalar time series for a run and tag.
    Attributes:
      step: The global step at which this datum occurred; an integer. This
        is a unique key among data of this time series.
      wall_time: The real-world time at which this datum occurred, as
        `float` seconds since epoch.
      value: The scalar value for this datum; a `float`.
    """
    __slots__ = ("_step", "_wall_time", "_value")
    def __init__(self, step, wall_time, value):
        self._step = step
        self._wall_time = wall_time
        self._value = value
    @property
    def step(self):
        """Global step; unique key within the series."""
        return self._step
    @property
    def wall_time(self):
        """Wall time as float seconds since epoch."""
        return self._wall_time
    @property
    def value(self):
        """Scalar value as a float."""
        return self._value
    def __eq__(self, other):
        if not isinstance(other, ScalarDatum):
            return False
        return (
            self._step == other._step
            and self._wall_time == other._wall_time
            and self._value == other._value
        )
    def __hash__(self):
        return hash((self._step, self._wall_time, self._value))
    def __repr__(self):
        return "ScalarDatum(step={!r}, wall_time={!r}, value={!r})".format(
            self._step, self._wall_time, self._value
        )
class TensorTimeSeries(_TimeSeries):
    """Metadata about a tensor time series for a particular run and tag.
    Attributes:
      max_step: The largest step value of any datum in this tensor time series; a
        nonnegative integer.
      max_wall_time: The largest wall time of any datum in this time series, as
        `float` seconds since epoch.
      plugin_content: A bytestring of arbitrary plugin-specific metadata for this
        time series, as provided to `tf.summary.write` in the
        `plugin_data.content` field of the `metadata` argument.
      description: An optional long-form Markdown description, as a `str` that is
        empty if no description was specified.
      display_name: An optional long-form Markdown description, as a `str` that is
        empty if no description was specified. Deprecated; may be removed soon.
    """
    def __eq__(self, other):
        # Value equality over all five metadata fields.
        if not isinstance(other, TensorTimeSeries):
            return False
        return (
            self._max_step == other._max_step
            and self._max_wall_time == other._max_wall_time
            and self._plugin_content == other._plugin_content
            and self._description == other._description
            and self._display_name == other._display_name
        )
    def __hash__(self):
        return hash(
            (
                self._max_step,
                self._max_wall_time,
                self._plugin_content,
                self._description,
                self._display_name,
            )
        )
    def __repr__(self):
        parts = [
            "max_step={!r}".format(self._max_step),
            "max_wall_time={!r}".format(self._max_wall_time),
            "plugin_content={!r}".format(self._plugin_content),
            "description={!r}".format(self._description),
            "display_name={!r}".format(self._display_name),
        ]
        return "TensorTimeSeries({})".format(", ".join(parts))
class TensorDatum(object):
    """A single datum in a tensor time series for a run and tag.
    Attributes:
      step: The global step at which this datum occurred; an integer. This
        is a unique key among data of this time series.
      wall_time: The real-world time at which this datum occurred, as
        `float` seconds since epoch.
      numpy: The `numpy.ndarray` value with the tensor contents of this
        datum.
    """
    __slots__ = ("_step", "_wall_time", "_numpy")
    def __init__(self, step, wall_time, numpy):
        self._step = step
        self._wall_time = wall_time
        self._numpy = numpy
    @property
    def step(self):
        """Global step; unique key within the series."""
        return self._step
    @property
    def wall_time(self):
        """Wall time as float seconds since epoch."""
        return self._wall_time
    @property
    def numpy(self):
        """Tensor contents as a `numpy.ndarray`."""
        return self._numpy
    def __eq__(self, other):
        # Arrays are compared elementwise via np.array_equal, not identity.
        return (
            isinstance(other, TensorDatum)
            and self._step == other._step
            and self._wall_time == other._wall_time
            and bool(np.array_equal(self._numpy, other._numpy))
        )
    # Unhashable: the wrapped numpy array is mutable.
    __hash__ = None
    def __repr__(self):
        return "TensorDatum(step={!r}, wall_time={!r}, numpy={!r})".format(
            self._step, self._wall_time, self._numpy
        )
class BlobSequenceTimeSeries(_TimeSeries):
    """Metadata about a blob sequence time series for a particular run and tag.
    Attributes:
      max_step: The largest step value of any datum in this scalar time series; a
        nonnegative integer.
      max_wall_time: The largest wall time of any datum in this time series, as
        `float` seconds since epoch.
      max_length: The largest length (number of blobs) of any datum in
        this scalar time series, or `None` if this time series is empty.
      plugin_content: A bytestring of arbitrary plugin-specific metadata for this
        time series, as provided to `tf.summary.write` in the
        `plugin_data.content` field of the `metadata` argument.
      description: An optional long-form Markdown description, as a `str` that is
        empty if no description was specified.
      display_name: An optional long-form Markdown description, as a `str` that is
        empty if no description was specified. Deprecated; may be removed soon.
    """
    # Only the extra field is slotted here; the rest live on _TimeSeries.
    __slots__ = ("_max_length",)
    def __init__(
        self,
        *,
        max_step,
        max_wall_time,
        max_length,
        plugin_content,
        description,
        display_name
    ):
        super(BlobSequenceTimeSeries, self).__init__(
            max_step=max_step,
            max_wall_time=max_wall_time,
            plugin_content=plugin_content,
            description=description,
            display_name=display_name,
        )
        self._max_length = max_length
    @property
    def max_length(self):
        """Largest number of blobs in any datum, or `None` when empty."""
        return self._max_length
    def __eq__(self, other):
        # Value equality over all six metadata fields.
        if not isinstance(other, BlobSequenceTimeSeries):
            return False
        return (
            self._max_step == other._max_step
            and self._max_wall_time == other._max_wall_time
            and self._max_length == other._max_length
            and self._plugin_content == other._plugin_content
            and self._description == other._description
            and self._display_name == other._display_name
        )
    def __hash__(self):
        return hash(
            (
                self._max_step,
                self._max_wall_time,
                self._max_length,
                self._plugin_content,
                self._description,
                self._display_name,
            )
        )
    def __repr__(self):
        parts = [
            "max_step={!r}".format(self._max_step),
            "max_wall_time={!r}".format(self._max_wall_time),
            "max_length={!r}".format(self._max_length),
            "plugin_content={!r}".format(self._plugin_content),
            "description={!r}".format(self._description),
            "display_name={!r}".format(self._display_name),
        ]
        return "BlobSequenceTimeSeries({})".format(", ".join(parts))
class BlobReference(object):
    """A reference to a blob.
    Attributes:
      blob_key: A string containing a key uniquely identifying a blob, which
        may be dereferenced via `provider.read_blob(blob_key)`.
        These keys must be constructed such that they can be included directly in
        a URL, with no further encoding. Concretely, this means that they consist
        exclusively of "unreserved characters" per RFC 3986, namely
        [a-zA-Z0-9._~-]. These keys are case-sensitive; it may be wise for
        implementations to normalize case to reduce confusion. The empty string
        is not a valid key.
        Blob keys must not contain information that should be kept secret.
        Privacy-sensitive applications should use random keys (e.g. UUIDs), or
        encrypt keys containing secret fields.
      url: (optional) A string containing a URL from which the blob data may be
        fetched directly, bypassing the data provider. URLs may be a vector
        for data leaks (e.g. via browser history, web proxies, etc.), so these
        URLs should not expose secret information.
    """
    __slots__ = ("_url", "_blob_key")
    def __init__(self, blob_key, url=None):
        self._blob_key = blob_key
        self._url = url
    @property
    def blob_key(self):
        """Provide a key uniquely identifying a blob.
        Callers should consider these keys to be opaque-- i.e., to have
        no intrinsic meaning. Some data providers may use random IDs;
        but others may encode information into the key, in which case
        callers must make no attempt to decode it.
        """
        return self._blob_key
    @property
    def url(self):
        """Provide the direct-access URL for this blob, if available.
        Note that this method is *not* expected to construct a URL to
        the data-loading endpoint provided by TensorBoard. If this
        method returns None, then the caller should proceed to use
        `blob_key()` to build the URL, as needed.
        """
        return self._url
    def __eq__(self, other):
        return (
            isinstance(other, BlobReference)
            and self._blob_key == other._blob_key
            and self._url == other._url
        )
    def __hash__(self):
        return hash((self._blob_key, self._url))
    def __repr__(self):
        return "BlobReference(blob_key={!r}, url={!r})".format(
            self._blob_key, self._url
        )
class BlobSequenceDatum(object):
    """A single datum in a blob sequence time series for a run and tag.
    Attributes:
      step: The global step at which this datum occurred; an integer. This is a
        unique key among data of this time series.
      wall_time: The real-world time at which this datum occurred, as `float`
        seconds since epoch.
      values: A tuple of `BlobReference` objects, providing access to elements of
        this sequence.
    """
    __slots__ = ("_step", "_wall_time", "_values")
    def __init__(self, step, wall_time, values):
        self._step = step
        self._wall_time = wall_time
        self._values = values
    @property
    def step(self):
        """Global step; unique key within the series."""
        return self._step
    @property
    def wall_time(self):
        """Wall time as float seconds since epoch."""
        return self._wall_time
    @property
    def values(self):
        """Tuple of `BlobReference` objects for this sequence."""
        return self._values
    def __eq__(self, other):
        if not isinstance(other, BlobSequenceDatum):
            return False
        return (
            self._step == other._step
            and self._wall_time == other._wall_time
            and self._values == other._values
        )
    def __hash__(self):
        return hash((self._step, self._wall_time, self._values))
    def __repr__(self):
        return "BlobSequenceDatum(step={!r}, wall_time={!r}, values={!r})".format(
            self._step, self._wall_time, self._values
        )
class RunTagFilter(object):
    """Filters data by run and tag names."""
    def __init__(self, runs=None, tags=None):
        """Construct a `RunTagFilter`.
        A time series passes this filter if both its run *and* its tag are
        included in the corresponding whitelists.
        Order and multiplicity are ignored; `runs` and `tags` are treated as
        sets.
        Args:
          runs: Collection of run names, as strings, or `None` to admit all
            runs.
          tags: Collection of tag names, as strings, or `None` to admit all
            tags.
        """
        self._runs = self._parse_optional_string_set("runs", runs)
        self._tags = self._parse_optional_string_set("tags", tags)
    def _parse_optional_string_set(self, name, value):
        # `None` means "admit everything" and passes through unchanged.
        if value is None:
            return None
        # A bare string is iterable (as characters), which almost certainly
        # signals a caller mistake; reject it explicitly.
        if isinstance(value, six.string_types):
            raise TypeError(
                "%s: expected `None` or collection of strings; got %r: %r"
                % (name, type(value), value)
            )
        result = frozenset(value)
        for element in result:
            if not isinstance(element, six.string_types):
                raise TypeError(
                    "%s: expected `None` or collection of strings; "
                    "got item of type %r: %r" % (name, type(element), element)
                )
        return result
    @property
    def runs(self):
        return self._runs
    @property
    def tags(self):
        return self._tags
    def __repr__(self):
        return "RunTagFilter(%s)" % ", ".join(
            ("runs=%r" % (self._runs,), "tags=%r" % (self._tags,),)
        )
|
<gh_stars>1-10
import unittest
import os
import shutil
import json
import glob
import logging
from math import floor
from easysquid.toolbox import logger
from easysquid.simulations.single_simulation_run_methods import SimulationParameters
from simulations import _get_configs_from_easysquid
from easysquid.simulations import create_simdetails
from simulations import create_measure_simulation
from simulations.create_measure_simulation.setupsim import perform_single_simulation_run, \
set_simdetails_and_paramcombinations
from simulations import analysis_sql_data
from simulations.simulation_methods import setup_physical_network, setup_network_protocols
logger.setLevel(logging.CRITICAL)
class TestSimulations(unittest.TestCase):
    """End-to-end tests of the measure-simulation pipeline.

    Methods are numbered (test1_..., test6_...) because they depend on each
    other's on-disk artifacts and unittest executes them in name order.
    """
    @classmethod
    def setUpClass(cls):
        # Shared paths and physical parameters for the whole test sequence.
        cls.sim_dir = os.path.dirname(create_measure_simulation.__file__)
        cls.sim_name = "Test_NoNoise"
        cls.results_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), "test_simulation_tmp")
        # Bright-state parameter used for both nodes (alphaA/alphaB below).
        cls.alpha = 0.1
        cls.create_probA = 1
        cls.create_probB = 0
        cls.max_mhp_cycle = 1000
    @classmethod
    def tearDownClass(cls):
        # Remove the temporary results folder created by the tests.
        cls._reset_folder(cls.results_folder, make_new=False)
        # Reset files
        set_simdetails_and_paramcombinations.main(ask_for_input=False)
    @staticmethod
    def _reset_folder(folder, make_new=True):
        """Delete non-hidden files in `folder`, remove it if now empty, and
        recreate it when `make_out` would be needed (`make_new=True`)."""
        if os.path.exists(folder):
            for f in os.listdir(folder):
                f_path = os.path.join(folder, f)
                if os.path.isfile(f_path):
                    # Hidden (dot) files are deliberately left in place.
                    if not f.startswith("."):
                        os.remove(f_path)
            try:
                os.rmdir(folder)
            except OSError:
                # Folder not empty (e.g. subdirectories remain) — keep it.
                pass
        if not os.path.exists(folder):
            if make_new:
                os.mkdir(folder)
    def test1_grab_config_files(self):
        # Test grabbing files from easysquid
        _get_configs_from_easysquid.main()
    def test2_create_details_and_params(self):
        """Create simdetails/paramcombinations files used by later tests."""
        # Per-request parameters shared by both nodes.
        params = {"num_pairs": [1, 1],
                  "tmax_pair": 0,
                  "min_fidelity": 0,
                  "purpose_id": 0,
                  "priority": 0,
                  "store": False,
                  "atomic": False,
                  "measure_directly": True}
        request_paramsA = {"reqs": {"prob": self.create_probA,
                                    "number_request": 500,
                                    "params": params}}
        request_paramsB = {"reqs": {"prob": self.create_probB,
                                    "number_request": 500,
                                    "params": params}}
        paramcombinations = {
            self.sim_name: {
                "request_paramsA": request_paramsA,
                "request_paramsB": request_paramsB,
                "request_cycle": 0,
                "max_sim_time": 0,
                "max_wall_time": 345600,
                "max_mhp_cycle": self.max_mhp_cycle,
                "enable_pdb": False,
                "alphaA": self.alpha,
                "alphaB": self.alpha,
                "t0": 0,
                "wall_time_per_timestep": 1,
                "save_additional_data": True,
                "collect_queue_data": True,
                "config": "setupsim/config/no_noise/no_noise.json"
            }
        }
        # Test creating simdetails and paramcombinations
        create_simdetails.setup_sim_parameters(params=paramcombinations,
                                               sim_dir=self.sim_dir,
                                               description="Test simulation",
                                               num_runs=1, sim_name="test_simulations",
                                               make_paramcombinations=False, ask_for_input=False)
        self._reset_folder(self.results_folder)
        paramfile = os.path.join(self.sim_dir, "setupsim/paramcombinations.json")
        shutil.copy(paramfile, self.results_folder)
    def test3_run_single_case(self):
        """Run one simulation using the files produced by test2."""
        timestamp = "TEST_SIMULATION"
        runindex = 0
        paramfile = os.path.join(self.sim_dir, "setupsim/paramcombinations.json")
        actualkey = self.sim_name
        perform_single_simulation_run.main(final_results_dir=self.results_folder, tmp_results_dir=self.results_folder,
                                           timestamp=timestamp, run_key=actualkey, run_index=runindex,
                                           paramcombinations_file=paramfile)
    def test4_analyse_single_case(self):
        """Analyse the run from test3 and check the recorded metadata."""
        analysis_sql_data.main(results_path=self.results_folder, no_plot=True, save_figs=False, save_output=True)
        add_data_file_path = glob.glob("{}/*additional_data.json".format(self.results_folder))[0]
        with open(add_data_file_path, 'r') as f:
            additional_data = json.load(f)
        # Get the additional data
        mhp_t_cycle = additional_data["mhp_t_cycle"]
        request_t_cycle = additional_data["request_t_cycle"]
        alphaA = additional_data["alphaA"]
        alphaB = additional_data["alphaB"]
        create_probA = additional_data["request_paramsA"]["reqs"]["prob"]
        create_probB = additional_data["request_paramsB"]["reqs"]["prob"]
        total_matrix_time = additional_data['total_real_time']
        p_succ = additional_data["p_succ"]
        self.assertEqual(mhp_t_cycle, request_t_cycle)
        self.assertEqual(alphaA, self.alpha)
        self.assertEqual(alphaB, self.alpha)
        self.assertEqual(create_probA, self.create_probA)
        self.assertEqual(create_probB, self.create_probB)
        self.assertEqual(total_matrix_time, mhp_t_cycle * self.max_mhp_cycle)
        # Success probability ~ 2*alpha*(1-alpha) + alpha^2 — TODO confirm model.
        self.assertAlmostEqual(p_succ, 2 * self.alpha * (1 - self.alpha) + self.alpha ** 2, places=1)
    def test5_run_multi_case(self):
        """Run one simulation per entry in the bundled paramcombinations file."""
        self._reset_folder(self.results_folder)
        paramfile = os.path.join(os.path.dirname(__file__), "resources/paramcombinations.json")
        shutil.copy(paramfile, self.results_folder)
        # Load full_paramcombinations.json
        with open(paramfile) as f:
            paramcombinations = json.load(f)
        timestamp = "TEST_SIMULATION"
        runindex = 0
        for actualkey in paramcombinations.keys():
            perform_single_simulation_run.main(final_results_dir=self.results_folder,
                                               tmp_results_dir=self.results_folder, timestamp=timestamp,
                                               run_key=actualkey, run_index=runindex, paramcombinations_file=paramfile)
    def test6_analyse_multi_case(self):
        """Check the number of output files from test5, then analyse them."""
        nr_of_add_data_files = len(
            glob.glob(os.path.join(os.path.dirname(__file__), "test_simulation_tmp/*additional_data.json")))
        self.assertEqual(nr_of_add_data_files, 3)
        nr_of_data_files = len(glob.glob(os.path.join(os.path.dirname(__file__), "test_simulation_tmp/*.db")))
        self.assertEqual(nr_of_data_files, 3)
        analysis_sql_data.main(results_path=self.results_folder, no_plot=True, save_figs=False, save_output=True)
        nr_of_analysis_files = len(
            glob.glob(os.path.join(os.path.dirname(__file__), "test_simulation_tmp/*/analysis_output.txt")))
        self.assertEqual(nr_of_analysis_files, 4)
    def test_midpoint_rtt(self):
        """Check midpoint round-trip-time cycles for each network config."""
        self._reset_folder(self.results_folder)
        paramfile = os.path.join(os.path.dirname(__file__), "resources/paramcombinations.json")
        shutil.copy(paramfile, self.results_folder)
        # Load full_paramcombinations.json
        with open(paramfile) as f:
            paramcombinations = json.load(f)
        for actualkey in paramcombinations.keys():
            timestamp = "TEST_SIMULATION"
            runindex = 0
            sim_param = SimulationParameters(final_results_dir=self.results_folder,
                                             tmp_results_dir=self.results_folder,
                                             timestamp=timestamp,
                                             run_key=actualkey,
                                             run_index=runindex,
                                             paramcombinations_file=paramfile)
            # extract the desired data from the SimulationInputParser
            paramsdict = sim_param.paramsdict
            # Get absolute path to config
            abs_config_path = os.path.join(self.sim_dir, paramsdict["config"])
            # Create the network
            network = setup_physical_network(abs_config_path)
            nodeA = network.get_node_by_id(0)
            nodeB = network.get_node_by_id(1)
            mhp_conn = network.get_connection(nodeA, nodeB, "mhp_conn")
            mhp_conn.set_timings(t_cycle=0, t0=0)
            # Setup entanglement generation protocols
            egpA, egpB = setup_network_protocols(network)
            rtt_delay = egpA.mhp_service.get_midpoint_rtt_delay(nodeA)
            rtt_cycles = floor(rtt_delay / egpA.scheduler.mhp_cycle_period)
            # Expected cycle counts depend on the scenario encoded in the key.
            if "Lab" in actualkey:
                self.assertEqual(rtt_cycles, 0)
            elif "QLink" in actualkey:
                self.assertEqual(rtt_cycles, 14)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
## @package twitter.bots
# coding: UTF-8
import logging
import json
from typing import Dict, List
from pyrabbit2.http import NetworkError
from pyrabbit2.api import Client
from bots.utils import current_time, from_json
# Module-level logger writing to a dedicated file, truncated on each start.
logger = logging.getLogger("rabbit-messaging")
logger.setLevel(logging.DEBUG)
# Use FileHandler so the logging framework owns (and eventually closes) the
# stream, instead of wrapping a leaked raw file object from open(...).
handler = logging.FileHandler("rabbit_messaging.log", mode="w")
handler.setFormatter(logging.Formatter(
    "[%(asctime)s]:[%(levelname)s]:%(module)s - %(message)s"))
logger.addHandler(handler)
class MessagingSettings:
    """Routing settings for one RabbitMQ message stream.

    Bundles an exchange name, a routing key, and an optional queue name.
    """
    def __init__(self, exchange, routing_key, queue=None):
        self.exchange = exchange
        self.routing_key = routing_key
        self.queue = queue
    def __str__(self) -> str:
        return "exchange={}, routing_key={}, queue={}".format(
            self.exchange, self.routing_key, self.queue)
class RabbitMessaging:
    """Wrapper around the pyrabbit2 RabbitMQ HTTP client.

    Handles connection setup, exchange/queue declaration, and message
    publish/consume, transparently reconnecting when the connection drops.
    """
    def __init__(self, url, username, password, vhost, settings: Dict[str, MessagingSettings],
                 reconnect_max_iterations=5):
        # Connection credentials (double-underscore: name-mangled, private).
        self.__url = url
        self.__username = username
        self.__password = password
        #
        self.vhost = vhost
        self.settings = settings
        self.__messaging: Client = None
        self.__reconnect_max_iterations = reconnect_max_iterations
        # first connection
        self.__connect()
    def __reconnect_messaging(function):
        """Decorator to try to reconnect multiple times if the connection to RabbitMQ fails
        :param function: function to decorate
        """
        # NOTE: defined in the class body, so `function` (not `self`) is the
        # decorated method; `self` is received by the wrapper at call time.
        def wrapper(self, *args, **kwargs):
            for current_reconnect in range(self.__reconnect_max_iterations):
                try:
                    return function(self, *args, **kwargs)
                except NetworkError as error:
                    logger.error(f"{current_time(str_time=True)}: Connection to RabbitMQ lost. Trying to reconnect...")
                    self.__connect()
                    # Retries exhausted: re-raise the last network error.
                    if current_reconnect == self.__reconnect_max_iterations - 1:
                        raise error
        return wrapper
    def __connect(self):
        # (Re)build the HTTP API client with the stored credentials.
        self.__messaging = Client(api_url=self.__url, user=self.__username, passwd=self.__password)
    @__reconnect_messaging
    def _setup_messaging(self):
        """Private method for setting up the messaging connections
        """
        # Declare every configured exchange, queue, and binding.
        for current_setting_name in self.settings:
            current_setting = self.settings[current_setting_name]
            logger.info(f"Setting up Messaging to: {current_setting}\n"
                        f"Connecting to exchange {current_setting.exchange}")
            self.__messaging.create_exchange(vhost=self.vhost, name=current_setting.exchange, xtype="direct")
            # A queue is only declared/bound when the setting names one.
            if current_setting.queue:
                logger.info(f"Creating queue {current_setting.queue}")
                self.__messaging.create_queue(vhost=self.vhost, name=current_setting.queue, durable=True)
                logger.info(f"Binding exchange to queue {current_setting.queue} with key {current_setting.routing_key}")
                self.__messaging.create_binding(vhost=self.vhost, exchange=current_setting.exchange,
                                                queue=current_setting.queue, rt_key=current_setting.routing_key)
            logger.info(f"Connected to Messaging Service using: {current_setting.__str__()}")
            logger.info("---------------------------------------")
    @__reconnect_messaging
    def _send_message(self, data: json, send_to: str):
        """Function to publish a message on one of the rabbitMQ's exchanges
        :param data: data to publish in json format
        :param send_to: where to publish the data; corresponds to a str key in self.settings, where that key maps to
            an object with the exchange and routing key values
        """
        # NOTE(review): `data: json` annotates with the json *module*;
        # presumably the payload is a JSON string — confirm against callers.
        send_to = self.settings[send_to]
        logger.debug(
            f"Sending message to exchange <{send_to.exchange}> with routing_key <{send_to.routing_key}>")
        self.__messaging.publish(vhost=self.vhost, xname=send_to.exchange, rt_key=send_to.routing_key, payload=data)
    def _receive_message(self, receive_from: str) -> Dict:
        """Function to get a message from a specific rabbitMQ exchange
        :param receive_from: where to get the data; corresponds to a str key in self.settings, where that key
            maps to an object with the exchange and routing key values
        """
        receive_from = self.settings[receive_from]
        msg: List[Dict] = self.__messaging.get_messages(vhost=self.vhost, qname=receive_from.queue, count=1)
        # Empty queue or empty payload yields an empty dict.
        if msg and msg[0].get("payload", None):
            return from_json(msg[0]["payload"])
        return {}
|
<gh_stars>0
import cv2
import numpy as np
from . import border_utils
from . import layer_utils
from .frame import Frame
from .cv2_utils import cv2_estimateRigidTransform
def build_transformation_matrix(transform):
    """Convert transform list to transformation matrix
    :param transform: transform list as [dx, dy, da]
    :return: transform matrix as 2d (2, 3) numpy array
    """
    dx = transform[0]
    dy = transform[1]
    da = transform[2]
    cos_a = np.cos(da)
    sin_a = np.sin(da)
    # Rigid-motion affine matrix: rotation by `da` plus translation (dx, dy).
    return np.array([[cos_a, -sin_a, dx],
                     [sin_a, cos_a, dy]])
def border_frame(frame, border_size, border_type):
    """Convenience wrapper of cv2.copyMakeBorder for how vidstab applies borders
    :param frame: frame to apply border to
    :param border_size: int border size in number of pixels
    :param border_type: one of the following ['black', 'reflect', 'replicate']
    :return: bordered version of frame with alpha layer for frame overlay options
    """
    # Map user-facing border names to OpenCV border modes.
    border_modes = {'black': cv2.BORDER_CONSTANT,
                    'reflect': cv2.BORDER_REFLECT,
                    'replicate': cv2.BORDER_REPLICATE}
    # Selected OpenCV border mode.
    border_mode = border_modes[border_type]
    # Pad the frame image on all four sides by border_size pixels.
    bordered_frame_image = cv2.copyMakeBorder(frame.image,
                                              top=border_size,
                                              bottom=border_size,
                                              left=border_size,
                                              right=border_size,
                                              borderType=border_mode,
                                              value=[0, 0, 0])
    # Wrap the padded image back into a Frame, preserving the color format.
    bordered_frame = Frame(bordered_frame_image, color_format=frame.color_format)
    # Work on the BGRA (alpha-capable) view of the frame.
    alpha_bordered_frame = bordered_frame.bgra_image
    # Zero the alpha channel everywhere first...
    alpha_bordered_frame[:, :, 3] = 0
    # Height and width of the original (unpadded) frame.
    h, w = frame.image.shape[:2]
    # ...then mark the original frame region fully opaque (alpha = 255),
    # leaving only the added border at alpha = 0 for overlay compositing.
    alpha_bordered_frame[border_size:border_size + h, border_size:border_size + w, 3] = 255
    # Return the alpha-annotated image and the border mode actually used.
    return alpha_bordered_frame, border_mode
def match_keypoints(optical_flow, prev_kps):
    """Match optical flow keypoints
    :param optical_flow: output of cv2.calcOpticalFlowPyrLK
    :param prev_kps: keypoints that were passed to cv2.calcOpticalFlowPyrLK to create optical_flow
    :return: tuple of (cur_matched_kp, prev_matched_kp)
    """
    cur_kps, status, err = optical_flow
    # Keypoints whose status flag is truthy were tracked successfully;
    # collect the corresponding coordinates from both frames.
    cur_matched_kp = []
    prev_matched_kp = []
    if status is not None:
        for idx, flag in enumerate(status):
            if flag:
                cur_matched_kp.append(cur_kps[idx])
                prev_matched_kp.append(prev_kps[idx])
    return cur_matched_kp, prev_matched_kp
def estimate_partial_transform(matched_keypoints):
    """Wrapper of cv2.estimateRigidTransform for convenience in vidstab process
    :param matched_keypoints: output of match_keypoints util function; tuple of (cur_matched_kp, prev_matched_kp)
    :return: transform as list of [dx, dy, da]
    """
    cur_matched_kp, prev_matched_kp = matched_keypoints
    # Estimate the rigid transform mapping the previous frame's keypoints
    # onto the current frame's keypoints.
    transform = cv2_estimateRigidTransform(np.array(prev_matched_kp),  # matched keypoints in previous frame
                                           np.array(cur_matched_kp),  # matched keypoints in current frame
                                           False)  # full
    # A transform could be estimated: decompose it.
    if transform is not None:
        # translation x
        dx = transform[0, 2]
        # translation y
        dy = transform[1, 2]
        # rotation angle (radians) recovered from the rotation sub-matrix
        da = np.arctan2(transform[1, 0], transform[0, 0])
    # No transform could be estimated: fall back to the identity motion.
    else:
        dx = dy = da = 0
    # Frame-to-frame motion as [dx, dy, da].
    return [dx, dy, da]
def transform_frame(frame, transform, border_size, border_type):
    """Apply a [dx, dy, da] transform to a frame, padding it with a border.

    :param frame: Frame to transform
    :param transform: transform list as [dx, dy, da]
    :param border_size: int border size in number of pixels
    :param border_type: one of ['black', 'reflect', 'replicate']
    :return: transformed Frame in BGRA color format
    """
    # Validate the border type before doing any work.
    if border_type not in ['black', 'reflect', 'replicate']:
        raise ValueError('Invalid border type')
    # Build the 2x3 affine matrix from [dx, dy, da].
    transform = build_transformation_matrix(transform)
    # Pad the frame so the warp does not clip content at the edges.
    bordered_frame_image, border_mode = border_frame(frame, border_size, border_type)
    # Dimensions of the padded image.
    h, w = bordered_frame_image.shape[:2]
    # Apply the affine warp using the same border mode as the padding.
    transformed_frame_image = cv2.warpAffine(bordered_frame_image, transform, (w, h), borderMode=border_mode)
    # Wrap the warped image back into a Frame (border_frame yields BGRA).
    transformed_frame = Frame(transformed_frame_image, color_format='BGRA')
    # Return the transformed frame.
    return transformed_frame
def post_process_transformed_frame(transformed_frame, border_options, layer_options):
    """Crop a transformed frame and optionally layer it over the previous one.

    :param transformed_frame: frame produced by transform_frame
    :param border_options: options consumed by border_utils.crop_frame
    :param layer_options: dict with 'layer_func' (callable or None) and
        'prev_frame'; 'prev_frame' is updated in place for the next call
    :return: tuple of (cropped_frame, layer_options)
    """
    # Crop away the border area according to the border options.
    cropped_frame = border_utils.crop_frame(transformed_frame, border_options)
    # When a layering function is configured, blend with the previous frame.
    if layer_options['layer_func'] is not None:
        cropped_frame = layer_utils.apply_layer_func(cropped_frame,
                                                     layer_options['prev_frame'],
                                                     layer_options['layer_func'])
    # Remember this frame for the next call's layering step.
    layer_options['prev_frame'] = cropped_frame
    return cropped_frame, layer_options
|
<gh_stars>0
from copy import copy
from collections import OrderedDict, defaultdict
import six
from bpmappers.utils import sort_dict_with_keys
from bpmappers.fields import Field, BaseField
from bpmappers.exceptions import DataError
class Options(object):
    """Meta data of Mapper.

    Maintains `fields`, a mapping of field.key -> list of (name, field)
    pairs, and `field_names`, the list of registered attribute names.
    """
    def __init__(self, *args, **kwargs):
        # field.key -> list of (name, field) tuples registered for that key.
        self.fields = defaultdict(list)
        # Use this list to checking for existing name.
        self.field_names = []
    def add_field(self, name, field):
        """Add field, replacing any field previously registered as `name`."""
        # A Field without an explicit key defaults to its attribute name.
        if isinstance(field, Field) and field.key is None:
            field.key = name
        if name in self.field_names:
            # Re-registration: drop the old (name, field) pair from every
            # key bucket and delete buckets that become empty.  (The old
            # code additionally pre-filtered `self.fields[field.key]`
            # before this loop, which the loop already covers.)
            for key in list(self.fields.keys()):
                kept = [tp for tp in self.fields[key] if tp[0] != name]
                if kept:
                    self.fields[key] = kept
                else:
                    del self.fields[key]
        else:
            self.field_names.append(name)
        self.fields[field.key].append((name, field))
    def copy(self):
        """Return a shallow copy (field objects themselves are shared)."""
        opt = Options()
        opt.fields = copy(self.fields)
        opt.field_names = copy(self.field_names)
        return opt
    def __repr__(self):
        return '<Options: %s>' % self.fields
class BaseMapper(type):
    """Metaclass of Mapper.

    Builds the `_meta` Options for each Mapper subclass by merging the
    `_meta` of its bases and collecting BaseField class attributes.
    """
    def __new__(cls, name, bases, attrs):
        # Start from this class's own declared _meta, else a fresh Options.
        # (The previous code guarded `opt = Options()` behind a dead
        # `if opt is None` check; `opt` was always None on that path.)
        if '_meta' in attrs:
            opt = attrs['_meta'].copy()
        else:
            opt = Options()
        # Merge fields inherited from base classes, in declaration order.
        for base_class in bases:
            if hasattr(base_class, '_meta'):
                base_opt = base_class._meta.copy()
                for key in base_opt.fields.keys():
                    lst = base_opt.fields[key]
                    for _name, field in lst:
                        opt.add_field(_name, field)
        # Fields declared directly on this class override inherited ones.
        for k, v in attrs.items():
            if isinstance(v, BaseField):
                opt.add_field(k, v)
        attrs['_meta'] = opt
        return type.__new__(cls, name, bases, attrs)
class Mapper(six.with_metaclass(BaseMapper)):
    """Basic Mapper class.

    Converts ``data`` (an object, a dict, or a list of either) into an
    ``OrderedDict`` driven by the fields collected into ``_meta`` by the
    ``BaseMapper`` metaclass.
    """
    # Class-level defaults merged into per-instance ``options``.
    default_options = {}

    def __init__(self, data=None, **options):
        """
        :data: Mapping source object.
        :\*\*options: Optional values.
        """
        self.data = data
        self.options = self.default_options.copy()
        self.options.update(options)

    def _getattr_inner(self, obj, key):
        # Priority "attr", "dict", "getattr".
        # Returns None for an empty key; dict lookups use .get() and so
        # never raise, while attribute lookups raise DataError on miss.
        if not key:
            return
        if isinstance(obj, dict):
            return obj.get(key)
        else:
            try:
                return getattr(obj, key)
            except AttributeError:
                raise DataError(
                    '"%(obj)s" does not have this key'
                    ' "%(key)s in %(mapper)s"' % {
                        'obj': obj, 'key': key, 'mapper': repr(self)})

    def _getattr(self, obj, key):
        # Recursive call if it is a dot-splitted accessor ("a.b.c").
        if '.' in key:
            keys = key.split('.')
            obj_child = self._getattr(obj, keys[0])
            # If the child object is callable, call that object.
            if hasattr(obj_child, '__call__'):
                obj_child = obj_child()
            value = self._getattr(obj_child, '.'.join(keys[1:]))
        else:
            value = self._getattr_inner(obj, key)
        return value

    def as_dict(self):
        """
        Return the OrderedDict it is mapping result.
        """
        parsed = OrderedDict()
        for k in self._meta.fields:
            # _meta.fields is MultiValueDict
            for name, field in self._meta.fields[k]:
                if field.is_nonkey:
                    # Non-key fields get their value from filters only.
                    v = None
                elif isinstance(self.data, list):
                    # if data is list, use the first item that yields a value.
                    data_check = False
                    error = None
                    for item in self.data:
                        try:
                            v = self._getattr(item, k)
                        except DataError as exc:
                            # Bind to a longer-lived name: py3 deletes the
                            # ``as`` target at the end of the except block.
                            error = exc
                        else:
                            data_check = True
                            break
                    if not data_check:
                        # Re-raise with the original exception arguments.
                        # (The previous ``error.message`` access breaks on
                        # Python 3, where BaseException.message is gone.)
                        raise DataError(*error.args)
                else:
                    v = self._getattr(self.data, k)
                if hasattr(v, '__call__') and not field.skip_callable:
                    v = v()
                # pre-filter hook: filter_<name>(value)
                filter_name = 'filter_%s' % name
                if hasattr(self, filter_name):
                    if field.is_nonkey:
                        v = getattr(self, filter_name)()
                    else:
                        v = getattr(self, filter_name)(v)
                value = field.get_value(self, v)
                # after filter hook
                after_filter_name = 'after_filter_%s' % name
                if hasattr(self, after_filter_name):
                    value = getattr(self, after_filter_name)(value)
                # attach hook
                attach_name = 'attach_%s' % name
                if hasattr(self, attach_name):
                    getattr(self, attach_name)(parsed, value)
                else:
                    attach_parent = getattr(field, 'attach_parent', False)
                    if attach_parent:
                        # Merge the mapped value into the parent dict.
                        parsed.update(value)
                    else:
                        parsed[self.key_name(name, value, field)] = value
        ordered = self.order(parsed)
        return ordered

    def order(self, parsed):
        """
        This method **must** return the OrderedDict.
        """
        return sort_dict_with_keys(parsed, self._meta.field_names)

    def key_name(self, name, value, field):
        """
        Hook point for key name converting.
        """
        return name

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def __str__(self):
        return str(', '.join(self._meta.field_names))
|
import json
import shutil
from pathlib import Path
from unittest import mock
from aiounittest import AsyncTestCase
from crawler.crawlers import Crawler
from crawler.scrapers import URLScraper
class CrawlerTest(AsyncTestCase):
    """Tests for Crawler: queueing, visited bookkeeping, output, workers."""

    def setUp(self) -> None:
        # Fresh single-worker crawler writing under ./out for every test.
        self.crawler = Crawler(
            initial_url='https://www.example.test/',
            num_workers=1,
            out_path=Path('./out')
        )

    def test_add_to_queue(self) -> None:
        """
        Test the adding URL to the Queue.
        """
        candidates = [
            # Valid URLS
            'https://www.example.test/',
            'https://www.example.test/a/',
            'https://www.example.test/a/b/',
            # Invalid URLS
            'https://www.otherexample2.test/',
            'https://www.otherexample3.test/',
        ]
        for candidate in candidates:
            self.crawler._add_to_queue(candidate)
        # Only the three same-site URLs should have been queued.
        self.assertEqual(self.crawler.queue.qsize(), 3)

    def test_add_to_visited_urls(self) -> None:
        """
        Test the adding URL to the visited list.
        """
        source_url = 'https://www.example.test/a/'
        candidates = [
            # Valid URLS
            'https://www.example.test/a/b/',
            'https://www.example.test/a/b/c',
            # Invalid URLS
            'https://www.example.test/',
            'https://www.otherexample2.test/',
            'https://www.otherexample3.test/',
        ]
        for candidate in candidates:
            self.crawler._add_to_visited_urls(source_url, candidate)
        self.assertEqual(len(self.crawler.visited_urls), 1)
        self.assertEqual(len(self.crawler.visited_urls[source_url]), 3)

    def test_write_to_file(self) -> None:
        """
        Test writing output to a file.
        """
        self.crawler.visited_urls = {
            'https://www.example.test/': {
                'https://www.example.test/b/',
                'https://www.example.test/c/'
            },
            'https://www.example.test/b/': set()
        }
        self.crawler._write_to_file()
        with open('out/out.json', mode='r') as output_file:
            written = output_file.read()
        self.assertDictEqual(json.loads(written), self.crawler.visited_urls)

    @mock.patch.object(URLScraper, 'run')
    async def test_start_worker(self, mock_method) -> None:
        """
        Test the worker consuming the Queue.
        """
        mock_method.return_value = {'https://www.example.test/1'}
        start_url = 'https://www.example.test/'
        self.crawler._add_to_queue(start_url)
        await self.crawler._start_worker(name='worker-1')
        self.assertEqual(self.crawler.queue.qsize(), 0)
        expected = {
            'https://www.example.test/': {'https://www.example.test/1'},
            'https://www.example.test/1': {'https://www.example.test/1'},
        }
        self.assertDictEqual(self.crawler.visited_urls, expected)

    @mock.patch.object(URLScraper, 'run')
    async def test_run(self, mock_method) -> None:
        """
        Test the crawler with multiple workers.
        """
        mock_method.return_value = {
            'https://www.example.test/1',
            'https://www.example.test/2',
        }
        await self.crawler.run()
        with open('out/out.json', mode='r') as output_file:
            written = output_file.read()
        self.assertEqual(self.crawler.queue.qsize(), 0)
        self.assertEqual(len(self.crawler.visited_urls), 3)
        self.assertDictEqual(json.loads(written), self.crawler.visited_urls)

    @mock.patch.object(URLScraper, 'run')
    async def test_run_with_0_workers(self, mock_method) -> None:
        """
        Test the crawler with 0 workers.
        """
        mock_method.return_value = {
            'https://www.example.test/1',
            'https://www.example.test/2',
        }
        self.crawler.num_workers = 0
        with self.assertRaises(Exception) as context:
            await self.crawler.run()
        self.assertIn(
            'The number of workers must be higher than 1.',
            str(context.exception)
        )

    def tearDown(self) -> None:
        # Drop the output directory created by the tests, if any.
        out_dir = Path('out')
        if out_dir.exists():
            shutil.rmtree('out')
|
<filename>www/apps/social/providers/base/oauth2base.py
import cgi, urllib, urllib2, json, base64
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, load_backend
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.conf import settings
from ...models import SocialAuthProvider
class Oauth2Base(object):
    """Base class for OAuth2 login providers (Facebook, Google, ...).

    Subclasses configure the endpoint URLs and credentials below, and must
    override :meth:`get_uuid` (and usually :meth:`get_email`) to extract
    identity data from the provider-specific user-info payload.
    """
    # name - Facebook, Google ... other Oauth2 providers
    name = 'Social2Base'
    # request we are processing
    request = None

    REQUEST_METHOD_GET = 'get'
    REQUEST_METHOD_POST = 'post'

    # required by all oauth2 providers
    client_id = ''
    redirect_uri = ''
    client_secret = ''
    scope = ''

    # provider specific (e.g. required by google)
    response_type = ''
    grant_type = ''
    alt = ''

    dialog_base_url = ''  # call to this url + args will initiate an auth dialog
    token_base_url = ''   # call to this url + args will request an access token
    info_base_url = ''    # call to this url + args will request user info

    code = ''
    access_token = ''
    user_info = None

    access_token_data_format = 'json'  # format of retrieved access token, (json, or txt)
    user_info_data_format = 'json'     # format of retrieved user info, (json, or txt)

    def __init__(self, request):
        self.request = request
        self.name = self.name.capitalize()

    # get the base url for the first dialog with the provider
    def get_dialog_url(self):
        return self.dialog_base_url

    # get the base url for the access token request
    # (kept under its original - misspelled - name; subclasses rely on it)
    def get_tocken_url(self):
        return self.token_base_url

    # get the base url for the data request
    def get_info_url(self):
        return self.info_base_url

    # get the required args for a dialog with the provider
    def get_dialog_arg(self):
        args = {
            'client_id': self.client_id,
            'redirect_uri': self.redirect_uri,
            'scope': self.scope
        }
        return args

    # get the required args for a token request
    def get_token_args(self):
        args = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'redirect_uri': self.redirect_uri,
            'code': self.code
        }
        return args

    # get the required args for a user info request
    def get_info_args(self):
        args = {
            'access_token': self.access_token
        }
        return args

    # prepare a request (req_type is 'token' or 'user_info')
    def build_request(self, req_type='token', req_method=REQUEST_METHOD_GET):
        if req_type == 'token':
            url = self.get_tocken_url()
            args = self.get_token_args()
        else:  # must be a user_info request
            url = self.get_info_url()
            args = self.get_info_args()
        args = urllib.urlencode(args)
        if req_method == self.REQUEST_METHOD_POST:
            req = urllib2.Request(url, data=args)
        else:
            req = urllib2.Request(url+args)
        return req

    # make a request; returns (success, response).  On failure the
    # URLError itself is returned in place of the response.
    def make_request_return_response(self, req):
        try:
            resp = urllib2.urlopen(req)
        except urllib2.URLError as e:
            # ``except X as e`` works on Python 2.6+ and is forward-compatible,
            # unlike the old ``except X, e`` form.
            resp = e
            return False, resp
        return True, resp

    # extract data from response (returns a dict)
    def extract_data_from_response(self, resp, format='json'):
        if format == 'json' or format == 'text':
            data = json.loads(resp.read())
        else:
            # e.g. the 'txt' access-token format: an urlencoded
            # key=value&key=value body
            data = dict(cgi.parse_qsl(resp.read()))
        return data

    # extract code from callback; returns (success, message)
    def extract_code_from_callback(self):
        self.code = ''
        msg = ''
        success = True
        try:
            self.code = self.request.GET['code']
        except KeyError:
            # Narrowed from a bare ``except``: a missing GET parameter
            # raises KeyError (Django's MultiValueDictKeyError subclasses it).
            msg = _('[%s] authentication failed (!code)' % self.name)
        if not self.code or msg:
            success = False
        return success, msg

    # which request method does this provider require for access_token request (get or post)
    def get_access_token_request_method(self):
        return self.REQUEST_METHOD_POST

    # which request method does this provider require for user info request (get or post)
    def get_user_info_request_method(self):
        return self.REQUEST_METHOD_GET

    # get a response for an access token request
    def get_response_for_access_token_request(self):
        req_method = self.get_access_token_request_method()
        req = self.build_request(req_type='token', req_method=req_method)
        success, resp = self.make_request_return_response(req)
        return success, resp

    # get a response for a user data request
    def get_response_for_user_info_request(self):
        req_method = self.get_user_info_request_method()
        req = self.build_request(req_type='user_info', req_method=req_method)
        success, resp = self.make_request_return_response(req)
        return success, resp

    # we are processing a locally initiated social authentication request:
    # initiate a dialog with the social provider in question
    def initiate_authentication_request(self):
        url = self.get_dialog_url()
        args = self.get_dialog_arg()
        args = urllib.urlencode(args)
        return HttpResponseRedirect(url+args)

    # we are processing the callback from the provider, we need an access token:
    # send the access token request to the provider
    def get_access_token(self):
        msg = ''
        self.access_token = ''
        success, resp = self.get_response_for_access_token_request()
        if not success:
            msg = _('[%s] error - access token - (%s)' % (self.name, resp.code))
        else:
            data = self.extract_data_from_response(resp, format=self.access_token_data_format)
            self.access_token = data['access_token']
            if not self.access_token:
                msg = _('[%s] error - empty access token' % self.name)
        return success, msg

    # we are processing the callback from the provider, have an access token already:
    # send the access token back and request user data which may be provider specific
    def get_user_info(self):
        msg = ''
        self.user_info = None
        success, resp = self.get_response_for_user_info_request()
        if not success:
            # Fixed: the original template had three placeholders
            # ('... (%s) %s') but only two arguments, raising TypeError
            # on the very path meant to report the error.
            msg = _('[%s] error - user info - (%s)' % (self.name, resp.code))
        else:
            self.user_info = self.extract_data_from_response(resp, format=self.user_info_data_format)
            if not self.user_info:
                msg = _('[%s] error - no user info' % self.name)
                success = False
        return success, msg

    # derived class should override this: combine some of the user info
    # fields and base64-encode them (e.g. base64.encodestring(...))
    def get_uuid(self):
        # Fixed: the original did ``raise _("...")`` - raising a plain
        # string is itself a TypeError; raise a real exception instead.
        raise NotImplementedError(_("get_uuid must be overridden"))

    # we have the user info, see if we can extract an email address;
    # derived class has to override
    def get_email(self):
        return ''

    # redirect when we fail
    def redirect_on_failure(self, msg=''):
        redirect = settings.LOGIN_URL
        if self.request.user.is_authenticated():
            redirect = reverse('social_provider_list')
        if msg:
            messages.add_message(self.request, messages.INFO, msg)
        return HttpResponseRedirect(redirect)

    # redirect when we succeed
    def redirect_on_success(self, msg='', success_url=settings.LOGIN_REDIRECT_URL):
        if msg:
            messages.add_message(self.request, messages.INFO, msg)
        return HttpResponseRedirect(success_url)

    # we have authenticated this user via a social provider, just log'm in
    def socially_login(self, user):
        # use the backend for login from django.contrib.auth
        if not hasattr(user, 'backend'):
            for backend in settings.AUTHENTICATION_BACKENDS:
                if user == load_backend(backend).get_user(user.pk):
                    user.backend = backend
                    break
        if hasattr(user, 'backend'):
            return login(self.request, user)

    # we have requested social authentication and the provider has called us back
    def process_callback(self):
        # we called, now called back with a code, go and extract the code (self.code)
        success, msg = self.extract_code_from_callback()
        if not success:
            return self.redirect_on_failure(msg=msg)
        # exchange the code for an access token
        success, msg = self.get_access_token()
        if not success:
            return self.redirect_on_failure(msg=msg)
        # exchange the access token for the user info
        success, msg = self.get_user_info()
        if not success:
            return self.redirect_on_failure(msg=msg)
        # we got a uuid
        uuid = self.get_uuid()
        email = self.get_email()
        # try to find the provider to login user with
        try:
            sp = SocialAuthProvider.objects.get(name__iexact=self.name, uuid__exact=uuid)
        except SocialAuthProvider.DoesNotExist:
            # no such provider, user must be trying to add it
            sp = SocialAuthProvider()
            sp.name = self.name
            sp.uuid = uuid
            sp.email = email
            sp.is_active = False
        else:
            # provider found, try to login user with
            self.socially_login(sp.user)
            msg = _('you are logged in via %s' % self.name)
            return self.redirect_on_success(msg=msg, success_url=settings.LOGIN_REDIRECT_URL)
        # if user is already authenticated, then they are adding this provider app to their account
        # if user not authenticated, then they are trying to login as they have this provider in their account
        if self.request.user.is_authenticated():
            sp.user = self.request.user
            sp.is_active = True
            sp.save()
            msg = _('%s is now enabled, so from now on, you can login via %s' % (self.name, self.name))
            return self.redirect_on_success(msg=msg, success_url=reverse('social_provider_list'))
        # not logged in, so user must be trying to login without having an account or this provider enabled
        msg = _('setup an account or login with password, then enable %s in your account settings for future logins' % self.name)
        return self.redirect_on_failure(msg=msg)
|
from unittest.mock import Mock
import pytest
from empresa import Pessoa, Funcionario, Programador, Estagiario, Vendedor, Empresa, EmpresaCreationError
# -----------------------
# Testes da classe Pessoa
# -----------------------
def test_cria_pessoa():
    """Pessoa must be constructible and expose nome, idade and aniversario."""
    try:
        p = Pessoa("João", 20)
    except Exception:
        # Fixed: the original raised the nonexistent name ``AssertError``,
        # which would itself crash with a NameError on this path.
        raise AssertionError("Erro ao criar um objeto da classe Pessoa.")
    else:
        assert hasattr(p, "nome"), "Não existe o atributo público nome."
        assert hasattr(p, "idade"), "Não existe o atributo público idade."
        assert hasattr(p, "aniversario"), ("Não existe o método público "
                                           "aniversario.")
# NOTE(review): several AssertionError(...) calls below pass two string
# arguments, so the failure message renders as a tuple instead of a single
# sentence; the tests' pass/fail behavior is unaffected.
@pytest.mark.parametrize("nome", [5, 6.1, True, [], {}, ()])
def test_cria_pessoa_nome_type_error(nome):
    # A non-string nome must raise TypeError (and nothing else).
    try:
        p = Pessoa(nome, 20)
    except TypeError:
        pass
    except Exception:
        raise AssertionError("Não levanta TypeError para nome.")
    else:
        raise AssertionError("Criou um objeto da classe Pessoa com nome que",
                             "não é string.")


def test_cria_pessoa_nome_value_error():
    # An empty nome must raise ValueError.
    try:
        p = Pessoa("", 20)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para nome.")
    else:
        raise AssertionError("Criou um objeto da classe Pessoa com nome vazio.")


@pytest.mark.parametrize("idade", [5.6, "21", [], {}, ()])
def test_cria_pessoa_idade_type_error(idade):
    # A non-integer idade must raise TypeError.
    try:
        p = Pessoa("João", idade)
    except TypeError:
        pass
    except Exception:
        raise AssertionError("Não leventa TypeError para idade.")
    else:
        raise AssertionError("Criou um objeto da classe Pessoa com idade que",
                             "não é inteira.")


@pytest.mark.parametrize("idade", [-1, -80, -100])
def test_cria_pessoa_idade_value_error(idade):
    # A negative idade must raise ValueError.
    try:
        p = Pessoa("João", idade)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para idade.")
    else:
        raise AssertionError("Criou um objeto da classe Pessoa com uma idade",
                             "negativa.")


def test_pessoa_altera_nome():
    # nome must be read-only: assigning to it must raise AttributeError.
    p = Pessoa("João", 20)
    try:
        p.nome = "Maria"
    except AttributeError:
        pass
    else:
        raise AssertionError("O atributo nome da classe Pessoa tem um método",
                             "setter.")


def test_pessoa_altera_idade():
    # idade must be read-only: assigning to it must raise AttributeError.
    p = Pessoa("João", 20)
    try:
        p.idade = 25
    except AttributeError:
        pass
    else:
        raise AssertionError("O atributo idade de Pessoa tem um método",
                             "setter.")


def test_pessoa_aniversario():
    # aniversario() must increment idade by exactly one.
    p = Pessoa("João", 20)
    try:
        p.aniversario()
    except:
        raise AssertionError("Não é possível chamar o método aniversário",
                             "de Pessoa.")
    else:
        assert p.idade == 21, ("O método aniversário de Pessoa não está",
                               "incrementando a idade")
# ----------------------------
# Testes da classe Funcionario
# ----------------------------
def test_cria_funcionario():
    # Funcionario is abstract: instantiating it directly must raise
    # NotImplementedError.
    try:
        f = Funcionario("João", 20, "<EMAIL>", 44)
    except NotImplementedError:
        pass
    else:
        raise AssertionError("Criou um objeto da classe abstrata Funcionario.")
# ----------------------------
# Testes da classe Programador
# ----------------------------
def test_cria_programador():
    # Programador must be instantiable and expose the full public interface.
    try:
        p = Programador("João", 20, "<EMAIL>", 30)
    except:
        raise AssertionError("Erro ao criar um objeto da classe Programador.")
    else:
        assert hasattr(p, "nome"), "Não existe o atributo público nome."
        assert hasattr(p, "idade"), "Não existe o atributo público idade."
        assert hasattr(p, "email"), "Não existe o atributo público email."
        assert hasattr(p, "carga_horaria"), ("Não existe o atributo público",
                                             "carga_horaria.")
        assert hasattr(p, "aniversario"), ("Não existe o método público",
                                           "aniversário")
        assert hasattr(p, "calcula_salario"), ("Não existe o método público",
                                               "calcula_salario.")
        assert hasattr(p, "aumenta_salario"), ("Não existe o método público",
                                               "aumenta_salario.")


@pytest.mark.parametrize("email", [5, 5.6, True, [], {}, ()])
def test_programador_email_type_error(email):
    # A non-string email must raise TypeError.
    try:
        p = Programador("João", 20, email, 30)
    except TypeError:
        pass
    except Exception:
        raise AssertionError("Não levanta TypeError para email.")
    else:
        raise AssertionError("Criou um objeto da classe Programador com email",
                             "que não é string.")


@pytest.mark.parametrize("email", ["jsilva", "1@@", "!", "-j@"])
def test_programador_email_value_error(email):
    # A malformed email must raise ValueError.
    try:
        p = Programador("João", 20, email, 30)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para email.")
    else:
        raise AssertionError("Criou um objeto da classe Programador com email",
                             "inválido.")


@pytest.mark.parametrize("carga_horaria", [10, 19, -5, 41, 50])
def test_programador_carga_horaria_value_error(carga_horaria):
    # An out-of-range carga_horaria must raise ValueError at construction.
    try:
        p = Programador("João", 20, "<EMAIL>", carga_horaria)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para carga_horaria.")
    else:
        raise AssertionError("Criou um objeto da classe Programador com",
                             "carga_horaria inválida.")


@pytest.mark.parametrize("carga_horaria", [10, 19, -5, 41, 50])
def test_programador_altera_carga_horaria_value_error(carga_horaria):
    # Assigning an out-of-range carga_horaria must raise ValueError.
    p = Programador("João", 20, "<EMAIL>", 30)
    try:
        p.carga_horaria = carga_horaria
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para carga_horaria.")
    else:
        raise AssertionError("Criou um objeto da classe Programador com",
                             "carga_horaria inválida.")


@pytest.mark.parametrize("carga_horaria", [20, 25, 30, 35, 40])
def test_programador_calcula_salario(carga_horaria):
    # Expected salary: carga_horaria * 4.5 * 35.
    p = Programador("João", 20, "<EMAIL>", carga_horaria)
    assert p.calcula_salario() == carga_horaria * 4.5 * 35, ("O cálculo do",
        "salário está incorreto.")


def test_programador_aumenta_salario():
    # aumenta_salario() must raise the computed salary by 5%.
    p = Programador("João", 20, "<EMAIL>", 30)
    salario_antigo = p.calcula_salario()
    p.aumenta_salario()
    salario_novo = p.calcula_salario()
    assert salario_novo == salario_antigo * 1.05, ("O método calcula_salario",
        "não está aumentando o salário corretamente.")
# ---------------------------
# Testes da classe Estagiário
# ---------------------------
def test_cria_estagiario():
    # Estagiario must be instantiable and expose the full public interface.
    try:
        e = Estagiario("João", 20, "<EMAIL>", 20)
    except:
        raise AssertionError("Erro ao criar um objeto da classe Estagiário.")
    else:
        assert hasattr(e, "nome"), "Não existe o atributo público nome."
        assert hasattr(e, "idade"), "Não existe o atributo público idade."
        assert hasattr(e, "email"), "Não existe o atributo público email."
        assert hasattr(e, "carga_horaria"), ("Não existe o atributo público",
                                             "carga_horaria.")
        assert hasattr(e, "aniversario"), ("Não existe o método público",
                                           "aniversário")
        assert hasattr(e, "calcula_salario"), ("Não existe o método público",
                                               "calcula_salario.")
        assert hasattr(e, "aumenta_salario"), ("Não existe o método público",
                                               "aumenta_salario.")


@pytest.mark.parametrize("email", [5, 5.6, True, [], {}, ()])
def test_estagiario_email_type_error(email):
    # A non-string email must raise TypeError.
    try:
        e = Estagiario("João", 20, email, 30)
    except TypeError:
        pass
    except Exception:
        raise AssertionError("Não levanta TypeError para email.")
    else:
        raise AssertionError("Criou um objeto da classe Estagiario com email",
                             "que não é string.")


@pytest.mark.parametrize("email", ["jsilva", "1@@", "!", "-j@"])
def test_estagiario_email_value_error(email):
    # A malformed email must raise ValueError.
    try:
        e = Estagiario("João", 20, email, 30)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para email.")
    else:
        raise AssertionError("Criou um objeto da classe Estagiario com email",
                             "inválido.")


@pytest.mark.parametrize("carga_horaria", [10, 15, -5, 31, 50])
def test_estagiario_carga_horaria_value_error(carga_horaria):
    # An out-of-range carga_horaria must raise ValueError at construction.
    try:
        e = Estagiario("João", 20, "<EMAIL>", carga_horaria)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para carga_horaria.")
    else:
        raise AssertionError("Criou um objeto da classe Estagiario com",
                             "carga_horaria inválida.")


@pytest.mark.parametrize("carga_horaria", [10, 15, -5, 31, 50])
def test_estagiario_altera_carga_horaria_value_error(carga_horaria):
    # Assigning an out-of-range carga_horaria must raise ValueError.
    e = Estagiario("João", 20, "<EMAIL>", 20)
    try:
        e.carga_horaria = carga_horaria
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para carga_horaria.")
    else:
        raise AssertionError("Criou um objeto da classe Estagiario com",
                             "carga_horaria inválida.")


@pytest.mark.parametrize("carga_horaria", [16, 20, 25, 30])
def test_estagiario_calcula_salario(carga_horaria):
    # Expected salary: carga_horaria * 4.5 * 15.5 plus a fixed 250.
    e = Estagiario("João", 20, "<EMAIL>", carga_horaria)
    assert e.calcula_salario() == carga_horaria * 4.5 * 15.5 + 250, ("O",
        "cálculo do salário está incorreto.")


def test_estagiario_aumenta_salario():
    # aumenta_salario() must raise the hourly part by 5%; the fixed 250
    # bonus is not multiplied.
    e = Estagiario("João", 20, "<EMAIL>", 20)
    salario_antigo = e.calcula_salario()
    e.aumenta_salario()
    salario_novo = e.calcula_salario()
    assert salario_novo == 15.5 * 1.05 * 20 * 4.5 + 250, ("O método",
        "calcula_salario não está aumentando o salário corretamente.")
# -------------------------
# Testes da classe Vendedor
# -------------------------
def test_cria_vendedor():
    # Vendedor must be instantiable and expose the full public interface,
    # including the visit-tracking members.
    try:
        v = Vendedor("João", 20, "<EMAIL>", 30)
    except:
        raise AssertionError("Erro ao criar um objeto da classe Vendedor")
    else:
        assert hasattr(v, "nome"), "Não existe atributo público nome."
        assert hasattr(v, "idade"), "Não existe atributo público idade."
        assert hasattr(v, "email"), "Não existe o atributo público email."
        assert hasattr(v, "carga_horaria"), ("Não existe o atributo público",
                                             "carga_horaria.")
        assert hasattr(v, "visitas"), "Não existe o atributo público visitas."
        assert hasattr(v, "aniversario"), ("Não existe o método público",
                                           "aniversario")
        assert hasattr(v, "calcula_salario"), ("Não existe o método público",
                                               "calcula_salario.")
        assert hasattr(v, "aumenta_salario"), ("Não existe o método público",
                                               "aumenta_salario.")
        assert hasattr(v, "realizar_visita"), ("Não existe o método público",
                                               "realizar_visita.")
        assert hasattr(v, "zerar_visitas"), ("Não existe o método público",
                                             "zerar_visitas.")


@pytest.mark.parametrize("email", [5, 5.6, True, [], {}, ()])
def test_vendedor_email_type_error(email):
    # A non-string email must raise TypeError.
    try:
        v = Vendedor("João", 20, email, 30)
    except TypeError:
        pass
    except Exception:
        raise AssertionError("Não levanta TypeError para email.")
    else:
        raise AssertionError("Criou um objeto da classe Vendedor com email",
                             "que não é string.")


@pytest.mark.parametrize("email", ["jsilva", "1@@", "!", "-j"])
def test_vendedor_email_value_error(email):
    # A malformed email must raise ValueError.
    try:
        v = Vendedor("João", 20, email, 30)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para email.")
    else:
        raise AssertionError("Criou um objeto da classe Vendedor com email",
                             "inválido.")


@pytest.mark.parametrize("carga_horaria", [10, 14, -5, 46, 50])
def test_vendedor_carga_horaria_value_error(carga_horaria):
    # An out-of-range carga_horaria must raise ValueError at construction.
    try:
        v = Vendedor("João", 20, "<EMAIL>", carga_horaria)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para carga_horaria.")
    else:
        raise AssertionError("Criou um objeto da classe Vendedor com",
                             "carga_horaria inválida.")


@pytest.mark.parametrize("carga_horaria", [10, 14, -5, 46, 50])
def test_vendedor_altera_carga_horaria_value_error(carga_horaria):
    # Assigning an out-of-range carga_horaria must raise ValueError.
    v = Vendedor("João", 20, "<EMAIL>", 30)
    try:
        v.carga_horaria = carga_horaria
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para carga_horaria.")
    else:
        raise AssertionError("Criou um objeto da classe Vendedor com",
                             "carga_horaria inválida.")


@pytest.mark.parametrize("carga_horaria", [15, 20, 25, 30, 35, 40, 45])
def test_vendedor_calcula_salario(carga_horaria):
    # Expected salary: 30 * 4.5 * carga_horaria plus fixed 350 plus a
    # 30-per-visit bonus.
    v = Vendedor("João", 20, "<EMAIL>", carga_horaria)
    assert v.calcula_salario() == (30 * 4.5 * carga_horaria + 350 +
        v.visitas * 30), "O cálculo do salário está incorreto."


def test_vendedor_aumenta_salario():
    # aumenta_salario() must raise only the hourly part by 5%; the fixed
    # 350 and the visit bonus are unchanged.
    v = Vendedor("João", 20, "<EMAIL>", 30)
    salario_antigo = v.calcula_salario()
    v.aumenta_salario()
    salario_novo = v.calcula_salario()
    assert salario_novo == 30 * 1.05 * 30 * 4.5 + 350 + v.visitas * 30, ("O ",
        "método calcula_salario não está aumentando o salário ",
        "corretamente.")


@pytest.mark.parametrize("n_visitas", [5.6, "8", [], (), {}])
def test_vendedor_realizar_visita_type_error(n_visitas):
    # A non-integer n_visitas must raise TypeError.
    v = Vendedor("João", 20, "<EMAIL>", 30)
    try:
        v.realizar_visita(n_visitas)
    except TypeError:
        pass
    except Exception:
        raise AssertionError("Não levanta TypeError para n_visitas.")
    else:
        raise AssertionError("Alterou visitas com n_visitas não inteiro.")


@pytest.mark.parametrize("n_visitas", [-10, -1, 11, 20, 100])
def test_vendedor_realizar_visita_value_error(n_visitas):
    # An out-of-range n_visitas must raise ValueError.
    v = Vendedor("João", 20, "<EMAIL>", 30)
    try:
        v.realizar_visita(n_visitas)
    except ValueError:
        pass
    except Exception:
        raise AssertionError("Não levanta ValueError para n_visitas.")
    else:
        raise AssertionError("Alterou visitas com n_visitas inválido.")


def test_vendedor_zerar_visitas():
    # zerar_visitas() must reset the visit counter to zero.
    v = Vendedor("João", 20, "<EMAIL>", 30)
    v.realizar_visita(5)
    v.zerar_visitas()
    assert v.visitas == 0, "O método zerar_visitas não zera as visitas."
# ------------------------
# Testes da classe Empresa
# ------------------------
def test_cria_empresa():
    # Empresa must be instantiable and expose the full public interface.
    try:
        e = Empresa("Macrosft", "08194332000124", "Software",
                    [Programador("João", 20, "<EMAIL>", 30)])
    except:
        raise AssertionError("Erro ao criar objeto da classe Empresa.")
    else:
        assert hasattr(e, "nome"), "Não existe o atributo público nome."
        assert hasattr(e, "cnpj"), "Não existe o atributo público cnpj."
        assert hasattr(e, "area_atuacao"), ("Não existe o atributo público",
                                            "area_atuacao.")
        assert hasattr(e, "equipe"), "Não existe o atributo público equipe."
        assert hasattr(e, "contrata"), "Não existe o método público contrata."
        assert hasattr(e, "folha_pagamento"), ("Não existe o método público",
                                               "folha_pagamento.")
        assert hasattr(e, "dissidio_anual"), ("Não existe o método público",
                                              "dissidio_anual.")
        assert hasattr(e, "listar_visitas"), ("Não existe o método público",
                                              "listar_visitas.")
        assert hasattr(e, "zerar_visitas_vendedores"), ("Não existe o método",
                                                        "público zerar_visitas_vendedores.")


@pytest.mark.parametrize("nome", [5, 5.6, True, [], (), {}])
def test_empresa_nome(nome):
    # An invalid (non-string) nome must raise EmpresaCreationError.
    try:
        e = Empresa(nome, "08194332000124", "Software",
                    [Programador("João", 20, "<EMAIL>", 30)])
    except EmpresaCreationError:
        pass
    except Exception:
        raise AssertionError("Não levanta EmpresaCreationError para nome.")
    else:
        raise AssertionError("Criou um objeto da classe Empresa com nome",
                             "inválido.")


@pytest.mark.parametrize("cnpj", [5, 5.6, True, [], (), {}])
def test_empresa_cnpj(cnpj):
    # An invalid (non-string) cnpj must raise EmpresaCreationError.
    try:
        e = Empresa("Macrosoft", cnpj, "Software",
                    [Programador("João", 20, "<EMAIL>", 30)])
    except EmpresaCreationError:
        pass
    except Exception:
        raise AssertionError("Não levanta EmpresaCreationError para cnpj.")
    else:
        raise AssertionError("Criou um objeto da classe Empresa com cnpj",
                             "inválido.")


@pytest.mark.parametrize("area_atuacao", [5, 5.6, True, [], (), {}])
def test_empresa_area_atuacao(area_atuacao):
    # An invalid (non-string) area_atuacao must raise EmpresaCreationError.
    try:
        e = Empresa("Macrosoft", "08194332000124", area_atuacao,
                    [Programador("João", 20, "<EMAIL>", 30)])
    except EmpresaCreationError:
        pass
    except Exception:
        raise AssertionError("Não levanta EmpresaCreationError para",
                             "area_atuacao.")
    else:
        raise AssertionError("Criou um objeto da classe Empresa com",
                             "area_atuacao inválida.")


@pytest.mark.parametrize("equipe", [[Pessoa("Maria", 20)], [5, 6, 7],
    [Vendedor("José", 30, "<EMAIL>", 30), Pessoa("Maria", 20)]])
def test_empresa_equipe(equipe):
    # A team containing non-Funcionario members must raise
    # EmpresaCreationError.
    try:
        e = Empresa("Macrosoft", "08194332000124", "Software", equipe)
    except EmpresaCreationError:
        pass
    except Exception:
        raise AssertionError("Não levanta EmpresaCreationError para equipe.")
    else:
        raise AssertionError("Criou um objeto da classe Empresa com equipe",
                             "inválida.")


@pytest.mark.parametrize("novo_funcionario", [int, Pessoa("Maria", 20), [], str])
def test_empresa_contrata_type_error(novo_funcionario):
    # contrata() must reject anything that is not a Funcionario.
    e = Empresa("Macrosoft", "08194332000124", "Software",
                [Programador("João", 20, "<EMAIL>", 30)])
    try:
        e.contrata(novo_funcionario)
    except TypeError:
        pass
    except Exception:
        raise AssertionError("Não levanta TypeError para novo_funcionario.")
    else:
        raise AssertionError("Adicionou novo_funcionario inválido à equipe.")
@pytest.mark.parametrize("novo_funcionario",
    [Programador("João", 20, "<EMAIL>", 30),
     Vendedor("José", 30, "<EMAIL>", 30),
     Estagiario("Ana", 18, "<EMAIL>", 20)])
def test_empresa_contrata_valido(novo_funcionario):
    """contrata() must accept any Funcionario subtype.

    Renamed from ``teste_empresa_contrata_valido``: the ``teste_`` prefix
    does not match pytest's default ``test_*`` collection pattern, so this
    test was silently never run.
    """
    e = Empresa("Macrosoft", "08194332000124", "Software",
                [Programador("João", 20, "<EMAIL>", 30)])
    try:
        e.contrata(novo_funcionario)
    except Exception:
        raise AssertionError("Não aceita subtipos de Funcionario")
def test_empresa_contrata_adiciona():
funcionarios = [Programador("João", 20, "<EMAIL>", 30),
Vendedor("José", 30, "<EMAIL>", 30)]
e = Empresa("Macrosoft", "08194332000124", "Software", [funcionarios[0]])
e.contrata(funcionarios[1])
assert e.equipe == funcionarios, ("O método contrata não está",
"adicionando novo_funcionario à equipe.")
def test_empresa_folha_pagamento():
funcionarios = [Programador("João", 20, "<EMAIL>", 30),
Vendedor("José", 30, "<EMAIL>", 30),
Estagiario("Ana", 18, "<EMAIL>", 20)]
e = Empresa("Macrosoft", "08194332000124", "Software", funcionarios)
e.equipe[1].realizar_visita(5)
salario_esperado = ((30 * 4.5 * 35) +
(30 * 4.5 * 30 + 350 + 30 * 5) +
(20 * 4.5 * 15.5 + 250))
assert salario_esperado == e.folha_pagamento(), ("O cálculo de",
"folha_pagamento está incorreto.")
def test_empresa_dissidio_anual():
    """dissidio_anual must raise base salaries by 5% (bonuses unchanged).

    Fixes vs. original: assert message was a 2-tuple; exact float equality
    replaced by pytest.approx.
    """
    funcionarios = [Programador("João", 20, "<EMAIL>", 30),
                    Vendedor("José", 30, "<EMAIL>", 30),
                    Estagiario("Ana", 18, "<EMAIL>", 20)]
    e = Empresa("Macrosoft", "08194332000124", "Software", funcionarios)
    e.equipe[1].realizar_visita(5)
    e.dissidio_anual()
    # Only the hourly base is multiplied by 1.05; visit/fixed bonuses are not.
    salario_esperado = ((30 * 4.5 * 35 * 1.05) +
                        (30 * 4.5 * 30 * 1.05 + 350 + 30 * 5) +
                        (20 * 4.5 * 15.5 * 1.05 + 250))
    assert e.folha_pagamento() == pytest.approx(salario_esperado), (
        "O cálculo de dissidio_anual está incorreto.")
def test_empresa_listar_visitas():
    """listar_visitas must map seller identifier to visit count.

    Fix vs. original: assert message was a 2-tuple, now a single string.
    """
    funcionarios = [Programador("João", 20, "<EMAIL>", 30),
                    Vendedor("José", 30, "<EMAIL>", 30),
                    Estagiario("Ana", 18, "<EMAIL>", 20),
                    Vendedor("Maria", 20, "<EMAIL>", 40)]
    e = Empresa("Macrosoft", "08194332000124", "Software", funcionarios)
    e.equipe[1].realizar_visita(5)
    e.equipe[3].realizar_visita(1)
    lista_visitas_esperada = {"<EMAIL>": 5, "<EMAIL>": 1}
    assert lista_visitas_esperada == e.listar_visitas(), (
        "O método listar_visitas está incorreto.")
def test_zerar_visitas_vendedores():
    """zerar_visitas_vendedores must reset every seller's visit counter.

    Fix vs. original: assert message was a 2-tuple, now a single string.
    """
    funcionarios = [Programador("João", 20, "<EMAIL>", 30),
                    Vendedor("José", 30, "<EMAIL>", 30),
                    Estagiario("Ana", 18, "<EMAIL>", 20),
                    Vendedor("Maria", 20, "<EMAIL>", 40)]
    e = Empresa("Macrosoft", "08194332000124", "Software", funcionarios)
    e.equipe[1].realizar_visita(5)
    e.equipe[3].realizar_visita(1)
    e.zerar_visitas_vendedores()
    assert (e.equipe[1].visitas, e.equipe[3].visitas) == (0, 0), (
        "O método zerar_visitas_vendedores não está zerando as visitas.")
|
<reponame>CrazyBunQnQ/12306-ocr<gh_stars>100-1000
# coding: utf-8
import cv2
import tensorflow as tf
import numpy as np
from keras import models
from config import Logger, Config
# Silence TensorFlow's informational log spam; only errors are reported.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class ShareInstance():
    """Mixin that lazily creates and caches one shared instance per class.

    ``share()`` builds the instance on first use (forwarding **kwargs to the
    constructor) and returns the cached object on every later call.
    """

    __shared = None  # name-mangled cache slot

    @classmethod
    def share(cls, **kwargs):
        """Return the cached instance, creating it on first call."""
        if not cls.__shared:
            cls.__shared = cls(**kwargs)
        return cls.__shared
class Predict(ShareInstance):
    """Solver for the 12306 captcha: reads the title text with a Keras text
    model, classifies the eight candidate tiles with an image model, and
    returns the 1-based positions whose class matches the title(s).

    Fixes vs. original: deprecated ``np.fromstring`` replaced by
    ``np.frombuffer``; bare ``except:`` narrowed to ``except Exception``.
    """

    def __init__(self):
        # Text (title) recognition model.
        self.model = models.load_model(Config.TEXT_MODEL_FILE, compile=False)
        with open(Config.TEXTS_FILE, encoding='utf-8') as f:
            self.texts = [text.rstrip('\n') for text in f]
        # Tile image classifier.
        self.code_model = models.load_model(Config.IMAGE_MODEL_FILE, compile=False)

    def get_text(self, img, offset=0):
        """Crop the title strip at ``offset``, grayscale it, scale to [0, 1]
        and reshape to (1, h, w, 1) for the text model."""
        text = img[3:22, 120 + offset:177 + offset]
        text = cv2.cvtColor(text, cv2.COLOR_BGR2GRAY)
        text = text / 255.0
        h, w = text.shape
        text.shape = (1, h, w, 1)
        return text

    def get_coordinate(self, img_str):
        """Return the 1-based tile positions matching the captcha title(s).

        ``img_str``: raw image bytes. Returns a list of positions, or the
        empty string when nothing is recognized (kept for backward
        compatibility with existing callers).
        """
        result = ''
        try:
            # Decode and preprocess the captcha image.
            # np.frombuffer is the supported replacement for the deprecated
            # np.fromstring on binary input.
            img = cv2.imdecode(np.frombuffer(img_str, np.uint8), cv2.IMREAD_COLOR)
            text = self.get_text(img)
            images = np.array(list(self._get_imgs(img)))
            images = self.preprocess_input(images)
            label = self.model.predict(text)
            label = label.argmax()
            text = self.texts[label]
            # Recognized title word(s).
            titles = [text]
            position = []
            # Locate a possible second word: its x-offset depends on the
            # first word's length.
            if len(text) == 1:
                offset = 27
            elif len(text) == 2:
                offset = 47
            else:
                offset = 60
            text2 = self.get_text(img, offset=offset)
            # A nearly-white crop (mean >= 0.95) means there is no second word.
            if text2.mean() < 0.95:
                label = self.model.predict(text2)
                label = label.argmax()
                text2 = self.texts[label]
                titles.append(text2)
            labels = self.code_model.predict(images)
            labels = labels.argmax(axis=1)
            for pos, label in enumerate(labels):
                if self.texts[label] in titles:
                    position.append(pos + 1)
            # No tile matched the title(s).
            if len(position) == 0:
                return result
            result = position
            Logger.info('识别结果: %s' % result)
        except Exception:
            # Best-effort: any decode/predict failure yields the empty result.
            pass
        return result

    def preprocess_input(self, x):
        """Subtract the BGR channel means (cv2 images are already BGR)."""
        x = x.astype('float32')
        mean = [103.939, 116.779, 123.68]
        x -= mean
        return x

    def _get_imgs(self, img):
        """Yield the 67x67 candidate tiles on a 5px-spaced grid below y=40."""
        interval = 5
        length = 67
        for x in range(40, img.shape[0] - length, interval + length):
            for y in range(interval, img.shape[1] - length, interval + length):
                yield img[x:x + length, y:y + length]
if __name__ == '__main__':
    # Read the sample captcha once, in binary mode (the recognizer expects
    # raw bytes); the original re-opened the file in text mode every
    # iteration and reached the bytes through f.buffer.
    with open('test.jpg', 'rb') as f:
        img_bytes = f.read()
    for _ in range(10):
        print(Predict.share().get_coordinate(img_bytes))
|
<filename>Dimensionality-Reduction/src/main.py
import numpy as np
import os
from sklearn.decomposition import IncrementalPCA
from os import listdir
from os.path import isfile, join
import copy
import torch
import time
def get_PCA(src_dir, tar_dir, k):
    '''
    Train an IncrementalPCA on the feature files in src_dir and write the
    k-dimensional projections to tar_dir.

    src_dir: directory with all the original features *only*
    tar_dir: directory to place PCA reduced features
    k: reduced dimension of each vector

    Fixes vs. original: unused ``rem`` variable removed; paths built with
    os.path.join instead of string concatenation (works whether or not the
    directories end with a separator).
    '''
    feature_files = [f for f in listdir(src_dir) if isfile(join(src_dir, f))]
    feature_files.sort()
    tot_im = len(feature_files)
    # Batch size equals the target dimension: IncrementalPCA.partial_fit
    # needs at least n_components samples per batch.
    bs = k
    total_bs = tot_im // bs
    ipca = IncrementalPCA(n_components=bs)
    print("Starting PCA Training")
    temp = np.zeros((bs, 512*512), dtype=float)
    # NOTE: the trailing tot_im % bs files do not fill a whole batch and are
    # therefore not used for fitting (they are still transformed below).
    for i in range(total_bs):
        for j in range(bs):
            nm = feature_files[i*bs+j]
            temp[j, :] = np.load(join(src_dir, nm)).flatten()
        ipca.partial_fit(temp)
    del temp
    print("Getting PCA Features")
    temp = np.zeros((512*512), dtype=float)
    for i in range(tot_im):
        nm = feature_files[i]
        temp[:] = np.load(join(src_dir, nm)).flatten()
        np.save(join(tar_dir, feature_files[i]), ipca.transform(temp.reshape(1, -1)))
    del temp
def normalize(x, mean, k):
    """Center ``x`` on ``mean``; when ``k`` is given, divide elementwise by
    ``|x - mean| ** k``.

    Fix vs. original: ``k == None`` replaced by the identity test
    ``k is None`` (PEP 8; ``==`` can be hijacked by operator overloading).
    """
    if k is None:
        return x - mean
    return np.divide(x - mean, np.absolute(x - mean) ** k)
def pdist(sample_1, sample_2, norm=2):
    r"""Compute the matrix of all pairwise distances.

    Arguments
    ---------
    sample_1 : torch.Tensor or Variable
        The first sample, of shape ``(n_1, d)``.
    sample_2 : torch.Tensor or Variable
        The second sample, of shape ``(n_2, d)``.
    norm : float
        The l_p norm to be used.

    Returns
    -------
    torch.Tensor or Variable
        Matrix of shape ``(n_1, n_2)``; for ``norm == 2`` entry [i, j] is the
        *squared* distance ``|| sample_1[i] - sample_2[j] ||_2^2``, otherwise
        it is ``sum(|diff| ** norm)`` (no final root taken).
    """
    n_1 = sample_1.size(0)
    n_2 = sample_2.size(0)
    p = float(norm)
    if p == 2.:
        # Expand ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b via one mat-mul.
        sq_1 = (sample_1 ** 2).sum(dim=1, keepdim=True)
        sq_2 = (sample_2 ** 2).sum(dim=1, keepdim=True)
        cross = sample_1.mm(sample_2.t())
        squared = (sq_1.expand(n_1, n_2)
                   + sq_2.transpose(0, 1).expand(n_1, n_2)
                   - 2 * cross)
        # abs() guards against tiny negative values from rounding.
        return torch.abs(squared)
    dim = sample_1.size(1)
    left = sample_1.unsqueeze(1).expand(n_1, n_2, dim)
    right = sample_2.unsqueeze(0).expand(n_1, n_2, dim)
    return (torch.abs(left - right) ** p).sum(dim=2)
def get_nearest_neighbours(src_dir, tar_dir, k):
    '''
    Write, for every feature file, its k nearest and k farthest neighbours.

    src_dir: directory with all the features stored as numpy files
    tar_dir: directory to store the txt files listing the neighbours
    k: number of neighbours

    NOTE: paths are built by string concatenation, so both directories must
    end with a path separator (unchanged from the original contract).

    Fixes vs. original: ``name.strip('.npy')`` stripped any leading/trailing
    '.', 'n', 'p', 'y' characters from the file name (mangling names such as
    'py0.npy'); replaced by ``os.path.splitext``. Output files are now closed
    via ``with`` even on error, and ``type(mean) == type(None)`` is the
    idiomatic ``mean is None``.
    '''
    image_names = listdir(src_dir)
    image_names.sort()
    num_of_images = len(image_names)
    print(len(image_names), "images found.")
    # TODO: To parallelize
    # Running mean over all feature vectors: mean_i = (i*mean + x_i) / (i+1).
    mean = None
    for i, image_name in enumerate(image_names):
        if i % 1000 == 0:
            print(i)
        features = np.load(src_dir + image_name)
        if mean is None:
            mean = copy.deepcopy(features)
        mean = ((i * mean) + features) / (1 + i)
    print("Calculated the mean of all features!")
    X = []
    for i, image_name in enumerate(image_names):
        if i % 100 == 0:
            print(i)
        features = np.load(src_dir + image_name)
        features = normalize(features, mean, None)
        X.append(features)
    X = np.array(X)
    print("Loaded all features after normalizing ->", X.shape)
    BATCH_SIZE = 1000
    B = torch.from_numpy(X.astype(np.float32))
    begin = time.time()
    with open(tar_dir + "Top15.txt", "w+") as top15, \
            open(tar_dir + "Bottom15.txt", "w+") as bottom15:
        for i in range(0, num_of_images, BATCH_SIZE):
            sources = []
            for j in range(BATCH_SIZE):
                if i + j >= num_of_images:
                    break
                sources.append(X[i+j])
            sources = np.array(sources)
            print("Initialized batch with ", sources.shape)
            A = torch.from_numpy(sources.astype(np.float32))
            d = pdist(A, B)
            print("Calculated distances!")
            # argsort puts each row's own index at column 0, hence j + 1 below.
            neighbours = d.data.numpy().argsort(axis=-1)
            print("Sorted distances!")
            n = len(sources)
            for img_id in range(n):
                stem = os.path.splitext(image_names[i + img_id])[0]
                top15.write(stem + ', ')
                bottom15.write(stem + ', ')
                for j in range(k):
                    top15.write(
                        os.path.splitext(image_names[neighbours[img_id, j + 1]])[0] + ', ')
                    bottom15.write(
                        os.path.splitext(image_names[neighbours[img_id, num_of_images-1-j]])[0] + ', ')
                top15.write('\n')
                bottom15.write('\n')
            print("Elapsed: ", time.time() - begin)
            print("ETA: ", ((time.time() - begin) * (num_of_images - i - n)) / (i + n), " seconds")
|
"""
test nieghbors
"""
import pytest
@pytest.mark.skip(reason="This test documents an example, but is redundant. Skipped in the interest of CI time.")
def test_current_example():
    """Worked example: score MIRA epitope-60 clones against a bulk background
    repertoire with bkgd_cntl_nn2 and export the centers table plus a sparse
    rectangular distance matrix. Kept as executable documentation (skipped in
    CI, see decorator).
    """
    import os
    import pandas as pd
    import numpy as np
    from tcrdist.repertoire import TCRrep
    from tcrdist.neighbors import compute_ecdf, bkgd_cntl_nn2
    from tcrdist.automate import auto_pgen
    import scipy.sparse
    # Background repertoire (olga 100K + brit 100K sample, per file name).
    fn_mira_background = 'mira_epitope_60_436_MWSFNPETNI_SFNPETNIL_SMWSFNPET.tcrdist3.csv.olga100K_brit100K_bkgd.csv'
    fn_mira_background = os.path.join('tcrdist', 'data', 'covid19', fn_mira_background)
    df_background = pd.read_csv(fn_mira_background)
    # Distances within the background itself are not needed.
    tr_background = TCRrep(cell_df = df_background.copy(),
                           organism = "human",
                           chains= ['beta'],
                           compute_distances = False)
    # Antigen-enriched MIRA set, beta chain columns only.
    fn_mira = 'mira_epitope_60_436_MWSFNPETNI_SFNPETNIL_SMWSFNPET.tcrdist3.csv'
    fn_mira = os.path.join('tcrdist', 'data', 'covid19', fn_mira)
    df_mira = pd.read_csv(fn_mira)
    df_mira = df_mira[['v_b_gene', 'j_b_gene', 'cdr3_b_aa']]
    tr = TCRrep(cell_df = df_mira.copy(),
                organism = 'human',
                chains = ['beta'],
                db_file = 'alphabeta_gammadelta_db.tsv',
                store_all_cdr = False,
                compute_distances = True)
    auto_pgen(tr)
    # Rectangular distances: enriched clones (rows) vs background (cols);
    # store=False keeps the result only in tr.rw_beta.
    tr.compute_rect_distances(df = tr.clone_df,
                              df2 = tr_background.clone_df,
                              store = False)
    assert tr.rw_beta.shape[0] == tr.clone_df.shape[0]
    centers_df = bkgd_cntl_nn2(tr = tr,
                               tr_background = tr_background,
                               ctrl_bkgd = 10**-6,
                               weights = tr_background.clone_df.weights,
                               col = 'cdr3_b_aa',
                               ncpus = 2,
                               thresholds = [x for x in range(0,50,2)],
                               generate_regex = True,
                               test_regex = True)
    out_fn_center_df = 'mira_epitope_60_436_MWSFNPETNI_SFNPETNIL_SMWSFNPET.tcrdist3.csv.centers_df.csv'
    out_fn_rw_beta_sparse_matrix = 'mira_epitope_60_436_MWSFNPETNI_SFNPETNIL_SMWSFNPET.tcrdist3.csv.rw_beta.sparse.npz'
    centers_df.to_csv(out_fn_center_df, index = False)
    tr.rw_beta[tr.rw_beta == 0] = 1  # set true zeros to 1 so they survive sparsification
    tr.rw_beta[tr.rw_beta > 50] = 0  # drop distances greater than 50 (become sparse zeros)
    rw_beta_sparse = scipy.sparse.csr_matrix(tr.rw_beta)
    scipy.sparse.save_npz(out_fn_rw_beta_sparse_matrix, rw_beta_sparse)
# NOT USED ANYMORE
# def test_compute_population_estimate_ecdf():
# """
# Test import, and ecdf with some or all optional arguments
# """
# from tcrdist.neighbors import compute_population_estimate_ecdf
# import numpy as np
# assert isinstance(compute_population_estimate_ecdf(data = np.array([1,2,3,4,5,6])), np.ndarray)
# assert isinstance(compute_population_estimate_ecdf(data = np.array([1,2,3,4,5,6]), thresholds = np.array([3])), np.ndarray)
# r = compute_population_estimate_ecdf(data = np.array([1,2,3,4,5,6]), thresholds = np.array([3]))
# assert np.all( np.isclose(r, np.array([0.5]) ) )
@pytest.mark.skip(reason="This test documents an example, but is redundant. Skipped in the interest of CI time.")
def test_old_example():
    """
    The purpose of this example is to show the use of
    choosing thresholds based on background discovery rate.
    """
    import os
    import pandas as pd
    import numpy as np
    from tcrdist.repertoire import TCRrep
    from tcrdist.neighbors import compute_ecdf, bkgd_cntl_nn2
    from tcrdist.automate import auto_pgen
    from tcrdist.regex import _index_to_regex_str, _index_to_seqs
    from tcrdist.summarize import _summ, _dist_summ, _select, filter_gt, filter_is, test_for_subsets, test_for_almost_subsets
    # Background repertoire; uniform weights are assigned below.
    fn = os.path.join('tcrdist', 'data', 'covid19', "m60_bkgd_test_input.csv")
    df_background = pd.read_csv(fn)
    tr_background = TCRrep(cell_df = df_background,
                           organism = "human",
                           chains= ['beta'],
                           compute_distances = False)
    tr_background.clone_df['weights'] = 1
    # Antigen-enriched input set.
    fn = os.path.join('tcrdist', 'data', 'covid19', "m60_test_input.csv")
    df = pd.read_csv(fn)
    tr = TCRrep(cell_df = df,
                organism = 'human',
                chains = ['beta'],
                db_file = 'alphabeta_gammadelta_db.tsv')
    auto_pgen(tr)
    # Rectangular enriched-vs-background distances (kept only in tr.rw_beta).
    tr.compute_rect_distances(df = tr.clone_df,
                              df2 = tr_background.clone_df,
                              store = False)
    assert tr.rw_beta.shape[0] == tr.clone_df.shape[0]
    centers_df = bkgd_cntl_nn2(tr = tr,
                               tr_background = tr_background,
                               ctrl_bkgd = 2*10**-5,
                               weights = tr_background.clone_df.weights,
                               col = 'cdr3_b_aa',
                               ncpus = 2,
                               thresholds = [x for x in range(0,50,2)],
                               generate_regex = True,
                               test_regex = True)
    centers_df.sort_values(['target_hits'], ascending = False)
    # The remainder documents optional post-processing; kept commented out.
    # # Let's add some useful summary info about publicity and number of unique seqs
    # def tabulate_subjects(neighbor_df, clone_df, col_nn ='target_neighbors', col_seq= 'target_seqs'):
    #     # Tabulate the number of unique subjects at each node
    #     neighbor_df['nsubject'] = neighbor_df[col_nn].apply( lambda x: len(set(_select(clone_df, iloc_rows =x, col = 'subject'))))
    #     # Tabulate the number of unique sequences at each node
    #     neighbor_df['useq'] = neighbor_df[col_seq].apply( lambda x: len(set(x)))
    #     return neighbor_df
    # tabulate_subjects(centers_df, tr.clone_df)
    # # Let's add some useful info about Probability of generation
    # centers_df['pgen_cdr3_b_aa'] = tr.clone_df.pgen_cdr3_b_aa.copy()
    # centers_df['pgen_dist'] = _summ(df = tr.clone_df,
    #                                 indices = centers_df['target_neighbors'],
    #                                 column = 'pgen_cdr3_b_aa',
    #                                 f=_dist_summ)
    # from scipy.stats import chi2_contingency
    # n1 = 436
    # n2 = 100000
    # beta_re = 1
    # beta_dist = 1
    # centers_df['chi2re'] = [chi2_contingency(np.array( [[r['target_re_hits'], n1-r['target_re_hits']],[r['bkgd_re_hits'], n2-r['bkgd_re_hits']]]))[0] for _,r in centers_df.iterrows() ]
    # centers_df['chi2dist'] = [chi2_contingency(np.array( [[r['target_hits'], n1-r['target_hits']],[r['background_hits'], n2-r['background_hits']]]))[0] for _,r in centers_df.iterrows() ]
    # centers_df['chi2joint'] = [beta_re * r['chi2re'] + beta_dist* r['chi2dist'] for _,r in centers_df.iterrows() ]
    # # Rank and select non-redundant
    # sorted_centers_df = centers_df.sort_values(['chi2joint'], ascending = False).copy()
    # sorted_centers_df['novel'] = test_for_almost_subsets(sorted_centers_df['target_neighbors'], 5)
    # sorted_filtered_centers_df = filter_is(sorted_centers_df, 'novel', 1).copy()
    # #sorted_filtered_centers_df.to_csv("m60_centers_df.csv")
|
<reponame>Bugnon/oc-2018
import turtle
# Module-level turtle for interactive use. NOTE(review): creating a Turtle at
# import time opens a graphics window as a side effect — confirm intended.
bob = turtle.Turtle()
def lettre_a(t):
    """Draw the letter 'A' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 75), ("fd", 170), ("rt", 150), ("fd", 170),
                       ("rt", 180), ("fd", 70), ("lt", 75), ("fd", 55),
                       ("bk", 55), ("rt", 75), ("bk", 70), ("rt", 105)):
        getattr(t, op)(amount)
def lettre_b(t):
    """Draw the letter 'B' with turtle *t* (same pen moves as before)."""
    for op, args in (("lt", (90,)), ("fd", (200,)), ("rt", (180,)), ("fd", (200,)),
                     ("lt", (90,)), ("fd", (30,)), ("circle", (50, 180)), ("fd", (30,)),
                     ("lt", (180,)), ("fd", (30,)), ("circle", (50, 180)), ("fd", (30,)),
                     ("lt", (90,)), ("fd", (200,)), ("lt", (90,)), ("penup", ()),
                     ("fd", (85,)), ("pendown", ())):
        getattr(t, op)(*args)
def lettre_c(t):
    """Draw the letter 'C' with turtle *t* (same pen moves as before)."""
    for op, args in (("penup", ()), ("fd", (105,)), ("lt", (90,)), ("fd", (200,)),
                     ("pendown", ()), ("lt", (90,)), ("circle", (100, 180))):
        getattr(t, op)(*args)
def lettre_d(t):
    """Draw the letter 'D' with turtle *t* (same pen moves as before)."""
    for op, args in (("circle", (100, 180)), ("lt", (90,)), ("fd", (200,))):
        getattr(t, op)(*args)
def lettre_e(t):
    """Draw the letter 'E' with turtle *t* (same pen moves as before)."""
    for op, amount in (("fd", 80), ("lt", 180), ("fd", 80), ("lt", 90),
                       ("fd", 66), ("lt", 90), ("fd", 80), ("lt", 180),
                       ("fd", 80), ("lt", 90), ("fd", 66), ("lt", 90),
                       ("fd", 80)):
        getattr(t, op)(amount)
def lettre_f(t):
    """Draw the letter 'F' with turtle *t* (same pen moves as before)."""
    for op, amount in (("fd", 80), ("lt", 180), ("fd", 80), ("lt", 90),
                       ("fd", 66), ("lt", 90), ("fd", 80), ("lt", 180),
                       ("fd", 80), ("lt", 90), ("fd", 66)):
        getattr(t, op)(amount)
def lettre_g(t):
    """Draw the letter 'G' with turtle *t* (same pen moves as before)."""
    for op, amount in (("fd", 80), ("lt", 180), ("fd", 80), ("lt", 90),
                       ("fd", 140), ("lt", 90), ("fd", 80), ("lt", 90),
                       ("fd", 70), ("lt", 90), ("fd", 50)):
        getattr(t, op)(amount)
def lettre_h(t):
    """Draw the letter 'H' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 90), ("fd", 200), ("lt", 180), ("fd", 100),
                       ("lt", 90), ("fd", 80), ("rt", 90), ("fd", 100),
                       ("lt", 180), ("fd", 200)):
        getattr(t, op)(amount)
def lettre_i(t):
    """Draw the letter 'I' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 90), ("fd", 200)):
        getattr(t, op)(amount)
def lettre_j(t):
    """Draw the letter 'J' with turtle *t* (same pen moves as before)."""
    for op, args in (("rt", (90,)), ("circle", (50, 180)), ("fd", (100,))):
        getattr(t, op)(*args)
def lettre_k(t):
    """Draw the letter 'K' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 90), ("fd", 200), ("lt", 180), ("fd", 100),
                       ("lt", 45), ("fd", 120), ("lt", 180), ("fd", 120),
                       ("rt", 90), ("fd", 120)):
        getattr(t, op)(amount)
def lettre_l(t):
    """Draw the letter 'L' with turtle *t* (same pen moves as before)."""
    for op, amount in (("rt", 90), ("fd", 200), ("lt", 90), ("fd", 100)):
        getattr(t, op)(amount)
def lettre_m(t):
    """Draw the letter 'M' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 90), ("fd", 200), ("rt", 135), ("fd", 90),
                       ("lt", 90), ("fd", 90), ("rt", 135), ("fd", 200)):
        getattr(t, op)(amount)
def lettre_n(t):
    """Draw the letter 'N' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 90), ("fd", 200), ("rt", 160), ("fd", 210),
                       ("lt", 160), ("fd", 200)):
        getattr(t, op)(amount)
def lettre_o(t):
    """Draw the letter 'O' with turtle *t*: one full circle of radius 100."""
    t.circle(100, 360)
def lettre_p(t):
    """Draw the letter 'P' with turtle *t* (same pen moves as before)."""
    for op, args in (("lt", (90,)), ("fd", (100,)), ("rt", (90,)),
                     ("circle", (50, 180)), ("lt", (90,)), ("fd", (100,))):
        getattr(t, op)(*args)
def lettre_r(t):
    """Draw the letter 'R' with turtle *t* (same pen moves as before)."""
    for op, args in (("lt", (90,)), ("fd", (100,)), ("rt", (90,)),
                     ("circle", (50, 180)), ("lt", (90,)), ("fd", (100,)),
                     ("lt", (30,)), ("fd", (110,))):
        getattr(t, op)(*args)
def lettre_s(t):
    """Draw the letter 'S' with turtle *t* (same pen moves as before)."""
    for op, args in (("circle", (50, 180)), ("rt", (90,)), ("penup", ()),
                     ("fd", (100,)), ("pendown", ()), ("lt", (90,)),
                     ("circle", (50, 180))):
        getattr(t, op)(*args)
def lettre_t(t):
    """Draw the letter 'T' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 90), ("fd", 200), ("lt", 90), ("fd", 100),
                       ("lt", 180), ("fd", 200)):
        getattr(t, op)(amount)
def lettre_u(t):
    """Draw the letter 'U' with turtle *t* (same pen moves as before)."""
    for op, args in (("rt", (90,)), ("fd", (200,)), ("circle", (50, 180)),
                     ("fd", (200,))):
        getattr(t, op)(*args)
def lettre_v(t):
    """Draw the letter 'V' with turtle *t* (same pen moves as before)."""
    for op, amount in (("rt", 65), ("fd", 200), ("lt", 130), ("fd", 200)):
        getattr(t, op)(amount)
def lettre_y(t):
    """Draw the letter 'Y' with turtle *t* (same pen moves as before)."""
    for op, amount in (("rt", 65), ("fd", 100), ("lt", 130), ("fd", 100),
                       ("bk", 200)):
        getattr(t, op)(amount)
def lettre_x(t):
    """Draw the letter 'X' with turtle *t* (same pen moves as before)."""
    for op, amount in (("lt", 60), ("fd", 200), ("bk", 100), ("rt", 120),
                       ("fd", 100), ("bk", 200)):
        getattr(t, op)(amount)
def lettre_z(t):
    """Draw the letter 'Z' with turtle *t* (same pen moves as before)."""
    for op, amount in (("fd", 100), ("rt", 135), ("fd", 120), ("lt", 135),
                       ("fd", 100)):
        getattr(t, op)(amount)
def change(t):
    """Advance turtle *t* 40 units with the pen lifted (inter-letter gap)."""
    for op, args in (("penup", ()), ("fd", (40,)), ("pendown", ())):
        getattr(t, op)(*args)
import bpy
from mathutils import Vector, Euler
from mathutils.geometry import intersect_line_plane
from .functions_modal import *
from .classes_tool import *
def setup_tools(modal):
    """Create the modal tool container and register every interactive tool
    (selection, rotate, sphereize, point, gizmo, mirror/flatten/align) with
    its mouse/confirm/cancel callbacks and keymap arguments.

    Each ``modal._*_tool`` attribute set here is the handle other callbacks
    use to switch the active tool.
    """
    modal.tools = GEN_Modal_Container()
    modal.tools.set_cancel_keys(['Cancel Tool 1', 'Cancel Tool 2'])
    modal.tools.set_confirm_keys(['Confirm Tool 1', 'Confirm Tool 2'])
    # Navigation events pass through to Blender instead of the tool.
    modal.tools.set_pass_through_events(modal.nav_list)
    # BOX SEL
    tool = modal.tools.add_tool(inherit_confirm=False)
    tool.set_use_start(True)
    tool.add_start_argument('Box Select Start Selection', box_sel_start)
    tool.set_mouse_function(box_sel_mouse)
    tool.set_cancel_function(box_sel_cancel)
    tool.set_confirm_function(box_sel_confirm)
    tool.add_confirm_key('Box New Selection')
    tool.add_confirm_key('Box Add Selection')
    tool.add_confirm_key('Box Remove Selection')
    # tool.set_pre_pass_through_function(clear_draw_pre_navigate)
    # tool.set_post_pass_through_function(clear_draw_post_navigate)
    modal._box_sel_tool = tool
    # LASSO SEL
    tool = modal.tools.add_tool(inherit_confirm=False)
    tool.set_use_start(True)
    tool.add_start_argument('Lasso Select Start Selection', lasso_sel_start)
    tool.set_mouse_function(lasso_sel_mouse)
    tool.set_cancel_function(lasso_sel_cancel)
    tool.set_confirm_function(lasso_sel_confirm)
    tool.add_confirm_key('Lasso New Selection')
    tool.add_confirm_key('Lasso Add Selection')
    tool.add_confirm_key('Lasso Remove Selection')
    # tool.set_pre_pass_through_function(clear_draw_pre_navigate)
    # tool.set_post_pass_through_function(clear_draw_post_navigate)
    modal._lasso_sel_tool = tool
    # CIRCLE SEL
    tool = modal.tools.add_tool(inherit_confirm=False)
    tool.set_use_start(True)
    tool.add_start_argument('Circle Select Start Selection', circle_sel_start)
    # Radius adjustments are usable even before the selection starts.
    tool.add_keymap_argument('Circle Increase Size 1',
                             circle_sel_inc, pre_start=True)
    tool.add_keymap_argument('Circle Increase Size 2',
                             circle_sel_inc, pre_start=True)
    tool.add_keymap_argument('Circle Decrease Size 1',
                             circle_sel_dec, pre_start=True)
    tool.add_keymap_argument('Circle Decrease Size 2',
                             circle_sel_dec, pre_start=True)
    tool.add_keymap_argument('Circle Resize Mode Start',
                             circle_sel_start_resize, pre_start=True)
    tool.set_mouse_function(circle_sel_mouse)
    tool.set_cancel_function(circle_sel_cancel)
    tool.set_confirm_function(circle_sel_confirm)
    tool.add_confirm_key('Circle End Selection')
    tool.add_confirm_key('Circle Add Selection')
    tool.add_confirm_key('Circle Remove Selection')
    # tool.set_pre_pass_through_function(clear_draw_pre_navigate)
    # tool.set_post_pass_through_function(clear_draw_post_navigate)
    modal._circle_sel_tool = tool
    # CIRCLE SEL RESIZE
    tool = modal.tools.add_tool()
    tool.set_mouse_function(circle_resize_mouse)
    tool.set_cancel_function(circle_resize_cancel)
    tool.set_confirm_function(circle_resize_confirm)
    tool.add_confirm_key('Circle Resize Confirm')
    modal._circle_resize_tool = tool
    # ROTATE NORMALS
    tool = modal.tools.add_tool()
    tool.set_mouse_function(rotate_norms_mouse)
    tool.set_cancel_function(rotate_norms_cancel)
    tool.set_confirm_function(rotate_norms_confirm)
    tool.add_keymap_argument('Rotate X Axis', rotate_set_x)
    tool.add_keymap_argument('Rotate Y Axis', rotate_set_y)
    tool.add_keymap_argument('Rotate Z Axis', rotate_set_z)
    tool.set_pre_pass_through_function(rotate_pre_navigate)
    tool.set_post_pass_through_function(rotate_post_navigate)
    modal._rotate_norms_tool = tool
    # SPHEREIZE
    tool = modal.tools.add_tool(inherit_confirm=False, inherit_cancel=False)
    tool.set_mouse_function(sphereize_mouse)
    tool.set_confirm_function(sphereize_confirm)
    tool.set_cancel_function(sphereize_cancel)
    tool.set_mouse_pass(True)
    tool.add_keymap_argument('Target Move Start', sphereize_start_move)
    tool.add_keymap_argument('Target Center Reset', sphereize_reset)
    tool.add_keymap_argument('Toggle X-Ray', toggle_x_ray)
    tool.add_cancel_key('Cancel Tool 1')
    modal._sphereize_tool = tool
    # SPHEREIZE MOVE
    tool = modal.tools.add_tool()
    tool.set_mouse_function(sphereize_move_mouse)
    tool.set_cancel_function(sphereize_move_cancel)
    tool.set_confirm_function(sphereize_move_confirm)
    tool.add_keymap_argument('Target Move X Axis', sphereize_move_set_x)
    tool.add_keymap_argument('Target Move Y Axis', sphereize_move_set_y)
    tool.add_keymap_argument('Target Move Z Axis', sphereize_move_set_z)
    tool.add_keymap_argument('Toggle X-Ray', toggle_x_ray)
    modal._sphereize_move_tool = tool
    # POINT
    tool = modal.tools.add_tool(inherit_confirm=False, inherit_cancel=False)
    tool.set_mouse_function(point_mouse)
    tool.set_confirm_function(point_confirm)
    tool.set_cancel_function(point_cancel)
    tool.set_mouse_pass(True)
    tool.add_keymap_argument('Target Move Start', point_start_move)
    tool.add_keymap_argument('Target Center Reset', point_reset)
    tool.add_keymap_argument('Toggle X-Ray', toggle_x_ray)
    tool.add_cancel_key('Cancel Tool 1')
    modal._point_tool = tool
    # POINT MOVE
    tool = modal.tools.add_tool()
    tool.set_mouse_function(point_move_mouse)
    tool.set_cancel_function(point_move_cancel)
    tool.set_confirm_function(point_move_confirm)
    tool.add_keymap_argument('Target Move X Axis', point_move_set_x)
    tool.add_keymap_argument('Target Move Y Axis', point_move_set_y)
    tool.add_keymap_argument('Target Move Z Axis', point_move_set_z)
    tool.add_keymap_argument('Toggle X-Ray', toggle_x_ray)
    modal._point_move_tool = tool
    # GIZMO CLICK
    tool = modal.tools.add_tool()
    tool.set_mouse_function(gizmo_mouse)
    tool.set_confirm_function(gizmo_confirm)
    tool.set_cancel_function(gizmo_cancel)
    tool.add_confirm_key('Confirm Tool 3')
    modal._gizmo_tool = tool
    # MIRROR
    tool = modal.tools.add_tool(inherit_confirm=False)
    tool.set_cancel_function(mirror_cancel)
    tool.add_keymap_argument('Mirror Normals X', mirror_x)
    tool.add_keymap_argument('Mirror Normals Y', mirror_y)
    tool.add_keymap_argument('Mirror Normals Z', mirror_z)
    modal._mirror_tool = tool
    # FLATTEN
    tool = modal.tools.add_tool(inherit_confirm=False)
    tool.set_cancel_function(flatten_cancel)
    tool.add_keymap_argument('Flatten Normals X', flatten_x)
    tool.add_keymap_argument('Flatten Normals Y', flatten_y)
    tool.add_keymap_argument('Flatten Normals Z', flatten_z)
    modal._flatten_tool = tool
    # ALIGN
    tool = modal.tools.add_tool(inherit_confirm=False)
    tool.set_cancel_function(align_cancel)
    tool.add_keymap_argument('Align Normals Pos X', align_pos_x)
    tool.add_keymap_argument('Align Normals Pos Y', align_pos_y)
    tool.add_keymap_argument('Align Normals Pos Z', align_pos_z)
    tool.add_keymap_argument('Align Normals Neg X', align_neg_x)
    tool.add_keymap_argument('Align Normals Neg Y', align_neg_y)
    tool.add_keymap_argument('Align Normals Neg Z', align_neg_z)
    modal._align_tool = tool
    return
def tool_end(modal):
    """Leave tool mode and refresh the keymap display."""
    modal.tool_mode = False
    keymap_refresh(modal)
    return
#
# BOX SELECT FUNCS
def box_sel_start(modal, context, event, keys, func_data):
modal._mode_cache.append(
[modal._mouse_reg_loc.tolist(), modal._mouse_reg_loc.tolist()])
modal.box_selecting = True
return
def box_sel_mouse(modal, context, event, func_data):
    """Update the cached box on mouse move.

    With Alt held the whole box is dragged by the mouse delta; otherwise only
    the 'current' corner follows the mouse.
    """
    if event.alt:
        if np.isnan(modal._mouse_init).all():
            # First Alt-move: just record the reference position.
            modal._mouse_init[:] = modal._mouse_reg_loc
        else:
            # Translate every cached corner by the mouse delta.
            offset = modal._mouse_reg_loc-modal._mouse_init
            for p in range(len(modal._mode_cache[0])):
                modal._mode_cache[0][p][0] += offset[0]
                modal._mode_cache[0][p][1] += offset[1]
            modal._mouse_init[:] = modal._mouse_reg_loc
    else:
        modal._mouse_init[:] = np.nan
        # Replace the 'current' corner with the new mouse position.
        modal._mode_cache[0].pop(-1)
        modal._mode_cache[0].append(modal._mouse_reg_loc.tolist())
    return
def box_sel_confirm(modal, context, event, keys, func_data):
    """Apply the box selection (add/remove/new depending on the confirm key),
    push an undo step when the selection changed, and tear the tool down."""
    if 'Box Add Selection' in keys:
        change = box_selection_test(modal, True, False)
        if change:
            add_to_undostack(modal, 0)
    elif 'Box Remove Selection' in keys:
        change = box_selection_test(modal, False, True)
        if change:
            add_to_undostack(modal, 0)
    elif 'Box New Selection' in keys:
        change = box_selection_test(modal, False, False)
        if change:
            add_to_undostack(modal, 0)
    modal.box_selecting = False
    modal._mode_cache.clear()
    modal._mouse_init[:] = np.nan
    bpy.context.window.cursor_modal_set('DEFAULT')
    update_orbit_empty(modal)
    tool_end(modal)
    end_selection_drawing(modal)
    return
def box_sel_cancel(modal, context, event, keys, func_data):
    """Abort the box selection without applying it and restore modal state."""
    modal.box_selecting = False
    modal._mode_cache.clear()
    modal._mouse_init[:] = np.nan
    bpy.context.window.cursor_modal_set('DEFAULT')
    update_orbit_empty(modal)
    tool_end(modal)
    end_selection_drawing(modal)
    return
#
# LASSO SELECT FUNCS
def lasso_sel_start(modal, context, event, keys, func_data):
    """Begin a lasso selection: cache the first lasso point and set the flag."""
    modal._mode_cache.append([modal._mouse_reg_loc.tolist()])
    modal.lasso_selecting = True
    return
def lasso_sel_mouse(modal, context, event, func_data):
    """Extend or drag the lasso on mouse move.

    With Alt held the whole lasso is dragged by the mouse delta; otherwise a
    new point is appended once the mouse has moved more than 5 px.
    """
    if event.alt:
        if np.isnan(modal._mouse_init).all():
            # First Alt-move: record the reference position.
            modal._mouse_init[:] = modal._mouse_reg_loc
        else:
            # Translate every cached lasso point by the mouse delta.
            offset = modal._mouse_reg_loc-modal._mouse_init
            for p in range(len(modal._mode_cache[0])):
                modal._mode_cache[0][p][0] += offset[0]
                modal._mode_cache[0][p][1] += offset[1]
            modal._mouse_init[:] = modal._mouse_reg_loc
    else:
        modal._mouse_init[:] = np.nan
        prev_loc = Vector(modal._mode_cache[0][-1])
        cur_loc = Vector(modal._mouse_reg_loc)
        offset = cur_loc.xy - prev_loc.xy
        # 5 px threshold keeps the lasso point count manageable.
        if offset.length > 5.0:
            modal._mode_cache[0].append(modal._mouse_reg_loc.tolist())
    return
def lasso_sel_confirm(modal, context, event, keys, func_data):
    """Apply the lasso selection (add/remove/new depending on the confirm
    key), push an undo step when it changed, and tear the tool down."""
    if 'Lasso Add Selection' in keys:
        change = lasso_selection_test(modal, True, False)
        if change:
            add_to_undostack(modal, 0)
    elif 'Lasso Remove Selection' in keys:
        change = lasso_selection_test(modal, False, True)
        if change:
            add_to_undostack(modal, 0)
    elif 'Lasso New Selection' in keys:
        change = lasso_selection_test(modal, False, False)
        if change:
            add_to_undostack(modal, 0)
    modal.lasso_selecting = False
    modal._mode_cache.clear()
    modal._mouse_init[:] = np.nan
    bpy.context.window.cursor_modal_set('DEFAULT')
    update_orbit_empty(modal)
    tool_end(modal)
    end_selection_drawing(modal)
    return
def lasso_sel_cancel(modal, context, event, keys, func_data):
    """Abort the lasso selection without applying it and restore modal state."""
    modal.lasso_selecting = False
    modal._mode_cache.clear()
    modal._mouse_init[:] = np.nan
    bpy.context.window.cursor_modal_set('DEFAULT')
    update_orbit_empty(modal)
    tool_end(modal)
    end_selection_drawing(modal)
    return
#
# CIRCLE SELECT FUNCS
def circle_sel_start(modal, context, event, keys, func_data):
    """Run the first circle-selection test under the cursor; remember whether
    this stroke adds or removes (circle_removing) for subsequent moves."""
    if 'Circle Add Selection' in keys:
        change = circle_selection_test(modal, True, False, modal.circle_radius)
    elif 'Circle Remove Selection' in keys:
        change = circle_selection_test(modal, False, True, modal.circle_radius)
        modal.circle_removing = True
    else:
        change = circle_selection_test(
            modal, False, False, modal.circle_radius)
    if change:
        modal.redraw = True
    return
def circle_sel_mouse(modal, context, event, func_data):
    """Continue painting the circle selection while the mouse moves,
    adding or removing depending on how the stroke started."""
    if modal.circle_removing == False:
        change = circle_selection_test(modal, True, False, modal.circle_radius)
    else:
        change = circle_selection_test(modal, False, True, modal.circle_radius)
    if change:
        modal.redraw = True
    return
def circle_sel_confirm(modal, context, event, keys, func_data):
    """End the current circle stroke but keep the tool armed for another
    stroke (restart), clearing the per-stroke cache."""
    modal.circle_removing = False
    modal._circle_sel_tool.restart()
    modal._mode_cache.clear()
    return
def circle_sel_inc(modal, context, event, keys, func_data):
    """Grow the circle-select radius by 10 px."""
    modal.circle_radius += 10
    return
def circle_sel_dec(modal, context, event, keys, func_data):
    """Shrink the circle-select radius by 10 px, clamped to a 10 px minimum."""
    modal.circle_radius -= 10
    if modal.circle_radius < 10:
        modal.circle_radius = 10
    return
def circle_sel_start_resize(modal, context, event, keys, func_data):
    """Switch to the radius-resize tool: anchor a reference point one radius
    away from the cursor (flipped near the region edge) and cache the old
    radius so cancel can restore it."""
    modal.circle_resizing = True
    modal._current_tool = modal._circle_resize_tool
    if modal._mouse_reg_loc[0]-modal.circle_radius < 0:
        # Too close to the left edge: place the anchor to the right instead.
        modal._mouse_init[:] = modal._mouse_reg_loc
        modal._mouse_init[0] += modal.circle_radius
    else:
        modal._mouse_init[:] = modal._mouse_reg_loc
        modal._mouse_init[0] -= modal.circle_radius
    modal._mode_cache.append(modal.circle_radius)
    return
def circle_sel_cancel(modal, context, event, keys, func_data):
    """Finish circle selection mode, committing what was painted to the
    undo stack, and tear the tool down."""
    add_to_undostack(modal, 0)
    modal.circle_selecting = False
    modal.circle_removing = False
    update_orbit_empty(modal)
    modal._mode_cache.clear()
    bpy.context.window.cursor_modal_set('DEFAULT')
    tool_end(modal)
    end_selection_drawing(modal)
    return
#
# CIRCLE SELECT RESIZE FUNCS
def circle_resize_mouse(modal, context, event, func_data):
    """Set the circle radius to the distance between the anchor point and
    the current mouse position."""
    prev_loc = Vector(modal._mouse_init)
    cur_loc = Vector(modal._mouse_reg_loc)
    diff = int((cur_loc-prev_loc).length)
    modal.circle_radius = diff
    return
def circle_resize_confirm(modal, context, event, keys, func_data):
    """Keep the new radius and return to the circle-select tool."""
    modal._mode_cache.clear()
    modal.circle_resizing = False
    modal._current_tool = modal._circle_sel_tool
    return
def circle_resize_cancel(modal, context, event, keys, func_data):
    """Restore the cached pre-resize radius and return to circle select."""
    modal.circle_radius = modal._mode_cache[0]
    modal._mode_cache.clear()
    modal.circle_resizing = False
    modal._current_tool = modal._circle_sel_tool
    return
#
# NAVIGATE CLEAR DRAWING
def clear_draw_pre_navigate(modal, context, event, func_data):
    """Hide the selection overlay while a navigation event passes through."""
    modal.selection_drawing = False
    empty_selection_drawing_lists(modal)
    return
def clear_draw_post_navigate(modal, context, event, func_data):
    """Re-enable the selection overlay after navigation finishes."""
    modal.selection_drawing = True
    return
#
# ROTATE NORMALS FUNCS
def rotate_norms_mouse(modal, context, event, func_data):
    """Rotate the selected normals by the signed screen-space angle swept by
    the mouse around the cached pivot (``_mode_cache[0]``).

    ``_mode_cache[1]`` accumulates the angle; ``_mode_cache[2]`` is the axis
    sign. Shift scales the increment by 0.1 for precision.
    """
    # Project the 3D pivot into region space to measure the swept angle.
    center = np.array(view3d_utils.location_3d_to_region_2d(
        modal.act_reg, modal.act_rv3d, modal._mode_cache[0]).to_3d())
    start_vec = Vector(modal._mouse_init-center).xy
    mouse_vec = Vector(modal._mouse_reg_loc-center).xy
    ang = mouse_vec.angle_signed(start_vec)
    if event.shift:
        ang *= 0.1
    if ang != 0.0:
        modal._mode_cache[1] = modal._mode_cache[1]+ang*modal._mode_cache[2]
        rotate_vectors(modal, modal._mode_cache[1])
        modal._mouse_init[:] = modal._mouse_reg_loc
        modal.redraw_active = True
    return
def rotate_norms_confirm(modal, context, event, keys, func_data):
    """Commit the normal rotation to the undo stack and reset rotate state
    (axis/mode, status bar, gizmo visibility, overlays)."""
    add_to_undostack(modal, 1)
    modal._mode_cache.clear()
    modal._mouse_init[:] = np.nan
    modal.translate_axis = 2
    modal.translate_mode = 0
    clear_translate_axis_draw(modal)
    modal._window.clear_status()
    modal.rotating = False
    tool_end(modal)
    gizmo_update_hide(modal, True)
    end_selection_drawing(modal)
    end_active_drawing(modal)
    return
def rotate_norms_cancel(modal, context, event, keys, func_data):
    """Abort the rotation: restore the cached normals and reset rotate state."""
    modal._container.new_norms[:] = modal._container.cache_norms
    set_new_normals(modal)
    modal._mode_cache.clear()
    modal._mouse_init[:] = np.nan
    modal.translate_axis = 2
    modal.translate_mode = 0
    clear_translate_axis_draw(modal)
    modal._window.clear_status()
    modal.redraw = True
    modal.rotating = False
    tool_end(modal)
    gizmo_update_hide(modal, True)
    end_selection_drawing(modal)
    end_active_drawing(modal)
    return
def rotate_pre_navigate(modal, context, event, func_data):
    """Pause rotation and hide the cursor while navigation passes through."""
    modal.rotating = False
    end_selection_drawing(modal)
    bpy.context.window.cursor_modal_set('NONE')
    return
def rotate_post_navigate(modal, context, event, func_data):
    """Resume rotation after navigation: reapply the accumulated angle (in
    free-rotate mode 0), re-anchor the mouse, restore the cursor and
    recompute the axis sign for the new view."""
    if modal.translate_mode == 0:
        rotate_vectors(modal, modal._mode_cache[1])
        modal.redraw_active = True
    modal._mouse_init[:] = modal._mouse_reg_loc
    bpy.context.window.cursor_modal_set('DEFAULT')
    modal.rotating = True
    modal.selection_drawing = True
    modal._mode_cache[2] = translate_axis_side(modal)
    return
def rotate_set_x(modal, context, event, keys, func_data):
    """Constrain the normal rotation to the X axis and reapply the angle."""
    translate_axis_change(modal, 'ROTATING', 0)
    modal._mode_cache[2] = translate_axis_side(modal)
    rotate_vectors(modal, modal._mode_cache[1]*modal._mode_cache[2])
    modal.redraw_active = True
    return
def rotate_set_y(modal, context, event, keys, func_data):
    """Constrain the normal rotation to the Y axis and reapply the angle."""
    translate_axis_change(modal, 'ROTATING', 1)
    modal._mode_cache[2] = translate_axis_side(modal)
    rotate_vectors(modal, modal._mode_cache[1]*modal._mode_cache[2])
    modal.redraw_active = True
    return
def rotate_set_z(modal, context, event, keys, func_data):
    """Constrain the normal rotation to the Z axis and reapply the angle."""
    translate_axis_change(modal, 'ROTATING', 2)
    modal._mode_cache[2] = translate_axis_side(modal)
    rotate_vectors(modal, modal._mode_cache[1]*modal._mode_cache[2])
    modal.redraw_active = True
    return
#
# SPHEREIZE FUNCS
def sphereize_mouse(modal, context, event, func_data):
    """Track whether the cursor hovers the sphereize UI panel."""
    hov_status = modal._window.test_hover(modal._mouse_reg_loc.tolist())
    modal.ui_hover = hov_status != None
    return
def sphereize_start_move(modal, context, event, keys, func_data):
    """Begin moving the sphereize target empty; cache state and swap to the move tool."""
    modal._window.set_status('VIEW TRANSLATION')
    # 2D screen-space position of the target empty, used as the move anchor.
    rco = view3d_utils.location_3d_to_region_2d(
        modal.act_reg, modal.act_rv3d, modal._target_emp.location)
    # Cache entries consumed by the move handlers:
    # [1] last mouse position, [2] original target location, [3] anchor rco.
    modal._mode_cache.append(modal._mouse_reg_loc.tolist())
    modal._mode_cache.append(modal._target_emp.location.copy())
    modal._mode_cache.append(rco)
    keymap_target_move(modal)
    modal._current_tool = modal._sphereize_move_tool
    return
def sphereize_reset(modal, context, event, keys, func_data):
    """Snap the target empty back to its start location and re-sphereize normals."""
    # Assumes _mode_cache[0] holds the initial target location, set when
    # sphereize mode was entered -- TODO confirm against the mode-entry code.
    modal._target_emp.location = modal._mode_cache[0]
    sphereize_normals(modal)
    return
def sphereize_confirm(modal, context, event, keys, func_data):
    """Forward a confirm click to the sphereize panel and track button hold.

    On press the panel (when visible) gets a click-down test; on release a
    click-up test.  click_hold mirrors the pressed state either way.
    """
    pressed = event.value == 'PRESS'
    if modal._sphere_panel.visible:
        handler = (modal._sphere_panel.test_click_down if pressed
                   else modal._sphere_panel.test_click_up)
        handler(modal._mouse_reg_loc.tolist(), event.shift, arguments=[event])
    modal.click_hold = pressed
    return
def sphereize_cancel(modal, context, event, keys, func_data):
    """Exit sphereize mode; False presumably means 'discard the result' --
    confirm against end_sphereize_mode."""
    end_sphereize_mode(modal, False)
    return
def toggle_x_ray(modal, context, event, keys, func_data):
    """Flip X-ray display mode and keep the UI toggle widget in sync."""
    modal._xray_bool.toggle_bool()
    modal._x_ray_mode = not modal._x_ray_mode
    return
#
# SPHEREIZE MOVE FUNCS
def sphereize_move_mouse(modal, context, event, func_data):
    """Update the target empty from mouse movement and re-sphereize normals."""
    move_target(modal, event.shift)
    sphereize_normals(modal)
    # Replace the cached previous mouse position in place; the original
    # pop(1)+insert(1, ...) pair is exactly a list item assignment.
    modal._mode_cache[1] = modal._mouse_reg_loc.tolist()
    modal.redraw_active = True
    return
def sphereize_move_confirm(modal, context, event, keys, func_data):
    """Accept the target move: reset translate state, return to the sphereize tool."""
    modal.translate_axis = 2
    modal.translate_mode = 0
    clear_translate_axis_draw(modal)
    modal._window.clear_status()
    keymap_target(modal)
    # Drop the three cache entries pushed by sphereize_start_move
    # (popped back-to-front so earlier indices remain valid).
    modal._mode_cache.pop(3)
    modal._mode_cache.pop(2)
    modal._mode_cache.pop(1)
    modal._current_tool = modal._sphereize_tool
    end_active_drawing(modal)
    return
def sphereize_move_cancel(modal, context, event, keys, func_data):
    """Abort the target move: restore the cached location and re-sphereize."""
    modal.translate_axis = 2
    modal.translate_mode = 0
    clear_translate_axis_draw(modal)
    modal._window.clear_status()
    modal.redraw = True
    # _mode_cache[2] is the original target location cached by
    # sphereize_start_move.
    modal._target_emp.location = modal._mode_cache[2].copy()
    keymap_target(modal)
    sphereize_normals(modal)
    # Drop the move-cache entries (back-to-front).
    modal._mode_cache.pop(3)
    modal._mode_cache.pop(2)
    modal._mode_cache.pop(1)
    modal._current_tool = modal._sphereize_tool
    end_active_drawing(modal)
    return
def _sphereize_move_axis(modal, event, axis):
    """Shared body for sphereize_move_set_x/y/z: constrain the target
    translation to *axis* (0=X, 1=Y, 2=Z), re-apply the move, and refresh
    the sphereized normals.

    Extracted because the three handlers were identical except for the axis.
    """
    translate_axis_change(modal, 'TRANSLATING', axis)
    move_target(modal, event.shift)
    sphereize_normals(modal)
    modal.redraw_active = True
def sphereize_move_set_x(modal, context, event, keys, func_data):
    """Constrain the sphereize-target translation to the X axis."""
    _sphereize_move_axis(modal, event, 0)
    return
def sphereize_move_set_y(modal, context, event, keys, func_data):
    """Constrain the sphereize-target translation to the Y axis."""
    _sphereize_move_axis(modal, event, 1)
    return
def sphereize_move_set_z(modal, context, event, keys, func_data):
    """Constrain the sphereize-target translation to the Z axis."""
    _sphereize_move_axis(modal, event, 2)
    return
#
# POINT FUNCS
def point_mouse(modal, context, event, func_data):
    """Track the mouse in point mode and flag whether it hovers the 2D UI."""
    hov_status = modal._window.test_hover(modal._mouse_reg_loc.tolist())
    # Use an identity comparison against None (PEP 8) instead of `!=`.
    modal.ui_hover = hov_status is not None
    return
def point_start_move(modal, context, event, keys, func_data):
    """Begin moving the point-mode target empty; cache state, swap to the move tool."""
    modal._window.set_status('VIEW TRANSLATION')
    # 2D screen-space position of the target empty, used as the move anchor.
    rco = view3d_utils.location_3d_to_region_2d(
        modal.act_reg, modal.act_rv3d, modal._target_emp.location)
    # Cache entries consumed by the move handlers:
    # [1] last mouse position, [2] original target location, [3] anchor rco.
    modal._mode_cache.append(modal._mouse_reg_loc.tolist())
    modal._mode_cache.append(modal._target_emp.location.copy())
    modal._mode_cache.append(rco)
    keymap_target_move(modal)
    modal._current_tool = modal._point_move_tool
    return
def point_reset(modal, context, event, keys, func_data):
    """Snap the target empty back to its start location and re-point normals."""
    # Assumes _mode_cache[0] holds the initial target location -- TODO
    # confirm against the point-mode entry code.
    modal._target_emp.location = modal._mode_cache[0]
    point_normals(modal)
    return
def point_confirm(modal, context, event, keys, func_data):
    """Forward confirm clicks to the point panel and track button hold state."""
    if event.value == 'PRESS':
        # Test 2d ui selection
        if modal._point_panel.visible:
            modal._point_panel.test_click_down(
                modal._mouse_reg_loc.tolist(), event.shift, arguments=[event])
        modal.click_hold = True
    else:
        if modal._point_panel.visible:
            modal._point_panel.test_click_up(
                modal._mouse_reg_loc.tolist(), event.shift, arguments=[event])
        modal.click_hold = False
    return
def point_cancel(modal, context, event, keys, func_data):
    """Exit point mode; False presumably means 'discard the result' --
    confirm against end_point_mode."""
    end_point_mode(modal, False)
    return
#
# POINT MOVE FUNCS
def point_move_mouse(modal, context, event, func_data):
    """Update the target empty from mouse movement and re-point the normals."""
    move_target(modal, event.shift)
    point_normals(modal)
    # Replace the cached previous mouse position in place; the original
    # pop(1)+insert(1, ...) pair is exactly a list item assignment.
    modal._mode_cache[1] = modal._mouse_reg_loc.tolist()
    modal.redraw_active = True
    return
def point_move_confirm(modal, context, event, keys, func_data):
    """Accept the target move: reset translate state and return to the point tool."""
    modal.translate_axis = 2
    modal.translate_mode = 0
    clear_translate_axis_draw(modal)
    modal._window.clear_status()
    keymap_target(modal)
    # Drop the three cache entries pushed by point_start_move
    # (popped back-to-front so earlier indices remain valid).
    modal._mode_cache.pop(3)
    modal._mode_cache.pop(2)
    modal._mode_cache.pop(1)
    modal._current_tool = modal._point_tool
    end_active_drawing(modal)
    return
def point_move_cancel(modal, context, event, keys, func_data):
    """Abort the target move: restore the cached location and re-point normals."""
    modal.translate_axis = 2
    modal.translate_mode = 0
    clear_translate_axis_draw(modal)
    modal._window.clear_status()
    # NOTE(review): unlike sphereize_move_cancel this does not set
    # modal.redraw = True -- confirm whether that is intentional.
    # _mode_cache[2] is the original target location cached by point_start_move.
    modal._target_emp.location = modal._mode_cache[2].copy()
    keymap_target(modal)
    point_normals(modal)
    modal._mode_cache.pop(3)
    modal._mode_cache.pop(2)
    modal._mode_cache.pop(1)
    modal._current_tool = modal._point_tool
    end_active_drawing(modal)
    return
def _point_move_axis(modal, event, axis):
    """Shared body for point_move_set_x/y/z: constrain the target translation
    to *axis* (0=X, 1=Y, 2=Z), re-apply the move, and refresh the pointed
    normals.

    Extracted because the three handlers were identical except for the axis.
    """
    translate_axis_change(modal, 'TRANSLATING', axis)
    move_target(modal, event.shift)
    point_normals(modal)
    modal.redraw_active = True
def point_move_set_x(modal, context, event, keys, func_data):
    """Constrain the point-target translation to the X axis."""
    _point_move_axis(modal, event, 0)
    return
def point_move_set_y(modal, context, event, keys, func_data):
    """Constrain the point-target translation to the Y axis."""
    _point_move_axis(modal, event, 1)
    return
def point_move_set_z(modal, context, event, keys, func_data):
    """Constrain the point-target translation to the Z axis."""
    _point_move_axis(modal, event, 2)
    return
#
# GIZMO FUNCS
def gizmo_mouse(modal, context, event, func_data):
    """Drive a gizmo rotation drag from mouse movement.

    Projects the mouse ray onto the plane of the active rotation axis,
    measures the signed angle from the cached start vector, and applies the
    delta either to the normals (when _mode_cache[4] is truthy) or to the
    orbit object's orientation.
    """
    # _mode_cache[0]: 2D start vector in the gizmo plane (re-anchored below).
    start_vec = modal._mode_cache[0]
    # Cast a ray from the mouse into the scene.
    view_vec = view3d_utils.region_2d_to_vector_3d(
        modal.act_reg, modal.act_rv3d, modal._mouse_reg_loc)
    view_orig = view3d_utils.region_2d_to_origin_3d(
        modal.act_reg, modal.act_rv3d, modal._mouse_reg_loc)
    line_a = view_orig
    line_b = view_orig + view_vec*10000
    # Get start vector to measure angle of mouse
    # _mode_cache[3] is the gizmo/orbit matrix (see gizmo_cancel); build the
    # world-space direction of the constrained axis.
    if modal.translate_axis == 0:
        giz_vec = modal._mode_cache[3] @ Vector((1, 0, 0)) - \
            modal._mode_cache[3].translation
    if modal.translate_axis == 1:
        giz_vec = modal._mode_cache[3] @ Vector((0, 1, 0)) - \
            modal._mode_cache[3].translation
    if modal.translate_axis == 2:
        giz_vec = modal._mode_cache[3] @ Vector((0, 0, 1)) - \
            modal._mode_cache[3].translation
    # NOTE(review): giz_vec is unbound if translate_axis is not 0/1/2;
    # presumably an invariant while a gizmo drag is active -- confirm.
    mouse_co_3d = intersect_line_plane(
        line_a, line_b, modal._mode_cache[3].translation, giz_vec)
    mouse_co_local = modal._mode_cache[3].inverted() @ mouse_co_3d
    # Get angle of current rotation
    ang_fac = 1.0
    if modal.translate_axis == 0:
        mouse_loc = mouse_co_local.yz
    elif modal.translate_axis == 1:
        mouse_loc = mouse_co_local.xz
        # Y uses an inverted sign (matches the -ang Euler below) --
        # presumably to keep drag direction consistent; confirm.
        ang_fac = -1.0
    elif modal.translate_axis == 2:
        mouse_loc = mouse_co_local.xy
    ang = start_vec.angle_signed(mouse_loc)*-1
    if event.shift:
        # Precision mode: shift scales the per-step angle to 10%.
        ang *= 0.1
    # Apply angle to normals or gizmo
    if ang != 0.0:
        # Accumulate the total angle and re-anchor the start vector.
        modal._mode_cache[1] = modal._mode_cache[1]+ang
        modal._mode_cache.pop(0)
        modal._mode_cache.insert(0, mouse_loc)
        if modal._mode_cache[4]:
            # Rotating the normals themselves.
            rotate_vectors(modal, modal._mode_cache[1]*ang_fac)
            modal._window.update_gizmo_rot(
                modal._mode_cache[1], modal._mode_cache[2])
            modal.redraw_active = True
        else:
            # Rotating only the gizmo/orbit orientation.
            if modal.translate_axis == 0:
                rot_mat = Euler([ang, 0, 0]).to_matrix().to_4x4()
            if modal.translate_axis == 1:
                rot_mat = Euler([0, -ang, 0]).to_matrix().to_4x4()
            if modal.translate_axis == 2:
                rot_mat = Euler([0, 0, ang]).to_matrix().to_4x4()
            modal._orbit_ob.matrix_world = modal._orbit_ob.matrix_world @ rot_mat
            modal._window.update_gizmo_orientation(
                modal._orbit_ob.matrix_world)
    return
def gizmo_confirm(modal, context, event, keys, func_data):
    """Finish a gizmo drag: reactivate the gizmos and commit normal edits."""
    for gizmo in modal._rot_gizmo.gizmos:
        gizmo.active = True
        gizmo.in_use = False
    # _mode_cache[4] is truthy when the drag rotated the normals (not just
    # the gizmo orientation); only then is there an edit to record for undo.
    if modal._mode_cache[4]:
        add_to_undostack(modal, 1)
    modal.gizmo_click = False
    modal.translate_mode = 0
    modal.translate_axis = 2
    modal._mode_cache.clear()
    modal.click_hold = False
    end_active_drawing(modal)
    tool_end(modal)
    return
def gizmo_cancel(modal, context, event, keys, func_data):
    """Abort a gizmo drag, restoring either the normals or the gizmo matrix."""
    if modal._mode_cache[4]:
        # The drag was rotating normals: restore the cached snapshot.
        modal._container.new_norms[:] = modal._container.cache_norms
        set_new_normals(modal)
    else:
        # The drag only reoriented the gizmo: restore its cached matrix.
        modal._orbit_ob.matrix_world = modal._mode_cache[3].copy()
        modal._window.update_gizmo_orientation(
            modal._orbit_ob.matrix_world)
    for gizmo in modal._rot_gizmo.gizmos:
        gizmo.active = True
        gizmo.in_use = False
    modal.gizmo_click = False
    modal.translate_mode = 0
    modal.translate_axis = 2
    modal._mode_cache.clear()
    end_active_drawing(modal)
    tool_end(modal)
    modal.click_hold = False
    return
#
# MIRROR FUNCS
def mirror_x(modal, context, event, keys, func_data):
    """Mirror the edited normals across axis 0 (X), then leave the tool."""
    mirror_normals(modal, 0)
    tool_end(modal)
    return
def mirror_y(modal, context, event, keys, func_data):
    """Mirror the edited normals across axis 1 (Y), then leave the tool."""
    mirror_normals(modal, 1)
    tool_end(modal)
    return
def mirror_z(modal, context, event, keys, func_data):
    """Mirror the edited normals across axis 2 (Z), then leave the tool."""
    mirror_normals(modal, 2)
    tool_end(modal)
    return
def mirror_cancel(modal, context, event, keys, func_data):
    """Leave the mirror tool without applying anything."""
    tool_end(modal)
    return
#
# FLATTEN FUNCS
def flatten_x(modal, context, event, keys, func_data):
    """Flatten the edited normals along axis 0 (X), then leave the tool."""
    flatten_normals(modal, 0)
    tool_end(modal)
    return
def flatten_y(modal, context, event, keys, func_data):
    """Flatten the edited normals along axis 1 (Y), then leave the tool."""
    flatten_normals(modal, 1)
    tool_end(modal)
    return
def flatten_z(modal, context, event, keys, func_data):
    """Flatten the edited normals along axis 2 (Z), then leave the tool."""
    flatten_normals(modal, 2)
    tool_end(modal)
    return
def flatten_cancel(modal, context, event, keys, func_data):
    """Leave the flatten tool without applying anything."""
    tool_end(modal)
    return
#
# ALIGN FUNCS
def align_pos_x(modal, context, event, keys, func_data):
    """Align normals to +X (axis 0, direction +1), then leave the tool."""
    align_to_axis_normals(modal, 0, 1)
    tool_end(modal)
    return
def align_pos_y(modal, context, event, keys, func_data):
    """Align normals to +Y (axis 1, direction +1), then leave the tool."""
    align_to_axis_normals(modal, 1, 1)
    tool_end(modal)
    return
def align_pos_z(modal, context, event, keys, func_data):
    """Align normals to +Z (axis 2, direction +1), then leave the tool."""
    align_to_axis_normals(modal, 2, 1)
    tool_end(modal)
    return
def align_neg_x(modal, context, event, keys, func_data):
    """Align normals to -X (axis 0, direction -1), then leave the tool."""
    align_to_axis_normals(modal, 0, -1)
    tool_end(modal)
    return
def align_neg_y(modal, context, event, keys, func_data):
    """Align normals to -Y (axis 1, direction -1), then leave the tool."""
    align_to_axis_normals(modal, 1, -1)
    tool_end(modal)
    return
def align_neg_z(modal, context, event, keys, func_data):
    """Align normals to -Z (axis 2, direction -1), then leave the tool."""
    align_to_axis_normals(modal, 2, -1)
    tool_end(modal)
    return
def align_cancel(modal, context, event, keys, func_data):
    """Leave the align tool without applying anything."""
    tool_end(modal)
    return
|
from typing import List
import numpy as np
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.result_objects.op2_objects import ScalarObject
from pyNastran.f06.f06_formatting import write_floats_13e, write_imag_floats_13e
class AppliedLoadsVectorArray(ScalarObject):
    """Base container for OP2 'APPLIED LOADS VECTOR' results.

    Stores per-element applied force/moment vectors as a
    (ntimes, itotal, 6) array of [f1, f2, f3, m1, m2, m3].
    """
    def __init__(self, data_code, is_sort1, isubcase, dt):
        ScalarObject.__init__(self, data_code, isubcase)
        #self.dt = dt
        self.ntimes = 0   # number of time/load steps
        self.itotal = 0   # number of entries per step
        self.eids = None      # element ids (int32 array after build())
        self.sources = None   # load-source labels (|S8 array after build())
        self.data = None  # forces/moments
    def data_type(self):
        """numpy dtype string for self.data; implemented by subclasses."""
        raise NotImplementedError()
    def _reset_indices(self):
        """Reset the running write index used while filling the arrays."""
        self.itotal = 0
    def build(self):
        """sizes the vectorized attributes of the AppliedLoadsVectorArray"""
        self.eids = np.zeros(self.itotal, dtype='int32')
        self.sources = np.zeros(self.itotal, dtype='|S8')
        #[f1, f2, f3, m1, m2, m3]
        self.data = np.zeros((self.ntimes, self.itotal, 6), dtype=self.data_type())
    def get_stats(self, short=False) -> List[str]:
        """Return a human-readable summary of the table's size and contents."""
        if not self.is_built:
            # NOTE(review): self.ntotal is not set in this class (only
            # itotal); presumably provided by ScalarObject -- confirm.
            return [
                '<%s>\n' % self.__class__.__name__,
                '  ntimes: %i\n' % self.ntimes,
                '  ntotal: %i\n' % self.ntotal,
            ]
        msg = []
        nelements = 0
        # BUG FIX: removed the dead store `ntimes = len(self._times)` that
        # was immediately overwritten below.
        ntimes = self.data.shape[0]
        if self.nonlinear_factor not in (None, np.nan):  # transient
            msg.append('  type=%s ntimes=%s nelements=%s\n'
                       % (self.__class__.__name__, ntimes, nelements))
        else:
            msg.append('  type=%s nelements=%s\n'
                       % (self.__class__.__name__, nelements))
        msg.append('  data: [f1, f2, f3, m1, m2, m3] shape=%s dtype=%s\n'
                   % ([int(i) for i in self.data.shape], self.data.dtype))
        msg.append('  sources, eids\n')
        msg += self.get_data_code()
        return msg
    def add_sort1(self, node_id, eid, source, v1, v2, v3, v4, v5, v6):
        """unvectorized method for adding SORT1 transient data"""
        # BUG FIX: the original assert message referenced an undefined name
        # `dt`, turning a failed assertion into a NameError.
        assert isinstance(eid, integer_types) and eid > 0, 'eid=%s' % eid
        msg = "node_id=%s v1=%s v2=%s v3=%s" % (node_id, v1, v2, v3)
        assert 0 < node_id < 1000000000, msg
        #[f1, f2, f3, m1, m2, m3]
        # NOTE(review): eid/source are validated but never stored into
        # self.eids/self.sources, and itotal is not advanced -- confirm
        # whether the caller manages those.
        self.data[self.itime, self.itotal, :] = [v1, v2, v3, v4, v5, v6]
class RealAppliedLoadsVectorArray(AppliedLoadsVectorArray):
    """Real-valued APPLIED LOADS VECTOR table with an F06 writer."""
    def __init__(self, data_code, is_sort1, isubcase, dt):
        # BUG FIX: is_sort1 was dropped, which shifted isubcase/dt into the
        # wrong positional slots of the 4-argument base-class constructor.
        AppliedLoadsVectorArray.__init__(self, data_code, is_sort1, isubcase, dt)
    def data_type(self):
        # BUG FIX: this used `raise 'float32'`, which is a TypeError in
        # Python 3 (exceptions must derive from BaseException); the intent
        # is to report the numpy dtype used by build().
        return 'float32'
    def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
                  page_num=1, is_mag_phase=False, is_sort1=True):
        """Write the table in F06 format; returns the last page number used."""
        if header is None:
            header = []
        words = ['                                                  APPLIED LOADS VECTOR\n',
                 '\n',
                 '      EID    SOURCE             FX            FY            FZ            MX            MY            MZ\n']
        eids = self.eids
        for itime, dt in enumerate(self._times):
            if self.nonlinear_factor not in (None, np.nan):
                # Transient: stamp the current step into the page header.
                if isinstance(dt, float):
                    header[1] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
                else:
                    header[1] = ' %s = %10i\n' % (self.data_code['name'], dt)
            f06_file.write(''.join(header + words))
            f1 = self.data[itime, :, 0]
            f2 = self.data[itime, :, 1]
            f3 = self.data[itime, :, 2]
            m1 = self.data[itime, :, 3]
            m2 = self.data[itime, :, 4]
            m3 = self.data[itime, :, 5]
            source = ''
            for eid, f1i, f2i, f3i, m1i, m2i, m3i in zip(eids, f1, f2, f3, m1, m2, m3):
                vals = [f1i, f2i, f3i, m1i, m2i, m3i]
                vals2 = write_floats_13e(vals)
                (dx, dy, dz, rx, ry, rz) = vals2
                f06_file.write('%14i %6s     %-13s %-13s %-13s %-13s %-13s %s\n' % (
                    # TODO: fix this...
                    eid, source, dx, dy, dz, rx, ry, rz))
            f06_file.write(page_stamp % page_num)
            page_num += 1
        return page_num - 1
class ComplexAppliedLoadsVectorArray(AppliedLoadsVectorArray):
    """Complex-valued APPLIED LOADS VECTOR table with an F06 writer
    (real/imag or magnitude/phase)."""
    def __init__(self, data_code, is_sort1, isubcase, dt):
        # BUG FIX: is_sort1 was dropped, which shifted isubcase/dt into the
        # wrong positional slots of the 4-argument base-class constructor.
        AppliedLoadsVectorArray.__init__(self, data_code, is_sort1, isubcase, dt)
    def data_type(self):
        # BUG FIX: this used `raise 'float32'` (a TypeError in Python 3).
        # The table holds complex results (see write_imag_floats_13e below),
        # so report a complex dtype.
        return 'complex64'
    def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
                  page_num=1, is_mag_phase=False, is_sort1=True):
        """Write the table in F06 format; returns the last page number used."""
        if header is None:
            header = []
        words = ['                                                  APPLIED LOADS VECTOR\n',
                 '\n',
                 '      EID    SOURCE             FX            FY            FZ            MX            MY            MZ\n']
        eids = self.eids
        for itime, dt in enumerate(self._times):
            if self.nonlinear_factor not in (None, np.nan):
                # Transient: stamp the current step into the page header.
                if isinstance(dt, float):
                    header[1] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
                else:
                    header[1] = ' %s = %10i\n' % (self.data_code['name'], dt)
            f06_file.write(''.join(header + words))
            f1 = self.data[itime, :, 0]
            f2 = self.data[itime, :, 1]
            f3 = self.data[itime, :, 2]
            m1 = self.data[itime, :, 3]
            m2 = self.data[itime, :, 4]
            m3 = self.data[itime, :, 5]
            source = ''
            for eid, f1i, f2i, f3i, m1i, m2i, m3i in zip(eids, f1, f2, f3, m1, m2, m3):
                vals = [f1i, f2i, f3i, m1i, m2i, m3i]
                vals2 = write_imag_floats_13e(vals, is_mag_phase)
                (dxr, dxi, dyr, dyi, dzr, dzi,
                 rxr, rxi, ryr, ryi, rzr, rzi) = vals2  # TODO :verify
                # Two rows per element: real parts, then imaginary parts.
                f06_file.write('%14i %6s     %-13s %-13s %-13s %-13s %-13s %s\n'
                               '%14s %6s     %-13s %-13s %-13s %-13s %-13s %s\n' % (
                    eid, source, dxr, dyr, dzr, rxr, ryr, rzr,
                    '', '', dxi, dyi, dzi, rxi, ryi, rzi))
            f06_file.write(page_stamp % page_num)
            page_num += 1
        return page_num - 1
|
import sys
import os
import argparse
import torch
import time
import datetime
import pytz
#----------------------------------------------------------
def add_if_absent_(opt, names, val):
    """Set each attribute listed in `names` on `opt` to `val`, skipping any
    attribute that is already present."""
    for attr in names:
        if hasattr(opt, attr):
            continue
        setattr(opt, attr, val)
#----------------------------------------------------------
def raise_if_absent(opt, names, who):
    """Raise if `opt` is missing any attribute listed in `names`.

    The error names the first missing attribute, matching the order of
    `names`, and mentions the requiring component `who`.
    """
    missing = [attr for attr in names if not hasattr(opt, attr)]
    if missing:
        raise Exception("%s requires %s, but it's missing." % (who, missing[0]))
#----------------------------------------------------------
def set_if_none(opt, name, val):
    """Overwrite attribute `name` on `opt` with `val` only when it is None."""
    current = getattr(opt, name)
    if current is None:
        setattr(opt, name, val)
#----------------------------------------------------------
#----------------------------------------------------------
def is_writable(pathname):
    """Return True if `pathname` can be opened for writing.

    BUG FIX: the original opened with 'wb', truncating an existing file to
    length 0 as a side effect (which its own comment flagged).  Append mode
    keeps the probe but preserves existing contents.  Note a nonexistent,
    writable path is still created as an empty file.
    """
    try:
        fp = open(pathname, 'ab')
    except IOError:
        return False
    else:
        fp.close()
        return True
#----------------------------------------------------------
def divup(a, b):
    """Integer division of `a` by `b` rounded toward positive infinity
    (ceiling division)."""
    quotient, remainder = divmod(a, b)
    return quotient + 1 if remainder else quotient
#----------------------------------------------------------
def stem_name(fname, suffixes):
    """Strip the first matching suffix from `fname`.

    `suffixes` may be a single string or a list.  The name is returned
    unchanged when no suffix matches or when stripping would leave it empty.
    """
    suffix_list = suffixes if isinstance(suffixes, list) else [suffixes]
    for sfx in suffix_list:
        if fname.endswith(sfx) and len(fname) > len(sfx):
            return fname[:len(fname) - len(sfx)]
    return fname
#----------------------------------------------------------
def raise_if_nonpositive_any(opt, arg_names):
    """Raise ValueError if any named attribute of `opt` is not strictly positive."""
    attrs = vars(opt)
    for arg_name in arg_names:
        value = attrs[arg_name]
        if value <= 0:
            raise ValueError('%s must be positive: %s.' % (arg_name, str(value)))
#----------------------------------------------------------
def raise_if_None_any(opt, arg_names):
    """Raise ValueError if any named attribute of `opt` is None."""
    attrs = vars(opt)
    for arg_name in arg_names:
        if attrs[arg_name] is None:
            raise ValueError('%s must not be None.' % arg_name)
#----------------------------------------------------------
def show_args(opt, arg_names, header='', do_show_all=False):
    """Log selected attributes of `opt` via logging().

    With do_show_all every name is printed verbatim; otherwise unspecified
    values (None, negative numbers, empty strings) are skipped and booleans
    are reported only when turned on.
    """
    attrs = vars(opt)
    if header:
        logging(header)
    def emit(name, value):
        logging('  %s= %s' % (name, value))
    def emit_bool(name, value):
        if value:
            logging('  %s is turned on.' % name)
    def unspecified(value):
        if value is None:
            return True
        if (isinstance(value, int) or isinstance(value, float)) and value < 0:
            return True
        return isinstance(value, str) and len(value) == 0
    if do_show_all:
        for name in arg_names:
            emit(name, attrs[name])
        return
    for name in arg_names:
        value = attrs[name]
        if unspecified(value):
            continue
        if isinstance(value, bool):
            emit_bool(name, value)
        else:
            emit(name, value)
#----------------------------------------------------------
def raise_if_negative(value, kw):
    """Raise ValueError unless `value` is a number >= 0 (None also raises)."""
    if value is None or value < 0:
        raise ValueError(kw + ' must be nonnegative.')


def raise_if_nonpositive(value, kw):
    """Raise ValueError unless `value` is a number > 0 (None also raises)."""
    if value is None or value <= 0:
        raise ValueError(kw + ' must be positive: ' + str(value))


def raise_if_None(value, kw):
    """Raise ValueError when `value` is None."""
    if value is None:
        raise ValueError(kw + ' must not be None.')


def raise_if_nan(value):
    """Raise when `value` is NaN (NaN is the only value unequal to itself)."""
    if value != value:
        raise Exception("nan was detected.")
#----------------------------------------------------------
class Clock(object):
    """Accumulating CPU/wall stopwatch.

    Tracks elapsed processor time ('clk') and wall-clock time ('tim') across
    tick()/suspend()/resume() cycles; tick() returns a summary string.
    """
    def __init__(self):
        self.data = {}
        # BUG FIX: time.clock() was removed in Python 3.8;
        # time.process_time() is the closest replacement for CPU time.
        self.data['clk'] = time.process_time()
        self.data['tim'] = time.time()
        self.data['accum_clk'] = 0
        self.data['accum_tim'] = 0
    def tick(self):
        """Accumulate time since the last tick/resume; return a 'clk,...,tim,...' summary."""
        clk = time.process_time()
        tim = time.time()
        self.data['accum_clk'] += clk - self.data['clk']
        self.data['accum_tim'] += tim - self.data['tim']
        self.data['clk'] = clk
        self.data['tim'] = tim
        return ( 'clk,' + '%.5f' % (self.data['accum_clk']) +
                 ',tim,' + '%.5f' % (self.data['accum_tim']) )
    def suspend(self):
        """Stop accumulating; returns the summary from the closing tick."""
        return self.tick()
    def resume(self):
        """Restart accumulation from now (time spent suspended is not counted)."""
        self.data['clk'] = time.process_time()
        self.data['tim'] = time.time()
#----------------------------------------------------------
def logging(str, filename=None):
    """Print a message to stdout (flushed), optionally appending it to a file.

    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    for backward compatibility with existing callers.
    """
    if filename:
        with open(filename, 'a') as flog:
            flog.write(str + '\n')
    print(str)
    sys.stdout.flush()
#----------------------------------------------------------
def reset_logging(filename):
    """Truncate (creating if needed) the log file so logging starts fresh.

    A falsy filename is a no-op.
    """
    if filename:
        with open(filename, 'w+'):
            pass
#----------------------------------------------------------
def timeLog(msg, filename=None):
    """Prefix `msg` with a US/Eastern timestamp and pass it to logging().

    Requires the third-party pytz package (imported at module level).
    """
    s = datetime.datetime.now(pytz.timezone('US/Eastern')).strftime('%Y-%m-%d %H:%M:%S %Z') + ': ' + msg
    logging(s, filename)
#----------------------------------------------------------
class Local_state:
    """Mutable (epoch, update-count, lr-coefficient) triple, for convenience
    in saving/restoring a training snapshot."""
    def __init__(self, epo=0, upd=0, lr_coeff=1, inplist=None):
        self.reset(epo, upd, lr_coeff)
        if inplist is not None:
            self.from_list(inplist)
    def to_list(self):
        """Serialize to [epo, upd, lr_coeff]."""
        return [self._epo, self._upd, self._lr_coeff]
    def from_list(self, list):
        """Restore from a list produced by to_list().  NOTE(review): the
        parameter shadows the builtin `list`; kept for compatibility."""
        epo, upd, lr_coeff = list[0], list[1], list[2]
        self.reset(epo, upd, lr_coeff)
    def reset(self, epo=0, upd=0, lr_coeff=1):
        """Overwrite all three fields."""
        self._epo, self._upd, self._lr_coeff = epo, upd, lr_coeff
    def get(self):
        """Return the (epo, upd, lr_coeff) tuple."""
        return self._epo, self._upd, self._lr_coeff
    def __str__(self):
        return 'epo:%s,upd:%s,lr_coeff:%s' % (self._epo, self._upd, self._lr_coeff)
#----------------------------------------------------------
class Global_state:
    """Cumulative epoch/update/cycle counters across training phases
    ('g' for global)."""
    def __init__(self, inplist=None):
        self._g_epo = 0
        self._g_upd = 0
        self._g_lc = 0
        if inplist is not None:
            self.from_list(inplist)
    def to_list(self):
        """Serialize to [g_epo, g_upd, g_lc]."""
        return [self._g_epo, self._g_upd, self._g_lc]
    def from_list(self, inp):
        """Restore from a list produced by to_list()."""
        self._g_epo, self._g_upd, self._g_lc = inp[0], inp[1], inp[2]
    def update(self, inc_epo, inc_upd):
        """Advance counters; call at the end of base_update or gulf_update."""
        self._g_epo += inc_epo
        self._g_upd += inc_upd
        self._g_lc += 1
    def epo(self, local_epo):
        """Global epoch number = accumulated + local."""
        return self._g_epo + local_epo
    def upd(self, local_upd):
        """Global update count = accumulated + local."""
        return self._g_upd + local_upd
    def lc(self):
        """Number of completed update cycles."""
        return self._g_lc
    def __str__(self):
        return 'g_epo:%d,g_upd:%d,g_lc:%d' % (self._g_epo, self._g_upd, self._g_lc)
#----------------------------------------------------------
class ArgParser_HelpWithDefaults(argparse.ArgumentParser):
    """ArgumentParser that appends ' Default: <value>' to each option's help.

    Options with no default (or the auto-added '-h') keep their help text
    unchanged.
    """
    def add_argument(self, *args, help=None, default=None, **kwargs):
        kwargs['help'] = '' if help is None else help
        # Skip '-h': ArgumentParser registers it itself with a sentinel
        # default that should not be advertised.
        if default is not None and args[0] != '-h':
            kwargs['default'] = default
            kwargs['help'] += ' Default: {}'.format(default)
        super().add_argument(*args, **kwargs)
#----------------------------------------------------------
def are_these_same(o0, o1, names):
    """Return True when `o0` and `o1` agree on every attribute in `names`."""
    return all(getattr(o0, name) == getattr(o1, name) for name in names)
#----------------------------------------------------------
def copy_params(src, dst):
for key, value in dst.items():
value.data.copy_(src[key])
#----------------------------------------------------------
def clone_params(src, do_copy_requires_grad=False):
p = { key: torch.zeros_like(value).data.copy_(value) for key, value in src.items() }
if do_copy_requires_grad:
for k,v in p.items():
v.requires_grad = src[k].requires_grad
return p
#----------------------------------------------------------
def print_params(params):
    """Pretty-print a {name: tensor} dict: name, shape, type, grad/leaf flags."""
    if not params:
        return
    width = max(len(name) for name in params)
    for name, tensor in sorted(params.items()):
        print(name.ljust(width + 3), str(tuple(tensor.shape)).ljust(23),
              torch.typename(tensor), tensor.requires_grad, tensor.is_leaf)


def print_num_params(params):
    """Log the total number of trainable (requires_grad) parameters."""
    n_parameters = sum(p.numel() for p in params.values() if p.requires_grad)
    logging('#parameters:' + str(n_parameters))
from itertools import chain
import json
import re
import time
from common.logging import get_logger
from common.utils import retry
from exceptions import ScrappingError
from scrapper.scripts import xhr_intercept_response
from scrapper.driver import forced_click
from selenium.common.exceptions import (
TimeoutException,
WebDriverException
)
logger = get_logger(name='scrapper')
def log(text):
    """Shortcut for debug-level output on the module's scrapper logger."""
    logger.debug(text)
def encode_date(dt):
    """Format a date/datetime as DD/MM/YYYY, the format Bankia forms expect."""
    return format(dt, '%d/%m/%Y')
@retry(exceptions=(TimeoutException, WebDriverException), logger=logger)
def login(browser, username, password):
    """Log into Bankia online banking and dismiss any post-login popups.

    Retries on Selenium timeout/driver errors via the @retry decorator.
    """
    log('Loading BANKIA main page')
    browser.get('https://www.bankia.es')
    try:
        browser.driver.find_element_by_css_selector('a#CybotCookiebotDialogBodyButtonAccept').click()
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; catching Exception preserves the best-effort cookie
    # dismissal without hiding interpreter-level exits.
    except Exception:
        log('Timeout trying to click on cookie accept button, continuing anyway')
    log('Opening login form')
    browser.find_element_by_css_selector('a.fc-openLogin').click()
    browser.driver.switch_to_frame(browser.find_element_by_id('login-iframe').result)
    log('Filling login form')
    browser.find_element_by_css_selector('form[name=formLogin] input#user').send_keys(username)
    browser.find_element_by_css_selector('form[name=formLogin] input#password').send_keys(password)
    log('Submitting login')
    browser.find_element_by_css_selector('form[name=formLogin] button[type=submit]').click()
    # Close popup if any
    log('Waiting for popups to close them')
    modal_close_buttons = browser.find_elements_by_css_selector('div.modal button', timeout=10, do_raise=False)
    if modal_close_buttons:
        buttons = modal_close_buttons.filter(lambda el: 'cerrar' in el.text.lower())
        if buttons:
            buttons[0].forced_click()
            log('Popup closed')
        else:
            log('No popups found')
    else:
        log('No popups found')
@retry(exceptions=(TimeoutException, WebDriverException), logger=logger)
def get_account_transactions(browser, account_number, from_date, to_date):
    """Scrape all movements for `account_number` between the two dates.

    Drives Bankia's account-movements advanced search, intercepts the XHR
    responses feeding the infinite-scroll pagination, and returns the raw
    movement dicts ordered oldest-first.
    """
    log('Loading BANKIA account list page')
    browser.get('https://www.bankia.es/oficina/particulares/#/cuentas')
    # Wait for page rendering all elements, otherwise the next queries change at some
    # point and the resulting elements are inusable afterwards
    time.sleep(1)
    log('Locating account row')
    # Account digits may be interleaved with whitespace/markup in the HTML.
    account_matcher = re.compile(r'.*?' + ''.join([r'\s*{}'.format(a) for a in account_number]), re.DOTALL)
    account_rows = browser.find_elements_by_css_selector('table tr.table__detail').result
    account_row = None
    for row in account_rows:
        if account_matcher.match(row.get_attribute('innerHTML')):
            account_row = row
            # BUG FIX: stop at the first matching row; the original kept
            # scanning and silently used the *last* match.
            break
    if account_row is None:
        # BUG FIX: the original raised a confusing NameError on account_row
        # when no row matched; fail with an explicit scrapping error.
        raise ScrappingError('bankia account', account_number, 'Account row not found')
    log('Loading account advanced search')
    forced_click(account_row.find_element_by_css_selector('div[role=button].dropdown'))
    browser.find_element_by_css_selector('li a[href="#/cuentas/movimientos"]').forced_click()
    browser.find_element_by_css_selector('oip-drop-section-search div[role="button"] i').forced_click()
    log('Filling date query parameters')
    browser.find_element_by_css_selector('input#campoDesdeBuscador', visible=True).clear().send_keys(encode_date(from_date))
    browser.find_element_by_css_selector('input#campoHastaBuscador').clear().send_keys(encode_date(to_date))
    log('Setting up XHR request interceptor')
    script = xhr_intercept_response(
        match_url="cuenta.movimiento",
        output="interceptedResponse",
    )
    browser.driver.execute_script(script)
    log('Launching the initial search')
    browser.find_elements_by_css_selector('button').filter(lambda element: 'Buscar' in element.text)[0].focus().forced_click()
    intercepted_responses = []
    intercepted_responses_count = 0
    still_have_results = True
    # Iterate trough all the infinite scrolling pagination
    while still_have_results:
        t0 = time.time()
        while intercepted_responses_count == len(intercepted_responses):
            # Inner Loop to wait for the page to load and push the new transactions
            # This scrolling command is just a visual debug aid
            browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            browser.find_element_by_id("interceptedResponse")
            intercepted_json = browser.execute_script("return JSON.stringify(document.getElementById('interceptedResponse').responses)")
            intercepted_responses = json.loads(intercepted_json)
            time.sleep(0.1)
            still_have_results = False if intercepted_responses[-1] is None else intercepted_responses[-1]['indicadorMasRegistros']
            t1 = time.time()
            if t1 - t0 > 20:
                raise ScrappingError('bankia account', account_number, 'Timeout while waiting to load more results')
        intercepted_responses_count = len(intercepted_responses)
        # Weirdest pagination on earth, where you do a request with 40 results, but you do two paginations of 20 results
        # in between. Each time we exit the inner loop because we intercepted a new request, we'll do this extra pagination click,
        # except when the more results indicator is false. In this case, the browser won't show the last records, but we'll already
        # have it in the intercepted request
        if still_have_results:
            log('Loading more results (preloaded)')
            browser.find_element_by_css_selector('oip-pagination').forced_click()
            log('Loading more results')
            browser.find_element_by_css_selector('oip-pagination').forced_click()
            time.sleep(0.1)
    # Results come from newer to older, we want it the other way around, that why we reverse them
    results = list(reversed(list(chain.from_iterable([response['movimientos'] for response in intercepted_responses if response is not None]))))
    return results
@retry(exceptions=(TimeoutException, WebDriverException), logger=logger)
def get_credit_card_transactions(browser, card_number, from_date, to_date):
    """Scrape all movements for credit card `card_number` between the dates.

    Drives Bankia's card-movements advanced search, intercepts the XHR
    responses feeding the 'Ver mas resultados' pagination, and returns the
    raw movement dicts.
    """
    log('Open dedicated account page')
    browser.get('https://www.bankia.es/oficina/particulares/#/tarjetas/mis-tarjetas')
    log('Locating card row')
    # Match the card by its digits after stripping non-word characters.
    credit_card_number_element = (
        browser
        .find_elements_by_css_selector('.oip-tarjetas-posicion table table ul li')
        .filter(lambda el: card_number == re.sub(r'[^\w]', '', el.get_attribute('textContent')))
        [0]
    )
    log('Opening card options menu')
    credit_card_row = credit_card_number_element.find_element_by_xpath('ancestor-or-self::tr[contains(@class, "table-data")]')
    credit_card_row.find_element_by_css_selector('oip-commons-vertical-operate button').forced_click()
    log('Load advanced search')
    credit_card_row.find_element_by_css_selector('ul li a[href*="movimientos"]').click()
    browser.find_element_by_css_selector('form#formFiltroMovimientosTarjetas div[role="button"]').forced_click()
    log('Filling date query parameters')
    time.sleep(2)
    browser.find_element_by_css_selector('form#formFiltroMovimientosTarjetas input#optionsRadios2').select()  # Select find between dates option
    browser.find_element_by_css_selector('input#desde').clear().send_keys(encode_date(from_date))
    browser.find_element_by_css_selector('input#hasta').clear().send_keys(encode_date(to_date))
    # Execute search
    log('Setting up XHR request interceptor')
    script = xhr_intercept_response(
        match_url="tarjetas/movimientos",
        output="interceptedResponse",
    )
    browser.driver.execute_script(script)
    log('Launching the initial search')
    browser.find_elements_by_css_selector('form#formFiltroMovimientosTarjetas button').filter(lambda el: 'Buscar' in el.text)[0].click()
    intercepted_responses = []
    intercepted_responses_count = 0
    still_have_results = True
    # Iterate trough all the infinite scrolling pagination
    while still_have_results:
        while intercepted_responses_count == len(intercepted_responses):
            # Inner Loop to wait for the page to load and push the new transactions
            # The scrolling command is just a visual debug aid
            browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            browser.find_element_by_id("interceptedResponse")
            intercepted_json = browser.execute_script("return JSON.stringify(document.getElementById('interceptedResponse').responses)")
            intercepted_responses = json.loads(intercepted_json)
            time.sleep(0.1)
        intercepted_responses_count = len(intercepted_responses)
        still_have_results = False if intercepted_responses[-1] is None else intercepted_responses[-1]['indicadorMasMovimientos']
        # Trigger pagination by clicking the "Ver mas resultados" button
        if still_have_results:
            log('Loading more results')
            browser.find_element_by_css_selector('.masMovimientos').forced_click()
            time.sleep(0.1)
    # NOTE(review): the original comment claimed the results were already
    # oldest-first with no reversal needed, yet the code DOES reverse them
    # (mirroring get_account_transactions, whose API returns newest-first).
    # Confirm the intended order.
    results = list(reversed(list(chain.from_iterable([response['movimientosTarjeta'] for response in intercepted_responses if response is not None]))))
    return results
|
# Repository: nephomaniac/nephoria
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2014, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <EMAIL>
import copy
import re

from boto import __version__ as boto_version
from boto.ec2.cloudwatch import CloudWatchConnection
from boto.ec2.cloudwatch.alarm import MetricAlarm
from boto.ec2.regioninfo import RegionInfo

from cloud_utils.log_utils import printinfo
from nephoria.baseops.botobaseops import BotoBaseOps
# Public AWS CloudWatch endpoints, keyed by region name; consulted by
# CWops.get_cw_connection_args() when a region (but no endpoint) is given.
CWRegionData = {
    'us-east-1': 'monitoring.us-east-1.amazonaws.com',
    'us-west-1': 'monitoring.us-west-1.amazonaws.com',
    'eu-west-1': 'monitoring.eu-west-1.amazonaws.com',
    'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com',
    'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com'
}
# Dimension names usable when filtering EC2 instance metrics.
DimensionArray = ['AutoScalingGroupName', 'ImageId', 'InstanceId', 'InstanceType']
# Statistic names accepted by get_metric_statistics(); validateStats() expects
# values in this order.
StatsArray = ['Average', 'Sum', 'Maximum', 'Minimum','SampleCount']
# Comparison operators usable in metric alarms.
ComparisonOperator = ['>=', '>', '<', '<=']
# Per-instance metrics with their CloudWatch units.
InstanceMetricArray = [
    {'name':'CPUUtilization','unit':'Percent' },
    {'name':'DiskReadOps','unit':'Count'},
    {'name':'DiskWriteOps','unit':'Count'},
    {'name':'DiskReadBytes','unit': 'Bytes'},
    {'name':'DiskWriteBytes','unit':'Bytes'},
    {'name':'NetworkIn','unit':'Bytes'},
    {'name':'NetworkOut','unit':'Bytes'}
]
# Instance/system status-check metrics.
StatusMetricArray = [
    {'name':'StatusCheckFailed','unit':'Count'},
    {'name':'StatusCheckFailed_Instance','unit':'Count'},
    {'name':'StatusCheckFailed_System','unit':'Count'}
]
# EBS volume metrics.
EbsMetricsArray = [
    {'name':'VolumeReadBytes','unit':'Bytes'},
    {'name':'VolumeWriteBytes','unit':'Bytes'},
    {'name':'VolumeReadOps','unit':'Count'},
    {'name':'VolumeWriteOps','unit':'Count'},
    {'name':'VolumeTotalReadTime','unit':'Seconds'},
    {'name':'VolumeTotalWriteTime','unit':'Seconds'},
    {'name':'VolumeIdleTime','unit':'Seconds'},
    {'name':'VolumeQueueLength','unit':'Count'},
    {'name':'VolumeThroughputPercentage','unit':'Percent'},
    {'name':'VolumeConsumedReadWriteOps','unit':'Count'}
]
class CWops(BotoBaseOps):
    """
    CloudWatch test operations built on top of boto's CloudWatchConnection.

    Mostly thin, logged pass-throughs to the boto CloudWatch API, plus helpers
    for building connection arguments and validating statistics results.
    """
    SERVICE_PREFIX = 'monitoring'
    EUCARC_URL_NAME = 'cloudwatch_url'
    CONNECTION_CLASS = CloudWatchConnection

    def get_cw_connection_args(self, endpoint=None, aws_access_key_id=None,
                               aws_secret_access_key=None, is_secure=True,
                               host=None, region=None, path='/', port=443, boto_debug=0):
        '''
        Build the keyword-argument dict used to create a CloudWatch connection.

        :param endpoint: explicit endpoint hostname; overrides any region lookup
        :param aws_access_key_id: AWS access key string
        :param aws_secret_access_key: AWS secret key string
        :param is_secure: use https when True
        :param host: when set (and no region given), skip endpoint resolution
        :param region: AWS region name to look up in CWRegionData
        :param path: service path
        :param port: service port
        :param boto_debug: boto debug level (int)
        :return: dict of connection kwargs including a populated RegionInfo
        :raise Exception: if region is given but not present in CWRegionData
        '''
        cw_region = RegionInfo()
        if region:
            self.debug('Check region: ' + str(region))
            try:
                if not endpoint:
                    cw_region.endpoint = CWRegionData[region]
                else:
                    cw_region.endpoint = endpoint
            except KeyError:
                raise Exception('Unknown region: %s' % region)
        else:
            # No region: assume a eucalyptus cloud and resolve the endpoint
            # locally unless the caller supplied a host.
            cw_region.name = 'eucalyptus'
            if not host:
                if endpoint:
                    cw_region.endpoint = endpoint
                else:
                    cw_region.endpoint = self.get_cw_ip()
        connection_args = {'aws_access_key_id': aws_access_key_id,
                           'aws_secret_access_key': aws_secret_access_key,
                           'is_secure': is_secure,
                           'debug': boto_debug,
                           'port': port,
                           'path': path,
                           'region': cw_region}
        # boto 2.6.x cannot validate certs against these endpoints.
        # Fix: escape the dot so only a literal "2.6" in the version matches.
        if re.search(r'2\.6', boto_version):
            connection_args['validate_certs'] = False
        # Fix: the original copied this dict and re-assigned 'path'/'region'
        # to identical values; the copy added nothing, return directly.
        return connection_args

    def setup_resource_trackers(self):
        '''
        Setup keys in the test_resources hash in order to track artifacts created
        '''
        self.test_resources['alarms'] = []
        self.test_resources['metric'] = []
        self.test_resources['datapoint'] = []

    def get_namespaces(self):
        '''
        Convenience function for easily segregating metrics into their namespaces

        :return: Dict where key is the Namespace and the value is a list with all metrics
        '''
        namespaces = {}
        # Fix: the original indexed namespaces[metric.namespace] before the key
        # existed, raising KeyError on the first metric of every namespace.
        for metric in self.connection.list_metrics():
            namespaces.setdefault(metric.namespace, []).append(metric)
        return namespaces

    def list_metrics( self, next_token=None, dimensions=None, metric_name=None, namespace=None ):
        """Logged pass-through to CloudWatchConnection.list_metrics()."""
        self.debug('Calling list_metrics( {p1}, {p2}, {p3}, {p4} )'.format(p1=next_token, p2=dimensions, p3=metric_name, p4=namespace))
        return self.connection.list_metrics(next_token , dimensions, metric_name, namespace)

    def get_metric_statistics( self, period, start_time, end_time, metric_name, namespace, statistics, dimensions=None, unit=None):
        """Logged pass-through to CloudWatchConnection.get_metric_statistics()."""
        self.debug('Calling get_metric_statistics( {p1}, {p2}, {p3}, {p4}, {p5}, {p6}, {p7}, {p8} )'.format(
            p1=period, p2=start_time, p3=end_time, p4=metric_name, p5=namespace, p6=statistics, p7=dimensions, p8=unit))
        return self.connection.get_metric_statistics(period, start_time, end_time, metric_name, namespace, statistics, dimensions, unit)

    def put_metric_data( self, namespace, name, value=None, timestamp=None, unit=None, dimensions=None, statistics=None):
        """Logged pass-through to CloudWatchConnection.put_metric_data()."""
        # Fix: p7 previously logged 'dimensions' twice instead of 'statistics'.
        self.debug('Calling put_metric_data( {p1}, {p2}, {p3}, {p4}, {p5}, {p6}, {p7} )'.format(
            p1=namespace, p2=name, p3=value, p4=timestamp, p5=unit, p6=dimensions, p7=statistics))
        return self.connection.put_metric_data(namespace, name, value, timestamp, unit, dimensions, statistics)

    def metric_alarm(self, name, metric, comparison, threshold, period, evaluation_periods, statistic,
                     description=None, dimensions=None, alarm_actions=None,
                     ok_actions=None, insufficient_data_actions=None, unit=None, namespace=None):
        """
        Build (but do not submit) a MetricAlarm object; submit it with
        put_metric_alarm().
        """
        self.debug('Calling create_metric_alarm ( {p1}, {p2}, {p3}, {p4}, {p5}, {p6}, {p7}, {p8}, {p9}, {p10}, {p11}, {p12}, {p13}, {p14} )'.format(
            p1=name, p2=metric, p3=comparison, p4=threshold, p5=period, p6=evaluation_periods, p7=statistic,
            p8=description, p9=dimensions, p10=alarm_actions, p11=ok_actions, p12=insufficient_data_actions, p13=unit, p14=namespace))
        # Fix: the original referenced boto.ec2.cloudwatch.alarm.MetricAlarm
        # without importing 'boto' itself, which raised NameError at runtime;
        # MetricAlarm is now imported at module level.
        alarm = MetricAlarm(name=name, metric=metric, comparison=comparison, threshold=threshold, period=period,
                            evaluation_periods=evaluation_periods, statistic=statistic,
                            description=description, dimensions=dimensions, alarm_actions=alarm_actions,
                            ok_actions=ok_actions, insufficient_data_actions=insufficient_data_actions, unit=unit,
                            namespace=namespace)
        return alarm

    def put_metric_alarm(self, alarm):
        """Logged pass-through to CloudWatchConnection.put_metric_alarm()."""
        self.debug('Calling put_metric_alarm (' + str(alarm) +')')
        self.connection.put_metric_alarm(alarm)

    def set_alarm_state(self, alarm_name, state_reason='testing', state_value='ALARM', state_reason_data=None):
        """Logged pass-through to CloudWatchConnection.set_alarm_state()."""
        self.debug('Calling set_alarm_state( {p1}, {p2}, {p3}, {p4})'.format( p1=alarm_name, p2=state_reason, p3=state_value, p4=state_reason_data))
        # Fix: state_reason_data was accepted but silently dropped.
        self.connection.set_alarm_state(alarm_name, state_reason, state_value, state_reason_data)

    def delete_all_alarms(self):
        """Delete every alarm currently visible via describe_alarms()."""
        # Fix: fetch the alarm list once instead of calling describe_alarms()
        # twice (once for logging, once for the actual delete).
        alarms = self.connection.describe_alarms()
        self.debug('Calling delete_all_alarms(' + str(alarms) + ')')
        if alarms:
            alarm_names = [alarm.name for alarm in alarms]
            self.connection.delete_alarms(alarm_names)

    def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, alarm_names=None, max_records=None, state_value=None, next_token=None):
        """Logged pass-through to CloudWatchConnection.describe_alarms()."""
        self.debug('Calling describe_alarms( {p1}, {p2}, {p3}, {p4}, {p5}, {p6} )'.format(p1=action_prefix, p2=alarm_name_prefix, p3=alarm_names,
                                                                                          p4=max_records, p5=state_value, p6=next_token))
        return self.connection.describe_alarms(action_prefix, alarm_name_prefix, alarm_names, max_records, state_value, next_token)

    def describe_alarms_for_metric(self, metric_name, namespace, period=None, statistic=None, dimensions=None, unit=None):
        """Logged pass-through to CloudWatchConnection.describe_alarms_for_metric()."""
        self.debug('Calling describe_alarms_for_metric( {p1}, {p2}, {p3}, {p4}, {p5}, {p6} )'.format(p1=metric_name, p2=namespace, p3=period, p4=statistic,
                                                                                                     p5=dimensions, p6=unit))
        return self.connection.describe_alarms_for_metric(metric_name, namespace, period, statistic, dimensions, unit)

    def describe_alarm_history(self, alarm_name=None, start_date=None, end_date=None, max_records=None, history_item_type=None, next_token=None):
        """Logged pass-through to CloudWatchConnection.describe_alarm_history()."""
        self.debug('Calling describe_alarm_history( {p1}, {p2}, {p3}, {p4}, {p5}, {p6} )'.format(p1=alarm_name, p2=start_date, p3=end_date, p4=max_records,
                                                                                                 p5=history_item_type, p6=next_token))
        return self.connection.describe_alarm_history(alarm_name, start_date, end_date, max_records, history_item_type, next_token)

    def get_dimension_array(self):
        """Return the list of known metric dimension names."""
        return DimensionArray

    def get_stats_array(self):
        """Return the list of known statistic names."""
        return StatsArray

    def get_instance_metrics_array(self):
        """Return the per-instance metric definitions."""
        return InstanceMetricArray

    def get_status_metric_array(self):
        """Return the status-check metric definitions."""
        return StatusMetricArray

    def get_ebs_metrics_array(self):
        """Return the EBS volume metric definitions."""
        return EbsMetricsArray

    def enable_alarm_actions(self, alarm_names ):
        """Logged pass-through to CloudWatchConnection.enable_alarm_actions()."""
        self.debug('Calling enable_alarm_actions( ' + str(alarm_names) + ' )')
        self.connection.enable_alarm_actions(alarm_names)

    def disable_alarm_actions(self, alarm_names ):
        """Logged pass-through to CloudWatchConnection.disable_alarm_actions()."""
        self.debug('Calling disable_alarm_actions( ' + str(alarm_names) + ' )')
        self.connection.disable_alarm_actions(alarm_names)

    def validateStats(self, values):
        """
        Sanity-check a statistics result ordered per StatsArray:
        [Average, Sum, Maximum, Minimum, SampleCount].

        :param values: sequence of 5 numeric (float-convertible) values
        :raise AssertionError: if the values are not internally consistent
        """
        average = float(values[0])
        # Sum is converted only to verify it is numeric; no invariant is
        # asserted against it.
        the_sum = float(values[1])
        maximum = float(values[2])
        minimum = float(values[3])
        sample = float(values[4])
        assert minimum <= average <= maximum
        # (the original asserted max >= min twice; once is enough)
        assert maximum >= minimum
        assert sample > 0
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import logging
from datetime import datetime
from typing import List
from pandas import DataFrame, Series, read_sql_query, to_datetime
import sqlalchemy
import xxhash
logger = logging.getLogger(__name__)
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
SYNC_COLUMNS = [
"SourceId",
"Json",
"Hash",
"CreateDate",
"LastModifiedDate",
"SyncNeeded",
]
SYNC_COLUMNS_SQL = """
SourceId TEXT,
Json TEXT,
Hash TEXT,
CreateDate DATETIME,
LastModifiedDate DATETIME,
SyncNeeded BIGINT,
PRIMARY KEY (SourceId)
"""
def _json_hash_encode(row: Series) -> Series:
    """
    Attach a serialized JSON representation of a DataFrame row, plus a fast
    xxhash64 digest of that JSON, to the row itself.

    Parameters
    ----------
    row: Series
        a DataFrame row

    Returns
    -------
    Series
        the same row with "Json" and "Hash" entries populated
    """
    serialized: str = row.to_json()
    row["Hash"] = xxhash.xxh64_hexdigest(serialized.encode("utf-8"))
    row["Json"] = serialized
    return row
def add_hash_and_json_to(df: DataFrame) -> DataFrame:
    """
    Produce a new DataFrame carrying Json and Hash columns for every row.

    Must run before any synthetic columns (e.g. SourceId) are added, so the
    serialized JSON reflects only the fetched data.

    Parameters
    ----------
    df: DataFrame
        a DataFrame with fetched data

    Returns
    -------
    DataFrame
        a new DataFrame with the json and hash columns added
    """
    encoded: DataFrame = df.apply(_json_hash_encode, axis="columns")
    return encoded
def add_sourceid_to(df: DataFrame, identity_columns: List[str]):
    """
    Add a SourceId column to *df*, derived from its identity columns.

    The identity columns are coerced to pandas string dtype (mutating *df*)
    and joined with "-" in sorted column order, so the same identity always
    yields the same SourceId regardless of column ordering in the input.

    Parameters
    ----------
    df: DataFrame
        a DataFrame with fetched data
    identity_columns: List[str]
        a List of the identity columns for the resource dataframe
    """
    assert set(identity_columns).issubset(
        set(df.columns)
    ), "Identity columns missing from dataframe"
    # string dtype is required so "-".join works on non-string identities
    df[identity_columns] = df[identity_columns].astype("string")
    key_columns = sorted(identity_columns)
    df["SourceId"] = df[key_columns].agg("-".join, axis=1)
def _create_sync_table_from_resource_df(
    resource_df: DataFrame,
    identity_columns: List[str],
    resource_name: str,
    sync_db: sqlalchemy.engine.base.Engine,
):
    """
    Take fetched data and push to a new temporary sync table. Includes
    hash and tentative extractor CreateDate/LastModifiedDates.
    Parameters
    ----------
    resource_df: DataFrame
        a DataFrame with current fetched data.
    identity_columns: List[str]
        a List of the identity columns for the resource dataframe.
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    sync_db: sqlalchemy.engine.base.Engine
        an Engine instance for creating database connections
    """
    with sync_db.connect() as con:
        # ensure sync table exists, need column ordering to be identical to regular table
        con.execute(f"DROP TABLE IF EXISTS Sync_{resource_name}")
        con.execute(
            f"""
            CREATE TABLE IF NOT EXISTS Sync_{resource_name} (
                {SYNC_COLUMNS_SQL}
            )
            """
        )
    # Json/Hash must be computed before any synthetic columns are added,
    # so they reflect only the fetched data (see add_hash_and_json_to).
    sync_df: DataFrame = resource_df.copy()
    sync_df = add_hash_and_json_to(sync_df)
    # add (possibly composite) primary key, sorting for consistent ordering
    add_sourceid_to(sync_df, identity_columns)
    # tentative dates: updated rows get their true CreateDate reconciled later
    # by _get_true_create_dates_for_unmatched_records
    now: datetime = datetime.now()
    sync_df["CreateDate"] = now
    sync_df["LastModifiedDate"] = now
    sync_df["SyncNeeded"] = 1
    sync_df = sync_df[SYNC_COLUMNS]
    sync_df.set_index("SourceId", inplace=True)
    # push to temporary sync table
    sync_df.to_sql(
        f"Sync_{resource_name}", sync_db, if_exists="append", index=True, chunksize=1000
    )
def _ensure_main_table_exists(
    resource_name: str,
    con: sqlalchemy.engine.base.Connection,
):
    """
    Ensure the main resource table exists, creating if necessary.
    The table shares its column layout (SYNC_COLUMNS_SQL) with the Sync_*
    staging tables, and carries an index on SyncNeeded.
    Parameters
    ----------
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    con: sqlalchemy.engine.base.Connection
        an open database connection, which will not be closed by this function
    """
    # drop and re-create the index so it always matches the (possibly new) table
    con.execute(f"DROP INDEX IF EXISTS SYNCNEEDED_{resource_name}")
    con.execute(
        f"""
        CREATE TABLE IF NOT EXISTS {resource_name} (
            {SYNC_COLUMNS_SQL}
        )
        """
    )
    con.execute(
        f"CREATE INDEX IF NOT EXISTS SYNCNEEDED_{resource_name} ON {resource_name}(SyncNeeded)"
    )
def _create_unmatched_records_temp_table(
    resource_name: str,
    con: sqlalchemy.engine.base.Connection,
):
    """
    Select unmatched records into new temp table - differing by hash for same identity.
    Single entry in result set if identity only exists in one table (meaning add or missing),
    so SyncNeeded flag will indicate which table it's from.
    Double entry in result set if identity exists in both (meaning update needed),
    so SyncNeeded will show which row is from which table.
    Parameters
    ----------
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    con: sqlalchemy.engine.base.Connection
        an open database connection, which will not be closed by this function
    """
    con.execute(f"DROP INDEX IF EXISTS ID_{resource_name}")
    con.execute(f"DROP TABLE IF EXISTS Unmatched_{resource_name}")
    # UNION ALL + GROUP BY (SourceId, Hash) HAVING COUNT(*)=1 keeps exactly the
    # rows whose (identity, hash) pair does NOT appear in both tables, i.e. the
    # rows that differ between main and sync.
    con.execute(
        f"""
        CREATE TABLE Unmatched_{resource_name} AS
        SELECT * FROM (
            SELECT * FROM {resource_name}
            UNION ALL
            SELECT * FROM Sync_{resource_name}
        )
        GROUP BY SourceId, Hash
        HAVING COUNT(*) = 1
        """
    )
    con.execute(
        f"CREATE INDEX IF NOT EXISTS ID_{resource_name} ON Unmatched_{resource_name}(SourceId)"
    )
def _get_true_create_dates_for_unmatched_records(
    resource_name: str,
    con: sqlalchemy.engine.base.Connection,
):
    """
    All rows start with CreateDate and LastModifiedDate initialized to "now",
    but updated rows need the original CreateDate pulled from existing table.
    Note: UPDATE-FROM is not available in sqlite until v3.33.0, thus the
    double select goofiness.
    Parameters
    ----------
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    con: sqlalchemy.engine.base.Connection
        an open database connection, which will not be closed by this function
    """
    # Only rows from the sync side (SyncNeeded = 1) that already exist in the
    # main table get their CreateDate overwritten with the original value.
    con.execute(
        f"""
        UPDATE Unmatched_{resource_name}
        SET CreateDate = (
            SELECT c.CreateDate
            FROM {resource_name} c
            WHERE c.SourceId = Unmatched_{resource_name}.SourceId
        )
        WHERE EXISTS (
            SELECT *
            FROM {resource_name} c
            WHERE c.SourceId = Unmatched_{resource_name}.SourceId
        ) AND SyncNeeded = 1
        """
    )
def _update_resource_table_with_changes(
    resource_name: str,
    con: sqlalchemy.engine.base.Connection,
):
    """
    Update main resource table with new and updated records, then reset
    the SyncNeeded flag on all main-table rows.
    Parameters
    ----------
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    con: sqlalchemy.engine.base.Connection
        an open database connection, which will not be closed by this function
    """
    # Rows whose SourceId appears twice in Unmatched_* exist in both tables
    # with different hashes => updates; the SyncNeeded = 1 filter selects the
    # sync-side (new) version of each pair.
    CHANGED_ROWS_CTE = f"""
    changedRows AS (
        SELECT * FROM Unmatched_{resource_name}
        WHERE (SourceId) IN (
            SELECT SourceId FROM Unmatched_{resource_name}
            GROUP BY SourceId
            HAVING COUNT(*) > 1
        ) AND SyncNeeded = 1
    )
    """
    # delete obsolete data from regular table
    con.execute(
        # changed rows CTE (from SyncNeeded side only)
        f"""
        WITH
        {CHANGED_ROWS_CTE}
        DELETE FROM {resource_name}
        WHERE (SourceId) IN (
            SELECT SourceId from changedRows
        )
        """
    )
    # insert new and changed data into regular table
    con.execute(
        # changed rows CTE (from SyncNeeded side only)
        # new rows CTE (also from SyncNeeded side)
        f"""
        WITH
        {CHANGED_ROWS_CTE},
        newRows AS (
            SELECT * FROM Unmatched_{resource_name}
            WHERE (SourceId) IN (
                SELECT SourceId FROM Unmatched_{resource_name}
                GROUP BY SourceId
                HAVING COUNT(*) = 1 AND SyncNeeded = 1
            )
        )
        INSERT INTO {resource_name}
        SELECT * FROM Unmatched_{resource_name}
        WHERE (SourceId) IN (
            SELECT SourceId FROM changedRows
            UNION ALL
            SELECT SourceId FROM newRows
        ) AND SyncNeeded = 1
        """
    )
    con.execute(
        # reset SyncNeeded flag on main table
        f"""
        UPDATE {resource_name}
        SET SyncNeeded = 0
        WHERE SyncNeeded != 0
        """
    )
def _update_dataframe_with_true_dates(
    resource_df: DataFrame,
    identity_columns: List[str],
    resource_name: str,
    con: sqlalchemy.engine.base.Connection,
) -> DataFrame:
    """
    Pull reconciled CreateDate/LastModifiedDate from the main resource table
    and join them onto the fetched DataFrame.

    Parameters
    ----------
    resource_df: DataFrame
        an API DataFrame with current fetched data; mutated in place by the
        temporary SourceId column this function adds
    identity_columns: List[str]
        a List of the identity columns for the resource dataframe
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    con: sqlalchemy.engine.base.Connection
        an open database connection, which will not be closed by this function

    Returns
    -------
    DataFrame
        a DataFrame with reconciled CreateDate/LastModifiedDate rendered as
        strings in DATE_FORMAT
    """
    assert set(identity_columns).issubset(
        set(resource_df.columns)
    ), "Identity columns missing from dataframe"
    # reconciled dates live in the main table for every record we just synced
    reconciliation_query = f"""
        SELECT SourceId, CreateDate, LastModifiedDate
        FROM {resource_name}
        WHERE (SourceId) IN (
            SELECT SourceId FROM Sync_{resource_name}
        )
        """
    true_dates_df = read_sql_query(reconciliation_query, con)
    true_dates_df["SourceId"] = true_dates_df["SourceId"].astype("string")
    # join key: the same (possibly composite) identity used by the sync tables
    add_sourceid_to(resource_df, identity_columns)
    resource_df["SourceId"] = resource_df["SourceId"].astype("string")
    merged_df = resource_df.join(true_dates_df.set_index("SourceId"), on="SourceId")
    # drop the helper key so no synthetic columns leak out
    merged_df.drop(["SourceId"], axis=1, inplace=True)
    # render both date columns in the extractor's canonical string format
    for date_column in ("CreateDate", "LastModifiedDate"):
        merged_df[date_column] = to_datetime(merged_df[date_column]).dt.strftime(DATE_FORMAT)
    return merged_df
def sync_to_db_without_cleanup(
    resource_df: DataFrame,
    identity_columns: List[str],
    resource_name: str,
    sync_db: sqlalchemy.engine.base.Engine,
):
    """
    Synchronize freshly fetched data with the local database.

    Creates the main and staging tables when necessary and tolerates leftover
    staging tables from a previous run; staging tables are NOT removed here —
    call cleanup_after_sync() for that.

    Parameters
    ----------
    resource_df: DataFrame
        a DataFrame with current fetched data; de-duplicated in place
    identity_columns: List[str]
        a List of the identity columns for the resource dataframe.
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    sync_db: sqlalchemy.engine.base.Engine
        an Engine instance for creating database connections

    Returns
    -------
    DataFrame
        current fetched data with reconciled CreateDate/LastModifiedDate
    """
    assert set(identity_columns).issubset(
        set(resource_df.columns)
    ), "Identity columns missing from dataframe"
    # Duplicate records can occur (e.g. a Canvas course belonging to a
    # sub-account), so collapse the DataFrame on its identity before staging.
    resource_df.drop_duplicates(subset=identity_columns, inplace=True)
    _create_sync_table_from_resource_df(
        resource_df, identity_columns, resource_name, sync_db
    )
    with sync_db.connect() as con:
        _ensure_main_table_exists(resource_name, con)
        _create_unmatched_records_temp_table(resource_name, con)
        _get_true_create_dates_for_unmatched_records(resource_name, con)
        _update_resource_table_with_changes(resource_name, con)
        reconciled_df: DataFrame = _update_dataframe_with_true_dates(
            resource_df, identity_columns, resource_name, con
        )
    return reconciled_df
def cleanup_after_sync(resource_name: str, sync_db: sqlalchemy.engine.base.Engine):
    """
    Drop the temporary Sync_/Unmatched_ staging tables for a resource, if any.

    Parameters
    ----------
    resource_name: str
        the name of the API resource, e.g. "Courses", to be used in SQL
    sync_db: sqlalchemy.engine.base.Engine
        an Engine instance for creating database connections
    """
    staging_prefixes = ("Sync_", "Unmatched_")
    with sync_db.connect() as con:
        for prefix in staging_prefixes:
            con.execute(f"DROP TABLE IF EXISTS {prefix}{resource_name}")
|
<filename>custom_components/ecowitt/__init__.py
"""The Ecowitt Weather Station Component."""
import asyncio
import logging
import time
from pyecowitt import (
EcoWittListener,
WINDCHILL_OLD,
WINDCHILL_NEW,
WINDCHILL_HYBRID,
)
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
DEGREE,
EVENT_HOMEASSISTANT_STOP,
CONF_PORT,
CONF_UNIT_SYSTEM_METRIC,
CONF_UNIT_SYSTEM_IMPERIAL,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
POWER_WATT,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
PRESSURE_HPA,
PRESSURE_INHG,
LENGTH_INCHES,
SPEED_KILOMETERS_PER_HOUR,
SPEED_MILES_PER_HOUR,
TIME_HOURS,
TIME_DAYS,
TIME_WEEKS,
TIME_MONTHS,
TIME_YEARS,
UV_INDEX,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_PRESSURE,
)
_LOGGER = logging.getLogger(__name__)
TYPE_SENSOR = "sensor"
DOMAIN = "ecowitt"
DATA_CONFIG = "config"
DATA_ECOWITT = "ecowitt_listener"
DATA_STATION = "station"
DATA_PASSKEY = "PASSKEY"
DATA_STATIONTYPE = "stationtype"
DATA_FREQ = "freq"
DATA_MODEL = "model"
CONF_UNIT_BARO = "barounit"
CONF_UNIT_WIND = "windunit"
CONF_UNIT_RAIN = "rainunit"
CONF_UNIT_WINDCHILL = "windchillunit"
TYPE_BAROMABSHPA = "baromabshpa"
TYPE_BAROMRELHPA = "baromrelhpa"
TYPE_BAROMABSIN = "baromabsin"
TYPE_BAROMRELIN = "baromrelin"
TYPE_RAINRATEIN = "rainratein"
TYPE_EVENTRAININ = "eventrainin"
TYPE_HOURLYRAININ = "hourlyrainin"
TYPE_TOTALRAININ = "totalrainin"
TYPE_DAILYRAININ = "dailyrainin"
TYPE_WEEKLYRAININ = "weeklyrainin"
TYPE_MONTHLYRAININ = "monthlyrainin"
TYPE_YEARLYRAININ = "yearlyrainin"
TYPE_RAINRATEMM = "rainratemm"
TYPE_EVENTRAINMM = "eventrainmm"
TYPE_HOURLYRAINMM = "hourlyrainmm"
TYPE_TOTALRAINMM = "totalrainmm"
TYPE_DAILYRAINMM = "dailyrainmm"
TYPE_WEEKLYRAINMM = "weeklyrainmm"
TYPE_MONTHLYRAINMM = "monthlyrainmm"
TYPE_YEARLYRAINMM = "yearlyrainmm"
TYPE_HUMIDITY = "humidity"
TYPE_HUMIDITY1 = "humidity1"
TYPE_HUMIDITY2 = "humidity2"
TYPE_HUMIDITY3 = "humidity3"
TYPE_HUMIDITY4 = "humidity4"
TYPE_HUMIDITY5 = "humidity5"
TYPE_HUMIDITY6 = "humidity6"
TYPE_HUMIDITY7 = "humidity7"
TYPE_HUMIDITY8 = "humidity8"
TYPE_HUMIDITYIN = "humidityin"
TYPE_WINDDIR = "winddir"
TYPE_WINDSPEEDKMH = "windspeedkmh"
TYPE_WINDGUSTKMH = "windgustkmh"
TYPE_WINDSPEEDMPH = "windspeedmph"
TYPE_WINDGUSTMPH = "windgustmph"
TYPE_MAXDAILYGUST = "maxdailygust"
TYPE_MAXDAILYGUSTKMH = "maxdailygustkmh"
TYPE_TEMPC = "tempc"
TYPE_TEMPINC = "tempinc"
TYPE_TEMP1C = "temp1c"
TYPE_TEMP2C = "temp2c"
TYPE_TEMP3C = "temp3c"
TYPE_TEMP4C = "temp4c"
TYPE_TEMP5C = "temp5c"
TYPE_TEMP6C = "temp6c"
TYPE_TEMP7C = "temp7c"
TYPE_TEMP8C = "temp8c"
TYPE_DEWPOINTC = "dewpointc"
TYPE_WINDCHILLC = "windchillc"
TYPE_SOLARRADIATION = "solarradiation"
TYPE_UV = "uv"
TYPE_SOILMOISTURE1 = "soilmoisture1"
TYPE_SOILMOISTURE2 = "soilmoisture2"
TYPE_SOILMOISTURE3 = "soilmoisture3"
TYPE_SOILMOISTURE4 = "soilmoisture4"
TYPE_SOILMOISTURE5 = "soilmoisture5"
TYPE_SOILMOISTURE6 = "soilmoisture6"
TYPE_SOILMOISTURE7 = "soilmoisture7"
TYPE_SOILMOISTURE8 = "soilmoisture8"
TYPE_PM25_CH1 = "pm25_ch1"
TYPE_PM25_CH2 = "pm25_ch2"
TYPE_PM25_CH3 = "pm25_ch3"
TYPE_PM25_CH4 = "pm25_ch4"
TYPE_PM25_AVG_24H_CH1 = "pm25_avg_24h_ch1"
TYPE_PM25_AVG_24H_CH2 = "pm25_avg_24h_ch2"
TYPE_PM25_AVG_24H_CH3 = "pm25_avg_24h_ch3"
TYPE_PM25_AVG_24H_CH4 = "pm25_avg_24h_ch4"
TYPE_WH68BATT = "wh68batt"
TYPE_WH40BATT = "wh40batt"
TYPE_WH26BATT = "wh26batt"
TYPE_WH65BATT = "wh65batt"
TYPE_SOILBATT1 = "soilbatt1"
TYPE_SOILBATT2 = "soilbatt2"
TYPE_SOILBATT3 = "soilbatt3"
TYPE_SOILBATT4 = "soilbatt4"
TYPE_SOILBATT5 = "soilbatt5"
TYPE_SOILBATT6 = "soilbatt6"
TYPE_SOILBATT7 = "soilbatt7"
TYPE_SOILBATT8 = "soilbatt8"
TYPE_BATTERY1 = "batt1"
TYPE_BATTERY2 = "batt2"
TYPE_BATTERY3 = "batt3"
TYPE_BATTERY4 = "batt4"
TYPE_BATTERY5 = "batt5"
TYPE_BATTERY6 = "batt6"
TYPE_BATTERY7 = "batt7"
TYPE_BATTERY8 = "batt8"
S_METRIC = 1
S_IMPERIAL = 2
W_TYPE_NEW = "new"
W_TYPE_OLD = "old"
W_TYPE_HYBRID = "hybrid"
# Name, unit_of_measure, type, device_class, icon, metric=1
# name, uom, kind, device_class, icon, metric = SENSOR_TYPES[x]
# NOTE(review): the final field selects the unit system the sensor belongs to
# (S_METRIC / S_IMPERIAL); 0 appears to mean unit-system independent -- confirm
# against the sensor platform before relying on it.
SENSOR_TYPES = {
    TYPE_BAROMABSHPA: ("Absolute Pressure", PRESSURE_HPA,
                       TYPE_SENSOR, DEVICE_CLASS_PRESSURE,
                       "mdi:gauge", S_METRIC),
    TYPE_BAROMRELHPA: ("Relative Pressure", PRESSURE_HPA,
                       TYPE_SENSOR, DEVICE_CLASS_PRESSURE,
                       "mdi:gauge", S_METRIC),
    TYPE_BAROMABSIN: ("Absolute Pressure", PRESSURE_INHG,
                      TYPE_SENSOR, DEVICE_CLASS_PRESSURE,
                      "mdi:gauge", S_IMPERIAL),
    TYPE_BAROMRELIN: ("Relative Pressure", PRESSURE_INHG,
                      TYPE_SENSOR, DEVICE_CLASS_PRESSURE,
                      "mdi:gauge", S_IMPERIAL),
    TYPE_RAINRATEIN: ("Rain Rate", f"{LENGTH_INCHES}/{TIME_HOURS}",
                      TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_EVENTRAININ: ("Event Rain Rate", f"{LENGTH_INCHES}/{TIME_HOURS}",
                      TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_HOURLYRAININ: ("Hourly Rain Rate", f"{LENGTH_INCHES}/{TIME_HOURS}",
                       TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_TOTALRAININ: ("Total Rain Rate", f"{LENGTH_INCHES}/{TIME_HOURS}",
                      TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_DAILYRAININ: ("Daily Rain Rate", f"{LENGTH_INCHES}/{TIME_DAYS}",
                      TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_WEEKLYRAININ: ("Weekly Rain Rate", f"{LENGTH_INCHES}/{TIME_WEEKS}",
                       TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_MONTHLYRAININ: ("Monthly Rain Rate", f"{LENGTH_INCHES}/{TIME_MONTHS}",
                        TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_YEARLYRAININ: ("Yearly Rain Rate", f"{LENGTH_INCHES}/{TIME_YEARS}",
                       TYPE_SENSOR, None, "mdi:water", S_IMPERIAL),
    TYPE_RAINRATEMM: ("Rain Rate", f"mm/{TIME_HOURS}",
                      TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_EVENTRAINMM: ("Event Rain Rate", f"mm/{TIME_HOURS}",
                      TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_HOURLYRAINMM: ("Hourly Rain Rate", f"mm/{TIME_HOURS}",
                       TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_TOTALRAINMM: ("Total Rain Rate", f"mm/{TIME_HOURS}",
                      TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_DAILYRAINMM: ("Daily Rain Rate", f"mm/{TIME_DAYS}",
                      TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_WEEKLYRAINMM: ("Weekly Rain Rate", f"mm/{TIME_WEEKS}",
                       TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_MONTHLYRAINMM: ("Monthly Rain Rate", f"mm/{TIME_MONTHS}",
                        TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_YEARLYRAINMM: ("Yearly Rain Rate", f"mm/{TIME_YEARS}",
                       TYPE_SENSOR, None, "mdi:water", S_METRIC),
    TYPE_HUMIDITY: ("Humidity", UNIT_PERCENTAGE,
                    TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                    "mdi:water-percent", 0),
    TYPE_HUMIDITYIN: ("Indoor Humidity", UNIT_PERCENTAGE,
                      TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                      "mdi:water-percent", 0),
    TYPE_HUMIDITY1: ("Humidity 1", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_HUMIDITY2: ("Humidity 2", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_HUMIDITY3: ("Humidity 3", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_HUMIDITY4: ("Humidity 4", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_HUMIDITY5: ("Humidity 5", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_HUMIDITY6: ("Humidity 6", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_HUMIDITY7: ("Humidity 7", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_HUMIDITY8: ("Humidity 8", UNIT_PERCENTAGE,
                     TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                     "mdi:water-percent", 0),
    TYPE_WINDDIR: ("Wind Direction", DEGREE,
                   TYPE_SENSOR, None, "mdi:water-percent", 0),
    TYPE_WINDSPEEDKMH: ("Wind Speed", SPEED_KILOMETERS_PER_HOUR,
                        TYPE_SENSOR, None, "mdi:weather-windy", S_METRIC),
    TYPE_WINDGUSTKMH: ("Wind Gust", SPEED_KILOMETERS_PER_HOUR,
                       TYPE_SENSOR, None, "mdi:weather-windy", S_METRIC),
    TYPE_WINDSPEEDMPH: ("Wind Speed", SPEED_MILES_PER_HOUR,
                        TYPE_SENSOR, None, "mdi:weather-windy", S_IMPERIAL),
    TYPE_WINDGUSTMPH: ("Wind Gust", SPEED_MILES_PER_HOUR,
                       TYPE_SENSOR, None, "mdi:weather-windy", S_IMPERIAL),
    TYPE_MAXDAILYGUST: ("Max Daily Wind Gust", SPEED_MILES_PER_HOUR,
                        TYPE_SENSOR, None, "mdi:weather-windy", S_IMPERIAL),
    # Fix: the metric max-daily-gust sensor previously declared
    # SPEED_MILES_PER_HOUR, mislabeling its unit; it now matches the other
    # S_METRIC wind sensors (km/h).
    TYPE_MAXDAILYGUSTKMH: ("Max Daily Wind Gust", SPEED_KILOMETERS_PER_HOUR,
                           TYPE_SENSOR, None, "mdi:weather-windy", S_METRIC),
    TYPE_TEMPC: ("Outdoor Temperature", TEMP_CELSIUS,
                 TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP1C: ("Temperature 1", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP2C: ("Temperature 2", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP3C: ("Temperature 3", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP4C: ("Temperature 4", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP5C: ("Temperature 5", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP6C: ("Temperature 6", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP7C: ("Temperature 7", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMP8C: ("Temperature 8", TEMP_CELSIUS,
                  TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE, "mdi:thermometer", 0),
    TYPE_TEMPINC: ("Indoor Temperature", TEMP_CELSIUS,
                   TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE,
                   "mdi:thermometer", 0),
    TYPE_DEWPOINTC: ("Dewpoint", TEMP_CELSIUS,
                     TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE,
                     "mdi:thermometer", 0),
    TYPE_WINDCHILLC: ("Windchill", TEMP_CELSIUS,
                      TYPE_SENSOR, DEVICE_CLASS_TEMPERATURE,
                      "mdi:thermometer", 0),
    TYPE_SOLARRADIATION: ("Solar Radiation", f"{POWER_WATT}/m^2",
                          TYPE_SENSOR, DEVICE_CLASS_ILLUMINANCE,
                          "mdi:weather-sunny", 0),
    TYPE_UV: ("UV Index", UV_INDEX,
              TYPE_SENSOR, None, "mdi:sunglasses", 0),
    TYPE_SOILMOISTURE1: ("Soil Moisture 1", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_SOILMOISTURE2: ("Soil Moisture 2", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_SOILMOISTURE3: ("Soil Moisture 3", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_SOILMOISTURE4: ("Soil Moisture 4", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_SOILMOISTURE5: ("Soil Moisture 5", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_SOILMOISTURE6: ("Soil Moisture 6", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_SOILMOISTURE7: ("Soil Moisture 7", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_SOILMOISTURE8: ("Soil Moisture 8", UNIT_PERCENTAGE,
                         TYPE_SENSOR, DEVICE_CLASS_HUMIDITY,
                         "mdi:water-percent", 0),
    TYPE_PM25_CH1: ("PM2.5 1", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                    TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_PM25_CH2: ("PM2.5 2", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                    TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_PM25_CH3: ("PM2.5 3", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                    TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_PM25_CH4: ("PM2.5 4", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                    TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_PM25_AVG_24H_CH1: ("PM2.5 24h average 1",
                            CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                            TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_PM25_AVG_24H_CH2: ("PM2.5 24h average 2",
                            CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                            TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_PM25_AVG_24H_CH3: ("PM2.5 24h average 3",
                            CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                            TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_PM25_AVG_24H_CH4: ("PM2.5 24h average 4",
                            CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
                            TYPE_SENSOR, None, "mdi:eye", 0),
    TYPE_WH68BATT: ("WH68 Battery", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_WH40BATT: ("WH40 Battery", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_WH26BATT: ("WH26 Battery", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_WH65BATT: ("WH65 Battery", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_SOILBATT1: ("Soil Moisture 1 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_SOILBATT2: ("Soil Moisture 2 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_SOILBATT3: ("Soil Moisture 3 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_SOILBATT4: ("Soil Moisture 4 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_SOILBATT5: ("Soil Moisture 5 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_SOILBATT6: ("Soil Moisture 6 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_SOILBATT7: ("Soil Moisture 7 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_SOILBATT8: ("Soil Moisture 8 Battery", "BATT", TYPE_SENSOR,
                     None, "mdi:battery", 0),
    TYPE_BATTERY1: ("Battery 1", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_BATTERY2: ("Battery 2", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_BATTERY3: ("Battery 3", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_BATTERY4: ("Battery 4", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_BATTERY5: ("Battery 5", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_BATTERY6: ("Battery 6", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_BATTERY7: ("Battery 7", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
    TYPE_BATTERY8: ("Battery 8", "BATT", TYPE_SENSOR,
                    None, "mdi:battery", 0),
}
# Raw report keys deliberately not exposed as sensors: alternate-unit
# duplicates (presumably converted by the listener into the keys above --
# confirm against pyecowitt), the report timestamp, and the station metadata
# keys that async_setup moves into DATA_STATION.
IGNORED_SENSORS = [
    'tempinf',
    'tempf',
    'temp1f',
    'temp2f',
    'temp3f',
    'temp4f',
    'temp5f',
    'temp6f',
    'temp7f',
    'temp8f',
    'dateutc',
    'windgustms',
    'windspeedms',
    'maxdailygustms',
    'windchillf',
    'dewpointf',
    DATA_PASSKEY,
    DATA_STATIONTYPE,
    DATA_FREQ,
    DATA_MODEL,
]
# Per-domain configuration: the listener port is required; the unit-system
# and windchill-algorithm overrides are optional.
COMPONENT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PORT): cv.port,
        vol.Optional(CONF_UNIT_BARO,
                     default=CONF_UNIT_SYSTEM_METRIC): cv.string,
        vol.Optional(CONF_UNIT_WIND,
                     default=CONF_UNIT_SYSTEM_IMPERIAL): cv.string,
        vol.Optional(CONF_UNIT_RAIN,
                     default=CONF_UNIT_SYSTEM_IMPERIAL): cv.string,
        vol.Optional(CONF_UNIT_WINDCHILL,
                     default=W_TYPE_HYBRID): cv.string,
    }
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: COMPONENT_SCHEMA}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, config):
    """Set up the Ecowitt component.

    Starts the EcoWittListener on the configured port, waits for the first
    station report, records the station metadata, decides which sensor keys
    to expose (honouring the per-quantity unit preferences), and hands the
    resulting list to the sensor platform.

    Returns True on success (or when the integration is not configured);
    False when no passkey or no usable sensors were found.
    """
    hass.data[DOMAIN] = {}
    all_sensors = []
    # Integration not configured: nothing to do.
    if DOMAIN not in config:
        return True
    conf = config[DOMAIN]
    # Store config
    hass.data[DOMAIN][DATA_CONFIG] = conf
    hass.data[DOMAIN][DATA_STATION] = {}
    # preload some model info
    stationinfo = hass.data[DOMAIN][DATA_STATION]
    stationinfo[DATA_STATIONTYPE] = "Unknown"
    stationinfo[DATA_FREQ] = "Unknown"
    stationinfo[DATA_MODEL] = "Unknown"
    # setup the base connection
    ws = EcoWittListener(port=conf[CONF_PORT])
    hass.data[DOMAIN][DATA_ECOWITT] = ws
    # Select the wind-chill formula requested in the config.
    if conf[CONF_UNIT_WINDCHILL] == W_TYPE_OLD:
        ws.set_windchill(WINDCHILL_OLD)
    if conf[CONF_UNIT_WINDCHILL] == W_TYPE_NEW:
        ws.set_windchill(WINDCHILL_NEW)
    if conf[CONF_UNIT_WINDCHILL] == W_TYPE_HYBRID:
        ws.set_windchill(WINDCHILL_HYBRID)
    hass.loop.create_task(ws.listen())
    async def close_server(*args):
        """ Close the ecowitt server."""
        await ws.stop()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_server)
    # go to sleep until we get the first report
    await ws.wait_for_valid_data()
    # check if we have model info, etc.
    if DATA_PASSKEY in ws.last_values:
        stationinfo[DATA_PASSKEY] = ws.last_values[DATA_PASSKEY]
        ws.last_values.pop(DATA_PASSKEY, None)
    else:
        # The passkey is the unique-id prefix for every entity; abort without it.
        _LOGGER.error("No passkey, cannot set unique id.")
        return False
    if DATA_STATIONTYPE in ws.last_values:
        stationinfo[DATA_STATIONTYPE] = ws.last_values[DATA_STATIONTYPE]
        ws.last_values.pop(DATA_STATIONTYPE, None)
    if DATA_FREQ in ws.last_values:
        stationinfo[DATA_FREQ] = ws.last_values[DATA_FREQ]
        ws.last_values.pop(DATA_FREQ, None)
    if DATA_MODEL in ws.last_values:
        stationinfo[DATA_MODEL] = ws.last_values[DATA_MODEL]
        ws.last_values.pop(DATA_MODEL, None)
    # load the sensors we have
    for sensor in ws.last_values.keys():
        if sensor not in SENSOR_TYPES:
            if sensor not in IGNORED_SENSORS:
                _LOGGER.warning("Unhandled sensor type %s", sensor)
            continue
        # Is this a metric or imperial sensor, lookup and skip
        name, uom, kind, device_class, icon, metric = SENSOR_TYPES[sensor]
        if "baro" in sensor:
            if (conf[CONF_UNIT_BARO] == CONF_UNIT_SYSTEM_IMPERIAL and
                    metric == S_METRIC):
                continue
            if (conf[CONF_UNIT_BARO] == CONF_UNIT_SYSTEM_METRIC and
                    metric == S_IMPERIAL):
                continue
        if "rain" in sensor:
            if (conf[CONF_UNIT_RAIN] == CONF_UNIT_SYSTEM_IMPERIAL and
                    metric == S_METRIC):
                continue
            if (conf[CONF_UNIT_RAIN] == CONF_UNIT_SYSTEM_METRIC and
                    metric == S_IMPERIAL):
                continue
        if "wind" in sensor:
            if (conf[CONF_UNIT_WIND] == CONF_UNIT_SYSTEM_IMPERIAL and
                    metric == S_METRIC):
                continue
            if (conf[CONF_UNIT_WIND] == CONF_UNIT_SYSTEM_METRIC and
                    metric == S_IMPERIAL):
                continue
        all_sensors.append(sensor)
    if not all_sensors:
        _LOGGER.error("No sensors found to monitor, check device config.")
        return False
    hass.async_create_task(
        async_load_platform(hass, "sensor", DOMAIN, all_sensors, config)
    )
    async def _async_ecowitt_update_cb(weather_data):
        """Primary update callback called from pyecowitt."""
        _LOGGER.debug("Primary update callback triggered.")
        # Warn about keys we do not understand yet (once per report).
        for sensor in weather_data.keys():
            if sensor not in SENSOR_TYPES:
                if sensor not in IGNORED_SENSORS:
                    _LOGGER.warning("Unhandled sensor type %s value %s, " +
                                    "file a PR.", sensor, weather_data[sensor])
        # Fan the fresh report out to every registered entity.
        async_dispatcher_send(hass, DOMAIN)
    ws.register_listener(_async_ecowitt_update_cb)
    return True
class EcowittEntity(Entity):
    """Common base for entities backed by the Ecowitt listener.

    Keeps references to the station metadata and the listener stored by
    ``async_setup`` and wires the entity into dispatcher-based push updates.
    """

    def __init__(self, hass, key, name):
        """Construct the entity from the data recorded during setup."""
        self.hass = hass
        self._key = key
        self._name = name
        domain_data = hass.data[DOMAIN]
        self._stationinfo = domain_data[DATA_STATION]
        self._ws = domain_data[DATA_ECOWITT]

    @property
    def should_poll(self):
        """Data is pushed by the station, so never poll."""
        return False

    @property
    def unique_id(self):
        """Unique ID built from the station passkey and the sensor key."""
        return f"{self._stationinfo[DATA_PASSKEY]}-{self._key}"

    @property
    def name(self):
        """Human-readable name of the sensor."""
        return self._name

    @property
    def device_info(self):
        """Station type, model and radio frequency reported at startup."""
        return {
            "station": self._stationinfo[DATA_STATIONTYPE],
            "model": self._stationinfo[DATA_MODEL],
            "frequency": self._stationinfo[DATA_FREQ],
        }

    async def async_added_to_hass(self):
        """Subscribe to dispatcher updates once the entity is registered."""
        async_dispatcher_connect(self.hass, DOMAIN, self._update_callback)

    @callback
    def _update_callback(self) -> None:
        """Dispatcher hook: schedule a state refresh."""
        _LOGGER.debug("Updating state with new data. %s", self._name)
        self.async_schedule_update_ha_state(force_refresh=True)

    @property
    def assumed_state(self) -> bool:
        """Report the state as assumed once no data arrived for five minutes."""
        return (self._ws.lastupd + 5 * 60) < time.time()
|
<filename>steem/markets.py
import time
from decimal import Decimal
from operator import mul
from pprint import pprint
from statistics import mean
import grequests
import steem as stm
from steem.amount import Amount
class Tickers(object):
    """Spot-price tickers for BTC/USD, STEEM/BTC and SBD/BTC.

    Each ticker queries several public exchange APIs concurrently (via
    grequests), skips the exchanges that fail to answer within the timeout,
    and aggregates whatever quotes remain.
    """

    @staticmethod
    def btc_usd_ticker(verbose=False):
        """Return the volume-weighted average BTC/USD price across exchanges.

        :param bool verbose: when True, pretty-print the per-exchange quotes.
        :raises RuntimeError: if no exchange could be reached.
        """
        prices = {}
        urls = [
            "https://api.bitfinex.com/v1/pubticker/BTCUSD",
            "https://api.exchange.coinbase.com/products/BTC-USD/ticker",
            "https://www.okcoin.com/api/v1/ticker.do?symbol=btc_usd",
            "https://www.bitstamp.net/api/v2/ticker/btcusd/",
            "https://btc-e.com/api/2/btc_usd/ticker",
        ]
        rs = (grequests.get(u, timeout=2) for u in urls)
        responses = list(grequests.map(rs, exception_handler=lambda x, y: ""))
        for r in [x for x in responses if hasattr(x, "status_code") and x.status_code == 200 and x.json()]:
            if "bitfinex" in r.url:
                data = r.json()
                prices['bitfinex'] = {'price': float(data['last_price']), 'volume': float(data['volume'])}
            elif "coinbase" in r.url:
                data = r.json()
                prices['coinbase'] = {'price': float(data['price']), 'volume': float(data['volume'])}
            elif "okcoin" in r.url:
                data = r.json()["ticker"]
                prices['okcoin'] = {'price': float(data['last']), 'volume': float(data['vol'])}
            elif "bitstamp" in r.url:
                data = r.json()
                prices['bitstamp'] = {'price': float(data['last']), 'volume': float(data['volume'])}
            elif "btc-e" in r.url:
                # BUG FIX: the host is "btc-e.com", so the previous check
                # ("btce" in r.url) never matched and btc-e was always skipped.
                data = r.json()["ticker"]
                prices['btce'] = {'price': float(data['avg']), 'volume': float(data['vol_cur'])}
        if verbose:
            pprint(prices)
        if len(prices) == 0:
            raise RuntimeError("Obtaining BTC/USD prices has failed from all sources.")
        # vwap
        return Tickers._wva([x['price'] for x in prices.values()], [x['volume'] for x in prices.values()])

    @staticmethod
    def steem_btc_ticker():
        """Return the mean STEEM/BTC price from Poloniex and Bittrex.

        :raises RuntimeError: if neither exchange could be reached.
        """
        prices = {}
        urls = [
            "https://poloniex.com/public?command=returnTicker",
            "https://bittrex.com/api/v1.1/public/getticker?market=BTC-STEEM",
        ]
        rs = (grequests.get(u, timeout=2) for u in urls)
        responses = list(grequests.map(rs, exception_handler=lambda x, y: ""))
        for r in [x for x in responses if hasattr(x, "status_code") and x.status_code == 200 and x.json()]:
            if "poloniex" in r.url:
                data = r.json()["BTC_STEEM"]
                prices['poloniex'] = {'price': float(data['last']), 'volume': float(data['baseVolume'])}
            elif "bittrex" in r.url:
                data = r.json()["result"]
                price = (data['Bid'] + data['Ask']) / 2
                prices['bittrex'] = {'price': price, 'volume': 0}
        if len(prices) == 0:
            raise RuntimeError("Obtaining STEEM/BTC prices has failed from all sources.")
        return mean([x['price'] for x in prices.values()])

    @staticmethod
    def sbd_btc_ticker(verbose=False):
        """Return the mean SBD/BTC price from Poloniex and Bittrex.

        :param bool verbose: when True, print the bid/ask spread per exchange.
        :raises RuntimeError: if neither exchange could be reached.
        """
        prices = {}
        urls = [
            "https://poloniex.com/public?command=returnTicker",
            "https://bittrex.com/api/v1.1/public/getticker?market=BTC-SBD",
        ]
        rs = (grequests.get(u, timeout=2) for u in urls)
        responses = list(grequests.map(rs, exception_handler=lambda x, y: ""))
        for r in [x for x in responses if hasattr(x, "status_code") and x.status_code == 200 and x.json()]:
            if "poloniex" in r.url:
                data = r.json()["BTC_SBD"]
                if verbose:
                    print("Spread on Poloniex is %.2f%%" % Tickers.calc_spread(data['highestBid'], data['lowestAsk']))
                prices['poloniex'] = {'price': float(data['last']), 'volume': float(data['baseVolume'])}
            elif "bittrex" in r.url:
                data = r.json()["result"]
                if verbose:
                    # BUG FIX: calc_spread takes (bid, ask); the old call summed
                    # them into a single argument and raised TypeError. Also
                    # corrected the "Bittfex" typo in the message.
                    print("Spread on Bittrex is %.2f%%" % Tickers.calc_spread(data['Bid'], data['Ask']))
                price = (data['Bid'] + data['Ask']) / 2
                prices['bittrex'] = {'price': price, 'volume': 0}
        if len(prices) == 0:
            raise RuntimeError("Obtaining SBD/BTC prices has failed from all sources.")
        return mean([x['price'] for x in prices.values()])

    @staticmethod
    def calc_spread(bid, ask):
        """Return the bid/ask spread as a percentage (Decimal)."""
        return (1 - (Decimal(bid) / Decimal(ask))) * 100

    @staticmethod
    def _wva(values, weights):
        """Calculate the weighted average of *values* using *weights*.

        Both sequences must be non-empty and of equal length.
        """
        assert len(values) == len(weights) and len(weights) > 0
        return sum([mul(*x) for x in zip(values, weights)]) / sum(weights)
class Markets(Tickers):
    """Ticker access with a shared time-based cache plus implied cross rates.

    Ticker lookups hit external exchange APIs, so each result is memoized
    and refreshed only after ``cache_timeout`` seconds have elapsed.
    """

    def __init__(self, cache_timeout=60, steem_instance=None):
        """Bind a Steem handle and start with an empty ticker cache."""
        if not steem_instance:
            steem_instance = stm.Steem()
        self.steem = steem_instance
        self._cache_timeout = cache_timeout
        self._cache_timer = time.time()
        self._btc_usd = None
        self._steem_btc = None
        self._sbd_btc = None

    def _has_cache_expired(self):
        """Return True (and restart the clock) once the timeout has elapsed."""
        expired = self._cache_timer + self._cache_timeout < time.time()
        if expired:
            self._cache_timer = time.time()
        return expired

    def btc_usd(self):
        """BTC/USD price, refreshed at most once per cache window."""
        if self._btc_usd is None or self._has_cache_expired():
            self._btc_usd = self.btc_usd_ticker()
        return self._btc_usd

    def steem_btc(self):
        """STEEM/BTC price, refreshed at most once per cache window."""
        if self._steem_btc is None or self._has_cache_expired():
            self._steem_btc = self.steem_btc_ticker()
        return self._steem_btc

    def sbd_btc(self):
        """SBD/BTC price, refreshed at most once per cache window."""
        if self._sbd_btc is None or self._has_cache_expired():
            self._sbd_btc = self.sbd_btc_ticker()
        return self._sbd_btc

    def steem_sbd_implied(self):
        """STEEM price in SBD implied by the two BTC pairs."""
        return self.steem_btc() / self.sbd_btc()

    def steem_usd_implied(self):
        """STEEM price in USD implied via BTC."""
        return self.steem_btc() * self.btc_usd()

    def sbd_usd_implied(self):
        """SBD price in USD implied via BTC."""
        return self.sbd_btc() * self.btc_usd()

    def avg_witness_price(self, take=10):
        """Mean of the last *take* entries of the witness price feed."""
        feed = self.steem.rpc.get_feed_history()['price_history']
        return mean([Amount(entry['base']).amount * Amount(entry['quote']).amount
                     for entry in feed[-take:]])
|
<reponame>ratelang/pytest-ratl
import pytest
def _compile(
    source_code,
    *,
    lark_grammar,
    vyper_interface_codes=None,
    evm_version=None,
    vyper_output_formats=("abi", "bytecode"),
    mpc_output_formats=None
):
    """Compile *source_code* with RatelCompiler and grammar-check the result.

    Returns the compiler output dict; the generated Vyper code is parsed
    with *lark_grammar* as a sanity check before returning.
    """
    from ratl import RatelCompiler

    compiler = RatelCompiler()
    result = compiler.compile(
        source_code,
        vyper_output_formats=vyper_output_formats,
        vyper_interface_codes=vyper_interface_codes,
        evm_version=evm_version,
        mpc_output_formats=mpc_output_formats,
    )
    # Verify the emitted Vyper code still conforms to the grammar.
    lark_grammar.parse(compiler._vyper_code + "\n")
    return result
# NOTE Taken and adapted from vyperlang/vyper.
def _get_contract(
    w3, source_code, *args, lark_grammar, contract_factory_class, **kwargs
):
    """Compile *source_code* and deploy it through *w3*; return the contract.

    Positional *args* go to the constructor; remaining *kwargs* (after the
    compile-related ones are popped) are merged into the transaction dict.
    """
    compiled = _compile(
        source_code,
        lark_grammar=lark_grammar,
        vyper_output_formats=("abi", "bytecode"),
        vyper_interface_codes=kwargs.pop("interface_codes", None),
        evm_version=kwargs.pop("evm_version", None),
        mpc_output_formats=kwargs.pop("mpc_output_formats", None),
    )
    vyper_out = compiled["vyper"]
    abi = vyper_out["abi"]
    bytecode = vyper_out["bytecode"]
    # Deploy value is given in whole eth; convert to wei.
    value = kwargs.pop("value_in_eth", 0) * 10 ** 18
    deployer = w3.eth.contract(abi=abi, bytecode=bytecode)
    tx_info = {
        "from": w3.eth.accounts[0],
        "value": value,
        "gasPrice": 0,
    }
    tx_info.update(kwargs)
    tx_hash = deployer.constructor(*args).transact(tx_info)
    address = w3.eth.getTransactionReceipt(tx_hash)["contractAddress"]
    return w3.eth.contract(
        address,
        abi=abi,
        bytecode=bytecode,
        ContractFactoryClass=contract_factory_class,
    )
@pytest.fixture
def get_compiled_code(lark_grammar):
    """Fixture: a helper that compiles source using the shared lark grammar."""
    def compile_source(
        source_code,
        vyper_output_formats=("abi", "bytecode"),
        vyper_interface_codes=None,
        evm_version=None,
        mpc_output_formats=None,
    ):
        """Compile *source_code*; see ``_compile`` for the parameters."""
        return _compile(
            source_code,
            lark_grammar=lark_grammar,
            vyper_output_formats=vyper_output_formats,
            vyper_interface_codes=vyper_interface_codes,
            evm_version=evm_version,
            mpc_output_formats=mpc_output_formats,
        )
    return compile_source
@pytest.fixture
def get_ratl_contract(w3, _VyperContract, lark_grammar):
    """Fixture: a helper that compiles and deploys a Ratel contract.

    The helper accepts ``lark_grammar`` and ``contract_factory_class``
    keyword overrides, defaulting to the fixture-provided objects.
    """
    def get_contract(
        source_code,
        *args,
        lark_grammar=lark_grammar,
        contract_factory_class=_VyperContract,
        **kwargs
    ):
        # BUG FIX: the original forwarded the fixture default _VyperContract
        # instead of the caller-supplied contract_factory_class, so the
        # keyword override was silently ignored.
        return _get_contract(
            w3,
            source_code,
            *args,
            lark_grammar=lark_grammar,
            contract_factory_class=contract_factory_class,
            **kwargs
        )
    return get_contract
# def test_compile(mpc_contract_code):
# from tests.grammar.conftest import get_lark_grammar
# from ratel import RatelCompiler
#
# LARK_GRAMMAR = get_lark_grammar()
#
# ratel_compiler = RatelCompiler()
# out = ratel_compiler.compile(
# mpc_contract_code, vyper_output_formats=["abi", "bytecode"],
# )
#
# vyper_source = ratel_compiler._vyper_code
# LARK_GRAMMAR.parse(vyper_source + "\n") # Test grammar.
# mpc_output = out["mpc"]
# mpc_src_code = mpc_output["src_code"]
# exec(mpc_src_code, globals())
# assert multiply(3, 4) == 12 # noqa F821
|
<filename>apluslms_file_transfer/client/fileinfo.py
import os
import json
import requests
from io import BytesIO
from hashlib import sha256
import logging
from apluslms_file_transfer.exceptions import GetFileUpdateError
from apluslms_file_transfer.color_print import PrintColor
logger = logging.getLogger(__name__)
def get_manifest(file):
    """
    Get the manifest (modification time, checksum) of a file

    :param str file: path of the file
    :returns: the dict of the file manifest (key:'mtime', 'checksum')
    :rtype: dict
    """
    st = os.stat(file)
    # BUG FIX: read inside a context manager so the handle is closed
    # deterministically (the original leaked an open file object per call).
    with open(file, 'rb') as f:
        digest = sha256(f.read()).hexdigest()
    return {"mtime": st.st_mtime_ns,
            "checksum": 'sha256:' + digest}
def get_files_manifest_in_folder(directory):
    """
    Get the manifest of files in a folder

    Hidden entries (names starting with '.') are skipped, both files and
    whole directory subtrees.

    :param str directory: the path of the directory
    :return: a nested dict {rel_file_name: {"mtime":, "checksum":}}
    :rtype: dict
    """
    manifest = {}
    for basedir, dirs, files in os.walk(directory):
        # Prune hidden directories in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        for filename in files:
            if filename.startswith('.'):
                continue
            path = os.path.join(basedir, filename)
            rel_name = os.path.relpath(path, start=directory)
            manifest[rel_name] = get_manifest(path)
    return manifest
def get_files_to_upload(url, headers, target_dir):
    """
    Send request to the server to get the collection of files to upload

    :param str url: the url posted to the server
    :param dict headers: the headers in the posted request
    :param str target_dir: the directory path to upload
    :returns:
        - files_upload (:py:class:`list`) - files to upload as (file_path, file_size) tuples
        - process_id (:py:class:`str`) - a unique id of this file deployment process
    :raises GetFileUpdateError: if the server responds with a non-200 status
    """
    manifest = get_files_manifest_in_folder(target_dir)
    buffer = BytesIO()
    buffer.write(json.dumps(manifest).encode('utf-8'))
    buffer.seek(0)
    # NOTE: the original wrapped everything below in ``try: ... except: raise``,
    # a no-op that only obscured tracebacks; exceptions now propagate directly.
    get_files_r = requests.post(url, headers=headers,
                                files={"manifest_client": buffer.getvalue()})
    if get_files_r.status_code != 200:
        raise GetFileUpdateError(get_files_r.text)
    # Parse the response body once instead of re-decoding it per field.
    response = get_files_r.json()
    if not response.get("exist"):
        # upload the whole folder if the course not exist in the server yet
        files_upload = [(target_dir, os.path.getsize(target_dir))]
        PrintColor.info("The course does not exist before. "
                        "The whole directory will be uploaded")
    else:
        # else get the files to add/update
        files_new = response.get("files_new")
        files_update = response.get("files_update")
        files_upload_dict = {**files_new, **files_update}
        files_upload = list()
        for f in list(files_upload_dict.keys()):
            full_path = os.path.join(target_dir, f)
            file_size = os.path.getsize(full_path)
            files_upload.append((full_path, file_size))
        PrintColor.info("The course already exists. "
                        "{} files to upload: {} new files, {} updated files".
                        format(len(files_upload_dict), len(files_new), len(files_update)))
    process_id = response.get("process_id")
    return files_upload, process_id
|
<reponame>rogue26/processy.io
from django.conf import settings
from django.contrib.auth import get_user_model
from django.http import HttpResponseRedirect

from bootstrap_modal_forms.generic import BSModalFormView

from projects.forms import WorkstreamForm, DeliverableForm, TaskForm, TeamMemberForm
from projects.models import Project, Workstream, WorkstreamType, Deliverable, DeliverableType, Task, TaskType, \
    TeamMember, Specification, Condition, Resource
class ConfigureWorkstream(BSModalFormView):
    """Modal form view (django-bootstrap-modal-forms) that edits the
    Workstream identified by the ``item_id`` URL kwarg."""
    template_name = 'modals/add_edit.html'
    form_class = WorkstreamForm
    success_url = '/'
    def dispatch(self, *args, **kwargs):
        """Load the target workstream before any HTTP-method handler runs."""
        # I /guess/ this is the best place to initialize an instance variable?
        self.workstream = Workstream.objects.get(id=self.kwargs['item_id'])
        return super().dispatch(*args, **kwargs)
    def get_form(self, form_class=None):
        """Limit the deliverable choices to the workstream's own project."""
        form = super(ConfigureWorkstream, self).get_form()
        form.fields['deliverables'].queryset = Deliverable.objects.filter(project=self.workstream.project)
        return form
    def get_context_data(self, **kwargs):
        """Expose modal labels plus the ids of currently linked deliverables."""
        # NOTE(review): header text says "Add new" although this view edits an
        # existing workstream - confirm the wording is intended.
        context = super().get_context_data(**kwargs)
        context['project'] = self.workstream.project
        context['header_text'] = "Add new workstream"
        context['button_text'] = "Save workstream"
        selected_deliverables = Deliverable.objects.filter(workstream=self.workstream)
        context["selected_deliverables"] = [_.id for _ in selected_deliverables]
        return context
    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        initial = super().get_initial()
        initial['name'] = self.workstream.name
        initial['category'] = self.workstream.category
        initial['description'] = self.workstream.description
        initial['objective'] = self.workstream.objective
        initial['motivation'] = self.workstream.motivation
        initial['owner'] = self.workstream.owner
        return initial
    def form_valid(self, form):
        """Persist the edited workstream from the raw POST data.

        Only a non-ajax submission (or one flagged asyncUpdate) mutates the
        database; the ajax validation pass falls through to the redirect.
        """
        # NOTE(review): request.is_ajax() was removed in Django 4.0 - confirm
        # the project's Django version still provides it.
        if not self.request.is_ajax() or self.request.POST.get('asyncUpdate') == 'True':
            updated_form_data = self.request.POST
            self.workstream.category = WorkstreamType.objects.get(id=updated_form_data.get('category'))
            self.workstream.name = updated_form_data.get('name')
            self.workstream.description = updated_form_data.get('description')
            self.workstream.objective = updated_form_data.get('objective')
            self.workstream.motivation = updated_form_data.get('motivation')
            self.workstream.owner = updated_form_data.get('owner')
            # Replace the linked deliverables with the checked set from the form.
            deliverables = Deliverable.objects.filter(pk__in=updated_form_data.getlist('deliverables'))
            self.workstream.deliverable_set.set(deliverables)
            self.workstream.save()
        else:
            pass
        return HttpResponseRedirect(self.get_success_url())
class ConfigureDeliverable(BSModalFormView):
    """Modal form view that edits the Deliverable identified by the
    ``item_id`` URL kwarg."""
    template_name = 'modals/add_edit.html'
    form_class = DeliverableForm
    success_url = '/'
    def dispatch(self, *args, **kwargs):
        """Load the target deliverable before any HTTP-method handler runs."""
        # I /guess/ this is the best place to initialize an instance variable?
        self.deliverable = Deliverable.objects.get(id=self.kwargs['item_id'])
        return super().dispatch(*args, **kwargs)
    def get_form(self, form_class=None):
        """Limit the task choices to the deliverable's own project."""
        form = super(ConfigureDeliverable, self).get_form()
        # form.fields['workstreams'].queryset = Workstream.objects.filter(project_id=self.kwargs['project_id'])
        form.fields['tasks'].queryset = Task.objects.filter(project=self.deliverable.project)
        return form
    def get_context_data(self, **kwargs):
        """Expose modal labels plus the ids of the currently linked tasks."""
        context = super().get_context_data(**kwargs)
        context['project'] = self.deliverable.project
        context['header_text'] = "Add new deliverable"
        context['button_text'] = "Save deliverable"
        # todo: write these as one query and get id values
        selected_tasks = Task.objects.filter(deliverable=self.deliverable)
        context["selected_tasks"] = [_.id for _ in selected_tasks]
        return context
    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        initial = super().get_initial()
        initial['name'] = self.deliverable.name
        initial['category'] = self.deliverable.category
        initial['description'] = self.deliverable.description
        initial['scope'] = self.deliverable.scope
        initial['workstream'] = self.deliverable.workstream
        # note - manytomany and foreignkey fields are set by passing the list of currently checked
        # items and setting the appropriate check value in the template
        return initial
    def form_valid(self, form):
        """Persist the edited deliverable and its related sets from POST data.

        Only a non-ajax submission (or one flagged asyncUpdate) mutates the
        database; the ajax validation pass falls through to the redirect.
        """
        if not self.request.is_ajax() or self.request.POST.get('asyncUpdate') == 'True':
            updated_form_data = self.request.POST
            self.deliverable.name = updated_form_data.get('name')
            self.deliverable.category = DeliverableType.objects.get(id=updated_form_data.get('category'))
            self.deliverable.description = updated_form_data.get('description')
            self.deliverable.scope = updated_form_data.get('scope')
            self.deliverable.workstream = Workstream.objects.get(id=updated_form_data.get('workstream'))
            # Replace each related set with the checked items from the form.
            specifications = Specification.objects.filter(pk__in=updated_form_data.getlist('specifications'))
            self.deliverable.specification_set.set(specifications)
            conditions = Condition.objects.filter(pk__in=updated_form_data.getlist('conditions'))
            self.deliverable.condition_set.set(conditions)
            tasks = Task.objects.filter(pk__in=updated_form_data.getlist('tasks'))
            self.deliverable.task_set.set(tasks)
            self.deliverable.save()
        else:
            pass
        return HttpResponseRedirect(self.get_success_url())
class ConfigureTask(BSModalFormView):
    """Modal form view that edits the Task identified by the ``item_id``
    URL kwarg."""
    template_name = 'modals/add_edit.html'
    form_class = TaskForm
    success_url = '/'
    def dispatch(self, *args, **kwargs):
        """Load the target task before any HTTP-method handler runs."""
        # I /guess/ this is the best place to initialize an instance variable?
        self.task = Task.objects.get(id=kwargs['item_id'])
        return super().dispatch(*args, **kwargs)
    def get_form(self, form_class=None):
        """Limit parent-task choices to same-project tasks, excluding self."""
        form = super().get_form()
        # form.fields['deliverables'].queryset = Deliverable.objects.filter(project=self.task.project)
        form.fields['parent_tasks'].queryset = Task.objects.filter(project=self.task.project).exclude(
            id=self.task.id)
        # form.fields['team_members'].queryset = TeamMember.objects.filter(project=self.task.project)
        return form
    def get_context_data(self, **kwargs):
        """Expose modal labels plus the ids of the current parent tasks."""
        context = super().get_context_data(**kwargs)
        context['project'] = self.task.project
        context['header_text'] = "Add new task"
        context['button_text'] = "Save task"
        context["selected_parent_tasks"] = self.task.parent_tasks.values_list('id', flat=True)
        # context["selected_required_resources"] = self.task.required_resources.values_list('id', flat=True)
        return context
    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        initial = super().get_initial()
        initial['name'] = self.task.name
        initial['description'] = self.task.description
        initial['category'] = self.task.category
        initial['baseline_fte_days'] = self.task.baseline_fte_days
        initial['start'] = self.task.start
        initial['end'] = self.task.end
        initial['deliverable'] = self.task.deliverable
        # initial['status'] = current_task.status
        initial['team_member'] = self.task.team_member
        # note - manytomany and foreignkey fields are set by passing the list of currently checked
        # items and setting the appropriate check value in the template
        return initial
    def form_valid(self, form):
        """Persist the edited task and its relations from the raw POST data.

        Only a non-ajax submission (or one flagged asyncUpdate) mutates the
        database; the ajax validation pass falls through to the redirect.
        """
        # NOTE(review): 'start' and 'end' appear in get_initial but are never
        # saved here - confirm whether that omission is intentional.
        if not self.request.is_ajax() or self.request.POST.get('asyncUpdate') == 'True':
            updated_form_data = self.request.POST
            self.task.name = updated_form_data.get('name')
            self.task.description = updated_form_data.get('description')
            self.task.category = TaskType.objects.get(id=updated_form_data.get('category'))
            self.task.baseline_fte_days = updated_form_data.get('baseline_fte_days')
            self.task.status = updated_form_data.get('status')
            # NOTE(review): this assigns the raw POST string to what looks like
            # a relational field, relying on ValueError for the empty case -
            # confirm whether a TeamMember lookup was intended instead.
            try:
                self.task.team_member = updated_form_data.get('team_member')
            except ValueError:
                self.task.team_member = None
            self.task.deliverable = Deliverable.objects.get(id=updated_form_data.get('deliverable'))
            self.task.resources_required.set(
                Resource.objects.filter(pk__in=updated_form_data.getlist('resources_required')))
            self.task.parent_tasks.set(Task.objects.filter(pk__in=updated_form_data.getlist('parent_tasks')))
            # self.task.complexity_drivers.set(
            #     ComplexityDriver.objects.filter(pk__in=updated_form_data.getlist('complexity_drivers')))
            self.task.save()
        else:
            pass
        return HttpResponseRedirect(self.get_success_url())
class ConfigureTeamMember(BSModalFormView):
    """Modal form view that edits the TeamMember identified by the
    ``item_id`` URL kwarg and rewrites their task assignments."""
    template_name = 'modals/add_edit.html'
    form_class = TeamMemberForm
    success_url = '/'

    def dispatch(self, *args, **kwargs):
        """Load the target team member before any HTTP-method handler runs."""
        self.team_member = TeamMember.objects.get(id=kwargs['item_id'])
        return super().dispatch(*args, **kwargs)

    def get_form(self, form_class=None):
        """Restrict user choices to the requester's organization and task
        choices to the member's project."""
        form = super().get_form()
        # BUG FIX: settings.AUTH_USER_MODEL is the "app.Model" dotted-path
        # *string*, which has no .objects manager and raised AttributeError.
        # get_user_model() resolves it to the actual user model class.
        form.fields['user'].queryset = get_user_model().objects.filter(
            organization=self.request.user.organization)
        form.fields['tasks'].queryset = Task.objects.filter(project=self.team_member.project)
        return form

    def get_context_data(self, **kwargs):
        """Expose modal labels plus the member's currently assigned task ids."""
        context = super().get_context_data(**kwargs)
        context['header_text'] = "Add new"
        context['button_text'] = "Add"
        context["selected_tasks"] = Task.objects \
            .filter(team_member=self.team_member) \
            .values_list('id', flat=True)
        return context

    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        initial = super().get_initial()
        initial['user'] = self.team_member.user
        initial['first_name'] = self.team_member.first_name
        initial['last_name'] = self.team_member.last_name
        initial['project_availability'] = self.team_member.project_availability
        # note - manytomany and foreignkey fields are set by passing the list of currently checked
        # items and setting the appropriate check value in the template
        return initial

    def form_valid(self, form):
        """Persist member edits and rewrite their task assignments.

        Only a non-ajax submission (or one flagged asyncUpdate) mutates the
        database; the ajax validation pass falls through to the redirect.
        """
        if not self.request.is_ajax() or self.request.POST.get('asyncUpdate') == 'True':
            self.team_member.first_name = self.request.POST.get('first_name')
            self.team_member.last_name = self.request.POST.get('last_name')
            self.team_member.project_availability = self.request.POST.get('project_availability')
            self.team_member.save()
            # Detach every previously assigned task, then attach the checked set.
            task_ids_assigned = self.request.POST.getlist('tasks')
            team_member_tasks = Task.objects.filter(team_member=self.team_member)
            team_member_tasks.update(team_member=None)
            tasks_assigned = Task.objects.filter(id__in=task_ids_assigned)
            tasks_assigned.update(team_member=self.team_member)
            # Re-save each affected task so any Task.save() side effects run
            # (queryset.update() bypasses save()).
            tasks_to_be_saved = team_member_tasks | tasks_assigned
            for task in tasks_to_be_saved.distinct():
                task.save()
        else:
            pass
        return HttpResponseRedirect(self.get_success_url())
|
# coding: utf-8
"""
Fulfillment API
Use the Fulfillment API to complete the process of packaging, addressing, handling, and shipping each order on behalf of the seller, in accordance with the payment method and timing specified at checkout. # noqa: E501
OpenAPI spec version: v1.19.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CancelStatus(object):
    """Cancellation state of an order (Swagger-generated Fulfillment model).

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    swagger_types = {
        'cancel_requests': 'list[CancelRequest]',
        'cancel_state': 'str',
        'cancelled_date': 'str'
    }

    attribute_map = {
        'cancel_requests': 'cancelRequests',
        'cancel_state': 'cancelState',
        'cancelled_date': 'cancelledDate'
    }

    def __init__(self, cancel_requests=None, cancel_state=None, cancelled_date=None):  # noqa: E501
        """Build a CancelStatus, storing only the explicitly supplied fields."""
        self._cancel_requests = None
        self._cancel_state = None
        self._cancelled_date = None
        self.discriminator = None
        if cancel_requests is not None:
            self.cancel_requests = cancel_requests
        if cancel_state is not None:
            self.cancel_state = cancel_state
        if cancelled_date is not None:
            self.cancelled_date = cancelled_date

    @property
    def cancel_requests(self):
        """list[CancelRequest]: details of buyer cancellation requests.

        Returned empty by getOrders; fully populated by getOrder.  # noqa: E501
        """
        return self._cancel_requests

    @cancel_requests.setter
    def cancel_requests(self, cancel_requests):
        """Set the cancel_requests of this CancelStatus."""
        self._cancel_requests = cancel_requests

    @property
    def cancel_state(self):
        """str: cancellation state; NONE_REQUESTED when no requests exist.

        See the eBay CancelStateEnum documentation for the value set.  # noqa: E501
        """
        return self._cancel_state

    @cancel_state.setter
    def cancel_state(self, cancel_state):
        """Set the cancel_state of this CancelStatus."""
        self._cancel_state = cancel_state

    @property
    def cancelled_date(self):
        """str: cancellation timestamp in ISO 8601 UTC, if applicable.

        Format: [YYYY]-[MM]-[DD]T[hh]:[mm]:[ss].[sss]Z  # noqa: E501
        """
        return self._cancelled_date

    @cancelled_date.setter
    def cancelled_date(self, cancelled_date):
        """Set the cancelled_date of this CancelStatus."""
        self._cancelled_date = cancelled_date

    def to_dict(self):
        """Return the model properties as a dict."""
        def _convert(value):
            # Recurse into nested models, lists and dicts of models.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(CancelStatus, dict):
            result.update(self.items())
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is an equal CancelStatus."""
        return isinstance(other, CancelStatus) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self == other
|
import hashlib
import settings
import os
import converter
import shutil
class MediaTask:
    """One transcode job for a single input media file.

    The output filename is the MD5 hash of the input path, so the same
    source always maps to the same file in the out/ok/ko/kill directories
    (configured in ``settings``). ``run()`` drives the pre-checks and the
    actual conversion via ``converter.Converter``.
    """

    def __init__(self, filepath_in):
        # Path of the source media file; every other path derives from it.
        self.filepath_in = filepath_in
        self._prepare_filepaths_out(filepath_in)

    def _prepare_filepaths_out(self, filepath_in):
        """Compute the hashed output filename and its per-state paths."""
        # MD5 of the input path yields a stable, filesystem-safe name.
        self.filename_out = hashlib.md5(filepath_in.encode('utf-8')).hexdigest() + "." + settings.fileextension_out
        self.filepath_out = os.path.join(settings.out_dir, self.filename_out)    # conversion result
        self.filepath_ok = os.path.join(settings.ok_dir, self.filename_out)      # user-approved result
        self.filepath_ko = os.path.join(settings.ko_dir, self.filename_out)      # user-rejected result
        self.filepath_kill = os.path.join(settings.kill_dir, self.filename_out)  # marker: delete source

    def run(self):
        """Run all pre-checks and convert the file if it qualifies.

        The checks communicate by exception: RuntimeError means "skip
        silently" (expected condition), KeyError means "skip and report"
        (already converted).
        """
        filesize_before_mb = os.path.getsize(self.filepath_in) / (1024 * 1024)
        #print(self.filepath_in + " ({:0.1f} MB):".format(filesize_before_mb))
        try:
            self.check_filesize_in()
            self.check_not_already_done()
            if not self.check_kill_file():
                if not self.check_replace_original():
                    conv = converter.Converter(self.filepath_in, self.filepath_out)
                    self.check_original_codec(conv)
                    self.convert(conv)
        except RuntimeError as error:
            # Expected skip condition -- deliberately quiet.
            #print("\tIgnored : " + str(error))
            pass
        except KeyError as error:
            print(self.filepath_in + " ({:0.1f} MB):".format(filesize_before_mb))
            print("\tIgnored : " + str(error))

    def check_filesize_in(self):
        """Reject inputs outside the [min, max] MB window from settings."""
        filesize_before_mb = os.path.getsize(self.filepath_in) / (1024 * 1024)
        if (filesize_before_mb < settings.filesize_in_min_mb):
            raise RuntimeError("Input filesize too small (< {:0.1f} MB)".format(settings.filesize_in_min_mb))
        elif (filesize_before_mb > settings.filesize_in_max_mb):
            raise RuntimeError("Input filesize too big (> {:0.1f} MB)".format(settings.filesize_in_max_mb))

    def check_not_already_done(self):
        """Skip files that already have a result in OUT or KO."""
        if os.path.isfile(self.filepath_out):
            filesize_after_mb = os.path.getsize(self.filepath_out) / (1024 * 1024)
            # KeyError -> reported skip (see run()).
            raise KeyError("Already converted in OUT : {} ({:0.1f} MB)".format(self.filename_out, filesize_after_mb))
        elif os.path.isfile(self.filepath_ko):
            filesize_after_mb = os.path.getsize(self.filepath_ko) / (1024 * 1024)
            raise RuntimeError("Rejected by user in KO : {} ({:0.1f} MB)".format(self.filename_out, filesize_after_mb))

    def check_kill_file(self):
        """If a kill marker exists, delete source and marker; return True."""
        if os.path.isfile(self.filepath_kill):
            os.remove(self.filepath_in)
            os.remove(self.filepath_kill)
            print("Killed")
            return True
        return False

    def check_replace_original(self):
        """If an approved result exists in OK, move it over the original.

        Copies the original's file metadata onto the result first, removes
        the original plus its sidecar files (.modd, .tnl, .thm), then moves
        the OK file into the original's place. Returns True when replaced.
        """
        if os.path.isfile(self.filepath_ok):
            filesize_after_mb = os.path.getsize(self.filepath_ok) / (1024 * 1024)
            # Preserve original timestamps/permissions before deleting it.
            shutil.copystat(self.filepath_in, self.filepath_ok)
            os.remove(self.filepath_in)
            if os.path.isfile(self.filepath_in + ".modd"):
                os.remove(self.filepath_in + ".modd")
            if os.path.isfile(self.filepath_in + ".tnl"):
                os.remove(self.filepath_in + ".tnl")
            if os.path.isfile(self.filepath_in.replace(".MP4", ".thm")):
                os.remove(self.filepath_in.replace(".MP4", ".thm"))
            shutil.move(self.filepath_ok, self.filepath_in)
            print("Replaced by {} ({:0.1f} MB)".format(self.filepath_ok, filesize_after_mb))
            return True
        return False

    def check_original_codec(self, conv):
        """Skip sources already in H.265 or already below the bitrate cap."""
        if conv.mediainfo.video_codec_name == 'h265' or conv.mediainfo.video_codec_name == 'hevc':
            raise RuntimeError("Codec already H265")
        elif (conv.mediainfo.bit_rate / 1000 < settings.maxbitrate_kbits):
            raise RuntimeError("Bitrate {:0.1f} < {:d} Kbits/s".format(conv.mediainfo.bit_rate / 1000, settings.maxbitrate_kbits))

    def convert(self, conv):
        """Run the conversion and sanity-check / report the output size."""
        conv.run()
        if os.path.isfile(self.filepath_in):
            filesize_before_mb = os.path.getsize(self.filepath_in) / (1024 * 1024)
            filesize_after = os.path.getsize(self.filepath_out)
            filesize_after_mb = filesize_after / (1024 * 1024)
            # < ~1 KB output means the encoder produced an empty/garbage file.
            if filesize_after_mb < 0.001:
                raise RuntimeError("Output file size problem ({} : {:d} bytes)".format(self.filepath_out, filesize_after))
            else:
                print("Output size : {:0.1f} MB ({:0.1f}x smaller)".format(filesize_after_mb, filesize_before_mb / filesize_after_mb))
        else:
            raise RuntimeError("Output file not found after conversion ({})".format(self.filepath_out))
|
from .nn import NN
from .. import activations
from .. import initializers
from .. import regularizers
from ... import config
from ...backend import tf
from ...utils import timing
class MfNN(NN):
    """Multifidelity neural networks.

    Builds a TF1-style graph with a low-fidelity subnetwork and a
    high-fidelity correction that combines a linear branch and a nonlinear
    branch, each gated by a trainable tanh-squashed scalar.
    """

    def __init__(
        self,
        layer_sizes_low_fidelity,
        layer_sizes_high_fidelity,
        activation,
        kernel_initializer,
        regularization=None,
        residue=False,
        trainable_low_fidelity=True,
        trainable_high_fidelity=True,
    ):
        super().__init__()
        # Layer widths; [0] of the low-fidelity list is the input size,
        # [-1] of each list is the corresponding output size.
        self.layer_size_lo = layer_sizes_low_fidelity
        self.layer_size_hi = layer_sizes_high_fidelity
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.regularizer = regularizers.get(regularization)
        # When True, the high-fidelity output is a scaled residual added to
        # the low-fidelity output (see build()).
        self.residue = residue
        self.trainable_lo = trainable_low_fidelity
        self.trainable_hi = trainable_high_fidelity

    @property
    def inputs(self):
        """Input placeholder (created in build())."""
        return self.X

    @property
    def outputs(self):
        """[low-fidelity output, high-fidelity output]."""
        return [self.y_lo, self.y_hi]

    @property
    def targets(self):
        """[low-fidelity target placeholder, high-fidelity target placeholder]."""
        return [self.target_lo, self.target_hi]

    @timing
    def build(self):
        """Construct the multifidelity graph (placeholders, nets, outputs)."""
        print("Building multifidelity neural network...")
        self.X = tf.placeholder(config.real(tf), [None, self.layer_size_lo[0]])

        # Low fidelity: plain feed-forward net on X.
        y = self.X
        for i in range(len(self.layer_size_lo) - 2):
            y = self._dense(
                y,
                self.layer_size_lo[i + 1],
                activation=self.activation,
                regularizer=self.regularizer,
                trainable=self.trainable_lo,
            )
        # Final low-fidelity layer has no activation (linear output).
        self.y_lo = self._dense(
            y,
            self.layer_size_lo[-1],
            regularizer=self.regularizer,
            trainable=self.trainable_lo,
        )

        # High fidelity: takes X concatenated with the low-fidelity output.
        X_hi = tf.concat([self.X, self.y_lo], 1)
        # Linear branch: single dense layer straight to the output size.
        y_hi_l = self._dense(X_hi, self.layer_size_hi[-1], trainable=self.trainable_hi)
        # Nonlinear branch: hidden layers with activation.
        y = X_hi
        for i in range(len(self.layer_size_hi) - 1):
            y = self._dense(
                y,
                self.layer_size_hi[i],
                activation=self.activation,
                regularizer=self.regularizer,
                trainable=self.trainable_hi,
            )
        # Output layer of the nonlinear branch; bias-free.
        y_hi_nl = self._dense(
            y,
            self.layer_size_hi[-1],
            use_bias=False,
            regularizer=self.regularizer,
            trainable=self.trainable_hi,
        )

        # Combine branches. alpha* are trainable scalars squashed by tanh,
        # initialized at 0 so the nonlinear/residual parts start switched off.
        if not self.residue:
            alpha = tf.Variable(0, dtype=config.real(tf), trainable=self.trainable_hi)
            alpha = activations.get("tanh")(alpha)
            self.y_hi = y_hi_l + alpha * y_hi_nl
        else:
            alpha1 = tf.Variable(0, dtype=config.real(tf), trainable=self.trainable_hi)
            alpha1 = activations.get("tanh")(alpha1)
            alpha2 = tf.Variable(0, dtype=config.real(tf), trainable=self.trainable_hi)
            alpha2 = activations.get("tanh")(alpha2)
            # Residual form: high fidelity = low fidelity + damped correction.
            self.y_hi = self.y_lo + 0.1 * (alpha1 * y_hi_l + alpha2 * y_hi_nl)

        self.target_lo = tf.placeholder(config.real(tf), [None, self.layer_size_lo[-1]])
        self.target_hi = tf.placeholder(config.real(tf), [None, self.layer_size_hi[-1]])
        self.built = True

    def _dense(
        self,
        inputs,
        units,
        activation=None,
        use_bias=True,
        regularizer=None,
        trainable=True,
    ):
        """Thin wrapper over tf.layers.dense with the shared initializer."""
        return tf.layers.dense(
            inputs,
            units,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=regularizer,
            trainable=trainable,
        )
|
import matplotlib
matplotlib.use('Agg')
import os
from utils import check_dir
import numpy as np
import scipy
import matplotlib.pyplot as plt
from time import time
from models import BIVA
class DeepVAEEvaluator(object):
    """Per-epoch evaluation helpers for a trained BIVA deep VAE.

    Holds a fixed evaluation image set and provides (a) an
    importance-weighted ELBO estimate over the whole set and (b) a
    sample-generation snapshot saved as an image grid.
    """

    def __init__(self, images, n_images=5, iw_samples=1000, eval_every=1, preprocess_batch=lambda x: x, seed=1234):
        # images is a 4-D array: (N, height, width, channels).
        self.number, self.height, self.width, self.channels = images.shape
        self.rng = np.random.RandomState(seed)
        self.images = images
        # n_images is the grid side length; store the total tile count of
        # the n_images x n_images grid.
        self.n_images = n_images ** 2
        self.iw_samples = iw_samples
        # Only run the evaluators on epochs divisible by eval_every.
        self.eval_every = eval_every
        self.preprocess_batch = preprocess_batch

    def deep_vae_iw5000(self, model, epoch):
        """Estimate the importance-weighted ELBO over the evaluation set.

        Runs the model's loss op one image at a time with iw_samples
        importance samples, skipping NaN batches, and logs the running
        and final mean.
        """
        assert isinstance(model, BIVA), "The model is not an instance of Deep VAE."
        if not epoch % self.eval_every == 0: return
        now = time()
        batch_size = 1
        validation_losses = []
        for update in range(int(self.number / batch_size)):
            batch_validation = self.preprocess_batch(self.images[update * batch_size: (update + 1) * batch_size])
            batch_losses = model.session.run(model.op_loss,
                                             feed_dict={model.ph_input: batch_validation, model.ph_temp: 1.,
                                                        model.ph_eq: 1, model.ph_iw: self.iw_samples,
                                                        model.ph_is_training: False})
            # Importance sampling can produce NaN for some samples; skip them
            # so a single bad batch does not poison the mean.
            if np.isnan(np.mean(batch_losses)):
                model.logger.info("bad sample")
                continue
            validation_losses += [np.mean(batch_losses)]
            if update % 100 == 0:
                print(
                    "updates: {}/{}, elapsed time: {:.2f}, elbo: {:.3f}".format(update * batch_size, self.number,
                                                                                time() - now,
                                                                                np.nanmean(validation_losses)))
        model.logger.info("\nepoch: {}, iw: {}, elapsed time: {:.2f}, elbo: {:.3f}\n".format(epoch, self.iw_samples,
                                                                                             time() - now,
                                                                                             np.nanmean(
                                                                                                 validation_losses)))

    def deep_vae_generate_evaluator(self, model, epoch):
        """Sample z from N(0, 1), decode, and save the grid of generations."""
        assert isinstance(model, BIVA), "The model is not an instance of Deep VAE."
        if not epoch % self.eval_every == 0: return
        # Compute the shape of the highest latent variable by running one
        # image through the inference network.
        z_top_shp = model.session.run(model.q_layers_spatial[-1][0], feed_dict={model.ph_input: self.images[:1],
                                                                                model.ph_eq: 1, model.ph_iw: 1,
                                                                                model.ph_is_training: False}).shape
        # Sampled reconstruction from z_L
        z = self.rng.normal(0, 1, [self.n_images] + list(z_top_shp)[1:])
        px_z = model.session.run(model.op_generate,
                                 feed_dict={model.q_layers_spatial[-1][0]: z, model.ph_is_training: False})
        out_dir = check_dir(os.path.join(model.model_path, "generations"))
        path = os.path.join(out_dir, 'epoch_{}_out.png'.format(epoch))
        # Module-level helpers handle the image tiling; color vs gray-scale
        # depends on the channel count. NOTE(review): assumes px_z values are
        # in [0, 1] (scaled by 255 for color) -- confirm against the model.
        if self.channels > 1:
            save_images(px_z * 255, int(np.sqrt(self.n_images)), self.channels, self.height, self.width, path)
        else:
            save_gray_scale(px_z, int(np.sqrt(self.n_images)), self.height, self.width, path)
def save_gray_scale(images, count, height, width, path):
    """Tile ``count`` x ``count`` gray-scale images into one grid and save it.

    :param images: sequence of at least count**2 flattenable images, each
        reshapeable to (height, width).
    :param count: number of tiles per grid side.
    :param height: tile height in pixels.
    :param width: tile width in pixels.
    :param path: output image file path.
    """
    i = 0
    img_out = np.zeros((height * count, width * count))
    for x in range(count):
        for y in range(count):
            xa, xb = x * width, (x + 1) * width
            ya, yb = y * height, (y + 1) * height
            im = np.reshape(images[i], (height, width))
            img_out[ya:yb, xa:xb] = im
            i += 1
    # matshow creates its own figure; the previous extra plt.figure() call
    # leaked one empty figure per invocation and is removed.
    plt.matshow(img_out, cmap="gray")
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    plt.savefig(path)
    # Close the current figure so repeated per-epoch calls do not accumulate
    # open figures (matplotlib keeps them alive until closed).
    plt.close()
def save_images(images, count, channels, height, width, path):
    """Arrange count**2 color images into a grid and save them to *path*.

    NOTE(review): ``scipy.misc.toimage`` was removed in SciPy 1.2; this
    function requires an older SciPy with Pillow installed.
    """
    # (count*count, H, W, C) -> (count, count, H, W, C)
    grid = images.reshape((count, count, height, width, channels))
    # Interleave tiles so rows of tiles become contiguous pixel rows.
    grid = grid.transpose(1, 2, 0, 3, 4)
    grid = grid.reshape((height * count, width * count, channels))
    scipy.misc.toimage(grid, cmin=0.0, cmax=255.0).save(path)
|
"""
Copy files for all finished simulations to a new directory, change seed number and job name.
python add_run.py RUN1 RUN1-2
"""
import os
import sys
import glob
import shutil
def check_finished(sim_dir, file_name='lammps_out.txt'):
    """Report and return whether the LAMMPS run in *sim_dir* completed.

    Prints a one-line status for the directory and returns True only when
    the last line of the LAMMPS output file reports the total wall time.
    """
    base = os.path.basename(sim_dir)
    contents = os.listdir(sim_dir)
    if file_name not in contents:
        print('%-20s -> Lammps out file not found' % base)
        return False
    with open(os.path.join(sim_dir, file_name), 'r') as out_file:
        out_lines = out_file.readlines()
    if not out_lines:
        # Output file exists but is empty: the job never started running.
        print('%-20s -> NOT started' % base)
        return False
    last_line = out_lines[-1]
    if 'Total wall time' in last_line:
        walltime = last_line.split()[-1]
        print('%-20s -> finished in %s' % (base, walltime))
        return True
    if any('log' in f for f in contents):
        # A log file exists, so the run started but did not finish.
        print('%-20s -> NOT completed' % base)
    else:
        print('%-20s -> ERROR' % base)
    return False
def change_seed(source_input, dest_input, seed=None, add_seed=1):
    """Copy a LAMMPS input file, rewriting its seed variable.

    :param source_input: path of the input file to read.
    :param dest_input: path of the modified file to write.
    :param seed: explicit new seed; when None, the old seed plus *add_seed*.
    :param add_seed: increment applied to the old seed when *seed* is None.
    :raises ValueError: if no 'seed equal' line is present in the source.
    """
    with open(source_input, 'r') as inp_src:
        input_lines = inp_src.readlines()
    seed_index = None
    for i, line in enumerate(input_lines):
        if 'seed equal' in line:
            seed_index = i  # last matching line wins, as before
    if seed_index is None:
        # Previously this fell through to a confusing NameError on seed_index.
        raise ValueError("No 'seed equal' line found in %s" % source_input)
    if seed is None:
        # Line format: 'variable seed equal <int>' -> token [3] is the value.
        seed = int(input_lines[seed_index].split()[3]) + add_seed
    input_lines[seed_index] = 'variable seed equal %i\n' % seed
    with open(dest_input, 'w') as inp_dest:
        inp_dest.writelines(input_lines)
    return None
def change_job_name(source_input, dest_input, job_name=None, run=None):
    """Copy a Slurm submission script, rewriting its --job-name line.

    When *job_name* is None and *run* is given, the new name is the old
    name with '-<run>' appended.
    """
    with open(source_input, 'r') as script_in:
        script_lines = script_in.readlines()
    for idx, text in enumerate(script_lines):
        if '--job-name' in text:
            job_index = idx  # last matching line wins, as in change_seed
    if job_name is None and run is not None:
        old_name = script_lines[job_index].split('=')[1].strip()
        job_name = '%s-%i' % (old_name, int(run))
    script_lines[job_index] = '#SBATCH --job-name=%s\n' % job_name
    with open(dest_input, 'w') as script_out:
        script_out.writelines(script_lines)
    return None
# --- Script entry: python add_run.py <run_dir> <new_run> (e.g. RUN1 RUN1-2) ---
run_dir = sys.argv[1]   # existing run directory
new_run = sys.argv[2]   # new run directory; suffix after '-' is the run id
run_id = int(new_run.split('-')[1])
# Finished-run outputs live on the ZFS mount, mirrored by MOF name.
zfs_dir = '/zfs1/7/cwilmer/kbs37/Lammps/TC'
zfs_run_dir = os.path.join(zfs_dir, run_dir)
if os.path.isdir(new_run):
    del_new_run = input('New run directory exists, delete?: ')
    if del_new_run in ['y', 'Y', 'yes']:
        shutil.rmtree(new_run)
    # NOTE(review): if the user declines, os.makedirs below will raise for
    # any MOF directory that already exists -- confirm intended behavior.
results = []
for mof in os.listdir(run_dir):
    mof_dir = os.path.join(run_dir, mof)
    zfs_mof_dir = os.path.join(zfs_run_dir, mof)
    # Only re-seed simulations whose previous run completed.
    finished = check_finished(zfs_mof_dir)
    if finished:
        results.append(finished)
        # Make new directory
        new_mof_dir = os.path.join(new_run, mof)
        os.makedirs(new_mof_dir)
        # Copy all files
        sim_files = []
        for sf in ['in.*', 'job.*', 'data.*', 'simpar.yaml']:
            sim_files += glob.glob(os.path.join(mof_dir, sf))
        if len(sim_files) != 4:
            print('Simulation files are not 4', sim_files)
        for simf in sim_files:
            shutil.copy(simf, new_mof_dir)
        # Change seed number (incremented by the run id so each run differs).
        inp_file = os.path.join(mof_dir, 'in.%s' % mof)
        new_inp_file = os.path.join(new_mof_dir, 'in.%s' % mof)
        change_seed(inp_file, new_inp_file, add_seed=run_id)
        # Change job name (append '-<run_id>' to the old Slurm job name).
        job_file = os.path.join(mof_dir, 'job.%s' % mof)
        new_job_file = os.path.join(new_mof_dir, 'job.%s' % mof)
        change_job_name(job_file, new_job_file, run=run_id)
|
"""CmdStan method variational tests"""
import os
import unittest
from math import fabs
import pytest
from testfixtures import LogCapture
from cmdstanpy.cmdstan_args import CmdStanArgs, VariationalArgs
from cmdstanpy.model import CmdStanModel
from cmdstanpy.stanfit import CmdStanVB, RunSet, from_csv
HERE = os.path.dirname(os.path.abspath(__file__))
DATAFILES_PATH = os.path.join(HERE, 'data')
class CmdStanVBTest(unittest.TestCase):
    """Tests for CmdStanVB objects built from runsets, CSV files, and fits."""

    # pylint: disable=no-self-use
    @pytest.fixture(scope='class', autouse=True)
    def do_clean_up(self):
        """Remove compiler artifacts left in the variational data dir."""
        for root, _, files in os.walk(
            os.path.join(DATAFILES_PATH, 'variational')
        ):
            for filename in files:
                _, ext = os.path.splitext(filename)
                if ext.lower() in ('.o', '.d', '.hpp', '.exe', '') and (
                    filename != ".gitignore"
                ):
                    filepath = os.path.join(root, filename)
                    os.remove(filepath)

    def test_instantiate(self):
        """CmdStanVB built from a hand-made RunSet exposes names/estimates."""
        stan = os.path.join(
            DATAFILES_PATH, 'variational', 'eta_should_be_big.stan'
        )
        model = CmdStanModel(stan_file=stan)
        no_data = {}
        args = VariationalArgs(algorithm='meanfield')
        cmdstan_args = CmdStanArgs(
            model_name=model.name,
            model_exe=model.exe_file,
            chain_ids=None,
            data=no_data,
            method_args=args,
        )
        runset = RunSet(args=cmdstan_args, chains=1)
        runset._csv_files = [
            os.path.join(DATAFILES_PATH, 'variational', 'eta_big_output.csv')
        ]
        variational = CmdStanVB(runset)
        self.assertIn(
            'CmdStanVB: model=eta_should_be_big', variational.__repr__()
        )
        self.assertIn('method=variational', variational.__repr__())
        self.assertEqual(
            variational.column_names,
            ('lp__', 'log_p__', 'log_g__', 'mu[1]', 'mu[2]'),
        )
        self.assertAlmostEqual(
            variational.variational_params_dict['mu[1]'], 31.0299, places=2
        )
        self.assertAlmostEqual(
            variational.variational_params_dict['mu[2]'], 28.8141, places=2
        )
        self.assertEqual(variational.variational_sample.shape, (1000, 5))

    def test_instantiate_from_csvfiles(self):
        """from_csv on the variational output dir rebuilds the same fit."""
        csvfiles_path = os.path.join(DATAFILES_PATH, 'variational')
        variational = from_csv(path=csvfiles_path)
        self.assertIn(
            'CmdStanVB: model=eta_should_be_big', variational.__repr__()
        )
        self.assertIn('method=variational', variational.__repr__())
        self.assertEqual(
            variational.column_names,
            ('lp__', 'log_p__', 'log_g__', 'mu[1]', 'mu[2]'),
        )
        self.assertAlmostEqual(
            variational.variational_params_dict['mu[1]'], 31.0299, places=2
        )
        self.assertAlmostEqual(
            variational.variational_params_dict['mu[2]'], 28.8141, places=2
        )
        self.assertEqual(variational.variational_sample.shape, (1000, 5))

    def test_variables(self):
        """stan_variable/metadata for a fit with one vector-valued variable."""
        # pylint: disable=C0103
        stan = os.path.join(
            DATAFILES_PATH, 'variational', 'eta_should_be_big.stan'
        )
        model = CmdStanModel(stan_file=stan)
        variational = model.variational(algorithm='meanfield', seed=12345)
        self.assertEqual(
            variational.column_names,
            ('lp__', 'log_p__', 'log_g__', 'mu[1]', 'mu[2]'),
        )
        self.assertEqual(1, len(variational.metadata.stan_vars_dims))
        self.assertTrue('mu' in variational.metadata.stan_vars_dims)
        self.assertEqual(variational.metadata.stan_vars_dims['mu'], (2,))
        mu = variational.stan_variable(var='mu')
        self.assertEqual(mu.shape, (2,))
        with self.assertRaises(ValueError):
            variational.stan_variable(var='eta')
        with self.assertRaises(ValueError):
            variational.stan_variable(var='lp__')

    def test_variables_3d(self):
        """stan_variable(s) for multi-dimensional generated quantities."""
        # construct fit using existing sampler output
        stan = os.path.join(DATAFILES_PATH, 'multidim_vars.stan')
        jdata = os.path.join(DATAFILES_PATH, 'logistic.data.R')
        multidim_model = CmdStanModel(stan_file=stan)
        multidim_variational = multidim_model.variational(
            data=jdata,
            seed=1239812093,
            algorithm='meanfield',
        )
        self.assertEqual(3, len(multidim_variational.metadata.stan_vars_dims))
        self.assertTrue('y_rep' in multidim_variational.metadata.stan_vars_dims)
        self.assertEqual(
            multidim_variational.metadata.stan_vars_dims['y_rep'], (5, 4, 3)
        )
        var_y_rep = multidim_variational.stan_variable(var='y_rep')
        self.assertEqual(var_y_rep.shape, (5, 4, 3))
        var_beta = multidim_variational.stan_variable(var='beta')
        self.assertEqual(var_beta.shape, (2,))  # 1-element tuple
        var_frac_60 = multidim_variational.stan_variable(var='frac_60')
        self.assertEqual(var_frac_60.shape, ())
        # renamed from `vars`, which shadowed the builtin
        variables = multidim_variational.stan_variables()
        self.assertEqual(
            len(variables), len(multidim_variational.metadata.stan_vars_dims)
        )
        self.assertTrue('y_rep' in variables)
        self.assertEqual(variables['y_rep'].shape, (5, 4, 3))
        self.assertTrue('beta' in variables)
        self.assertEqual(variables['beta'].shape, (2,))
        self.assertTrue('frac_60' in variables)
        self.assertEqual(variables['frac_60'].shape, ())
        with self.assertRaises(ValueError):
            multidim_variational.stan_variable(var='beta', name='yrep')
        with LogCapture() as log:
            self.assertEqual(
                multidim_variational.stan_variable(name='beta').shape, (2,)
            )
        log.check_present(
            (
                'cmdstanpy',
                'WARNING',
                'Keyword "name" is deprecated, use "var" instead.',
            )
        )
class VariationalTest(unittest.TestCase):
    """End-to-end tests that actually run CmdStan's ADVI algorithm."""

    # pylint: disable=no-self-use
    @pytest.fixture(scope='class', autouse=True)
    def do_clean_up(self):
        """Remove compiler artifacts left in the variational data dir."""
        for root, _, files in os.walk(
            os.path.join(DATAFILES_PATH, 'variational')
        ):
            for filename in files:
                _, ext = os.path.splitext(filename)
                if ext.lower() in ('.o', '.d', '.hpp', '.exe', '') and (
                    filename != ".gitignore"
                ):
                    filepath = os.path.join(root, filename)
                    os.remove(filepath)

    def test_variational_good(self):
        """Fitted params agree across the np, dict, and pd accessors."""
        stan = os.path.join(
            DATAFILES_PATH, 'variational', 'eta_should_be_big.stan'
        )
        model = CmdStanModel(stan_file=stan)
        variational = model.variational(algorithm='meanfield', seed=12345)
        self.assertEqual(
            variational.column_names,
            ('lp__', 'log_p__', 'log_g__', 'mu[1]', 'mu[2]'),
        )
        self.assertAlmostEqual(
            variational.variational_params_np[3], 31.0418, places=2
        )
        self.assertAlmostEqual(
            variational.variational_params_np[4], 27.4463, places=2
        )
        self.assertAlmostEqual(
            variational.variational_params_dict['mu[1]'], 31.0418, places=2
        )
        self.assertAlmostEqual(
            variational.variational_params_dict['mu[2]'], 27.4463, places=2
        )
        self.assertEqual(
            variational.variational_params_np[0],
            variational.variational_params_pd['lp__'][0],
        )
        self.assertEqual(
            variational.variational_params_np[3],
            variational.variational_params_pd['mu[1]'][0],
        )
        self.assertEqual(
            variational.variational_params_np[4],
            variational.variational_params_pd['mu[2]'][0],
        )
        self.assertEqual(variational.variational_sample.shape, (1000, 5))

    def test_variational_missing_args(self):
        # NOTE(review): placeholder test -- asserts nothing of substance.
        self.assertTrue(True)

    def test_variational_eta_small(self):
        """ADVI on a model whose adapted eta should come out small."""
        stan = os.path.join(
            DATAFILES_PATH, 'variational', 'eta_should_be_small.stan'
        )
        model = CmdStanModel(stan_file=stan)
        variational = model.variational(algorithm='meanfield', seed=12345)
        self.assertEqual(
            variational.column_names,
            ('lp__', 'log_p__', 'log_g__', 'mu[1]', 'mu[2]'),
        )
        self.assertAlmostEqual(
            fabs(variational.variational_params_dict['mu[1]']), 0.08, places=1
        )
        self.assertAlmostEqual(
            fabs(variational.variational_params_dict['mu[2]']), 0.09, places=1
        )
        # NOTE(review): trailing no-op assertion; kept for compatibility.
        self.assertTrue(True)

    def test_variational_eta_fail(self):
        """Non-converged ADVI raises unless require_converged=False."""
        stan = os.path.join(
            DATAFILES_PATH, 'variational', 'eta_should_fail.stan'
        )
        model = CmdStanModel(stan_file=stan)
        with self.assertRaisesRegex(
            RuntimeError,
            r'algorithm may not have converged\.\n.*require_converged',
        ):
            model.variational(algorithm='meanfield', seed=12345)
        # With require_converged=False it proceeds but logs a warning.
        with LogCapture() as log:
            model.variational(
                algorithm='meanfield', seed=12345, require_converged=False
            )
        log.check_present(
            (
                'cmdstanpy',
                'WARNING',
                'The algorithm may not have converged.\n'
                'Proceeding because require_converged is set to False',
            )
        )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
"""
This script contains a function that trains a model with given parameters and saves it.
"""
import os
import pickle
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint
from src.BRAVENET import bravenet_config
from src.BRAVENET.utils.architectures.bravenet import get_bravenet
from src.BRAVENET.utils.architectures.helper_architecture import ds_loss
from src.generalutils import helper
from src.BRAVENET.utils.datagenerator import DataGenerator
print("TensorFlow version: ", tf.__version__)
def train(version, n_epochs, batch_size, lr, dr, l1, l2, bn, deconvolution, n_base_filters,
          depth, filter_size, activation, final_activation, n_classes, optimizer, loss_function, metrics,
          n_train_patients, n_val_patients, checkpoint_model, models_dir,
          train_feature_files, train_label_files, val_feature_files, val_label_files,
          train_feature_files_big=None, train_label_files_big=None, val_feature_files_big=None,
          val_label_files_big=None, normalize_features=True,
          max_values_for_normalization=None, transfer_learning=False, transfer_weights_path=None):
    """
    Trains one model with given samples and given parameters and saves it.
    :param version: The name describes the net version. String.
    :param n_epochs: The number of epochs. Positive integer.
    :param batch_size: The size of one mini-batch. Positive integer.
    :param lr: The learning rate. Positive float.
    :param dr: The dropout rate. Positive float or None.
    :param l1: The L1 regularization. Positive float or None.
    :param l2: The L2 regularization. Positive float or None.
    :param bn: True for training with batch normalization. Boolean.
    :param deconvolution: True for using deconvolution instead of up-sampling layer. Boolean.
    :param n_base_filters: The number of filters in the first convolutional layer of the net. Positive integer.
    :param depth: The number of levels of the net. Positive integer.
    :param filter_size: The size of the 3D convolutional filters. Tuple of three positive integers.
    :param activation: The activation after the convolutional layers. String.
    :param final_activation: The activation in the final layer. String.
    :param n_classes: The number of class labels to be predicted. Positive integer.
    :param optimizer: The optimization algorithm used for training. E.g. Adam. String.
    :param loss_function: The loss function. String.
    :param metrics: List of metrics (i.e. performance measures). List of strings.
    :param n_train_patients: The number of training samples. Positive integer.
    :param n_val_patients: The number of validation samples. Positive integer.
    :param checkpoint_model: True for saving the model after each epochs during the training. Boolean.
    :param models_dir: String. Directory path where the model will be stored.
    :param train_feature_files: List of file names containing features from training set. List of strings.
    :param train_label_files: List of file names containing labels from training set. List of strings.
    :param val_feature_files: List of file names containing features from validation set. List of strings.
    :param val_label_files: List of file names containing labels from validation set. List of strings.
    :param train_feature_files_big: List of file names containing features in double-sized volume from training set.
    List of strings.
    :param train_label_files_big: List of file names containing labels in double-sized volume from training set.
    List of strings.
    :param val_feature_files_big: List of file names containing features in double-sized volume from validation set.
    List of strings.
    :param val_label_files_big: List of file names containing labels in double-sized volume from validation set.
    List of strings.
    :param normalize_features: True for scaling input data between 0 and 1.
    :param max_values_for_normalization: Max values for scaling.
    :param transfer_learning: True for initialize network with pretrained weights.
    :param transfer_weights_path: Path to pretrained weights.
    :return: The Keras History object from training (None if training failed before completion).
    """
    print('network version', version)
    print('number of epochs', n_epochs)
    print('batch size', batch_size)
    print('learning rate', lr)
    print('dropout rate', dr)
    print('L1', l1)
    print('L2', l2)
    print('batch normalization', bn)
    print('deconvolution', deconvolution)
    # Get number of training and validation samples.
    n_train_samples = len(train_feature_files) if train_feature_files else len(train_feature_files_big)
    n_val_samples = len(val_feature_files) if val_feature_files else len(val_feature_files_big)
    # -----------------------------------------------------------
    # CREATING NAME OF CURRENT RUN
    # -----------------------------------------------------------
    run_name = bravenet_config.get_run_name(version=version, n_epochs=n_epochs, batch_size=batch_size, learning_rate=lr,
                                            dropout_rate=dr, l1=l1, l2=l2, batch_normalization=bn,
                                            deconvolution=deconvolution, n_base_filters=n_base_filters,
                                            n_patients_train=n_train_patients, n_patients_val=n_val_patients)
    # Variant of the run name with an epoch placeholder for per-epoch checkpoints.
    formatting_run_name = bravenet_config.get_run_name(version=version, n_epochs=n_epochs, batch_size=batch_size,
                                                       learning_rate=lr, dropout_rate=dr, l1=l1, l2=l2,
                                                       batch_normalization=bn, deconvolution=deconvolution,
                                                       n_base_filters=n_base_filters, n_patients_train=n_train_patients,
                                                       n_patients_val=n_val_patients, formatting_epoch=True)
    # File paths.
    if not os.path.exists(models_dir):
        os.makedirs(models_dir)
    model_filepath = bravenet_config.get_model_filepath(models_dir, run_name)
    train_metadata_filepath = bravenet_config.get_train_metadata_filepath(models_dir, run_name)
    train_history_filepath = bravenet_config.get_train_history_filepath(models_dir, run_name)
    logdir = bravenet_config.LOG_DIR
    # -----------------------------------------------------------
    # CREATING MODEL
    # -----------------------------------------------------------
    input_shape = (bravenet_config.PATCH_SIZE_X, bravenet_config.PATCH_SIZE_Y, bravenet_config.PATCH_SIZE_Z,
                   bravenet_config.NUM_FEATURES)
    # Double all dimensions except the last one because that is the number of feature channels.
    input_shape_big = tuple(v * 2 if i < len(input_shape) - 1 else v for i, v in enumerate(input_shape))
    # One deep-supervision output per level except the deepest.
    num_outputs = depth - 1
    # Load specific architectures according to the model version.
    model = get_bravenet(input_shapes=[input_shape_big, input_shape], n_classes=n_classes,
                         activation=activation, final_activation=final_activation, n_base_filters=n_base_filters,
                         depth=depth, optimizer=optimizer, learning_rate=lr, dropout=dr, l1=l1, l2=l2,
                         batch_normalization=bn, loss_function=loss_function, metrics=metrics, filter_size=filter_size,
                         deconvolution=deconvolution)
    # -----------------------------------------------------------
    # TRAINING MODEL
    # -----------------------------------------------------------
    starttime_training = helper.start_time_measuring('training')
    # SET CALLBACKS
    callbacks = []
    # keras callback for tensorboard logging
    tb = TensorBoard(log_dir=logdir, histogram_freq=1)
    callbacks.append(tb)
    # keras callback for saving the training history to csv file
    csv_logger = CSVLogger(train_history_filepath)
    callbacks.append(csv_logger)
    # keras ModelCheckpoint callback saves the model after every epoch, monitors the val_dice and does not overwrite
    # if the val_dice gets worse
    if checkpoint_model:
        mc = ModelCheckpoint(bravenet_config.get_model_filepath(models_dir, formatting_run_name),
                             monitor='val_loss', verbose=1, save_best_only=False,
                             mode='min')
        callbacks.append(mc)
    # DATAGENERATORS
    train_generator = DataGenerator(train_feature_files, train_feature_files_big, train_label_files,
                                    train_label_files_big, bravenet_config.SAMPLES_PATH, batch_size=batch_size,
                                    dim=input_shape[:-1], dim_big=input_shape_big[:-1],
                                    n_channels=bravenet_config.NUM_FEATURES, num_outputs=num_outputs, shuffle=True,
                                    normalize_features=normalize_features,
                                    max_values_for_normalization=max_values_for_normalization)
    val_generator = DataGenerator(val_feature_files, val_feature_files_big, val_label_files, val_label_files_big,
                                  bravenet_config.SAMPLES_PATH, batch_size=batch_size, dim=input_shape[:-1],
                                  dim_big=input_shape_big[:-1], n_channels=bravenet_config.NUM_FEATURES,
                                  num_outputs=num_outputs, shuffle=True, normalize_features=normalize_features,
                                  max_values_for_normalization=max_values_for_normalization)
    # TRANSFER LEARNING
    if transfer_learning:
        # Load weights by layer name so partially matching architectures work.
        # untrained_model = clone_model(model)
        model.load_weights(transfer_weights_path, by_name=True)
        print('Weights loaded.')
        # Multiple loss: deep-supervision loss with per-output weights.
        loss, loss_weights = ds_loss(depth=depth, loss_function=loss_function)
        # NOTE(review): the `lr` keyword of Keras optimizers is deprecated in
        # favor of `learning_rate` in newer TF releases -- confirm TF version.
        model.compile(optimizer=optimizer(lr=lr), loss=loss, metrics=metrics, loss_weights=loss_weights)
        # untrained_model.compile(optimizer=optimizer(lr=lr), loss=loss, metrics=metrics, loss_weights=loss_weights)
        print('model compiled.')
    # TRAIN
    history = None
    try:
        # NOTE(review): Model.fit_generator is deprecated since TF 2.1;
        # Model.fit accepts generators directly.
        history = model.fit_generator(
            generator=train_generator,
            validation_data=val_generator,
            steps_per_epoch=n_train_samples // batch_size,
            validation_steps=n_val_samples // batch_size,
            epochs=n_epochs,
            verbose=2, shuffle=True, callbacks=callbacks)
    except KeyboardInterrupt:
        # NOTE(review): exit(0) raises SystemExit; the finally block below
        # still runs, but the function never returns normally after Ctrl-C.
        print("KeyboardInterrupt has been caught.")
        exit(0)
    finally:
        if history is not None:
            duration_training = helper.end_time_measuring(starttime_training, 'training')
            # SAVING MODEL AND PARAMS
            if checkpoint_model:
                print('Model was checkpointed -> not saving the model from last epoch.')
            else:
                print('Model was not checkpointed -> saving the model from last epoch to:', model_filepath)
                model.save(model_filepath)
            print('Saving params to ', train_metadata_filepath)
            # Record the run's hyperparameters alongside the Keras history.
            history.params['version'] = version
            history.params['batchsize'] = batch_size
            history.params['learning_rate'] = lr
            history.params['dropout_rate'] = dr
            history.params['l1'] = l1
            history.params['l2'] = l2
            history.params['batch_norm'] = bn
            history.params['deconvolution'] = deconvolution
            history.params['num_base_filters'] = n_base_filters
            history.params['loss'] = loss_function
            history.params['samples'] = n_train_samples
            history.params['val_samples'] = n_val_samples
            history.params['total_time'] = duration_training
            history.params['normalize features'] = normalize_features
            history.params['max_values_for_normalization'] = max_values_for_normalization
            results = {'params': history.params, 'history': history.history}
            with open(train_metadata_filepath, 'wb') as handle:
                pickle.dump(results, handle)
    return history
|
<filename>predict.py<gh_stars>0
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Activation, Dropout, Dense, Lambda
from tensorflow.keras.layers import BatchNormalization as BatchNorm
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import to_categorical
from Data_Parser import getNotes
import numpy
import music21
from Data_Parser import getNotes
import pickle
import random
# CONSTANTS
OUTPUT_DIR = 'final_output_ep100_t7'  # basename of the generated MIDI file
WEIGHTS_DIR = 'final_weights_ep100'   # basename of the trained-weights .hdf5 file
SEQUENCE_LEN = 20                     # notes per model input sequence
LOADED = True  # must change if songs are added to training/testing data

# HYPERPARAMETERS
TEMP = 0.7               # softmax temperature (applied via a Lambda layer)
LSTM_LAYER_SIZE = 256    # units per LSTM layer
DROPOUT_RATE = 0.2       # dropout / recurrent-dropout fraction
EPOCHS = 50
BATCH_SIZE = 64
N_NEW_NOTES = 200        # number of notes to generate
def main():
    """Evaluate the trained model on the test set and generate new notes."""
    # Renamed from `input`, which shadowed the builtin.
    note_sequences, output, mapping = getNotes(SEQUENCE_LEN, False, LOADED)  # getNotes(int, bool train, bool loaded)
    # Map every note name to its integer index for the model.
    test_input = [[mapping[note] for note in sequence] for sequence in note_sequences]
    model = rebuild_model(test_input, mapping)
    test_output = [mapping[note] for note in output]
    # LSTM expects (samples, timesteps, 1).
    test_input_np = numpy.reshape(test_input, (len(test_input), len(test_input[0]), 1))
    test_output = to_categorical(test_output, num_classes=len(mapping))
    model.evaluate(test_input_np, test_output, batch_size=BATCH_SIZE)
    makeNotes(model, test_input, mapping)
def rebuild_model(test_input, mapping):
    """Rebuild the training-time network architecture and load saved weights.

    :param test_input: nested list of integer note indices, used only to
        derive the input shape.
    :param mapping: dict from note name to integer index; its size fixes the
        output layer width.
    :return: compiled Keras model with weights loaded from WEIGHTS_DIR.
    """
    test_input = numpy.reshape(test_input, (len(test_input), len(test_input[0]), 1))
    # New
    model = Sequential()
    model.add(LSTM(LSTM_LAYER_SIZE,  # num nodes
                   input_shape=(test_input.shape[1], test_input.shape[2]),  # since this is the first layer, we know dimensions of input
                   return_sequences=True))  # creates recurrence
    model.add(LSTM(LSTM_LAYER_SIZE,
                   return_sequences=True,  # creates recurrence
                   recurrent_dropout=DROPOUT_RATE,))  # fraction to leave out from recurrence
    model.add(LSTM(LSTM_LAYER_SIZE))  # multiple LSTM layers create a deep network for greater accuracy
    model.add(BatchNorm())  # normalizes inputs to neural network layers to make training faster
    model.add(Dropout(DROPOUT_RATE))  # prevents overfitting
    model.add(Dense(len(mapping)))  # classification layer - output must be same dimensions as mapping
    model.add(Lambda(lambda x: x / TEMP))  # applies the sampling temperature
    model.add(Activation('softmax'))  # transforms output into a probability distribution
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # Load weights saved during training; architecture must match exactly.
    model.load_weights('%s.hdf5' % WEIGHTS_DIR)
    return model
def makeNotes(model, test_input, mapping):
    """Generate N_NEW_NOTES notes with the model and write them to a MIDI file.

    Seeds generation with a random sequence from test_input, then repeatedly
    samples the next note from the model's output distribution, sliding the
    input window forward by one note each step. Output goes to
    "<OUTPUT_DIR>.mid".
    """
    start = numpy.random.randint(0, len(test_input) - 1)
    # Invert the note -> id mapping so sampled ids can be decoded.
    int_to_note = dict((mapping[note], note) for note in mapping.keys())
    # BUGFIX: copy the seed sequence. The original aliased test_input[start]
    # and the first append() mutated the caller's data in place.
    initial_sequence = list(test_input[start])
    s = music21.stream.Stream()
    for i in range(N_NEW_NOTES):
        prediction_input = numpy.reshape(initial_sequence, (1, len(initial_sequence), 1))
        prediction = model.predict(prediction_input, verbose=0)
        # Sample from the (temperature-scaled) distribution rather than argmax.
        index = numpy.random.choice(numpy.arange(len(prediction[0])), p=prediction[0])
        result = int_to_note[index]
        # Decode: "."-joined pitches form a chord, 'R' is a rest, else a note.
        if "." in result:
            note = music21.chord.Chord(result.split("."))
        elif (result == 'R'):
            note = music21.note.Rest()
        else:
            note = music21.note.Note(result)
        s.append(note)
        # Slide the window: append the new note id, drop the oldest.
        initial_sequence.append(index)
        initial_sequence = initial_sequence[1:]
    s.write('midi', fp="%s.mid" %OUTPUT_DIR)
#print(output)
# Script entry point: evaluate the trained model and generate a MIDI file.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.