import time
import os
import json
import glob
import traceback
class SolarDb:
def __init__(self, filenamePrefix, config):
self.config = config
self.m_filenamePrefix = filenamePrefix
# self.data = {} [<name>] = {}
# ["10minute_mAsec"] = int
# ["today_mAsec_min"] = int
# ["today_mAsec_max"] = int
# ["today_mAsec"] = int
# ["prev_mAsec_min"] = int
# ["prev_mAsec_max"] = int
# ["prev_mAsec"] = int
self.createEmptyDataStructure()
daysToRead = 4; # today plus 3 previous
for index in range(daysToRead-1, -1, -1):
self.readDayLog(index)
# self.reset_todays_data()
self.fileUpdateInterval = 10 # minutes
cur_time_full = time.time()
cur_time_full_struct = time.localtime(cur_time_full)
cur_10_min_block = int((cur_time_full_struct.tm_hour*60 + cur_time_full_struct.tm_min)/self.fileUpdateInterval)
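        # e.g. 12:34 falls in block int((12*60 + 34) / 10) == 75 of the 144
        # ten-minute blocks in a day (fileUpdateInterval is 10 minutes)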
self.last_10_min_block = cur_10_min_block
self.cur_date_str = None
# original config stuff
self.totalEnergy = 0.0;
self.m_sensorNames = [];
self.m_voltages = [];
self.m_currents = [];
self.m_times = [];
self.m_date = "0000_00_00";
self.m_filename = "unknown"
self.m_prev_sampleWindow = -1;
for index in xrange(6):
emptyList = []
self.m_voltages.append(emptyList);
emptyList = []
self.m_currents.append(emptyList);
averages = {}
averages["voltage"] = [];
averages["current"] = [];
for index in xrange(6):
averages["voltage"].append( 0.0 );
averages["current"].append( 0 );
self.averages = averages
self.averages_dataPoints = 0
#~ def accumulateEnergy(self, solarData): # probably belongs in SolarDb fixme.
#~ # 0-panel; 1-bat 1; 2-bat 2; 3-load; 4-bat 3; 5-bat 4
#~ powerInts = []
#~ for index in range(len(solarData)):
#~ value = int(solarData["current"][index])
#~ powerInts.append(value)
#~ panelPwr = powerInts[0]
#~ loadPwr = powerInts[3]
#~ self.currentPanelPwr = int( panelPwr )
#~ self.currentLoadPwr = int( loadPwr )
#~ # add new readings to totals; assume 1 second integration window
#~ for index in range(solarData):
#~ self.todayStats[index]["cumulativeEnergy"] = self.todayStats[index]["cumulativeEnergy"] + int(solarData["current"][index])
#~ self.prevStats[index]["cumulativeEnergy"] = self.prevStats[index]["cumulativeEnergy"] + int(solarData["current"][index])
#~ if self.prevStats[index]["cumulativeEnergy"] < self.prevStats[index]["minEnergy"]:
#~ self.prevStats[index]["minEnergy"] = self.prevStats[index]["cumulativeEnergy"];
#~ elif self.prevStats[index]["cumulativeEnergy"] > self.prevStats[index]["maxEnergy"]:
#~ self.prevStats[index]["maxEnergy"] = self.prevStats[index]["cumulativeEnergy"]
def createEmptyDataStructure(self):
self.data = {}
for entry in self.config:
tempVal = {}
tempVal["10minute_mAsec"] = 0
tempVal["10minute_mAsec_min"] = 999999999
tempVal["10minute_mAsec_max"] = -999999999
tempVal["10minute_count"] = 0
tempVal["10minute_v"] = 0.0 # total volt values for use in calc of average
tempVal["10minute_v_min"] = 999999999.0
tempVal["10minute_v_max"] = -999999999.0
tempVal["10minute_mA"] = 0 # total mA values for use in calc of average
tempVal["10minute_mA_min"] = 999999999
tempVal["10minute_mA_max"] = -999999999
tempVal["today_mAsec"] = 0
tempVal["today_mAsec_min"] = 999999999 # mA*Sec
tempVal["today_mAsec_max"] = -999999999
tempVal["today_count"] = 0
tempVal["prev_mAsec"] = 0
tempVal["prev_mAsec_min"] = 999999999
tempVal["prev_mAsec_max"] = -999999999
tempVal["prev_count"] = 0
self.data[entry["name"]] = tempVal
def addEntry(self, data):
# solarData = {} ['names'] = [index] = strings
# ['voltage'] = [index] = float
# ['current'] = [index] = int
cur_time_secs = time.time()
# update file if needed
reset_10min, reset_day = self.evaluate_rollovers(cur_time_secs)
# write data entry if needed and flush old totals
if reset_10min:
self.write_data_to_file(cur_time_secs)
self.reset_10_min_data()
# reset daily totals if needed
if reset_day:
self.reset_todays_data()
self.cur_time_str = time.strftime("%H:%M:%S", time.localtime(cur_time_secs))
self.cur_date_str = time.strftime("%Y_%m_%d", time.localtime(cur_time_secs)) # update date after write so that we use previous date so midnight works
# accumulate the new data entry
for index in xrange(len(data["voltage"])):
name = data['names'][index]
power_mA = data['current'][index]
# accumulate mA hours
entry = self.data[name]
entry['10minute_mAsec'] = entry['10minute_mAsec'] + power_mA
entry['10minute_v'] = entry['10minute_v'] + data['voltage'][index]
entry['10minute_mA'] = entry['10minute_mA'] + power_mA
entry['today_mAsec'] = entry['today_mAsec'] + power_mA
entry['prev_mAsec'] = entry['prev_mAsec'] + power_mA
# update count values
entry = self.data[name]
entry['10minute_count'] = entry['10minute_count'] + 1
entry['today_count'] = entry['today_count'] + 1
entry['prev_count'] = entry['prev_count'] + 1
# update 10 min block min/max values
if entry['10minute_mAsec_min'] > entry['10minute_mAsec']: # if tracked min is too big
entry['10minute_mAsec_min'] = entry['10minute_mAsec']
if entry['10minute_mAsec_max'] < entry['10minute_mAsec']: # if tracked max is too small
entry['10minute_mAsec_max'] = entry['10minute_mAsec']
if entry['10minute_mA_min'] > power_mA: # if tracked min is too big
entry['10minute_mA_min'] = power_mA
if entry['10minute_mA_max'] < power_mA: # if tracked max is too small
entry['10minute_mA_max'] = power_mA
if entry['10minute_v_min'] > data['voltage'][index]: # if tracked min is too big
entry['10minute_v_min'] = data['voltage'][index]
if entry['10minute_v_max'] < data['voltage'][index]: # if tracked max is too small
entry['10minute_v_max'] = data['voltage'][index]
# update today min/max values
if entry['today_mAsec_min'] > entry['today_mAsec']: # if tracked min is too big
entry['today_mAsec_min'] = entry['today_mAsec']
if entry['today_mAsec_max'] < entry['today_mAsec']: # if tracked max is too small
entry['today_mAsec_max'] = entry['today_mAsec']
# update cumulative min/max values
if entry['prev_mAsec_min'] > entry['prev_mAsec']:
entry['prev_mAsec_min'] = entry['prev_mAsec']
if entry['prev_mAsec_max'] < entry['prev_mAsec']:
entry['prev_mAsec_max'] = entry['prev_mAsec']
# entry = {} ["time"] = seconds from time.time()
# ["samples"] = number of samples present in this file
# ["inputs"] = {}
# [<sourceName>] = [] = <mAsec>,<mAsec_min>,<mAsec_max>
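    # current entries ("format 2.0" in readDayLog) carry seven values per source:
    # <mAsec>, <v_avg>, <v_min>, <v_max>, <mA_avg>, <mA_min>, <mA_max>
    # illustrative line (made-up numbers):
    # {"time_sec": 1500000000.0, "time": "12:30:00", "samples": 600,
    #  "inputs": {"Panel": [120000, 13.2, 12.9, 13.6, 200, 0, 850]}}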
def get_10min_entry(self, cur_time_secs):
data = {}
data['time_sec'] = cur_time_secs
data['time'] = self.cur_time_str
data['inputs'] = {}
for index in range(len(self.config)):
name = self.config[index]["name"]
data['inputs'][name] = []
data['inputs'][name].append( self.data[name]['10minute_mAsec'])
# data['inputs'][name].append( self.data[name]['10minute_mAsec_min'])
# data['inputs'][name].append( self.data[name]['10minute_mAsec_max'])
data['inputs'][name].append( self.data[name]["10minute_v"]/self.data[name]['10minute_count']) # average voltage
data['inputs'][name].append( self.data[name]["10minute_v_min"])
data['inputs'][name].append( self.data[name]["10minute_v_max"])
data['inputs'][name].append( int(self.data[name]["10minute_mA"]/self.data[name]['10minute_count'])) # average current
data['inputs'][name].append( self.data[name]["10minute_mA_min"])
data['inputs'][name].append( self.data[name]["10minute_mA_max"] )
data['samples'] = self.data[name]['10minute_count']
return data
def write_data_to_file(self, cur_time_secs):
# if (self.data['Panel']['10minute_count'] > 0): # make sure there is some data to write. helps with very first run
        if self.cur_date_str is not None:
            data = self.get_10min_entry(cur_time_secs)
            data_json = json.dumps(data)
            self.m_filename = self.m_filenamePrefix + self.cur_date_str + ".csv"
            with open(self.m_filename, "a+") as f:  # append; create the file if needed
                f.write(data_json + '\n')
def evaluate_rollovers(self, cur_time_secs):
write_needed = False
new_file_needed = False
cur_time_full_struct = time.localtime(cur_time_secs)
cur_10_min_block = int((cur_time_full_struct.tm_hour*60 + cur_time_full_struct.tm_min)/self.fileUpdateInterval)
# print "cur_10_min_block=%d" %(cur_10_min_block)
# write data to a file if its time
if cur_10_min_block != self.last_10_min_block:
self.last_10_min_block = cur_10_min_block
write_needed = True
if cur_10_min_block == 0:
new_file_needed = True
return write_needed, new_file_needed
def reset_todays_data(self):
for index in range(len(self.config)):
name = self.config[index]["name"]
self.data[name]["today_mAsec_min"] = 999999999 # mA*Sec
self.data[name]["today_mAsec_max"] = -999999999
self.data[name]["today_mAsec"] = 0
self.data[name]["today_count"] = 0
def reset_10_min_data(self):
for index in range(len(self.config)):
name = self.config[index]["name"]
self.data[name]["10minute_mAsec"] = 0
self.data[name]["10minute_mAsec_min"] = 999999999
self.data[name]["10minute_mAsec_max"] = -999999999
self.data[name]["10minute_v"] = 0.0
self.data[name]["10minute_v_min"] = 999999999.0
self.data[name]["10minute_v_max"] = -999999999.0
self.data[name]["10minute_mA"] = 0
self.data[name]["10minute_mA_min"] = 999999999
self.data[name]["10minute_mA_max"] = -999999999
self.data[name]["10minute_count"] = 0
    def formerly_addEntry_stuff(self, data, date, time_str):  # legacy path, kept for reference
rolledOverToNewDay = False
# figure the new point into the averages.
for index in xrange(len(data["voltage"])):
self.averages["voltage"][index] = self.averages["voltage"][index] + data["voltage"][index];
self.averages["current"][index] = self.averages["current"][index] + data["current"][index];
self.averages_dataPoints = self.averages_dataPoints +1;
if ( self.averages_dataPoints == 10):
# if rollover, flush the old data to the file.
            sampleWindow = int(time_str[3:5]) // 10
if ( self.m_prev_sampleWindow != sampleWindow ) and (self.m_date != "0000_00_00"): # the hour rolled over.
                self.m_prev_sampleWindow = sampleWindow
self.m_filename = self.m_filenamePrefix+self.m_date+".csv"
# create the file if necessary
if not os.path.exists(self.m_filename):
f = open(self.m_filename, 'w')
headerLineText = "time"
for index in xrange(6):
newSection = ",%s voltage,%s current" % (data["names"][index], data["names"][index])
headerLineText = headerLineText+newSection
f.write(headerLineText)
#~ f.write("time,%s voltage,%s current,%s voltage,%s current,%s voltage,%s current,%s voltage,%s current\n" % (data["names"][0], data["names"][0], data["names"][1], data["names"][1], data["names"][2], data["names"][2], data["names"][3], data["names"][3]))
f.close();
rolledOverToNewDay = True
# append the current data
f = open(self.m_filename, 'a')
print("length=%d" % (len(self.m_voltages[0])))
for index in xrange(len(self.m_voltages[0])):
f.write(self.m_times[index]);
f.write(",");
for sensorIndex in xrange(6):
f.write("%s,%s" % (self.m_voltages[sensorIndex][index],self.m_currents[sensorIndex][index] ))
if (sensorIndex != 5):
f.write(",");
f.write("\n");
f.close()
# clear the cached data for the next hour
self.m_voltages = [];
self.m_currents = [];
self.m_times = [];
for index in xrange(6):
emptyList = []
self.m_voltages.append(emptyList);
emptyList = []
self.m_currents.append(emptyList);
self.m_date = date;
self.m_prev_sampleWindow = sampleWindow
            self.m_times.append(time_str)
for index in xrange(len(data["voltage"])):
voltageAvg = self.averages["voltage"][index] / self.averages_dataPoints;
currentAvg = self.averages["current"][index] / self.averages_dataPoints;
self.m_voltages[index].append(voltageAvg);
self.m_currents[index].append(currentAvg);
self.m_sensorNames.append(data["names"][index] );
# print("avgV=%2.3f avgC=%d" % (voltageAvg,currentAvg))
for index in xrange(len(data["voltage"])): # clear out the averages for next time.
self.averages["voltage"][index] = 0.0;
self.averages["current"][index] = 0;
self.averages_dataPoints = 0;
return rolledOverToNewDay;
def readDayLog(self,fileIndex, startup=True):
filename = self.getFilenameFromIndex(fileIndex)
temp_data = {} # temp data indexed by source in dictionary.
if filename != None:
# read data file into logs
fp = open(filename, 'r')
contents = fp.readlines()
fp.close()
for index in range(len(contents)):
fileDataEntry = {}
try:
fileDataEntry = json.loads(contents[index])
except Exception:
print(traceback.format_exc())
record_time = '00:00:00'
if 'time' in fileDataEntry:
record_time = fileDataEntry['time']
if 'inputs' in fileDataEntry:
for source_name in fileDataEntry['inputs']:
if source_name in self.data:
mAsec = 0
v_avg = 0
v_min = 0
v_max = 0
mA_avg = 0
mA_min = 0
mA_max = 0
if len(fileDataEntry['inputs'][source_name]) == 3: # we are format 1.0
mAsec = fileDataEntry['inputs'][source_name][0]
elif len(fileDataEntry['inputs'][source_name]) == 7: # we are format 2.0
mAsec = fileDataEntry['inputs'][source_name][0]
v_avg = fileDataEntry['inputs'][source_name][1]
v_min = fileDataEntry['inputs'][source_name][2]
v_max = fileDataEntry['inputs'][source_name][3]
mA_avg = fileDataEntry['inputs'][source_name][4]
mA_min = fileDataEntry['inputs'][source_name][5]
mA_max = fileDataEntry['inputs'][source_name][6]
entry = self.data[source_name]
if startup == True:
if fileIndex == 0: # only track today stuff if file is for today
entry['today_mAsec'] = entry['today_mAsec'] + mAsec
entry['today_count'] = entry['today_count'] + 1
if entry['today_mAsec_min'] > entry['today_mAsec']: # if tracked min is too big
entry['today_mAsec_min'] = entry['today_mAsec']
if entry['today_mAsec_max'] < entry['today_mAsec']: # if tracked max is too small
entry['today_mAsec_max'] = entry['today_mAsec']
entry['prev_mAsec'] = entry['prev_mAsec'] + mAsec
entry['prev_count'] = entry['prev_count'] + 1
if entry['prev_mAsec_min'] > entry['prev_mAsec']:
entry['prev_mAsec_min'] = entry['prev_mAsec']
if entry['prev_mAsec_max'] < entry['prev_mAsec']:
entry['prev_mAsec_max'] = entry['prev_mAsec']
if not source_name in temp_data:
temp_data[source_name] = {}
temp_data[source_name]['mAsec'] = []
temp_data[source_name]['v_avg'] = []
temp_data[source_name]['v_max'] = []
temp_data[source_name]['v_min'] = []
temp_data[source_name]['mA_avg'] = []
temp_data[source_name]['mA_max'] = []
temp_data[source_name]['mA_min'] = []
temp_data[source_name]['time'] = []
temp_data[source_name]['mAsec'] .append(mAsec )
temp_data[source_name]['v_avg'] .append(v_avg )
temp_data[source_name]['v_max'] .append(v_max )
temp_data[source_name]['v_min'] .append(v_min )
temp_data[source_name]['mA_avg'] .append(mA_avg)
temp_data[source_name]['mA_max'] .append(mA_max)
temp_data[source_name]['mA_min'] .append(mA_min)
record_time_secs = int(record_time[0:2])*60*60 + int(record_time[3:5])*60 + int(record_time[6:8])
temp_data[source_name]['time'] .append(record_time_secs)
return temp_data, filename
# return_val = {} [<name>] = {}
# ['mAsec'] = []
# ['v_avg'] = []
# ['v_max'] = []
# ['v_min'] = []
# ['mA_avg'] = []
# ['mA_max'] = []
# ['mA_min'] = []
# ['time'] = [] = '11:09:59' = 'HH:MM:SS'
# orig notes # entry = {} ["time"] = seconds from time.time()
# orig notes # ["samples"] = number of samples present in this file
# orig notes # ["inputs"] = {}
# orig notes # [<sourceName>] = [] = <mAsec>,<mAsec_min>,<mAsec_max>
def readDayLog_orig(self,fileIndex):
returnVal = [];
filename = self.getFilenameFromIndex(fileIndex)
for index in xrange(6):
tempVal = {} # put an empty dictionary for each array entry.
tempVal["name"] = []
tempVal["voltage"] = []
tempVal["current"] = []
tempVal["time"] = []
returnVal.append(tempVal);
fileHandle = open(filename,"r");
rawLines = fileHandle.readlines();
firstLineFields = rawLines[0].split(",");
for chanIndex in xrange(6):
returnVal[chanIndex]["name"] = firstLineFields[1+chanIndex*2][:-8]; # strip off " voltage" from the end for the base name.
#~ returnVal[0]["name"] = firstLineFields[1][:-8]; # strip off " voltage" from the end for the base name.
#~ returnVal[1]["name"] = firstLineFields[3][:-8]; # strip off " voltage" from the end for the base name.
#~ returnVal[2]["name"] = firstLineFields[5][:-8]; # strip off " voltage" from the end for the base name.
#~ returnVal[3]["name"] = firstLineFields[7][:-8]; # strip off " voltage" from the end for the base name.
for chanIndex in xrange(6):
returnVal[chanIndex]["maxVoltage"] = -99999999.0 # very small.
returnVal[chanIndex]["minVoltage"] = 99999999.0 # very big.
returnVal[chanIndex]["maxCurrent"] = -99999999 # very small.
returnVal[chanIndex]["minCurrent"] = 99999999 # very big.
returnVal[chanIndex]["maxPower"] = -99999999 # very small.
returnVal[chanIndex]["minPower"] = 99999999 # very big.
for index in xrange(1,len(rawLines)):
fields = rawLines[index].split(",");
for chanIndex in xrange(6):
returnVal[chanIndex]["voltage"].append(float(fields[1+chanIndex*2]))
returnVal[chanIndex]["current"].append(int(fields[2+chanIndex*2]))
returnVal[chanIndex]["time"].append(fields[0])
if (returnVal[chanIndex]["maxVoltage"] < float(fields[1+chanIndex*2])):
returnVal[chanIndex]["maxVoltage"] = float(fields[1+chanIndex*2])
if (returnVal[chanIndex]["minVoltage"] > float(fields[1+chanIndex*2])):
returnVal[chanIndex]["minVoltage"] = float(fields[1+chanIndex*2])
if (returnVal[chanIndex]["maxCurrent"] < int(fields[2+chanIndex*2])):
returnVal[chanIndex]["maxCurrent"] = int(fields[2+chanIndex*2])
if (returnVal[chanIndex]["minCurrent"] > int(fields[2+chanIndex*2])):
returnVal[chanIndex]["minCurrent"] = int(fields[2+chanIndex*2])
if (returnVal[chanIndex]["maxPower"] < float(fields[1+chanIndex*2])*int(fields[2+chanIndex*2])):
returnVal[chanIndex]["maxPower"] = float(fields[1+chanIndex*2])*int(fields[2+chanIndex*2])
if (returnVal[chanIndex]["minPower"] > float(fields[1+chanIndex*2])*int(fields[2+chanIndex*2])):
returnVal[chanIndex]["minPower"] = float(fields[1+chanIndex*2])*int(fields[2+chanIndex*2])
fileHandle.close()
return (returnVal, filename);
def getFilenameFromIndex(self, index):
fileList = []
returnValue = None
pattern = self.m_filenamePrefix + "*.csv"
for file in glob.glob( pattern ):
fileList.append(file)
fileList.sort()
fileList.reverse()
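        # newest file first after the reverse, so index 0 is today's log and
        # larger indices reach further back in time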
if index < 0:
returnValue = fileList[0]
elif index >= len(fileList):
returnValue = None
else:
returnValue = fileList[index]
return returnValue
#~ def setupSolar():
#~ mySolarSensors = SolarSensors()
#~ # ina = INA219(0x40);
#~ # mySolarSensors.addSensor("Panel", ina ); # no jumpers.
#~ # mySolarSensors.addSensor("Battery1", ina ); # A0 jumper.
#~ # mySolarSensors.addSensor("Battery2", ina ); # A1 jumper.
#~ # mySolarSensors.addSensor("Load", ina ); # A0 and A1 jumpers.
#~ mySolarSensors.addSensor("Panel", INA219(0x45), scale=2.0 ); # A0 and A1 jumpers.
#~ # mySolarSensors.addSensor("Dead", INA219(0x43) );
#~ mySolarSensors.addSensor("Batt 5", INA219(0x49) );
#~ mySolarSensors.addSensor("Batt 6", INA219(0x41) );
#~ mySolarSensors.addSensor("Load", INA219(0x40), scale=2.0);
#~ mySolarSensors.addSensor("Batt 7", INA219(0x42) );
#~ mySolarSensors.addSensor("Batt 8", INA219(0x43) );
#~ mySolarSensors.addSensor("Batt 4", INA219(0x48) );
#~ mySolarSensors.addSensor("Batt 3", INA219(0x47) );
#~ mySolarSensors.addSensor("Batt 2", INA219(0x4a) );
#~ mySolarSensors.addSensor("Batt 1", INA219(0x46) );
#~ mySolar = Solar(mySolarSensors, Timestamper() );
#~ return mySolar;
#!/usr/bin/env python
# encoding: utf-8
# PYTHON_ARGCOMPLETE_OK
# from __future__ imports must occur at the beginning of the file
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
### imports
import os
import sys
import time
import io
import json
import pprint
import codecs
import threading
import traceback
import shutil
# unify Python 2 and 3
if sys.version_info[0] == 2:
from Queue import Queue
elif sys.version_info[0] == 3:
unicode = str
basestring = str
long = int
raw_input = input
from queue import Queue
from . import const
from . import printer_console
from .printer_util import (iswindows, human_size, interpret_size)
from .printer import (
bannerwarn, plog, pdbg, pinfo, pwarn, perr)
pr = printer_console.pr
prcolor = printer_console.prcolor
ask = printer_console.ask
pprgr = printer_console.pprgr
human_size
interpret_size
plog
pdbg
pinfo
pwarn
def remove_backslash(s):
return s.replace(r'\/', r'/')
rb = remove_backslash
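# e.g. rb(r'http:\/\/example.com') -> 'http://example.com' (undoes escaped forward slashes)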
# no idea who screws the sys.stdout.encoding
# the locale is 'UTF-8', sys.stdin.encoding is 'UTF-8',
# BUT, sys.stdout.encoding is None ...
def fixenc(stdenc):
if iswindows():
bannerwarn("WARNING: StdOut encoding '{}' is unable to encode CJK strings.\n" \
"Files with non-ASCII names may not be handled correctly.".format(stdenc))
else:
# fix by @xslidian
if not stdenc:
stdenc = 'utf-8'
sys.stdout = codecs.getwriter(stdenc)(sys.stdout)
sys.stderr = codecs.getwriter(stdenc)(sys.stderr)
# http://stackoverflow.com/questions/9403986/python-3-traceback-fails-when-no-exception-is-active
def formatex(ex):
s = ''
if ex and isinstance(ex, Exception):
s = "Exception:\n{} - {}\nStack:\n{}".format(
type(ex), ex, ''.join(traceback.format_stack()))
return s
# marshaling
def str2bool(s):
if isinstance(s, basestring):
if s:
sc = s.lower()[0]
if sc == 't' or sc == 'y' or (sc >= '1' and sc <= '9'):
return True
else:
return False
else:
return False
else:
# don't change
return s
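# e.g. str2bool('Yes') -> True, str2bool('0') -> False; non-string values pass through unchanged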
def str2int(s):
if isinstance(s, basestring):
return int(s)
else:
# don't change
return s
def str2float(s):
if isinstance(s, basestring):
return float(s)
else:
# don't change
return s
# guarantee no-exception
def copyfile(src, dst):
result = const.ENoError
try:
shutil.copyfile(src, dst)
except (shutil.Error, IOError) as ex:
perr("Fail to copy '{}' to '{}'.\n{}".format(
src, dst, formatex(ex)))
result = const.EFailToCreateLocalFile
return result
def movefile(src, dst):
result = const.ENoError
try:
shutil.move(src, dst)
except (shutil.Error, OSError) as ex:
perr("Fail to move '{}' to '{}'.\n{}".format(
src, dst, formatex(ex)))
result = const.EFailToCreateLocalFile
return result
def removefile(path, verbose = False):
result = const.ENoError
try:
if verbose:
pr("Removing local file '{}'".format(path))
if path:
os.remove(path)
except Exception as ex:
perr("Fail to remove local fle '{}'.\n{}".format(
path, formatex(ex)))
result = const.EFailToDeleteFile
return result
def removedir(path, verbose = False):
result = const.ENoError
try:
if verbose:
pr("Removing local directory '{}'".format(path))
if path:
shutil.rmtree(path)
except Exception as ex:
perr("Fail to remove local directory '{}'.\n{}".format(
path, formatex(ex)))
result = const.EFailToDeleteDir
return result
def removepath(path):
if os.path.isdir(path):
return removedir(path)
elif os.path.isfile(path):
return removefile(path)
else:
perr("Can't remove '{}', it's non-file and none-dir.".format(path))
return const.EArgument
def makedir(path, mode = 0o777, verbose = False):
result = const.ENoError
if verbose:
pr("Creating local directory '{}'".format(path))
if path and not os.path.exists(path):
try:
os.makedirs(path, mode)
except os.error as ex:
perr("Failed at creating local dir '{}'.\n{}".format(
path, formatex(ex)))
result = const.EFailToCreateLocalDir
return result
# guarantee no-exception
def getfilesize(path):
size = -1
try:
size = os.path.getsize(path)
except os.error as ex:
perr("Exception occured while getting size of '{}'.\n{}".format(
path, formatex(ex)))
return size
# guarantee no-exception
def getfilemtime(path):
mtime = -1
try:
mtime = os.path.getmtime(path)
except os.error as ex:
perr("Exception occured while getting modification time of '{}'.\n{}".format(
path, formatex(ex)))
return mtime
def getfilemtime_int(path):
# just int it, this is reliable no matter how stat_float_times() is changed
return int(getfilemtime(path))
# mtime = getfilemtime(path)
# if (mtime == -1):
# return mtime
#
# if os.stat_float_times():
# mtime = int(mtime)
#
# return mtime
# seems os.path.join() doesn't handle Unicode well
def joinpath(first, second, sep = os.sep):
head = ''
if first:
head = first.rstrip(sep) + sep
tail = ''
if second:
tail = second.lstrip(sep)
return head + tail
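# e.g. joinpath('/remote/dir/', '/sub/file.txt') -> '/remote/dir/sub/file.txt'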
# CAN Python make Unicode right?
# http://houtianze.github.io/python/unicode/json/2016/01/03/another-python-unicode-fisaco-on-json.html
def jsondump_actual(data, f):
if sys.version_info[0] == 2:
f.write(unicode(json.dumps(data, ensure_ascii = False, sort_keys = True, indent = 2)))
elif sys.version_info[0] == 3:
json.dump(data, f, ensure_ascii = False, sort_keys = True, indent = 2)
# no try ... except protection, will throw exceptions
def jsondump(data, filename, semaphore):
if semaphore:
with semaphore:
with io.open(filename, 'w', encoding = 'utf-8') as f:
jsondump_actual(data, f)
else:
with io.open(filename, 'w', encoding = 'utf-8') as f:
jsondump_actual(data, f)
def jsondump_no_exception(data, filename, semaphore):
try:
jsondump(data, filename, semaphore)
except Exception as ex:
perr("Fail to dump json '{}' to file '{}'.\nException:\n{}".format(
data, filename, formatex(ex)))
# no try ... except protection, will throw exceptions
def jsonload(filename):
with io.open(filename, 'r', encoding = 'utf-8') as f:
return json.load(f)
def jsonload_no_exception(filename):
try:
jsonload(filename)
# In `python 3`, the exception when failing to parse is `json.JSONDecodeError` (subclass of `ValueError`)
# but in `python 2`, it's just `ValueError`
except Exception as ex:
perr("Fail to load '{}' as json, exception:\n{}".format(filename, formatex(ex)))
return {}
def ls_type(isdir):
return 'D' if isdir else 'F'
def ls_time(itime):
return time.strftime('%Y-%m-%d, %H:%M:%S', time.localtime(itime))
# no leading, trailing '/'
# remote path rule:
# - all public methods of ByPy shall accept remote path as "partial path"
# (before calling get_pcs_path())
# - all private methods of ByPy shall accept remote path as "full path"
# (after calling get_pcs_path())
def get_pcs_path(path):
if not path or path == '/' or path == '\\':
return const.AppPcsPath
return (const.AppPcsPath + '/' + path.strip('/')).rstrip('/')
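# e.g. get_pcs_path('photos/cat.jpg') -> const.AppPcsPath + '/photos/cat.jpg',
# while '', '/' and a lone backslash all map to const.AppPcsPath itself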
def is_pcs_root_path(path):
return path == const.AppPcsPath or path == const.AppPcsPath + '/'
def print_pcs_list_bare(list):
if list:
for f in list:
pr("{} {} {} {} {} {}".format(
ls_type(f['isdir']),
f['path'],
f['size'],
ls_time(f['ctime']),
ls_time(f['mtime']),
f['md5'] if 'md5' in f else ''))
def print_pcs_list(json, foundmsg = "Found:", notfoundmsg = "Nothing found."):
list = json['list']
if list:
pr(foundmsg)
print_pcs_list_bare(list)
else:
pr(notfoundmsg)
# https://stackoverflow.com/questions/10883399/unable-to-encode-decode-pprint-output
class MyPrettyPrinter(pprint.PrettyPrinter):
def format(self, obj, context, maxlevels, level):
if isinstance(obj, unicode):
#return (obj.encode('utf8'), True, False)
return (obj, True, False)
if isinstance(obj, bytes):
convert = False
#for c in obj:
# if ord(c) >= 128:
# convert = True
# break
try:
codecs.decode(obj)
except:
convert = True
if convert:
return ("0x{}".format(obj), True, False)
return pprint.PrettyPrinter.format(self, obj, context, maxlevels, level)
class NewThread(threading.Thread):
def __init__(self, func):
threading.Thread.__init__(self)
self.func = func
def run(self):
self.func()
def startthread(func):
NewThread(func).start()
def inc_list_size(li, size = 3, filler = 0):
i = len(li)
while (i < size):
li.append(filler)
i += 1
def comp_semver(v1, v2):
v1a = v1.split('.')
v2a = v2.split('.')
v1ia = [int(i) for i in v1a]
v2ia = [int(i) for i in v2a]
inc_list_size(v1ia, 3)
inc_list_size(v2ia, 3)
i = 0
while (i < 3):
if v1ia[i] != v2ia[i]:
return v1ia[i] - v2ia[i]
i += 1
return 0
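# e.g. comp_semver('1.9.1', '1.10.0') < 0 -- components are compared numerically,
# not lexicographically, and missing components are padded with 0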
# NOT in use, see deque
class FixedSizeQueue(object):
def __init__(self, size = 1024):
self.size = size
self.q = Queue()
def put(self, item):
if self.q.qsize() >= self.size:
self.q.get()
self.q.put(item)
def get(self):
return self.q.get()
def nop(*args):
pass
# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8
"""
aggregation.py contains utility functions to handle multiple named and lambda
kwarg aggregations in groupby and DataFrame/Series aggregation
"""
from collections import defaultdict
from functools import partial
from typing import Any, Callable, DefaultDict, List, Sequence, Tuple, Union
from pandas.core.dtypes.common import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.indexes.api import Index
def is_multi_agg_with_relabel(**kwargs) -> bool:
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
len(kwargs) > 0
)
def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
to the old Dict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
col_idx_order : List[int]
List of columns indices.
Examples
--------
>>> normalize_keyword_aggregation({"output": ("input", "sum")})
(defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
"""
# Normalize the aggregation functions as Mapping[column, List[func]],
# process normally, then fixup the names.
# TODO: aggspec type: typing.Dict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec: DefaultDict = defaultdict(list)
order = []
columns, pairs = list(zip(*kwargs.items()))
for name, (column, aggfunc) in zip(columns, pairs):
aggspec[column].append(aggfunc)
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
# uniquify aggfunc name if duplicated in order list
uniquified_order = _make_unique_kwarg_list(order)
    # GH 25719: because aggspec changes the order of assigned columns in aggregation,
    # uniquified_aggspec will store the uniquified order list and will compare it with
    # order based on index
aggspec_order = [
(column, com.get_callable_name(aggfunc) or aggfunc)
for column, aggfuncs in aggspec.items()
for aggfunc in aggfuncs
]
uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
# get the new index of columns by comparison
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
return aggspec, columns, col_idx_order
def _make_unique_kwarg_list(
seq: Sequence[Tuple[Any, Any]]
) -> Sequence[Tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
    Examples
--------
>>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
return [
(pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
if seq.count(pair) > 1
else pair
for i, pair in enumerate(seq)
]
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = partial(aggfunc)
aggfunc.__name__ = f"<lambda_{i}>"
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
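# e.g. two "<lambda>" aggfuncs come back named "<lambda_0>" and "<lambda_1>";
# a list with a single aggfunc is returned untouched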
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
Non-dict-like `agg_spec` are pass through as is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas('sum')
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _managle_lambda_list(agg_spec)
return mangled_aggspec
def validate_func_kwargs(
kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
        List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
    tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
raise TypeError(no_arg_message)
return columns, func
import argparse
import logging
import os
import subprocess
import time
import yaml
from StringIO import StringIO
import teuthology
from . import orchestra
import orchestra.remote
from .orchestra import run
from .config import FakeNamespace
from .lock import list_locks
from .lock import unlock_one
from .lock import find_stale_locks
from .lockstatus import get_status
from .misc import config_file
from .misc import merge_configs
from .misc import get_testdir
from .misc import get_user
from .misc import reconnect
from .parallel import parallel
from .task import install as install_task
from .task.internal import check_lock, add_remotes, connect
log = logging.getLogger(__name__)
def clear_firewall(ctx):
"""
Remove any iptables rules created by teuthology. These rules are
identified by containing a comment with 'teuthology' in it. Non-teuthology
firewall rules are unaffected.
"""
ctx.cluster.run(
args=[
"sudo", "sh", "-c",
"iptables-save | grep -v teuthology | iptables-restore"
],
wait=False,
)
def shutdown_daemons(ctx):
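    # Unmount any ceph-fuse / rbd-fuse mounts listed in /etc/mtab, then kill the
    # well-known ceph test daemons on every remote; the trailing '|| true' keeps
    # the command from failing on hosts where those binaries were never installed.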
nodes = {}
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
'if', 'grep', '-q', 'ceph-fuse', '/etc/mtab', run.Raw(';'),
'then',
'grep', 'ceph-fuse', '/etc/mtab', run.Raw('|'),
'grep', '-o', " /.* fuse", run.Raw('|'),
'grep', '-o', "/.* ", run.Raw('|'),
'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'),
'fi',
run.Raw(';'),
'if', 'grep', '-q', 'rbd-fuse', '/etc/mtab', run.Raw(';'),
'then',
'grep', 'rbd-fuse', '/etc/mtab', run.Raw('|'),
'grep', '-o', " /.* fuse", run.Raw('|'),
'grep', '-o', "/.* ", run.Raw('|'),
'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'),
'fi',
run.Raw(';'),
'sudo',
'killall',
'--quiet',
'ceph-mon',
'ceph-osd',
'ceph-mds',
'ceph-fuse',
'ceph-disk',
'radosgw',
'ceph_test_rados',
'rados',
'rbd-fuse',
'apache2',
run.Raw('||'),
'true', # ignore errors from ceph binaries not being found
],
wait=False,
)
nodes[remote.name] = proc
for name, proc in nodes.iteritems():
log.info('Waiting for %s to finish shutdowns...', name)
proc.wait()
def kill_hadoop(ctx):
for remote in ctx.cluster.remotes.iterkeys():
pids_out = StringIO()
ps_proc = remote.run(args=[
"ps", "-eo", "pid,cmd",
run.Raw("|"), "grep", "java.*hadoop",
run.Raw("|"), "grep", "-v", "grep"
], stdout=pids_out, check_status=False)
if ps_proc.exitstatus == 0:
for line in pids_out.getvalue().strip().split("\n"):
pid, cmdline = line.split(None, 1)
log.info("Killing PID {0} ({1})".format(pid, cmdline))
remote.run(args=["kill", "-9", pid], check_status=False)
def find_kernel_mounts(ctx):
nodes = {}
log.info('Looking for kernel mounts to handle...')
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
'grep', '-q', ' ceph ', '/etc/mtab',
run.Raw('||'),
'grep', '-q', '^/dev/rbd', '/etc/mtab',
],
wait=False,
)
nodes[remote] = proc
kernel_mounts = list()
for remote, proc in nodes.iteritems():
try:
proc.wait()
log.debug('kernel mount exists on %s', remote.name)
kernel_mounts.append(remote)
except run.CommandFailedError: # no mounts!
log.debug('no kernel mount on %s', remote.name)
return kernel_mounts
def remove_kernel_mounts(ctx, kernel_mounts):
"""
properly we should be able to just do a forced unmount,
but that doesn't seem to be working, so you should reboot instead
"""
nodes = {}
for remote in kernel_mounts:
log.info('clearing kernel mount from %s', remote.name)
proc = remote.run(
args=[
'grep', 'ceph', '/etc/mtab', run.Raw('|'),
'grep', '-o', "on /.* type", run.Raw('|'),
'grep', '-o', "/.* ", run.Raw('|'),
'xargs', '-r',
                'sudo', 'umount', '-f',
],
wait=False
)
nodes[remote] = proc
    for remote, proc in nodes.iteritems():
proc.wait()
def remove_osd_mounts(ctx):
"""
unmount any osd data mounts (scratch disks)
"""
ctx.cluster.run(
args=[
'grep',
'/var/lib/ceph/osd/',
'/etc/mtab',
run.Raw('|'),
'awk', '{print $2}', run.Raw('|'),
'xargs', '-r',
'sudo', 'umount', run.Raw(';'),
'true'
],
)
def remove_osd_tmpfs(ctx):
"""
unmount tmpfs mounts
"""
ctx.cluster.run(
args=[
            'egrep', r'tmpfs\s+/mnt', '/etc/mtab', run.Raw('|'),
'awk', '{print $2}', run.Raw('|'),
'xargs', '-r',
'sudo', 'umount', run.Raw(';'),
'true'
],
)
def reboot(ctx, remotes):
nodes = {}
for remote in remotes:
log.info('rebooting %s', remote.name)
try:
proc = remote.run( # note use of -n to force a no-sync reboot
args=[
'sync',
run.Raw('&'),
'sleep', '5',
run.Raw(';'),
'sudo', 'reboot', '-f', '-n'
],
wait=False
)
except Exception:
log.exception('ignoring exception during reboot command')
nodes[remote] = proc
# we just ignore these procs because reboot -f doesn't actually
# send anything back to the ssh client!
# for remote, proc in nodes.iteritems():
# proc.wait()
if remotes:
log.info('waiting for nodes to reboot')
time.sleep(8) # if we try and reconnect too quickly, it succeeds!
reconnect(ctx, 480) # allow 8 minutes for the reboots
def reset_syslog_dir(ctx):
nodes = {}
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
'if', 'test', '-e', '/etc/rsyslog.d/80-cephtest.conf',
run.Raw(';'),
'then',
'sudo', 'rm', '-f', '--', '/etc/rsyslog.d/80-cephtest.conf',
run.Raw('&&'),
'sudo', 'service', 'rsyslog', 'restart',
run.Raw(';'),
'fi',
run.Raw(';'),
],
wait=False,
)
nodes[remote.name] = proc
for name, proc in nodes.iteritems():
log.info('Waiting for %s to restart syslog...', name)
proc.wait()
def dpkg_configure(ctx):
nodes = {}
for remote in ctx.cluster.remotes.iterkeys():
if remote.os.package_type != 'deb':
continue
proc = remote.run(
args=[
'sudo', 'dpkg', '--configure', '-a',
run.Raw(';'),
'sudo', 'DEBIAN_FRONTEND=noninteractive',
'apt-get', '-y', '--force-yes', '-f', 'install',
run.Raw('||'),
':',
],
wait=False,
)
nodes[remote.name] = proc
for name, proc in nodes.iteritems():
log.info(
'Waiting for %s to dpkg --configure -a and apt-get -f install...',
name)
proc.wait()
def remove_yum_timedhosts(ctx):
# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1233329
log.info("Removing yum timedhosts files...")
for remote in ctx.cluster.remotes.iterkeys():
if remote.os.package_type != 'rpm':
continue
remote.run(
args="sudo find /var/cache/yum -name 'timedhosts' -exec rm {} \;",
check_status=False,
)
def remove_installed_packages(ctx):
dpkg_configure(ctx)
conf = {'project': 'ceph'}
install_task.remove_packages(
ctx,
conf,
{"deb": install_task.PACKAGES['ceph']['deb'] +
['salt-common', 'salt-minion', 'calamari-server', 'python-rados'],
"rpm": install_task.PACKAGES['ceph']['rpm'] +
['salt-common', 'salt-minion', 'calamari-server']})
install_task.remove_sources(ctx, conf)
install_task.purge_data(ctx)
def remove_testing_tree(ctx):
nodes = {}
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
'sudo', 'rm', '-rf', get_testdir(ctx),
# just for old time's sake
run.Raw('&&'),
'sudo', 'rm', '-rf', '/tmp/cephtest',
run.Raw('&&'),
'sudo', 'rm', '-rf', '/home/ubuntu/cephtest',
run.Raw('&&'),
'sudo', 'rm', '-rf', '/etc/ceph',
],
wait=False,
)
nodes[remote.name] = proc
for name, proc in nodes.iteritems():
log.info('Waiting for %s to clear filesystem...', name)
proc.wait()
def remove_configuration_files(ctx):
"""
Goes through a list of commonly used configuration files used for testing
that should not be left behind.
For example, sometimes ceph-deploy may be configured via
``~/.cephdeploy.conf`` to alter how it handles installation by specifying
a default section in its config with custom locations.
"""
nodes = {}
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
'rm', '-f', '/home/ubuntu/.cephdeploy.conf'
],
wait=False,
)
nodes[remote.name] = proc
for name, proc in nodes.iteritems():
log.info('removing temporary configuration files on %s', name)
proc.wait()
def synch_clocks(remotes):
nodes = {}
for remote in remotes:
proc = remote.run(
args=[
'sudo', 'service', 'ntp', 'stop',
run.Raw('&&'),
'sudo', 'ntpdate-debian',
run.Raw('&&'),
'sudo', 'hwclock', '--systohc', '--utc',
run.Raw('&&'),
'sudo', 'service', 'ntp', 'start',
run.Raw('||'),
'true', # ignore errors; we may be racing with ntpd startup
],
wait=False,
)
nodes[remote.name] = proc
for name, proc in nodes.iteritems():
log.info('Waiting for clock to synchronize on %s...', name)
proc.wait()
def main(args):
ctx = FakeNamespace(args)
if ctx.verbose:
teuthology.log.setLevel(logging.DEBUG)
info = {}
if ctx.archive:
ctx.config = config_file(ctx.archive + '/config.yaml')
ifn = os.path.join(ctx.archive, 'info.yaml')
if os.path.exists(ifn):
            with open(ifn, 'r') as fd:
info = yaml.load(fd.read())
if not ctx.pid:
ctx.pid = info.get('pid')
if not ctx.pid:
ctx.pid = int(open(ctx.archive + '/pid').read().rstrip('\n'))
if not ctx.owner:
ctx.owner = info.get('owner')
if not ctx.owner:
ctx.owner = open(ctx.archive + '/owner').read().rstrip('\n')
if ctx.targets:
ctx.config = merge_configs(ctx.targets)
if ctx.stale:
stale_nodes = find_stale_locks(ctx.owner)
targets = dict()
for node in stale_nodes:
targets[node['name']] = node['ssh_pub_key']
ctx.config = dict(targets=targets)
log.info(
'\n '.join(
['targets:', ] + yaml.safe_dump(
ctx.config['targets'],
default_flow_style=False).splitlines()))
if ctx.dry_run:
log.info("Not actually nuking anything since --dry-run was passed")
return
if ctx.owner is None:
ctx.owner = get_user()
if ctx.pid:
if ctx.archive:
log.info('Killing teuthology process at pid %d', ctx.pid)
os.system('grep -q %s /proc/%d/cmdline && sudo kill %d' % (
ctx.archive,
ctx.pid,
ctx.pid))
else:
subprocess.check_call(["kill", "-9", str(ctx.pid)])
nuke(ctx, ctx.unlock, ctx.synch_clocks, ctx.reboot_all, ctx.noipmi)
def nuke(ctx, should_unlock, sync_clocks=True, reboot_all=True, noipmi=False):
if 'targets' not in ctx.config:
return
total_unnuked = {}
targets = dict(ctx.config['targets'])
if ctx.name:
log.info('Checking targets against current locks')
locks = list_locks()
        # Remove targets whose description doesn't match the archive name.
for lock in locks:
for target in targets:
if target == lock['name']:
if ctx.name not in lock['description']:
del ctx.config['targets'][lock['name']]
log.info(
"Not nuking %s because description doesn't match",
lock['name'])
with parallel() as p:
for target, hostkey in ctx.config['targets'].iteritems():
p.spawn(
nuke_one,
ctx,
{target: hostkey},
should_unlock,
sync_clocks,
reboot_all,
ctx.config.get('check-locks', True),
noipmi,
)
for unnuked in p:
if unnuked:
total_unnuked.update(unnuked)
if total_unnuked:
log.error('Could not nuke the following targets:\n' +
'\n '.join(['targets:', ] +
yaml.safe_dump(
total_unnuked,
default_flow_style=False).splitlines()))
def nuke_one(ctx, target, should_unlock, synch_clocks, reboot_all,
check_locks, noipmi):
ret = None
ctx = argparse.Namespace(
config=dict(targets=target),
owner=ctx.owner,
check_locks=check_locks,
synch_clocks=synch_clocks,
reboot_all=reboot_all,
teuthology_config=ctx.teuthology_config,
name=ctx.name,
noipmi=noipmi,
)
try:
nuke_helper(ctx, should_unlock)
except Exception:
log.exception('Could not nuke %s' % target)
        # not re-raising so that parallel calls aren't killed
ret = target
else:
if should_unlock:
unlock_one(ctx, target.keys()[0], ctx.owner)
return ret
def nuke_helper(ctx, should_unlock):
# ensure node is up with ipmi
(target,) = ctx.config['targets'].keys()
host = target.split('@')[-1]
shortname = host.split('.')[0]
if should_unlock:
if 'vpm' in shortname:
return
status_info = get_status(host)
if status_info['is_vm'] and status_info['machine_type'] == 'openstack':
return
log.debug('shortname: %s' % shortname)
log.debug('{ctx}'.format(ctx=ctx))
if (not ctx.noipmi and 'ipmi_user' in ctx.teuthology_config and
'vpm' not in shortname):
console = orchestra.remote.getRemoteConsole(
name=host,
ipmiuser=ctx.teuthology_config['ipmi_user'],
ipmipass=ctx.teuthology_config['ipmi_password'],
ipmidomain=ctx.teuthology_config['ipmi_domain'])
cname = '{host}.{domain}'.format(
host=shortname,
domain=ctx.teuthology_config['ipmi_domain'])
log.info('checking console status of %s' % cname)
if not console.check_status():
# not powered on or can't get IPMI status. Try to power on
console.power_on()
# try to get status again, waiting for login prompt this time
log.info('checking console status of %s' % cname)
if not console.check_status(100):
                log.error('Failed to get console status for %s, '
                          'disabling console...', cname)
log.info('console ready on %s' % cname)
else:
log.info('console ready on %s' % cname)
if ctx.check_locks:
# does not check to ensure if the node is 'up'
# we want to be able to nuke a downed node
check_lock(ctx, None, check_up=False)
add_remotes(ctx, None)
connect(ctx, None)
log.info("Clearing teuthology firewall rules...")
clear_firewall(ctx)
log.info("Cleared teuthology firewall rules.")
    log.info('Unmounting ceph-fuse and killing daemons...')
shutdown_daemons(ctx)
log.info('All daemons killed.')
need_reboot = find_kernel_mounts(ctx)
# no need to unmount anything if we're rebooting
if ctx.reboot_all:
need_reboot = ctx.cluster.remotes.keys()
else:
        log.info('Unmounting any osd data directories...')
        remove_osd_mounts(ctx)
        log.info('Unmounting any osd tmpfs dirs...')
        remove_osd_tmpfs(ctx)
# log.info('Dealing with any kernel mounts...')
# remove_kernel_mounts(ctx, need_reboot)
log.info("Terminating Hadoop services...")
kill_hadoop(ctx)
if need_reboot:
reboot(ctx, need_reboot)
log.info('All kernel mounts gone.')
log.info('Synchronizing clocks...')
if ctx.synch_clocks:
need_reboot = ctx.cluster.remotes.keys()
synch_clocks(need_reboot)
log.info('Making sure firmware.git is not locked...')
ctx.cluster.run(args=['sudo', 'rm', '-f',
'/lib/firmware/updates/.git/index.lock', ])
remove_configuration_files(ctx)
    log.info('Resetting syslog output locations...')
reset_syslog_dir(ctx)
log.info('Clearing filesystem of test data...')
remove_testing_tree(ctx)
log.info('Filesystem Cleared.')
remove_yum_timedhosts(ctx)
remove_installed_packages(ctx)
log.info('Installed packages removed.')
# The MIT License (MIT)
#
# Copyright (c) 2016 Yutkin Dmitry
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import requests
import sys
import datetime
from datetime import timedelta
from multiprocessing import current_process, cpu_count
from .argparser import parse_args
from .constants import VKAPI_URL, VKAPI_VERSION, APP_ACCESS_KEY
from .utils import get_page_id, VKApiError, pretty_print
from .post import Post
import logging
logging.basicConfig(
level=logging.INFO,
format="[\033[92m%(levelname)s %(asctime)s\033[0m]: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
# Removing noisy debug messages from lib request
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
logger = logging.getLogger()
class PostDownloader:
def __init__(self, page_id, from_date=None, to_date=None):
self.page_id = page_id
self.api_url = VKAPI_URL + "wall.get"
self.request_params = {
"owner_id": self.page_id,
"v": VKAPI_VERSION,
"access_token": APP_ACCESS_KEY,
}
self.from_date = from_date or datetime.date.min
self.to_date = to_date or datetime.date.max
def _number_of_posts(self):
""" Returns total number of post on the page """
self.request_params.update({"offset": 0, "count": 1})
response = requests.get(self.api_url, params=self.request_params).json()
if "error" in response:
raise VKApiError(response["error"]["error_msg"])
total_posts = response["response"]["count"]
logger.debug("Posts to fetch: {}".format(total_posts))
return total_posts
def fetch(self, init_offset=0, num_to_fetch=None):
""" Downloads 'num_to_fetch' posts starting from 'init_offset' position """
num_to_fetch = num_to_fetch or self._number_of_posts()
self.request_params['offset'] = init_offset
self.request_params['count'] = min(num_to_fetch, 100)
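        # the code assumes wall.get returns at most 100 posts per call, so the
        # download is paged in chunks of up to 100 (offset/count are advanced below)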
logger.debug(
"{} trying to download {} posts".format(
current_process().name, num_to_fetch
)
)
fetched_posts, fetched_counter = [], 0
while fetched_counter != num_to_fetch:
response = requests.get(self.api_url, self.request_params).json()
if "error" in response:
raise VKApiError(response["error"]["error_msg"])
posts = response["response"]["items"]
fetched_counter += len(posts)
logger.debug(
"{} downloaded {}/{} posts".format(
current_process().name, fetched_counter, num_to_fetch
)
)
for post in posts:
post = Post(
id=post["id"],
text=post["text"],
likes=post["likes"]["count"],
reposts=post["reposts"]["count"],
date=datetime.date.fromtimestamp(post["date"]),
url="https://vk.com/wall{}_{}".format(self.page_id, post["id"]),
is_pinned=post.get("is_pinned", 0)
)
if self.from_date <= post.date <= self.to_date:
fetched_posts.append(post)
                # Early stopping: all subsequent posts should be discarded
elif post.date < self.from_date and post.is_pinned == 0:
logger.debug(
"{} finally returns {} posts".format(
current_process().name, len(fetched_posts)
)
)
return fetched_posts
self.request_params["offset"] += 100
self.request_params["count"] = min(num_to_fetch - fetched_counter, 100)
logger.debug(
"{} returns eventually {} posts".format(
current_process().name, len(fetched_posts)
)
)
return fetched_posts
def parallel_fetch(self, max_workers=None):
"""
Downloads posts in parallel processes.
Each worker downloads independent segment.
"""
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import as_completed
# Total number of posts to download
num_posts = self._number_of_posts()
num_workers = max_workers or cpu_count()
fetched_posts = []
with ProcessPoolExecutor(max_workers=num_workers) as executor:
futures = []
for offset, count in self._distribute_posts(num_posts, num_workers):
futures.append(executor.submit(self.fetch, offset, count))
for future in as_completed(futures):
try:
fetched_posts.extend(future.result())
except Exception as error:
logger.error(error)
return fetched_posts
def _distribute_posts(self, total_posts, workers):
"""
Uniformly distributes posts for downloading between workers.
Returns next start position for downloading and number of posts to fetch.
"""
per_worker = total_posts // workers + total_posts % workers
for offset in range(0, total_posts, per_worker):
if (offset + per_worker) < total_posts:
yield offset, per_worker
else:
yield offset, total_posts - offset
def main():
args = vars(parse_args())
if args["verbose"]:
logger.setLevel(logging.DEBUG)
if args["days"]:
if args["from"] or args["to"]:
logger.error(
"vktop: error: -d/--days option cannot be used with "
"-f/--from or -t/--to options"
)
sys.exit(1)
else:
args["from"] = datetime.date.today() - timedelta(days=args["days"])
try:
page_id = get_page_id(args["url"])
except (RuntimeError, requests.exceptions.ConnectionError) as error:
logger.error(error)
sys.exit(1)
logger.info("Downloading posts. This may take some time, be patient...")
downloader = PostDownloader(page_id, args["from"], args["to"])
try:
if sys.version_info > (3, 0):
posts = downloader.parallel_fetch(args["workers"])
else:
# TODO:
# Python 2.x does not support concurrent.futures out of the box,
# therefore in Python 2.x using synchronous downloading
if args["workers"]:
logger.warning("Python 2 does not support parallel downloading!")
posts = downloader.fetch()
except (KeyboardInterrupt, VKApiError, Exception) as err:
logger.error(err)
sys.exit(1)
logger.debug("Sorting of {} posts".format(len(posts)))
if args["reposts"]:
posts = sorted(posts, key=lambda x: (-x.reposts, -x.likes))
else:
posts = sorted(posts, key=lambda x: (-x.likes, -x.reposts))
pretty_print(posts[: args["top"]])
if __name__ == "__main__":
main()
from geopar.angle_class import Angle
__author__ = 'mostly satbek' # edits by eric braude
class TriangulatedFigure:
"""
Class Invariants
1: self.triangles is a list of triangle, each with a unique set of vertices
2: For every triangle t1 in self.triangles, there is a t2 in self.triangles
such that t1.points and t2.points share 2 elements
"""
def __init__(self, triangles=None):
# the Triangle objects that make up self
if triangles:
self._triangles = triangles
else:
self._triangles = []
def __str__(self):
"""
Returns a string representation of self.
"""
return_str = ""
for current_triangle in self._triangles:
return_str += str(current_triangle)
return_str += "\n"
return return_str
def add(self, a_triangle):
# !!!
# Precondition 1: a_triangle is a Triangle instance
# Precondition 2: len(self.triangles) < 2
# --XOR--
# a_triangle ... is not in self.triangles AND
# ... shares two vertices with a Triangle in old(self.triangles)
# Postcondition: a_triangle is in self.triangles
self._triangles.append(a_triangle)
def all_angles_are_known(self):
"""
Returns True if all angles in self are known, False otherwise.
"""
for triangle in self._triangles:
if triangle.has_unknown_angle():
return False
return True
def angle_points_of_unknown_angles_at(self, a_point):
"""
Returns a list of angle points of unknown angles at a_point.
PRE: a_point is in self.get_points
POST: list_of_points contains angle points of unknown angles
"""
list_of_points = []
triangles = self.triangles_at(a_point)
for triangle in triangles:
angle = triangle.angle_of_point(a_point)
if not angle.is_known():
angle_points = triangle.get_angle_points_by_point(a_point)
list_of_points.append(angle_points)
return list_of_points
def make_angles_known_at(self, a_point):
"""
Computes an unknown angle at a point by using 360 degrees rule.
PRE1: a_point is an interior point of a triangulated figure a_tf
PRE2: there is exactly one unknown angle at a_point
POST: unknown angle (see PRE2) is computed
"""
# (Counted) unknowns_count contains the number of unknown angles at a_point
        # unknowns_count is used to keep PRE2 true
unknowns_count = self.number_of_unknown_angles_at(a_point)
# (Summed up) angles_sum is a sum of known angles at a_point
angles_sum = self.sum_of_known_angles_at(a_point)
# (Found and set) unknown_angle is the value of the unknown_angle
unknown_angle = 360 - angles_sum
if unknowns_count == 1:
# (Recorded) angle_points is a list of angle_points of unknown_angle at a_point
angle_points = self.angle_points_of_unknown_angles_at(a_point)[-1]
self.set_angle_by_angle_points(*angle_points, unknown_angle)
def get_angle_by_angle_points(self, p1, p2, p3):
"""
Returns an angle in a triangulated figure by the angle's angle points.
PRE1: (p1 and p2 and p3) are in self.get_points()
PRE2: Points are in clockwise order
"""
for triangle in self._triangles:
if triangle.has_all_points([p1, p2, p3]):
return triangle.angle_of_point(p2)
def get_id(self):
# 'id' of a triangulated figure is an integer number (result of built-in hash() function)
# that is unique to every triangulated figure with different configurations.
        # That is, two triangulated figures with equivalent configurations have the same id.
return hash(str(sorted(list(map(hash, self._triangles)))))
def get_interior_points(self):
"""
Returns the list of interior points in self.
OBJECTIVES:
(Found 1a): found the points that have more than 2 triangles attached to them
AND
(Found 1b): saved them in point_nums, alongside with number of triangles that they are in
(Found 2): found interior points
(Complement): returned interior_points
"""
# (Found 1a)
all_points = self.get_points()
point_nums = []
for point in all_points:
n = len(self.triangles_at(point))
if n > 2:
# (Found 1b)
point_nums.append((point, n))
# (Found 2)
interior_points = []
for point_num in point_nums:
points = []
for triangle in self.get_triangles():
if triangle.has_point(point_num[0]):
points.extend(triangle.get_points())
if len(set(points)) == point_num[1] + 1:
interior_points.append(point_num[0])
# (Complement): all interior points found
return interior_points
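    # Illustrative check (hypothetical figure): a point shared by 4 triangles whose
    # vertices together give exactly 5 distinct points (the centre plus 4 around it)
    # is interior; a boundary point touched by 4 triangles would see 6 distinct points,
    # so the n + 1 test above rejects it.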
def get_points(self):
"""
Returns a set of all points that make up self.
"""
all_points = list()
for triangle in self._triangles:
all_points.extend(triangle.get_points())
return list(set(all_points))
def get_triangles(self):
"""
Returns a list of triangles that make up self.
"""
return self._triangles
def is_empty(self):
"""
Returns True if self has no triangles, False otherwise.
"""
return not bool(self._triangles)
def number_of_unknown_angles_at(self, a_point):
"""
Returns the number of unknown angles at a_point.
PRE: a_point is in self.get_points
POST: count contains the number of unknown angles at a_point
"""
count = 0
triangles = self.triangles_at(a_point)
for triangle in triangles:
angle = triangle.angle_of_point(a_point)
if not angle.is_known():
count += 1
return count
def set_angle_by_angle_points(self, p1, p2, p3, angle_):
"""
Sets an angle in a triangulated figure by the angle's angle points.
Any angle in a Triangulated Figure can be described by a unique set of points
called angle points. In geometry,
A
|
|
|_ a
|_|________C
B
        angle a can be referred to as ABC. A, B, and C are the endpoints of line segments AB and BC.
        In this project, we use the same geometric notation to describe an angle in a triangulated figure.
        Since points in a triangulated figure are unique, any angle has its own set of unique points.
We call them angle points. To make things consistent, we describe an angle by its angle points
in clockwise order. So, for the above example, angle points for angle a would be CBA.
PRE1: (p1 and p2 and p3) are in self.get_points()
PRE2: Points are in clockwise order
PRE3: angle_ is (Angle or int or float) instance
PRE4: angle_ has the same dimensionality as any of known angles in self
        POST: the angle at p2 in the triangle containing p1, p2, p3 equals angle_
"""
for triangle in self._triangles:
if triangle.has_all_points([p1, p2, p3]):
triangle.set_angle_by_point(p2, angle_)
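    # Hedged usage sketch (point labels and the 60-degree value are made up; assumes
    # the figure contains a triangle whose points 1, 2, 3 appear in clockwise order):
    #   tf.set_angle_by_angle_points(1, 2, 3, 60)   # sets the angle at point 2
    #   tf.get_angle_by_angle_points(1, 2, 3)       # -> the angle just set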
def sum_of_known_angles_at(self, a_point):
"""
Returns the sum of known angles at a_point.
PRE: a_point is in self.get_points
POST: sum_angles contains the sum of known angles at a_point
"""
sum_angles = 0
triangles = self.triangles_at(a_point)
for triangle in triangles:
angle = triangle.angle_of_point(a_point)
if angle.is_known():
sum_angles += angle
return sum_angles
def triangles_at(self, a_point):
"""
Returns the (contiguous) list of self.triangles containing a_point in clockwise order.
PRE: At least one triangle in self.triangles contains a_point
"""
# [Collected]: triangles_with_a_point =
# the triangles in self.triangles containing a_point
triangles_with_a_point = []
for triangle in self._triangles:
if triangle.has_point(a_point):
triangles_with_a_point.append(triangle)
# (In Order): triangles_in_order is a non-empty sub-list of
# triangles_with_a_point, which is in clockwise order
# AND triangles_remaining = triangles_with_a_point\triangles_in_order
triangles_in_order = [triangles_with_a_point[0]]
triangles_remaining = triangles_with_a_point[1:]
while len(triangles_in_order) < len(triangles_with_a_point):
for triangle_ in triangles_remaining:
point_following = triangles_in_order[0].point_following(a_point)
if triangle_.point_preceding(a_point) == point_following:
triangles_in_order.insert(0, triangle_)
triangles_remaining.remove(triangle_)
break
point_preceding = triangles_in_order[-1].point_preceding(a_point)
if triangle_.point_following(a_point) == point_preceding:
triangles_in_order.append(triangle_)
triangles_remaining.remove(triangle_)
break
# (Complement): len(triangles_in_order) = len(triangles_with_a_point)
return triangles_in_order
|
|
from __future__ import absolute_import
from collections import namedtuple
from copy import deepcopy
import logging
import random
import sys
import time
import six
from kafka.client import SimpleClient
from kafka.common import (
check_error, NotLeaderForPartitionError, UnknownTopicOrPartitionError,
OffsetOutOfRangeError, RequestTimedOutError, KafkaMessage, ConsumerTimeout,
FailedPayloadsError, KafkaUnavailableError, KafkaConfigurationError
)
from kafka.metrics.metrics import Metrics
from kafka.metrics.stats.rate import Rate
from kafka.protocol.message import PartialMessage
from kafka.structs import (
FetchRequestPayload, OffsetCommitRequestPayload, OffsetFetchRequestPayload,
OffsetRequestPayload
)
logger = logging.getLogger(__name__)
OffsetsStruct = namedtuple("OffsetsStruct", ["fetch", "highwater", "commit", "task_done"])
DEFAULT_CONSUMER_CONFIG = {
'client_id': __name__,
'group_id': None,
'bootstrap_servers': [],
'socket_timeout_ms': 30 * 1000,
'fetch_message_max_bytes': 1024 * 1024,
'auto_offset_reset': 'largest',
'fetch_min_bytes': 1,
'fetch_wait_max_ms': 100,
'refresh_leader_backoff_ms': 200,
'deserializer_class': lambda msg: msg,
'auto_commit_enable': False,
'auto_commit_interval_ms': 60 * 1000,
'auto_commit_interval_messages': None,
'consumer_timeout_ms': -1,
'metrics_reporter': None,
'offset_storage': 'zookeeper',
# Currently unused
'socket_receive_buffer_bytes': 64 * 1024,
'num_consumer_fetchers': 1,
'default_fetcher_backoff_ms': 1000,
'queued_max_message_chunks': 10,
'rebalance_max_retries': 4,
'rebalance_backoff_ms': 2000,
}
DEPRECATED_CONFIG_KEYS = {
'metadata_broker_list': 'bootstrap_servers',
}
class KafkaConsumer(object):
"""A simpler kafka consumer"""
DEFAULT_CONFIG = deepcopy(DEFAULT_CONSUMER_CONFIG)
def __init__(self, *topics, **configs):
self.configure(**configs)
self.set_topic_partitions(*topics)
def configure(self, **configs):
"""Configure the consumer instance
Configuration settings can be passed to constructor,
otherwise defaults will be used:
Keyword Arguments:
bootstrap_servers (list): List of initial broker nodes the consumer
should contact to bootstrap initial cluster metadata. This does
not have to be the full node list. It just needs to have at
least one broker that will respond to a Metadata API Request.
client_id (str): a unique name for this client. Defaults to
'kafka.consumer.kafka'.
group_id (str): the name of the consumer group to join,
Offsets are fetched / committed to this group name.
fetch_message_max_bytes (int, optional): Maximum bytes for each
topic/partition fetch request. Defaults to 1024*1024.
fetch_min_bytes (int, optional): Minimum amount of data the server
should return for a fetch request, otherwise wait up to
fetch_wait_max_ms for more data to accumulate. Defaults to 1.
fetch_wait_max_ms (int, optional): Maximum time for the server to
block waiting for fetch_min_bytes messages to accumulate.
Defaults to 100.
refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
when refreshing metadata on errors (subject to random jitter).
Defaults to 200.
socket_timeout_ms (int, optional): TCP socket timeout in
milliseconds. Defaults to 30*1000.
auto_offset_reset (str, optional): A policy for resetting offsets on
OffsetOutOfRange errors. 'smallest' will move to the oldest
available message, 'largest' will move to the most recent. Any
                other value will raise the exception. Defaults to 'largest'.
deserializer_class (callable, optional): Any callable that takes a
raw message value and returns a deserialized value. Defaults to
lambda msg: msg.
auto_commit_enable (bool, optional): Enabling auto-commit will cause
the KafkaConsumer to periodically commit offsets without an
explicit call to commit(). Defaults to False.
auto_commit_interval_ms (int, optional): If auto_commit_enabled,
the milliseconds between automatic offset commits. Defaults to
60 * 1000.
auto_commit_interval_messages (int, optional): If
auto_commit_enabled, a number of messages consumed between
automatic offset commits. Defaults to None (disabled).
            consumer_timeout_ms (int, optional): number of milliseconds after which
                to raise a timeout exception if no message is available for
                consumption. Defaults to -1 (do not raise an exception).
Configuration parameters are described in more detail at
http://kafka.apache.org/documentation.html#highlevelconsumerapi
"""
configs = self._deprecate_configs(**configs)
self._config = {}
for key in self.DEFAULT_CONFIG:
self._config[key] = configs.pop(key, self.DEFAULT_CONFIG[key])
if configs:
raise KafkaConfigurationError('Unknown configuration key(s): ' +
str(list(configs.keys())))
if self._config['auto_commit_enable']:
if not self._config['group_id']:
raise KafkaConfigurationError(
'KafkaConsumer configured to auto-commit '
'without required consumer group (group_id)'
)
# Check auto-commit configuration
if self._config['auto_commit_enable']:
logger.info("Configuring consumer to auto-commit offsets")
self._reset_auto_commit()
if not self._config['bootstrap_servers']:
raise KafkaConfigurationError(
'bootstrap_servers required to configure KafkaConsumer'
)
reporters = [self._config['metrics_reporter']()] if \
self._config['metrics_reporter'] else []
metrics = Metrics(reporters=reporters)
self.metrics = KafkaConsumerMetrics(metrics)
self._client = SimpleClient(
self._config['bootstrap_servers'],
client_id=self._config['client_id'],
timeout=(self._config['socket_timeout_ms'] / 1000.0),
metrics=metrics,
)
def set_topic_partitions(self, *topics):
"""
Set the topic/partitions to consume
Optionally specify offsets to start from
Accepts types:
* str (utf-8): topic name (will consume all available partitions)
* tuple: (topic, partition)
* dict:
- { topic: partition }
- { topic: [partition list] }
- { topic: (partition tuple,) }
Optionally, offsets can be specified directly:
* tuple: (topic, partition, offset)
* dict: { (topic, partition): offset, ... }
Example:
.. code:: python
kafka = KafkaConsumer()
# Consume topic1-all; topic2-partition2; topic3-partition0
kafka.set_topic_partitions("topic1", ("topic2", 2), {"topic3": 0})
# Consume topic1-0 starting at offset 12, and topic2-1 at offset 45
# using tuples --
kafka.set_topic_partitions(("topic1", 0, 12), ("topic2", 1, 45))
# using dict --
kafka.set_topic_partitions({ ("topic1", 0): 12, ("topic2", 1): 45 })
"""
self._topics = []
self._client.load_metadata_for_topics()
# Setup offsets
self._offsets = OffsetsStruct(fetch=dict(),
commit=dict(),
highwater=dict(),
task_done=dict())
# Handle different topic types
for arg in topics:
# Topic name str -- all partitions
if isinstance(arg, (six.string_types, six.binary_type)):
topic = arg
for partition in self._client.get_partition_ids_for_topic(topic):
self._consume_topic_partition(topic, partition)
# (topic, partition [, offset]) tuple
elif isinstance(arg, tuple):
topic = arg[0]
partition = arg[1]
self._consume_topic_partition(topic, partition)
if len(arg) == 3:
offset = arg[2]
self._offsets.fetch[(topic, partition)] = offset
# { topic: partitions, ... } dict
elif isinstance(arg, dict):
for key, value in six.iteritems(arg):
# key can be string (a topic)
if isinstance(key, (six.string_types, six.binary_type)):
topic = key
# topic: partition
if isinstance(value, int):
self._consume_topic_partition(topic, value)
# topic: [ partition1, partition2, ... ]
elif isinstance(value, (list, tuple)):
for partition in value:
self._consume_topic_partition(topic, partition)
else:
raise KafkaConfigurationError(
'Unknown topic type '
'(dict key must be int or list/tuple of ints)'
)
# (topic, partition): offset
elif isinstance(key, tuple):
topic = key[0]
partition = key[1]
self._consume_topic_partition(topic, partition)
self._offsets.fetch[(topic, partition)] = value
else:
raise KafkaConfigurationError('Unknown topic type (%s)' % type(arg))
# If we have a consumer group, try to fetch stored offsets
if self._config['group_id']:
self._get_commit_offsets()
# Update missing fetch/commit offsets
for topic_partition in self._topics:
# Commit offsets default is None
if topic_partition not in self._offsets.commit:
self._offsets.commit[topic_partition] = None
# Skip if we already have a fetch offset from user args
if topic_partition not in self._offsets.fetch:
# Fetch offsets default is (1) commit
if self._offsets.commit[topic_partition] is not None:
self._offsets.fetch[topic_partition] = self._offsets.commit[topic_partition]
# or (2) auto reset
else:
self._offsets.fetch[topic_partition] = self._reset_partition_offset(topic_partition)
# highwater marks (received from server on fetch response)
# and task_done (set locally by user)
# should always get initialized to None
self._reset_highwater_offsets()
self._reset_task_done_offsets()
# Reset message iterator in case we were in the middle of one
self._reset_message_iterator()
def close(self):
"""Close this consumer's underlying client."""
self._client.close()
def next(self):
"""Return the next available message
Blocks indefinitely unless consumer_timeout_ms > 0
Returns:
a single KafkaMessage from the message iterator
Raises:
ConsumerTimeout after consumer_timeout_ms and no message
Note:
This is also the method called internally during iteration
"""
self._set_consumer_timeout_start()
while True:
try:
return six.next(self._get_message_iterator())
# Handle batch completion
except StopIteration:
self._reset_message_iterator()
self._check_consumer_timeout()
def fetch_messages(self):
"""Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
            * `fetch_wait_max_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset`
"""
max_bytes = self._config['fetch_message_max_bytes']
max_wait_time = self._config['fetch_wait_max_ms']
min_bytes = self._config['fetch_min_bytes']
if not self._topics:
raise KafkaConfigurationError('No topics or partitions configured')
if not self._offsets.fetch:
raise KafkaConfigurationError(
'No fetch offsets found when calling fetch_messages'
)
fetches = [FetchRequestPayload(topic, partition,
self._offsets.fetch[(topic, partition)],
max_bytes)
for (topic, partition) in self._topics]
# send_fetch_request will batch topic/partition requests by leader
responses = self._client.send_fetch_request(
fetches,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
fail_on_error=False
)
for resp in responses:
if isinstance(resp, FailedPayloadsError):
self.metrics.record('failed-payloads', 1)
logger.warning('FailedPayloadsError attempting to fetch data')
self._refresh_metadata_on_error()
continue
topic = resp.topic
partition = resp.partition
try:
check_error(resp)
except OffsetOutOfRangeError:
self.metrics.record('offset-out-of-range', 1)
logger.warning('OffsetOutOfRange: topic %s, partition %d, '
'offset %d (Highwatermark: %d)',
topic, partition,
self._offsets.fetch[(topic, partition)],
resp.highwaterMark)
# Reset offset
self._offsets.fetch[(topic, partition)] = (
self._reset_partition_offset((topic, partition))
)
continue
except NotLeaderForPartitionError:
self.metrics.record('not-leader-for-partition', 1)
logger.warning("NotLeaderForPartitionError for %s - %d. "
"Metadata may be out of date",
topic, partition)
self._refresh_metadata_on_error()
continue
except RequestTimedOutError:
self.metrics.record('request-timed-out', 1)
logger.warning("RequestTimedOutError for %s - %d",
topic, partition)
continue
# Track server highwater mark
self._offsets.highwater[(topic, partition)] = resp.highwaterMark
# Check for partial message and remove
if resp.messages and isinstance(resp.messages[-1].message, PartialMessage):
resp.messages.pop()
# Yield each message
# Kafka-python could raise an exception during iteration
# we are not catching -- user will need to address
for (offset, message) in resp.messages:
# deserializer_class could raise an exception here
val = self._config['deserializer_class'](message.value)
msg = KafkaMessage(topic, partition, offset, message.key, val)
# in some cases the server will return earlier messages
# than we requested. skip them per kafka spec
if offset < self._offsets.fetch[(topic, partition)]:
logger.debug('message offset less than fetched offset '
'skipping: %s', msg)
continue
# Only increment fetch offset
# if we safely got the message and deserialized
self._offsets.fetch[(topic, partition)] = offset + 1
# Then yield to user
yield msg
def get_partition_offsets(self, topic, partition, request_time_ms, max_num_offsets):
"""Request available fetch offsets for a single topic/partition
Keyword Arguments:
topic (str): topic for offset request
partition (int): partition for offset request
request_time_ms (int): Used to ask for all messages before a
certain time (ms). There are two special values.
Specify -1 to receive the latest offset (i.e. the offset of the
next coming message) and -2 to receive the earliest available
offset. Note that because offsets are pulled in descending
order, asking for the earliest offset will always return you a
single element.
max_num_offsets (int): Maximum offsets to include in the OffsetResponse
Returns:
a list of offsets in the OffsetResponse submitted for the provided
topic / partition. See:
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI
"""
reqs = [OffsetRequestPayload(topic, partition, request_time_ms, max_num_offsets)]
(resp,) = self._client.send_offset_request(reqs)
check_error(resp)
# Just for sanity..
# probably unnecessary
assert resp.topic == topic
assert resp.partition == partition
return resp.offsets
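    # Hedged example (topic/partition are placeholders): request_time_ms=-1 asks for
    # the latest offset (the offset of the next message to be written), -2 for the
    # earliest available one:
    #   (latest,) = consumer.get_partition_offsets('my-topic', 0, -1, max_num_offsets=1)
    #   (earliest,) = consumer.get_partition_offsets('my-topic', 0, -2, max_num_offsets=1)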
def offsets(self, group=None):
"""Get internal consumer offset values
Keyword Arguments:
group: Either "fetch", "commit", "task_done", or "highwater".
If no group specified, returns all groups.
Returns:
A copy of internal offsets struct
"""
if not group:
return {
'fetch': self.offsets('fetch'),
'commit': self.offsets('commit'),
'task_done': self.offsets('task_done'),
'highwater': self.offsets('highwater')
}
else:
return dict(deepcopy(getattr(self._offsets, group)))
def task_done(self, message):
"""Mark a fetched message as consumed.
Offsets for messages marked as "task_done" will be stored back
to the kafka cluster for this consumer group on commit()
Arguments:
message (KafkaMessage): the message to mark as complete
Returns:
True, unless the topic-partition for this message has not
been configured for the consumer. In normal operation, this
should not happen. But see github issue 364.
"""
topic_partition = (message.topic, message.partition)
if topic_partition not in self._topics:
logger.warning('Unrecognized topic/partition in task_done message: '
'{0}:{1}'.format(*topic_partition))
return False
offset = message.offset
# Warn on non-contiguous offsets
prev_done = self._offsets.task_done[topic_partition]
if prev_done is not None and offset != (prev_done + 1):
            logger.warning('Marking task_done on a non-contiguous offset: %d != %d + 1',
offset, prev_done)
# Warn on smaller offsets than previous commit
# "commit" offsets are actually the offset of the next message to fetch.
prev_commit = self._offsets.commit[topic_partition]
if prev_commit is not None and ((offset + 1) <= prev_commit):
logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',
offset, prev_commit)
self._offsets.task_done[topic_partition] = offset
# Check for auto-commit
if self._does_auto_commit_messages():
self._incr_auto_commit_message_count()
if self._should_auto_commit():
self.commit()
return True
def commit(self):
"""Store consumed message offsets (marked via task_done())
to kafka cluster for this consumer_group.
Returns:
True on success, or False if no offsets were found for commit
Note:
this functionality requires server version >=0.8.1.1
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
"""
if not self._config['group_id']:
logger.warning('Cannot commit without a group_id!')
raise KafkaConfigurationError(
'Attempted to commit offsets '
'without a configured consumer group (group_id)'
)
# API supports storing metadata with each commit
# but for now it is unused
metadata = b''
offsets = self._offsets.task_done
commits = []
for topic_partition, task_done_offset in six.iteritems(offsets):
# Skip if None
if task_done_offset is None:
continue
# Commit offsets as the next offset to fetch
# which is consistent with the Java Client
# task_done is marked by messages consumed,
# so add one to mark the next message for fetching
commit_offset = (task_done_offset + 1)
# Skip if no change from previous committed
if commit_offset == self._offsets.commit[topic_partition]:
continue
commits.append(
OffsetCommitRequestPayload(topic_partition[0], topic_partition[1],
commit_offset, metadata)
)
if commits:
logger.info('committing consumer offsets to group %s', self._config['group_id'])
resps = []
if self._config['offset_storage'] in ['zookeeper', 'dual']:
resps += self._client.send_offset_commit_request(
self._config['group_id'], commits,
fail_on_error=False,
)
if self._config['offset_storage'] in ['kafka', 'dual']:
resps += self._client.send_offset_commit_request_kafka(
self._config['group_id'], commits,
fail_on_error=False,
)
for r in resps:
check_error(r)
topic_partition = (r.topic, r.partition)
task_done = self._offsets.task_done[topic_partition]
self._offsets.commit[topic_partition] = (task_done + 1)
if self._config['auto_commit_enable']:
self._reset_auto_commit()
return True
else:
logger.info('No new offsets found to commit in group %s', self._config['group_id'])
return False
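    # Hedged at-least-once consumption sketch (assumes a configured group_id and
    # auto_commit_enable left at its default of False; process() is a placeholder):
    #   for msg in consumer:
    #       process(msg)
    #       consumer.task_done(msg)   # mark the offset as consumed
    #   consumer.commit()             # store task_done offsets (+1) for the group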
#
# Topic/partition management private methods
#
def _consume_topic_partition(self, topic, partition):
topic = topic
if not isinstance(partition, int):
raise KafkaConfigurationError('Unknown partition type (%s) '
'-- expected int' % type(partition))
if topic not in self._client.topic_partitions:
raise UnknownTopicOrPartitionError("Topic %s not found in broker metadata" % topic)
if partition not in self._client.get_partition_ids_for_topic(topic):
raise UnknownTopicOrPartitionError("Partition %d not found in Topic %s "
"in broker metadata" % (partition, topic))
logger.info("Configuring consumer to fetch topic '%s', partition %d", topic, partition)
self._topics.append((topic, partition))
def _refresh_metadata_on_error(self):
refresh_ms = self._config['refresh_leader_backoff_ms']
jitter_pct = 0.20
sleep_ms = random.randint(
int((1.0 - 0.5 * jitter_pct) * refresh_ms),
int((1.0 + 0.5 * jitter_pct) * refresh_ms)
)
while True:
logger.info("Sleeping for refresh_leader_backoff_ms: %d", sleep_ms)
time.sleep(sleep_ms / 1000.0)
try:
self._client.load_metadata_for_topics()
except KafkaUnavailableError:
logger.warning("Unable to refresh topic metadata... cluster unavailable")
self._check_consumer_timeout()
else:
logger.info("Topic metadata refreshed")
return
#
    # Offset-management private methods
#
def _get_commit_offsets(self):
logger.info("Consumer fetching stored offsets")
for topic_partition in self._topics:
resps = []
if self._config['offset_storage'] in ('zookeeper', 'dual'):
resps += self._client.send_offset_fetch_request(
self._config['group_id'],
[OffsetFetchRequestPayload(topic_partition[0], topic_partition[1])],
fail_on_error=False)
if self._config['offset_storage'] in ('kafka', 'dual'):
resps += self._client.send_offset_fetch_request_kafka(
self._config['group_id'],
[OffsetFetchRequestPayload(topic_partition[0], topic_partition[1])],
fail_on_error=False)
try:
for r in resps:
check_error(r)
            # API spec says the server won't set an error here
# but 0.8.1.1 does actually...
except UnknownTopicOrPartitionError:
pass
# -1 offset signals no commit is currently stored
max_offset = max(r.offset for r in resps)
if max_offset == -1:
self._offsets.commit[topic_partition] = None
# Otherwise we committed the stored offset
# and need to fetch the next one
else:
self._offsets.commit[topic_partition] = max_offset
def _reset_highwater_offsets(self):
for topic_partition in self._topics:
self._offsets.highwater[topic_partition] = None
def _reset_task_done_offsets(self):
for topic_partition in self._topics:
self._offsets.task_done[topic_partition] = None
def _reset_partition_offset(self, topic_partition):
(topic, partition) = topic_partition
LATEST = -1
EARLIEST = -2
request_time_ms = None
if self._config['auto_offset_reset'] == 'largest':
request_time_ms = LATEST
elif self._config['auto_offset_reset'] == 'smallest':
request_time_ms = EARLIEST
else:
            # Let's raise a reasonable exception type if the user calls
            # this outside of an exception context
if sys.exc_info() == (None, None, None):
raise OffsetOutOfRangeError('Cannot reset partition offsets without a '
'valid auto_offset_reset setting '
'(largest|smallest)')
# Otherwise we should re-raise the upstream exception
# b/c it typically includes additional data about
# the request that triggered it, and we do not want to drop that
raise # pylint: disable-msg=E0704
(offset, ) = self.get_partition_offsets(topic, partition,
request_time_ms, max_num_offsets=1)
return offset
#
# Consumer Timeout private methods
#
def _set_consumer_timeout_start(self):
self._consumer_timeout = False
if self._config['consumer_timeout_ms'] >= 0:
self._consumer_timeout = time.time() + (self._config['consumer_timeout_ms'] / 1000.0)
def _check_consumer_timeout(self):
if self._consumer_timeout and time.time() > self._consumer_timeout:
            raise ConsumerTimeout('Consumer timed out after %d ms' % self._config['consumer_timeout_ms'])
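    # Hedged sketch: with consumer_timeout_ms=1000, iteration raises ConsumerTimeout
    # after roughly one second without messages; a caller might handle it as:
    #   try:
    #       for msg in consumer:
    #           ...
    #   except ConsumerTimeout:
    #       pass  # no new messages within the window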
#
# Autocommit private methods
#
def _should_auto_commit(self):
if self._does_auto_commit_ms():
if time.time() >= self._next_commit_time:
return True
if self._does_auto_commit_messages():
if self._uncommitted_message_count >= self._config['auto_commit_interval_messages']:
return True
return False
def _reset_auto_commit(self):
self._uncommitted_message_count = 0
self._next_commit_time = None
if self._does_auto_commit_ms():
self._next_commit_time = time.time() + (self._config['auto_commit_interval_ms'] / 1000.0)
def _incr_auto_commit_message_count(self, n=1):
self._uncommitted_message_count += n
def _does_auto_commit_ms(self):
if not self._config['auto_commit_enable']:
return False
conf = self._config['auto_commit_interval_ms']
if conf is not None and conf > 0:
return True
return False
def _does_auto_commit_messages(self):
if not self._config['auto_commit_enable']:
return False
conf = self._config['auto_commit_interval_messages']
if conf is not None and conf > 0:
return True
return False
#
# Message iterator private methods
#
def __iter__(self):
return self
def __next__(self):
return self.next()
def _get_message_iterator(self):
# Fetch a new batch if needed
if self._msg_iter is None:
self._msg_iter = self.fetch_messages()
return self._msg_iter
def _reset_message_iterator(self):
self._msg_iter = None
#
# python private methods
#
def __repr__(self):
return '<{0} topics=({1})>'.format(
self.__class__.__name__,
'|'.join(["%s-%d" % topic_partition
for topic_partition in self._topics])
)
#
# other private methods
#
def _deprecate_configs(self, **configs):
for old, new in six.iteritems(DEPRECATED_CONFIG_KEYS):
if old in configs:
logger.warning('Deprecated Kafka Consumer configuration: %s. '
'Please use %s instead.', old, new)
old_value = configs.pop(old)
if new not in configs:
configs[new] = old_value
return configs
class KafkaConsumerMetrics(object):
def __init__(self, metrics):
self.metrics = metrics
self.group_name = 'legacy-kafka-consumer'
self.sensors = {}
def record(self, sensor_name, value):
sensor = self.sensors.get(sensor_name)
if not sensor:
sensor = self.metrics.sensor(sensor_name)
sensor.add(
self.metrics.metric_name(
sensor_name + '-rate',
self.group_name,
"Rate of {}".format(sensor_name),
),
Rate(),
)
self.sensors[sensor_name] = sensor
sensor.record(value)
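    # Illustrative note: the first call such as record('failed-payloads', 1) lazily
    # creates a sensor with a '<name>-rate' metric in group 'legacy-kafka-consumer';
    # later calls simply record onto the cached sensor.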
|
|
from decimal import Decimal
from django.db.models.query_utils import Q
from corehq import Domain
from corehq.apps.accounting import generator
from corehq.apps.accounting.models import BillingAccount, DefaultProductPlan, SoftwarePlanEdition, Subscription
from corehq.apps.commtrack.models import StockState, SupplyPointCase
from corehq.apps.locations.models import SQLLocation, LocationType
from datetime import timedelta, datetime
from dateutil import rrule
from dateutil.rrule import MO
from django.utils import html
from corehq.util.quickcache import quickcache
from corehq.apps.products.models import SQLProduct
from corehq.apps.sms.api import add_msg_tags
from corehq.apps.sms.models import SMSLog, OUTGOING
from corehq.apps.users.models import CommCareUser
from custom.ewsghana.models import EWSGhanaConfig
TEST_DOMAIN = 'ewsghana-receipts-test'
def get_descendants(location_id):
return SQLLocation.objects.get(
location_id=location_id
).get_descendants().exclude(supply_point_id__isnull=True).exclude(is_archived=True)
def get_second_week(start_date, end_date):
mondays = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, until=end_date, byweekday=(MO,), bysetpos=2))
for monday in mondays:
yield {
'start_date': monday,
'end_date': monday + timedelta(days=6)
}
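# Hedged example (dates chosen for illustration): for start_date 2015-01-01 and
# end_date 2015-02-28 this yields the week of the second Monday of each month,
# i.e. 2015-01-12..2015-01-18 and 2015-02-09..2015-02-15.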
def make_url(report_class, domain, string_params, args):
try:
return html.escape(
report_class.get_url(
domain=domain
) + string_params % args
)
except KeyError:
return None
# Calculate last full period (Friday - Thursday)
def calculate_last_period(enddate):
# checking if Thursday was already in this week
i = enddate.weekday() - 3
if i < 0:
# today is Monday, Tuesday or Wednesday -> calculate Thursday from previous week
last_th = enddate + timedelta(days=-i, weeks=-1)
else:
# today is Thursday, Friday, Saturday or Sunday -> calculate Thursday from this week
last_th = enddate - timedelta(days=i)
fr_before = last_th - timedelta(days=6)
return fr_before, last_th
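# Hedged example: for enddate Monday 2015-06-08, the last full Friday-Thursday
# period is 2015-05-29 through 2015-06-04:
#   calculate_last_period(datetime(2015, 6, 8))
#   -> (datetime(2015, 5, 29, 0, 0), datetime(2015, 6, 4, 0, 0))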
def send_test_message(verified_number, text, metadata=None):
msg = SMSLog(
couch_recipient_doc_type=verified_number.owner_doc_type,
couch_recipient=verified_number.owner_id,
phone_number="+" + str(verified_number.phone_number),
direction=OUTGOING,
date=datetime.utcnow(),
domain=verified_number.domain,
text=text,
processed=True,
datetime_to_process=datetime.utcnow(),
queued_timestamp=datetime.utcnow()
)
msg.save()
add_msg_tags(msg, metadata)
return True
def get_products_ids_assigned_to_rel_sp(domain, active_location=None):
def filter_relevant(queryset):
return queryset.filter(
supply_point_id__isnull=False
).values_list(
'products__product_id',
flat=True
)
if active_location:
sql_location = active_location.sql_location
products = []
if sql_location.supply_point_id:
products.append(sql_location.products.values_list('product_id', flat=True))
products += list(
filter_relevant(sql_location.get_descendants())
)
return products
else:
return filter_relevant(SQLLocation.objects.filter(domain=domain, is_archived=False))
def prepare_domain(domain_name):
from corehq.apps.commtrack.tests import bootstrap_domain
domain = bootstrap_domain(domain_name)
def _make_loc_type(name, administrative=False, parent_type=None):
return LocationType.objects.get_or_create(
domain=domain_name,
name=name,
administrative=administrative,
parent_type=parent_type,
)[0]
country = _make_loc_type(name="country", administrative=True)
_make_loc_type(name="Central Medical Store", parent_type=country)
_make_loc_type(name="Teaching Hospital", parent_type=country)
region = _make_loc_type(name="region", administrative=True, parent_type=country)
_make_loc_type(name="Regional Medical Store", parent_type=region)
_make_loc_type(name="Regional Hospital", parent_type=region)
district = _make_loc_type(name="district", administrative=True, parent_type=region)
_make_loc_type(name="Clinic", parent_type=district)
_make_loc_type(name="District Hospital", parent_type=district)
_make_loc_type(name="Health Centre", parent_type=district)
_make_loc_type(name="CHPS Facility", parent_type=district)
_make_loc_type(name="Hospital", parent_type=district)
_make_loc_type(name="Psychiatric Hospital", parent_type=district)
_make_loc_type(name="Polyclinic", parent_type=district)
_make_loc_type(name="facility", parent_type=district)
generator.instantiate_accounting_for_tests()
account = BillingAccount.get_or_create_account_by_domain(
domain.name,
created_by="automated-test",
)[0]
plan = DefaultProductPlan.get_default_plan_by_domain(
domain, edition=SoftwarePlanEdition.ADVANCED
)
subscription = Subscription.new_domain_subscription(
account,
domain.name,
plan
)
subscription.is_active = True
subscription.save()
ews_config = EWSGhanaConfig(enabled=True, domain=domain.name)
ews_config.save()
return domain
TEST_LOCATION_TYPE = 'outlet'
TEST_USER = 'commtrack-user'
TEST_NUMBER = '5551234'
TEST_PASSWORD = 'secret'
TEST_BACKEND = 'test-backend'
def bootstrap_user(username=TEST_USER, domain=TEST_DOMAIN,
phone_number=TEST_NUMBER, password=TEST_PASSWORD,
backend=TEST_BACKEND, first_name='', last_name='',
home_loc=None, user_data=None,
):
from corehq.apps.commtrack.helpers import make_supply_point
user_data = user_data or {}
user = CommCareUser.create(
domain,
username,
password,
        phone_numbers=[phone_number],
user_data=user_data,
first_name=first_name,
last_name=last_name
)
if not SupplyPointCase.get_by_location(home_loc):
make_supply_point(domain, home_loc)
home_loc.save()
user.set_location(home_loc)
user.save_verified_number(domain, phone_number, verified=True, backend_id=backend)
return CommCareUser.wrap(user.to_json())
REORDER_LEVEL = Decimal("1.5")
class ProductsReportHelper(object):
def __init__(self, location, transactions):
self.location = location
self.transactions = transactions
def reported_products_ids(self):
return {transaction.product_id for transaction in self.transactions}
def reported_products(self):
return SQLProduct.objects.filter(product_id__in=self.reported_products_ids())
def missing_products(self):
products_ids = SQLProduct.objects.filter(
domain=self.location.domain,
is_archived=False
).values_list('product_id')
date = datetime.utcnow() - timedelta(days=7)
earlier_reported_products = StockState.objects.filter(
product_id__in=products_ids,
case_id=self.location.supply_point_id
).exclude(last_modified_date__lte=date).values_list('product_id', flat=True).distinct()
missing_products = self.location.products.distinct().values_list(
'product_id', flat=True
).exclude(product_id__in=earlier_reported_products).exclude(product_id__in=self.reported_products_ids())
if not missing_products:
return []
return SQLProduct.objects.filter(product_id__in=missing_products)
def stock_states(self):
product_ids = [product.product_id for product in self.reported_products()]
return StockState.objects.filter(
product_id__in=product_ids,
case_id=self.location.supply_point_id
)
def stockouts(self):
return self.stock_states().filter(
stock_on_hand=0
).distinct('sql_product__code').order_by('sql_product__code')
def reorders(self):
reorders = []
for stockout in list(self.stockouts()) + self.low_supply():
monthly_consumption = stockout.get_monthly_consumption()
if monthly_consumption is None:
reorders.append((stockout.sql_product.code, None))
else:
reorders.append((stockout.sql_product.code, int(monthly_consumption * REORDER_LEVEL)))
return reorders
def _get_facilities_with_stock_category(self, category):
return [
stock_state
for stock_state in self.stock_states().distinct('sql_product__code').order_by('sql_product__code')
if stock_state.stock_category == category
]
def low_supply(self):
return self._get_facilities_with_stock_category('understock')
def overstocked(self):
return self._get_facilities_with_stock_category('overstock')
def receipts(self):
return [
transaction
for transaction in self.transactions
if transaction.action == 'receipts' and transaction.quantity != '0'
]
def can_receive_email(user, verified_number):
return user.email and verified_number.backend_id and verified_number.backend_id == 'MOBILE_BACKEND_TWILIO'
@quickcache(['domain'])
def get_country_id(domain):
return SQLLocation.objects.filter(domain=domain, location_type__name='country')[0].location_id
def has_input_stock_permissions(couch_user, location, domain):
domain_membership = couch_user.get_domain_membership(domain)
if not couch_user.is_web_user() or not domain_membership or not domain_membership.location_id:
return False
try:
user_location = SQLLocation.objects.get(location_id=domain_membership.location_id)
except SQLLocation.DoesNotExist:
return False
if not user_location.location_type.administrative:
if user_location.location_id != location.location_id:
return False
else:
parents = location.get_ancestors().values_list('location_id', flat=True)
if user_location.location_id not in parents:
return False
return True
def first_item(items, f):
for item in items:
if f(item):
return item
REPORT_MAPPING = {
'dashboard_report': 'custom.ewsghana.reports.specific_reports.dashboard_report.DashboardReport',
'stock_status': 'custom.ewsghana.reports.specific_reports.stock_status_report.StockStatus',
'reporting_page': 'custom.ewsghana.reports.specific_reports.reporting_rates.ReportingRatesReport',
'ews_mapreport': 'custom.ewsghana.reports.maps.EWSMapReport',
'cms_rms_summary_report': 'custom.ewsghana.reports.email_reports.CMSRMSReport',
'stock_summary_report': 'custom.ewsghana.reports.email_reports.StockSummaryReport'
}
def filter_slugs_by_role(couch_user, domain):
slugs = [
['dashboard_report', 'Dashboard'],
['stock_status', 'Stock Status'],
['reporting_page', 'Reporting'],
['ews_mapreport', 'Maps'],
['stock_summary_report', 'Stock Summary'],
['cms_rms_summary_report', 'CMS and RMS Summary']
]
if couch_user.is_domain_admin(domain) or couch_user.is_superuser:
return slugs
domain_membership = couch_user.get_domain_membership(domain)
permissions = domain_membership.permissions
if not permissions.view_reports:
return [slug for slug in slugs if REPORT_MAPPING[slug[0]] in permissions.view_report_list]
def ews_date_format(date):
return date.strftime("%b %d, %Y")
TEACHING_HOSPITAL_MAPPING = {
'kath': {'parent_external_id': '319'},
'kbth': {'parent_external_id': '2'},
}
TEACHING_HOSPITALS = ['kath', 'kbth', 'ccmh', 'trh']
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
def get_products_for_locations(locations):
return SQLProduct.objects.filter(
pk__in=locations.values_list('_products', flat=True),
).exclude(is_archived=True)
def get_products_for_locations_by_program(locations, program):
return SQLProduct.objects.filter(
pk__in=locations.values_list('_products', flat=True),
program_id=program
).exclude(is_archived=True)
def get_products_for_locations_by_products(locations, products):
return SQLProduct.objects.filter(
pk__in=locations.values_list('_products', flat=True),
).filter(pk__in=products).exclude(is_archived=True)
def get_supply_points(domain, location_id):
supply_points = []
if location_id:
location = SQLLocation.objects.get(
domain=domain,
location_id=location_id
)
if location.location_type.name == 'country':
supply_points = SQLLocation.objects.filter(
Q(parent__location_id=location_id, is_archived=False) |
Q(location_type__name='Regional Medical Store', domain=domain) |
Q(location_type__name='Teaching Hospital', domain=domain)
).order_by('name').exclude(supply_point_id__isnull=True)
else:
supply_points = SQLLocation.objects.filter(
parent__location_id=location_id, is_archived=False,
location_type__administrative=False,
).order_by('name').exclude(supply_point_id__isnull=True)
return supply_points
|
|
#(c) 2016 by Authors
#This file is a part of ABruijn program.
#Released under the BSD license (see LICENSE file)
"""
Separates alignment into small bubbles for further correction
"""
from __future__ import absolute_import
from __future__ import division
import logging
from bisect import bisect
from flye.six.moves import range
import multiprocessing
import signal
import flye.utils.fasta_parser as fp
import flye.config.py_cfg as cfg
from flye.polishing.alignment import shift_gaps, get_uniform_alignments
from flye.utils.sam_parser import SynchronizedSamReader
from flye.six.moves import zip
logger = logging.getLogger()
class ProfileInfo(object):
__slots__ = ("nucl", "num_inserts", "num_deletions",
"num_missmatch", "coverage")
def __init__(self):
self.nucl = ""
self.num_inserts = 0
self.num_deletions = 0
self.num_missmatch = 0
self.coverage = 0
class Bubble(object):
__slots__ = ("contig_id", "position", "branches", "consensus")
def __init__(self, contig_id, position):
self.contig_id = contig_id
self.position = position
self.branches = []
self.consensus = ""
def _thread_worker(aln_reader, contigs_info, err_mode,
results_queue, error_queue, bubbles_file_handle,
bubbles_file_lock):
"""
Will run in parallel
"""
try:
while not aln_reader.is_eof():
ctg_id, ctg_aln = aln_reader.get_chunk()
if ctg_id is None:
break
#logger.debug("Processing {0}".format(ctg_id))
            #get top uniform alignments
ctg_aln = get_uniform_alignments(ctg_aln, contigs_info[ctg_id].length)
profile, aln_errors = _compute_profile(ctg_aln, err_mode,
contigs_info[ctg_id].length)
partition, num_long_bubbles = _get_partition(profile, err_mode)
ctg_bubbles = _get_bubble_seqs(ctg_aln, err_mode, profile, partition,
contigs_info[ctg_id])
mean_cov = sum([len(b.branches) for b in ctg_bubbles]) // (len(ctg_bubbles) + 1)
ctg_bubbles, num_empty, num_long_branch = \
_postprocess_bubbles(ctg_bubbles)
results_queue.put((ctg_id, len(ctg_bubbles), num_long_bubbles,
num_empty, num_long_branch, aln_errors,
mean_cov))
with bubbles_file_lock:
_output_bubbles(ctg_bubbles, bubbles_file_handle)
del profile
del ctg_bubbles
except Exception as e:
error_queue.put(e)
def make_bubbles(alignment_path, contigs_info, contigs_path,
err_mode, num_proc, bubbles_out):
"""
The main function: takes an alignment and returns bubbles
"""
aln_reader = SynchronizedSamReader(alignment_path,
fp.read_sequence_dict(contigs_path),
cfg.vals["max_read_coverage"],
use_secondary=True)
manager = multiprocessing.Manager()
results_queue = manager.Queue()
error_queue = manager.Queue()
    #ignore SIGINT while forking workers so children inherit the ignore handler;
    #the original handler is restored below so the main process still catches SIGINT
orig_sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
threads = []
bubbles_out_lock = multiprocessing.Lock()
bubbles_out_handle = open(bubbles_out, "w")
for _ in range(num_proc):
threads.append(multiprocessing.Process(target=_thread_worker,
args=(aln_reader, contigs_info,
err_mode, results_queue,
error_queue, bubbles_out_handle,
bubbles_out_lock)))
signal.signal(signal.SIGINT, orig_sigint)
for t in threads:
t.start()
try:
for t in threads:
t.join()
if t.exitcode == -9:
logger.error("Looks like the system ran out of memory")
if t.exitcode != 0:
raise Exception("One of the processes exited with code: {0}"
.format(t.exitcode))
except KeyboardInterrupt:
for t in threads:
t.terminate()
raise
if not error_queue.empty():
raise error_queue.get()
aln_reader.close()
total_bubbles = 0
total_long_bubbles = 0
total_long_branches = 0
total_empty = 0
total_aln_errors = []
coverage_stats = {}
while not results_queue.empty():
(ctg_id, num_bubbles, num_long_bubbles,
num_empty, num_long_branch,
aln_errors, mean_coverage) = results_queue.get()
total_long_bubbles += num_long_bubbles
total_long_branches += num_long_branch
total_empty += num_empty
total_aln_errors.extend(aln_errors)
total_bubbles += num_bubbles
coverage_stats[ctg_id] = mean_coverage
mean_aln_error = sum(total_aln_errors) / (len(total_aln_errors) + 1)
logger.debug("Generated %d bubbles", total_bubbles)
logger.debug("Split %d long bubbles", total_long_bubbles)
logger.debug("Skipped %d empty bubbles", total_empty)
logger.debug("Skipped %d bubbles with long branches", total_long_branches)
return coverage_stats, mean_aln_error
def _output_bubbles(bubbles, out_stream):
"""
Outputs list of bubbles into file
"""
for bubble in bubbles:
out_stream.write(">{0} {1} {2}\n".format(bubble.contig_id,
bubble.position,
len(bubble.branches)))
out_stream.write(bubble.consensus + "\n")
for branch_id, branch in enumerate(bubble.branches):
out_stream.write(">{0}\n".format(branch_id))
out_stream.write(branch + "\n")
out_stream.flush()
def _postprocess_bubbles(bubbles):
MAX_BUBBLE = cfg.vals["max_bubble_length"]
MAX_BRANCHES = cfg.vals["max_bubble_branches"]
new_bubbles = []
long_branches = 0
empty_bubbles = 0
for bubble in bubbles:
if len(bubble.branches) == 0:
#logger.debug("Empty bubble {0}".format(bubble.position))
empty_bubbles += 1
continue
new_branches = []
median_branch = (sorted(bubble.branches, key=len)[len(bubble.branches) // 2])
if len(median_branch) == 0:
continue
        #Bubble is TOO BIG, will not correct it (maybe at the next iteration)
if len(median_branch) > MAX_BUBBLE * 1.5:
new_branches = [median_branch]
long_branches += 1
else:
for branch in bubble.branches:
incons_rate = abs(len(branch) - len(median_branch)) / len(median_branch)
if incons_rate < 0.5:
if len(branch) == 0:
branch = "A"
#logger.debug("Zero branch")
new_branches.append(branch)
if (abs(len(median_branch) - len(bubble.consensus)) > len(median_branch) // 2):
bubble.consensus = median_branch
if len(new_branches) > MAX_BRANCHES:
new_branches = new_branches[:MAX_BRANCHES]
new_bubbles.append(Bubble(bubble.contig_id, bubble.position))
new_bubbles[-1].consensus = bubble.consensus
new_bubbles[-1].branches = new_branches
return new_bubbles, empty_bubbles, long_branches
def _is_solid_kmer(profile, position, err_mode):
"""
    Checks if the kmer at the given position is solid
"""
MISSMATCH_RATE = cfg.vals["err_modes"][err_mode]["solid_missmatch"]
INS_RATE = cfg.vals["err_modes"][err_mode]["solid_indel"]
SOLID_LEN = cfg.vals["solid_kmer_length"]
for i in range(position, position + SOLID_LEN):
if profile[i].coverage == 0:
return False
local_missmatch = (profile[i].num_missmatch +
profile[i].num_deletions) / profile[i].coverage
local_ins = profile[i].num_inserts / profile[i].coverage
if local_missmatch > MISSMATCH_RATE or local_ins > INS_RATE:
return False
return True
def _is_simple_kmer(profile, position):
"""
Checks if the kmer with center at the given position is simple
"""
SIMPLE_LEN = cfg.vals["simple_kmer_length"]
extended_len = SIMPLE_LEN * 2
nucl_str = [p.nucl for p in profile[position - extended_len // 2 :
position + extended_len // 2]]
#single nucleotide homopolymers
for i in range(extended_len // 2 - SIMPLE_LEN // 2,
extended_len // 2 + SIMPLE_LEN // 2 - 1):
if nucl_str[i] == nucl_str[i + 1]:
return False
#dinucleotide homopolymers
for shift in [0, 1]:
for i in range(SIMPLE_LEN - shift - 1):
pos = extended_len // 2 - SIMPLE_LEN + shift + i * 2
if (nucl_str[pos : pos + 2] == nucl_str[pos + 2 : pos + 4]):
return False
#trinucleotide homopolymers
#for shift in [0, 1, 2]:
# for i in xrange(SIMPLE_LEN - shift - 1):
# pos = shift + i * 3
# if (nucl_str[pos : pos + 3] == nucl_str[pos + 3 : pos + 6]):
# #logger.debug("tri" + "".join(nucl_str))
# return False
return True
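# Illustrative note (made-up sequence): a window whose nucleotide string contains a
# repeat such as "AA" (single-nucleotide) or "ACAC" (dinucleotide) around the centre
# fails the checks above, so the kmer is not considered simple.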
def _compute_profile(alignment, platform, genome_len):
"""
Computes alignment profile
"""
max_aln_err = cfg.vals["err_modes"][platform]["max_aln_error"]
min_aln_len = cfg.vals["min_polish_aln_len"]
aln_errors = []
#filtered = 0
profile = [ProfileInfo() for _ in range(genome_len)]
for aln in alignment:
if aln.err_rate > max_aln_err or len(aln.qry_seq) < min_aln_len:
#filtered += 1
continue
aln_errors.append(aln.err_rate)
qry_seq = shift_gaps(aln.trg_seq, aln.qry_seq)
trg_seq = shift_gaps(qry_seq, aln.trg_seq)
trg_pos = aln.trg_start
for trg_nuc, qry_nuc in zip(trg_seq, qry_seq):
if trg_nuc == "-":
trg_pos -= 1
if trg_pos >= genome_len:
trg_pos -= genome_len
prof_elem = profile[trg_pos]
if trg_nuc == "-":
prof_elem.num_inserts += 1
else:
prof_elem.nucl = trg_nuc
prof_elem.coverage += 1
if qry_nuc == "-":
prof_elem.num_deletions += 1
elif trg_nuc != qry_nuc:
prof_elem.num_missmatch += 1
trg_pos += 1
#logger.debug("Filtered: {0} out of {1}".format(filtered, len(alignment)))
return profile, aln_errors
def _get_partition(profile, err_mode):
"""
Partitions genome into sub-alignments at solid regions / simple kmers
"""
#logger.debug("Partitioning genome")
SOLID_LEN = cfg.vals["solid_kmer_length"]
SIMPLE_LEN = cfg.vals["simple_kmer_length"]
MAX_BUBBLE = cfg.vals["max_bubble_length"]
solid_flags = [False for _ in range(len(profile))]
prof_pos = 0
while prof_pos < len(profile) - SOLID_LEN:
if _is_solid_kmer(profile, prof_pos, err_mode):
for i in range(prof_pos, prof_pos + SOLID_LEN):
solid_flags[i] = True
prof_pos += SOLID_LEN
else:
prof_pos += 1
partition = []
prev_partition = SOLID_LEN
long_bubbles = 0
prof_pos = SOLID_LEN
while prof_pos < len(profile) - SOLID_LEN:
cur_partition = prof_pos + SIMPLE_LEN // 2
landmark = (all(solid_flags[prof_pos : prof_pos + SIMPLE_LEN]) and
_is_simple_kmer(profile, cur_partition))
if prof_pos - prev_partition > MAX_BUBBLE:
long_bubbles += 1
if landmark or prof_pos - prev_partition > MAX_BUBBLE:
partition.append(cur_partition)
prev_partition = cur_partition
prof_pos += SOLID_LEN
else:
prof_pos += 1
#logger.debug("Partitioned into {0} segments".format(len(partition) + 1))
#logger.debug("Long bubbles: {0}".format(long_bubbles))
return partition, long_bubbles
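# Illustrative sketch (made-up numbers): with partition positions [1000, 1840, 2700]
# on a 3500 bp contig, _get_bubble_seqs() below extends this to [0, 1000, 1840, 2700, 3500]
# and forms one bubble per adjacent pair of landmarks.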
def _get_bubble_seqs(alignment, platform, profile, partition, contig_info):
"""
Given genome landmarks, forms bubble sequences
"""
if not partition:
return []
#max_aln_err = cfg.vals["err_modes"][platform]["max_aln_error"]
bubbles = []
ext_partition = [0] + partition + [contig_info.length]
for p_left, p_right in zip(ext_partition[:-1], ext_partition[1:]):
bubbles.append(Bubble(contig_info.id, p_left))
consensus = [p.nucl for p in profile[p_left : p_right]]
bubbles[-1].consensus = "".join(consensus)
for aln in alignment:
#if aln.err_rate > max_aln_err: continue
bubble_id = bisect(partition, aln.trg_start % contig_info.length)
next_bubble_start = ext_partition[bubble_id + 1]
chromosome_start = (bubble_id == 0 and
not contig_info.type == "circular")
chromosome_end = (aln.trg_end > partition[-1] and not
contig_info.type == "circular")
branch_start = None
first_segment = True
trg_pos = aln.trg_start
for i, trg_nuc in enumerate(aln.trg_seq):
if trg_nuc == "-":
continue
if trg_pos >= contig_info.length:
trg_pos -= contig_info.length
if trg_pos >= next_bubble_start or trg_pos == 0:
if not first_segment or chromosome_start:
branch_seq = fp.to_acgt(aln.qry_seq[branch_start : i].replace("-", ""))
bubbles[bubble_id].branches.append(branch_seq)
first_segment = False
bubble_id = bisect(partition, trg_pos)
next_bubble_start = ext_partition[bubble_id + 1]
branch_start = i
trg_pos += 1
if chromosome_end:
branch_seq = fp.to_acgt(aln.qry_seq[branch_start:].replace("-", ""))
bubbles[-1].branches.append(branch_seq)
return bubbles
|
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla Sheriff Duty.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import re
from urlparse import urlparse
import datetime
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import REDIRECT_FIELD_NAME
from nose.tools import eq_, ok_
from commons.urlresolvers import reverse
try:
import ldap
from users.auth.backends import MozillaLDAPBackend
except ImportError:
MozillaLDAPBackend = None
class UsersTest(TestCase):
def test_login(self):
self.client.get('/')
url = reverse('users.login')
response = self.client.get(url)
eq_(response.status_code, 200)
mortal = User.objects.create(
username='mortal',
first_name='Mortal',
last_name='Joe'
)
mortal.set_password('secret')
mortal.save()
response = self.client.post(url, {'username': 'mortal',
'password': 'wrong'})
eq_(response.status_code, 200)
ok_('errorlist' in response.content)
response = self.client.post(url, {'username': 'mortal',
'password': 'secret'})
eq_(response.status_code, 302)
path = urlparse(response['location']).path
eq_(path, settings.LOGIN_REDIRECT_URL)
response = self.client.get('/')
eq_(response.status_code, 200)
ok_('Mortal' in response.content)
url = reverse('users.logout')
response = self.client.get(url)
eq_(response.status_code, 302)
path = urlparse(response['location']).path
eq_(path, settings.LOGOUT_REDIRECT_URL)
response = self.client.get('/')
eq_(response.status_code, 200)
ok_('Mortal' not in response.content)
def _get_all_inputs(self, html):
_input_regex = re.compile('<input (.*?)>', re.M | re.DOTALL)
        _attrs_regex = re.compile(r'(\w+)="([^"]+)"')
all_attrs = {}
for input in _input_regex.findall(html):
attrs = dict(_attrs_regex.findall(input))
all_attrs[attrs.get('name', attrs.get('id', ''))] = attrs
return all_attrs
def test_login_next_redirect(self):
url = reverse('users.login')
response = self.client.get(url, {'next': '/foo/bar'})
eq_(response.status_code, 200)
attrs = self._get_all_inputs(response.content)
ok_(attrs[REDIRECT_FIELD_NAME])
eq_(attrs[REDIRECT_FIELD_NAME]['value'], '/foo/bar')
mortal = User.objects.create_user(
'mortal', 'mortal', password='secret'
)
mortal.set_password('secret')
mortal.save()
response = self.client.post(url, {'username': 'mortal',
'password': 'secret',
'next': '/foo/bar'})
eq_(response.status_code, 302)
path = urlparse(response['location']).path
eq_(path, '/foo/bar')
def test_login_failure(self):
url = reverse('users.login')
mortal = User.objects.create(
username='mortal',
first_name='Mortal',
last_name='Joe',
email='mortal@mozilla.com',
)
mortal.set_password('secret')
mortal.save()
response = self.client.post(url, {'username': 'mortal',
'password': 'xxx'})
eq_(response.status_code, 200)
ok_('errorlist' in response.content)
response = self.client.post(url, {'username': 'xxx',
'password': 'secret'})
eq_(response.status_code, 200)
ok_('errorlist' in response.content)
def test_login_rememberme(self):
url = reverse('users.login')
mortal = User.objects.create(
username='mortal',
first_name='Mortal',
last_name='Joe'
)
mortal.set_password('secret')
mortal.save()
response = self.client.post(url, {'username': 'mortal',
'password': 'secret',
'rememberme': 'yes'})
eq_(response.status_code, 302)
expires = self.client.cookies['sessionid']['expires']
date = expires.split()[1]
then = datetime.datetime.strptime(date, '%d-%b-%Y')
today = datetime.datetime.today()
days = settings.SESSION_COOKIE_AGE / 24 / 3600
eq_((then - today).days + 1, days)
def test_login_by_email(self):
url = reverse('users.login')
mortal = User.objects.create(
username='mortal',
email='mortal@hotmail.com',
first_name='Mortal',
last_name='Joe'
)
mortal.set_password('secret')
mortal.save()
response = self.client.post(url, {'username': 'Mortal@hotmail.com',
'password': 'secret'})
eq_(response.status_code, 302)
response = self.client.get('/')
eq_(response.status_code, 200)
ok_('Mortal' in response.content)
def test_changing_your_username(self):
url = reverse('users.settings')
response = self.client.get(url)
eq_(response.status_code, 302)
path = urlparse(response['location']).path
eq_(path, settings.LOGIN_URL)
mortal = User.objects.create(
username='mortal',
email='mortal@hotmail.com',
first_name='Mortal',
last_name='Joe'
)
mortal.set_password('secret')
mortal.save()
assert self.client.login(username='mortal', password='secret')
url = reverse('users.settings')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('value="%s"' % mortal.username in response.content)
User.objects.create_user(
'maxpower',
'maxpower@mozilla.com',
password='secret',
)
response = self.client.post(url, {'username':' Maxpower '})
eq_(response.status_code, 200)
ok_('errorlist' in response.content)
response = self.client.post(url, {'username':'homer '})
eq_(response.status_code, 302)
ok_(User.objects.get(username='homer'))
ok_(not User.objects.filter(username='mortal').exists())
# stupid but I should be able to save my own username twice
response = self.client.post(url, {'username':'homer'})
ok_(User.objects.get(username='homer'))
response = self.client.post(url, {'username':'Homer'})
ok_(User.objects.get(username='Homer'))
def test_mozilla_ldap_backend_basic(self):
if MozillaLDAPBackend is None:
return
back = MozillaLDAPBackend()
class LDAPUser:
def __init__(self, attrs):
self.attrs = attrs
ldap_user = LDAPUser({'mail':['mail@peterbe.com']})
user, created = back.get_or_create_user('peter', ldap_user)
ok_(created)
ok_(user)
eq_(user.username, 'peter')
peppe = User.objects.create_user(
'peppe',
'mail@peterbe.com',
)
user, created = back.get_or_create_user('peter', ldap_user)
ok_(not created)
eq_(user, peppe)
username = back.ldap_to_django_username('mail@peterbe.com')
eq_(username, 'peppe')
username = back.ldap_to_django_username('lois@peterbe.com')
eq_(username, 'lois')
def test_login_username_form_field(self):
url = reverse('users.login')
response = self.client.get(url)
eq_(response.status_code, 200)
html = response.content.split('<form')[1].split('</form')[0]
inputs = self._get_all_inputs(html)
input = inputs['username']
eq_(input['autocorrect'], 'off')
eq_(input['spellcheck'], 'false')
eq_(input['autocapitalize'], 'off')
eq_(input['type'], 'email')
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_serialization import jsonutils
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.i18n import _LW
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
class MechanismDriverContext(object):
"""MechanismDriver context base class."""
def __init__(self, plugin, plugin_context):
self._plugin = plugin
# This temporarily creates a reference loop, but the
# lifetime of PortContext is limited to a single
# method call of the plugin.
self._plugin_context = plugin_context
class NetworkContext(MechanismDriverContext, api.NetworkContext):
def __init__(self, plugin, plugin_context, network,
original_network=None):
super(NetworkContext, self).__init__(plugin, plugin_context)
self._network = network
self._original_network = original_network
self._segments = db.get_network_segments(plugin_context.session,
network['id'])
@property
def current(self):
return self._network
@property
def original(self):
return self._original_network
@property
def network_segments(self):
return self._segments
class SubnetContext(MechanismDriverContext, api.SubnetContext):
def __init__(self, plugin, plugin_context, subnet, network,
original_subnet=None):
super(SubnetContext, self).__init__(plugin, plugin_context)
self._subnet = subnet
self._original_subnet = original_subnet
self._network_context = NetworkContext(plugin, plugin_context,
network)
@property
def current(self):
return self._subnet
@property
def original(self):
return self._original_subnet
@property
def network(self):
return self._network_context
class PortContext(MechanismDriverContext, api.PortContext):
def __init__(self, plugin, plugin_context, port, network, binding,
binding_levels, original_port=None):
super(PortContext, self).__init__(plugin, plugin_context)
self._port = port
self._original_port = original_port
self._network_context = NetworkContext(plugin, plugin_context,
network)
self._binding = binding
self._binding_levels = binding_levels
self._segments_to_bind = None
self._new_bound_segment = None
self._next_segments_to_bind = None
if original_port:
self._original_vif_type = binding.vif_type
self._original_vif_details = self._plugin._get_vif_details(binding)
self._original_binding_levels = self._binding_levels
else:
self._original_vif_type = None
self._original_vif_details = None
self._original_binding_levels = None
self._new_port_status = None
# The following methods are for use by the ML2 plugin and are not
# part of the driver API.
def _prepare_to_bind(self, segments_to_bind):
self._segments_to_bind = segments_to_bind
self._new_bound_segment = None
self._next_segments_to_bind = None
def _clear_binding_levels(self):
self._binding_levels = []
def _push_binding_level(self, binding_level):
self._binding_levels.append(binding_level)
def _pop_binding_level(self):
return self._binding_levels.pop()
# The following implement the abstract methods and properties of
# the driver API.
@property
def current(self):
return self._port
@property
def original(self):
return self._original_port
@property
def status(self):
# REVISIT(rkukura): Eliminate special DVR case as part of
# resolving bug 1367391?
if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
return self._binding.status
return self._port['status']
@property
def original_status(self):
# REVISIT(rkukura): Should return host-specific status for DVR
# ports. Fix as part of resolving bug 1367391.
if self._original_port:
return self._original_port['status']
@property
def network(self):
return self._network_context
@property
def binding_levels(self):
if self._binding_levels:
return [{
api.BOUND_DRIVER: level.driver,
api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
} for level in self._binding_levels]
@property
def original_binding_levels(self):
if self._original_binding_levels:
return [{
api.BOUND_DRIVER: level.driver,
api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
} for level in self._original_binding_levels]
@property
def top_bound_segment(self):
if self._binding_levels:
return self._expand_segment(self._binding_levels[0].segment_id)
@property
def original_top_bound_segment(self):
if self._original_binding_levels:
return self._expand_segment(
self._original_binding_levels[0].segment_id)
@property
def bottom_bound_segment(self):
if self._binding_levels:
return self._expand_segment(self._binding_levels[-1].segment_id)
@property
def original_bottom_bound_segment(self):
if self._original_binding_levels:
return self._expand_segment(
self._original_binding_levels[-1].segment_id)
def _expand_segment(self, segment_id):
segment = db.get_segment_by_id(self._plugin_context.session,
segment_id)
if not segment:
LOG.warning(_LW("Could not expand segment %s"), segment_id)
return segment
@property
def host(self):
# REVISIT(rkukura): Eliminate special DVR case as part of
# resolving bug 1367391?
if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
return self._binding.host
return self._port.get(portbindings.HOST_ID)
@property
def original_host(self):
# REVISIT(rkukura): Eliminate special DVR case as part of
# resolving bug 1367391?
if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
return self._original_port and self._binding.host
else:
return (self._original_port and
self._original_port.get(portbindings.HOST_ID))
@property
def vif_type(self):
return self._binding.vif_type
@property
def original_vif_type(self):
return self._original_vif_type
@property
def vif_details(self):
return self._plugin._get_vif_details(self._binding)
@property
def original_vif_details(self):
return self._original_vif_details
@property
def segments_to_bind(self):
return self._segments_to_bind
def host_agents(self, agent_type):
return self._plugin.get_agents(self._plugin_context,
filters={'agent_type': [agent_type],
'host': [self._binding.host]})
def set_binding(self, segment_id, vif_type, vif_details,
status=None):
# TODO(rkukura) Verify binding allowed, segment in network
self._new_bound_segment = segment_id
self._binding.vif_type = vif_type
self._binding.vif_details = jsonutils.dumps(vif_details)
self._new_port_status = status
def continue_binding(self, segment_id, next_segments_to_bind):
# TODO(rkukura) Verify binding allowed, segment in network
self._new_bound_segment = segment_id
self._next_segments_to_bind = next_segments_to_bind
def allocate_dynamic_segment(self, segment):
network_id = self._network_context.current['id']
return self._plugin.type_manager.allocate_dynamic_segment(
self._plugin_context.session, network_id, segment)
def release_dynamic_segment(self, segment_id):
return self._plugin.type_manager.release_dynamic_segment(
self._plugin_context.session, segment_id)
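# Illustrative sketch, not part of the original module: a minimal mechanism
# driver showing how PortContext.set_binding() is typically called from
# bind_port(). The VIF type and details are placeholders chosen for the
# example, assuming the standard driver_api and portbindings constants.
class _ExamplePortBindingDriver(api.MechanismDriver):
    def initialize(self):
        pass

    def bind_port(self, context):
        # Bind the first segment the ML2 plugin offers at this level.
        for segment in context.segments_to_bind:
            context.set_binding(segment[api.ID],
                                portbindings.VIF_TYPE_OVS,
                                {portbindings.CAP_PORT_FILTER: True})
            return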
|
|
import anemoi as an
import pandas as pd
import numpy as np
import itertools
def wind_speed_data_for_annual_shear(mast_data, wind_speed_sensors=None, match_data=True):
"""Perform checks on wind speed data for shear calculations."""
ano_data = an.utils.mast_data.return_data_from_anemometers(mast_data)
if match_data:
ano_data = ano_data.dropna()
an.utils.mast_data.check_mast_data_not_empty(ano_data)
if wind_speed_sensors is not None:
assert isinstance(wind_speed_sensors, list), 'Need a list of wind speed sensors for annual shear calculation'
ano_data = an.utils.mast_data.return_data_from_sensors_by_name(ano_data, wind_speed_sensors)
heights = an.utils.mast_data.sensor_heights(ano_data)
orients = an.utils.mast_data.sensor_orients(ano_data)
wind_speed_sensors = an.utils.mast_data.sensor_names(ano_data)
ano_data.columns = an.utils.mast_data.remove_sensor_levels_from_mast_data_columns(ano_data.columns)
ano_data = ano_data.dropna()
return ano_data, wind_speed_sensors, heights, orients
def check_and_return_wind_dir_data_for_shear(mast_data, wind_dir_sensor):
"""Perform checks on wind direction data for shear calculations."""
assert wind_dir_sensor is not None, 'Need to specify a wind vane for directional shear calculations'
vane_data = an.utils.mast_data.return_data_from_sensors_by_name(mast_data, wind_dir_sensor)
vane_data.columns = an.utils.mast_data.remove_sensor_levels_from_mast_data_columns(vane_data.columns)
return vane_data
### SHEAR METHODS - Single Mast ###
def alpha_time_series(mast_data, wind_speed_sensors=None, heights=None, match_data=True):
"""Returns a time series of alpha values from a time series of wind speeds.
:Parameters:
mast_data: DataFrame
Measured data from MetMast.data
wind_speed_sensors: list
Specific wind speed sensors
heights: list
List of the specified sensor heights
:Returns:
out: DataFrame
Time series of alpha values with the same index as the input mast_data
"""
ano_data, wind_speed_sensors, heights, orients = wind_speed_data_for_annual_shear(mast_data, wind_speed_sensors,
match_data=match_data)
assert len(set(orients)) == 1, 'Can only calculate an alpha time series from similarly oriented sensors'
ln_heights = np.log(heights) - np.mean(np.log(heights))
ln_heights = pd.DataFrame(index=mast_data.index, columns=wind_speed_sensors,
data=np.tile(ln_heights, (mast_data.shape[0], 1)))
ln_heights_avg = ln_heights.mean(axis=1)
ln_heights = ln_heights.sub(ln_heights_avg, axis=0)
ln_wind_speeds = ano_data.apply(np.log)
ln_wind_speeds_avg = ln_wind_speeds.mean(axis=1)
ln_wind_speeds = ln_wind_speeds.sub(ln_wind_speeds_avg, axis=0)
shear_alpha = (ln_heights * ln_wind_speeds).sum(axis=1) / (ln_heights ** 2).sum(axis=1)
shear_alpha = shear_alpha.to_frame(name='alpha')
return shear_alpha
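# Worked example, not part of the original module: with just two anemometers
# the least-squares fit above reduces to the usual power-law shear exponent
# alpha = ln(v2 / v1) / ln(z2 / z1). For hypothetical readings
# v1, v2 = 6.0, 6.6 m/s at z1, z2 = 40, 60 m this gives
# np.log(6.6 / 6.0) / np.log(60.0 / 40.0) ~= 0.235.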
def alpha_annual_profile_from_alpha_time_series(mast_data, wind_speed_sensors=None, heights=None, match_data=True):
"""Returns monthly mean alpha values from a time series of wind speeds.
:Parameters:
mast_data: DataFrame
Measured data from MetMast.data
wind_speed_sensors: list, default all anemometers
Specific wind speed sensors
heights: list
List of the specified sensor heights
:Returns:
out: DataFrame
Mean alpha values indexed by month (annual shear profile)
"""
alpha_ts = alpha_time_series(mast_data, wind_speed_sensors=wind_speed_sensors, heights=heights)
alpha_profile = alpha_ts.groupby(alpha_ts.index.month).mean()
alpha_profile.index.name = 'month'
return alpha_profile
def alpha_mean_from_alpha_time_series(mast_data, wind_speed_sensors=None, heights=None, match_data=True):
"""Returns the mean of monthly means of the alpha time series from wind speed mast_data.
:Parameters:
mast_data: DataFrame
Measured data from MetMast.data
wind_speed_sensors: list, default all anemometers
Specific wind speed sensors
heights: list
List of the specified sensor heights
:Returns:
out: DataFrame
Mean of monthly means of an alpha time series
"""
alpha_ts = alpha_time_series(mast_data, wind_speed_sensors=wind_speed_sensors, heights=heights)
alpha = an.utils.mast_data.return_momm(alpha_ts)
return alpha
def alpha_annual_profile_from_wind_speed_time_series(mast_data, wind_speed_sensors=None, heights=None, match_data=True):
"""Returns monthly mean alpha values from a time series of wind speeds.
:Parameters:
mast_data: DataFrame
Measured data from MetMast.data
wind_speed_sensors: list, default all anemometers
Specific wind speed sensors
heights: list
List of the specified sensor heights
:Returns:
out: DataFrame
Mean alpha values indexed by month (annual shear profile)
"""
ano_data, wind_speed_sensors, heights, orients = wind_speed_data_for_annual_shear(mast_data, wind_speed_sensors,
match_data=match_data)
assert len(set(orients)) == 1, 'Can only calculate an alpha time series from similarly oriented sensors'
ws_profile = ano_data.groupby(ano_data.index.month).mean()
ws_profile.index.name = 'month'
alpha_profile = alpha_time_series(ws_profile, wind_speed_sensors=wind_speed_sensors, heights=heights)
return alpha_profile
def alpha_mean_from_wind_speed_time_series(mast_data, wind_speed_sensors=None, heights=None, match_data=True):
"""Returns alpha values from the mean of monthly means of a time series of wind speeds.
:Parameters:
mast_data: DataFrame
Measured data from MetMast.data
wind_speed_sensors: list, default all anemometers
Specific wind speed sensors
heights: list
List of the specified sensor heights
:Returns:
out: DataFrame
Alpha value from the mean of monthly means of a wind speed time series
"""
ano_data, wind_speed_sensors, heights, orients = wind_speed_data_for_annual_shear(mast_data, wind_speed_sensors,
match_data=match_data)
assert len(set(orients)) == 1, 'Can only calculate an alpha time series from similarly oriented sensors'
ano_data_momm = an.utils.mast_data.return_momm(ano_data).T
alpha = alpha_time_series(ano_data_momm, wind_speed_sensors=wind_speed_sensors, heights=heights).values[0][0]
alpha = pd.DataFrame(index=['momm'], columns=['alpha'], data=alpha)
return alpha
def alpha_dir_profile_from_wind_speed_time_series(mast_data, wind_dir_sensor, dir_sectors=16, wind_speed_sensors=None,
match_data=True):
"""Returns mean alpha values by direction bin from a time series of wind speeds.
:Parameters:
mast_data: DataFrame
Measured data from MetMast.data
wind_dir_sensor: str
Specific wind vane for directional binning
dir_sectors: int, default 16
Number of equally spaced direction sectors in which to bin the mean shear values
wind_speed_sensors: list, default all anemometers
Specific wind speed sensors
heights: list
List of the specified sensor heights
:Returns:
out: DataFrame
Mean alpha values indexed by the specified number of direction bins (directional shear profile)
"""
wind_speed_data, wind_speed_sensors, heights, orients = wind_speed_data_for_annual_shear(mast_data,
wind_speed_sensors,
match_data=match_data)
wind_dir_data = check_and_return_wind_dir_data_for_shear(mast_data, wind_dir_sensor=wind_dir_sensor)
alpha_ts = alpha_time_series(wind_speed_data, wind_speed_sensors=wind_speed_sensors, heights=heights)
alpha_ts = pd.concat([alpha_ts, wind_dir_data], axis=1).dropna()
alpha_ts.columns = ['alpha', 'dir']
dir_bin_ts = an.analysis.wind_rose.append_dir_bin(alpha_ts.dir, dir_sectors=dir_sectors).to_frame('dir_bin')
alpha_dir_ts = pd.concat([alpha_ts, dir_bin_ts], axis=1).dropna()
alpha_by_dir = alpha_dir_ts.loc[:, ['alpha', 'dir_bin']].groupby('dir_bin').mean()
return alpha_by_dir
def alpha_matrix_for_each_sensor_combination_from_mast_data(mast_data, include_reverse_combinations=False):
"""Returns a DataFrame of annual alpha values, indexed by sensor name, from an.MetMast.data.
:Parameters:
mast_data: an.MetMast.data
Pandas DataFrame of measured data from MetMast.data
:Returns:
out: DataFrame
Alpha values from a single mast, indexed by sensor name
"""
wind_speed_data, wind_speed_sensors, heights, orients = an.analysis.shear.wind_speed_data_for_annual_shear(
mast_data)
alpha_matrix = pd.DataFrame(index=wind_speed_sensors, columns=wind_speed_sensors)
alpha_matrix.index.name = 'sensor'
alpha_matrix.columns.name = 'sensor'
if alpha_matrix.shape[0] < 2:
return alpha_matrix
sensor_combinations = itertools.combinations(wind_speed_sensors, 2)
for sensor_combination in sensor_combinations:
alpha = an.analysis.shear.alpha_mean_from_wind_speed_time_series(wind_speed_data, wind_speed_sensors=list(
sensor_combination)).alpha[0]
alpha_matrix.loc[sensor_combination[0], sensor_combination[1]] = alpha
if include_reverse_combinations:
alpha_matrix.loc[sensor_combination[1], sensor_combination[0]] = alpha
alpha_matrix = alpha_matrix.dropna(how='all')
alpha_matrix.columns = an.utils.mast_data.remove_and_add_sensor_levels_to_mast_data_columns(alpha_matrix.columns)
alpha_matrix.columns = alpha_matrix.columns.droplevel(['type', 'signal'])
alpha_matrix.columns = alpha_matrix.columns.swaplevel('orient', 'height')
alpha_matrix.index = an.utils.mast_data.remove_and_add_sensor_levels_to_mast_data_columns(alpha_matrix.index)
alpha_matrix.index = alpha_matrix.index.droplevel(['type', 'signal'])
alpha_matrix.index = alpha_matrix.index.swaplevel('orient', 'height')
return alpha_matrix
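# Layout note, not from the original module (sensor names are hypothetical):
# for anemometers SPD_80, SPD_60 and SPD_40 the matrix above is filled on the
# upper triangle only, e.g. cell [SPD_80, SPD_60] holds the alpha fit between
# the 80 m and 60 m sensors; the lower triangle is populated only when
# include_reverse_combinations=True, and rows that end up all-NaN are dropped.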
def alpha_matrix_from_mast_data(mast_data, include_reverse_combinations=False):
"""Returns a DataFrame of annual alpha values, indexed by sensor name, from an.MetMast.data.
:Parameters:
mast_data: an.MetMast.data
Pandas DataFrame of measured data from MetMast.data
:Returns:
out: DataFrame
Alpha values from a single mast, indexed by sensor name
"""
ano_data = an.utils.mast_data.return_data_from_anemometers(mast_data)
unique_orients = an.utils.mast_data.sensor_orients_unique(ano_data)
alpha_matrix = []
for unique_orient in unique_orients:
ano_data_orient = an.utils.mast_data.return_data_from_sensors_by_orient(ano_data, sensor_orient=unique_orient)
alpha_matrix_orient = alpha_matrix_for_each_sensor_combination_from_mast_data(ano_data_orient,
include_reverse_combinations=include_reverse_combinations)
alpha_matrix.append(alpha_matrix_orient)
alpha_matrix = pd.concat(alpha_matrix, axis=0, sort=True).dropna(how='all')
alpha_matrix.index.name = 'sensor'
alpha_matrix.columns.name = 'sensor'
return alpha_matrix
def alpha_annual_avg_from_mast_alpha_matrix(alpha_matrix):
"""Returns a DataFrame of an annual alpha value from a single alpha matrix.
:Parameters:
alpha_matrix: DataFrame
Alpha matrix, e.g. as returned by alpha_matrix_from_mast_data
:Returns:
out: DataFrame
Average alpha value from a single mast.
"""
annual_avg_alpha = alpha_matrix.melt(value_name='alpha').alpha.mean()
annual_avg_alpha = pd.DataFrame(index=['avg'], columns=['alpha'], data=annual_avg_alpha)
return annual_avg_alpha
def mast_annual(mast):
"""Returns a DataFrame of annual alpha values from a single mast indexed by sensor orientation, height, and name.
:Parameters:
mast: an.MetMast
Measured data from MetMast.data
:Returns:
out: DataFrame
Alpha values from a single mast by sensor orientation and height
"""
alpha_matrix = alpha_matrix_from_mast_data(mast.data)
return alpha_matrix
def mast_annual_avg(mast):
"""Returns a DataFrame of an annual alpha value from a single mast, indexed by mast name.
:Parameters:
mast: an.MetMast
Measured data from MetMast.data
:Returns:
out: DataFrame
Average alpha value from a single mast.
"""
alpha_matrix = mast_annual(mast)
annual_avg_alpha = alpha_annual_avg_from_mast_alpha_matrix(alpha_matrix)
annual_avg_alpha = pd.DataFrame(index=[mast.name], columns=['alpha'], data=annual_avg_alpha.loc['avg', 'alpha'])
annual_avg_alpha.index.name = 'mast'
return annual_avg_alpha
def mast_directional(mast, wind_dir_sensor=None, dir_sectors=16, wind_speed_sensors=None):
"""Returns a DataFrame of annual alpha values from a single mast, indexed by direction bin.
Alpha is only calculated for time steps with valid measurements from each wind speed sensor.
:Parameters:
mast: an.MetMast
Measured data from MetMast.data
wind_dir_sensor: str, default mast.primary_vane
Specific wind vane for directional binning
dir_sectors: int, default 16
Number of equally spaced direction sectors in which to bin the mean shear values
wind_speed_sensors: list, default all anemometers
Specific wind speed sensors
:Returns:
out: DataFrame
Mean alpha values indexed by the specified number of direction bins (directional shear profile)
"""
ano_data, wind_speed_sensors, heights, orients = wind_speed_data_for_annual_shear(mast.data,
wind_speed_sensors=wind_speed_sensors)
if wind_dir_sensor is None:
wind_dir_sensor = mast.primary_vane
wind_dir_data = check_and_return_wind_dir_data_for_shear(mast.data, wind_dir_sensor=wind_dir_sensor)
mast_data = pd.concat([ano_data, wind_dir_data], axis=1).dropna()
shear_analysis_mast = alpha_dir_profile_from_wind_speed_time_series(mast_data, wind_dir_sensor,
dir_sectors=dir_sectors,
wind_speed_sensors=wind_speed_sensors)
mast.remove_and_add_sensor_levels()
return shear_analysis_mast
def mast_directional_by_orient(mast, wind_dir_sensor=None, dir_sectors=16):
"""Returns a DataFrame of annual alpha values from a single mast, indexed by direction bin.
Alpha is only calculated for time steps with valid measurements from each wind speed sensor.
:Parameters:
mast: an.MetMast
Measured data from MetMast.data
wind_dir_sensor: str, default mast.primary_vane
Specific wind vane for directional binning
dir_sectors: int, default 16
Number of equally spaced direction sectors in which to bin the mean shear values
:Returns:
out: DataFrame
Mean alpha values indexed by the specified number of direction bins (directional shear profile)
"""
anemometers = mast.data.loc[:, pd.IndexSlice['SPD', :, :, 'AVG', :]].columns.get_level_values(
level='sensor').tolist()
anemometer_data = mast.return_sensor_data(sensors=anemometers)
anemometer_orients = sorted(anemometer_data.columns.get_level_values(level='orient').unique().tolist())
alpha_by_dir = []
for anemometer_orient in anemometer_orients:
anemometers = anemometer_data.loc[:, pd.IndexSlice[:, :, anemometer_orient]].columns.get_level_values(
level='sensor').tolist()
alpha_by_dir.append(mast_directional(mast=mast,
wind_dir_sensor=wind_dir_sensor,
dir_sectors=dir_sectors,
wind_speed_sensors=anemometers))
alpha_by_dir = pd.concat(alpha_by_dir, axis=1, keys=anemometer_orients, names=['orient', 'alpha'])
alpha_by_dir.columns = alpha_by_dir.columns.droplevel(level='alpha')
alpha_by_dir.index = alpha_by_dir.index.values * 360.0 / dir_sectors
alpha_by_dir.loc[0.0, :] = alpha_by_dir.loc[360.0, :]
alpha_by_dir = alpha_by_dir.sort_index()
return alpha_by_dir
def mast_monthly_by_orient(mast):
"""Returns a DataFrame of monthly time series of alpha values from a single mast for each sensor orientation.
Alpha is only calculated for time steps with valid measurements from each wind speed sensor.
:Parameters:
mast: an.MetMast
Measured data from MetMast.data
:Returns:
out: DataFrame
Mean alpha values for each sensor orientation, indexed by month
"""
anemometers = mast.data.loc[:, pd.IndexSlice['SPD', :, :, 'AVG', :]].columns.get_level_values(
level='sensor').tolist()
anemometer_data = mast.return_sensor_data(sensors=anemometers)
anemometer_orients = sorted(anemometer_data.columns.get_level_values(level='orient').unique().tolist())
alpha_ts_by_orient = []
for anemometer_orient in anemometer_orients:
anemometer_data = an.utils.mast_data.remove_and_add_sensor_levels(anemometer_data)
anemometers = anemometer_data.loc[:, pd.IndexSlice[:, :, anemometer_orient]].columns.get_level_values(
level='sensor').tolist()
alpha_ts = an.analysis.shear.alpha_time_series(anemometer_data, wind_speed_sensors=anemometers)
alpha_ts_by_orient.append(alpha_ts)
alpha_ts_by_orient = pd.concat(alpha_ts_by_orient, axis=1, keys=anemometer_orients, names=['orient', 'alpha'])
alpha_ts_by_orient.columns = alpha_ts_by_orient.columns.droplevel(level='alpha')
monthly_alpha_ts_by_orient = alpha_ts_by_orient.resample('MS').mean()
return monthly_alpha_ts_by_orient
def mast_annual_profile_by_orient(mast):
"""Returns a DataFrame of annual alpha profiles from a single mast for each sensor orientation.
:Parameters:
mast: an.MetMast
Measured data from MetMast.data
:Returns:
out: DataFrame
Annual alpha profiles for each sensor orientation, indexed by month
"""
monthly_alpha_ts_by_orient = mast_monthly_by_orient(mast)
annual_alpha_profiles_by_orient = monthly_alpha_ts_by_orient.groupby(
[monthly_alpha_ts_by_orient.index.year, monthly_alpha_ts_by_orient.index.month]).mean()
annual_alpha_profiles_by_orient.index.names = ['year', 'month']
annual_alpha_profiles_by_orient = annual_alpha_profiles_by_orient.unstack(level='year')
return annual_alpha_profiles_by_orient
def site_annual(masts):
"""Returns a DataFrame of annual alpha values from a multiple site masts, indexed by mast, sensor orientation, and height.
:Parameters:
masts : list
List of MetMast objects from which all anemometer data is extracted
:Returns:
out: DataFrame
Alpha values from multiple site masts by mast, sensor orientation, and height
"""
shear_analysis_site = []
mast_names = []
for mast in masts:
mast_names.append(mast.name)
shear_analysis_site.append(mast_annual(mast))
shear_analysis_site = pd.concat(shear_analysis_site, axis=1, keys=mast_names)
shear_analysis_site.columns.names = ['Mast', 'height']
shear_analysis_site = shear_analysis_site.dropna(axis=1, how='all')
return shear_analysis_site
def site_annual_avg(masts):
"""Returns a DataFrame of annual alpha values from multiple site masts, indexed by mast.
:Parameters:
masts : list
List of MetMast objects from which all anemometer data is extracted
:Returns:
out: DataFrame
Alpha values from multiple site masts, indexed by mast
"""
annual_avg_alpha = site_annual(masts).stack().mean().to_frame('alpha')
return annual_avg_alpha
def site_directional(masts, dir_sectors=16):
"""Returns a DataFrame of annual alpha values from a single mast, indexed by direction bin.
Alpha only calcualted for time steps with valid measurements from each wind speed sensor.
:Parameters:
masts : list
List of MetMast objects from which all anemometer data is extracted
dir_sectors: int, default 16
Number of equally spaced direction sectors in which to bin the mean shear values
:Returns:
out: DataFrame
Mean alpha values for each mast indexed by the specified number of direction bins (directional shear profile)
"""
shear_analysis_site = []
mast_names = []
for mast in masts:
mast_names.append(mast.name)
shear_analysis_site.append(mast_directional(mast))
shear_analysis_site = pd.concat(shear_analysis_site, axis=1)
shear_analysis_site.columns = mast_names
shear_analysis_site.columns.names = ['Mast']
shear_analysis_site = shear_analysis_site.dropna(axis=1, how='all')
return shear_analysis_site
def site_mean(masts):
"""Returns a DataFrame of the mean annual alpha value from each site masts. Uses all avaialble anemometer combinations.
:Parameters:
masts : list
List of MetMast objects from which all anemometer data is extracted
:Returns:
out: DataFrame
Average annual alpha values from each site mast using all available anemometer combinations
"""
shear_results = site_annual(masts)
shear_results = shear_results.T.unstack().mean(axis=1).to_frame('alpha')
return shear_results
def site_mean_from_results(shear_results):
"""Returns a DataFrame of the mean annual alpha value from each site mast from a previously run shear analysis.
This allows the user to choose the heights and orientations used within the final calculated alpha value.
:Parameters:
shear_results : DataFrame
DataFrame of shear results, e.g. from shear.mast_annual or shear.site_annual
:Returns:
out: DataFrame
Average annual alpha values from each site mast using all the provided anemometer combinations
"""
shear_results = shear_results.T.unstack().replace('-', np.nan).mean(axis=1).to_frame('alpha')
return shear_results
|
|
# -*- coding: utf-8 -*-
"""
These tests cover the public routines exposed in types/common.py
related to inference that are not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import tslib, lib, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas.compat import u, PY2, PY3, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
_ensure_int32,
_ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
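# The fixture above parametrizes the tests that accept `coerce`, so each such
# test runs twice: once with coerce_numeric=True and once with False.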
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
@pytest.mark.parametrize(
"ll",
[
[], [1], (1, ), (1, 2), {'a': 1},
set([1, 'a']), Series([1]),
Series([]), Series(['a']).str])
def test_is_list_like_passes(ll):
assert inference.is_list_like(ll)
@pytest.mark.parametrize(
"ll", [1, '2', object(), str])
def test_is_list_like_fails(ll):
assert not inference.is_list_like(ll)
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
def test_is_file_like():
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
if PY3:
from unittest import mock
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, collections.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assert_raises_regex(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = set([2**63])
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_array(arr)
assert lib.is_timedelta64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_array(arr)
assert lib.is_timedelta64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_array',
'is_timedelta64_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class TestIsScalar(object):
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(u('efoobar'))
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_(u('foobar')))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
def test_is_scalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix('1; 2'))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp('2014-01-01'))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period('2014-01-01'))
assert is_scalar(Interval(left=0, right=1))
assert is_scalar(DateOffset(days=1))
def test_is_scalar_pandas_containers(self):
assert not is_scalar(Series())
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
with catch_warnings(record=True):
assert not is_scalar(Panel())
assert not is_scalar(Panel([[[1]]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
def test_datetimeindex_from_empty_datetime64_array():
for unit in ['ms', 'us', 'ns']:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert (len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A': np.asarray(
lrange(10), dtype='float64'),
'B': Timestamp('20010101')
}))
df.iloc[3:6, :] = np.nan
result = df.loc[4, 'B'].value
assert (result == tslib.iNaT)
s = df['B'].copy()
s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)
assert (isna(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= LooseVersion('1.7.0'):
assert (s[8].value == np.datetime64('NaT').astype(np.int64))
@td.skip_if_no_scipy
def test_is_scipy_sparse(spmatrix): # noqa: F811
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = _ensure_int32(values)
assert (result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = _ensure_int32(values)
assert (result.dtype == np.int32)
def test_ensure_categorical():
values = np.arange(10, dtype=np.int32)
result = _ensure_categorical(values)
assert (result.dtype == 'category')
values = Categorical(values)
result = _ensure_categorical(values)
tm.assert_categorical_equal(result, values)
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'HSV-Moder.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(965, 730)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 260, 271, 364))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(10, 10, 10, 10)
self.verticalLayout.setSpacing(5)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_h = QtWidgets.QHBoxLayout()
self.horizontalLayout_h.setContentsMargins(10, 10, 10, 10)
self.horizontalLayout_h.setSpacing(10)
self.horizontalLayout_h.setObjectName("horizontalLayout_h")
self.label_h = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_h.setObjectName("label_h")
self.horizontalLayout_h.addWidget(self.label_h)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_3.setSpacing(5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.hSliderT = QtWidgets.QSlider(self.verticalLayoutWidget)
self.hSliderT.setMaximum(255)
self.hSliderT.setOrientation(QtCore.Qt.Horizontal)
self.hSliderT.setObjectName("hSliderT")
self.horizontalLayout_4.addWidget(self.hSliderT)
self.hSliderTValue = QtWidgets.QLabel(self.verticalLayoutWidget)
self.hSliderTValue.setObjectName("hSliderTValue")
self.horizontalLayout_4.addWidget(self.hSliderTValue)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.hSliderB = QtWidgets.QSlider(self.verticalLayoutWidget)
self.hSliderB.setMaximum(255)
self.hSliderB.setOrientation(QtCore.Qt.Horizontal)
self.hSliderB.setObjectName("hSliderB")
self.horizontalLayout.addWidget(self.hSliderB)
self.hSliderBValue = QtWidgets.QLabel(self.verticalLayoutWidget)
self.hSliderBValue.setLayoutDirection(QtCore.Qt.RightToLeft)
self.hSliderBValue.setObjectName("hSliderBValue")
self.horizontalLayout.addWidget(self.hSliderBValue)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.horizontalLayout_h.addLayout(self.verticalLayout_3)
self.verticalLayout.addLayout(self.horizontalLayout_h)
self.horizontalLayout_s = QtWidgets.QHBoxLayout()
self.horizontalLayout_s.setContentsMargins(10, 10, 10, 10)
self.horizontalLayout_s.setSpacing(10)
self.horizontalLayout_s.setObjectName("horizontalLayout_s")
self.label_s = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_s.setObjectName("label_s")
self.horizontalLayout_s.addWidget(self.label_s)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_5.setSpacing(5)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.sSliderT = QtWidgets.QSlider(self.verticalLayoutWidget)
self.sSliderT.setMinimumSize(QtCore.QSize(0, 0))
self.sSliderT.setMaximum(255)
self.sSliderT.setOrientation(QtCore.Qt.Horizontal)
self.sSliderT.setObjectName("sSliderT")
self.horizontalLayout_5.addWidget(self.sSliderT)
self.sSliderTValue = QtWidgets.QLabel(self.verticalLayoutWidget)
self.sSliderTValue.setObjectName("sSliderTValue")
self.horizontalLayout_5.addWidget(self.sSliderTValue)
self.verticalLayout_5.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.sSliderB = QtWidgets.QSlider(self.verticalLayoutWidget)
self.sSliderB.setMaximum(255)
self.sSliderB.setOrientation(QtCore.Qt.Horizontal)
self.sSliderB.setObjectName("sSliderB")
self.horizontalLayout_6.addWidget(self.sSliderB)
self.sSliderBValue = QtWidgets.QLabel(self.verticalLayoutWidget)
self.sSliderBValue.setObjectName("sSliderBValue")
self.horizontalLayout_6.addWidget(self.sSliderBValue)
self.verticalLayout_5.addLayout(self.horizontalLayout_6)
self.horizontalLayout_s.addLayout(self.verticalLayout_5)
self.verticalLayout.addLayout(self.horizontalLayout_s)
self.horizontalLayout_v = QtWidgets.QHBoxLayout()
self.horizontalLayout_v.setContentsMargins(10, 10, 10, 10)
self.horizontalLayout_v.setSpacing(10)
self.horizontalLayout_v.setObjectName("horizontalLayout_v")
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_4.setObjectName("label_4")
self.horizontalLayout_v.addWidget(self.label_4)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_6.setSpacing(5)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.vSliderT = QtWidgets.QSlider(self.verticalLayoutWidget)
self.vSliderT.setMaximum(255)
self.vSliderT.setOrientation(QtCore.Qt.Horizontal)
self.vSliderT.setObjectName("vSliderT")
self.horizontalLayout_7.addWidget(self.vSliderT)
self.vSliderTValue = QtWidgets.QLabel(self.verticalLayoutWidget)
self.vSliderTValue.setObjectName("vSliderTValue")
self.horizontalLayout_7.addWidget(self.vSliderTValue)
self.verticalLayout_6.addLayout(self.horizontalLayout_7)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.vSliderB = QtWidgets.QSlider(self.verticalLayoutWidget)
self.vSliderB.setMaximum(255)
self.vSliderB.setOrientation(QtCore.Qt.Horizontal)
self.vSliderB.setObjectName("vSliderB")
self.horizontalLayout_8.addWidget(self.vSliderB)
self.vSliderBValue = QtWidgets.QLabel(self.verticalLayoutWidget)
self.vSliderBValue.setObjectName("vSliderBValue")
self.horizontalLayout_8.addWidget(self.vSliderBValue)
self.verticalLayout_6.addLayout(self.horizontalLayout_8)
self.horizontalLayout_v.addLayout(self.verticalLayout_6)
self.verticalLayout.addLayout(self.horizontalLayout_v)
self.pic = QtWidgets.QLabel(self.centralwidget)
self.pic.setGeometry(QtCore.QRect(290, 60, 640, 480))
self.pic.setObjectName("pic")
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 40, 271, 136))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label.setMaximumSize(QtCore.QSize(16777215, 40))
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.filePath = QtWidgets.QTextEdit(self.verticalLayoutWidget_2)
self.filePath.setMaximumSize(QtCore.QSize(800, 40))
font = QtGui.QFont()
font.setPointSize(18)
self.filePath.setFont(font)
self.filePath.setAutoFillBackground(False)
self.filePath.setObjectName("filePath")
self.verticalLayout_2.addWidget(self.filePath)
self.picLoad = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
self.picLoad.setMaximumSize(QtCore.QSize(16777215, 40))
self.picLoad.setObjectName("picLoad")
self.verticalLayout_2.addWidget(self.picLoad)
self.bBack = QtWidgets.QPushButton(self.centralwidget)
self.bBack.setGeometry(QtCore.QRect(450, 570, 99, 27))
self.bBack.setObjectName("bBack")
self.bNext = QtWidgets.QPushButton(self.centralwidget)
self.bNext.setGeometry(QtCore.QRect(690, 570, 99, 27))
self.bNext.setObjectName("bNext")
self.lSelector = QtWidgets.QLabel(self.centralwidget)
self.lSelector.setGeometry(QtCore.QRect(580, 570, 67, 21))
self.lSelector.setAlignment(QtCore.Qt.AlignCenter)
self.lSelector.setObjectName("lSelector")
self.cErosion = QtWidgets.QCheckBox(self.centralwidget)
self.cErosion.setGeometry(QtCore.QRect(10, 190, 97, 22))
self.cErosion.setObjectName("cErosion")
self.cDilate = QtWidgets.QCheckBox(self.centralwidget)
self.cDilate.setGeometry(QtCore.QRect(130, 190, 97, 22))
self.cDilate.setObjectName("cDilate")
self.cModulation = QtWidgets.QCheckBox(self.centralwidget)
self.cModulation.setGeometry(QtCore.QRect(10, 230, 97, 22))
self.cModulation.setObjectName("cModulation")
self.verticalLayoutWidget.raise_()
self.pic.raise_()
self.verticalLayoutWidget_2.raise_()
self.bBack.raise_()
self.bNext.raise_()
self.lSelector.raise_()
self.cErosion.raise_()
self.cDilate.raise_()
self.cModulation.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 965, 25))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_h.setText(_translate("MainWindow", "H"))
self.hSliderTValue.setText(_translate("MainWindow", "0"))
self.hSliderBValue.setText(_translate("MainWindow", "0"))
self.label_s.setText(_translate("MainWindow", "S"))
self.sSliderTValue.setText(_translate("MainWindow", "0"))
self.sSliderBValue.setText(_translate("MainWindow", "0"))
self.label_4.setText(_translate("MainWindow", "V"))
self.vSliderTValue.setText(_translate("MainWindow", "0"))
self.vSliderBValue.setText(_translate("MainWindow", "0"))
self.pic.setText(_translate("MainWindow", "TextLabel"))
self.label.setText(_translate("MainWindow", "Archivo"))
self.filePath.setPlaceholderText(_translate("MainWindow", "Ruta de archivo"))
self.picLoad.setText(_translate("MainWindow", "Cargar"))
self.bBack.setText(_translate("MainWindow", "Back"))
self.bNext.setText(_translate("MainWindow", "Next"))
self.lSelector.setText(_translate("MainWindow", "0"))
self.cErosion.setText(_translate("MainWindow", "Erosion"))
self.cDilate.setText(_translate("MainWindow", "Dilate"))
self.cModulation.setText(_translate("MainWindow", "Modulation"))
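# --- Illustrative launcher (assumption; not part of the generated .ui output) ---
# Minimal sketch of how a PyQt5-generated Ui_* class is typically hosted:
# create a QMainWindow, call setupUi() on it, then start the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())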
|
|
""" Module: IDL:Tango:1.0
Automagically generated by:-
The ORB called Fnorb v1.1.Return.of.Fnorb
"""
_FNORB_ID = "IDL:Tango:1.0"
# Fnorb modules.
import Fnorb.orb.CORBA
import Fnorb.orb.TypeManager
import Fnorb.orb.Util
class Device_skel(Fnorb.orb.CORBA.Object_skel):
""" Interface: IDL:Tango/Device:1.0 """
_FNORB_ID = "IDL:Tango/Device:1.0"
def _skel__get_name(self, server_request):
""" Attribute: IDL:Tango/Device/name:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_string)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_name()
# Create the reply.
server_request.results(results)
return
def _skel__get_description(self, server_request):
""" Attribute: IDL:Tango/Device/description:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_string)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_description()
# Create the reply.
server_request.results(results)
return
def _skel__get_state(self, server_request):
""" Attribute: IDL:Tango/Device/state:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevState:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_state()
# Create the reply.
server_request.results(results)
return
def _skel__get_status(self, server_request):
""" Attribute: IDL:Tango/Device/status:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_string)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_status()
# Create the reply.
server_request.results(results)
return
def _skel__get_adm_name(self, server_request):
""" Attribute: IDL:Tango/Device/adm_name:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_string)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_adm_name()
# Create the reply.
server_request.results(results)
return
def _skel_command_inout(self, server_request):
""" Operation: IDL:Tango/Device/command_inout:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.command_inout, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_attribute_config(self, server_request):
""" Operation: IDL:Tango/Device/get_attribute_config:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeConfigList:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_attribute_config, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_set_attribute_config(self, server_request):
""" Operation: IDL:Tango/Device/set_attribute_config:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeConfigList:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.set_attribute_config, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_read_attributes(self, server_request):
""" Operation: IDL:Tango/Device/read_attributes:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.read_attributes, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_write_attributes(self, server_request):
""" Operation: IDL:Tango/Device/write_attributes:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.write_attributes, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_ping(self, server_request):
""" Operation: IDL:Tango/Device/ping:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.ping, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_black_box(self, server_request):
""" Operation: IDL:Tango/Device/black_box:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_long)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.black_box, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_info(self, server_request):
""" Operation: IDL:Tango/Device/info:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevInfo:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.info, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_command_list_query(self, server_request):
""" Operation: IDL:Tango/Device/command_list_query:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevCmdInfoList:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.command_list_query, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_command_query(self, server_request):
""" Operation: IDL:Tango/Device/command_query:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevCmdInfo:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.command_query, arguments)
# Create the reply.
server_request.results(results)
return
class Device_2_skel(Fnorb.orb.CORBA.Object_skel, Device_skel):
""" Interface: IDL:Tango/Device_2:1.0 """
_FNORB_ID = "IDL:Tango/Device_2:1.0"
def _skel_command_inout_2(self, server_request):
""" Operation: IDL:Tango/Device_2/command_inout_2:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_any)
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevSource:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.command_inout_2, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_read_attributes_2(self, server_request):
""" Operation: IDL:Tango/Device_2/read_attributes_2:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevSource:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.read_attributes_2, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_attribute_config_2(self, server_request):
""" Operation: IDL:Tango/Device_2/get_attribute_config_2:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeConfigList_2:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_attribute_config_2, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_command_list_query_2(self, server_request):
""" Operation: IDL:Tango/Device_2/command_list_query_2:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevCmdInfoList_2:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.command_list_query_2, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_command_query_2(self, server_request):
""" Operation: IDL:Tango/Device_2/command_query_2:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevCmdInfo_2:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.command_query_2, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_command_inout_history_2(self, server_request):
""" Operation: IDL:Tango/Device_2/command_inout_history_2:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_long)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevCmdHistoryList:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.command_inout_history_2, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_read_attribute_history_2(self, server_request):
""" Operation: IDL:Tango/Device_2/read_attribute_history_2:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_long)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevAttrHistoryList:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.read_attribute_history_2, arguments)
# Create the reply.
server_request.results(results)
return
class Device_3_skel(Fnorb.orb.CORBA.Object_skel, Device_2_skel):
""" Interface: IDL:Tango/Device_3:1.0 """
_FNORB_ID = "IDL:Tango/Device_3:1.0"
def _skel_read_attributes_3(self, server_request):
""" Operation: IDL:Tango/Device_3/read_attributes_3:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevSource:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList_3:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.read_attributes_3, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_write_attributes_3(self, server_request):
""" Operation: IDL:Tango/Device_3/write_attributes_3:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/MultiDevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.write_attributes_3, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_read_attribute_history_3(self, server_request):
""" Operation: IDL:Tango/Device_3/read_attribute_history_3:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_long)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevAttrHistoryList_3:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.read_attribute_history_3, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_info_3(self, server_request):
""" Operation: IDL:Tango/Device_3/info_3:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevInfo_3:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.info_3, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_attribute_config_3(self, server_request):
""" Operation: IDL:Tango/Device_3/get_attribute_config_3:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeConfigList_3:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_attribute_config_3, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_set_attribute_config_3(self, server_request):
""" Operation: IDL:Tango/Device_3/set_attribute_config_3:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeConfigList_3:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.set_attribute_config_3, arguments)
# Create the reply.
server_request.results(results)
return
class Device_4_skel(Fnorb.orb.CORBA.Object_skel, Device_3_skel):
""" Interface: IDL:Tango/Device_4:1.0 """
_FNORB_ID = "IDL:Tango/Device_4:1.0"
def _skel_read_attribute_history_4(self, server_request):
""" Operation: IDL:Tango/Device_4/read_attribute_history_4:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_long)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevAttrHistory_4:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.read_attribute_history_4, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_command_inout_history_4(self, server_request):
""" Operation: IDL:Tango/Device_4/command_inout_history_4:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_long)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevCmdHistory_4:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.command_inout_history_4, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_command_inout_4(self, server_request):
""" Operation: IDL:Tango/Device_4/command_inout_4:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_string)
inputs.append(Fnorb.orb.CORBA.TC_any)
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevSource:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/ClntIdent:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.command_inout_4, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_read_attributes_4(self, server_request):
""" Operation: IDL:Tango/Device_4/read_attributes_4:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevVarStringArray:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevSource:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/ClntIdent:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList_4:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.read_attributes_4, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_write_attributes_4(self, server_request):
""" Operation: IDL:Tango/Device_4/write_attributes_4:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList_4:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/ClntIdent:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/MultiDevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.write_attributes_4, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_set_attribute_config_4(self, server_request):
""" Operation: IDL:Tango/Device_4/set_attribute_config_4:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeConfigList_3:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/ClntIdent:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.set_attribute_config_4, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_write_read_attributes_4(self, server_request):
""" Operation: IDL:Tango/Device_4/write_read_attributes_4:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList_4:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/ClntIdent:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:Tango/AttributeValueList_4:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/DevFailed:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:Tango/MultiDevFailed:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.write_read_attributes_4, arguments)
# Create the reply.
server_request.results(results)
return
#############################################################################
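# --- Illustrative sketch (assumption; not generated by Fnorb) ---------------
# A concrete servant would subclass one of the skeletons above and supply the
# plain methods that the _skel_* dispatchers invoke (e.g. _get_name(), ping()).
# The device name and status strings below are placeholders, not a real device.
class ExampleDevice_impl(Device_skel):
    """ Minimal example implementation of IDL:Tango/Device:1.0 """
    def _get_name(self):
        # Called by _skel__get_name() to answer the 'name' attribute.
        return "sys/example/device1"
    def _get_status(self):
        # Called by _skel__get_status() to answer the 'status' attribute.
        return "Example device is idle"
    def ping(self):
        # ping() carries no arguments and produces no result.
        return None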
|
|
import random, sys, time, pygame, os
from pygame.locals import *
#import wifitools
import pickle
import sqlite3
import datetime
import medManager
import userManager
##TODO: change for PI
#os.environ["SDL_FBDEV"] = "/dev/fb1"
#os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
#os.environ["SDL_MOUSEDRV"] = "TSLIB"
FPS = 30
WINDOWWIDTH = 320
WINDOWHEIGHT = 240
FLASHSPEED = 500 # in milliseconds
FLASHDELAY = 200 # in milliseconds
BUTTONSIZE = 80
BUTTONGAPSIZE = 10
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
DARK = ( 18, 40, 13)
MID = ( 47, 82, 20)
LIGHT = ( 77, 129, 41)
DARKGRAY = ( 64, 64, 64)
bgColor = BLACK
HEADER = 30
BORDER = 10
# status bar
RECTSTATUS = pygame.Rect(0, 0, WINDOWWIDTH, HEADER)
IMAGE_WIFI0 = pygame.image.load("images/wifi0.BMP")
IMAGE_WIFI1 = pygame.image.load("images/wifi1.BMP")
IMAGE_WIFI2 = pygame.image.load("images/wifi2.BMP")
IMAGE_WIFI3 = pygame.image.load("images/wifi3.BMP")
IMAGE_WIFI4 = pygame.image.load("images/wifi4.BMP")
IMAGE_ALARM = pygame.image.load("images/alarm.BMP")
IMAGE_NOALARM = pygame.image.load("images/noalarm.BMP")
IMAGE_ALERT = pygame.image.load("images/alert.BMP")
IMAGE_WARNING = pygame.image.load("images/warning.BMP")
IMAGE_LOCK = pygame.image.load("images/lock.BMP")
IMAGE_TIMER = pygame.image.load("images/timer.BMP")
IMAGE_GEARS = pygame.image.load("images/gears.png")
IMAGE_BACK = pygame.image.load("images/back.png")
IMAGE_PILL = pygame.image.load("images/pill.png")
IMAGE_LOAD = pygame.image.load("images/load.png")
IMAGE_PLUS = pygame.image.load("images/plus.png")
IMAGE_FACE = pygame.image.load("images/face.png")
IMAGE_POWER = pygame.image.load("images/power.png")
IMAGE_BRIGHTNESS = pygame.image.load("images/brightness.png")
IMAGE_WIFI_SETTINGS = pygame.image.load("images/wifi_settings.png")
# main menu
RECT_BG = pygame.Rect(0, HEADER, 210, 320)
BUTTON_1 = pygame.Rect(10, HEADER+BORDER, 90, 90)
BUTTON_2 = pygame.Rect(110, HEADER+BORDER, 90, 90)
BUTTON_3 = pygame.Rect(210, HEADER+BORDER, 90, 90)
BUTTON_4 = pygame.Rect(10, HEADER+BORDER+100, 90, 90)
BUTTON_5 = pygame.Rect(110, HEADER+BORDER+100, 90, 90)
BUTTON_6 = pygame.Rect(210, HEADER+BORDER+100, 90, 90)
LIST_1 = pygame.Rect(10, HEADER+BORDER, 190, 40)
LIST_2 = pygame.Rect(10, HEADER+BORDER+50, 190, 40)
LIST_3 = pygame.Rect(10, HEADER+BORDER+100, 190, 40)
LIST_4 = pygame.Rect(10, HEADER+BORDER+150, 190, 40)
LIST_UP = pygame.Rect(210, HEADER+BORDER, 90, 40)
LIST_DN = pygame.Rect(210, HEADER+BORDER+50, 90, 40)
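# action codes returned by getButtonClicked() and handled in the main loop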
ACTION_DISPENSE = 1
ACTION_STATUS = 2
ACTION_LOAD = 3
ACTION_SETTINGS = 4
ACTION_BACK = 5
ACTION_WIFI = 6
ACTION_ADDUSER = 7
ACTION_USER1 = 8
ACTION_USER2 = 9
ACTION_USER3 = 10
ACTION_USER4 = 11
ACTION_SHUTDOWN = 12
ACTION_BRIGHTNESS = 13
ACTION_LIST_1 = 14
ACTION_LIST_2 = 15
ACTION_LIST_3 = 16
ACTION_LIST_4 = 17
ACTION_LIST_UP = 18
ACTION_LIST_DN = 19
ACTION_VENDING = 20
ACTION_HOME = 21
ACTION_MANAGE = 22
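# menu screens; current_menu selects which draw*Menu() is rendered each frame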
MENU_MAIN = 0
MENU_SETTINGS = 1
MENU_WIFI = 2
MENU_USERS = 3
MENU_ADDUSER = 4
MENU_DISPENSE = 5
MENU_LOAD = 6
MENU_LOADING = 7
MENU_SHUTDOWN = 8
MENU_BRIGHTNESS = 9
MENU_VENDING = 10
MENU_MANAGE = 11
##TODO: change for PI
wifipercent = 100 #int(wifitools.get_main_percent())
users = ['Joe', 'Amy', 'Dad', 'Mom']
pickle.dump(users, open("data/users.pkl","wb"))
users = pickle.load(open("data/users.pkl","rb"))
#global vars
list_position = 0
list_next = 0
list_id = 0
current_user = 0
pill_id = 0
pill_name = 0
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT, BEEP1, BEEP2, BEEP3, BEEP4
current_menu = MENU_MAIN
global list_position
global list_next
global list_id
global current_user
global pill_id
global pill_name
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
##TODO: change for PI
#pygame.mouse.set_visible(0)
    # Reload the icons with per-pixel alpha now that a display mode exists;
    # declare them global so the draw* helpers use the converted surfaces.
    global IMAGE_GEARS, IMAGE_BACK, IMAGE_PILL, IMAGE_LOAD, IMAGE_PLUS
    global IMAGE_FACE, IMAGE_POWER, IMAGE_BRIGHTNESS, IMAGE_WIFI_SETTINGS
    IMAGE_GEARS = pygame.image.load("images/gears.png").convert_alpha()
    IMAGE_BACK = pygame.image.load("images/back.png").convert_alpha()
    IMAGE_PILL = pygame.image.load("images/pill.png").convert_alpha()
    IMAGE_LOAD = pygame.image.load("images/load.png").convert_alpha()
    IMAGE_PLUS = pygame.image.load("images/plus.png").convert_alpha()
    IMAGE_FACE = pygame.image.load("images/face.png").convert_alpha()
    IMAGE_POWER = pygame.image.load("images/power.png").convert_alpha()
    IMAGE_BRIGHTNESS = pygame.image.load("images/brightness.png").convert_alpha()
    IMAGE_WIFI_SETTINGS = pygame.image.load("images/wifi_settings.png").convert_alpha()
    waitingForInput = False  # unused; the menu loop below only tracks updateDisplay
updateDisplay = True
while True: # main game loop
clickedButton = None # button that was clicked
if updateDisplay:
DISPLAYSURF.fill(bgColor)
drawStatusBar()
if current_menu == MENU_MAIN:
drawMainMenu()
elif current_menu == MENU_SETTINGS:
drawSettingsMenu()
elif current_menu == MENU_WIFI:
drawWIFIMenu()
elif current_menu == MENU_USERS:
drawUsersMenu()
elif current_menu == MENU_DISPENSE:
drawDispenseMenu()
elif current_menu == MENU_ADDUSER:
drawAddUserMenu()
elif current_menu == MENU_LOAD:
drawLoadMenu()
elif current_menu == MENU_LOADING:
drawLoadingMenu()
elif current_menu == MENU_SHUTDOWN:
drawShutdownMenu()
elif current_menu == MENU_BRIGHTNESS:
drawBrightnessMenu()
elif current_menu == MENU_VENDING:
drawVendingMenu()
elif current_menu == MENU_MANAGE:
drawManageMenu()
updateDisplay = False
checkForQuit()
for event in pygame.event.get(): # event handling loop
if event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
clickedButton = getButtonClicked(mousex, mousey, current_menu)
        # handle whichever button was clicked and switch menus accordingly
if clickedButton:
if clickedButton == ACTION_SETTINGS:
current_menu = MENU_SETTINGS
elif clickedButton == ACTION_WIFI:
current_menu = MENU_WIFI
elif clickedButton == ACTION_HOME:
current_menu = MENU_MAIN
elif clickedButton == ACTION_BRIGHTNESS:
current_menu = MENU_BRIGHTNESS
elif clickedButton == ACTION_LOAD:
current_menu = MENU_LOAD
elif clickedButton == ACTION_DISPENSE:
current_menu = MENU_USERS
elif clickedButton == ACTION_SHUTDOWN:
current_menu = MENU_SHUTDOWN
elif clickedButton == ACTION_ADDUSER:
current_menu = MENU_ADDUSER
elif clickedButton == ACTION_MANAGE:
current_menu = MENU_MANAGE
elif clickedButton == ACTION_USER1:
current_menu = MENU_DISPENSE
current_user = users[0]
elif clickedButton == ACTION_USER2:
current_menu = MENU_DISPENSE
current_user = users[1]
elif clickedButton == ACTION_USER3:
current_menu = MENU_DISPENSE
current_user = users[2]
elif clickedButton == ACTION_USER4:
current_menu = MENU_DISPENSE
current_user = users[3]
elif clickedButton == ACTION_LIST_1:
if(list_position < list_next):
list_id = list_position
if(current_menu == MENU_DISPENSE):
current_menu = MENU_VENDING
elif(current_menu == MENU_LOAD):
current_menu = MENU_LOADING
elif clickedButton == ACTION_LIST_2:
if(list_position+1 < list_next):
list_id = list_position+1
if(current_menu == MENU_DISPENSE):
current_menu = MENU_VENDING
elif(current_menu == MENU_LOAD):
current_menu = MENU_LOADING
elif clickedButton == ACTION_LIST_3:
if(list_position+2 < list_next):
list_id = list_position+2
if(current_menu == MENU_DISPENSE):
current_menu = MENU_VENDING
elif(current_menu == MENU_LOAD):
current_menu = MENU_LOADING
elif clickedButton == ACTION_LIST_4:
if(list_position+3 < list_next):
list_id = list_position+3
if(current_menu == MENU_DISPENSE):
current_menu = MENU_VENDING
elif(current_menu == MENU_LOAD):
current_menu = MENU_LOADING
elif clickedButton == ACTION_LIST_DN:
if(list_next > 4):
list_position = list_position + 4
elif clickedButton == ACTION_LIST_UP:
if(list_position > 0):
list_position = list_position - 4
elif clickedButton == ACTION_BACK:
if current_menu == MENU_WIFI:
current_menu = MENU_SETTINGS
elif current_menu == MENU_SETTINGS:
current_menu = MENU_MAIN
elif current_menu == MENU_USERS:
current_menu = MENU_MAIN
elif current_menu == MENU_DISPENSE:
current_menu = MENU_USERS
elif current_menu == MENU_ADDUSER:
current_menu = MENU_USERS
elif current_menu == MENU_LOAD:
current_menu = MENU_MAIN
elif current_menu == MENU_SHUTDOWN:
current_menu = MENU_MAIN
elif current_menu == MENU_BRIGHTNESS:
current_menu = MENU_SETTINGS
elif current_menu == MENU_VENDING:
current_menu = MENU_DISPENSE
elif current_menu == MENU_LOADING:
current_menu = MENU_LOAD
elif current_menu == MENU_MANAGE:
current_menu = MENU_MAIN
updateDisplay = True
pygame.display.update()
FPSCLOCK.tick(FPS)
def terminate():
pygame.quit()
sys.exit()
def checkForQuit():
for event in pygame.event.get(QUIT): # get all the QUIT events
terminate() # terminate if any QUIT events are present
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
terminate() # terminate if the KEYUP event was for the Esc key
pygame.event.post(event) # put the other KEYUP event objects back
def getButtonClicked(x, y, current_menu):
global list_id
global list_position
global list_next
    global pill_id
    global pill_name
if current_menu == MENU_MAIN:
if BUTTON_1.collidepoint( (x, y) ):#dispense
return ACTION_DISPENSE
        elif BUTTON_2.collidepoint( (x, y) ):#load
            return ACTION_LOAD
        elif BUTTON_3.collidepoint( (x, y) ):#status
            return ACTION_STATUS
        elif BUTTON_4.collidepoint( (x, y) ):#manage
            return ACTION_MANAGE
elif BUTTON_5.collidepoint( (x, y) ):#shutdown
return ACTION_SHUTDOWN
elif BUTTON_6.collidepoint( (x, y) ):#settings
return ACTION_SETTINGS
elif current_menu == MENU_SETTINGS:
if BUTTON_6.collidepoint( (x, y) ):#back
return ACTION_BACK
elif BUTTON_5.collidepoint( (x, y) ):#wifi
return ACTION_WIFI
        elif BUTTON_4.collidepoint( (x, y) ):#brightness
return ACTION_BRIGHTNESS
elif current_menu == MENU_WIFI:
if BUTTON_6.collidepoint( (x, y) ):#back
return ACTION_BACK
elif current_menu == MENU_VENDING:
if BUTTON_6.collidepoint( (x, y) ):#back
pill_id = 0
pill_name = ""
return ACTION_BACK
        if BUTTON_5.collidepoint( (x, y) ):#confirm dispense
x = medManager.getMedX(pill_id)
y = medManager.getMedY(pill_id)
print(pill_id, x,y)
#TODO: do vending here
medManager.removeInventory(x,y)
list_id = 0
list_next = 0
list_position = 0
pill_id = 0
pill_name = ""
return ACTION_HOME
elif current_menu == MENU_LOADING:
if BUTTON_6.collidepoint( (x, y) ):#back
pill_id = 0
pill_name = ""
return ACTION_BACK
        if BUTTON_5.collidepoint( (x, y) ):#confirm load
print(pill_id, x,y)
#TODO: do vending here
x = medManager.getFreeSpaceX()
y = medManager.getFreeSpaceY()
d = datetime.date(2016, 11, 23)
medManager.addInventory(x, y, pill_id, d)
print("inserted into: ", x, y)
#return ACTION_HOME
elif current_menu == MENU_BRIGHTNESS:
if BUTTON_6.collidepoint( (x, y) ):#back
return ACTION_BACK
elif current_menu == MENU_DISPENSE:
if BUTTON_6.collidepoint( (x, y) ):#back
list_id = 0
list_next = 0
list_position = 0
return ACTION_BACK
        if LIST_UP.collidepoint( (x, y) ):#scroll up
            return ACTION_LIST_UP
        if LIST_DN.collidepoint( (x, y) ):#scroll down
            return ACTION_LIST_DN
        if LIST_1.collidepoint( (x, y) ):#list row 1
            return ACTION_LIST_1
        if LIST_2.collidepoint( (x, y) ):#list row 2
            return ACTION_LIST_2
        if LIST_3.collidepoint( (x, y) ):#list row 3
            return ACTION_LIST_3
        if LIST_4.collidepoint( (x, y) ):#list row 4
            return ACTION_LIST_4
elif current_menu == MENU_ADDUSER:
if BUTTON_6.collidepoint( (x, y) ):#back
return ACTION_BACK
elif current_menu == MENU_MANAGE:
if BUTTON_6.collidepoint( (x, y) ):#back
list_id = 0
list_next = 0
list_position = 0
return ACTION_BACK
        if LIST_UP.collidepoint( (x, y) ):#scroll up
            return ACTION_LIST_UP
        if LIST_DN.collidepoint( (x, y) ):#scroll down
            return ACTION_LIST_DN
        if LIST_1.collidepoint( (x, y) ):#list row 1
            return ACTION_LIST_1
        if LIST_2.collidepoint( (x, y) ):#list row 2
            return ACTION_LIST_2
        if LIST_3.collidepoint( (x, y) ):#list row 3
            return ACTION_LIST_3
        if LIST_4.collidepoint( (x, y) ):#list row 4
            return ACTION_LIST_4
elif current_menu == MENU_LOAD:
if BUTTON_6.collidepoint( (x, y) ):#back
list_id = 0
list_next = 0
list_position = 0
return ACTION_BACK
        if LIST_UP.collidepoint( (x, y) ):#scroll up
            return ACTION_LIST_UP
        if LIST_DN.collidepoint( (x, y) ):#scroll down
            return ACTION_LIST_DN
        if LIST_1.collidepoint( (x, y) ):#list row 1
            return ACTION_LIST_1
        if LIST_2.collidepoint( (x, y) ):#list row 2
            return ACTION_LIST_2
        if LIST_3.collidepoint( (x, y) ):#list row 3
            return ACTION_LIST_3
        if LIST_4.collidepoint( (x, y) ):#list row 4
            return ACTION_LIST_4
elif current_menu == MENU_SHUTDOWN:
        if BUTTON_5.collidepoint( (x, y) ):#confirm shutdown
os.system("sudo shutdown -h now") #shut down the system
if BUTTON_6.collidepoint( (x, y) ):#back
return ACTION_BACK
elif current_menu == MENU_USERS:
if BUTTON_6.collidepoint( (x, y) ):#back
return ACTION_BACK
        if BUTTON_3.collidepoint( (x, y) ):#add user
            return ACTION_ADDUSER
        if BUTTON_1.collidepoint( (x, y) ):#user 1
            return ACTION_USER1
        if BUTTON_2.collidepoint( (x, y) ):#user 2
            return ACTION_USER2
        if BUTTON_4.collidepoint( (x, y) ):#user 3
            return ACTION_USER3
        if BUTTON_5.collidepoint( (x, y) ):#user 4
            return ACTION_USER4
return None
def drawStatusBar():
pygame.draw.rect(DISPLAYSURF, DARKGRAY, RECTSTATUS)
if wifipercent == 0:
DISPLAYSURF.blit(IMAGE_WIFI0, (0,0))
elif wifipercent <= 20:
DISPLAYSURF.blit(IMAGE_WIFI1, (0,0))
elif wifipercent <= 40:
DISPLAYSURF.blit(IMAGE_WIFI2, (0,0))
elif wifipercent <= 60:
DISPLAYSURF.blit(IMAGE_WIFI3, (0,0))
elif wifipercent <= 80:
DISPLAYSURF.blit(IMAGE_WIFI4, (0,0))
else:
DISPLAYSURF.blit(IMAGE_WIFI4, (0,0)) # >80%: show the strongest icon available (originally blitted IMAGE_WIFI0, the no-signal image)
DISPLAYSURF.blit(IMAGE_ALARM, (50,0))
DISPLAYSURF.blit(IMAGE_NOALARM, (80,0))
DISPLAYSURF.blit(IMAGE_ALERT, (110,0))
DISPLAYSURF.blit(IMAGE_WARNING, (140,0))
DISPLAYSURF.blit(IMAGE_LOCK, (170,0))
DISPLAYSURF.blit(IMAGE_TIMER, (200,0))
def drawUsersMenu():
i=0
for user in users:
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render(user, 1, BLACK)
if i == 0:
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_1)
DISPLAYSURF.blit(IMAGE_FACE, (25,HEADER+BORDER+10))
DISPLAYSURF.blit(label, (15, HEADER+BORDER+70))
elif i == 1:
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_2)
DISPLAYSURF.blit(IMAGE_FACE, (125,HEADER+BORDER+10))
DISPLAYSURF.blit(label, (115, HEADER+BORDER+70))
elif i == 2:
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_4)
DISPLAYSURF.blit(IMAGE_FACE, (25,HEADER+BORDER+110))
DISPLAYSURF.blit(label, (15, HEADER+BORDER+170))
elif i == 3:
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_5)
DISPLAYSURF.blit(IMAGE_FACE, (125,HEADER+BORDER+110))
DISPLAYSURF.blit(label, (115, HEADER+BORDER+170))
i=i+1
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_3)
DISPLAYSURF.blit(IMAGE_PLUS, (225,HEADER+BORDER+10))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("ADD User", 1, BLACK)
DISPLAYSURF.blit(label, (220, HEADER+BORDER+70))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawSettingsMenu():
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_4)
DISPLAYSURF.blit(IMAGE_BRIGHTNESS, (25,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Brightness", 1, BLACK)
DISPLAYSURF.blit(label, (11, 210))
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_5)
DISPLAYSURF.blit(IMAGE_WIFI_SETTINGS, (125,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("WIFI", 1, BLACK)
DISPLAYSURF.blit(label, (137, 210))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawWIFIMenu():
myfont = pygame.font.SysFont("monospace", 15)
##TODO: change for PI
# placeholder strings until the wifitools calls are enabled on the Pi; they must be
# strings because they are concatenated into the labels below
ip = 'unknown' #wifitools.get_connection_info('ip')
mask = 'unknown' #wifitools.get_connection_info('mask')
brd = 'unknown' #wifitools.get_connection_info('broadcast')
mac = 'unknown' #wifitools.get_connection_info('mac')
label = myfont.render(' IP Address: '+ip, 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+20))
label = myfont.render(' Broadcast: '+brd, 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+35))
label = myfont.render(' Net Mask: '+mask, 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+50))
label = myfont.render('MAC Address: '+mac, 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+65))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawBrightnessMenu():
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render('Adjust Screen Brightness', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+20))
SLIDER_OUT = pygame.Rect(5, HEADER+BORDER+40, 200, 30)
pygame.draw.rect(DISPLAYSURF, MID, SLIDER_OUT)
brightness = 160
#print brightness
SLIDER_VAL = pygame.Rect(10, HEADER+BORDER+45, brightness, 20)
pygame.draw.rect(DISPLAYSURF, LIGHT, SLIDER_VAL)
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawDispenseMenu():
global list_position
global list_next
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_1)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_2)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_3)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_4)
myfont = pygame.font.SysFont("monospace", 15)
i=0
for med in medManager.getInventory(0):
if(i >= list_position):
label = myfont.render(med[1], 1, BLACK)
DISPLAYSURF.blit(label, (15, HEADER+20+50*(i-list_position)))
print(med[1])
i=i+1
list_next = i - list_position
if(list_position > 0):
pygame.draw.rect(DISPLAYSURF, MID, LIST_UP)
else:
pygame.draw.rect(DISPLAYSURF, DARK, LIST_UP)
label = myfont.render("Last", 1, BLACK)
DISPLAYSURF.blit(label, (215, HEADER+20))
if(list_next > 4):
pygame.draw.rect(DISPLAYSURF, MID, LIST_DN)
else:
pygame.draw.rect(DISPLAYSURF, DARK, LIST_DN)
label = myfont.render("Next", 1, BLACK)
DISPLAYSURF.blit(label, (215, HEADER+70))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawAddUserMenu():
myfont = pygame.font.SysFont("monospace", 15)
i = 0
for letter in ['q','w','e','r','t','y','u','i','o','p']:
button = pygame.Rect(10+30*i, HEADER+75, 25, 30)
pygame.draw.rect(DISPLAYSURF, MID, button)
label = myfont.render(letter, 1, WHITE)
DISPLAYSURF.blit(label, (20+30*i, HEADER+80))
i = i + 1
i = 0
for letter in ['a','s','d','f','g','h','j','k','l']:
button = pygame.Rect(25+30*i, HEADER+110, 25, 30)
pygame.draw.rect(DISPLAYSURF, MID, button)
label = myfont.render(letter, 1, WHITE)
DISPLAYSURF.blit(label, (35+30*i, HEADER+115))
i = i + 1
i = 0
for letter in ['^','z','x','c','v','b','n','m',',','.']:
button = pygame.Rect(10+30*i, HEADER+145, 25, 30)
pygame.draw.rect(DISPLAYSURF, MID, button)
label = myfont.render(letter, 1, WHITE)
DISPLAYSURF.blit(label, (20+30*i, HEADER+150))
i = i + 1
i = 0
for letter in ['space', 'del', 'done']:
button = pygame.Rect(25+90*i, HEADER+180, 85, 30)
pygame.draw.rect(DISPLAYSURF, MID, button)
label = myfont.render(letter, 1, WHITE)
DISPLAYSURF.blit(label, (35+90*i, HEADER+185))
i = i + 1
#pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
#DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
#myfont = pygame.font.SysFont("monospace", 15)
#label = myfont.render("Back", 1, BLACK)
#DISPLAYSURF.blit(label, (237, 210))
def drawShutdownMenu():
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render('Push the \'Turn Off\'', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+20))
label = myfont.render('button one more time', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+35))
label = myfont.render('to shut down machine', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+50))
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_5)
DISPLAYSURF.blit(IMAGE_POWER, (125,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Turn Off", 1, BLACK)
DISPLAYSURF.blit(label, (117, 210))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawVendingMenu():
global list_id
global pill_id
global pill_name
i=0
for med in medManager.getInventory(0):
if(i == list_id):
pill_name = med[1]
pill_id = med[0]
i=i+1
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render('Push the \'Dispense\'', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+20))
label = myfont.render('button one more time', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+35))
label = myfont.render('to dispense one pill of:', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+50))
label = myfont.render(pill_name, 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+75))
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_5)
DISPLAYSURF.blit(IMAGE_PILL, (125,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Dispense", 1, BLACK)
DISPLAYSURF.blit(label, (117, 210))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawLoadMenu():
global list_position
global list_next
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_1)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_2)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_3)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_4)
myfont = pygame.font.SysFont("monospace", 15)
i=0
for med in medManager.getInventory(1):
if(i >= list_position):
label = myfont.render(med[1], 1, BLACK)
DISPLAYSURF.blit(label, (15, HEADER+20+50*(i-list_position)))
print(med[1])
i=i+1
list_next = i - list_position
if(list_position > 0):
pygame.draw.rect(DISPLAYSURF, MID, LIST_UP)
else:
pygame.draw.rect(DISPLAYSURF, DARK, LIST_UP)
label = myfont.render("Last", 1, BLACK)
DISPLAYSURF.blit(label, (215, HEADER+20))
if(list_next > 4):
pygame.draw.rect(DISPLAYSURF, MID, LIST_DN)
else:
pygame.draw.rect(DISPLAYSURF, DARK, LIST_DN)
label = myfont.render("Next", 1, BLACK)
DISPLAYSURF.blit(label, (215, HEADER+70))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawLoadingMenu():
global list_id
global pill_id
global pill_name
i=0
for med in medManager.getInventory(1):
if(i == list_id):
pill_name = med[1]
pill_id = med[0]
i=i+1
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render('Push the \'Load\'', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+20))
label = myfont.render('once for each pill you', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+35))
label = myfont.render('want to load of:', 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+50))
label = myfont.render(pill_name, 1, WHITE)
DISPLAYSURF.blit(label, (10, HEADER+75))
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_5)
DISPLAYSURF.blit(IMAGE_PILL, (125,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Load", 1, BLACK)
DISPLAYSURF.blit(label, (117, 210))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawManageMenu():
global list_position
global list_next
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_1)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_2)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_3)
pygame.draw.rect(DISPLAYSURF, LIGHT, LIST_4)
myfont = pygame.font.SysFont("monospace", 15)
i=0
for med in medManager.getInventory(1):
if(i >= list_position):
label = myfont.render(med[1], 1, BLACK)
DISPLAYSURF.blit(label, (15, HEADER+20+50*(i-list_position)))
print(med[1])
i=i+1
list_next = i - list_position
if(list_position > 0):
pygame.draw.rect(DISPLAYSURF, MID, LIST_UP)
else:
pygame.draw.rect(DISPLAYSURF, DARK, LIST_UP)
label = myfont.render("Last", 1, BLACK)
DISPLAYSURF.blit(label, (215, HEADER+20))
if(list_next > 4):
pygame.draw.rect(DISPLAYSURF, MID, LIST_DN)
else:
pygame.draw.rect(DISPLAYSURF, DARK, LIST_DN)
label = myfont.render("Next", 1, BLACK)
DISPLAYSURF.blit(label, (215, HEADER+70))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_6)
DISPLAYSURF.blit(IMAGE_BACK, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Back", 1, BLACK)
DISPLAYSURF.blit(label, (237, 210))
def drawMainMenu():
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_1) #dispense
DISPLAYSURF.blit(IMAGE_PILL, (25,HEADER+BORDER+10))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Dispense", 1, BLACK)
DISPLAYSURF.blit(label, (19, 110))
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_2) #load
DISPLAYSURF.blit(IMAGE_LOAD, (125,HEADER+BORDER+10))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Load", 1, BLACK)
DISPLAYSURF.blit(label, (137, 110))
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_3) #status
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Status", 1, BLACK)
DISPLAYSURF.blit(label, (222, 110)) # status button label; position assumed to mirror the other top-row buttons
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_4) #manage
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Manage", 1, BLACK)
DISPLAYSURF.blit(label, (22, 210))
pygame.draw.rect(DISPLAYSURF, MID, BUTTON_5)#power
DISPLAYSURF.blit(IMAGE_POWER, (125,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Turn Off", 1, BLACK)
DISPLAYSURF.blit(label, (117, 210))
pygame.draw.rect(DISPLAYSURF, LIGHT, BUTTON_6)#settings
DISPLAYSURF.blit(IMAGE_GEARS, (225,HEADER+BORDER+110))
myfont = pygame.font.SysFont("monospace", 15)
label = myfont.render("Settings", 1, BLACK)
DISPLAYSURF.blit(label, (217, 210))
if __name__ == '__main__':
main()
|
|
"""Extend introspect.py for Java based Jython classes."""
from org.python.core import PyReflectedFunction
from java.lang import Class, Object
from java.lang.reflect import Modifier
from introspect import *
from sets import Set
import string
import re
import sys
import types
__author__ = "Don Coleman <dcoleman@chariotsolutions.com>"
_re_import_package = re.compile(r'import\s+(.+)\.') # import package
# TODO need to check for a trailing '.' example: "from java import lang." don't autocomplete on trailing '.'
_re_from_package_import = re.compile(r'from\s+(\w+(?:\.\w+)*)\.?(?:\s*import\s*)?') # from package import class
def completePackageName(target):
""" Get a package object given the full name."""
targetComponents = target.split('.')
base = targetComponents[0]
baseModule = __import__(base, globals(), locals())
module = baseModule
for component in targetComponents[1:]:
module = getattr(module, component)
names = dir(module)
names.remove('__name__')
names.append('*')
return names
def getPackageName(command):
match = _re_import_package.match(command)
if not match:
#try the other re
match = _re_from_package_import.match(command)
return match.groups()[0]
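# Hedged usage sketch (not from the original source): how the two helpers above cooperate
# when the user is typing an import statement. The inputs and outputs are illustrative only.
# >>> getPackageName('from java.lang import ')
# 'java.lang'
# >>> completePackageName('java.lang')   # dir() of the package, '__name__' removed, plus '*'
# ['Boolean', 'Byte', 'String', ..., '*']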
def getAutoCompleteList(command='', locals=None, includeMagic=1, includeSingle=1, includeDouble=1):
"""
Return list of auto-completion options for command.
The list of options will be based on the locals namespace.
"""
# Temp KLUDGE here rather than in console.py
command += "."
attributes = []
# Get the proper chunk of code from the command.
root = getRoot(command, terminator='.')
# check to see if the user is attempting to import a package
# this may need to adjust this so that it doesn't pollute the namespace
if command.startswith('import ') or command.startswith('from '):
target = getPackageName(command)
return completePackageName(target)
try:
if locals is not None:
object = eval(root, locals)
else:
object = eval(root)
except:
return attributes
if ispython(object): # use existing code
attributes = getAttributeNames(object, includeMagic, includeSingle, includeDouble)
else:
if inspect.isclass(object):
attributes = staticMethodNames(object)
attributes.extend(staticFieldNames(object))
else:
attributes = list(instanceMethodNames(object.__class__))
return attributes
def instanceMethodNames(clazz):
"""return a Set of instance method name for a Class"""
method_names = Set()
declared_methods = Class.getDeclaredMethods(clazz)
for method in declared_methods:
modifiers = method.getModifiers()
if not Modifier.isStatic(modifiers) and Modifier.isPublic(modifiers):
name = method.name
method_names.add(name)
if name.startswith("get") and len(name) > 3 and len(method.getParameterTypes()) == 0:
property_name = name[3].lower() + name[4:]
method_names.add(property_name)
for eachBase in clazz.__bases__:
if not ispython(eachBase):
method_names = method_names | instanceMethodNames(eachBase)
return method_names
def staticMethodNames(clazz):
"""return a list of static method name for a class"""
static_methods = {}
declared_methods = Class.getDeclaredMethods(clazz)
for method in declared_methods:
if Modifier.isStatic(method.getModifiers()) and Modifier.isPublic(method.getModifiers()):
static_methods[method.name] = method
methods = static_methods.keys()
for eachBase in clazz.__bases__:
# with Jython 2.5 type is a base of Object, which puts asName in the list
# will be a problem for real Java objects that extend Python objects
# see similar "fixes" in instanceMethodNames and staticFieldNames
if not ispython(eachBase):
methods.extend(staticMethodNames(eachBase))
return methods
def staticFieldNames(clazz):
"""return a list of static field names for class"""
static_fields = {}
declared_fields = Class.getDeclaredFields(clazz)
for field in declared_fields:
if Modifier.isStatic(field.getModifiers()) and Modifier.isPublic(field.getModifiers()):
static_fields[field.name] = field
fields = static_fields.keys()
for eachBase in clazz.__bases__:
if not ispython(eachBase):
fields.extend(staticFieldNames(eachBase))
return fields
def getCallTipJava(command='', locals=None):
"""For a command, return a tuple of object name, argspec, tip text.
The call tip information will be based on the locals namespace."""
calltip = ('', '', '') # object name, argspec, tip text.
# Get the proper chunk of code from the command.
root = getRoot(command, terminator='(')
try:
if locals is not None:
object = eval(root, locals)
else:
object = eval(root)
except:
return calltip
if ispython(object):
# Patrick's code handles python code
# TODO fix in future because getCallTip runs eval() again
return getCallTip(command, locals)
name = ''
try:
name = object.__name__
except AttributeError:
pass
tipList = []
argspec = '' # not using argspec for Java
if inspect.isclass(object):
# get the constructor(s)
# TODO consider getting modifiers since jython can access private methods
constructors = object.getConstructors()
for constructor in constructors:
paramList = []
paramTypes = constructor.getParameterTypes()
# paramTypes is an array of classes, we need Strings
# TODO consider list comprehension
for param in paramTypes:
paramList.append(param.__name__)
paramString = string.join(paramList,', ')
tip = "%s(%s)" % (constructor.name, paramString)
tipList.append(tip)
elif inspect.ismethod(object) or isinstance(object, PyReflectedFunction):
method = object
try:
object = method.im_class
except: # PyReflectedFunction
object = method.argslist[0].declaringClass
# java allows overloading so we may have more than one method
methodArray = object.getMethods()
for eachMethod in methodArray:
if eachMethod.name == method.__name__:
paramList = []
for eachParam in eachMethod.parameterTypes:
paramList.append(eachParam.__name__)
paramString = string.join(paramList,', ')
# create a python style string a la PyCrust
# we're showing the parameter type rather than the parameter name, since that's all I can get
# we need to show multiple methods for overloading
# do we want to show the method visibility? how about exceptions?
# note: name, return type and exceptions same for EVERY overload method
tip = "%s(%s) -> %s" % (eachMethod.name, paramString, "unkown_return_type")
tipList.append(tip)
tip_text = beautify(string.join(tipList,"\n"))
calltip = (name, argspec, tip_text)
return calltip
def beautify(tip_text):
"Make the call tip text prettier"
tip_text = tip_text.replace("java.lang.", "")
if "[" in tip_text:
tip_text = tip_text.replace("[B", "byte[]")
tip_text = tip_text.replace("[S", "short[]")
tip_text = tip_text.replace("[I", "int[]")
tip_text = tip_text.replace("[J", "long[]")
tip_text = tip_text.replace("[F", "float[]")
tip_text = tip_text.replace("[D", "double[]")
tip_text = tip_text.replace("[Z", "boolean[]")
tip_text = tip_text.replace("[C", "char[]")
return tip_text
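# Informal example of the JVM type-descriptor cleanup performed by beautify() (illustrative only):
# >>> beautify('getBytes(java.lang.String) -> [B')
# 'getBytes(String) -> byte[]'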
def ispython21(object):
"""
Figure out if this is Python code or Java Code
"""
pyclass = 0
pycode = 0
pyinstance = 0
if inspect.isclass(object):
try:
object.__doc__
pyclass = 1
except AttributeError:
pyclass = 0
elif inspect.ismethod(object):
try:
object.__dict__
pycode = 1
except AttributeError:
pycode = 0
else: # I guess an instance of an object falls here
try:
object.__dict__
pyinstance = 1
except AttributeError:
pyinstance = 0
# print "object", object, "pyclass", pyclass, "pycode", pycode, "returning", pyclass | pycode
return pyclass | pycode | pyinstance
def ispython22(object):
"""
Return true if object is Python code.
"""
object_type = type(object)
if object_type.__name__.startswith("java") or isinstance(object, PyReflectedFunction):
python = False
elif object_type is types.MethodType:
# both Java and Python methods return MethodType
try:
object.__dict__
python = True
except AttributeError:
python = False
else:
# assume everything else is python
python = True
return python
def ispython25(object):
"""
Return true if object is Python code.
"""
if isinstance(object, Class):
python = False
elif isinstance(object, Object):
python = False
elif isinstance(object, PyReflectedFunction):
python = False
elif type(object) == types.MethodType and not ispython(object.im_class):
python = False
else:
# assume everything else is python
python = True
return python
# Dynamically assign the version of ispython
# To deal with differences between Jython 2.1, 2.2 and 2.5
if sys.version.startswith('2.1'):
ispython = ispython21
elif sys.version.startswith('2.5'):
ispython = ispython25
else:
ispython = ispython22
def debug(name, value=None):
if value is None:
print >> sys.stderr, name
else:
print >> sys.stderr, "%s = %s" % (name, value)
|
|
#!/usr/bin/env python
import argparse
import binascii
import copy
import datetime
import hashlib
import json
import logging
import os
import shutil
import struct
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from collections import defaultdict
from Bio.Data import CodonTable
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('jbrowse')
TODAY = datetime.datetime.now().strftime("%Y-%m-%d")
GALAXY_INFRASTRUCTURE_URL = None
class ColorScaling(object):
COLOR_FUNCTION_TEMPLATE = """
function(feature, variableName, glyphObject, track) {{
var score = {score};
{opacity}
return 'rgba({red}, {green}, {blue}, ' + opacity + ')';
}}
"""
COLOR_FUNCTION_TEMPLATE_QUAL = r"""
function(feature, variableName, glyphObject, track) {{
var search_up = function self(sf, attr){{
if(sf.get(attr) !== undefined){{
return sf.get(attr);
}}
if(sf.parent() === undefined) {{
return;
}}else{{
return self(sf.parent(), attr);
}}
}};
var search_down = function self(sf, attr){{
if(sf.get(attr) !== undefined){{
return sf.get(attr);
}}
if(sf.children() === undefined) {{
return;
}}else{{
var kids = sf.children();
for(var child_idx in kids){{
var x = self(kids[child_idx], attr);
if(x !== undefined){{
return x;
}}
}}
return;
}}
}};
var color = ({user_spec_color} || search_up(feature, 'color') || search_down(feature, 'color') || {auto_gen_color});
var score = (search_up(feature, 'score') || search_down(feature, 'score'));
{opacity}
if(score === undefined){{ opacity = 1; }}
var result = /^#?([a-f\d]{{2}})([a-f\d]{{2}})([a-f\d]{{2}})$/i.exec(color);
var red = parseInt(result[1], 16);
var green = parseInt(result[2], 16);
var blue = parseInt(result[3], 16);
if(isNaN(opacity) || opacity < 0){{ opacity = 0; }}
return 'rgba(' + red + ',' + green + ',' + blue + ',' + opacity + ')';
}}
"""
OPACITY_MATH = {
'linear': """
var opacity = (score - ({min})) / (({max}) - ({min}));
""",
'logarithmic': """
var opacity = (score - ({min})) / (({max}) - ({min}));
opacity = Math.log10(opacity) + Math.log10({max});
""",
'blast': """
var opacity = 0;
if(score == 0.0) {{
opacity = 1;
}} else {{
opacity = (20 - Math.log10(score)) / 180;
}}
"""
}
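# Worked example (informal): with the 'blast' rule above, an e-value of 1e-50 maps to
# (20 - log10(1e-50)) / 180 = 70 / 180 ~= 0.39 opacity, while a score of exactly 0.0 is fully opaque.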
BREWER_COLOUR_IDX = 0
BREWER_COLOUR_SCHEMES = [
(166, 206, 227),
(31, 120, 180),
(178, 223, 138),
(51, 160, 44),
(251, 154, 153),
(227, 26, 28),
(253, 191, 111),
(255, 127, 0),
(202, 178, 214),
(106, 61, 154),
(255, 255, 153),
(177, 89, 40),
(228, 26, 28),
(55, 126, 184),
(77, 175, 74),
(152, 78, 163),
(255, 127, 0),
]
BREWER_DIVERGING_PALLETES = {
'BrBg': ("#543005", "#003c30"),
'PiYg': ("#8e0152", "#276419"),
'PRGn': ("#40004b", "#00441b"),
'PuOr': ("#7f3b08", "#2d004b"),
'RdBu': ("#67001f", "#053061"),
'RdGy': ("#67001f", "#1a1a1a"),
'RdYlBu': ("#a50026", "#313695"),
'RdYlGn': ("#a50026", "#006837"),
'Spectral': ("#9e0142", "#5e4fa2"),
}
def __init__(self):
self.brewer_colour_idx = 0
def rgb_from_hex(self, hexstr):
# http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back
return struct.unpack('BBB', binascii.unhexlify(hexstr))
def min_max_gff(self, gff_file):
min_val = None
max_val = None
with open(gff_file, 'r') as handle:
for line in handle:
try:
value = float(line.split('\t')[5])
# track the running min/max explicitly; the previous `min_val or value` pattern
# treated a legitimate score of 0.0 as "unset"
if min_val is None or value < min_val:
min_val = value
if max_val is None or value > max_val:
max_val = value
except Exception:
pass
return min_val, max_val
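# Illustrative only: min_max_gff reads column 6 of each GFF line (the score field); e.g. a line like
# "chr1\t.\tgene\t100\t900\t37.5\t+\t.\tID=gene1" contributes the value 37.5.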
def hex_from_rgb(self, r, g, b):
return '#%02x%02x%02x' % (r, g, b)
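# Hedged sanity check for the two colour helpers above (illustrative values):
# >>> ColorScaling().rgb_from_hex('1f78b4')
# (31, 120, 180)
# >>> ColorScaling().hex_from_rgb(31, 120, 180)
# '#1f78b4'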
def _get_colours(self):
r, g, b = self.BREWER_COLOUR_SCHEMES[self.brewer_colour_idx % len(self.BREWER_COLOUR_SCHEMES)]
self.brewer_colour_idx += 1
return r, g, b
def parse_menus(self, track):
trackConfig = {'menuTemplate': [{}, {}, {}, {}]}
if 'menu' in track['menus']:
menu_list = [track['menus']['menu']]
if isinstance(track['menus']['menu'], list):
menu_list = track['menus']['menu']
for m in menu_list:
tpl = {
'action': m['action'],
'label': m.get('label', '{name}'),
'iconClass': m.get('iconClass', 'dijitIconBookmark'),
}
if 'url' in m:
tpl['url'] = m['url']
if 'content' in m:
tpl['content'] = m['content']
if 'title' in m:
tpl['title'] = m['title']
trackConfig['menuTemplate'].append(tpl)
return trackConfig
def parse_colours(self, track, trackFormat, gff3=None):
# Wiggle tracks have a bicolor palette
trackConfig = {'style': {}}
if trackFormat == 'wiggle':
trackConfig['style']['pos_color'] = track['wiggle']['color_pos']
trackConfig['style']['neg_color'] = track['wiggle']['color_neg']
if trackConfig['style']['pos_color'] == '__auto__':
trackConfig['style']['neg_color'] = self.hex_from_rgb(*self._get_colours())
trackConfig['style']['pos_color'] = self.hex_from_rgb(*self._get_colours())
# Wiggle tracks can change colour at a specified place
bc_pivot = track['wiggle']['bicolor_pivot']
if bc_pivot not in ('mean', 'zero'):
# The values are either one of those two strings
# or a number
bc_pivot = float(bc_pivot)
trackConfig['bicolor_pivot'] = bc_pivot
elif 'scaling' in track:
if track['scaling']['method'] == 'ignore':
if track['scaling']['scheme']['color'] != '__auto__':
trackConfig['style']['color'] = track['scaling']['scheme']['color']
else:
trackConfig['style']['color'] = self.hex_from_rgb(*self._get_colours())
else:
# Scored method
algo = track['scaling']['algo']
# linear, logarithmic, blast
scales = track['scaling']['scales']
# type __auto__, manual (min, max)
scheme = track['scaling']['scheme']
# scheme -> (type (opacity), color)
# ==================================
# GENE CALLS OR BLAST
# ==================================
if trackFormat == 'blast':
red, green, blue = self._get_colours()
color_function = self.COLOR_FUNCTION_TEMPLATE.format(**{
'score': "feature._parent.get('score')",
'opacity': self.OPACITY_MATH['blast'],
'red': red,
'green': green,
'blue': blue,
})
trackConfig['style']['color'] = color_function.replace('\n', '')
elif trackFormat == 'gene_calls':
# Default values, based on GFF3 spec
min_val = 0
max_val = 1000
# Get min/max and build a scoring function since JBrowse doesn't
if scales['type'] == 'automatic' or scales['type'] == '__auto__':
min_val, max_val = self.min_max_gff(gff3)
else:
min_val = scales.get('min', 0)
max_val = scales.get('max', 1000)
if scheme['color'] == '__auto__':
user_color = 'undefined'
auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
elif scheme['color'].startswith('#'):
user_color = "'%s'" % self.hex_from_rgb(*self.rgb_from_hex(scheme['color'][1:]))
auto_color = 'undefined'
else:
user_color = 'undefined'
auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
color_function = self.COLOR_FUNCTION_TEMPLATE_QUAL.format(**{
'opacity': self.OPACITY_MATH[algo].format(**{'max': max_val, 'min': min_val}),
'user_spec_color': user_color,
'auto_gen_color': auto_color,
})
trackConfig['style']['color'] = color_function.replace('\n', '')
return trackConfig
def etree_to_dict(t):
d = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(etree_to_dict, children):
for k, v in dc.items():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
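# Hedged usage sketch for etree_to_dict (hypothetical XML, attributes become '@'-prefixed keys):
# >>> etree_to_dict(ET.fromstring('<options><style className="feature"/></options>'))
# {'options': {'style': {'@className': 'feature'}}}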
# score comes from feature._parent.get('score') or feature.get('score')
INSTALLED_TO = os.path.dirname(os.path.realpath(__file__))
def metadata_from_node(node):
metadata = {}
try:
if len(node.findall('dataset')) != 1:
# exit early
return metadata
except Exception:
return {}
for (key, value) in node.findall('dataset')[0].attrib.items():
metadata['dataset_%s' % key] = value
for (key, value) in node.findall('history')[0].attrib.items():
metadata['history_%s' % key] = value
for (key, value) in node.findall('metadata')[0].attrib.items():
metadata['metadata_%s' % key] = value
for (key, value) in node.findall('tool')[0].attrib.items():
metadata['tool_%s' % key] = value
# Additional Mappings applied:
metadata['dataset_edam_format'] = '<a target="_blank" href="http://edamontology.org/{0}">{1}</a>'.format(metadata['dataset_edam_format'], metadata['dataset_file_ext'])
metadata['history_user_email'] = '<a href="mailto:{0}">{0}</a>'.format(metadata['history_user_email'])
metadata['history_display_name'] = '<a target="_blank" href="{galaxy}/history/view/{encoded_hist_id}">{hist_name}</a>'.format(
galaxy=GALAXY_INFRASTRUCTURE_URL,
encoded_hist_id=metadata['history_id'],
hist_name=metadata['history_display_name']
)
metadata['tool_tool'] = '<a target="_blank" href="{galaxy}/datasets/{encoded_id}/show_params">{tool_id}</a>'.format(
galaxy=GALAXY_INFRASTRUCTURE_URL,
encoded_id=metadata['dataset_id'],
tool_id=metadata['tool_tool_id'],
tool_version=metadata['tool_tool_version'],
)
return metadata
class JbrowseConnector(object):
def __init__(self, jbrowse, outdir, genomes, standalone=False, gencode=1):
self.TN_TABLE = {
'gff3': '--gff',
'gff': '--gff',
'bed': '--bed',
'genbank': '--gbk',
}
self.cs = ColorScaling()
self.jbrowse = jbrowse
self.outdir = outdir
self.genome_paths = genomes
self.standalone = standalone
self.gencode = gencode
self.tracksToIndex = []
if standalone:
self.clone_jbrowse(self.jbrowse, self.outdir)
else:
try:
os.makedirs(self.outdir)
except OSError:
# Ignore if the folder exists
pass
try:
os.makedirs(os.path.join(self.outdir, 'data', 'raw'))
except OSError:
# Ignore if the folder exists
pass
self.process_genomes()
self.update_gencode()
def update_gencode(self):
table = CodonTable.unambiguous_dna_by_id[int(self.gencode)]
trackList = os.path.join(self.outdir, 'data', 'trackList.json')
with open(trackList, 'r') as handle:
trackListData = json.load(handle)
trackListData['tracks'][0].update({
'codonStarts': table.start_codons,
'codonStops': table.stop_codons,
'codonTable': table.forward_table,
})
with open(trackList, 'w') as handle:
json.dump(trackListData, handle, indent=2)
def subprocess_check_call(self, command):
log.debug('cd %s && %s', self.outdir, ' '.join(command))
subprocess.check_call(command, cwd=self.outdir)
def _jbrowse_bin(self, command):
return os.path.realpath(os.path.join(self.jbrowse, 'bin', command))
def process_genomes(self):
for genome_node in self.genome_paths:
# TODO: Waiting on https://github.com/GMOD/jbrowse/pull/884
self.subprocess_check_call([
'perl', self._jbrowse_bin('prepare-refseqs.pl'),
'--fasta', genome_node['path']])
def generate_names(self):
# Generate names
args = [
'perl', self._jbrowse_bin('generate-names.pl'),
'--hashBits', '16'
]
tracks = ','.join(self.tracksToIndex)
if tracks:
args += ['--tracks', tracks]
else:
# No tracks to index, index only the refseq
args += ['--tracks', 'DNA']
self.subprocess_check_call(args)
def _add_json(self, json_data):
cmd = [
'perl', self._jbrowse_bin('add-json.pl'),
json.dumps(json_data),
os.path.join('data', 'trackList.json')
]
self.subprocess_check_call(cmd)
def _add_track_json(self, json_data):
if len(json_data) == 0:
return
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(json.dumps(json_data))
tmp.close()
cmd = ['perl', self._jbrowse_bin('add-track-json.pl'), tmp.name,
os.path.join('data', 'trackList.json')]
self.subprocess_check_call(cmd)
os.unlink(tmp.name)
def _blastxml_to_gff3(self, xml, min_gap=10):
gff3_unrebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'blastxml_to_gapped_gff3.py'),
'--trim', '--trim_end', '--min_gap', str(min_gap), xml]
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_unrebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_unrebased)
gff3_unrebased.close()
return gff3_unrebased.name
def add_blastxml(self, data, trackData, blastOpts, **kwargs):
gff3 = self._blastxml_to_gff3(data, min_gap=blastOpts['min_gap'])
if 'parent' in blastOpts and blastOpts['parent'] != 'None':
gff3_rebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'gff3_rebase.py')]
if blastOpts.get('protein', 'false') == 'true':
cmd.append('--protein2dna')
cmd.extend([os.path.realpath(blastOpts['parent']), gff3])
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_rebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_rebased)
gff3_rebased.close()
# Replace original gff3 file
shutil.copy(gff3_rebased.name, gff3)
os.unlink(gff3_rebased.name)
config = {
'glyph': 'JBrowse/View/FeatureGlyph/Segments',
"category": trackData['category'],
}
clientConfig = trackData['style']
cmd = ['perl', self._jbrowse_bin('flatfile-to-json.pl'),
'--gff', gff3,
'--trackLabel', trackData['label'],
'--key', trackData['key'],
'--clientConfig', json.dumps(clientConfig),
'--config', json.dumps(config),
'--trackType', 'BlastView/View/Track/CanvasFeatures'
]
# className in --clientConfig is ignored, it needs to be set with --className
if 'className' in trackData['style']:
cmd += ['--className', trackData['style']['className']]
self.subprocess_check_call(cmd)
os.unlink(gff3)
if blastOpts.get('index', 'false') == 'true':
self.tracksToIndex.append("%s" % trackData['label'])
def add_bigwig(self, data, trackData, wiggleOpts, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bw')
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.bw')
trackData.update({
"urlTemplate": url,
"storeClass": "JBrowse/Store/SeqFeature/BigWig",
"type": "JBrowse/View/Track/Wiggle/Density",
})
trackData['type'] = wiggleOpts['type']
trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False
if 'min' in wiggleOpts and 'max' in wiggleOpts:
trackData['min_score'] = wiggleOpts['min']
trackData['max_score'] = wiggleOpts['max']
else:
trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')
trackData['scale'] = wiggleOpts['scale']
self._add_track_json(trackData)
def add_bam(self, data, trackData, bamOpts, bam_index=None, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bam')
cmd = ['ln', '-s', os.path.realpath(data), dest]
self.subprocess_check_call(cmd)
cmd = ['ln', '-s', os.path.realpath(bam_index), dest + '.bai']
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.bam')
trackData.update({
"urlTemplate": url,
"type": "JBrowse/View/Track/Alignments2",
"storeClass": "JBrowse/Store/SeqFeature/BAM",
})
# Apollo will only switch to the (prettier) 'bam-read' className if it's not set explicitly in the track config
# So remove the default 'feature' value for these bam tracks
if 'className' in trackData['style'] and trackData['style']['className'] == 'feature':
del trackData['style']['className']
self._add_track_json(trackData)
if bamOpts.get('auto_snp', 'false') == 'true':
trackData2 = copy.copy(trackData)
trackData2.update({
"type": "JBrowse/View/Track/SNPCoverage",
"key": trackData['key'] + " - SNPs/Coverage",
"label": trackData['label'] + "_autosnp",
})
self._add_track_json(trackData2)
def add_vcf(self, data, trackData, vcfOpts={}, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.vcf')
# ln?
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
cmd = ['bgzip', dest]
self.subprocess_check_call(cmd)
cmd = ['tabix', '-p', 'vcf', dest + '.gz']
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.vcf')
trackData.update({
"urlTemplate": url,
"type": "JBrowse/View/Track/HTMLVariants",
"storeClass": "JBrowse/Store/SeqFeature/VCFTabix",
})
self._add_track_json(trackData)
def add_features(self, data, format, trackData, gffOpts, metadata=None, **kwargs):
cmd = [
'perl', self._jbrowse_bin('flatfile-to-json.pl'),
self.TN_TABLE.get(format, 'gff'),
data,
'--trackLabel', trackData['label'],
'--key', trackData['key']
]
# className in --clientConfig is ignored, it needs to be set with --className
if 'className' in trackData['style']:
cmd += ['--className', trackData['style']['className']]
config = copy.copy(trackData)
clientConfig = trackData['style']
del config['style']
if 'match' in gffOpts:
config['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'
if bool(gffOpts['match']):
# Can be empty for CanvasFeatures = will take all by default
cmd += ['--type', gffOpts['match']]
cmd += ['--clientConfig', json.dumps(clientConfig),
]
trackType = 'JBrowse/View/Track/CanvasFeatures'
if 'trackType' in gffOpts:
trackType = gffOpts['trackType']
if trackType == 'JBrowse/View/Track/CanvasFeatures':
if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
config['transcriptType'] = gffOpts['transcriptType']
if 'subParts' in gffOpts and gffOpts['subParts']:
config['subParts'] = gffOpts['subParts']
if 'impliedUTRs' in gffOpts and gffOpts['impliedUTRs']:
config['impliedUTRs'] = gffOpts['impliedUTRs']
elif trackType == 'JBrowse/View/Track/HTMLFeatures':
if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
cmd += ['--type', gffOpts['transcriptType']]
cmd += [
'--trackType', gffOpts['trackType']
]
if metadata:
config.update({'metadata': metadata})
cmd.extend(['--config', json.dumps(config)])
self.subprocess_check_call(cmd)
if gffOpts.get('index', 'false') == 'true':
self.tracksToIndex.append("%s" % trackData['label'])
def add_rest(self, url, trackData, **kwargs):
data = {
"label": trackData['label'],
"key": trackData['key'],
"category": trackData['category'],
"type": "JBrowse/View/Track/HTMLFeatures",
"storeClass": "JBrowse/Store/SeqFeature/REST",
"baseUrl": url,
"query": {
"organism": "tyrannosaurus"
}
}
self._add_track_json(data)
def process_annotations(self, track):
category = track['category'].replace('__pd__date__pd__', TODAY)
outputTrackConfig = {
'style': {
'label': track['style'].get('label', 'description'),
'className': track['style'].get('className', 'feature'),
'description': track['style'].get('description', ''),
},
'overridePlugins': track['style'].get('overridePlugins', False) == 'True',
'overrideDraggable': track['style'].get('overrideDraggable', False) == 'True',
'maxHeight': track['style'].get('maxHeight', '600'),
'category': category,
}
mapped_chars = {
'>': '__gt__',
'<': '__lt__',
"'": '__sq__',
'"': '__dq__',
'[': '__ob__',
']': '__cb__',
'{': '__oc__',
'}': '__cc__',
'@': '__at__',
'#': '__pd__'
}
for i, (dataset_path, dataset_ext, track_human_label, extra_metadata) in enumerate(track['trackfiles']):
# Unsanitize labels (element_identifiers are always sanitized by Galaxy)
for key, value in mapped_chars.items():
track_human_label = track_human_label.replace(value, key)
log.info('Processing %s / %s', category, track_human_label)
outputTrackConfig['key'] = track_human_label
# We add extra data to hash for the case of REST + SPARQL.
try:
rest_url = track['conf']['options']['url']
except KeyError:
rest_url = ''
# I chose to use track['category'] instead of 'category' here. This
# is intentional. This way re-running the tool on a different date
# will not generate different hashes and make comparison of outputs
# much simpler.
hashData = [dataset_path, track_human_label, track['category'], rest_url]
hashData = '|'.join(hashData).encode('utf-8')
outputTrackConfig['label'] = hashlib.md5(hashData).hexdigest() + '_%s' % i
# Colour parsing is complex due to different track types having
# different colour options.
colourOptions = self.cs.parse_colours(track['conf']['options'], track['format'], gff3=dataset_path)
# This used to be done with a dict.update() call, however that wiped out any previous style settings...
for key in colourOptions:
if key == 'style':
for subkey in colourOptions['style']:
outputTrackConfig['style'][subkey] = colourOptions['style'][subkey]
else:
outputTrackConfig[key] = colourOptions[key]
if 'menus' in track['conf']['options']:
menus = self.cs.parse_menus(track['conf']['options'])
outputTrackConfig.update(menus)
# import pprint; pprint.pprint(track)
# import sys; sys.exit()
if dataset_ext in ('gff', 'gff3', 'bed'):
self.add_features(dataset_path, dataset_ext, outputTrackConfig,
track['conf']['options']['gff'], metadata=extra_metadata)
elif dataset_ext == 'bigwig':
self.add_bigwig(dataset_path, outputTrackConfig,
track['conf']['options']['wiggle'], metadata=extra_metadata)
elif dataset_ext == 'bam':
real_indexes = track['conf']['options']['pileup']['bam_indices']['bam_index']
if not isinstance(real_indexes, list):
# <bam_indices>
# <bam_index>/path/to/a.bam.bai</bam_index>
# </bam_indices>
#
# The above will result in the 'bam_index' key containing a
# string. If there are two or more indices, the container
# becomes a list. Fun!
real_indexes = [real_indexes]
self.add_bam(dataset_path, outputTrackConfig,
track['conf']['options']['pileup'],
bam_index=real_indexes[i], metadata=extra_metadata)
elif dataset_ext == 'blastxml':
self.add_blastxml(dataset_path, outputTrackConfig, track['conf']['options']['blast'], metadata=extra_metadata)
elif dataset_ext == 'vcf':
self.add_vcf(dataset_path, outputTrackConfig, metadata=extra_metadata)
elif dataset_ext == 'rest':
self.add_rest(track['conf']['options']['url'], outputTrackConfig, metadata=extra_metadata)
else:
log.warn('Do not know how to handle %s', dataset_ext)
# Return non-human label for use in other fields
yield outputTrackConfig['label']
def add_final_data(self, data):
viz_data = {}
if len(data['visibility']['default_on']) > 0:
viz_data['defaultTracks'] = ','.join(data['visibility']['default_on'])
if len(data['visibility']['always']) > 0:
viz_data['alwaysOnTracks'] = ','.join(data['visibility']['always'])
if len(data['visibility']['force']) > 0:
viz_data['forceTracks'] = ','.join(data['visibility']['force'])
generalData = {}
if data['general']['aboutDescription'] is not None:
generalData['aboutThisBrowser'] = {'description': data['general']['aboutDescription'].strip()}
generalData['view'] = {
'trackPadding': data['general']['trackPadding']
}
generalData['shareLink'] = (data['general']['shareLink'] == 'true')
generalData['show_tracklist'] = (data['general']['show_tracklist'] == 'true')
generalData['show_nav'] = (data['general']['show_nav'] == 'true')
generalData['show_overview'] = (data['general']['show_overview'] == 'true')
generalData['show_menu'] = (data['general']['show_menu'] == 'true')
generalData['hideGenomeOptions'] = (data['general']['hideGenomeOptions'] == 'true')
generalData['plugins'] = data['plugins']
viz_data.update(generalData)
self._add_json(viz_data)
if 'GCContent' in data['plugins_python']:
self._add_track_json({
"storeClass": "JBrowse/Store/SeqFeature/SequenceChunks",
"type": "GCContent/View/Track/GCContentXY",
"label": "GCContentXY",
"urlTemplate": "seq/{refseq_dirpath}/{refseq}-",
"bicolor_pivot": 0.5
# TODO: Expose params for everyone.
})
if 'ComboTrackSelector' in data['plugins_python']:
with open(os.path.join(self.outdir, 'data', 'trackList.json'), 'r') as handle:
trackListJson = json.load(handle)
trackListJson.update({
"trackSelector": {
"renameFacets": {
"tool_tool": "Tool ID",
"tool_tool_id": "Tool ID",
"tool_tool_version": "Tool Version",
"dataset_edam_format": "EDAM",
"dataset_size": "Size",
"history_display_name": "History Name",
"history_user_email": "Owner",
"metadata_dbkey": "Dbkey",
},
"displayColumns": [
"key",
"tool_tool",
"tool_tool_version",
"dataset_edam_format",
"dataset_size",
"history_display_name",
"history_user_email",
"metadata_dbkey",
],
"type": "Faceted",
"title": ["Galaxy Metadata"],
"escapeHTMLInData": False
},
"trackMetadata": {
"indexFacets": [
"category",
"key",
"tool_tool_id",
"tool_tool_version",
"dataset_edam_format",
"history_user_email",
"history_display_name"
]
}
})
with open(os.path.join(self.outdir, 'data', 'trackList2.json'), 'w') as handle:
json.dump(trackListJson, handle)
def clone_jbrowse(self, jbrowse_dir, destination):
"""Clone a JBrowse directory into a destination directory.
"""
# JBrowse seems to have included some bad symlinks, cp ignores bad symlinks
# unlike copytree
cmd = ['cp', '-r', os.path.join(jbrowse_dir, '.'), destination]
log.debug(' '.join(cmd))
subprocess.check_call(cmd)
cmd = ['mkdir', '-p', os.path.join(destination, 'data', 'raw')]
log.debug(' '.join(cmd))
subprocess.check_call(cmd)
# http://unix.stackexchange.com/a/38691/22785
# JBrowse releases come with some broken symlinks
cmd = ['find', destination, '-type', 'l', '-xtype', 'l']
log.debug(' '.join(cmd))
symlinks = subprocess.check_output(cmd)
# one path per line; iterating the raw output would yield single characters, not paths
for i in symlinks.splitlines():
try:
os.unlink(i)
except OSError:
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="", epilog="")
parser.add_argument('xml', type=argparse.FileType('r'), help='Track Configuration')
parser.add_argument('--jbrowse', help='Folder containing a jbrowse release')
parser.add_argument('--outdir', help='Output directory', default='out')
parser.add_argument('--standalone', help='Standalone mode includes a copy of JBrowse', action='store_true')
parser.add_argument('--version', '-V', action='version', version="%(prog)s 0.7.0")
args = parser.parse_args()
tree = ET.parse(args.xml.name)
root = tree.getroot()
jc = JbrowseConnector(
jbrowse=args.jbrowse,
outdir=args.outdir,
genomes=[
{
'path': os.path.realpath(x.attrib['path']),
'meta': metadata_from_node(x.find('metadata'))
}
for x in root.findall('metadata/genomes/genome')
],
standalone=args.standalone,
gencode=root.find('metadata/gencode').text
)
extra_data = {
'visibility': {
'default_on': [],
'default_off': [],
'force': [],
'always': [],
},
'general': {
'defaultLocation': root.find('metadata/general/defaultLocation').text,
'trackPadding': int(root.find('metadata/general/trackPadding').text),
'shareLink': root.find('metadata/general/shareLink').text,
'aboutDescription': root.find('metadata/general/aboutDescription').text,
'show_tracklist': root.find('metadata/general/show_tracklist').text,
'show_nav': root.find('metadata/general/show_nav').text,
'show_overview': root.find('metadata/general/show_overview').text,
'show_menu': root.find('metadata/general/show_menu').text,
'hideGenomeOptions': root.find('metadata/general/hideGenomeOptions').text,
},
'plugins': [{
'location': 'https://cdn.rawgit.com/TAMU-CPT/blastview/97572a21b7f011c2b4d9a0b5af40e292d694cbef/',
'name': 'BlastView'
}],
'plugins_python': ['BlastView'],
}
plugins = root.find('plugins').attrib
if plugins['GCContent'] == 'True':
extra_data['plugins_python'].append('GCContent')
extra_data['plugins'].append({
'location': 'https://cdn.rawgit.com/elsiklab/gccontent/5c8b0582ecebf9edf684c76af8075fb3d30ec3fa/',
'name': 'GCContent'
})
if plugins['Bookmarks'] == 'True':
extra_data['plugins'].append({
'location': 'https://cdn.rawgit.com/TAMU-CPT/bookmarks-jbrowse/5242694120274c86e1ccd5cb0e5e943e78f82393/',
'name': 'Bookmarks'
})
if plugins['ComboTrackSelector'] == 'True':
extra_data['plugins_python'].append('ComboTrackSelector')
extra_data['plugins'].append({
'location': 'https://cdn.rawgit.com/Arabidopsis-Information-Portal/ComboTrackSelector/52403928d5ccbe2e3a86b0fa5eb8e61c0f2e2f57',
'icon': 'https://galaxyproject.org/images/logos/galaxy-icon-square.png',
'name': 'ComboTrackSelector'
})
if plugins['theme'] == 'Minimalist':
extra_data['plugins'].append({
'location': 'https://cdn.rawgit.com/erasche/jbrowse-minimalist-theme/d698718442da306cf87f033c72ddb745f3077775/',
'name': 'MinimalistTheme'
})
elif plugins['theme'] == 'Dark':
extra_data['plugins'].append({
'location': 'https://cdn.rawgit.com/erasche/jbrowse-dark-theme/689eceb7e33bbc1b9b15518d45a5a79b2e5d0a26/',
'name': 'DarkTheme'
})
GALAXY_INFRASTRUCTURE_URL = root.find('metadata/galaxyUrl').text
# Sometimes this comes as `localhost` without a protocol
if not GALAXY_INFRASTRUCTURE_URL.startswith('http'):
# so we'll prepend `http://` and hope for the best. Requests *should*
# be GET and not POST so it should redirect OK
GALAXY_INFRASTRUCTURE_URL = 'http://' + GALAXY_INFRASTRUCTURE_URL
for track in root.findall('tracks/track'):
track_conf = {}
track_conf['trackfiles'] = []
for x in track.findall('files/trackFile'):
metadata = metadata_from_node(x.find('metadata'))
track_conf['trackfiles'].append((
os.path.realpath(x.attrib['path']),
x.attrib['ext'],
x.attrib['label'],
metadata
))
track_conf['category'] = track.attrib['cat']
track_conf['format'] = track.attrib['format']
try:
# Only pertains to gff3 + blastxml. TODO?
track_conf['style'] = {t.tag: t.text for t in track.find('options/style')}
except TypeError:
# no <options>/<style> block for this track
track_conf['style'] = {}
track_conf['conf'] = etree_to_dict(track.find('options'))
keys = jc.process_annotations(track_conf)
for key in keys:
extra_data['visibility'][track.attrib.get('visibility', 'default_off')].append(key)
jc.add_final_data(extra_data)
jc.generate_names()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Implements rotations, including spherical rotations as defined in WCS Paper II
[1]_
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import math
import numpy as np
from .core import Model
from .parameters import Parameter
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
from astropy import units as u
from .utils import _to_radian, _to_orig_unit
__all__ = ['RotateCelestial2Native', 'RotateNative2Celestial', 'Rotation2D',
'EulerAngleRotation', 'RotationSequence3D', 'SphericalRotationSequence']
def _create_matrix(angles, axes_order):
matrices = []
for angle, axis in zip(angles, axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = angle.item()
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
result = matrix_product(*matrices[::-1])
return result
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
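# Informal round-trip check of the two helpers above (degrees in, degrees out):
# >>> x, y, z = spherical2cartesian(30.0, 45.0)
# >>> cartesian2spherical(x, y, z)   # approximately (30.0, 45.0)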
class RotationSequence3D(Model):
"""
Perform a series of rotations about different axis in 3D space.
Positive angles represent a counter-clockwise rotation.
Parameters
----------
angles : array_like
Angles of rotation in deg in the order of axes_order.
axes_order : str
A sequence of 'x', 'y', 'z' corresponding to axis of rotation.
Examples
--------
>>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx')
"""
standard_broadcasting = False
_separable = False
n_inputs = 3
n_outputs = 3
angles = Parameter(default=[], getter=_to_orig_unit, setter=_to_radian)
def __init__(self, angles, axes_order, name=None):
self.axes = ['x', 'y', 'z']
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError("Unrecognized axis label {0}; "
"should be one of {1} ".format(unrecognized,
self.axes))
self.axes_order = axes_order
if len(angles) != len(axes_order):
raise ValueError("The number of angles {0} should match the number \
of axes {1}.".format(len(angles),
len(axes_order)))
super().__init__(angles, name=name)
self._inputs = ('x', 'y', 'z')
self._outputs = ('x', 'y', 'z')
@property
def inverse(self):
"""Inverse rotation."""
angles = self.angles.value[::-1] * -1
return self.__class__(angles, axes_order=self.axes_order[::-1])
def evaluate(self, x, y, z, angles):
"""
Apply the rotation to a set of 3D Cartesian coordinates.
"""
if not (x.shape == y.shape == z.shape):
raise ValueError("Expected input arrays to have the same shape")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten(), z.flatten()])
result = np.dot(_create_matrix(angles[0], self.axes_order), inarr)
x, y, z = result[0], result[1], result[2]
x.shape = y.shape = z.shape = orig_shape
return x, y, z
class SphericalRotationSequence(RotationSequence3D):
"""
Perform a sequence of rotations about arbitrary number of axes
in spherical coordinates.
Parameters
----------
angles : list
A sequence of angles (in deg).
axes_order : str
A sequence of characters ('x', 'y', or 'z') corresponding to the
axis of rotation and matching the order in ``angles``.
"""
def __init__(self, angles, axes_order, name=None, **kwargs):
self._n_inputs = 2
self._n_outputs = 2
super().__init__(angles, axes_order=axes_order, name=name, **kwargs)
self._inputs = ("lon", "lat")
self._outputs = ("lon", "lat")
@property
def n_inputs(self):
return self._n_inputs
@property
def n_outputs(self):
return self._n_outputs
def evaluate(self, lon, lat, angles):
x, y, z = spherical2cartesian(lon, lat)
x1, y1, z1 = super().evaluate(x, y, z, angles)
lon, lat = cartesian2spherical(x1, y1, z1)
return lon, lat
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
if isinstance(alpha, np.ndarray) and alpha.ndim == 2:
shape = alpha.shape # remember the original 2D shape before flattening so the outputs can be restored below
alpha = alpha.flatten()
delta = delta.flatten()
inp = spherical2cartesian(alpha, delta)
matrix = _create_matrix([phi, theta, psi], axes_order)
result = np.dot(matrix, inp)
a, b = cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
""" Input units. """
return {'alpha': u.deg, 'delta': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha': u.deg, 'delta': u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity`
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
n_inputs = 2
n_outputs = 2
phi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
theta = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
psi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ['x', 'y', 'z']
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3,"
"got {}".format(axes_order))
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError("Unrecognized axis label {}; "
"should be one of {} ".format(unrecognized, self.axes))
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
self._inputs = ('alpha', 'delta')
self._outputs = ('alpha', 'delta')
@property
def inverse(self):
return self.__class__(phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1])
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lat = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lon_pole = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = 'zxz'
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole,
self.axes_order)
mask = alpha < 0
if isinstance(mask, np.ndarray):
alpha[mask] += 360
else:
alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity`
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity`
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they
should be in units of deg. Inputs are angles on the native sphere.
Outputs are angles on the celestial sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
""" Input units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
self.inputs = ('phi_N', 'theta_N')
self.outputs = ('alpha_C', 'delta_C')
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles in the Native coordinate system.
lon, lat, lon_pole : float (in deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles on the Celestial sphere.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = - (np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
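# A minimal usage sketch (the fiducial point and native angles below are
# illustrative assumptions): map native spherical coordinates onto celestial
# ones; ``inverse`` returns the matching RotateCelestial2Native model.
#
#     >>> n2c = RotateNative2Celestial(lon=5.63, lat=-72.5, lon_pole=180)
#     >>> alpha_C, delta_C = n2c(1.0, -89.0)
#     >>> c2n = n2c.inverse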
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity`
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity`
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be
in units of deg. Inputs are angles on the celestial sphere.
Outputs are angles on the native sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
""" Input units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
@property
def return_units(self):
""" Output units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
# Inputs are angles on the celestial sphere
self.inputs = ('alpha_C', 'delta_C')
# Outputs are angles on the native sphere
self.outputs = ('phi_N', 'theta_N')
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles in the Celestial coordinate frame.
lon, lat, lon_pole : float (deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles on the Native sphere.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = (np.pi / 2 + lon)
theta = (np.pi / 2 - lat)
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation and vice-versa.
Parameters
----------
angle : float or `~astropy.units.Quantity`
Angle of rotation (if float it should be in deg).
"""
n_inputs = 2
n_outputs = 2
_separable = False
angle = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, angle=angle, **kwargs):
super().__init__(angle=angle, **kwargs)
self._inputs = ("x", "y")
self._outputs = ("x", "y")
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : array_like
Input quantities
angle : float (deg) or `~astropy.units.Quantity`
Angle of rotations.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# If one argument has units, enforce they both have units and they are compatible.
x_unit = getattr(x, 'unit', None)
y_unit = getattr(y, 'unit', None)
has_units = x_unit is not None and y_unit is not None
if x_unit != y_unit:
if has_units and y_unit.is_equivalent(x_unit):
y = y.to(x_unit)
y_unit = x_unit
else:
raise u.UnitsError("x and y must have compatible units")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten()])
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.rad)
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if has_units:
return u.Quantity(x, unit=x_unit), u.Quantity(y, unit=y_unit)
else:
return x, y
@staticmethod
def _compute_matrix(angle):
return np.array([[math.cos(angle), -math.sin(angle)],
[math.sin(angle), math.cos(angle)]],
dtype=np.float64)
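# A minimal usage sketch for Rotation2D (the inputs are illustrative): a 90 deg
# counter-clockwise rotation maps the x unit vector onto the y axis.
#
#     >>> import numpy as np
#     >>> rot = Rotation2D(angle=90.0)
#     >>> x, y = rot(np.array([1.0]), np.array([0.0]))   # -> (~[0.], ~[1.])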
|
|
"""Conversion tool from CTF to FIF
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os
from os import path as op
import numpy as np
from ...utils import verbose, logger
from ...externals.six import string_types
from ..base import _BaseRaw
from ..utils import _mult_cal_one, _blk_read_lims
from .res4 import _read_res4, _make_ctf_name
from .hc import _read_hc
from .eeg import _read_eeg, _read_pos
from .trans import _make_ctf_coord_trans_set
from .info import _compose_meas_info
from .constants import CTF
def read_raw_ctf(directory, system_clock='truncate', preload=False,
verbose=None):
"""Raw object from CTF directory
Parameters
----------
directory : str
Path to the CTF data (ending in ``'.ds'``).
system_clock : str
How to treat the system clock. Use "truncate" (default) to truncate
the data file when the system clock drops to zero, and use "ignore"
to ignore the system clock (e.g., if head positions are measured
multiple times during a recording).
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of RawCTF
The raw data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
.. versionadded:: 0.11
"""
return RawCTF(directory, system_clock, preload=preload, verbose=verbose)
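# A minimal usage sketch (the .ds path is hypothetical): read a CTF dataset
# and pull the first second's worth of samples through the Raw interface.
#
#     >>> raw = read_raw_ctf('/data/subject_01.ds', preload=True)
#     >>> data, times = raw[:, :int(raw.info['sfreq'])]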
class RawCTF(_BaseRaw):
"""Raw object from CTF directory
Parameters
----------
directory : str
Path to the CTF data (ending in ``'.ds'``).
system_clock : str
How to treat the system clock. Use "truncate" (default) to truncate
the data file when the system clock drops to zero, and use "ignore"
to ignore the system clock (e.g., if head positions are measured
multiple times during a recording).
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, directory, system_clock='truncate', preload=False,
verbose=None):
# adapted from mne_ctf2fiff.c
if not isinstance(directory, string_types) or \
not directory.endswith('.ds'):
raise TypeError('directory must be a directory ending with ".ds"')
if not op.isdir(directory):
raise ValueError('directory does not exist: "%s"' % directory)
known_types = ['ignore', 'truncate']
if not isinstance(system_clock, string_types) or \
system_clock not in known_types:
raise ValueError('system_clock must be one of %s, not %s'
% (known_types, system_clock))
logger.info('ds directory : %s' % directory)
res4 = _read_res4(directory) # Read the magical res4 file
coils = _read_hc(directory) # Read the coil locations
eeg = _read_eeg(directory) # Read the EEG electrode loc info
# Investigate the coil location data to get the coordinate trans
coord_trans = _make_ctf_coord_trans_set(res4, coils)
digs = _read_pos(directory, coord_trans)
# Compose a structure which makes fiff writing a piece of cake
info = _compose_meas_info(res4, coils, coord_trans, eeg)
info['dig'] += digs
# Determine how our data is distributed across files
fnames = list()
last_samps = list()
raw_extras = list()
while True:
suffix = 'meg4' if len(fnames) == 0 else ('%d_meg4' % len(fnames))
meg4_name = _make_ctf_name(directory, suffix, raise_error=False)
if meg4_name is None:
break
# check how much data is in the file
sample_info = _get_sample_info(meg4_name, res4, system_clock)
if sample_info['n_samp'] == 0:
break
if len(fnames) == 0:
info['buffer_size_sec'] = \
sample_info['block_size'] / info['sfreq']
info['filename'] = directory
fnames.append(meg4_name)
last_samps.append(sample_info['n_samp'] - 1)
raw_extras.append(sample_info)
super(RawCTF, self).__init__(
info, preload, last_samps=last_samps, filenames=fnames,
raw_extras=raw_extras, orig_format='int', verbose=verbose)
@verbose
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data"""
si = self._raw_extras[fi]
offset = 0
trial_start_idx, r_lims, d_lims = _blk_read_lims(start, stop,
int(si['block_size']))
with open(self._filenames[fi], 'rb') as fid:
for bi in range(len(r_lims)):
samp_offset = (bi + trial_start_idx) * si['res4_nsamp']
n_read = min(si['n_samp_tot'] - samp_offset, si['block_size'])
# read the chunk of data
pos = CTF.HEADER_SIZE
pos += samp_offset * si['n_chan'] * 4
fid.seek(pos, 0)
this_data = np.fromfile(fid, '>i4',
count=si['n_chan'] * n_read)
this_data.shape = (si['n_chan'], n_read)
this_data = this_data[:, r_lims[bi, 0]:r_lims[bi, 1]]
data_view = data[:, d_lims[bi, 0]:d_lims[bi, 1]]
_mult_cal_one(data_view, this_data, idx, cals, mult)
offset += n_read
def _get_sample_info(fname, res4, system_clock):
"""Helper to determine the number of valid samples"""
logger.info('Finding samples for %s: ' % (fname,))
if CTF.SYSTEM_CLOCK_CH in res4['ch_names']:
clock_ch = res4['ch_names'].index(CTF.SYSTEM_CLOCK_CH)
else:
clock_ch = None
for k, ch in enumerate(res4['chs']):
if ch['ch_name'] == CTF.SYSTEM_CLOCK_CH:
clock_ch = k
break
with open(fname, 'rb') as fid:
fid.seek(0, os.SEEK_END)
st_size = fid.tell()
fid.seek(0, 0)
if (st_size - CTF.HEADER_SIZE) % (4 * res4['nsamp'] *
res4['nchan']) != 0:
raise RuntimeError('The number of samples is not an even multiple '
'of the trial size')
n_samp_tot = (st_size - CTF.HEADER_SIZE) // (4 * res4['nchan'])
n_trial = n_samp_tot // res4['nsamp']
n_samp = n_samp_tot
if clock_ch is None:
logger.info(' System clock channel is not available, assuming '
'all samples to be valid.')
elif system_clock == 'ignore':
logger.info(' System clock channel is available, but ignored.')
else: # use it
logger.info(' System clock channel is available, checking '
'which samples are valid.')
for t in range(n_trial):
# Skip to the correct trial
samp_offset = t * res4['nsamp']
offset = CTF.HEADER_SIZE + (samp_offset * res4['nchan'] +
(clock_ch * res4['nsamp'])) * 4
fid.seek(offset, 0)
this_data = np.fromstring(fid.read(4 * res4['nsamp']), '>i4')
if len(this_data) != res4['nsamp']:
raise RuntimeError('Cannot read data for trial %d'
% (t + 1))
end = np.where(this_data == 0)[0]
if len(end) > 0:
n_samp = samp_offset + end[0]
break
if n_samp < res4['nsamp']:
n_trial = 1
logger.info(' %d x %d = %d samples from %d chs'
% (n_trial, n_samp, n_samp, res4['nchan']))
else:
n_trial = n_samp // res4['nsamp']
n_omit = n_samp_tot - n_samp
n_samp = n_trial * res4['nsamp']
logger.info(' %d x %d = %d samples from %d chs'
% (n_trial, res4['nsamp'], n_samp, res4['nchan']))
if n_omit != 0:
logger.info(' %d samples omitted at the end' % n_omit)
return dict(n_samp=n_samp, n_samp_tot=n_samp_tot, block_size=res4['nsamp'],
n_trial=n_trial, res4_nsamp=res4['nsamp'],
n_chan=res4['nchan'])
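# For reference, the byte-offset arithmetic used above assumes the .meg4
# layout: a fixed header of CTF.HEADER_SIZE bytes followed by trials, each
# stored channel-major as big-endian int32, i.e. ``nsamp`` consecutive samples
# per channel. A sketch of the offset of one channel within one trial
# (illustrative helper, not part of this module):
#
#     def _ctf_channel_offset(header_size, nchan, nsamp, trial, channel):
#         samp_offset = trial * nsamp
#         return header_size + (samp_offset * nchan + channel * nsamp) * 4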
|
|
import sys
class colorize(str):
"""
Pretty simple to use::
colorize.make('foo').bold
colorize.make('foo').green
colorize.make('foo').yellow
colorize.make('foo').red
colorize.make('foo').blue
Otherwise you could go the long way (for example if you are
testing this class)::
string = colorize('foo')
string._set_attributes()
string.red
"""
def __init__(self, string):
self.stdout = sys.__stdout__
self.appends = ''
self.prepends = ''
self.isatty = self.stdout.isatty()
def _set_attributes(self):
"""
Sets the attributes here because the str class does not
allow passing anything other than a string to the constructor,
so we can't really mess with the other attributes.
"""
for k, v in self.__colors__.items():
setattr(self, k, self.make_color(v))
def make_color(self, color):
if not self.isatty or self.is_windows:
return self
return color + self + '\033[0m' + self.appends
@property
def __colors__(self):
return dict(
blue = '\033[34m',
green = '\033[92m',
yellow = '\033[33m',
red = '\033[91m',
bold = '\033[1m',
ends = '\033[0m'
)
@property
def is_windows(self):
if sys.platform == 'win32':
return True
return False
@classmethod
def make(cls, string):
"""
A helper method to return itself and work around the fact that
the str object doesn't allow extra arguments passed in to the
constructor.
"""
obj = cls(string)
obj._set_attributes()
return obj
#
# Common string manipulations
#
red_arrow = colorize.make('-->').red
blue_arrow = colorize.make('-->').blue
yellow = lambda x: colorize.make(x).yellow
blue = lambda x: colorize.make(x).blue
green = lambda x: colorize.make(x).green
red = lambda x: colorize.make(x).red
bold = lambda x: colorize.make(x).bold
CRITICAL = 5
ERROR = 4
WARNING = 3
INFO = 2
DEBUG = 1
_level_names = {
CRITICAL : 'critical',
WARNING : 'warning',
INFO : 'info',
ERROR : 'error',
DEBUG : 'debug'
}
_reverse_level_names = dict((v, k) for (k, v) in _level_names.items())
_level_colors = {
'remote' : 'bold',
'critical' : 'red',
'warning' : 'yellow',
'info' : 'blue',
'debug' : 'blue',
'error' : 'red'
}
class _Write(object):
def __init__(self, _writer=None, prefix='', suffix='', clear_line=False, flush=False):
self._writer = _writer or sys.stdout
self.suffix = suffix
self.prefix = prefix
self.flush = flush
self.clear_line = clear_line
def bold(self, string):
self.write(bold(string))
def raw(self, string):
self.write(string + '\n')
def write(self, line):
padding = ''
if self.clear_line:
if len(line) > 80:
padding = ' ' * 10
else:
padding = ' ' * (80 - len(line))
line = line + padding
self._writer.write(self.prefix + line + self.suffix)
if self.flush:
self._writer.flush()
write = _Write()
loader = _Write(prefix='\r', clear_line=True)
class LogMessage(object):
def __init__(self, level_name, message, writer=None, config_level=None):
self.level_name = level_name
self.message = message
self.writer = writer or sys.stdout
self.config_level = config_level or self.get_config_level()
def skip(self):
if self.level_int >= self.config_level:
return False
return True
def header(self):
colored = colorize.make(self.base_string)
return getattr(colored, self.level_color)
@property
def base_string(self):
if self.config_level < 2:
return "--> [%s]" % self.level_name
return "-->"
@property
def level_int(self):
if self.level_name == 'remote':
return 2
return _reverse_level_names.get(self.level_name, 4)
@property
def level_color(self):
return _level_colors.get(self.level_name, 'info')
def line(self):
msg = self.message.rstrip('\n')
return "%s %s\n" % (self.header(), msg)
def write(self):
if not self.skip():
self.writer.write(self.line())
def get_config_level(self):
import ceph_medic
level = ceph_medic.config.verbosity
return _reverse_level_names.get(level, 4)
def error(message):
return LogMessage('error', message).write()
def debug(message):
return LogMessage('debug', message).write()
def info(message):
return LogMessage('info', message).write()
def warning(message):
return LogMessage('warning', message).write()
def critical(message):
return LogMessage('critical', message).write()
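# A minimal usage sketch (the message is illustrative; config_level is passed
# explicitly here so the ceph_medic import in get_config_level is not needed):
#
#     >>> LogMessage('warning', 'low disk space', config_level=1).write()
#
# This writes "--> [warning] low disk space" to stdout (with ANSI colors when
# attached to a terminal); messages below the configured level are skipped.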
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo.config import cfg
import testtools
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import exceptions
from neutron import context
from neutron.db import api as db
from neutron.db import quota_db
from neutron import quota
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
TARGET_PLUGIN = ('neutron.plugins.linuxbridge.lb_neutron_plugin'
'.LinuxBridgePluginV2')
_get_path = test_api_v2._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
def setUp(self):
super(QuotaExtensionTestCase, self).setUp()
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
# Create the default configurations
args = ['--config-file', test_extensions.etcdir('neutron.conf.test')]
config.parse(args=args)
# Update the plugin and extensions path
self.setup_coreplugin(TARGET_PLUGIN)
cfg.CONF.set_override(
'quota_items',
['network', 'subnet', 'port', 'extra1'],
group='QUOTAS')
quota.QUOTAS = quota.QuotaEngine()
quota.register_resources_from_config()
self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
self.plugin = self._plugin_patcher.start()
self.plugin.return_value.supported_extension_aliases = ['quotas']
# QUOTAS will register the items in conf when starting
# extra1 here is added later, so have to do it manually
quota.QUOTAS.register_resource_by_name('extra1')
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
db.configure_db()
app = config.load_paste_app('extensions_test_app')
ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self.api = webtest.TestApp(ext_middleware)
def tearDown(self):
self._plugin_patcher.stop()
self.api = None
self.plugin = None
db.clear_db()
cfg.CONF.reset()
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
super(QuotaExtensionTestCase, self).tearDown()
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.db.quota_db.DbQuotaDriver',
group='QUOTAS')
super(QuotaExtensionDbTestCase, self).setUp()
def test_quotas_loaded_right(self):
res = self.api.get(_get_path('quotas', fmt=self.fmt))
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
self.assertEqual(200, res.status_int)
def test_quotas_default_values(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
self.assertEqual(-1, quota['quota']['extra1'])
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_show_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_tenant(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_list_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
def test_list_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_with_non_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_negative_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_to_unlimited(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_exceeding_current_limit(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_with_non_support_resource_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_update_attributes(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['extra1'])
def test_delete_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(204, res.status_int)
def test_delete_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_quotas_loaded_bad_returns_404(self):
res = self.api.get(_get_path('quotas'), expect_errors=True)
self.assertEqual(404, res.status_int)
def test_quotas_limit_check(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=tenant_id,
fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=4)
def test_quotas_limit_check_with_invalid_quota_value(self):
tenant_id = 'tenant_id1'
with testtools.ExpectedException(exceptions.InvalidQuotaValue):
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=-2)
def test_quotas_get_tenant_from_request_context(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
env = {'neutron.context': context.Context('', '',
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(400, res.status_int)
class QuotaExtensionDbTestCaseXML(QuotaExtensionDbTestCase):
fmt = 'xml'
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.quota.ConfDriver',
group='QUOTAS')
super(QuotaExtensionCfgTestCase, self).setUp()
def test_quotas_default_values(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
self.assertEqual(-1, quota['quota']['extra1'])
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
def test_show_quotas_without_admin_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_forbidden(self):
tenant_id = 'tenant_id1'
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas),
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quotas_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
class QuotaExtensionCfgTestCaseXML(QuotaExtensionCfgTestCase):
fmt = 'xml'
class TestDbQuotaDriver(base.BaseTestCase):
"""Test for neutron.db.quota_db.DbQuotaDriver."""
def test_get_tenant_quotas_arg(self):
"""Call neutron.db.quota_db.DbQuotaDriver._get_quotas."""
driver = quota_db.DbQuotaDriver()
ctx = context.Context('', 'bar')
foo_quotas = {'network': 5}
default_quotas = {'network': 10}
target_tenant = 'foo'
with mock.patch.object(quota_db.DbQuotaDriver,
'get_tenant_quotas',
return_value=foo_quotas) as get_tenant_quotas:
quotas = driver._get_quotas(ctx,
target_tenant,
default_quotas,
['network'])
self.assertEqual(quotas, foo_quotas)
get_tenant_quotas.assert_called_once_with(ctx,
default_quotas,
target_tenant)
class TestQuotaDriverLoad(base.BaseTestCase):
def setUp(self):
super(TestQuotaDriverLoad, self).setUp()
# Make sure QuotaEngine is reinitialized in each test.
quota.QUOTAS._driver = None
def _test_quota_driver(self, cfg_driver, loaded_driver,
with_quota_db_module=True):
cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
with mock.patch.dict(sys.modules, {}):
if (not with_quota_db_module and
'neutron.db.quota_db' in sys.modules):
del sys.modules['neutron.db.quota_db']
driver = quota.QUOTAS.get_driver()
self.assertEqual(loaded_driver, driver.__class__.__name__)
def test_quota_db_driver_with_quotas_table(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'DbQuotaDriver', True)
def test_quota_db_driver_fallback_conf_driver(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'ConfDriver', False)
def test_quota_conf_driver(self):
self._test_quota_driver('neutron.quota.ConfDriver',
'ConfDriver', True)
|
|
#!/usr/bin/env python
"""Simple parsers for the output of WMI queries."""
import binascii
import calendar
from grr.lib import parsers
from grr.lib import rdfvalue
from grr.lib import time_utils
class WMIInstalledSoftwareParser(parsers.WMIQueryParser):
"""Parser for WMI output. Yields SoftwarePackage rdfvalues."""
output_types = ["SoftwarePackage"]
supported_artifacts = ["WMIInstalledSoftware"]
def Parse(self, query, result, knowledge_base):
"""Parse the wmi packages output."""
_ = query, knowledge_base
status = rdfvalue.SoftwarePackage.InstallState.INSTALLED
soft = rdfvalue.SoftwarePackage(
name=result["Name"],
description=result["Description"],
version=result["Version"],
install_state=status)
yield soft
class WMIHotfixesSoftwareParser(parsers.WMIQueryParser):
"""Parser for WMI output. Yields SoftwarePackage rdfvalues."""
output_types = ["SoftwarePackage"]
supported_artifacts = ["WMIHotFixes"]
def Parse(self, query, result, knowledge_base):
"""Parse the wmi packages output."""
_ = query, knowledge_base
status = rdfvalue.SoftwarePackage.InstallState.INSTALLED
result = result.ToDict()
# InstalledOn comes back in a godawful format such as '7/10/2013'.
installed_on = time_utils.AmericanDateToEpoch(result.get("InstalledOn", ""))
soft = rdfvalue.SoftwarePackage(
name=result.get("HotFixID"),
description=result.get("Caption"),
installed_by=result.get("InstalledBy"),
install_state=status,
installed_on=installed_on)
yield soft
class WMIUserParser(parsers.WMIQueryParser):
"""Parser for WMI Win32_UserAccount and Win32_UserProfile output."""
output_types = ["KnowledgeBaseUser"]
supported_artifacts = ["WMIProfileUsersHomeDir",
"WMIAccountUsersDomain",
"WMIUsers"]
account_mapping = {
# Win32_UserAccount
"Name": "username",
"Domain": "userdomain",
"SID": "sid",
# Win32_UserProfile
"LocalPath": "homedir"
}
def Parse(self, query, result, knowledge_base):
"""Parse the wmi Win32_UserAccount output."""
_ = query, knowledge_base
kb_user = rdfvalue.KnowledgeBaseUser()
for wmi_key, kb_key in self.account_mapping.items():
try:
kb_user.Set(kb_key, result[wmi_key])
except KeyError:
pass
# We need at least a sid or a username. If these are missing it's likely we
# retrieved just the userdomain for an AD account that has a name collision
# with a local account that is correctly populated. We drop the bogus
# domain account.
if kb_user.sid or kb_user.username:
yield kb_user
class WMILogicalDisksParser(parsers.WMIQueryParser):
"""Parser for LogicalDisk WMI output. Yields Volume rdfvalues."""
output_types = ["Volume"]
supported_artifacts = ["WMILogicalDisks"]
def Parse(self, query, result, knowledge_base):
"""Parse the wmi packages output."""
_ = query, knowledge_base
result = result.ToDict()
winvolume = rdfvalue.WindowsVolume(drive_letter=result.get("DeviceID"),
drive_type=result.get("DriveType"))
try:
size = int(result.get("Size"))
except (ValueError, TypeError):  # Size may be missing or non-numeric
size = None
try:
free_space = int(result.get("FreeSpace"))
except (ValueError, TypeError):  # FreeSpace may be missing or non-numeric
free_space = None
# Since we don't get the sector sizes from WMI, we just set them at 1 byte
volume = rdfvalue.Volume(windows=winvolume,
name=result.get("VolumeName"),
file_system_type=result.get("FileSystem"),
serial_number=result.get("VolumeSerialNumber"),
sectors_per_allocation_unit=1,
bytes_per_sector=1,
total_allocation_units=size,
actual_available_allocation_units=free_space)
yield volume
class WMIComputerSystemProductParser(parsers.WMIQueryParser):
"""Parser for WMI Output. Yeilds Identifying Number."""
output_types = ["HardwareInfo"]
supported_artifacts = ["WMIComputerSystemProduct"]
def Parse(self, query, result, knowledge_base):
"""Parse the WMI output to get Identifying Number."""
# Currently we are only grabbing the Identifying Number
# as the serial number (catches the unique number for VMs).
# This could be changed to include more information from
# Win32_ComputerSystemProduct.
_ = query, knowledge_base
yield rdfvalue.HardwareInfo(serial_number=result["IdentifyingNumber"])
class WMIInterfacesParser(parsers.WMIQueryParser):
"""Parser for WMI output. Yields SoftwarePackage rdfvalues."""
output_types = ["Interface", "DNSClientConfiguration"]
supported_artifacts = []
def WMITimeStrToRDFDatetime(self, timestr):
"""Return RDFDatetime from string like 20140825162259.000000-420.
Args:
timestr: WMI time string
Returns:
rdfvalue.RDFDatetime
Note that the timezone handling is done manually because the UTC offset is
given in minutes rather than as +-HHMM.
"""
# We use manual parsing here because the time functions provided (datetime,
# dateutil) do not properly deal with timezone information.
offset_minutes = timestr[21:]
year = timestr[:4]
month = timestr[4:6]
day = timestr[6:8]
hours = timestr[8:10]
minutes = timestr[10:12]
seconds = timestr[12:14]
microseconds = timestr[15:21]
unix_seconds = calendar.timegm(
map(int, [year, month, day, hours, minutes, seconds]))
unix_seconds -= int(offset_minutes) * 60
return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds))
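# Worked example of the slicing above (values illustrative): for the string
# "20140825162259.000000-420" the pieces are year=2014, month=08, day=25,
# 16:22:59, microseconds=000000 and offset_minutes=-420, so the UTC epoch is
# calendar.timegm((2014, 8, 25, 16, 22, 59)) minus (-420 * 60), i.e. the
# local timestamp plus 25200 seconds.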
def _ConvertIPs(self, io_tuples, interface, output_dict):
for inputkey, outputkey in io_tuples:
addresses = []
if isinstance(interface[inputkey], list):
for ip_address in interface[inputkey]:
addresses.append(rdfvalue.NetworkAddress(
human_readable_address=ip_address))
else:
addresses.append(rdfvalue.NetworkAddress(
human_readable_address=interface[inputkey]))
output_dict[outputkey] = addresses
return output_dict
def Parse(self, query, result, knowledge_base):
"""Parse the wmi packages output."""
_ = query, knowledge_base
args = {"ifname": result["Description"]}
args["mac_address"] = binascii.unhexlify(
result["MACAddress"].replace(":", ""))
self._ConvertIPs([("IPAddress", "addresses"),
("DefaultIPGateway", "ip_gateway_list"),
("DHCPServer", "dhcp_server_list")], result, args)
if "DHCPLeaseExpires" in result:
args["dhcp_lease_expires"] = self.WMITimeStrToRDFDatetime(
result["DHCPLeaseExpires"])
if "DHCPLeaseObtained" in result:
args["dhcp_lease_obtained"] = self.WMITimeStrToRDFDatetime(
result["DHCPLeaseObtained"])
yield rdfvalue.Interface(**args)
yield rdfvalue.DNSClientConfiguration(
dns_server=result["DNSServerSearchOrder"],
dns_suffix=result["DNSDomainSuffixSearchOrder"])
|
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.conf import settings
from django.template import loader
from django.template.context import Context
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth.tokens import PasswordResetTokenGenerator, default_token_generator
from django.core import validators
from .helper import Memail
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.core.mail import send_mail
import os, string, random
MILESTONE_STATUS = (
('planned', 'Planned'),
('started', 'Started'),
('finished', 'Finished'),
)
PIETRACK_ROLES = (
('PIE_Admin', 'PIE Admin'),
('Org_Admin', 'Organization Admin'),
('PIE_User', 'PIE User'),
)
def rand_str(number):
return ''.join(random.sample(string.ascii_lowercase, number))
def url(self, filename):
if self.__class__ == "Project":
return "%s/%s/%s" % (self.slug, rand_str(6), filename)
return "%s/%s/%s" % (self.project.slug, rand_str(6), filename)
class Organization(models.Model):
name = models.CharField(max_length=250, verbose_name=_("name"), unique=True)
slug = models.SlugField(max_length=250, unique=True, null=False, blank=True, verbose_name=_("slug"))
def profile_path(instance, filename):
return os.path.join('profile/', str(instance.username), str(instance.username) + '.jpg')
class User(AbstractBaseUser, PermissionsMixin):
username = models.CharField(max_length=30, unique=True)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), unique=True)
is_staff = models.BooleanField(_('staff status'), default=False)
is_active = models.BooleanField(_('active'), default=True)
date_joined = models.DateTimeField(_('date joined'), auto_now_add=True)
email_verified = models.BooleanField(default=False)
organization = models.ForeignKey(Organization)
pietrack_role = models.CharField(_('pietrack_role'), max_length=30, choices=PIETRACK_ROLES)
profile_pic = models.FileField(upload_to=profile_path, null=True, blank=True)
biography = models.TextField(_('biography'), default='', blank=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
objects = UserManager()
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
def get_full_name(self):
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
send_mail(subject, message, from_email, [self.email], **kwargs)
def send_reset_pwd_mail(self):
uidb64 = urlsafe_base64_encode(str(self.pk))
token = default_token_generator.make_token(self)
t = loader.get_template('emails/resetpwd_email.html')
c = Context({'uidb64': uidb64, 'token': token})
rendered = t.render(c)
Memail(settings.DEFAULT_FROM_EMAIL, "Reset your password", rendered, self.email)
def send_activate_mail(self):
uidb64 = urlsafe_base64_encode(str(self.pk))
token = default_token_generator.make_token(self)
t = loader.get_template('emails/activate_email.html')
c = Context({'uidb64': uidb64, 'token': token})
subject = "Activate your account"
rendered = t.render(c)
Memail(settings.DEFAULT_FROM_EMAIL, subject, rendered, self.email)
class Project(models.Model):
name = models.CharField(max_length=250, verbose_name=_("name"))
slug = models.SlugField(max_length=250, null=False, blank=True, verbose_name=_("slug"))
description = models.TextField(verbose_name=_("description"))
created_date = models.DateTimeField(verbose_name=_("created date"), auto_now_add=True)
modified_date = models.DateTimeField(verbose_name=_("modified date"))
members = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="projects")
logo = models.FileField(upload_to=url, blank=True, null=True)
organization = models.ForeignKey(Organization)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null = True, blank = True)
def __str__(self):
return self.name
class Meta:
unique_together = [("name", "organization")]
class Attachment(models.Model):
uploaded_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True)
created_date = models.DateTimeField(verbose_name=_("created date"), auto_now_add=True)
attached_file = models.FileField(max_length=500, null=True, blank=True, upload_to=url,
verbose_name=_("attached file"))
order = models.IntegerField(default=0, verbose_name=_("order"))
project = models.ForeignKey(Project)
class Role(models.Model):
name = models.CharField(max_length=200, verbose_name=_("name"))
slug = models.SlugField(max_length=250, null=False, blank=True, verbose_name=_("slug"))
project = models.ForeignKey(Project, null=True, blank=False, related_name="roles", verbose_name=_("project"))
users = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="user_roles")
class Meta:
unique_together = [("slug", "project")]
def __str__(self):
return self.name
class Milestone(models.Model):
name = models.CharField(max_length=200, db_index=True, verbose_name=_("name"))
# TODO: Change the unique restriction to a unique together with the project id
slug = models.SlugField(max_length=250, db_index=True, null=False, blank=True, verbose_name=_("slug"))
project = models.ForeignKey(Project, related_name="milestones", verbose_name=_("project"))
estimated_start = models.DateField(verbose_name=_("estimated start date"))
estimated_finish = models.DateField(verbose_name=_("estimated finish date"))
created_date = models.DateTimeField(verbose_name=_("created date"), auto_now_add=True)
modified_date = models.DateTimeField(verbose_name=_("modified date"))
status = models.CharField(max_length=200, choices=MILESTONE_STATUS, default="planned")
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank = True, null = True)
class Meta:
ordering = ["created_date"]
unique_together = [("name", "project"), ("slug", "project")]
def __str__(self):
return self.name
class Requirement(models.Model):
name = models.CharField(max_length=200, verbose_name=_("name"))
slug = models.SlugField(max_length=250, null=False, blank=True, verbose_name=_("slug"))
description = models.TextField(verbose_name=_("description"))
project = models.ForeignKey(Project, null=True, blank=False, related_name="requirements", verbose_name=_("project"))
milestone = models.ForeignKey(Milestone, null=True, blank=False, related_name="requirements")
def __str__(self):
return self.name
class TicketStatus(models.Model):
name = models.CharField(max_length=255, verbose_name=_("name"))
slug = models.SlugField(max_length=255, null=False, blank=True, verbose_name=_("slug"))
color = models.CharField(max_length=20, default="#999999", verbose_name=_("color"))
project = models.ForeignKey(Project, related_name="task_statuses", verbose_name=_("project"))
order = models.IntegerField(default=1, blank=True)
class Meta:
unique_together = (("project", "name"), ("project", "slug"))
def __str__(self):
return self.name
class Priority(models.Model):
name = models.CharField(max_length=255, verbose_name=_("name"))
slug = models.SlugField(max_length=255, null=False, blank=True, verbose_name=_("slug"))
color = models.CharField(max_length=20, default="#999999", verbose_name=_("color"))
project = models.ForeignKey(Project, related_name="priorities", verbose_name=_("project"))
order = models.IntegerField(default=1, blank=True)
class Meta:
unique_together = ("project", "name")
def __str__(self):
return self.name
class Severity(models.Model):
name = models.CharField(max_length=255, verbose_name=_("name"))
slug = models.SlugField(max_length=255, null=False, blank=True, verbose_name=_("slug"))
color = models.CharField(max_length=20, default="#999999", verbose_name=_("color"))
project = models.ForeignKey(Project, related_name="severities", verbose_name=_("project"))
order = models.IntegerField(default=1, blank=True)
class Meta:
unique_together = ("project", "name")
def __str__(self):
return self.name
class Ticket(models.Model):
name = models.CharField(max_length=200, verbose_name=_("name"))
slug = models.SlugField(max_length=250, null=False, blank=True, verbose_name=_("slug"))
project = models.ForeignKey(Project, related_name="project_tickets", verbose_name=_("project"))
assigned_to = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True)
milestone = models.ForeignKey(Milestone, null=True, blank=True, default=None, related_name="tasks",
verbose_name=_("milestone"))
requirement = models.ForeignKey(Requirement, null=True, blank=True, default=None, related_name="tasks",
verbose_name=_("milestone"))
created_date = models.DateTimeField(verbose_name=_("created date"), auto_now_add=True)
modified_date = models.DateTimeField(verbose_name=_("modified date"), auto_now_add= True)
finished_date = models.DateTimeField(null=True, blank=True, verbose_name=_("finished date"))
order = models.IntegerField(default=1)
description = models.TextField(null=False, blank=True, verbose_name=_("description"))
attachments = models.ManyToManyField(Attachment, blank=True)
reference = models.ManyToManyField('self', related_name='references', blank=True)
status = models.ForeignKey(TicketStatus, null=True, blank=True, related_name="tickets", verbose_name=_("status"))
severity = models.ForeignKey(Severity, null=True, blank=True, related_name="severity_tickets",
verbose_name=_("severity"))
priority = models.ForeignKey(Priority, null=True, blank=True, related_name="priority_tickets",
verbose_name=_("priority"))
ticket_type = models.CharField(max_length=50, default = 'task', blank = True)
target_date = models.DateField(null=True, blank=True)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="user_tickets", null=True, blank=True)
def __str__(self):
return self.name
class Comment(models.Model):
comment = models.TextField(null=False)
commented_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="comments")
ticket = models.ForeignKey(Ticket, related_name="ticket_comments")
attachments = models.ManyToManyField(Attachment, blank=True)
created = models.DateTimeField(auto_now_add=True)
# class Meta:
# index_together = [('content_type', 'object_id', 'namespace'), ]
class Timeline(models.Model):
content_type = models.ForeignKey(ContentType, related_name="content_type_timelines")
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
namespace = models.CharField(max_length=250, default="default", db_index=True)
event_type = models.CharField(max_length=250, db_index=True)
project = models.ForeignKey(Project, null=True)
data = models.TextField(null=False, blank=True, verbose_name=_("data"))
data_content_type = models.ForeignKey(ContentType, related_name="data_timelines")
created = models.DateTimeField(auto_now_add=True)
class Meta:
index_together = [('content_type', 'object_id', 'namespace'), ]
|
|
import collections
import os
import sys
import time
import traceback
import six
from chainer import reporter as reporter_module
from chainer import serializer as serializer_module
from chainer.training import extension as extension_module
from chainer.training import trigger as trigger_module
from chainer.utils import argument
# Select the best-resolution timer function
try:
_get_time = time.perf_counter
except AttributeError:
if os.name == 'nt':
_get_time = time.clock
else:
_get_time = time.time
class _ExtensionEntry(object):
def __init__(self, extension, priority, trigger):
self.extension = extension
self.trigger = trigger
self.priority = priority
class Trainer(object):
"""The standard training loop in Chainer.
Trainer is an implementation of a training loop. Users can invoke the
training by calling the :meth:`run` method.
Each iteration of the training loop proceeds as follows.
- Update of the parameters. It includes the mini-batch loading, forward
and backward computations, and an execution of the update formula.
These are all done by the update object held by the trainer.
- Invocation of trainer extensions in the descending order of their
priorities. A trigger object is attached to each extension, and it
decides at each iteration whether the extension should be executed.
Trigger objects are callable objects that take the trainer object as the
argument and return a boolean value indicating whether the extension
should be called or not.
Extensions are callable objects that take the trainer object as the
argument. There are three ways to define custom extensions: inheriting the
:class:`Extension` class, decorating functions by :func:`make_extension`,
and defining any callable including lambda functions. See
:class:`Extension` for more details on custom extensions and how to
configure them.
Users can register extensions to the trainer by calling the :meth:`extend`
method, where some configurations can be added.
- Trigger object, which is also explained above. In most cases,
:class:`IntervalTrigger` is used, in which case users can simply specify
a tuple of the interval length and its unit, like
``(1000, 'iteration')`` or ``(1, 'epoch')``.
- The order of execution of extensions is determined by their priorities.
Extensions of higher priorities are invoked earlier. There are three
standard values for the priorities:
- ``PRIORITY_WRITER``. This is the priority for extensions that write
some records to the :attr:`observation` dictionary. It includes cases
that the extension directly adds values to the observation dictionary,
or the extension uses the :func:`chainer.report` function to report
values to the observation dictionary.
- ``PRIORITY_EDITOR``. This is the priority for extensions that edit the
:attr:`observation` dictionary based on already reported values.
- ``PRIORITY_READER``. This is the priority for extensions that only read
records from the :attr:`observation` dictionary. This is also suitable
for extensions that do not use the :attr:`observation` dictionary at
all.
The current state of the trainer object and objects handled by the trainer
can be serialized through the standard serialization protocol of Chainer.
It enables us to easily suspend and resume the training loop.
.. code-block:: python
>>> serializers.save_npz('my.trainer', trainer) # To suspend and save
>>> serializers.load_npz('my.trainer', trainer) # To load and resume
The :meth:`~chainer.training.extensions.snapshot` method makes regular
snapshots of the :class:`~chainer.training.Trainer` object during training.
.. note::
The serialization does not recover everything of the training loop. It
only recovers the states which change over the training (e.g.
parameters, optimizer states, the batch iterator state, extension
states, etc.). You must initialize the objects correctly before
deserializing the states.
On the other hand, it means that users can change the settings on
deserialization. For example, the exit condition can be changed on the
deserialization, so users can train the model for some iterations,
suspend it, and then resume it with larger number of total iterations.
During the training, it also creates a :class:`~chainer.Reporter` object to
store observed values on each update. For each iteration, it creates a
fresh observation dictionary and stores it in the :attr:`observation`
attribute.
Links of the target model of each optimizer are registered to the reporter
object as observers, where the name of each observer is constructed as the
format ``<optimizer name><link name>``. The link name is given by the
:meth:`chainer.Link.namedlink` method, which represents the path to each
link in the hierarchy. Other observers can be registered by accessing the
reporter object via the :attr:`reporter` attribute.
The default trainer is `plain`, i.e., it does not contain any extensions.
Args:
updater (~chainer.training.Updater): Updater object. It defines how to
update the models.
stop_trigger: Trigger that determines when to stop the training loop.
If it is not callable, it is passed to :class:`IntervalTrigger`.
out: Output directory.
extensions: Extensions registered to the trainer.
Attributes:
updater: The updater object for this trainer.
stop_trigger: Trigger that determines when to stop the training loop.
The training loop stops at the iteration on which this trigger
returns ``True``.
observation: Observation of values made at the last update. See the
:class:`Reporter` class for details.
out: Output directory.
reporter: Reporter object to report observed values.
"""
def __init__(self, updater, stop_trigger=None, out='result',
extensions=None):
self.updater = updater
self.stop_trigger = trigger_module.get_trigger(stop_trigger)
self.observation = {}
self.out = out
if extensions is None:
extensions = []
reporter = reporter_module.Reporter()
for name, optimizer in six.iteritems(updater.get_all_optimizers()):
reporter.add_observer(name, optimizer.target)
reporter.add_observers(
name, optimizer.target.namedlinks(skipself=True))
self.reporter = reporter
self._done = False
self._extensions = collections.OrderedDict()
self._start_at = None
self._snapshot_elapsed_time = 0.0
self._final_elapsed_time = None
updater.connect_trainer(self)
for ext in extensions:
self.extend(ext)
@property
def elapsed_time(self):
"""Total time used for the training.
The time is in seconds. If the training is resumed from snapshot, it
includes the time of all the previous training to get the current
state of the trainer.
"""
if self._done:
return self._final_elapsed_time
if self._start_at is None:
raise RuntimeError('training has not been started yet')
return _get_time() - self._start_at + self._snapshot_elapsed_time
def extend(self, extension, name=None, trigger=None, priority=None,
**kwargs):
"""Registers an extension to the trainer.
:class:`Extension` is a callable object which is called after each
update unless the corresponding trigger object decides to skip the
iteration. The order of execution is determined by priorities:
extensions with higher priorities are called earlier in each iteration.
Extensions with the same priority are invoked in the order of
registrations.
If two or more extensions with the same name are registered, suffixes
are added to the names of the second and subsequent extensions. The
suffix is ``_N`` where N is the ordinal of the extension.
See :class:`Extension` for the interface of extensions.
Args:
extension: Extension to register.
name (str): Name of the extension. If it is omitted, the
:attr:`Extension.name` attribute of the extension is used or
the :attr:`Extension.default_name` attribute of the extension
if `name` is set to ``None`` or is undefined.
Note that the name would be suffixed by an ordinal in case of
duplicated names as explained above.
trigger (tuple or Trigger): Trigger object that determines when to
invoke the extension. If it is ``None``, ``extension.trigger``
is used instead. If it is ``None`` and the extension does not
have the trigger attribute, the extension is triggered at every
iteration by default. If the trigger is not callable, it is
passed to :class:`IntervalTrigger` to build an interval
trigger.
priority (int): Invocation priority of the extension. Extensions
are invoked in the descending order of priorities in each
iteration. If this is ``None``, ``extension.priority`` is used
instead.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs,
invoke_before_training='invoke_before_training has been '
'removed since Chainer v2.0.0. Use initializer= instead.')
argument.assert_kwargs_empty(kwargs)
if name is None:
name = getattr(extension, 'name', None)
if name is None:
name = getattr(extension, 'default_name', None)
if name is None:
name = getattr(extension, '__name__', None)
if name is None:
raise TypeError('name is not given for the extension')
if name == 'training':
raise ValueError(
'the name "training" is prohibited as an extension name')
if trigger is None:
trigger = getattr(extension, 'trigger', (1, 'iteration'))
trigger = trigger_module.get_trigger(trigger)
if priority is None:
priority = getattr(
extension, 'priority', extension_module.PRIORITY_READER)
modified_name = name
ordinal = 0
while modified_name in self._extensions:
ordinal += 1
modified_name = '%s_%d' % (name, ordinal)
extension.name = modified_name
self._extensions[modified_name] = _ExtensionEntry(
extension, priority, trigger)
def get_extension(self, name):
"""Returns the extension of a given name.
Args:
name (str): Name of the extension.
Returns:
Extension.
"""
extensions = self._extensions
if name in extensions:
return extensions[name].extension
else:
raise ValueError('extension %s not found' % name)
def run(self, show_loop_exception_msg=True):
"""Executes the training loop.
This method is the core of ``Trainer``. It executes the whole loop of
training the models.
Note that this method cannot run multiple times for one trainer object.
"""
if self._done:
raise RuntimeError('cannot run training loop multiple times')
try:
os.makedirs(self.out)
except OSError:
pass
# sort extensions by priorities
extension_order = sorted(
self._extensions.keys(),
key=lambda name: self._extensions[name].priority, reverse=True)
extensions = [(name, self._extensions[name])
for name in extension_order]
self._start_at = _get_time()
# invoke initializer of each extension
for _, entry in extensions:
initializer = getattr(entry.extension, 'initialize', None)
finished = getattr(entry.trigger, 'finished', False)
if initializer and not finished:
initializer(self)
update = self.updater.update
reporter = self.reporter
stop_trigger = self.stop_trigger
# main training loop
try:
while not stop_trigger(self):
self.observation = {}
with reporter.scope(self.observation):
update()
for name, entry in extensions:
if entry.trigger(self):
entry.extension(self)
except Exception as e:
if show_loop_exception_msg:
# Show the exception here, as it will appear as if chainer
# hung in case any finalize method below deadlocks.
f = sys.stderr
f.write('Exception in main training loop: {}\n'.format(e))
f.write('Traceback (most recent call last):\n')
traceback.print_tb(sys.exc_info()[2])
f.write('Will finalize trainer extensions and updater before '
'reraising the exception.\n')
# In Python 2, sys.exc_info() is updated if any following
# exception happens even if it's in a limited scope (like
# try-catch clause below). Thus the exception from main
# loop is preserved here.
exc_info = sys.exc_info()
for _, entry in extensions:
handler = getattr(entry.extension, 'on_error', None)
if handler:
try:
# It is guaranteed all handlers are called,
# but exceptions thrown by those handlers are
# just printed and ignored, as are their
# return values.
handler(self, e, sys.exc_info()[2])
except Exception as he:
f.write('Exception in error handler: {}\n'.format(he))
f.write('Traceback (most recent call last):\n')
traceback.print_tb(sys.exc_info()[2])
six.reraise(*exc_info)
finally:
for _, entry in extensions:
finalize = getattr(entry.extension, 'finalize', None)
if finalize:
finalize()
self.updater.finalize()
self._final_elapsed_time = self.elapsed_time
self._done = True
def serialize(self, serializer):
self.updater.serialize(serializer['updater'])
if hasattr(self.stop_trigger, 'serialize'):
self.stop_trigger.serialize(serializer['stop_trigger'])
s = serializer['extensions']
t = serializer['extension_triggers']
for name, entry in six.iteritems(self._extensions):
if hasattr(entry.extension, 'serialize'):
entry.extension.serialize(s[name])
if hasattr(entry.trigger, 'serialize'):
entry.trigger.serialize(t[name])
if isinstance(serializer, serializer_module.Serializer):
serializer('_snapshot_elapsed_time', self.elapsed_time)
else:
self._snapshot_elapsed_time = serializer(
'_snapshot_elapsed_time', 0.0)
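# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the Trainer implementation above):
# how a Trainer is typically assembled, extended and run. The iterator and
# optimizer are assumed to be standard Chainer objects supplied by the
# caller, and the snapshot filename passed as ``resume_from`` is an assumed
# default produced by ``extensions.snapshot()``.
def _example_trainer_usage(train_iter, optimizer, resume_from=None):
    # local imports to avoid import cycles at module load time
    from chainer import serializers
    from chainer.training import StandardUpdater
    from chainer.training import extensions

    updater = StandardUpdater(train_iter, optimizer, device=-1)
    trainer = Trainer(updater, stop_trigger=(20, 'epoch'), out='result')
    # extensions run after each update; trigger and priority control when
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.snapshot(), trigger=(1, 'epoch'))
    if resume_from is not None:
        # resuming restores the updater, triggers and elapsed time; the
        # stop_trigger above may be larger than in the interrupted run
        serializers.load_npz(resume_from, trainer)
    trainer.run()
    return trainer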
|
|
__author__ = 'krishnab'
from operator import neg, truediv
import numpy as np
import pandas as pd
from numpy.random import binomial
from models.Models import Base_model
class Basic_stochastic_model_fixed_promotion(Base_model):
def __init__(self, **kwds):
Base_model.__init__(self, **kwds)
self.name = "Stochastic Model(sim_orig)"
self.label = "promote-hire"
def run_model(self):
## initialize data structure
self.res = np.zeros([self.duration, 12], dtype=np.float32)
self.res[0, 0] = self.nf1
self.res[0, 1] = self.nf2
self.res[0, 2] = self.nf3
self.res[0, 3] = self.nm1
self.res[0, 4] = self.nm2
self.res[0, 5] = self.nm3
self.res[0, 6] = self.vac3
self.res[0, 7] = self.vac2
self.res[0, 8] = self.vac1
self.res[0, 9] = self.female_promotion_probability_1
self.res[0, 10] = self.female_promotion_probability_2
self.res[0, 11] = np.float32(
sum(list([self.nf1, self.nf2, self.nf3])) / sum(list([self.nf1,
self.nf2,
self.nf3,
self.nm1,
self.nm2,
self.nm3])))
hiring_rate_female_level_1 = self.bf1
hiring_rate_female_level_2 = self.bf2
hiring_rate_female_level_3 = self.bf3
attrition_rate_female_level_1 = self.df1
attrition_rate_female_level_2 = self.df2
attrition_rate_female_level_3 = self.df3
attrition_rate_male_level_1 = self.dm1
attrition_rate_male_level_2 = self.dm2
attrition_rate_male_level_3 = self.dm3
probability_of_outside_hire_level_3 = self.phire3
probability_of_outside_hire_level_2 = self.phire2
male_promotion_probability_1_2 = self.male_promotion_probability_1
male_promotion_probability_2_3 = self.male_promotion_probability_2
for i in range(1, self.duration):
# initialize variables for this iteration
prev_number_of_females_level_1 = self.res[i - 1, 0]
prev_number_of_females_level_2 = self.res[i - 1, 1]
prev_number_of_females_level_3 = self.res[i - 1, 2]
prev_number_of_males_level_1 = self.res[i - 1, 3]
prev_number_of_males_level_2 = self.res[i - 1, 4]
prev_number_of_males_level_3 = self.res[i - 1, 5]
prev_number_of_vacancies_level_3 = self.res[i - 1, 6]
prev_number_of_vacancies_level_2 = self.res[i - 1, 7]
prev_number_of_vacancies_level_1 = self.res[i - 1, 8]
prev_promotion_rate_female_level_1 = self.female_promotion_probability_1
prev_promotion_rate_female_level_2 = self.female_promotion_probability_2
if np.isnan(prev_promotion_rate_female_level_1):
prev_promotion_rate_female_level_1 = 0
if np.isnan(prev_promotion_rate_female_level_2):
prev_promotion_rate_female_level_2 = 0
prev_gender_proportion_of_department = np.float32(
sum(list([prev_number_of_females_level_1,
prev_number_of_females_level_2,
prev_number_of_females_level_3])) / (
sum(list([prev_number_of_females_level_1,
prev_number_of_females_level_2,
prev_number_of_females_level_3,
prev_number_of_males_level_1,
prev_number_of_males_level_2,
prev_number_of_males_level_3]))))
# Process Model
# first, both females and males leave the department according to binomial probability.
female_attrition_level_3 = binomial(prev_number_of_females_level_3,
attrition_rate_female_level_3)
male_attrition_level_3 = binomial(prev_number_of_males_level_3,
attrition_rate_male_level_3)
# the departures create a set of vacancies. These vacancies are the basis for new hiring
total_vacancies_3 = female_attrition_level_3 + male_attrition_level_3
# women are hired first and then men
hiring_female_3 = binomial(total_vacancies_3,
probability_of_outside_hire_level_3 * hiring_rate_female_level_3)
hiring_male_3 = binomial(max(0, total_vacancies_3 - hiring_female_3),
probability_of_outside_hire_level_3 * (
1 - hiring_rate_female_level_3))
total_hiring_3 = hiring_female_3 + hiring_male_3
# level 3 vacancies that are not filled by new hires create opportunities
# for promotion from level 2. Again women are promoted first and men second.
# Also note the guard below: we never promote more professors out of
# level 2 than currently exist at level 2.
vacancies_remaining_after_hiring_3 = total_vacancies_3 - total_hiring_3
potential_promotions_after_hiring_3 = max(0,
vacancies_remaining_after_hiring_3)
promotions_of_females_level_2_3 = binomial(min(
potential_promotions_after_hiring_3,
prev_number_of_females_level_2),
prev_promotion_rate_female_level_2)
promotions_of_males_level_2_3 = binomial(max(0,min(
potential_promotions_after_hiring_3-promotions_of_females_level_2_3,
prev_number_of_males_level_2)), male_promotion_probability_2_3)
# attrition at level 2 - either people leave from attrition or promotion
female_attrition_level_2 = binomial(
max(0,
prev_number_of_females_level_2 - promotions_of_females_level_2_3),
attrition_rate_female_level_2)
male_attrition_level_2 = binomial(max(0,
prev_number_of_males_level_2 - promotions_of_males_level_2_3),
attrition_rate_male_level_2)
# the departures create a set of vacancies. These vacancies are the basis for new hiring
total_vacancies_2 = sum(list([female_attrition_level_2,
male_attrition_level_2,
promotions_of_females_level_2_3,
promotions_of_males_level_2_3]))
hiring_female_2 = binomial(max(0,total_vacancies_2),
probability_of_outside_hire_level_2 * hiring_rate_female_level_2)
hiring_male_2 = binomial(max(0,total_vacancies_2-hiring_female_2),
probability_of_outside_hire_level_2 * (1-hiring_rate_female_level_2))
total_hiring_2 = hiring_female_2 + hiring_male_2
vacancies_remaining_after_hiring_2 = total_vacancies_2 - total_hiring_2
potential_promotions_after_hiring_2 = max(0,
vacancies_remaining_after_hiring_2)
promotions_of_females_level_1_2 = binomial(max(0,
min(potential_promotions_after_hiring_2, prev_number_of_females_level_1)),
prev_promotion_rate_female_level_1)
promotions_of_males_level_1_2 = binomial(max(0,min(
potential_promotions_after_hiring_2 - promotions_of_females_level_1_2, prev_number_of_males_level_1)),
male_promotion_probability_1_2)
## Level 1
female_attrition_level_1 = binomial(max(0,prev_number_of_females_level_1-promotions_of_females_level_1_2),
attrition_rate_female_level_1)
male_attrition_level_1 = binomial(max(0,prev_number_of_males_level_1-promotions_of_males_level_1_2),
attrition_rate_male_level_1)
total_vacancies_1 = sum(list([female_attrition_level_1,
male_attrition_level_1,
promotions_of_females_level_1_2,
promotions_of_males_level_1_2]))
hiring_female_1 = binomial(max(0,total_vacancies_1),
hiring_rate_female_level_1)
hiring_male_1 = binomial(max(0,total_vacancies_1 - hiring_female_1),
1 - hiring_rate_female_level_1)
# Write state variables to array and move to next iteration
self.res[i, 0] = number_of_females_level_1 = sum(
list([prev_number_of_females_level_1,
neg(female_attrition_level_1),
neg(promotions_of_females_level_1_2),
hiring_female_1]))
assert (number_of_females_level_1 >= 0), "negative number of females 1"
self.res[i, 1] = number_of_females_level_2 = max(0, sum(
list([prev_number_of_females_level_2,
neg(female_attrition_level_2),
neg(promotions_of_females_level_2_3),
promotions_of_females_level_1_2,
hiring_female_2])))
self.res[i, 2] = number_of_females_level_3 = sum(list([
prev_number_of_females_level_3,
neg(female_attrition_level_3),
promotions_of_females_level_2_3,
hiring_female_3]))
self.res[i, 3] = number_of_males_level_1 = sum(list([
prev_number_of_males_level_1,
neg(male_attrition_level_1),
neg(promotions_of_males_level_1_2),
hiring_male_1]))
self.res[i, 4] = number_of_males_level_2 = sum(
list([prev_number_of_males_level_2,
neg(male_attrition_level_2),
neg(promotions_of_males_level_2_3),
promotions_of_males_level_1_2,
hiring_male_2]))
self.res[i, 5] = number_of_males_level_3 = sum(
list([prev_number_of_males_level_3,
neg(male_attrition_level_3),
promotions_of_males_level_2_3,
hiring_male_3]))
self.res[i, 6] = number_of_vacancies_level_3 = sum(list([
male_attrition_level_3,
female_attrition_level_3]))
self.res[i, 7] = number_of_vacancies_level_2 = sum(list([
male_attrition_level_2,
female_attrition_level_2,
promotions_of_females_level_2_3,
promotions_of_males_level_2_3]))
self.res[i, 8] = number_of_vacancies_level_1 = sum(list([
male_attrition_level_1,
female_attrition_level_1,
promotions_of_males_level_1_2,
promotions_of_females_level_1_2]))
self.res[i, 9] = promotion_rate_female_level_1 = np.float32(
number_of_females_level_1 / sum(list([number_of_females_level_1,
number_of_males_level_1])))
self.res[i, 10] = promotion_rate_women_level_2 = np.float32(
number_of_females_level_2 / sum(list([number_of_females_level_2,
number_of_males_level_2])))
self.res[i, 11] = gender_proportion_of_department = np.float32(
truediv(sum(list([number_of_females_level_1,
number_of_females_level_2,
number_of_females_level_3])), sum(list([
number_of_females_level_1,
number_of_females_level_2,
number_of_females_level_3,
number_of_males_level_1,
number_of_males_level_2,
number_of_males_level_3]))))
# print(self.res[i,:])
## Assemble results into a DataFrame / record array
df_ = pd.DataFrame(self.res)
df_.columns = ['f1',
'f2',
'f3',
'm1',
'm2',
'm3',
't3',
't2',
't1',
'prom1',
'prom2',
'gendprop']
# print(df_)
recarray_results = df_.to_records(index=True)
self.run = recarray_results
return recarray_results
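# ---------------------------------------------------------------------------
# Illustrative sketch only: the per-level attrition/hire/promotion step that
# run_model() applies above, pulled out as a standalone helper. The rates and
# headcounts are supplied by the caller; nothing here is a model default.
def _example_level_step(n_female, n_male, attrition_f, attrition_m,
                        outside_hire_prob, hire_rate_female):
    # departures are binomial draws over the current headcount
    leavers_f = binomial(n_female, attrition_f)
    leavers_m = binomial(n_male, attrition_m)
    vacancies = leavers_f + leavers_m
    # outside hires fill vacancies first, women before men
    hired_f = binomial(vacancies, outside_hire_prob * hire_rate_female)
    hired_m = binomial(max(0, vacancies - hired_f),
                       outside_hire_prob * (1 - hire_rate_female))
    # whatever remains unfilled becomes the pool available for promotion
    # from the level below
    promotable = max(0, vacancies - hired_f - hired_m)
    return leavers_f, leavers_m, hired_f, hired_m, promotable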
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
import testtools
from neutron.common import exceptions as exc
from neutron.common import topics
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2
from neutron.extensions import flavor as ext_flavor
from neutron.openstack.common import uuidutils
from neutron.plugins.metaplugin import meta_neutron_plugin
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
CONF_FILE = ""
META_PATH = "neutron.plugins.metaplugin"
FAKE_PATH = "neutron.tests.unit.metaplugin"
PROXY_PATH = "%s.proxy_neutron_plugin.ProxyPluginV2" % META_PATH
PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2,proxy:%s
""".strip() % (FAKE_PATH, FAKE_PATH, PROXY_PATH)
L3_PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2
""".strip() % (FAKE_PATH, FAKE_PATH)
def setup_metaplugin_conf(has_l3=True):
cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0',
'PROXY')
cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY')
cfg.CONF.set_override('admin_user', 'neutron', 'PROXY')
cfg.CONF.set_override('admin_password', 'password', 'PROXY')
cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY')
cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META')
if has_l3:
cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META')
else:
cfg.CONF.set_override('l3_plugin_list', "", 'META')
cfg.CONF.set_override('default_flavor', 'fake2', 'META')
cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META')
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
#TODO(nati) remove this after subnet quota change is merged
cfg.CONF.set_override('max_dns_nameservers', 10)
# Hooks registered by the metaplugin must not leak into other plugins' unit
# tests, so they are unregistered here (in fact overwritten with None).
def unregister_meta_hooks():
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Network, 'metaplugin_net', None, None, None)
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port, 'metaplugin_port', None, None, None)
class MetaNeutronPluginV2Test(testlib_api.SqlTestCase,
testlib_plugin.PluginSetupHelper):
"""Class conisting of MetaNeutronPluginV2 unit tests."""
has_l3 = True
def setUp(self):
super(MetaNeutronPluginV2Test, self).setUp()
self.fake_tenant_id = uuidutils.generate_uuid()
self.context = context.get_admin_context()
self.addCleanup(unregister_meta_hooks)
setup_metaplugin_conf(self.has_l3)
self.client_cls_p = mock.patch('neutronclient.v2_0.client.Client')
client_cls = self.client_cls_p.start()
self.client_inst = mock.Mock()
client_cls.return_value = self.client_inst
self.client_inst.create_network.return_value = \
{'id': 'fake_id'}
self.client_inst.create_port.return_value = \
{'id': 'fake_id'}
self.client_inst.create_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.update_network.return_value = \
{'id': 'fake_id'}
self.client_inst.update_port.return_value = \
{'id': 'fake_id'}
self.client_inst.update_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.delete_network.return_value = True
self.client_inst.delete_port.return_value = True
self.client_inst.delete_subnet.return_value = True
plugin = (meta_neutron_plugin.MetaPluginV2.__module__ + '.'
+ meta_neutron_plugin.MetaPluginV2.__name__)
self.setup_coreplugin(plugin)
self.plugin = meta_neutron_plugin.MetaPluginV2(configfile=None)
def _fake_network(self, flavor):
data = {'network': {'name': flavor,
'admin_state_up': True,
'shared': False,
'router:external': [],
'tenant_id': self.fake_tenant_id,
ext_flavor.FLAVOR_NETWORK: flavor}}
return data
def _fake_port(self, net_id):
return {'port': {'name': net_id,
'network_id': net_id,
'admin_state_up': True,
'device_id': 'bad_device_id',
'device_owner': 'bad_device_owner',
'host_routes': [],
'fixed_ips': [],
'mac_address': self.plugin._generate_mac(),
'tenant_id': self.fake_tenant_id}}
def _fake_subnet(self, net_id):
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
return {'subnet': {'name': net_id,
'network_id': net_id,
'gateway_ip': '10.0.0.1',
'dns_nameservers': ['10.0.0.2'],
'host_routes': [],
'cidr': '10.0.0.0/24',
'allocation_pools': allocation_pools,
'enable_dhcp': True,
'ip_version': 4}}
def _fake_router(self, flavor):
data = {'router': {'name': flavor, 'admin_state_up': True,
'tenant_id': self.fake_tenant_id,
ext_flavor.FLAVOR_ROUTER: flavor,
'external_gateway_info': None}}
return data
def test_create_delete_network(self):
network1 = self._fake_network('fake1')
ret1 = self.plugin.create_network(self.context, network1)
self.assertEqual('fake1', ret1[ext_flavor.FLAVOR_NETWORK])
network2 = self._fake_network('fake2')
ret2 = self.plugin.create_network(self.context, network2)
self.assertEqual('fake2', ret2[ext_flavor.FLAVOR_NETWORK])
network3 = self._fake_network('proxy')
ret3 = self.plugin.create_network(self.context, network3)
self.assertEqual('proxy', ret3[ext_flavor.FLAVOR_NETWORK])
db_ret1 = self.plugin.get_network(self.context, ret1['id'])
self.assertEqual('fake1', db_ret1['name'])
db_ret2 = self.plugin.get_network(self.context, ret2['id'])
self.assertEqual('fake2', db_ret2['name'])
db_ret3 = self.plugin.get_network(self.context, ret3['id'])
self.assertEqual('proxy', db_ret3['name'])
db_ret4 = self.plugin.get_networks(self.context)
self.assertEqual(3, len(db_ret4))
db_ret5 = self.plugin.get_networks(
self.context,
{ext_flavor.FLAVOR_NETWORK: ['fake1']})
self.assertEqual(1, len(db_ret5))
self.assertEqual('fake1', db_ret5[0]['name'])
self.plugin.delete_network(self.context, ret1['id'])
self.plugin.delete_network(self.context, ret2['id'])
self.plugin.delete_network(self.context, ret3['id'])
def test_create_delete_port(self):
network1 = self._fake_network('fake1')
network_ret1 = self.plugin.create_network(self.context, network1)
network2 = self._fake_network('fake2')
network_ret2 = self.plugin.create_network(self.context, network2)
network3 = self._fake_network('proxy')
network_ret3 = self.plugin.create_network(self.context, network3)
port1 = self._fake_port(network_ret1['id'])
port2 = self._fake_port(network_ret2['id'])
port3 = self._fake_port(network_ret3['id'])
port1_ret = self.plugin.create_port(self.context, port1)
port2_ret = self.plugin.create_port(self.context, port2)
port3_ret = self.plugin.create_port(self.context, port3)
ports_all = self.plugin.get_ports(self.context)
self.assertEqual(network_ret1['id'], port1_ret['network_id'])
self.assertEqual(network_ret2['id'], port2_ret['network_id'])
self.assertEqual(network_ret3['id'], port3_ret['network_id'])
self.assertEqual(3, len(ports_all))
port1_dict = self.plugin._make_port_dict(port1_ret)
port2_dict = self.plugin._make_port_dict(port2_ret)
port3_dict = self.plugin._make_port_dict(port3_ret)
self.assertEqual(port1_dict, port1_ret)
self.assertEqual(port2_dict, port2_ret)
self.assertEqual(port3_dict, port3_ret)
port1['port']['admin_state_up'] = False
port2['port']['admin_state_up'] = False
port3['port']['admin_state_up'] = False
self.plugin.update_port(self.context, port1_ret['id'], port1)
self.plugin.update_port(self.context, port2_ret['id'], port2)
self.plugin.update_port(self.context, port3_ret['id'], port3)
port_in_db1 = self.plugin.get_port(self.context, port1_ret['id'])
port_in_db2 = self.plugin.get_port(self.context, port2_ret['id'])
port_in_db3 = self.plugin.get_port(self.context, port3_ret['id'])
self.assertEqual(False, port_in_db1['admin_state_up'])
self.assertEqual(False, port_in_db2['admin_state_up'])
self.assertEqual(False, port_in_db3['admin_state_up'])
self.plugin.delete_port(self.context, port1_ret['id'])
self.plugin.delete_port(self.context, port2_ret['id'])
self.plugin.delete_port(self.context, port3_ret['id'])
self.plugin.delete_network(self.context, network_ret1['id'])
self.plugin.delete_network(self.context, network_ret2['id'])
self.plugin.delete_network(self.context, network_ret3['id'])
def test_create_delete_subnet(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
network1 = self._fake_network('fake1')
network_ret1 = self.plugin.create_network(self.context, network1)
network2 = self._fake_network('fake2')
network_ret2 = self.plugin.create_network(self.context, network2)
network3 = self._fake_network('proxy')
network_ret3 = self.plugin.create_network(self.context, network3)
subnet1 = self._fake_subnet(network_ret1['id'])
subnet2 = self._fake_subnet(network_ret2['id'])
subnet3 = self._fake_subnet(network_ret3['id'])
subnet1_ret = self.plugin.create_subnet(self.context, subnet1)
subnet2_ret = self.plugin.create_subnet(self.context, subnet2)
subnet3_ret = self.plugin.create_subnet(self.context, subnet3)
self.assertEqual(network_ret1['id'], subnet1_ret['network_id'])
self.assertEqual(network_ret2['id'], subnet2_ret['network_id'])
self.assertEqual(network_ret3['id'], subnet3_ret['network_id'])
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
subnet1['subnet']['allocation_pools'].pop()
subnet2['subnet']['allocation_pools'].pop()
subnet3['subnet']['allocation_pools'].pop()
self.plugin.update_subnet(self.context,
subnet1_ret['id'], subnet1)
self.plugin.update_subnet(self.context,
subnet2_ret['id'], subnet2)
self.plugin.update_subnet(self.context,
subnet3_ret['id'], subnet3)
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
self.assertEqual(4, subnet_in_db1['ip_version'])
self.assertEqual(4, subnet_in_db2['ip_version'])
self.assertEqual(4, subnet_in_db3['ip_version'])
self.plugin.delete_subnet(self.context, subnet1_ret['id'])
self.plugin.delete_subnet(self.context, subnet2_ret['id'])
self.plugin.delete_subnet(self.context, subnet3_ret['id'])
self.plugin.delete_network(self.context, network_ret1['id'])
self.plugin.delete_network(self.context, network_ret2['id'])
self.plugin.delete_network(self.context, network_ret3['id'])
def test_create_delete_router(self):
router1 = self._fake_router('fake1')
router_ret1 = self.plugin.create_router(self.context, router1)
router2 = self._fake_router('fake2')
router_ret2 = self.plugin.create_router(self.context, router2)
self.assertEqual('fake1', router_ret1[ext_flavor.FLAVOR_ROUTER])
self.assertEqual('fake2', router_ret2[ext_flavor.FLAVOR_ROUTER])
router_in_db1 = self.plugin.get_router(self.context, router_ret1['id'])
router_in_db2 = self.plugin.get_router(self.context, router_ret2['id'])
self.assertEqual('fake1', router_in_db1[ext_flavor.FLAVOR_ROUTER])
self.assertEqual('fake2', router_in_db2[ext_flavor.FLAVOR_ROUTER])
self.plugin.delete_router(self.context, router_ret1['id'])
self.plugin.delete_router(self.context, router_ret2['id'])
with testtools.ExpectedException(meta_neutron_plugin.FlavorNotFound):
self.plugin.get_router(self.context, router_ret1['id'])
def test_extension_method(self):
"""Test if plugin methods are accessible from self.plugin
This test compensates for the nondeterministic ordering of
self.plugin's plugins dictionary. Fake Plugin 1 and Fake Plugin 2
both have a function called fake_func and the order of
self.plugin.plugins will determine which fake_func is called.
"""
fake1 = self.plugin.plugins.keys().index('fake1')
fake2 = self.plugin.plugins.keys().index('fake2')
fake1_before_fake2 = fake1 < fake2
fake_func_return = 'fake1' if fake1_before_fake2 else 'fake2'
self.assertEqual(fake_func_return, self.plugin.fake_func())
self.assertEqual('fake2', self.plugin.fake_func2())
def test_extension_not_implemented_method(self):
try:
self.plugin.not_implemented()
except AttributeError:
return
except Exception:
self.fail("AttributeError Error is not raised")
self.fail("No Error is not raised")
def test_create_network_flavor_fail(self):
with mock.patch('neutron.plugins.metaplugin.meta_db_v2.'
'add_network_flavor_binding',
side_effect=Exception):
network = self._fake_network('fake1')
self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding,
self.plugin.create_network,
self.context,
network)
count = self.plugin.get_networks_count(self.context)
self.assertEqual(count, 0)
def test_create_router_flavor_fail(self):
with mock.patch('neutron.plugins.metaplugin.meta_db_v2.'
'add_router_flavor_binding',
side_effect=Exception):
router = self._fake_router('fake1')
self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding,
self.plugin.create_router,
self.context,
router)
count = self.plugin.get_routers_count(self.context)
self.assertEqual(count, 0)
class MetaNeutronPluginV2TestWithoutL3(MetaNeutronPluginV2Test):
"""Tests without l3_plugin_list configration."""
has_l3 = False
def test_supported_extension_aliases(self):
self.assertEqual(self.plugin.supported_extension_aliases,
['flavor', 'external-net'])
def test_create_delete_router(self):
self.skipTest("Test case without router")
def test_create_router_flavor_fail(self):
self.skipTest("Test case without router")
class MetaNeutronPluginV2TestRpcFlavor(testlib_api.SqlTestCase):
"""Tests for rpc_flavor."""
def setUp(self):
super(MetaNeutronPluginV2TestRpcFlavor, self).setUp()
self.addCleanup(unregister_meta_hooks)
def test_rpc_flavor(self):
setup_metaplugin_conf()
cfg.CONF.set_override('rpc_flavor', 'fake1', 'META')
self.plugin = meta_neutron_plugin.MetaPluginV2()
self.assertEqual(topics.PLUGIN, 'q-plugin')
ret = self.plugin.rpc_workers_supported()
self.assertFalse(ret)
def test_invalid_rpc_flavor(self):
setup_metaplugin_conf()
cfg.CONF.set_override('rpc_flavor', 'fake-fake', 'META')
self.assertRaises(exc.Invalid,
meta_neutron_plugin.MetaPluginV2)
self.assertEqual(topics.PLUGIN, 'q-plugin')
def test_rpc_flavor_multiple_rpc_workers(self):
setup_metaplugin_conf()
cfg.CONF.set_override('rpc_flavor', 'fake2', 'META')
self.plugin = meta_neutron_plugin.MetaPluginV2()
self.assertEqual(topics.PLUGIN, 'q-plugin')
ret = self.plugin.rpc_workers_supported()
self.assertTrue(ret)
ret = self.plugin.start_rpc_listeners()
self.assertEqual('OK', ret)
|
|
"""
Custom manager for Objects.
"""
from itertools import chain
from django.db.models import Q
from django.conf import settings
from django.db.models.fields import exceptions
from evennia.typeclasses.managers import TypedObjectManager, TypeclassManager
from evennia.typeclasses.managers import returns_typeclass, returns_typeclass_list
from evennia.utils import utils
from evennia.utils.utils import to_unicode, is_iter, make_iter, string_partial_matching
__all__ = ("ObjectManager",)
_GA = object.__getattribute__
# delayed import
_ATTR = None
# Try to use a custom way to parse id-tagged multimatches.
_AT_MULTIMATCH_INPUT = utils.variable_from_module(*settings.SEARCH_AT_MULTIMATCH_INPUT.rsplit('.', 1))
class ObjectDBManager(TypedObjectManager):
"""
This ObjectManager implements methods for searching
and manipulating Objects directly from the database.
Evennia-specific search methods will return Typeclasses or
lists of Typeclasses, whereas Django's general methods will return
Querysets or database objects.
dbref (converter)
get_id (alias: dbref_search)
get_dbref_range
object_totals
typeclass_search
get_object_with_player
get_objs_with_key_and_typeclass
get_objs_with_attr
get_objs_with_attr_match
get_objs_with_db_property
get_objs_with_db_property_match
get_objs_with_key_or_alias
get_contents
object_search (interface to many of the above methods,
equivalent to evennia.search_object)
copy_object
"""
#
# ObjectManager Get methods
#
# player related
@returns_typeclass
def get_object_with_player(self, ostring, exact=True, candidates=None):
"""
Search for an object based on its player's name or dbref.
Args:
ostring (str or int): Search criterion or dbref. Searching
for a player is sometimes initiated by prepending an `*` to
the search criterion (e.g. in
local_and_global_search). This is stripped here.
exact (bool, optional): Require an exact player match.
candidates (list, optional): Only search among this list of possible
object candidates.
Returns:
match (Object or list): One or more matching results.
"""
ostring = to_unicode(ostring).lstrip('*')
# simplest case - search by dbref
dbref = self.dbref(ostring)
if dbref:
return dbref
# not a dbref. Search by name.
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
if exact:
return self.filter(cand_restriction & Q(db_player__username__iexact=ostring))
else: # fuzzy matching
ply_cands = self.filter(cand_restriction & Q(playerdb__username__istartswith=ostring)).values_list("db_key", flat=True)
if candidates:
index_matches = string_partial_matching(ply_cands, ostring, ret_index=True)
return [obj for ind, obj in enumerate(make_iter(candidates)) if ind in index_matches]
else:
return string_partial_matching(ply_cands, ostring, ret_index=False)
@returns_typeclass_list
def get_objs_with_key_and_typeclass(self, oname, otypeclass_path, candidates=None):
"""
Returns objects based on simultaneous key and typeclass match.
Args:
oname (str): Object key to search for
otypeclass_path (str): Full Python path to the typeclass to search for.
candidates (list, optional): Only match among the given list of candidates.
Returns:
matches (list): The matching objects.
"""
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
return self.filter(cand_restriction & Q(db_key__iexact=oname, db_typeclass_path__exact=otypeclass_path))
# attr/property related
@returns_typeclass_list
def get_objs_with_attr(self, attribute_name, candidates=None):
"""
Get objects based on having a certain Attribute defined.
Args:
attribute_name (str): Attribute name to search for.
candidates (list, optional): Only match among the given list of candidates.
Returns:
matches (list): All objects having the given attribute_name defined at all.
"""
cand_restriction = candidates != None and Q(db_attributes__db_obj__pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
return list(self.filter(cand_restriction & Q(db_attributes__db_key=attribute_name)))
@returns_typeclass_list
def get_objs_with_attr_value(self, attribute_name, attribute_value, candidates=None, typeclasses=None):
"""
Get all objects having the given attrname set to the given value.
Args:
attribute_name (str): Attribute key to search for.
attribute_value (str): Attribute value to search for.
candidates (list, optional): Candidate objects to limit search to.
typeclasses (list, optional): Python paths to restrict matches with.
Returns:
matches (list): Objects fulfilling both the `attribute_name` and `attribute_value` criteria.
Notes:
This uses the Attribute's PickledField to transparently search the database by matching
the internal representation. This is reasonably effective but since Attribute values
cannot be indexed, searching by Attribute key is to be preferred whenever possible.
"""
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
## This doesn't work if attribute_value is an object. Workaround below
if isinstance(attribute_value, (basestring, int, float, bool, long)):
return self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name, db_attributes__db_value=attribute_value))
else:
# We have to loop for safety since the referenced lookup gives deepcopy error if attribute value is an object.
global _ATTR
if not _ATTR:
from evennia.typeclasses.models import Attribute as _ATTR
cands = list(self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name)))
results = [attr.objectdb_set.all() for attr in _ATTR.objects.filter(objectdb__in=cands, db_value=attribute_value)]
return chain(*results)
@returns_typeclass_list
def get_objs_with_db_property(self, property_name, candidates=None):
"""
Get all objects having a given db field property.
Args:
property_name (str): The name of the field to match for.
candidates (list, optional): Only search among the given candidates.
Returns:
matches (list): The found matches.
"""
property_name = "db_%s" % property_name.lstrip('db_')
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
querykwargs = {property_name:None}
try:
return list(self.filter(cand_restriction).exclude(Q(**querykwargs)))
except exceptions.FieldError:
return []
@returns_typeclass_list
def get_objs_with_db_property_value(self, property_name, property_value, candidates=None, typeclasses=None):
"""
Get objects with a specific field name and value.
Args:
property_name (str): Field name to search for.
property_value (any): The value that the field given by `property_name` must have.
candidates (list, optional): List of objects to limit search to.
typeclasses (list, optional): List of typeclass-path strings to restrict matches with
"""
if isinstance(property_value, basestring):
property_value = to_unicode(property_value)
if isinstance(property_name, basestring):
if not property_name.startswith('db_'):
property_name = "db_%s" % property_name
querykwargs = {property_name:property_value}
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
try:
return list(self.filter(cand_restriction & type_restriction & Q(**querykwargs)))
except exceptions.FieldError:
return []
except ValueError:
from evennia.utils import logger
logger.log_errmsg("The property '%s' does not support search criteria of the type %s." % (property_name, type(property_value)))
return []
@returns_typeclass_list
def get_contents(self, location, excludeobj=None):
"""
Get all objects that have a location set to this one.
Args:
location (Object): Where to get contents from.
excludeobj (Object or list, optional): One or more objects
to exclude from the match.
Returns:
contents (list): Matching contents, without excludeobj, if given.
"""
exclude_restriction = Q(pk__in=[_GA(obj, "id") for obj in make_iter(excludeobj)]) if excludeobj else Q()
return self.filter(db_location=location).exclude(exclude_restriction)
@returns_typeclass_list
def get_objs_with_key_or_alias(self, ostring, exact=True,
candidates=None, typeclasses=None):
"""
Args:
ostring (str): A search criterion.
exact (bool, optional): Require exact match of ostring
(still case-insensitive). If `False`, will do fuzzy matching
using `evennia.utils.utils.string_partial_matching` algorithm.
candidates (list): Only match among these candidates.
typeclasses (list): Only match objects with typeclasses having these path strings.
Returns:
matches (list): A list of matches of length 0, 1 or more.
"""
if not isinstance(ostring, basestring):
if hasattr(ostring, "key"):
ostring = ostring.key
else:
return []
if is_iter(candidates) and not len(candidates):
# if candidates is an empty iterable there can be no matches
# Exit early.
return []
# build query objects
candidates_id = [_GA(obj, "id") for obj in make_iter(candidates) if obj]
cand_restriction = candidates != None and Q(pk__in=make_iter(candidates_id)) or Q()
type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
if exact:
# exact match - do direct search
return self.filter(cand_restriction & type_restriction & (Q(db_key__iexact=ostring) |
Q(db_tags__db_key__iexact=ostring) & Q(db_tags__db_tagtype__iexact="alias"))).distinct()
elif candidates:
# fuzzy with candidates
key_candidates = self.filter(cand_restriction & type_restriction)
else:
# fuzzy without supplied candidates - we select our own candidates
key_candidates = self.filter(type_restriction & (Q(db_key__istartswith=ostring) | Q(db_tags__db_key__istartswith=ostring))).distinct()
candidates_id = [_GA(obj, "id") for obj in key_candidates]
# fuzzy matching
key_strings = key_candidates.values_list("db_key", flat=True).order_by("id")
index_matches = string_partial_matching(key_strings, ostring, ret_index=True)
if index_matches:
return [obj for ind, obj in enumerate(key_candidates) if ind in index_matches]
else:
alias_candidates = self.filter(id__in=candidates_id, db_tags__db_tagtype__iexact="alias")
alias_strings = alias_candidates.values_list("db_key", flat=True)
index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)
if index_matches:
return [alias.db_obj for ind, alias in enumerate(alias_candidates) if ind in index_matches]
return []
# main search methods and helper functions
@returns_typeclass_list
def object_search(self, searchdata,
attribute_name=None,
typeclass=None,
candidates=None,
exact=True):
"""
Search for an object globally or in a list of candidates and
return results. The result is always an Object. Always returns
a list.
Args:
searchdata (str or Object): The entity to match for. This is
usually a key string but may also be an object itself.
By default (if no `attribute_name` is set), this will
search `object.key` and `object.aliases` in order.
Can also be of the form #dbref, which will (if
`exact=True`) be matched against primary key.
attribute_name (str): Use this named Attribute to
match searchdata against, instead of the defaults. If
this is the name of a database field (with or without
the `db_` prefix), that will be matched too.
typeclass (str or TypeClass): restrict matches to objects
having this typeclass. This will help speed up global
searches.
candidates (list): If supplied, search will
only be performed among the candidates in this list. A
common list of candidates is the contents of the
current location searched.
exact (bool): Match names/aliases exactly or partially.
Partial matching matches the beginning of words in the
names/aliases, using a matching routine to separate
multiple matches in names with multiple components (so
"bi sw" will match "Big sword"). Since this is more
expensive than exact matching, it is recommended to be
used together with the `candidates` keyword to limit the
number of possibilities. This value has no meaning if
searching for attributes/properties.
Returns:
matches (list): Matching objects
"""
def _searcher(searchdata, candidates, typeclass, exact=False):
"""
Helper method for searching objects. `typeclass` is only used
for global searching (no candidates)
"""
if attribute_name:
# attribute/property search (always exact).
matches = self.get_objs_with_db_property_value(attribute_name, searchdata, candidates=candidates, typeclasses=typeclass)
if matches:
return matches
return self.get_objs_with_attr_value(attribute_name, searchdata, candidates=candidates, typeclasses=typeclass)
else:
# normal key/alias search
return self.get_objs_with_key_or_alias(searchdata, exact=exact, candidates=candidates, typeclasses=typeclass)
if not searchdata and searchdata != 0:
return []
if typeclass:
# typeclass may also be a list
typeclasses = make_iter(typeclass)
for i, typeclass in enumerate(make_iter(typeclasses)):
if callable(typeclass):
typeclasses[i] = u"%s.%s" % (typeclass.__module__, typeclass.__name__)
else:
typeclasses[i] = u"%s" % typeclass
typeclass = typeclasses
if candidates:
# Convenience check to make sure candidates are really dbobjs
candidates = [cand for cand in make_iter(candidates) if cand]
if typeclass:
candidates = [cand for cand in candidates
if _GA(cand, "db_typeclass_path") in typeclass]
dbref = not attribute_name and exact and self.dbref(searchdata)
if dbref is not None:
# Easiest case - dbref matching (always exact)
dbref_match = self.dbref_search(dbref)
if dbref_match:
if not candidates or dbref_match in candidates:
return [dbref_match]
else:
return []
# Search through all possibilities.
match_number = None
# always run first check exact - we don't want partial matches
# if on the form of 1-keyword etc.
matches = _searcher(searchdata, candidates, typeclass, exact=True)
if not matches:
# no matches found - check if we are dealing with N-keyword
# query - if so, strip it.
match_number, searchdata = _AT_MULTIMATCH_INPUT(searchdata)
# run search again, with the exactness set by call
if match_number is not None or not exact:
matches = _searcher(searchdata, candidates, typeclass, exact=exact)
# deal with result
if len(matches) > 1 and match_number is not None:
# multiple matches, but a number was given to separate them
try:
matches = [matches[match_number]]
except IndexError:
pass
# return a list (possibly empty)
return matches
#
# ObjectManager Copy method
#
def copy_object(self, original_object, new_key=None,
new_location=None, new_home=None,
new_permissions=None, new_locks=None,
new_aliases=None, new_destination=None):
"""
Create and return a new object as a copy of the original object. The
copy will be identical to the original except for the properties
overridden by the arguments given to this method.
Args:
original_object (Object): The object to make a copy from.
new_key (str, optional): Name of the copy, if different
from the original.
new_location (Object, optional): Alternate location.
new_home (Object, optional): Change the home location
new_aliases (list, optional): Give alternate object
aliases as a list of strings.
new_destination (Object, optional): Used only by exits.
Returns:
copy (Object or None): The copy of `original_object`,
optionally modified as per the ingoing keyword
arguments. `None` if an error was encountered.
"""
# get all the object's stats
typeclass_path = original_object.typeclass_path
if not new_key:
new_key = original_object.key
if not new_location:
new_location = original_object.location
if not new_home:
new_home = original_object.home
if not new_aliases:
new_aliases = original_object.aliases.all()
if not new_locks:
new_locks = original_object.db_lock_storage
if not new_permissions:
new_permissions = original_object.permissions.all()
if not new_destination:
new_destination = original_object.destination
# create new object
from evennia.utils import create
from evennia.scripts.models import ScriptDB
new_object = create.create_object(typeclass_path,
key=new_key,
location=new_location,
home=new_home,
permissions=new_permissions,
locks=new_locks,
aliases=new_aliases,
destination=new_destination)
if not new_object:
return None
# copy over all attributes from old to new.
for attr in original_object.attributes.all():
new_object.attributes.add(attr.key, attr.value)
# copy over all cmdsets, if any
for icmdset, cmdset in enumerate(original_object.cmdset.all()):
if icmdset == 0:
new_object.cmdset.add_default(cmdset)
else:
new_object.cmdset.add(cmdset)
# copy over all scripts, if any
for script in original_object.scripts.all():
ScriptDB.objects.copy_script(script, new_obj=new_object)
return new_object
def clear_all_sessids(self):
"""
Clear the db_sessid field of all objects having also the
db_player field set.
"""
self.filter(db_sessid__isnull=False).update(db_sessid=None)
class ObjectManager(ObjectDBManager, TypeclassManager):
pass
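# ---------------------------------------------------------------------------
# Illustrative sketch only: typical calls into the manager above. It assumes
# a configured Evennia environment in which ObjectDB uses ObjectManager;
# `caller` is a placeholder for whatever object initiates the search.
def _example_object_search(caller):
    from evennia.objects.models import ObjectDB

    # exact (case-insensitive) global search by key, alias or #dbref
    exact_matches = ObjectDB.objects.object_search("red button")
    # fuzzy search limited to what is in the caller's location
    local_matches = ObjectDB.objects.object_search(
        "red but", exact=False, candidates=caller.location.contents)
    # match on a named Attribute or database field instead of key/alias
    attr_matches = ObjectDB.objects.object_search(
        "unlocked", attribute_name="state")
    return exact_matches, local_matches, attr_matches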
|
|
import pexpect
import time
import weakref
from fluxgui.exceptions import *
class XfluxController(object):
"""
A controller that starts and interacts with an xflux process.
"""
def __init__(self, color='3400', pause_color='6500', **kwargs):
if 'zipcode' not in kwargs and 'latitude' not in kwargs:
raise XfluxError(
"Required key not found (either zipcode or latitude)")
if 'longitude' not in kwargs:
kwargs['longitude'] = 0
self.init_kwargs = kwargs
self._current_color = str(color)
self._pause_color = str(pause_color)
self.states = {
"INIT": _InitState(self),
"RUNNING": _RunningState(self),
"PAUSED": _PauseState(self),
"TERMINATED": _TerminatedState(self),
}
self.state = self.states["INIT"]
def start(self, startup_args=None):
self.state.start(startup_args)
def stop(self):
self.state.stop()
def preview_color(self, preview_color):
self.state.preview(preview_color)
def toggle_pause(self):
self.state.toggle_pause()
def set_xflux_latitude(self, lat):
self.state.set_setting(latitude=lat)
def set_xflux_longitude(self, longit):
self.state.set_setting(longitude=longit)
def set_xflux_zipcode(self, zipc):
self.state.set_setting(zipcode=zipc)
def _set_xflux_color(self, col):
self.state.set_setting(color=col)
def _get_xflux_color(self):
self._c()
index = self._xflux.expect("Color.*")
color = -1
if index == 0:
color = self._xflux.after[10:14]
return color
color=property(_get_xflux_color, _set_xflux_color)
def _start(self, startup_args=None):
if not startup_args:
startup_args = self._create_startup_arg_list(self._current_color,
**self.init_kwargs)
try:
previous_instances = pexpect.run('pgrep -d, -u %s xflux' % pexpect.run('whoami')).strip()
if previous_instances != "":
for process in previous_instances.split(","):
pexpect.run('kill -9 %s' % process)
self._xflux = pexpect.spawn("xflux", startup_args)
#logfile=file("tmp/xfluxout.txt",'w'))
except pexpect.ExceptionPexpect:
raise FileNotFoundError(
"\nError: Please install xflux in the PATH \n")
def _stop(self):
try:
if self._xflux.terminate(force=True):
return True
else:
return False
except Exception:
# xflux has crashed in the meantime?
return True
def _preview_color(self, preview_color, return_color):
# could probably be implemented better
preview_color = str(preview_color)
self._set_xflux_screen_color(preview_color)
self._c()
#while self.color != preview_color:
#time.sleep(.5)
time.sleep(5)
self._set_xflux_screen_color(return_color)
self._c()
_settings_map = {
'latitude':'l=',
'longitude':'g=',
'zipcode':'z=',
'color':'k=',
}
def _set_xflux_setting(self, **kwargs):
for key, value in kwargs.items():
if key in self._settings_map:
if key == 'color':
self._set_xflux_screen_color(value)
self._current_color = str(value)
# hackish - changing the current color unpauses xflux,
# must reflect that with state change
if self.state == self.states["PAUSED"]:
self.state = self.states["RUNNING"]
else:
self._xflux.sendline(self._settings_map[key]+str(value))
self._c()
def _create_startup_arg_list(self, color='3400', **kwargs):
startup_args = []
if "zipcode" in kwargs and kwargs['zipcode']:
startup_args += ["-z", str(kwargs["zipcode"])]
if "latitude" in kwargs and kwargs['latitude']:
# by default xflux uses latitude even if zipcode is given
startup_args += ["-l", str(kwargs["latitude"])]
if "longitude" in kwargs and kwargs['longitude']:
startup_args += ["-g", str(kwargs["longitude"])]
startup_args += ["-k", str(color), "-nofork"] # nofork is vital
return startup_args
def _change_color_immediately(self, new_color):
self._set_xflux_screen_color(new_color)
self._c()
def _p(self):
# seems to bring color up to "off" then transitions back down (at night)
# takes color down to night color then back up to off (during day)
# I assume this is supposed to be "preview" or something like it
# but it doesn't work the way it should for a preview so it isn't used
self._xflux.sendline("p")
def _c(self):
# prints Colortemp=#### in xflux process
# Also: When called after a color change (sendline(k=#))
# makes changes immediate
# (see use in toggle_pause() and preview_color())
self._xflux.sendline("c")
def _set_xflux_screen_color(self, color):
# use _set_xflux_color unless keeping
# self._current_color the same is necessary
self._xflux.sendline("k="+str(color))
class _XfluxState(object):
can_change_settings = False
def __init__(self, controller_instance):
self.controller_ref = weakref.ref(controller_instance)
def start(self, startup_args):
raise MethodUnavailableError(
"Xflux cannot start in its current state")
def stop(self):
raise MethodUnavailableError(
"Xflux cannot stop in its current state")
def preview(self, preview_color):
raise MethodUnavailableError(
"Xflux cannot preview in its current state")
def toggle_pause(self):
raise MethodUnavailableError(
"Xflux cannot pause/unpause in its current state")
def set_setting(self, **kwargs):
raise MethodUnavailableError(
"Xflux cannot alter settings in its current state")
class _InitState(_XfluxState):
def start(self, startup_args):
self.controller_ref()._start(startup_args)
self.controller_ref().state = self.controller_ref().states["RUNNING"]
def stop(self):
return True
def set_setting(self, **kwargs):
for key, value in kwargs.items():
self.controller_ref().init_kwargs[key] = str(value)
class _TerminatedState(_XfluxState):
def stop(self):
return True
class _AliveState(_XfluxState):
can_change_settings = True
def stop(self):
success = self.controller_ref()._stop()
if success:
self.controller_ref().state = \
self.controller_ref().states["TERMINATED"]
return success
def set_setting(self, **kwargs):
self.controller_ref()._set_xflux_setting(**kwargs)
class _RunningState(_AliveState):
def toggle_pause(self):
self.controller_ref()._change_color_immediately(
self.controller_ref()._pause_color)
self.controller_ref().state = self.controller_ref().states["PAUSED"]
def preview(self, preview_color):
self.controller_ref()._preview_color(preview_color,
self.controller_ref()._current_color)
class _PauseState(_AliveState):
def toggle_pause(self):
self.controller_ref()._change_color_immediately(
self.controller_ref()._current_color)
self.controller_ref().state = self.controller_ref().states["RUNNING"]
def preview(self, preview_color):
self.controller_ref()._preview_color(preview_color,
self.controller_ref()._pause_color)
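# ---------------------------------------------------------------------------
# Illustrative sketch only: driving the controller above through its state
# machine. It assumes an xflux binary on the PATH and a running X session;
# the coordinates and color temperatures are made-up placeholder values.
def _example_xflux_session():
    controller = XfluxController(color='3400', pause_color='6500',
                                 latitude='40.7', longitude='-74.0')
    controller.start()                # INIT -> RUNNING, spawns `xflux ... -nofork`
    controller.preview_color('4200')  # briefly show another color temperature
    controller.toggle_pause()         # RUNNING -> PAUSED at the pause color
    controller.toggle_pause()         # PAUSED -> RUNNING at the current color
    controller.stop()                 # terminate the xflux process
    return controller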
|
|
from hashlib import sha1
import mimetypes
import traceback
import warnings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse
from django.core.validators import ipv4_re
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
import requests
import vidscraper
from djvidscraper.utils import get_api_keys, download_thumbnail
from djvidscraper.signals import (pre_video_import, post_video_import,
pre_feed_import_publish,
post_feed_import_publish)
class FeedImportIdentifier(models.Model):
"""
Represents a single identifier for a video, seen during an import of a
given feed.
"""
identifier_hash = models.CharField(max_length=40)
feed = models.ForeignKey('Feed')
def __unicode__(self):
return self.identifier_hash
class FeedImport(models.Model):
created_timestamp = models.DateTimeField(auto_now_add=True)
modified_timestamp = models.DateTimeField(auto_now=True)
is_complete = models.BooleanField(default=False)
#: Denormalized field displaying (eventually accurate) count of
#: errors during the import process.
error_count = models.PositiveIntegerField(default=0)
#: Denormalized field displaying (eventually accurate) count of
#: videos imported during the import process.
import_count = models.PositiveIntegerField(default=0)
feed = models.ForeignKey('Feed', related_name='imports')
class Meta:
get_latest_by = 'created_timestamp'
ordering = ['-created_timestamp']
def _get_identifier_hashes(self, vidscraper_video):
identifiers = (
vidscraper_video.guid,
vidscraper_video.link,
vidscraper_video.flash_enclosure_url,
vidscraper_video.embed_code
)
if vidscraper_video.files is not None:
identifiers += tuple(f.url for f in vidscraper_video.files
if not f.expires)
return [sha1(i).hexdigest() for i in identifiers if i]
def is_seen(self, vidscraper_video):
hashes = self._get_identifier_hashes(vidscraper_video)
if not hashes:
return False
kwargs = {
'feed': self.feed,
'identifier_hash__in': hashes,
}
return FeedImportIdentifier.objects.filter(**kwargs).exists()
def mark_seen(self, vidscraper_video):
hashes = self._get_identifier_hashes(vidscraper_video)
# TODO: Use bulk_create.
for identifier_hash in hashes:
kwargs = {
'feed': self.feed,
'identifier_hash': identifier_hash,
}
FeedImportIdentifier.objects.create(**kwargs)
def run(self):
feed = self.feed
try:
iterator = feed.get_iterator()
iterator.load()
feed.update_metadata(iterator)
except Exception:
self.record_step(FeedImportStep.IMPORT_ERRORED,
with_traceback=True)
return
try:
for vidscraper_video in iterator:
try:
vidscraper_video.load()
if self.is_seen(vidscraper_video):
self.record_step(FeedImportStep.VIDEO_SEEN)
if feed.stop_if_seen:
break
else:
continue
video = Video.from_vidscraper_video(
vidscraper_video,
status=Video.UNPUBLISHED,
commit=False,
feed=feed,
sites=feed.sites.all(),
owner=feed.owner,
owner_email=feed.owner_email,
owner_session=feed.owner_session,
)
try:
video.clean_fields()
video.validate_unique()
except ValidationError:
self.record_step(FeedImportStep.VIDEO_INVALID,
with_traceback=True)
video.save()
try:
video.save_m2m()
except Exception:
video.delete()
raise
self.mark_seen(vidscraper_video)
self.record_step(FeedImportStep.VIDEO_IMPORTED,
video=video)
except Exception:
self.record_step(FeedImportStep.VIDEO_ERRORED,
with_traceback=True)
# Update timestamp (and potentially counts) after each
# video.
self.save()
except Exception:
self.record_step(FeedImportStep.IMPORT_ERRORED,
with_traceback=True)
# Pt 2: Mark videos active all at once.
if not feed.moderate_imported_videos:
to_publish = Video.objects.filter(feedimportstep__feed_import=self,
status=Video.UNPUBLISHED)
for receiver, response in pre_feed_import_publish.send_robust(
sender=self, to_publish=to_publish):
if response:
# Basic sanity check: should be a video queryset.
if (isinstance(response, models.QuerySet) and
response.model == Video):
to_publish = response
else:
if isinstance(response, Exception):
warnings.warn("pre_feed_import_publish listener "
"raised exception")
else:
warnings.warn("pre_feed_import_publish returned "
"incorrect response")
to_publish.update(status=Video.PUBLISHED)
published = Video.objects.filter(feedimportstep__feed_import=self,
status=Video.PUBLISHED,
published_datetime=now())
post_feed_import_publish.send_robust(sender=self,
published=published)
Video.objects.filter(feedimportstep__feed_import=self,
status=Video.UNPUBLISHED
).update(status=Video.NEEDS_MODERATION)
self.is_complete = True
self.save()
def record_step(self, step_type, video=None, with_traceback=False):
if step_type in (FeedImportStep.VIDEO_ERRORED,
FeedImportStep.IMPORT_ERRORED):
self.error_count += 1
if step_type == FeedImportStep.VIDEO_IMPORTED:
self.import_count += 1
tb = traceback.format_exc() if with_traceback else ''
self.steps.create(step_type=step_type,
video=video,
traceback=tb)
class FeedImportStep(models.Model):
#: Something errored on the import level.
IMPORT_ERRORED = 'import errored'
#: A video was found to already be in the database - i.e. previously
#: imported.
VIDEO_SEEN = 'video seen'
#: Something semi-expected is wrong with the video which prevents
#: it from being imported.
VIDEO_INVALID = 'video invalid'
#: Something unexpected happened during an import of a video.
VIDEO_ERRORED = 'video errored'
#: A video was successfully imported.
VIDEO_IMPORTED = 'video imported'
STEP_TYPE_CHOICES = (
(IMPORT_ERRORED, _(u'Import errored')),
(VIDEO_SEEN, _(u'Video seen')),
(VIDEO_INVALID, _(u'Video invalid')),
(VIDEO_ERRORED, _(u'Video errored')),
(VIDEO_IMPORTED, _(u'Video imported')),
)
step_type = models.CharField(max_length=14,
choices=STEP_TYPE_CHOICES)
video = models.OneToOneField('Video',
blank=True,
null=True,
on_delete=models.SET_NULL)
traceback = models.TextField(blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
feed_import = models.ForeignKey(FeedImport, related_name='steps')
def __unicode__(self):
return unicode(self.step_type)
class Feed(models.Model):
"""
Represents an automated feed import in the database.
"""
sites = models.ManyToManyField(Site)
thumbnail = models.ImageField(
upload_to='djvidscraper/feed/thumbnail/%Y/%m/%d/',
blank=True,
max_length=255)
modified_timestamp = models.DateTimeField(auto_now=True)
created_timestamp = models.DateTimeField(auto_now_add=True)
# Import settings
moderate_imported_videos = models.BooleanField(default=False)
enable_automatic_imports = models.BooleanField(default=True)
# Feeds are expected to stay in the same order.
stop_if_seen = models.BooleanField(default=True)
should_update_metadata = models.BooleanField(
default=True,
verbose_name="Update metadata on next import"
)
#: Original url entered by a user when adding this feed.
original_url = models.URLField(max_length=400)
# Feed metadata
name = models.CharField(max_length=250, blank=True)
description = models.TextField(blank=True)
#: Webpage where the contents of this feed could be browsed.
web_url = models.URLField(blank=True, max_length=400)
# Owner info. Owner is the person who created the video. Should always
# have editing access.
owner = models.ForeignKey('auth.User', null=True, blank=True)
owner_email = models.EmailField(max_length=250,
blank=True)
owner_session = models.ForeignKey('sessions.Session',
blank=True, null=True)
# Cached information from the import.
external_etag = models.CharField(max_length=250, blank=True)
external_last_modified = models.DateTimeField(blank=True, null=True)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('djvidscraper_feed_detail', kwargs={'pk': self.pk})
def start_import(self):
imp = FeedImport()
imp.feed = self
imp.save()
imp.run()
def get_iterator(self):
return vidscraper.auto_feed(
self.original_url,
max_results=None,
api_keys=get_api_keys(),
etag=self.external_etag or None,
last_modified=self.external_last_modified,
)
get_iterator.alters_data = True
def update_metadata(self, iterator):
save = False
# Always update etag and last_modified.
etag = getattr(iterator, 'etag', None) or ''
if (etag and etag != self.external_etag):
self.external_etag = etag
save = True
last_modified = getattr(iterator, 'last_modified', None)
if last_modified is not None:
self.external_last_modified = last_modified
save = True
# If the feed metadata is marked to be updated, do it.
if self.should_update_metadata:
self.name = iterator.title or self.original_url
            self.web_url = iterator.webpage or ''
self.description = iterator.description or ''
# Only update metadata once.
self.should_update_metadata = False
save = True
if save:
self.save()
class Video(models.Model):
UNPUBLISHED = 'unpublished'
NEEDS_MODERATION = 'needs moderation'
PUBLISHED = 'published'
HIDDEN = 'hidden'
STATUS_CHOICES = (
(UNPUBLISHED, _(u'Unpublished')),
(NEEDS_MODERATION, _(u'Needs moderation')),
(PUBLISHED, _(u'Published')),
(HIDDEN, _(u'Hidden')),
)
# Video core data
#: This field contains a URL which a user gave as "the" URL
#: for this video. It may or may not be the same as ``external_url``
#: or a file url. It may not even exist, if they're using embedding.
original_url = models.URLField(max_length=400, blank=True)
# Video metadata
#: Canonical web home of the video as best as we can tell.
web_url = models.URLField(max_length=400, blank=True)
embed_code = models.TextField(blank=True)
flash_enclosure_url = models.URLField(max_length=400, blank=True)
name = models.CharField(max_length=250)
description = models.TextField(blank=True)
thumbnail = models.ImageField(
upload_to='djvidscraper/video/thumbnail/%Y/%m/%d/',
blank=True,
max_length=255)
guid = models.CharField(max_length=250, blank=True)
# Technically duplication, but the only other way to get this would
# be to check the import step's import's feed. Which would be silly.
feed = models.ForeignKey(Feed, blank=True, null=True,
related_name='videos')
# Owner info. Owner is the person who created the video. Should always
# have editing access.
owner = models.ForeignKey('auth.User', null=True, blank=True)
owner_email = models.EmailField(max_length=250,
blank=True)
owner_session = models.ForeignKey('sessions.Session',
blank=True, null=True)
# Cached information from vidscraper.
external_user_username = models.CharField(max_length=250, blank=True)
external_user_url = models.URLField(blank=True, max_length=400)
external_thumbnail_url = models.URLField(blank=True, max_length=400)
external_thumbnail_tries = models.PositiveSmallIntegerField(default=0)
external_published_datetime = models.DateTimeField(null=True, blank=True)
# Other internal use.
sites = models.ManyToManyField(Site)
status = models.CharField(max_length=16,
choices=STATUS_CHOICES,
default=UNPUBLISHED)
modified_timestamp = models.DateTimeField(auto_now=True)
created_timestamp = models.DateTimeField(auto_now_add=True)
published_datetime = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ['-published_datetime', '-modified_timestamp']
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('djvidscraper_video_detail', kwargs={'pk': self.pk})
@classmethod
def from_vidscraper_video(cls, video, status=None, commit=True,
feed=None, sites=None, owner=None,
owner_email=None, owner_session=None):
"""
Builds a :class:`Video` instance from a
:class:`vidscraper.videos.Video` instance. If `commit` is False,
the :class:`Video` will not be saved, and the created instance will
have a `save_m2m()` method that must be called after you call `save()`.
"""
pre_video_import.send_robust(sender=cls, vidscraper_video=video)
if status is None:
status = cls.NEEDS_MODERATION
instance = cls(
original_url=video.url,
web_url=video.link or '',
embed_code=video.embed_code or '',
flash_enclosure_url=video.flash_enclosure_url or '',
name=video.title or '',
description=video.description or '',
guid=video.guid or '',
feed=feed,
owner=owner,
owner_email=owner_email or '',
owner_session=owner_session,
external_user_username=video.user or '',
external_user_url=video.user_url or '',
external_thumbnail_url=video.thumbnail_url or '',
external_published_datetime=video.publish_datetime,
status=status,
published_datetime=now() if status == cls.PUBLISHED else None,
)
if not sites:
sites = [Site.objects.get_current()]
def save_m2m():
instance.sites = sites
if video.files:
for video_file in video.files:
if video_file.expires is None:
VideoFile.objects.create(video=instance,
url=video_file.url,
length=video_file.length,
mimetype=video_file.mime_type)
instance.download_external_thumbnail()
post_video_import.send_robust(sender=cls, instance=instance,
vidscraper_video=video)
if commit:
instance.save()
save_m2m()
else:
instance.save_m2m = save_m2m
return instance
def download_external_thumbnail(self, override_thumbnail=False):
"""Try to download and save an external thumbnail."""
if not self.external_thumbnail_url:
return
if self.thumbnail and not override_thumbnail:
return
from django.conf import settings
max_retries = getattr(settings,
'DJVIDSCRAPER_MAX_DOWNLOAD_RETRIES',
3)
if self.external_thumbnail_tries > max_retries:
return
try:
final_path = download_thumbnail(self.external_thumbnail_url,
self,
'thumbnail')
except Exception:
self.external_thumbnail_tries += 1
self.save()
else:
try:
self.thumbnail = final_path
self.save()
except Exception:
default_storage.delete(final_path)
download_external_thumbnail.alters_data = True
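    # DJVIDSCRAPER_MAX_DOWNLOAD_RETRIES (read above) defaults to 3; a project
    # can raise it in settings, e.g. (hypothetical value):
    #
    #     DJVIDSCRAPER_MAX_DOWNLOAD_RETRIES = 5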
class VideoFile(models.Model):
video = models.ForeignKey(Video, related_name='files')
url = models.URLField(max_length=2048)
length = models.PositiveIntegerField(null=True, blank=True)
mimetype = models.CharField(max_length=60, blank=True)
def fetch_metadata(self):
"""
Do a HEAD request on self.url to try to get metadata
(self.length and self.mimetype).
Note that while this method fills in those attributes, it does *not*
call self.save() - so be sure to do so after calling this method!
"""
if not self.url:
return
try:
response = requests.head(self.url, timeout=5)
if response.status_code == 302:
response = requests.head(response.headers['location'],
timeout=5)
except Exception:
pass
else:
if response.status_code != 200:
return
self.length = response.headers.get('content-length')
self.mimetype = response.headers.get('content-type', '')
if self.mimetype in ('application/octet-stream', ''):
# We got a not-useful MIME type; guess!
guess = mimetypes.guess_type(self.url)
if guess[0] is not None:
self.mimetype = guess[0]
class FeaturedVideo(models.Model):
"""M2M connecting sites to videos."""
site = models.ForeignKey(Site)
video = models.ForeignKey(Video)
order = models.PositiveSmallIntegerField(default=1)
created_timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ('site', 'video')
ordering = ('order', 'created_timestamp')
class WatchManager(models.Manager):
def from_request(self, request, video):
"""
        Creates a Watch based on an HTTP request. If the request came
        from localhost, the forwarded-for header is checked to (hopefully)
        recover the right IP address.
"""
user_agent = request.META.get('HTTP_USER_AGENT', '')
        ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
        if ip in ('127.0.0.1', '::1'):
            # Localhost usually means a proxy forwarded the request.
            ip = request.META.get('HTTP_X_FORWARDED_FOR', ip).split(',')[0].strip()
        if not ipv4_re.match(ip):
            ip = '0.0.0.0'
if hasattr(request, 'user') and request.user.is_authenticated():
user = request.user
else:
user = None
self.create(video=video,
user=user,
ip_address=ip,
user_agent=user_agent)
class Watch(models.Model):
"""
Record of a video being watched.
"""
video = models.ForeignKey(Video)
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
user = models.ForeignKey('auth.User', blank=True, null=True)
ip_address = models.IPAddressField()
    # Watch queries may want to exclude "bot", "spider", "crawler", etc.
# from counts.
user_agent = models.CharField(max_length=255, blank=True)
objects = WatchManager()
|
|
import inspect
import time
import types
import unittest
from mock import (
call, create_autospec, MagicMock,
Mock, ANY, patch, PropertyMock
)
from mock.mock import _Call, _CallList, _callable
from mock import IS_PYPY
from datetime import datetime
from functools import partial
import pytest
class SomeClass(object):
def one(self, a, b): pass
def two(self): pass
def three(self, a=None): pass
class AnyTest(unittest.TestCase):
def test_any(self):
self.assertEqual(ANY, object())
mock = Mock()
mock(ANY)
mock.assert_called_with(ANY)
mock = Mock()
mock(foo=ANY)
mock.assert_called_with(foo=ANY)
def test_repr(self):
self.assertEqual(repr(ANY), '<ANY>')
self.assertEqual(str(ANY), '<ANY>')
def test_any_and_datetime(self):
mock = Mock()
mock(datetime.now(), foo=datetime.now())
mock.assert_called_with(ANY, foo=ANY)
def test_any_mock_calls_comparison_order(self):
mock = Mock()
class Foo(object):
def __eq__(self, other): pass
def __ne__(self, other): pass
for d in datetime.now(), Foo():
mock.reset_mock()
mock(d, foo=d, bar=d)
mock.method(d, zinga=d, alpha=d)
mock().method(a1=d, z99=d)
expected = [
call(ANY, foo=ANY, bar=ANY),
call.method(ANY, zinga=ANY, alpha=ANY),
call(), call().method(a1=ANY, z99=ANY)
]
self.assertEqual(expected, mock.mock_calls)
self.assertEqual(mock.mock_calls, expected)
def test_any_no_spec(self):
# This is a regression test for bpo-37555
class Foo:
def __eq__(self, other): pass
mock = Mock()
mock(Foo(), 1)
mock.assert_has_calls([call(ANY, 1)])
mock.assert_called_with(ANY, 1)
mock.assert_any_call(ANY, 1)
def test_any_and_spec_set(self):
# This is a regression test for bpo-37555
class Foo:
def __eq__(self, other): pass
mock = Mock(spec=Foo)
mock(Foo(), 1)
mock.assert_has_calls([call(ANY, 1)])
mock.assert_called_with(ANY, 1)
mock.assert_any_call(ANY, 1)
class CallTest(unittest.TestCase):
def test_call_with_call(self):
kall = _Call()
self.assertEqual(kall, _Call())
self.assertEqual(kall, _Call(('',)))
self.assertEqual(kall, _Call(((),)))
self.assertEqual(kall, _Call(({},)))
self.assertEqual(kall, _Call(('', ())))
self.assertEqual(kall, _Call(('', {})))
self.assertEqual(kall, _Call(('', (), {})))
self.assertEqual(kall, _Call(('foo',)))
self.assertEqual(kall, _Call(('bar', ())))
self.assertEqual(kall, _Call(('baz', {})))
self.assertEqual(kall, _Call(('spam', (), {})))
kall = _Call(((1, 2, 3),))
self.assertEqual(kall, _Call(((1, 2, 3),)))
self.assertEqual(kall, _Call(('', (1, 2, 3))))
self.assertEqual(kall, _Call(((1, 2, 3), {})))
self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(((1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(('foo', (1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
kall = _Call(({'a': 3},))
self.assertEqual(kall, _Call(('', (), {'a': 3})))
self.assertEqual(kall, _Call(('', {'a': 3})))
self.assertEqual(kall, _Call(((), {'a': 3})))
self.assertEqual(kall, _Call(({'a': 3},)))
def test_empty__Call(self):
args = _Call()
self.assertEqual(args, ())
self.assertEqual(args, ('foo',))
self.assertEqual(args, ((),))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertEqual(args, ({},))
def test_named_empty_call(self):
args = _Call(('foo', (), {}))
self.assertEqual(args, ('foo',))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertNotEqual(args, ((),))
self.assertNotEqual(args, ())
self.assertNotEqual(args, ({},))
self.assertNotEqual(args, ('bar',))
self.assertNotEqual(args, ('bar', ()))
self.assertNotEqual(args, ('bar', {}))
def test_call_with_args(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3),))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3), {}))
self.assertEqual(args.args, (1, 2, 3))
self.assertEqual(args.kwargs, {})
def test_named_call_with_args(self):
args = _Call(('foo', (1, 2, 3), {}))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args.args, (1, 2, 3))
self.assertEqual(args.kwargs, {})
self.assertNotEqual(args, ((1, 2, 3),))
self.assertNotEqual(args, ((1, 2, 3), {}))
def test_call_with_kwargs(self):
args = _Call(((), dict(a=3, b=4)))
self.assertEqual(args, (dict(a=3, b=4),))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ((), dict(a=3, b=4)))
self.assertEqual(args.args, ())
self.assertEqual(args.kwargs, dict(a=3, b=4))
def test_named_call_with_kwargs(self):
args = _Call(('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args.args, ())
self.assertEqual(args.kwargs, dict(a=3, b=4))
self.assertNotEqual(args, (dict(a=3, b=4),))
self.assertNotEqual(args, ((), dict(a=3, b=4)))
def test_call_with_args_call_empty_name(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, call(1, 2, 3))
self.assertEqual(call(1, 2, 3), args)
self.assertIn(call(1, 2, 3), [args])
def test_call_ne(self):
self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
def test_call_non_tuples(self):
kall = _Call(((1, 2, 3),))
for value in 1, None, self, int:
self.assertNotEqual(kall, value)
self.assertFalse(kall == value)
def test_repr(self):
self.assertEqual(repr(_Call()), 'call()')
self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
"call(1, 2, 3, a='b')")
self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
"call.bar(1, 2, 3, a='b')")
self.assertEqual(repr(call), 'call')
self.assertEqual(str(call), 'call')
self.assertEqual(repr(call()), 'call()')
self.assertEqual(repr(call(1)), 'call(1)')
self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
self.assertEqual(repr(call().foo), 'call().foo')
self.assertEqual(repr(call(1).foo.bar(a=3).bing),
'call().foo.bar().bing')
self.assertEqual(
repr(call().foo(1, 2, a=3)),
"call().foo(1, 2, a=3)"
)
self.assertEqual(repr(call()()), "call()()")
self.assertEqual(repr(call(1)(2)), "call()(2)")
self.assertEqual(
repr(call()().bar().baz.beep(1)),
"call()().bar().baz.beep(1)"
)
def test_call(self):
self.assertEqual(call(), ('', (), {}))
self.assertEqual(call('foo', 'bar', one=3, two=4),
('', ('foo', 'bar'), {'one': 3, 'two': 4}))
mock = Mock()
mock(1, 2, 3)
mock(a=3, b=6)
self.assertEqual(mock.call_args_list,
[call(1, 2, 3), call(a=3, b=6)])
def test_attribute_call(self):
self.assertEqual(call.foo(1), ('foo', (1,), {}))
self.assertEqual(call.bar.baz(fish='eggs'),
('bar.baz', (), {'fish': 'eggs'}))
mock = Mock()
mock.foo(1, 2 ,3)
mock.bar.baz(a=3, b=6)
self.assertEqual(mock.method_calls,
[call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
def test_extended_call(self):
result = call(1).foo(2).bar(3, a=4)
self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
mock = MagicMock()
mock(1, 2, a=3, b=4)
self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
self.assertNotEqual(mock.call_args, call(1, 2, 3))
self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
mock = MagicMock()
mock.foo(1).bar()().baz.beep(a=6)
last_call = call.foo(1).bar()().baz.beep(a=6)
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock.mock_calls, last_call.call_list())
def test_extended_not_equal(self):
a = call(x=1).foo
b = call(x=2).foo
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertNotEqual(a, b)
def test_nested_calls_not_equal(self):
a = call(x=1).foo().bar
b = call(x=2).foo().bar
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertNotEqual(a, b)
def test_call_list(self):
mock = MagicMock()
mock(1)
self.assertEqual(call(1).call_list(), mock.mock_calls)
mock = MagicMock()
mock(1).method(2)
self.assertEqual(call(1).method(2).call_list(),
mock.mock_calls)
mock = MagicMock()
mock(1).method(2)(3)
self.assertEqual(call(1).method(2)(3).call_list(),
mock.mock_calls)
mock = MagicMock()
int(mock(1).method(2)(3).foo.bar.baz(4)(5))
kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
self.assertEqual(kall.call_list(), mock.mock_calls)
def test_call_any(self):
self.assertEqual(call, ANY)
m = MagicMock()
int(m)
self.assertEqual(m.mock_calls, [ANY])
self.assertEqual([ANY], m.mock_calls)
def test_two_args_call(self):
args = _Call(((1, 2), {'a': 3}), two=True)
self.assertEqual(len(args), 2)
self.assertEqual(args[0], (1, 2))
self.assertEqual(args[1], {'a': 3})
other_args = _Call(((1, 2), {'a': 3}))
self.assertEqual(args, other_args)
def test_call_with_name(self):
self.assertEqual(_Call((), 'foo')[0], 'foo')
self.assertEqual(_Call((('bar', 'barz'),),)[0], '')
self.assertEqual(_Call((('bar', 'barz'), {'hello': 'world'}),)[0], '')
def test_dunder_call(self):
m = MagicMock()
m().foo()['bar']()
self.assertEqual(
m.mock_calls,
[call(), call().foo(), call().foo().__getitem__('bar'), call().foo().__getitem__()()]
)
m = MagicMock()
m().foo()['bar'] = 1
self.assertEqual(
m.mock_calls,
[call(), call().foo(), call().foo().__setitem__('bar', 1)]
)
m = MagicMock()
iter(m().foo())
self.assertEqual(
m.mock_calls,
[call(), call().foo(), call().foo().__iter__()]
)
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
mock = create_autospec(SomeClass)
self._check_someclass_mock(mock)
mock = create_autospec(SomeClass())
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f(): pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo): pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None): pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a): pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
# because spec as a list of strings in the mock constructor means
# something very different we treat a list instance as the type.
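        # (For contrast, a hypothetical Mock(spec=['append']) would only
        # restrict the *names* of allowed attributes, with no signature
        # checking; create_autospec([]) specs against the list type itself.)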
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_spec_has_descriptor_returning_function(self):
class CrazyDescriptor(object):
def __get__(self, obj, type_):
if obj is None:
return lambda x: None
class MyClass(object):
some_attr = CrazyDescriptor()
mock = create_autospec(MyClass)
mock.some_attr(1)
with self.assertRaises(TypeError):
mock.some_attr()
with self.assertRaises(TypeError):
mock.some_attr(1, 2)
def test_spec_has_function_not_in_bases(self):
class CrazyClass(object):
def __dir__(self):
return super(CrazyClass, self).__dir__()+['crazy']
def __getattr__(self, item):
if item == 'crazy':
return lambda x: x
raise AttributeError(item)
inst = CrazyClass()
with self.assertRaises(AttributeError):
inst.other
self.assertEqual(inst.crazy(42), 42)
mock = create_autospec(inst)
mock.crazy(42)
with self.assertRaises(TypeError):
mock.crazy()
with self.assertRaises(TypeError):
mock.crazy(1, 2)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg): pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b): pass
@staticmethod
def g(a, b): pass
class Bar(Foo): pass
class Baz(SomeClass, Bar): pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self): pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self, x): pass
class Bar(object):
def f(self, y): pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a(x=5)
this_mock.a.assert_called_with(x=5)
this_mock.a.assert_called_with(5)
self.assertRaises(TypeError, this_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a(5)
instance_mock.a.assert_called_with(5)
instance_mock.a.assert_called_with(x=5)
self.assertRaises(TypeError, instance_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
        # The return value isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f(6)
instance_mock.Bar.f.assert_called_with(6)
instance_mock.Bar.f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f(6)
instance_mock.Bar().f.assert_called_with(6)
instance_mock.Bar().f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b): pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
mock.assert_called_with(1, b=2)
mock.assert_called_with(a=1, b=2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
mock.f.assert_called_with(a=3, b=4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
self.assertEqual(RaiserClass.existing(1, 2), 3)
s = create_autospec(RaiserClass)
self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
self.assertEqual(s.existing(1, 2), s.existing.return_value)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3): pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock.assert_called_once_with(a=1)
self.assertRaises(AssertionError, mock.assert_called_once_with, 2)
mock(4, 5)
mock.assert_called_with(4, 5)
mock.assert_called_with(a=4, b=5)
self.assertRaises(AssertionError, mock.assert_called_with, a=5, b=4)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self, x, y): pass
def __call__(self, a): pass
mock = create_autospec(Callable)
mock(1, 2)
mock.assert_called_once_with(1, 2)
mock.assert_called_once_with(x=1, y=2)
self.assertRaises(TypeError, mock, 'a')
instance = mock(1, 2)
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with('a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
instance.assert_called_with(a='a')
mock = create_autospec(Callable(1, 2))
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self): pass
a = create_autospec(Foo)
a.f(10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
a.f(self=10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
def test_autospec_data_descriptor(self):
class Descriptor(object):
def __init__(self, value):
self.value = value
def __get__(self, obj, cls=None):
return self
def __set__(self, obj, value): pass
class MyProperty(property):
pass
class Foo(object):
__slots__ = ['slot']
@property
def prop(self): pass
@MyProperty
def subprop(self): pass
desc = Descriptor(42)
foo = create_autospec(Foo)
def check_data_descriptor(mock_attr):
# Data descriptors don't have a spec.
self.assertIsInstance(mock_attr, MagicMock)
mock_attr(1, 2, 3)
mock_attr.abc(4, 5, 6)
mock_attr.assert_called_once_with(1, 2, 3)
mock_attr.abc.assert_called_once_with(4, 5, 6)
# property
check_data_descriptor(foo.prop)
# property subclass
check_data_descriptor(foo.subprop)
# class __slot__
check_data_descriptor(foo.slot)
# plain data descriptor
check_data_descriptor(foo.desc)
def test_autospec_on_bound_builtin_function(self):
meth = types.MethodType(time.ctime, time.time())
self.assertIsInstance(meth(), str)
mocked = create_autospec(meth)
# no signature, so no spec to check against
mocked()
mocked.assert_called_once_with()
mocked.reset_mock()
# but pypy gets this right:
if IS_PYPY:
with self.assertRaises(TypeError):
mocked(4, 5, 6)
else:
mocked(4, 5, 6)
mocked.assert_called_once_with(4, 5, 6)
def test_autospec_getattr_partial_function(self):
# bpo-32153 : getattr returning partial functions without
# __name__ should not create AttributeError in create_autospec
class Foo:
def __getattr__(self, attribute):
return partial(lambda name: name, attribute)
proxy = Foo()
autospec = create_autospec(proxy)
self.assertFalse(hasattr(autospec, '__name__'))
def test_spec_inspect_signature(self):
def myfunc(x, y): pass
mock = create_autospec(myfunc)
mock(1, 2)
mock(x=1, y=2)
self.assertEqual(inspect.signature(mock), inspect.signature(myfunc))
self.assertEqual(mock.mock_calls, [call(1, 2), call(x=1, y=2)])
self.assertRaises(TypeError, mock, 1)
def test_spec_inspect_signature_annotations(self):
def foo(a: int, b: int=10, *, c:int) -> int:
return a + b + c
self.assertEqual(foo(1, 2 , c=3), 6)
mock = create_autospec(foo)
mock(1, 2, c=3)
mock(1, c=3)
self.assertEqual(inspect.signature(mock), inspect.signature(foo))
self.assertEqual(mock.mock_calls, [call(1, 2, c=3), call(1, c=3)])
self.assertRaises(TypeError, mock, 1)
self.assertRaises(TypeError, mock, 1, 2, 3, c=4)
def test_spec_function_no_name(self):
func = lambda: 'nope'
mock = create_autospec(func)
self.assertEqual(mock.__name__, 'funcopy')
def test_spec_function_assert_has_calls(self):
def f(a): pass
mock = create_autospec(f)
mock(1)
mock.assert_has_calls([call(1)])
with self.assertRaises(AssertionError):
mock.assert_has_calls([call(2)])
def test_spec_function_assert_any_call(self):
def f(a): pass
mock = create_autospec(f)
mock(1)
mock.assert_any_call(1)
with self.assertRaises(AssertionError):
mock.assert_any_call(2)
def test_spec_function_reset_mock(self):
def f(a): pass
rv = Mock()
mock = create_autospec(f, return_value=rv)
mock(1)(2)
self.assertEqual(mock.mock_calls, [call(1)])
self.assertEqual(rv.mock_calls, [call(2)])
mock.reset_mock()
self.assertEqual(mock.mock_calls, [])
self.assertEqual(rv.mock_calls, [])
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
mock = Mock()
self.assertIsInstance(mock.call_args_list, _CallList)
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
self.assertIn(kall, mock.call_args_list)
calls = [call(a=3), call(3, 4)]
self.assertIn(calls, mock.call_args_list)
calls = [call(1, 2), call(a=3)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4), call(b=6)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4)]
self.assertIn(calls, mock.call_args_list)
self.assertNotIn(call('fish'), mock.call_args_list)
self.assertNotIn([call('fish')], mock.call_args_list)
def test_call_list_str(self):
mock = Mock()
mock(1, 2)
mock.foo(a=3)
mock.foo.bar().baz('fish', cat='dog')
expected = (
"[call(1, 2),\n"
" call.foo(a=3),\n"
" call.foo.bar(),\n"
" call.foo.bar().baz('fish', cat='dog')]"
)
self.assertEqual(str(mock.mock_calls), expected)
def test_propertymock(self):
p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
mock = p.start()
try:
SomeClass.one
mock.assert_called_once_with()
s = SomeClass()
s.one
mock.assert_called_with()
self.assertEqual(mock.mock_calls, [call(), call()])
s.one = 3
self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
finally:
p.stop()
def test_propertymock_returnvalue(self):
m = MagicMock()
p = PropertyMock()
type(m).foo = p
returned = m.foo
p.assert_called_once_with()
self.assertIsInstance(returned, MagicMock)
self.assertNotIsInstance(returned, PropertyMock)
class TestCallablePredicate(unittest.TestCase):
def test_type(self):
for obj in [str, bytes, int, list, tuple, SomeClass]:
self.assertTrue(_callable(obj))
def test_call_magic_method(self):
class Callable:
def __call__(self): pass
instance = Callable()
self.assertTrue(_callable(instance))
def test_staticmethod(self):
class WithStaticMethod:
@staticmethod
def staticfunc(): pass
self.assertTrue(_callable(WithStaticMethod.staticfunc))
def test_non_callable_staticmethod(self):
class BadStaticMethod:
not_callable = staticmethod(None)
self.assertFalse(_callable(BadStaticMethod.not_callable))
def test_classmethod(self):
class WithClassMethod:
@classmethod
def classfunc(cls): pass
self.assertTrue(_callable(WithClassMethod.classfunc))
def test_non_callable_classmethod(self):
class BadClassMethod:
not_callable = classmethod(None)
self.assertFalse(_callable(BadClassMethod.not_callable))
if __name__ == '__main__':
unittest.main()
|
|
"""Test the init file for the Insteon component."""
import asyncio
import logging
from pyinsteon.address import Address
from homeassistant.components import insteon
from homeassistant.components.insteon.const import (
CONF_CAT,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_X10,
DOMAIN,
PORT_HUB_V1,
PORT_HUB_V2,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from .const import (
MOCK_ADDRESS,
MOCK_CAT,
MOCK_IMPORT_CONFIG_PLM,
MOCK_IMPORT_FULL_CONFIG_HUB_V1,
MOCK_IMPORT_FULL_CONFIG_HUB_V2,
MOCK_IMPORT_FULL_CONFIG_PLM,
MOCK_IMPORT_MINIMUM_HUB_V1,
MOCK_IMPORT_MINIMUM_HUB_V2,
MOCK_SUBCAT,
MOCK_USER_INPUT_PLM,
PATCH_CONNECTION,
)
from .mock_devices import MockDevices
from tests.async_mock import patch
from tests.common import MockConfigEntry
_LOGGER = logging.getLogger(__name__)
async def mock_successful_connection(*args, **kwargs):
"""Return a successful connection."""
return True
async def mock_failed_connection(*args, **kwargs):
"""Return a failed connection."""
raise ConnectionError("Connection failed")
async def test_setup_entry(hass: HomeAssistantType):
"""Test setting up the entry."""
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM)
config_entry.add_to_hass(hass)
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "async_close") as mock_close, patch.object(
insteon, "devices", new=MockDevices()
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
{},
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
# pylint: disable=no-member
assert insteon.devices.async_save.call_count == 1
assert mock_close.called
async def test_import_plm(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a PLM."""
config = {}
config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_DEVICE] == MOCK_IMPORT_CONFIG_PLM[CONF_PORT]
assert CONF_PORT not in data
async def test_import_hub1(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a hub v1."""
config = {}
config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V1
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V1[CONF_HOST]
assert data[CONF_PORT] == PORT_HUB_V1
assert CONF_USERNAME not in data
assert CONF_PASSWORD not in data
async def test_import_hub2(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a hub v2."""
config = {}
config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V2
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V2[CONF_HOST]
assert data[CONF_PORT] == PORT_HUB_V2
assert data[CONF_USERNAME] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_USERNAME]
assert data[CONF_PASSWORD] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_PASSWORD]
async def test_import_options(hass: HomeAssistantType):
"""Test setting up the entry from YAML including options."""
config = {}
config[DOMAIN] = MOCK_IMPORT_FULL_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01) # Need to yield to async processes
# pylint: disable=no-member
assert insteon.devices.add_x10_device.call_count == 2
assert insteon.devices.set_id.call_count == 1
options = hass.config_entries.async_entries(DOMAIN)[0].options
assert len(options[CONF_OVERRIDE]) == 1
assert options[CONF_OVERRIDE][0][CONF_ADDRESS] == str(Address(MOCK_ADDRESS))
assert options[CONF_OVERRIDE][0][CONF_CAT] == MOCK_CAT
assert options[CONF_OVERRIDE][0][CONF_SUBCAT] == MOCK_SUBCAT
assert len(options[CONF_X10]) == 2
assert options[CONF_X10][0] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][0]
assert options[CONF_X10][1] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][1]
async def test_import_failed_connection(hass: HomeAssistantType):
"""Test a failed connection in import does not create a config entry."""
config = {}
config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_failed_connection
), patch.object(insteon, "async_close"), patch.object(
insteon, "devices", new=MockDevices(connected=False)
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
assert not hass.config_entries.async_entries(DOMAIN)
async def test_setup_entry_failed_connection(hass: HomeAssistantType, caplog):
"""Test setting up the entry with a failed connection."""
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM)
config_entry.add_to_hass(hass)
with patch.object(
insteon, "async_connect", new=mock_failed_connection
), patch.object(insteon, "devices", new=MockDevices(connected=False)):
assert await async_setup_component(
hass,
insteon.DOMAIN,
{},
)
assert "Could not connect to Insteon modem" in caplog.text
|
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TMultiplexedProtocol
import thrift.Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
import importlib
import re
import socket
import struct
import sys
class ThriftClient(object):
MATCH_SPEC_T = "_match_spec_t"
ACTION_SPEC_T = "_action_spec_t"
TABLE_ADD_WITH = "_table_add_with_"
TABLE_MODIFY_WITH = "_table_modify_with_"
TABLE_DELETE = "_table_delete"
ADD_MEMBER_WITH = "_add_member_with_"
MODIFY_MEMBER_WITH = "_modify_member_with_"
DEL_MEMBER = "_del_member"
CREATE_GROUP = "_create_group"
DEL_GROUP = "_del_group"
GET_FIRST_ENTRY_HANDLE = "_get_first_entry_handle"
GET_NEXT_ENTRY_HANDLES = "_get_next_entry_handles"
GET_ENTRY = "_get_entry"
THRIFT_SPEC = "thrift_spec"
SET_DEFAULT_ACTION = "_set_default_action_"
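    # The generated PD RPC bindings expose methods named "<name><suffix>",
    # where the suffixes are the constants above, e.g. (hypothetical table
    # and action names) "ipv4_lpm_table_add_with_set_nhop" or
    # "ecmp_profile_add_member_with_set_nhop"; the helpers below assemble
    # those names and look them up with getattr().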
def __init__(self, module, hostname, port, p4_name):
self.p4_client_module = importlib.import_module(".".join(["p4_pd_rpc", p4_name]))
self.mc_client_module = importlib.import_module(".".join(["mc_pd_rpc", "mc"]))
self.conn_mgr_client_module = importlib.import_module(".".join(["conn_mgr_pd_rpc",
"conn_mgr"]))
self._p4_name = p4_name
self._utils = importlib.import_module("utils")
self.setup(hostname, port)
self._session_handle = self._conn_mgr.client_init(16)
from res_pd_rpc.ttypes import DevTarget_t
self._dev_target = DevTarget_t(0, self._utils.hex_to_i16(0xFFFF))
def get_spec_prefix(self):
return self._p4_name + '_'
def setup(self, hostname, port):
# Set up thrift client and contact server
self._transport = TSocket.TSocket(hostname, port)
self._transport = TTransport.TBufferedTransport(self._transport)
bprotocol = TBinaryProtocol.TBinaryProtocol(self._transport)
self._mc_protocol = TMultiplexedProtocol.TMultiplexedProtocol(bprotocol, "mc")
self._conn_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(bprotocol, "conn_mgr")
self._p4_protocol = TMultiplexedProtocol.TMultiplexedProtocol(bprotocol, self._p4_name)
self._client = self.p4_client_module.Client(self._p4_protocol)
self._mc = self.mc_client_module.Client(self._mc_protocol)
self._conn_mgr = self.conn_mgr_client_module.Client(self._conn_mgr_protocol)
self._transport.open()
def get_match_field_names(self, table_name):
return self.get_parameter_names(table_name, ThriftClient.MATCH_SPEC_T)
def get_action_parameter_names(self, action_name):
return self.get_parameter_names(action_name, ThriftClient.ACTION_SPEC_T)
def get_spec_class(self, name, spec_suffix):
spec_name = self.get_spec_prefix() + name + spec_suffix
return getattr(self.p4_client_module, spec_name)
def get_parameter_names(self, name, spec_suffix):
try:
spec_class = self.get_spec_class(name, spec_suffix)
parameter_names = [x[2] for x in spec_class.thrift_spec[1:]]
except AttributeError:
raise AttributeError("Spec not found for %s" % name)
return parameter_names
def set_default_action(self, table_name, action_name, action_spec_tuple):
add_entry_parameters = [self._session_handle, self._dev_target]
if action_spec_tuple != ():
add_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
return self.get_set_default_action_function(table_name, action_name)(*add_entry_parameters)
def add_entry(self, table_name, match_spec_tuple, action_name, action_spec_tuple, priority):
match_spec = self.get_match_spec(table_name, match_spec_tuple)
add_entry_parameters = [self._session_handle, self._dev_target, match_spec]
        if priority is not None:
add_entry_parameters.append(priority)
if action_spec_tuple != ():
add_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
return self.get_add_entry_function(table_name, action_name)(*add_entry_parameters)
def add_entry_with_selector(self, table_name, match_spec_tuple, group_handle):
match_spec = self.get_match_spec(table_name, match_spec_tuple)
add_entry_with_selector_parameters = [self._session_handle,
self._dev_target, match_spec, int(group_handle)]
return self.get_add_entry_with_selector(table_name)(*add_entry_with_selector_parameters)
def add_entry_with_member(self, table_name, match_spec_tuple, member_handle):
match_spec = self.get_match_spec(table_name, match_spec_tuple)
add_entry_with_member_parameters = [self._session_handle,
self._dev_target, match_spec, int(member_handle)]
return self.get_add_entry_with_member(table_name)(*add_entry_with_member_parameters)
def modify_entry(self, table_name, entry_handle, action_name, action_spec_tuple):
modify_entry_parameters = [ self._session_handle, self._dev_target.dev_id, int(entry_handle) ]
        if action_spec_tuple != ():
modify_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
return self.get_modify_entry_function(table_name, action_name)(*modify_entry_parameters)
def delete_entry(self, table_name, entry_handle):
delete_entry_function_name = "%s%s" % (table_name, ThriftClient.TABLE_DELETE)
return getattr(self._client, delete_entry_function_name)(self._session_handle, self._dev_target.dev_id, int(entry_handle))
    def add_member(self, action_profile_name, action_name, action_spec_tuple):
        add_entry_parameters = [self._session_handle, self._dev_target]
        if action_spec_tuple != ():
            add_entry_parameters.append(self.get_action_spec(action_name, action_spec_tuple))
        return self.get_add_member_function(action_profile_name, action_name)(*add_entry_parameters)
def delete_member(self, action_profile_name, member_handle):
return self.get_delete_member_function(action_profile_name)(self._session_handle, self._dev_target.dev_id, int(member_handle))
def create_group(self, action_profile_name, max_group_size):
return self.get_create_group_function(action_profile_name)(self._session_handle, self._dev_target, int(max_group_size))
def delete_group(self, action_profile_name, group_handle):
return self.get_delete_group_function(action_profile_name)(self._session_handle, self._dev_target.dev_id, group_handle)
def get_first_entry_handle(self, table_name):
first_entry_handle = int(self.get_get_first_entry_handle_function(table_name)(self._session_handle, self._dev_target))
if first_entry_handle < 0:
return "No entry handle found"
else:
return first_entry_handle
def get_next_entry_handles(self, table_name, entry_handle, n):
return self.get_get_next_entry_handles_function(table_name)(self._session_handle, self._dev_target.dev_id, entry_handle, n)
def show_entry(self, table_name, entry_handle):
return self.get_show_entry_function(table_name)(self._session_handle, self._dev_target.dev_id, entry_handle)
def get_match_spec(self, table_name, match_spec_tuple):
match_spec_class = self.get_spec_class(table_name, ThriftClient.MATCH_SPEC_T)
return self.get_spec_from_spec_tuple(match_spec_class, match_spec_tuple)
def get_action_spec(self, action_name, action_spec_tuple):
action_spec_class = self.get_spec_class(action_name, ThriftClient.ACTION_SPEC_T)
return self.get_spec_from_spec_tuple(action_spec_class, action_spec_tuple)
def get_spec_from_spec_tuple(self, spec_class, spec_string):
thrift_spec = getattr(spec_class, ThriftClient.THRIFT_SPEC)
spec_parameters = []
for i in range(1, len(thrift_spec)):
parameter_type = thrift_spec[i][1]
if parameter_type == thrift.Thrift.TType.STRING:
is_success = False
try:
parameter = self._utils.macAddr_to_string(spec_string[i - 1])
if len(parameter) == 6:
spec_parameters.append(parameter)
is_success = True
except:
pass
if not is_success:
try:
parameter = socket.inet_pton(socket.AF_INET6, spec_string[i - 1])
if len(parameter) == 16:
spec_parameters.append(parameter)
is_success = True
except:
pass
if not is_success:
parameter = spec_string[i - 1]
try:
width, v = parameter.split('w')
width = int(width)
assert(width > 0)
v = int(v, 0)
except:
print "Make sure you prepend the length (in bytes) of the field"
print "A valid input is 8w0x55 for a 64-bit field set to 0x55"
raise ValueError("Cannot parse %s to TType.STRING" % parameter)
array = []
while v > 0:
array.append(v % 256)
v /= 256
width -= 1
if width < 0:
print "Value overflow"
raise ValueError("Cannot parse %s to TType.STRING" % parameter)
while width > 0:
array.append(0)
width -= 1
array.reverse()
parameter = self._utils.bytes_to_string(array)
spec_parameters.append(parameter)
if parameter_type == thrift.Thrift.TType.BYTE:
spec_parameters.append(self._utils.hex_to_byte(spec_string[i - 1]))
if parameter_type == thrift.Thrift.TType.I16:
parameter = int(spec_string[i - 1], 0)
spec_parameters.append(self._utils.hex_to_i16(parameter))
if parameter_type == thrift.Thrift.TType.I32:
is_success = False
try:
spec_parameters.append(self._utils.ipv4Addr_to_i32(spec_string[i - 1]))
is_success = True
except:
pass
if not is_success:
parameter = int(spec_string[i - 1], 0)
try:
spec_parameters.append(self._utils.hex_to_i32(parameter))
except socket.error:
raise ValueError("Cannot parse %s to TType.I32" % spec_string[i - 1])
return spec_class(*spec_parameters)
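    # Spec-string formats accepted by get_spec_from_spec_tuple above
    # (illustrative, hypothetical values):
    #   TType.STRING: "00:11:22:33:44:55" (MAC), "2001:db8::1" (IPv6), or a
    #                 width-prefixed integer such as "6w0x0a" (6 bytes wide);
    #   TType.BYTE:   a hex byte such as "0x1f";
    #   TType.I16:    an integer literal such as "0x1f4" or "500";
    #   TType.I32:    an IPv4 address such as "10.0.0.1", or an integer.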
def get_table_names(self):
table_names = []
for function in dir(self.p4_client_module):
regex = '^(?P<table_name>\S+)%s' % (ThriftClient.SET_DEFAULT_ACTION)
m = re.search(regex, function)
if m is not None and m.group("table_name") not in table_names:
table_names.append(m.group("table_name"))
return table_names
def get_action_names(self, parent_object_name):
action_names = []
for function in dir(self._client):
regex = '^%s%s(?P<action_name>\S+)' % (parent_object_name, ThriftClient.TABLE_ADD_WITH)
m = re.search(regex, function)
if m is not None:
action_names.append(m.group("action_name"))
else:
regex = '^%s%s(?P<action_name>\S+)' % (parent_object_name, ThriftClient.ADD_MEMBER_WITH)
m = re.search(regex, function)
if m is not None:
action_names.append(m.group("action_name"))
return action_names
def get_match_data_names(self, table_name):
match_spec_class = self.get_spec_class(table_name, ThriftClient.MATCH_SPEC_T)
return [ x[2] for x in match_spec_class.thrift_spec[1:] ]
def get_action_data_names(self, action_name):
action_spec_class = self.get_spec_class(action_name, ThriftClient.ACTION_SPEC_T)
return [ x[2] for x in action_spec_class.thrift_spec[1:] ]
def get_add_entry_function(self, table_name, action_name):
add_entry_function_name = "%s%s%s" % (table_name, ThriftClient.TABLE_ADD_WITH, action_name)
return getattr(self._client, add_entry_function_name)
def get_set_default_action_function(self, table_name, action_name):
add_entry_function_name = "%s%s%s" % (table_name, ThriftClient.SET_DEFAULT_ACTION, action_name)
return getattr(self._client, add_entry_function_name)
def get_modify_entry_function(self, table_name, action_name):
modify_entry_function_name = "%s%s%s" % (table_name, ThriftClient.TABLE_MODIFY_WITH, action_name)
return getattr(self._client, modify_entry_function_name)
def get_get_first_entry_handle_function(self, table_name):
get_first_entry_handle_function_name = "%s%s" % (table_name, ThriftClient.GET_FIRST_ENTRY_HANDLE)
return getattr(self._client, get_first_entry_handle_function_name)
def get_get_next_entry_handles_function(self, table_name):
get_next_entry_handles_function_name = "%s%s" % (table_name, ThriftClient.GET_NEXT_ENTRY_HANDLES)
return getattr(self._client, get_next_entry_handles_function_name)
def get_show_entry_function(self, table_name):
show_entry_function_name = "%s%s" % (table_name, ThriftClient.GET_ENTRY)
return getattr(self._client, show_entry_function_name)
def get_add_member_function(self, action_profile_name, action_name):
add_member_function_name = "%s%s%s" % (action_profile_name, ThriftClient.ADD_MEMBER_WITH, action_name)
return getattr(self._client, add_member_function_name)
def get_modify_member_function(self, action_profile_name, action_name):
modify_member_function_name = "%s%s%s" % (action_profile_name, ThriftClient.MODIFY_MEMBER_WITH, action_name)
return getattr(self._client, modify_member_function_name)
def get_delete_member_function(self, action_profile_name):
delete_member_function_name = "%s%s" % (action_profile_name, ThriftClient.DEL_MEMBER)
return getattr(self._client, delete_member_function_name)
def get_create_group_function(self, action_profile_name):
create_group_function_name = "%s%s" % (action_profile_name, ThriftClient.CREATE_GROUP)
return getattr(self._client, create_group_function_name)
def get_delete_group_function(self, action_profile_name):
delete_group_function_name = "%s%s" % (action_profile_name, ThriftClient.DEL_GROUP)
return getattr(self._client, delete_group_function_name)
# Multicast api
def mc_mgrp_create(self, mgid):
return self._mc.mc_mgrp_create(self._session_handle, self._dev_target.dev_id, mgid)
def mc_node_create(self, rid, port_map, lag_map):
return self._mc.mc_node_create(self._session_handle, self._dev_target.dev_id, rid, port_map, lag_map)
def mc_node_update(self, l1_hdl, port_map, lag_map):
        return self._mc.mc_node_update(self._session_handle, self._dev_target.dev_id, l1_hdl, port_map, lag_map)
def mc_mgrp_destroy(self, mgrp_hdl):
return self._mc.mc_mgrp_destroy(self._session_handle, self._dev_target.dev_id, mgrp_hdl)
def mc_node_destroy(self, l1_hdl):
return self._mc.mc_node_destroy(self._session_handle, self._dev_target.dev_id, l1_hdl)
def mc_associate_node(self, grp_hdl, l1_hdl):
return self._mc.mc_associate_node(self._session_handle, self._dev_target.dev_id, grp_hdl, l1_hdl)
def mc_dissociate_node(self, grp_hdl, l1_hdl):
return self._mc.mc_dissociate_node(self._session_handle, self._dev_target.dev_id, grp_hdl, l1_hdl)
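# Editor's illustrative sketch (not part of the original client): shows how the
# dynamically named helpers above can be combined to add a table entry. The
# helper name and the final call are assumptions; the exact arguments expected
# by the generated Thrift function depend on the compiled P4 program.
def add_entry_example(client, table_name, action_name, match_values, action_values):
    # Resolve the generated spec classes and build them positionally, the same
    # way parse_spec_string constructs spec_class(*spec_parameters).
    match_spec_class = client.get_spec_class(table_name, ThriftClient.MATCH_SPEC_T)
    action_spec_class = client.get_spec_class(action_name, ThriftClient.ACTION_SPEC_T)
    match_spec = match_spec_class(*match_values)
    action_spec = action_spec_class(*action_values)
    # Look up the table's add-with-action function and invoke it with the
    # client's session handle and device target (attribute names taken from
    # the multicast helpers above).
    add_entry = client.get_add_entry_function(table_name, action_name)
    return add_entry(client._session_handle, client._dev_target, match_spec, action_spec)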
|
|
"""
Views that inherit from Django's class-based generic views and add methods
for building flat files.
"""
import os
import six
import sys
import gzip
import shutil
import logging
import mimetypes
from django.conf import settings
from bakery import DEFAULT_GZIP_CONTENT_TYPES
from django.test.client import RequestFactory
from django.views.generic import ListView, RedirectView
from django.views.generic import TemplateView, DetailView
from django.core.urlresolvers import reverse, NoReverseMatch
logger = logging.getLogger(__name__)
class BuildableMixin(object):
"""
Common methods we will use in buildable views.
"""
def get_content(self):
"""
How to render the HTML or other content for the page.
If you choose to render using something other than a Django template,
like HttpResponse for instance, you will want to override this.
"""
return self.get(self.request).render().content
def prep_directory(self, path):
"""
Prepares a new directory to store the file at the provided path,
if needed.
"""
dirname = os.path.dirname(path)
if dirname:
dirname = os.path.join(settings.BUILD_DIR, dirname)
os.path.exists(dirname) or os.makedirs(dirname)
def build_file(self, path, html):
if self.is_gzippable(path):
self.gzip_file(path, html)
else:
self.write_file(path, html)
def write_file(self, path, html):
"""
Writes out the provided HTML to the provided path.
"""
logger.debug("Building HTML file to %s" % path)
outfile = open(path, 'wb')
outfile.write(six.binary_type(html))
outfile.close()
def is_gzippable(self, path):
"""
Returns a boolean indicating if the provided file path is a candidate
for gzipping.
"""
# First check if gzipping is allowed by the global setting
if not getattr(settings, 'BAKERY_GZIP', False):
return False
# Then check if the content type of this particular file is gzippable
whitelist = getattr(
settings,
'GZIP_CONTENT_TYPES',
DEFAULT_GZIP_CONTENT_TYPES
)
return mimetypes.guess_type(path)[0] in whitelist
def gzip_file(self, path, html):
"""
Zips up the provided HTML as a companion for the provided path.
Intended to take advantage of the peculiarities of
Amazon S3's GZIP service.
        The mtime option, which writes a timestamp to the output file,
        is set to 0 to avoid having s3cmd do unnecessary uploads because
        of differences in the timestamp.
"""
logger.debug("Building gzipped HTML file to %s" % path)
        # gzip.GzipFile only accepts the mtime argument on Python 2.7 and later.
        if sys.version_info >= (2, 7):
            outfile = gzip.GzipFile(path, 'wb', mtime=0)
        else:
            outfile = gzip.GzipFile(path, 'wb')
outfile.write(six.binary_type(html))
outfile.close()
class BuildableTemplateView(TemplateView, BuildableMixin):
"""
Renders and builds a simple template.
When inherited, the child class should include the following attributes.
build_path:
The target location of the built file in the BUILD_DIR.
`index.html` would place it at the built site's root.
`foo/index.html` would place it inside a subdirectory.
template_name:
The name of the template you would like Django to render.
"""
@property
def build_method(self):
return self.build
def build(self):
logger.debug("Building %s" % self.template_name)
self.request = RequestFactory().get(self.build_path)
path = os.path.join(settings.BUILD_DIR, self.build_path)
self.prep_directory(self.build_path)
self.build_file(path, self.get_content())
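# Editor's illustrative sketch (not part of the library): a child class only
# needs the two attributes described in the docstring above. The path and
# template name here are hypothetical.
class ExampleAboutPageView(BuildableTemplateView):
    build_path = 'about/index.html'
    template_name = 'about.html'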
class BuildableListView(ListView, BuildableMixin):
"""
    Renders and builds a page about a list of objects.
Required attributes:
model or queryset:
Where the list of objects should come from. `self.queryset` can
be any iterable of items, not just a queryset.
build_path:
The target location of the built file in the BUILD_DIR.
`index.html` would place it at the built site's root.
`foo/index.html` would place it inside a subdirectory.
            `index.html` is the default.
template_name:
The name of the template you would like Django to render. You need
to override this if you don't want to rely on the Django defaults.
"""
build_path = 'index.html'
@property
def build_method(self):
return self.build_queryset
def build_queryset(self):
logger.debug("Building %s" % self.build_path)
self.request = RequestFactory().get(self.build_path)
self.prep_directory(self.build_path)
path = os.path.join(settings.BUILD_DIR, self.build_path)
self.build_file(path, self.get_content())
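# Editor's illustrative sketch (not part of the library): as noted in the
# docstring, `self.queryset` can be any iterable, so a plain list works for
# illustration. The template name is hypothetical and build_path falls back to
# the 'index.html' default.
class ExampleLinkListView(BuildableListView):
    queryset = ['example-item-1', 'example-item-2']
    template_name = 'link_list.html'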
class BuildableDetailView(DetailView, BuildableMixin):
"""
Render and build a "detail" view of an object.
Required attributes:
queryset:
            The queryset from which the objects to build are looked up.
template_name:
The name of the template you would like Django to render. You need
to override this if you don't want to rely on the Django defaults.
"""
@property
def build_method(self):
return self.build_queryset
def get_url(self, obj):
"""
The URL at which the detail page should appear.
"""
return obj.get_absolute_url()
def get_build_path(self, obj):
"""
Used to determine where to build the detail page. Override this if you
would like your detail page at a different location. By default it
will be built at get_url() + "index.html"
"""
path = os.path.join(settings.BUILD_DIR, self.get_url(obj)[1:])
os.path.exists(path) or os.makedirs(path)
return os.path.join(path, 'index.html')
def set_kwargs(self, obj):
self.kwargs = {
'pk': getattr(obj, 'pk', None),
'slug': getattr(obj, self.get_slug_field(), None),
}
def build_object(self, obj):
logger.debug("Building %s" % obj)
self.request = RequestFactory().get(self.get_url(obj))
self.set_kwargs(obj)
path = self.get_build_path(obj)
self.build_file(path, self.get_content())
def build_queryset(self):
        for obj in self.get_queryset().all():
            self.build_object(obj)
def unbuild_object(self, obj):
"""
Deletes the directory at self.get_build_path.
"""
logger.debug("Unbuilding %s" % obj)
path = os.path.split(self.get_build_path(obj))[0]
if os.path.exists(path):
shutil.rmtree(path)
class Buildable404View(BuildableTemplateView):
"""
The default Django 404 page, but built out.
"""
build_path = '404.html'
template_name = '404.html'
class BuildableRedirectView(RedirectView, BuildableMixin):
"""
Render and build a redirect.
Required attributes:
build_path:
The URL being requested, which will be published as a flatfile
with a redirect away from it.
url:
            The URL where the redirect will send the user. Operates
in the same way as the standard generic RedirectView.
"""
permanent = True
def get_content(self):
html = """
<html>
<head>
<meta http-equiv="Refresh" content="1;url=%s" />
</head>
<body></body>
</html>
"""
html = html % self.get_redirect_url()
return html.encode("utf-8")
@property
def build_method(self):
return self.build
def build(self):
logger.debug("Building redirect from %s to %s" % (
self.build_path,
self.get_redirect_url()
))
self.request = RequestFactory().get(self.build_path)
path = os.path.join(settings.BUILD_DIR, self.build_path)
self.prep_directory(self.build_path)
self.build_file(path, self.get_content())
def get_redirect_url(self, *args, **kwargs):
"""
Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
return None
return url
def post_publish(self, bucket):
logger.debug("Adding S3 redirect header from %s to %s" % (
self.build_path,
self.get_redirect_url()
))
key = bucket.get_key(self.build_path)
key.copy(
key.bucket,
key.name,
preserve_acl=True,
metadata={'Content-Type': 'text/html'}
)
key.set_redirect(self.get_redirect_url())
key.make_public()
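# Editor's illustrative sketch (not part of the library): publishes a flat file
# at the old URL whose content refreshes the browser to the new URL. Both paths
# are hypothetical.
class ExampleMovedPageRedirectView(BuildableRedirectView):
    build_path = 'old-page/index.html'
    url = '/new-page/'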
|
|
# -*- coding: utf-8 -*-
"""This file contains a helper library to read binary files."""
import binascii
import logging
import os
from plaso.lib import py2to3
def ByteArrayCopyToString(byte_array, codepage=u'utf-8'):
"""Copies a UTF-8 encoded byte array into a Unicode string.
Args:
byte_array: A byte array containing an UTF-8 encoded string.
codepage: The codepage of the byte stream.
Returns:
A Unicode string.
"""
byte_stream = b''.join(map(chr, byte_array))
return ByteStreamCopyToString(byte_stream, codepage=codepage)
def ByteStreamCopyToString(byte_stream, codepage=u'utf-8'):
"""Copies a UTF-8 encoded byte stream into a Unicode string.
Args:
byte_stream: A byte stream containing an UTF-8 encoded string.
codepage: The codepage of the byte stream.
Returns:
A Unicode string.
"""
try:
string = byte_stream.decode(codepage)
except UnicodeDecodeError:
logging.warning(
u'Unable to decode {0:s} formatted byte stream.'.format(codepage))
string = byte_stream.decode(codepage, errors='ignore')
string, _, _ = string.partition(u'\x00')
return string
def ByteStreamCopyToUTF16Stream(byte_stream, byte_stream_size=None):
"""Reads an UTF-16 formatted stream from a byte stream.
The UTF-16 formatted stream should be terminated by an end-of-string
character (\x00\x00). Otherwise the function reads up to the byte stream size.
Args:
byte_stream: The byte stream that contains the UTF-16 formatted stream.
byte_stream_size: Optional byte stream size or None if the entire
byte stream should be read.
Returns:
String containing the UTF-16 formatted stream.
"""
byte_stream_index = 0
if not byte_stream_size:
byte_stream_size = len(byte_stream)
while byte_stream_index + 1 < byte_stream_size:
if (byte_stream[byte_stream_index] == b'\x00' and
byte_stream[byte_stream_index + 1] == b'\x00'):
break
byte_stream_index += 2
return byte_stream[0:byte_stream_index]
def ReadUTF16Stream(file_object, offset=None, byte_size=0):
"""Reads an UTF-16 formatted stream from a file-like object.
Reads an UTF-16 formatted stream that's terminated by
an end-of-string character (\x00\x00) or up to the byte size.
Args:
file_object: A file-like object to read the data from.
offset: An offset into the file object data, if -1 or not set
the current location into the file object data is used.
byte_size: Maximum number of bytes to read or 0 if the function
should keep reading up to the end of file.
Returns:
    A Unicode string.
"""
if offset is not None:
file_object.seek(offset, os.SEEK_SET)
char_buffer = []
stream_index = 0
char_raw = file_object.read(2)
while char_raw:
if byte_size and stream_index >= byte_size:
break
if b'\x00\x00' in char_raw:
break
char_buffer.append(char_raw)
stream_index += 2
char_raw = file_object.read(2)
return ReadUTF16(b''.join(char_buffer))
def UTF16StreamCopyToString(byte_stream, byte_stream_size=None):
"""Copies an UTF-16 formatted byte stream to a string.
The UTF-16 formatted byte stream should be terminated by an end-of-string
character (\x00\x00). Otherwise the function reads up to the byte stream size.
Args:
byte_stream: The UTF-16 formatted byte stream.
byte_stream_size: The byte stream size or None if the entire byte stream
should be used.
Returns:
    A Unicode string.
"""
utf16_stream = ByteStreamCopyToUTF16Stream(
byte_stream, byte_stream_size=byte_stream_size)
try:
return utf16_stream.decode(u'utf-16-le')
except (UnicodeDecodeError, UnicodeEncodeError) as exception:
logging.error(u'Unable to decode string: {0:s} with error: {1:s}'.format(
HexifyBuffer(utf16_stream), exception))
return utf16_stream.decode(u'utf-16-le', errors=u'ignore')
def ArrayOfUTF16StreamCopyToString(byte_stream, byte_stream_size=None):
"""Copies an array of UTF-16 formatted byte streams to an array of strings.
The UTF-16 formatted byte stream should be terminated by an end-of-string
character (\x00\x00). Otherwise the function reads up to the byte stream size.
Args:
byte_stream: The UTF-16 formatted byte stream.
byte_stream_size: The byte stream size or None if the entire byte stream
should be used.
Returns:
An array of Unicode strings.
"""
array_of_strings = []
utf16_stream_start = 0
byte_stream_index = 0
if not byte_stream_size:
byte_stream_size = len(byte_stream)
while byte_stream_index + 1 < byte_stream_size:
if (byte_stream[byte_stream_index] == b'\x00' and
byte_stream[byte_stream_index + 1] == b'\x00'):
if byte_stream_index - utf16_stream_start <= 2:
break
array_of_strings.append(
byte_stream[utf16_stream_start:byte_stream_index].decode(
u'utf-16-le'))
utf16_stream_start = byte_stream_index + 2
byte_stream_index += 2
return array_of_strings
def ArrayOfUTF16StreamCopyToStringTable(byte_stream, byte_stream_size=None):
"""Copies an array of UTF-16 formatted byte streams to a string table.
The string table is a dict of strings with the byte offset as their key.
The UTF-16 formatted byte stream should be terminated by an end-of-string
character (\x00\x00). Otherwise the function reads up to the byte stream size.
Args:
byte_stream: The UTF-16 formatted byte stream.
byte_stream_size: The byte stream size or None if the entire byte stream
should be used.
Returns:
A dict of Unicode strings with the byte offset as their key.
"""
string_table = {}
utf16_stream_start = 0
byte_stream_index = 0
if not byte_stream_size:
byte_stream_size = len(byte_stream)
while byte_stream_index + 1 < byte_stream_size:
if (byte_stream[byte_stream_index] == b'\x00' and
byte_stream[byte_stream_index + 1] == b'\x00'):
if byte_stream_index - utf16_stream_start <= 2:
break
string = byte_stream[utf16_stream_start:byte_stream_index].decode(
u'utf-16-le')
string_table[utf16_stream_start] = string
utf16_stream_start = byte_stream_index + 2
byte_stream_index += 2
return string_table
def ReadUTF16(string_buffer):
"""Returns a decoded UTF-16 string from a string buffer."""
if isinstance(string_buffer, (list, tuple)):
use_buffer = u''.join(string_buffer)
else:
use_buffer = string_buffer
if not isinstance(use_buffer, py2to3.STRING_TYPES):
return u''
try:
return use_buffer.decode(u'utf-16').replace(u'\x00', u'')
except SyntaxError as exception:
logging.error(u'Unable to decode string: {0:s} with error: {1:s}.'.format(
HexifyBuffer(string_buffer), exception))
except (UnicodeDecodeError, UnicodeEncodeError) as exception:
logging.error(u'Unable to decode string: {0:s} with error: {1:s}'.format(
HexifyBuffer(string_buffer), exception))
return use_buffer.decode(u'utf-16', errors=u'ignore').replace(u'\x00', u'')
def HexifyBuffer(string_buffer):
"""Return a string with the hex representation of a string buffer."""
chars = []
for char in string_buffer:
chars.append(binascii.hexlify(char))
return u'\\x{0:s}'.format(u'\\x'.join(chars))
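# Editor's illustrative sketch (not part of the original module): a minimal
# demonstration of the helpers above on hand-built byte values.
if __name__ == '__main__':
  # b'abc' encoded as UTF-16 little-endian and terminated by \x00\x00.
  utf16_bytes = b'a\x00b\x00c\x00\x00\x00'
  print(UTF16StreamCopyToString(utf16_bytes))  # abc
  # A UTF-8 byte array copied into a Unicode string; data after \x00 is dropped.
  print(ByteArrayCopyToString(bytearray(b'abc\x00ignored')))  # abc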
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def nhwc_to_format(x, data_format):
"""Converts a numpy array from NHWC format to `data_format`."""
rank = len(x.shape)
if data_format == "NCHW":
return np.transpose(x, [0, rank - 1] + list(range(1, rank - 1)))
elif data_format == "NHWC":
return x
else:
raise ValueError("Unknown format {}".format(data_format))
class UnaryOpsTest(xla_test.XLATestCase):
"""Test cases for unary operators."""
def _assertOpOutputMatchesExpected(self,
op,
inp,
expected,
equality_test=None,
rtol=1e-3,
atol=1e-5):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
equality_test: either None, or a function that tests two numpy arrays for
equality. If None, self.assertAllClose is used.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name="a")
output = op(pinp)
result = session.run(output, {pinp: inp})
if equality_test is None:
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
else:
equality_test(result, expected, rtol=rtol, atol=atol)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in xrange(len(result)):
self.assertAllClose(result[i], expected[i], rtol, atol)
@test_util.disable_mlir_bridge(
"MlirHloBuilder::Iota missing required for xla::Diag")
def testAllTypeOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([1, 2, 3, 4], dtype=dtype),
np.array(
[[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag_part,
np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array(
[[[[1, 0], [0, 0]], [[0, 2], [0, 0]]], [[[0, 0], [3, 0]],
[[0, 0], [0, 4]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.identity,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.prevent_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[[[]]]]], dtype=dtype),
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1], [2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1]], [[2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.stop_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
def testLog(self):
for dtype in self.float_types - {dtypes.bfloat16.as_numpy_dtype}:
tol = 1e-4 if dtype == np.float32 else 1e-9
x = np.linspace(-np.e, np.e, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.log, x, expected=np.log(x), atol=tol, rtol=tol)
x = np.linspace(0., np.e * 1e-30, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.log, x, expected=np.log(x), atol=tol, rtol=tol)
x = np.linspace(0., np.pi * 1e30, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.log, x, expected=np.log(x), atol=tol, rtol=tol)
def testSin(self):
for dtype in self.float_types - {dtypes.bfloat16.as_numpy_dtype}:
tol = 1e-6 if dtype == np.float32 else 1e-12
x = np.linspace(-4 * np.e, 4 * np.e, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.sin, x, expected=np.sin(x), rtol=tol, atol=tol)
x = np.linspace(0., np.e * 1e-30, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.sin, x, expected=np.sin(x), rtol=tol, atol=tol)
if dtype == np.float64:
x = np.linspace(0., np.e * 1e8, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.sin, x, expected=np.sin(x), rtol=tol, atol=1e-5)
def testCos(self):
for dtype in self.float_types - {dtypes.bfloat16.as_numpy_dtype}:
tol = 1e-6 if dtype == np.float32 else 1e-12
x = np.linspace(-4 * np.e, 4 * np.e, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.cos, x, expected=np.cos(x), rtol=tol, atol=tol)
x = np.linspace(0., np.e * 1e-30, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.cos, x, expected=np.cos(x), rtol=tol, atol=tol)
if dtype == np.float64:
x = np.linspace(0., np.e * 1e8, num=1000, dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.cos, x, expected=np.cos(x), rtol=tol, atol=1e-5)
@test_util.disable_mlir_bridge(
"TODO(b/153812660): Handle tf.Softmax compilation")
def testFloatOps(self):
for dtype in self.float_types:
x = np.arange(-0.90, 0.90, 0.25)
self._assertOpOutputMatchesExpected(
math_ops.acos, x.astype(dtype), expected=np.arccos(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.asin, x.astype(dtype), expected=np.arcsin(x).astype(dtype))
x = np.arange(-3, 3).reshape(1, 3, 2)
self._assertOpOutputMatchesExpected(
math_ops.atan, x.astype(dtype), expected=np.arctan(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0, 1.3169579, 1.76274717, 2.06343707], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0.88137359, 1.44363548, 1.81844646, 2.09471255], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
expected=np.array(
[0.10033535, 0.20273255, 0.3095196, 0.42364893], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.ceil,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-1, 2]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.54308063, 3.76219569, 10.067662, 27.30823284], dtype=dtype))
# Disable float16 testing for now
if dtype != np.float16:
x = np.arange(-10, 10, 1).astype(dtype)
with self.session() as session:
erf_x = session.run(math_ops.erf(x))
erfc_x = session.run(math_ops.erfc(x))
self._assertOpOutputMatchesExpected(math_ops.erf, x, expected=erf_x)
self._assertOpOutputMatchesExpected(math_ops.erfc, x, expected=erfc_x)
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0.36787945, 2.7182817]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-0.63212056, 1.71828183]], dtype=dtype),
rtol=1e-5)
self._assertOpOutputMatchesExpected(
math_ops.floor,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool))
# Tests for tf.nn ops.
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[[]]], dtype=dtype), expected=dtype(0))
self._assertOpOutputMatchesExpected(nn_ops.l2_loss, dtype(4), dtype(8))
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[-2, 4]], dtype=dtype), expected=dtype(10))
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[1, 0.5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0, 0.69314718]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.841478, 0.909302]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.540297, -0.41614]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15, 0.6]], dtype=dtype),
expected=np.log1p(np.array([[1e-14, 1e-15, 0.6]],
dtype=dtype)).astype(dtype),
rtol=1e-4,
atol=1e-6)
self._assertOpOutputMatchesExpected(
math_ops.rint,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.round,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.rsqrt,
np.array([[4, 16]], dtype=dtype),
expected=np.array([[0.5, 0.25]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.7310586, 0.7310586, 0.7310586, 0.7310586],
[0.7310586, 0.880797, 0.95257413, 0.98201376]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([-300, -150, 0, 150, 300], dtype=dtype),
expected=np.array([0, 0, 0.5, 1, 1], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.17520119, 3.62686041, 10.01787493, 27.2899172], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sqrt,
np.array([[4, 9]], dtype=dtype),
expected=np.array([[2, 3]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype))
# TODO(b/130689556): Turn this on for CPU when we start honoring NaNs.
if self.device != "XLA_CPU":
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([[1, 2, 3, 4], [np.inf, -np.inf, np.nan, 20],
[19, -19, 22, -22]],
dtype=dtype),
expected=np.array(
[[0.76159418, 0.96402758, 0.99505478, 0.99932933],
[1.0, -1.0, np.nan, 1.0], [1.0, -1.0, 1.0, -1.0]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.log_softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
[-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.elu,
np.array([[-1, 0, 1, -1e-6]], dtype=dtype),
expected=np.array([[-0.63212056, 0, 1, -9.999995e-07]], dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.selu,
np.array([[-1, 0, 1, -1e-5]], dtype=dtype),
expected=np.array(
[[-1.11133074, 0., 1.05070099, -1.758090550379974e-05]],
dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.relu,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu6,
np.array([[-0.05, 6.05, 5]], dtype=dtype),
expected=np.array([[0, 6, 5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.leaky_relu,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array([[-0.4, -0.2, 0.0, 1.0, 2.0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([0.032058604, 0.087144323, 0.23688284, 0.64391428],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.25, 0.25, 0.25, 0.25],
[0.032058604, 0.087144323, 0.23688284, 0.64391428]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[[1, 1], [1, 1]], [[1, 2], [3, 4]]], dtype=dtype),
expected=np.array(
[[[0.5, 0.5], [0.5, 0.5]],
[[0.26894142, 0.73105858], [0.26894142, 0.73105858]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softsign,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array(
[[-0.66666669, -0.5, 0, 0.5, 0.66666669]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sign,
np.array([[-2.0, -1.0, -0.0, +0.0, 1.0, 2.0]], dtype=dtype),
expected=np.array([[-1.0, -1.0, -0.0, +0.0, 1.0, 1.0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[42, float("inf"), -123], [float("nan"), 0, -0.0]], dtype=dtype),
expected=np.array(
[[True, False, True], [False, True, True]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(0.5, dtype=dtype),
expected=np.array(np.log(np.pi) / 2, dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(
[[1, 2, 3], [4, 5, 6], [1 / 2, 3 / 2, 5 / 2],
[-3 / 2, -7 / 2, -11 / 2]],
dtype=dtype),
expected=np.array(
[
[0, 0, np.log(2.0)],
[np.log(6.0), np.log(24.0),
np.log(120)],
[
np.log(np.pi) / 2,
np.log(np.pi) / 2 - np.log(2),
np.log(np.pi) / 2 - np.log(4) + np.log(3)
],
[
np.log(np.pi) / 2 - np.log(3) + np.log(4),
np.log(np.pi) / 2 - np.log(105) + np.log(16),
np.log(np.pi) / 2 - np.log(10395) + np.log(64),
],
],
dtype=dtype))
# The actual result is complex. Take the real part.
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array([-1 / 2, -5 / 2, -9 / 2], dtype=dtype),
expected=np.array(
[
np.log(np.pi) / 2 + np.log(2),
np.log(np.pi) / 2 - np.log(15) + np.log(8),
np.log(np.pi) / 2 - np.log(945) + np.log(32),
],
dtype=dtype),
atol=1e-4)
self._assertOpOutputMatchesExpected(
math_ops.digamma,
np.array(
[[1.0, 0.5, 1 / 3.0], [0.25, 1 / 6.0, 0.125], [2.0, 3.0, 4.0],
[6.0, 8.0, 9.0]],
dtype=dtype),
expected=np.array(
[
[
-np.euler_gamma, -2 * np.log(2) - np.euler_gamma,
-np.pi / 2 / np.sqrt(3) - 3 * np.log(3) / 2 -
np.euler_gamma
],
[
-np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
-np.pi * np.sqrt(3) / 2 - 2 * np.log(2) -
3 * np.log(3) / 2 - np.euler_gamma,
-np.pi / 2 - 4 * np.log(2) -
(np.pi + np.log(2 + np.sqrt(2)) - np.log(2 - np.sqrt(2)))
/ np.sqrt(2) - np.euler_gamma
],
[
1 - np.euler_gamma, 1.5 - np.euler_gamma,
11 / 6.0 - np.euler_gamma
],
[
137 / 60.0 - np.euler_gamma, 363 / 140.0 - np.euler_gamma,
761 / 280.0 - np.euler_gamma
],
],
dtype=dtype))
def quantize_and_dequantize_v2(x):
return array_ops.quantize_and_dequantize_v2(
x, -127, 127, signed_input=True, num_bits=8)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def quantize_and_dequantize_v2_round_half_up(x):
return array_ops.quantize_and_dequantize_v2(
x,
-1,
1.0,
signed_input=True,
num_bits=8,
range_given=True,
round_mode="HALF_UP")
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2_round_half_up,
np.array([-0.8, -0.5, 0, 0.3, 0.8, -2, 33], dtype=dtype),
expected=np.array([
-102.0 / 127,
-63.0 / 127,
0,
38.0 / 127,
102.0 / 127,
-128.0 / 127,
1,
],
dtype=dtype))
def quantize_and_dequantize_v2_round_half_to_even(x):
return array_ops.quantize_and_dequantize_v2(
x,
-1.0,
1.0,
signed_input=True,
num_bits=8,
range_given=True,
round_mode="HALF_TO_EVEN")
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2_round_half_to_even,
np.array(
[
-0.8,
# The -0.5 should become -63.5 after scaling and with
# rounding this should become -64. But with the test
# unary_ops_test_cpu_ondemand, this fails as the result
# before scaling becomes -63.499996 and gets rounded to -63.
# TODO(sreenik): Some one more familiar with this test needs
# to take a look and resolve this. This works on all other
# variations of the platform like cpu, and gpu.
# -0.5,
0,
0.3,
0.8,
-2,
33
],
dtype=dtype),
expected=np.array(
[
-102.0 / 127,
# -64.0 / 127,
0,
38.0 / 127,
102.0 / 127,
-128.0 / 127,
1,
],
dtype=dtype))
def quantize_and_dequantize_v3(x):
return array_ops.quantize_and_dequantize_v3(
x, -127, 127, num_bits=8, signed_input=True, range_given=False)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v3,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
@test_util.disable_mlir_bridge(
"Complex types not supported in CreateDenseElementsAttrFromLiteral")
def testComplexOps(self):
for dtype in self.complex_types:
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arccosh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arcsinh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arctanh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype),
expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.expm1(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)),
rtol=1e-6,
atol=1e-6)
# For real part close to zero, or imaginary part close to a multiple of
# pi.
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[1e-11 + 1j, -1e-11 - 1j, 1. + 1e-11j,
-1. - 1e-11j, 1e-13j + 1e-13j]], dtype=dtype),
# TODO(srvasude): Use numpy as the source of truth after we depend on
# latest numpy with this pull request:
# https://github.com/numpy/numpy/pull/15110.
# The numbers below were generated by scipy.special.expm1.
expected=np.array([[
-4.59697694e-01+8.41470985e-01j,
-4.59697694e-01-8.41470985e-01j,
1.71828183e+00+2.71828183e-11j,
-6.32120559e-01-3.67879441e-12j,
-2.00000000e-26+2.00000000e-13j]], dtype=dtype),
rtol=1e-09,
atol=1e-20)
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2j, 2 + 3j]], dtype=dtype),
expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.log(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.sin(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.cos(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype),
expected=np.log1p(
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype)),
rtol=1e-4,
atol=1e-6)
val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.rsqrt, val, expected=1 / np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val)))
self._assertOpOutputMatchesExpected(
math_ops.sqrt, val, expected=np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
ctypes = {np.complex64: np.float32, np.complex128: np.float64}
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[3 - 4j, -1j, np.inf]], dtype=dtype),
expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1 + 2j, -3j]], dtype=dtype),
expected=np.array([[1 - 2j, 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype),
expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2)
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.angle,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.angle(np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.conj,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.imag,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.real,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
@test_util.disable_mlir_bridge("TODO(b/153896312): Handle unsigned ints")
def testIntOps(self):
for dtype in self.int_types:
self._assertOpOutputMatchesExpected(
bitwise_ops.invert,
np.array([0, -1, 1, 16, 42], dtype=dtype),
expected=np.array([-1, 0, -2, -17, -43], dtype=dtype))
def testNumericOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[2, -1]], dtype=dtype),
expected=np.array([[2, 1]], dtype=np.real(dtype(0)).dtype))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[1, -1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2, 3]], dtype=dtype),
expected=np.array([[4, 9]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
# TODO(phawkins): these tests fail unless fastmath optimizations
# are disabled. Use more robust IsInf/IsNaN detection and enable these
# tests.
@unittest.skip("test case fails in fast-math mode")
def testIsInfAndIsNan(self):
for dtype in self.float_types:
self._assertOpOutputMatchesExpected(
math_ops.is_inf,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.is_nan,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.sign,
np.array([[np.nan]], dtype=dtype),
expected=np.array([[0.0]], dtype=dtype))
def testLogicalOps(self):
self._assertOpOutputMatchesExpected(
math_ops.logical_not,
np.array([[True, False], [False, True]], dtype=np.bool),
expected=np.array([[False, True], [True, False]], dtype=np.bool))
def testBiasAddGrad(self):
self._assertOpOutputMatchesExpected(
gen_nn_ops.bias_add_grad,
np.array([[1., 2.], [3., 4.]], dtype=np.float32),
expected=np.array([4., 6.], dtype=np.float32))
self._assertOpOutputMatchesExpected(
lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
np.array(
[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dtype=np.float32),
expected=np.array([14., 22.], dtype=np.float32))
@test_util.disable_mlir_bridge("TODO(b/153812660): Handle tf.Cast compilation"
)
def testCast(self):
shapes = [[], [4], [2, 3], [2, 0, 4]]
types = {
dtypes.bool, dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.int32, dtypes.int64, dtypes.uint32, dtypes.uint64
}
for src_type in types:
for dst_type in types:
src_np_dtype = src_type.as_numpy_dtype
dst_np_dtype = dst_type.as_numpy_dtype
for shape in shapes:
src = np.arange(np.prod(shape)).astype(src_np_dtype)
if src_type in self.complex_tf_types:
src += (np.arange(np.prod(shape)) * 2j).astype(src_np_dtype)
src = src.reshape(shape)
dst = src.astype(dst_np_dtype)
self._assertOpOutputMatchesExpected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst)
# Check special values.
if src_type.is_integer:
imin = np.iinfo(src_np_dtype).min
imax = np.iinfo(src_np_dtype).max
src = np.array([imin, imax, 0, 1, -1], dtype=src_np_dtype)
elif src_type in self.float_tf_types:
if dst_type.is_integer:
imin = np.iinfo(dst_np_dtype).min
imax = np.iinfo(dst_np_dtype).max // 2
src = np.array([imin, imax, 0, 1], dtype=src_np_dtype)
elif dst_type in self.float_tf_types:
fmin = np.finfo(dst_np_dtype).min
fmax = np.finfo(dst_np_dtype).max
tiny = np.finfo(dst_np_dtype).tiny
eps = np.finfo(dst_np_dtype).eps
src = np.array(
[fmin, fmax, np.nan, eps, -eps, tiny, -tiny, np.inf, -np.inf],
dtype=src_np_dtype)
dst = src.astype(dst_np_dtype)
self._assertOpOutputMatchesExpected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst)
@test_util.disable_mlir_bridge(
"TODO(b/153812660): Handle tf.Bitcast compilation")
def testBitcast(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1, 0x3f800000], np.int32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.float32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1e-45, 1.0], np.float32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1e-45, 1.0], np.float32),
expected=np.array([1, 0x3f800000], np.int32))
if np.int64 in self.numeric_types:
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int64),
np.array([1, 0x100000003f800000], np.uint64),
expected=np.array([1, 0x100000003f800000], np.int64))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.uint64),
np.array([1, 0x100000003f800000], np.int64),
expected=np.array([1, 0x100000003f800000], np.uint64))
@test_util.disable_mlir_bridge(
"TODO(b/153812660): Handle tf.InvertPermutation compilation")
def testInvertPermutation(self):
self._assertOpOutputMatchesExpected(
array_ops.invert_permutation,
np.array([1, 2, 0], np.int32),
expected=np.array([2, 0, 1], dtype=np.int32))
@test_util.disable_mlir_bridge(
"TODO(b/153812660): Handle tf.InvertPermutation compilation")
def testInvertPermutationTwiceIsNoop(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.invert_permutation(array_ops.invert_permutation(x)),
np.array([1, 2, 0], np.int32),
expected=np.array([1, 2, 0], dtype=np.int32))
def testRank(self):
rank_op = lambda x: array_ops.rank_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op, np.array([-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(2))
def testShape(self):
shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
shape_op, dtype(7), expected=np.array([], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[], []], dtype=dtype),
expected=np.array([2, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([-1, 1], dtype=dtype),
expected=np.array([2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([1, 2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.array([3, 1], dtype=np.int32))
def testSize(self):
size_op = lambda x: array_ops.size_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op, dtype(7), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
size_op, np.array([[], []], dtype=dtype), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
size_op, np.array([-1, 1], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(3))
def testSizeWithInt64OutType(self):
def size_op(x):
return array_ops.size_internal(x, optimize=False, out_type=np.int64)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int64(3))
def testUnpack(self):
self._assertOpOutputMatchesExpected(
array_ops.unstack,
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 2.], dtype=np.float32),
np.array([3., 4.], dtype=np.float32),
np.array([5., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
self._assertOpOutputMatchesExpected(
lambda x: array_ops.unstack(x, axis=1),
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 3., 5.], dtype=np.float32),
np.array([2., 4., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
@test_util.disable_mlir_bridge(
"TODO(b/153812660): Handle tf.DepthToSpace compilation")
def testDepthToSpace(self):
def make_op(data_format):
def op(x):
return array_ops.depth_to_space(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op("NCHW_VECT_C"),
np.arange(32, dtype=dtype).reshape((1, 8, 1, 1, 4)),
expected=np.array([[[[[0, 1], [8, 9]], [[16, 17], [24, 25]]],
[[[2, 3], [10, 11]], [[18, 19], [26, 27]]],
[[[4, 5], [12, 13]], [[20, 21], [28, 29]]],
[[[6, 7], [14, 15]], [[22, 23], [30, 31]]]]],
dtype=dtype))
@test_util.disable_mlir_bridge(
"TODO(b/153812660): Handle tf.SpaceToDepth compilation")
def testSpaceToDepth(self):
def make_op(data_format):
def op(x):
return array_ops.space_to_depth(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op("NCHW_VECT_C"),
np.arange(32, dtype=dtype).reshape((1, 2, 2, 2, 4)),
expected=np.array([[[[[0, 1, 2, 3, 16, 17, 18, 19]]],
[[[4, 5, 6, 7, 20, 21, 22, 23]]],
[[[8, 9, 10, 11, 24, 25, 26, 27]]],
[[[12, 13, 14, 15, 28, 29, 30, 31]]]]],
dtype=dtype))
def _assertSoftplusMatchesExpected(self, features, dtype):
features = np.array(features, dtype=dtype)
zero = np.asarray(0).astype(dtype)
expected = np.logaddexp(zero, features).astype(dtype)
self._assertOpOutputMatchesExpected(
nn_ops.softplus, features, expected=expected, rtol=1e-6, atol=9.1e-6)
@test_util.disable_mlir_bridge(
"bf16 type not supported in CreateDenseElementsAttrFromLiteral")
def testSoftplus(self):
for dtype in self.float_types:
self._assertSoftplusMatchesExpected([[-2, 0, 8]], dtype)
self._assertSoftplusMatchesExpected(
[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]], dtype)
if dtype == dtypes.bfloat16.as_numpy_dtype:
log_eps = np.log(np.finfo(np.float32).eps)
else:
log_eps = np.log(np.finfo(dtype).eps)
one = dtype(1)
ten = dtype(10)
self._assertSoftplusMatchesExpected([
log_eps, log_eps - one, log_eps + one, log_eps - ten, log_eps + ten,
-log_eps, -log_eps - one, -log_eps + one, -log_eps - ten,
-log_eps + ten
], dtype)
if __name__ == "__main__":
googletest.main()
|
|
import json
import os
import tempfile
from django.conf import settings
from django.core.cache import cache
from django.core.files.storage import default_storage as storage
from django.db.models import Q
from django.test.utils import override_settings
import mock
from PIL import Image
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.helpers import user_media_path
from olympia.amo.tests import (
addon_factory, formset, initial, req_factory_factory)
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.urlresolvers import reverse
from olympia.addons.forms import AddonFormBasic
from olympia.addons.models import (
Addon, AddonCategory, AddonDependency, Category)
from olympia.bandwagon.models import (
Collection, CollectionAddon, FeaturedCollection)
from olympia.devhub.models import ActivityLog
from olympia.devhub.views import edit_theme
from olympia.tags.models import Tag, AddonTag
from olympia.users.models import UserProfile
def get_section_url(addon, section, edit=False):
args = [addon.slug, section]
if edit:
args.append('edit')
return reverse('devhub.addons.section', args=args)
@override_settings(MEDIA_ROOT=None) # Make it overridable.
class BaseTestEdit(TestCase):
fixtures = ['base/users', 'base/addon_3615',
'base/addon_5579', 'base/addon_3615_categories']
listed = True
def setUp(self):
# Make new for each test.
settings.MEDIA_ROOT = tempfile.mkdtemp()
super(BaseTestEdit, self).setUp()
assert self.client.login(email='del@icio.us')
addon = self.get_addon()
if self.listed:
self.make_addon_listed(addon)
ac = AddonCategory.objects.filter(addon=addon, category__id=22)[0]
ac.feature = False
ac.save()
AddonCategory.objects.filter(addon=addon,
category__id__in=[1, 71]).delete()
cache.clear()
self.tags = ['tag3', 'tag2', 'tag1']
for t in self.tags:
Tag(tag_text=t).save_tag(addon)
else:
self.make_addon_unlisted(addon)
self.url = addon.get_dev_url()
self.user = UserProfile.objects.get(pk=55021)
self.addon = self.get_addon()
def get_addon(self):
return Addon.objects.no_cache().get(id=3615)
def get_url(self, section, edit=False):
return get_section_url(self.addon, section, edit)
def get_dict(self, **kw):
result = {'name': 'new name', 'slug': 'test_slug',
'summary': 'new summary'}
if self.listed:
fs = formset(self.cat_initial, initial_count=1)
result.update({'is_experimental': True,
'tags': ', '.join(self.tags)})
result.update(fs)
result.update(**kw)
return result
class BaseTestEditBasic(BaseTestEdit):
def setUp(self):
super(BaseTestEditBasic, self).setUp()
self.basic_edit_url = self.get_url('basic', edit=True)
if self.listed:
ctx = self.client.get(self.basic_edit_url).context
self.cat_initial = initial(ctx['cat_form'].initial_forms[0])
def test_redirect(self):
# /addon/:id => /addon/:id/edit
response = self.client.get(
'/en-US/developers/addon/3615/', follow=True)
self.assert3xx(response, self.url, 301)
def test_edit(self):
old_name = self.addon.name
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
addon = self.get_addon()
assert unicode(addon.name) == data['name']
assert addon.name.id == old_name.id
assert unicode(addon.slug) == data['slug']
assert unicode(addon.summary) == data['summary']
if self.listed:
assert [unicode(t) for t in addon.tags.all()] == sorted(self.tags)
def test_edit_check_description(self):
# Make sure bug 629779 doesn't return.
old_desc = self.addon.description
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
addon = self.get_addon()
assert addon.description == old_desc
def test_edit_slug_invalid(self):
old_edit = self.basic_edit_url
data = self.get_dict(name='', slug='invalid')
response = self.client.post(self.basic_edit_url, data)
doc = pq(response.content)
assert doc('form').attr('action') == old_edit
def test_edit_slug_valid(self):
old_edit = self.basic_edit_url
data = self.get_dict(slug='valid')
response = self.client.post(self.basic_edit_url, data)
doc = pq(response.content)
assert doc('form').attr('action') != old_edit
def test_edit_summary_escaping(self):
data = self.get_dict()
data['summary'] = '<b>oh my</b>'
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
# Fetch the page so the LinkifiedTranslation gets in cache.
response = self.client.get(
reverse('devhub.addons.edit', args=[data['slug']]))
assert pq(response.content)('[data-name=summary]').html().strip() == (
'<span lang="en-us"><b>oh my</b></span>')
# Now make sure we don't have escaped content in the rendered form.
form = AddonFormBasic(instance=self.get_addon(),
request=req_factory_factory('/'))
html = pq('<body>%s</body>' % form['summary'])('[lang="en-us"]').html()
assert html.strip() == '<b>oh my</b>'
def test_edit_as_developer(self):
self.login('regular@mozilla.com')
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
# Make sure we get errors when they are just regular users.
        assert response.status_code == (403 if self.listed else 404)
devuser = UserProfile.objects.get(pk=999)
self.get_addon().addonuser_set.create(
user=devuser, role=amo.AUTHOR_ROLE_DEV)
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
addon = self.get_addon()
assert unicode(addon.name) == data['name']
assert unicode(addon.slug) == data['slug']
assert unicode(addon.summary) == data['summary']
if self.listed:
assert [unicode(t) for t in addon.tags.all()] == sorted(self.tags)
def test_edit_name_required(self):
data = self.get_dict(name='', slug='test_addon')
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
self.assertFormError(
response, 'form', 'name', 'This field is required.')
def test_edit_name_spaces(self):
data = self.get_dict(name=' ', slug='test_addon')
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
self.assertFormError(
response, 'form', 'name', 'This field is required.')
def test_edit_slugs_unique(self):
Addon.objects.get(id=5579).update(slug='test_slug')
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
self.assertFormError(
response, 'form', 'slug',
'This slug is already in use. Please choose another.')
def test_edit_name_not_empty(self):
data = self.get_dict(name='', slug=self.addon.slug,
summary=self.addon.summary)
response = self.client.post(self.basic_edit_url, data)
self.assertFormError(
response, 'form', 'name', 'This field is required.')
def test_edit_name_max_length(self):
data = self.get_dict(name='xx' * 70, slug=self.addon.slug,
summary=self.addon.summary)
response = self.client.post(self.basic_edit_url, data)
self.assertFormError(response, 'form', 'name',
'Ensure this value has at most 50 '
'characters (it has 140).')
def test_edit_summary_max_length(self):
data = self.get_dict(name=self.addon.name, slug=self.addon.slug,
summary='x' * 251)
response = self.client.post(self.basic_edit_url, data)
self.assertFormError(response, 'form', 'summary',
'Ensure this value has at most 250 '
'characters (it has 251).')
def test_nav_links(self):
if self.listed:
links = [
self.addon.get_dev_url('edit'), # Edit Information
self.addon.get_dev_url('owner'), # Manage Authors
self.addon.get_dev_url('profile'), # Manage Developer Profile
self.addon.get_dev_url('payments'), # Manage Payments
self.addon.get_dev_url('versions'), # Manage Status & Versions
self.addon.get_url_path(), # View Listing
reverse('devhub.feed', args=[self.addon.slug]), # View Recent
reverse('stats.overview', args=[self.addon.slug]), # Stats
reverse('compat.reporter_detail', args=[self.addon.guid]),
]
else:
links = [
self.addon.get_dev_url('edit'), # Edit Information
self.addon.get_dev_url('owner'), # Manage Authors
self.addon.get_dev_url('versions'), # Manage Status & Versions
reverse('devhub.feed', args=[self.addon.slug]), # View Recent
reverse('compat.reporter_detail', args=[self.addon.guid]),
]
response = self.client.get(self.url)
doc_links = [
unicode(a.attrib['href'])
for a in pq(response.content)('#edit-addon-nav').find('li a')]
assert links == doc_links
class TestEditBasicListed(BaseTestEditBasic):
def test_edit_add_tag(self):
count = ActivityLog.objects.all().count()
self.tags.insert(0, 'tag4')
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
result = pq(response.content)('#addon_tags_edit').eq(0).text()
assert result == ', '.join(sorted(self.tags))
html = ('<a href="/en-US/firefox/tag/tag4">tag4</a> added to '
'<a href="/en-US/firefox/addon/test_slug/">new name</a>.')
assert ActivityLog.objects.for_addons(self.addon).get(
action=amo.LOG.ADD_TAG.id).to_string() == html
assert ActivityLog.objects.filter(
action=amo.LOG.ADD_TAG.id).count() == count + 1
def test_edit_denied_tag(self):
Tag.objects.get_or_create(tag_text='blue', denied=True)
data = self.get_dict(tags='blue')
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
error = 'Invalid tag: blue'
self.assertFormError(response, 'form', 'tags', error)
def test_edit_denied_tags_2(self):
Tag.objects.get_or_create(tag_text='blue', denied=True)
Tag.objects.get_or_create(tag_text='darn', denied=True)
data = self.get_dict(tags='blue, darn, swearword')
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
error = 'Invalid tags: blue, darn'
self.assertFormError(response, 'form', 'tags', error)
def test_edit_denied_tags_3(self):
Tag.objects.get_or_create(tag_text='blue', denied=True)
Tag.objects.get_or_create(tag_text='darn', denied=True)
Tag.objects.get_or_create(tag_text='swearword', denied=True)
data = self.get_dict(tags='blue, darn, swearword')
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
error = 'Invalid tags: blue, darn, swearword'
self.assertFormError(response, 'form', 'tags', error)
def test_edit_remove_tag(self):
self.tags.remove('tag2')
count = ActivityLog.objects.all().count()
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
result = pq(response.content)('#addon_tags_edit').eq(0).text()
assert result == ', '.join(sorted(self.tags))
assert ActivityLog.objects.filter(
action=amo.LOG.REMOVE_TAG.id).count() == count + 1
def test_edit_minlength_tags(self):
tags = self.tags
tags.append('a' * (amo.MIN_TAG_LENGTH - 1))
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
assert response.status_code == 200
self.assertFormError(response, 'form', 'tags',
'All tags must be at least %d characters.' %
amo.MIN_TAG_LENGTH)
def test_edit_max_tags(self):
tags = self.tags
for i in range(amo.MAX_TAGS + 1):
tags.append('test%d' % i)
data = self.get_dict()
response = self.client.post(self.basic_edit_url, data)
self.assertFormError(
response, 'form', 'tags',
'You have %d too many tags.' % (len(tags) - amo.MAX_TAGS))
def test_edit_tag_empty_after_slug(self):
start = Tag.objects.all().count()
data = self.get_dict(tags='>>')
self.client.post(self.basic_edit_url, data)
# Check that the tag did not get created.
assert start == Tag.objects.all().count()
def test_edit_tag_slugified(self):
data = self.get_dict(tags='<script>alert("foo")</script>')
self.client.post(self.basic_edit_url, data)
tag = Tag.objects.all().order_by('-pk')[0]
assert tag.tag_text == 'scriptalertfooscript'
def test_edit_categories_add(self):
assert [c.id for c in self.get_addon().all_categories] == [22]
self.cat_initial['categories'] = [22, 1]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert sorted(addon_cats) == [1, 22]
def _feature_addon(self, addon_id=3615):
c_addon = CollectionAddon.objects.create(
addon_id=addon_id, collection=Collection.objects.create())
FeaturedCollection.objects.create(collection=c_addon.collection,
application=amo.FIREFOX.id)
cache.clear()
def test_edit_categories_add_featured(self):
"""Ensure that categories cannot be changed for featured add-ons."""
self._feature_addon()
self.cat_initial['categories'] = [22, 1]
response = self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert response.context['cat_form'].errors[0]['categories'] == (
['Categories cannot be changed while your add-on is featured for '
'this application.'])
# This add-on's categories should not change.
assert sorted(addon_cats) == [22]
def test_edit_categories_add_new_creatured_admin(self):
"""Ensure that admins can change categories for creatured add-ons."""
assert self.client.login(email='admin@mozilla.com')
self._feature_addon()
response = self.client.get(self.basic_edit_url)
doc = pq(response.content)
assert doc('#addon-categories-edit div.addon-app-cats').length == 1
assert doc('#addon-categories-edit > p').length == 0
self.cat_initial['categories'] = [22, 1]
response = self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert 'categories' not in response.context['cat_form'].errors[0]
# This add-on's categories should change.
assert sorted(addon_cats) == [1, 22]
def test_edit_categories_disable_creatured(self):
"""Ensure that other forms are okay when disabling category changes."""
self._feature_addon()
self.cat_initial['categories'] = [22, 1]
data = self.get_dict()
self.client.post(self.basic_edit_url, data)
assert unicode(self.get_addon().name) == data['name']
def test_edit_categories_no_disclaimer(self):
"""Ensure that there is a not disclaimer for non-creatured add-ons."""
response = self.client.get(self.basic_edit_url)
doc = pq(response.content)
assert doc('#addon-categories-edit div.addon-app-cats').length == 1
assert doc('#addon-categories-edit > p').length == 0
def test_edit_no_previous_categories(self):
AddonCategory.objects.filter(addon=self.addon).delete()
response = self.client.get(self.basic_edit_url)
assert response.status_code == 200
self.cat_initial['categories'] = [22, 71]
response = self.client.post(self.basic_edit_url, self.get_dict())
self.addon = self.get_addon()
addon_cats = self.addon.categories.values_list('id', flat=True)
assert sorted(addon_cats) == [22, 71]
# Make sure the categories list we display to the user in the response
# has been updated.
assert set(response.context['addon'].all_categories) == set(
self.addon.all_categories)
def test_edit_categories_addandremove(self):
AddonCategory(addon=self.addon, category_id=1).save()
assert sorted(
[c.id for c in self.get_addon().all_categories]) == [1, 22]
self.cat_initial['categories'] = [22, 71]
response = self.client.post(self.basic_edit_url, self.get_dict())
self.addon = self.get_addon()
addon_cats = self.addon.categories.values_list('id', flat=True)
assert sorted(addon_cats) == [22, 71]
# Make sure the categories list we display to the user in the response
# has been updated.
assert set(response.context['addon'].all_categories) == set(
self.addon.all_categories)
def test_edit_categories_xss(self):
category = Category.objects.get(id=22)
category.db_name = '<script>alert("test");</script>'
category.slug = 'xssattempt'
category.save()
self.cat_initial['categories'] = [22, 71]
response = self.client.post(
self.basic_edit_url, formset(self.cat_initial, initial_count=1))
assert '<script>alert' not in response.content
assert '&lt;script&gt;alert' in response.content
def test_edit_categories_remove(self):
category = Category.objects.get(id=1)
AddonCategory(addon=self.addon, category=category).save()
assert sorted(
[cat.id for cat in self.get_addon().all_categories]) == [1, 22]
self.cat_initial['categories'] = [22]
response = self.client.post(self.basic_edit_url, self.get_dict())
self.addon = self.get_addon()
addon_cats = self.addon.categories.values_list('id', flat=True)
assert sorted(addon_cats) == [22]
# Make sure the categories list we display to the user in the response
# has been updated.
assert set(response.context['addon'].all_categories) == set(
self.addon.all_categories)
def test_edit_categories_required(self):
del self.cat_initial['categories']
response = self.client.post(
self.basic_edit_url, formset(self.cat_initial, initial_count=1))
assert response.context['cat_form'].errors[0]['categories'] == (
['This field is required.'])
def test_edit_categories_max(self):
assert amo.MAX_CATEGORIES == 2
self.cat_initial['categories'] = [22, 1, 71]
response = self.client.post(
self.basic_edit_url, formset(self.cat_initial, initial_count=1))
assert response.context['cat_form'].errors[0]['categories'] == (
['You can have only 2 categories.'])
def test_edit_categories_other_failure(self):
Category.objects.get(id=22).update(misc=True)
self.cat_initial['categories'] = [22, 1]
response = self.client.post(
self.basic_edit_url, formset(self.cat_initial, initial_count=1))
assert response.context['cat_form'].errors[0]['categories'] == (
['The miscellaneous category cannot be combined with additional '
'categories.'])
def test_edit_categories_nonexistent(self):
self.cat_initial['categories'] = [100]
response = self.client.post(
self.basic_edit_url, formset(self.cat_initial, initial_count=1))
assert response.context['cat_form'].errors[0]['categories'] == (
['Select a valid choice. 100 is not one of the available '
'choices.'])
def test_edit_restricted_tags(self):
addon = self.get_addon()
tag = Tag.objects.create(
tag_text='i_am_a_restricted_tag', restricted=True)
AddonTag.objects.create(tag=tag, addon=addon)
res = self.client.get(self.basic_edit_url)
divs = pq(res.content)('#addon_tags_edit .edit-addon-details')
assert len(divs) == 2
assert 'i_am_a_restricted_tag' in divs.eq(1).text()
def test_text_not_none_when_has_flags(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#addon-flags').text() == (
'This add-on requires external software.')
def test_text_none_when_no_flags(self):
addon = self.get_addon()
addon.update(external_software=False)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#addon-flags').text() == 'None'
def test_nav_links(self):
activity_url = reverse('devhub.feed', args=['a3615'])
response = self.client.get(self.url)
doc = pq(response.content)('#edit-addon-nav')
assert doc('ul:last').find('li a').eq(1).attr('href') == (
activity_url)
assert doc('.view-stats').length == 1
def test_nav_links_admin(self):
assert self.client.login(email='admin@mozilla.com')
response = self.client.get(self.url)
doc = pq(response.content)('#edit-addon-nav')
links = doc('ul:last').find('li a')
assert links.eq(1).attr('href') == reverse(
'editors.review', args=[self.addon.slug])
assert links.eq(2).attr('href') == reverse(
'zadmin.addon_manage', args=[self.addon.slug])
def test_not_experimental_flag(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#experimental-edit').text() == (
'This add-on is ready for general use.')
def test_experimental_flag(self):
self.get_addon().update(is_experimental=True)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#experimental-edit').text() == (
'This add-on is experimental.')
def get_l10n_urls(self):
paths = ('devhub.addons.edit', 'devhub.addons.profile',
'devhub.addons.owner')
return [reverse(p, args=['a3615']) for p in paths]
def test_l10n(self):
Addon.objects.get(id=3615).update(default_locale='en-US')
for url in self.get_l10n_urls():
response = self.client.get(url)
assert pq(
response.content)('#l10n-menu').attr('data-default') == 'en-us'
def test_l10n_not_us(self):
Addon.objects.get(id=3615).update(default_locale='fr')
for url in self.get_l10n_urls():
response = self.client.get(url)
assert pq(
response.content)('#l10n-menu').attr('data-default') == 'fr'
def test_l10n_not_us_id_url(self):
Addon.objects.get(id=3615).update(default_locale='fr')
for url in self.get_l10n_urls():
url = '/id' + url[6:]
response = self.client.get(url)
assert pq(
response.content)('#l10n-menu').attr('data-default') == 'fr'
class TestEditMedia(BaseTestEdit):
def setUp(self):
super(TestEditMedia, self).setUp()
self.media_edit_url = self.get_url('media', True)
self.icon_upload = reverse('devhub.addons.upload_icon',
args=[self.addon.slug])
self.preview_upload = reverse('devhub.addons.upload_preview',
args=[self.addon.slug])
def formset_new_form(self, *args, **kw):
ctx = self.client.get(self.media_edit_url).context
blank = initial(ctx['preview_form'].forms[-1])
blank.update(**kw)
return blank
def formset_media(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.formset_new_form()], **kw)
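# The test client cannot encode None as form data, so replace None values with empty strings.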
return {k: '' if v is None else v for k, v in fs.items()}
def test_icon_upload_attributes(self):
doc = pq(self.client.get(self.media_edit_url).content)
field = doc('input[name=icon_upload]')
assert field.length == 1
assert sorted(field.attr('data-allowed-types').split('|')) == (
['image/jpeg', 'image/png'])
assert field.attr('data-upload-url') == self.icon_upload
def test_edit_media_defaulticon(self):
data = {'icon_type': ''}
data_formset = self.formset_media(**data)
response = self.client.post(self.media_edit_url, data_formset)
assert response.context['form'].errors == {}
addon = self.get_addon()
assert addon.get_icon_url(64).endswith('icons/default-64.png')
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_media_preuploadedicon(self):
data = {'icon_type': 'icon/appearance'}
data_formset = self.formset_media(**data)
response = self.client.post(self.media_edit_url, data_formset)
assert response.context['form'].errors == {}
addon = self.get_addon()
assert addon.get_icon_url(64).endswith('icons/appearance-64.png')
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_media_uploadedicon(self):
img = get_image_path('mozilla.png')
src_image = open(img, 'rb')
data = {'upload_image': src_image}
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = {
'icon_type': 'image/png',
'icon_upload_hash': response_json['upload_hash']
}
data_formset = self.formset_media(**data)
response = self.client.post(self.media_edit_url, data_formset)
assert response.context['form'].errors == {}
addon = self.get_addon()
# Unfortunate hardcoding of URL
url = addon.get_icon_url(64)
assert ('addon_icons/3/%s' % addon.id) in url, (
'Unexpected path: %r' % url)
assert data['icon_type'] == 'image/png'
# Check that it was actually uploaded
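# Icon files are sharded into per-thousand directories derived from the add-on id.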
dirname = os.path.join(user_media_path('addon_icons'),
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-32.png' % addon.id)
assert storage.exists(dest)
assert Image.open(storage.open(dest)).size == (32, 12)
def test_edit_media_icon_log(self):
self.test_edit_media_uploadedicon()
log = ActivityLog.objects.all()
assert log.count() == 1
assert log[0].action == amo.LOG.CHANGE_ICON.id
def test_edit_media_uploadedicon_noresize(self):
img = "static/img/notifications/error.png"
src_image = open(img, 'rb')
data = {'upload_image': src_image}
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = {
'icon_type': 'image/png',
'icon_upload_hash': response_json['upload_hash']
}
data_formset = self.formset_media(**data)
response = self.client.post(self.media_edit_url, data_formset)
assert response.context['form'].errors == {}
addon = self.get_addon()
# Unfortunate hardcoding of URL
addon_url = addon.get_icon_url(64).split('?')[0]
assert addon_url.endswith('addon_icons/3/%s-64.png' % addon.id), (
'Unexpected path: %r' % addon_url)
assert data['icon_type'] == 'image/png'
# Check that it was actually uploaded
dirname = os.path.join(user_media_path('addon_icons'),
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-64.png' % addon.id)
assert storage.exists(dest)
assert Image.open(storage.open(dest)).size == (48, 48)
def check_image_type(self, url, msg):
img = 'static/js/zamboni/devhub.js'
src_image = open(img, 'rb')
res = self.client.post(url, {'upload_image': src_image})
response_json = json.loads(res.content)
assert response_json['errors'][0] == msg
def test_edit_media_icon_wrong_type(self):
self.check_image_type(self.icon_upload,
'Icons must be either PNG or JPG.')
def test_edit_media_screenshot_wrong_type(self):
self.check_image_type(self.preview_upload,
'Images must be either PNG or JPG.')
def setup_image_status(self):
addon = self.get_addon()
self.icon_dest = os.path.join(addon.get_icon_dir(),
'%s-32.png' % addon.id)
os.makedirs(os.path.dirname(self.icon_dest))
with storage.open(self.icon_dest, 'w') as f:
f.write('some icon data\n')
self.preview = addon.previews.create()
self.preview.save()
os.makedirs(os.path.dirname(self.preview.thumbnail_path))
with storage.open(self.preview.thumbnail_path, 'w') as f:
f.write('some icon data\n')
self.url = reverse('devhub.ajax.image.status', args=[addon.slug])
def test_image_status_no_choice(self):
addon = self.get_addon()
addon.update(icon_type='')
url = reverse('devhub.ajax.image.status', args=[addon.slug])
result = json.loads(self.client.get(url).content)
assert result['icons']
def test_image_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_image_status_fails(self):
self.setup_image_status()
storage.delete(self.icon_dest)
result = json.loads(self.client.get(self.url).content)
assert not result['icons']
def test_preview_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
# No previews means that all the images are done.
self.addon.previews.all().delete()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
def test_preview_status_fails(self):
self.setup_image_status()
storage.delete(self.preview.thumbnail_path)
result = json.loads(self.client.get(self.url).content)
assert not result['previews']
def test_image_status_persona(self):
self.setup_image_status()
storage.delete(self.icon_dest)
self.get_addon().update(type=amo.ADDON_PERSONA)
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_image_status_default(self):
self.setup_image_status()
storage.delete(self.icon_dest)
self.get_addon().update(icon_type='icon/photos')
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def check_image_animated(self, url, msg):
filehandle = open(get_image_path('animated.png'), 'rb')
res = self.client.post(url, {'upload_image': filehandle})
response_json = json.loads(res.content)
assert response_json['errors'][0] == msg
def test_icon_animated(self):
self.check_image_animated(self.icon_upload,
'Icons cannot be animated.')
def test_screenshot_animated(self):
self.check_image_animated(self.preview_upload,
'Images cannot be animated.')
def preview_add(self, amount=1):
img = get_image_path('mozilla.png')
src_image = open(img, 'rb')
data = {'upload_image': src_image}
data_formset = self.formset_media(**data)
url = self.preview_upload
response = self.client.post(url, data_formset)
details = json.loads(response.content)
upload_hash = details['upload_hash']
# Create and post with the formset.
fields = []
for i in range(amount):
fields.append(self.formset_new_form(caption='hi',
upload_hash=upload_hash,
position=i))
data_formset = self.formset_media(*fields)
self.client.post(self.media_edit_url, data_formset)
def test_edit_media_preview_add(self):
self.preview_add()
assert str(self.get_addon().previews.all()[0].caption) == 'hi'
def test_edit_media_preview_edit(self):
self.preview_add()
preview = self.get_addon().previews.all()[0]
edited = {'caption': 'bye',
'upload_hash': '',
'id': preview.id,
'position': preview.position,
'file_upload': None}
data_formset = self.formset_media(edited, initial_count=1)
self.client.post(self.media_edit_url, data_formset)
assert str(self.get_addon().previews.all()[0].caption) == 'bye'
assert len(self.get_addon().previews.all()) == 1
def test_edit_media_preview_reorder(self):
self.preview_add(3)
previews = self.get_addon().previews.all()
base = {'upload_hash': '', 'file_upload': None}
# Three preview forms were generated; mix them up here.
one = {'caption': 'first', 'position': 1, 'id': previews[2].id}
two = {'caption': 'second', 'position': 2, 'id': previews[0].id}
three = {'caption': 'third', 'position': 3, 'id': previews[1].id}
one.update(base)
two.update(base)
three.update(base)
# Add them in reverse order ("third", "second", "first").
data_formset = self.formset_media(three, two, one, initial_count=3)
assert data_formset['files-0-caption'] == 'third'
assert data_formset['files-1-caption'] == 'second'
assert data_formset['files-2-caption'] == 'first'
self.client.post(self.media_edit_url, data_formset)
# They should come out "first", "second", "third"
assert str(self.get_addon().previews.all()[0].caption) == 'first'
assert str(self.get_addon().previews.all()[1].caption) == 'second'
assert str(self.get_addon().previews.all()[2].caption) == 'third'
def test_edit_media_preview_delete(self):
self.preview_add()
preview = self.get_addon().previews.get()
edited = {'DELETE': 'checked',
'upload_hash': '',
'id': preview.id,
'position': 0,
'file_upload': None}
data_formset = self.formset_media(edited, initial_count=1)
self.client.post(self.media_edit_url, data_formset)
assert len(self.get_addon().previews.all()) == 0
def test_edit_media_preview_add_another(self):
self.preview_add()
self.preview_add()
assert len(self.get_addon().previews.all()) == 2
def test_edit_media_preview_add_two(self):
self.preview_add(2)
assert len(self.get_addon().previews.all()) == 2
class BaseTestEditDetails(BaseTestEdit):
def setUp(self):
super(BaseTestEditDetails, self).setUp()
self.details_url = self.get_url('details')
self.details_edit_url = self.get_url('details', edit=True)
def test_edit(self):
data = {
'description': 'New description with <em>html</em>!',
'default_locale': 'en-US',
'homepage': 'http://twitter.com/fligtarsmom'
}
response = self.client.post(self.details_edit_url, data)
assert response.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_xss(self):
"""
Let's try to put xss in our description, and safe html, and verify
that we are playing safe.
"""
self.addon.description = ("This\n<b>IS</b>"
"<script>alert('awesome')</script>")
self.addon.save()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#edit-addon-details span[lang]').html() == (
"This<br/><b>IS</b><script>alert('awesome')</script>")
def test_edit_homepage_optional(self):
data = {
'description': 'New description with <em>html</em>!',
'default_locale': 'en-US',
'homepage': ''
}
response = self.client.post(self.details_edit_url, data)
assert response.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
class TestEditDetailsListed(BaseTestEditDetails):
def test_edit_default_locale_required_trans(self):
# name, summary, and description are required in the new locale.
description, homepage = map(unicode, [self.addon.description,
self.addon.homepage])
# TODO: description should get fixed up with the form.
error = ('Before changing your default locale you must have a name, '
'summary, and description in that locale. '
'You are missing ')
data = {
'description': description,
'homepage': homepage,
'default_locale': 'fr'
}
response = self.client.post(self.details_edit_url, data)
# We can't use assertFormError here, because the missing fields are
# stored in a dict, which isn't ordered.
form_error = response.context['form'].non_field_errors()[0]
assert form_error.startswith(error)
assert "'description'" in form_error
assert "'name'" in form_error
assert "'summary'" in form_error
# Now we have a name.
self.addon.name = {'fr': 'fr name'}
self.addon.save()
response = self.client.post(self.details_edit_url, data)
form_error = response.context['form'].non_field_errors()[0]
assert form_error.startswith(error)
assert "'description'" in form_error
assert "'summary'" in form_error
# Now we have a summary.
self.addon.summary = {'fr': 'fr summary'}
self.addon.save()
response = self.client.post(self.details_edit_url, data)
form_error = response.context['form'].non_field_errors()[0]
assert form_error.startswith(error)
assert "'description'" in form_error
# Now we're sending an fr description with the form.
data['description_fr'] = 'fr description'
response = self.client.post(self.details_edit_url, data)
assert response.context['form'].errors == {}
def test_edit_default_locale_frontend_error(self):
data = {
'description': 'xx',
'homepage': 'https://staticfil.es/',
'default_locale': 'fr'
}
response = self.client.post(self.details_edit_url, data)
self.assertContains(
response, 'Before changing your default locale you must')
def test_edit_locale(self):
addon = self.get_addon()
addon.update(default_locale='en-US')
response = self.client.get(self.details_url)
assert pq(response.content)('.addon_edit_locale').eq(0).text() == (
'English (US)')
class TestEditSupport(BaseTestEdit):
def setUp(self):
super(TestEditSupport, self).setUp()
self.support_url = self.get_url('support')
self.support_edit_url = self.get_url('support', edit=True)
def test_edit_support(self):
data = {
'support_email': 'sjobs@apple.com',
'support_url': 'http://apple.com/'
}
response = self.client.post(self.support_edit_url, data)
assert response.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_support_optional_url(self):
data = {
'support_email': 'sjobs@apple.com',
'support_url': ''
}
response = self.client.post(self.support_edit_url, data)
assert response.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_support_optional_email(self):
data = {
'support_email': '',
'support_url': 'http://apple.com/'
}
response = self.client.post(self.support_edit_url, data)
assert response.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
class TestEditTechnical(BaseTestEdit):
fixtures = BaseTestEdit.fixtures + [
'addons/persona', 'base/addon_40', 'base/addon_1833_yoono',
'base/addon_4664_twitterbar.json',
'base/addon_5299_gcal', 'base/addon_6113']
def setUp(self):
super(TestEditTechnical, self).setUp()
self.dependent_addon = Addon.objects.get(id=5579)
AddonDependency.objects.create(addon=self.addon,
dependent_addon=self.dependent_addon)
self.technical_url = self.get_url('technical')
self.technical_edit_url = self.get_url('technical', edit=True)
ctx = self.client.get(self.technical_edit_url).context
self.dep = initial(ctx['dependency_form'].initial_forms[0])
self.dep_initial = formset(self.dep, prefix='dependencies',
initial_count=1)
def dep_formset(self, *args, **kw):
kw.setdefault('initial_count', 1)
kw.setdefault('prefix', 'dependencies')
return formset(self.dep, *args, **kw)
def formset(self, data):
return self.dep_formset(**data)
def test_log(self):
data = self.formset({'developer_comments': 'This is a test'})
assert ActivityLog.objects.count() == 0
response = self.client.post(self.technical_edit_url, data)
assert response.context['form'].errors == {}
assert ActivityLog.objects.filter(
action=amo.LOG.EDIT_PROPERTIES.id).count() == 1
def test_technical_on(self):
# Turn everything on
data = {
'developer_comments': 'Test comment!',
'external_software': 'on',
'view_source': 'on',
'whiteboard': 'Whiteboard info.'
}
response = self.client.post(
self.technical_edit_url, self.formset(data))
assert response.context['form'].errors == {}
addon = self.get_addon()
for k in data:
if k == 'developer_comments':
assert unicode(getattr(addon, k)) == unicode(data[k])
elif k == 'whiteboard':
assert unicode(getattr(addon, k)) == unicode(data[k])
else:
assert getattr(addon, k) == (data[k] == 'on')
# And now turn everything back off.
data = {'developer_comments': 'Test comment!'}
response = self.client.post(
self.technical_edit_url, self.formset(data))
addon = self.get_addon()
assert not addon.external_software
assert not addon.view_source
def test_technical_devcomment_notrequired(self):
data = {
'developer_comments': '',
'external_software': 'on',
'view_source': 'on'
}
response = self.client.post(
self.technical_edit_url, self.formset(data))
assert response.context['form'].errors == {}
addon = self.get_addon()
for k in data:
if k == 'developer_comments':
assert unicode(getattr(addon, k)) == unicode(data[k])
else:
assert getattr(addon, k) == (data[k] == 'on')
def test_auto_repackage_not_shown(self):
file_ = self.addon.current_version.all_files[0]
file_.jetpack_version = None
file_.save()
response = self.client.get(self.technical_edit_url)
self.assertNotContains(response, 'Upgrade SDK?')
def test_auto_repackage_shown(self):
file_ = self.addon.current_version.all_files[0]
file_.jetpack_version = '1.0'
file_.save()
response = self.client.get(self.technical_edit_url)
self.assertContains(response, 'Upgrade SDK?')
def test_dependencies_none(self):
AddonDependency.objects.all().delete()
assert list(self.addon.all_dependencies) == []
response = self.client.get(self.technical_url)
assert pq(response.content)('#required-addons .empty').length == 1
def test_dependencies_overview(self):
assert [d.id for d in self.addon.all_dependencies] == [5579]
response = self.client.get(self.technical_url)
req = pq(response.content)('#required-addons')
assert req.length == 1
assert req.attr('data-src') == (
reverse('devhub.ajax.dependencies', args=[self.addon.slug]))
assert req.find('li').length == 1
link = req.find('a')
assert link.attr('href') == self.dependent_addon.get_url_path()
assert link.text() == unicode(self.dependent_addon.name)
def test_dependencies_initial(self):
response = self.client.get(self.technical_edit_url)
form = pq(response.content)(
'#required-addons .dependencies li[data-addonid]')
assert form.length == 1
assert form.find('input[id$=-dependent_addon]').val() == (
str(self.dependent_addon.id))
div = form.find('div')
assert div.attr('style') == (
'background-image:url(%s)' % self.dependent_addon.icon_url)
link = div.find('a')
assert link.attr('href') == self.dependent_addon.get_url_path()
assert link.text() == unicode(self.dependent_addon.name)
def test_dependencies_add(self):
addon = Addon.objects.get(id=5299)
assert addon.type == amo.ADDON_EXTENSION
assert addon in list(Addon.objects.public())
data = self.dep_formset({'dependent_addon': addon.id})
response = self.client.post(self.technical_edit_url, data)
assert not any(response.context['dependency_form'].errors)
self.check_dep_ids([self.dependent_addon.id, addon.id])
response = self.client.get(self.technical_edit_url)
reqs = pq(response.content)('#required-addons .dependencies')
assert reqs.find('li[data-addonid]').length == 2
req = reqs.find('li[data-addonid="5299"]')
assert req.length == 1
link = req.find('div a')
assert link.attr('href') == addon.get_url_path()
assert link.text() == unicode(addon.name)
def test_dependencies_limit(self):
deps = Addon.objects.public().exclude(
Q(id__in=[self.addon.id, self.dependent_addon.id]) |
Q(type=amo.ADDON_PERSONA))
args = []
assert deps.count() > 3 # The limit is 3.
for dep in deps:
args.append({'dependent_addon': dep.id})
data = self.dep_formset(*args)
response = self.client.post(self.technical_edit_url, data)
assert response.context['dependency_form'].non_form_errors() == (
['There cannot be more than 3 required add-ons.'])
def test_dependencies_limit_with_deleted_form(self):
deps = Addon.objects.public().exclude(
Q(id__in=[self.addon.id, self.dependent_addon.id]) |
Q(type=amo.ADDON_PERSONA))[:3]
args = []
for dep in deps:
args.append({'dependent_addon': dep.id})
# If we delete one form and add three, everything should be A-OK.
self.dep['DELETE'] = True
data = self.dep_formset(*args)
response = self.client.post(self.technical_edit_url, data)
assert not any(response.context['dependency_form'].errors)
self.check_dep_ids(deps.values_list('id', flat=True))
def check_dep_ids(self, expected=None):
if expected is None:
expected = []
ids = AddonDependency.objects.values_list(
'dependent_addon__id', flat=True)
assert sorted(list(ids)) == sorted(expected)
def check_bad_dep(self, r):
"""This helper checks that bad dependency data doesn't go through."""
assert r.context['dependency_form'].errors[1]['dependent_addon'] == (
['Select a valid choice. That choice is not one of the available '
'choices.'])
self.check_dep_ids([self.dependent_addon.id])
def test_dependencies_add_reviewed(self):
"""Ensure that reviewed add-ons can be made as dependencies."""
addon = Addon.objects.get(id=40)
for status in amo.REVIEWED_STATUSES:
addon.update(status=status)
assert addon in list(Addon.objects.public())
data = self.dep_formset({'dependent_addon': addon.id})
response = self.client.post(self.technical_edit_url, data)
assert not any(response.context['dependency_form'].errors)
self.check_dep_ids([self.dependent_addon.id, addon.id])
AddonDependency.objects.get(dependent_addon=addon).delete()
def test_dependencies_no_add_unreviewed(self):
"""Ensure that unreviewed add-ons cannot be made as dependencies."""
addon = Addon.objects.get(id=40)
for status in amo.UNREVIEWED_ADDON_STATUSES:
addon.update(status=status)
assert addon not in list(Addon.objects.public())
data = self.dep_formset({'dependent_addon': addon.id})
response = self.client.post(self.technical_edit_url, data)
self.check_bad_dep(response)
def test_dependencies_no_add_reviewed_persona(self):
"""Ensure that reviewed Personas cannot be made as dependencies."""
addon = Addon.objects.get(id=15663)
assert addon.type == amo.ADDON_PERSONA
assert addon in list(Addon.objects.public())
data = self.dep_formset({'dependent_addon': addon.id})
response = self.client.post(self.technical_edit_url, data)
self.check_bad_dep(response)
def test_dependencies_no_add_unreviewed_persona(self):
"""Ensure that unreviewed Personas cannot be made as dependencies."""
addon = Addon.objects.get(id=15663)
addon.update(status=amo.STATUS_PENDING)
assert addon.status == amo.STATUS_PENDING
assert addon not in list(Addon.objects.public())
data = self.dep_formset({'dependent_addon': addon.id})
response = self.client.post(self.technical_edit_url, data)
self.check_bad_dep(response)
def test_dependencies_add_self(self):
"""Ensure that an add-on cannot be made dependent on itself."""
data = self.dep_formset({'dependent_addon': self.addon.id})
response = self.client.post(self.technical_edit_url, data)
self.check_bad_dep(response)
def test_dependencies_add_invalid(self):
"""Ensure that a non-existent add-on cannot be a dependency."""
data = self.dep_formset({'dependent_addon': 9999})
response = self.client.post(self.technical_edit_url, data)
self.check_bad_dep(response)
def test_dependencies_add_duplicate(self):
"""Ensure that an add-on cannot be made dependent more than once."""
data = self.dep_formset({'dependent_addon': self.dependent_addon.id})
response = self.client.post(self.technical_edit_url, data)
assert (
response.context['dependency_form'].forms[1].non_field_errors() ==
['Addon dependency with this Addon and Dependent addon already '
'exists.'])
self.check_dep_ids([self.dependent_addon.id])
def test_dependencies_delete(self):
self.dep['DELETE'] = True
data = self.dep_formset(total_count=1, initial_count=1)
response = self.client.post(self.technical_edit_url, data)
assert not any(response.context['dependency_form'].errors)
self.check_dep_ids()
def test_dependencies_add_delete(self):
"""Ensure that we can both delete a dependency and add another."""
self.dep['DELETE'] = True
data = self.dep_formset({'dependent_addon': 5299})
response = self.client.post(self.technical_edit_url, data)
assert not any(response.context['dependency_form'].errors)
self.check_dep_ids([5299])
class TestEditBasicUnlisted(BaseTestEditBasic):
listed = False
class TestEditDetailsUnlisted(BaseTestEditDetails):
listed = False
class TestEditTechnicalUnlisted(BaseTestEdit):
listed = False
def test_whiteboard(self):
edit_url = self.get_url('technical', edit=True)
# It's okay to post empty whiteboard instructions.
response = self.client.post(edit_url, {'whiteboard': ''})
assert response.context['form'].errors == {}
# Let's update it.
response = self.client.post(
edit_url, {'whiteboard': 'important stuff'})
assert response.context['form'].errors == {}
addon = self.get_addon()
assert addon.whiteboard == 'important stuff'
# And clear it again.
response = self.client.post(edit_url, {'whiteboard': ''})
assert response.context['form'].errors == {}
addon = self.get_addon()
assert addon.whiteboard == ''
class TestAdmin(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def login_admin(self):
assert self.client.login(email='admin@mozilla.com')
def login_user(self):
assert self.client.login(email='del@icio.us')
def test_show_admin_settings_admin(self):
self.login_admin()
url = reverse('devhub.addons.edit', args=['a3615'])
response = self.client.get(url)
assert response.status_code == 200
self.assertContains(response, 'Admin Settings')
assert 'admin_form' in response.context
def test_show_admin_settings_nonadmin(self):
self.login_user()
url = reverse('devhub.addons.edit', args=['a3615'])
response = self.client.get(url)
assert response.status_code == 200
self.assertNotContains(response, 'Admin Settings')
assert 'admin_form' not in response.context, (
'AdminForm not expected in context.')
def test_post_as_admin(self):
self.login_admin()
url = reverse('devhub.addons.admin', args=['a3615'])
response = self.client.post(url)
assert response.status_code == 200
def test_post_as_nonadmin(self):
self.login_user()
url = reverse('devhub.addons.admin', args=['a3615'])
response = self.client.post(url)
assert response.status_code == 403
class TestThemeEdit(TestCase):
fixtures = ['base/user_999']
def setUp(self):
super(TestThemeEdit, self).setUp()
self.addon = addon_factory(type=amo.ADDON_PERSONA)
self.user = UserProfile.objects.get()
self.addon.addonuser_set.create(user=self.user)
@mock.patch('olympia.amo.messages.error')
def test_desc_too_long_error(self, message_mock):
data = {'description': 'a' * 501}
req = req_factory_factory(
self.addon.get_dev_url('edit'),
user=self.user, post=True, data=data, session={})
response = edit_theme(req, self.addon.slug)
doc = pq(response.content)
assert 'characters' in doc('#trans-description + ul li').text()
def test_no_reupload_on_pending(self):
self.addon.update(status=amo.STATUS_PENDING)
req = req_factory_factory(
self.addon.get_dev_url('edit'), user=self.user, session={})
response = edit_theme(req, self.addon.slug)
doc = pq(response.content)
assert not doc('a.reupload')
self.addon.update(status=amo.STATUS_PUBLIC)
req = req_factory_factory(
self.addon.get_dev_url('edit'), user=self.user, session={})
response = edit_theme(req, self.addon.slug)
doc = pq(response.content)
assert doc('a.reupload')
def test_color_input_is_empty_at_creation(self):
self.client.login(email='regular@mozilla.com')
response = self.client.get(reverse('devhub.themes.submit'))
doc = pq(response.content)
el = doc('input.color-picker')
assert el.attr('type') == 'text'
assert not el.attr('value')
def test_color_input_is_not_empty_at_edit(self):
color = "123456"
self.addon.persona.accentcolor = color
self.addon.persona.save()
self.client.login(email='regular@mozilla.com')
url = reverse('devhub.themes.edit', args=(self.addon.slug, ))
response = self.client.get(url)
doc = pq(response.content)
el = doc('input#id_accentcolor')
assert el.attr('type') == 'text'
assert el.attr('value') == "#" + color
|
|
#!/usr/bin/env python3
"""This is main runner of generator
"""
import datetime
import logging
import re
import sys
from argparse import ArgumentParser
from collections import namedtuple, OrderedDict
from inspect import getfile
from os.path import basename
from pprint import pformat
from time import sleep
from xml.etree.ElementTree import ParseError as XMLSchemaError
from jinja2 import Environment, FileSystemLoader, TemplateNotFound, UndefinedError
from pathlib2 import Path
from xmlschema import XMLSchema
ROOT = Path(__file__).absolute().parents[0]
sys.path.append(ROOT.joinpath('rpc_spec/InterfaceParser').as_posix())
try:
from parsers.sdl_rpc_v2 import Parser
from parsers.parse_error import ParseError as InterfaceError
from model.interface import Interface
from transformers.generate_error import GenerateError
from transformers.common_producer import InterfaceProducerCommon
from transformers.enums_producer import EnumsProducer
from transformers.functions_producer import FunctionsProducer
from transformers.structs_producer import StructsProducer
except ImportError as message:
print('{}. probably you did not initialize submodule'.format(message))
sys.exit(1)
class Generator:
"""
This class contains only technical features, as follows:
- parsing command-line arguments, or evaluating the required Paths interactively;
- calling parsers to get the Model from the xml;
- calling producers to transform the initial Model into the dict used in the jinja2 templates.
Not required to be covered by unit tests because it contains only technical features.
"""
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
self._env = None
@property
def env(self):
"""
:return: jinja2 Environment
"""
return self._env
@env.setter
def env(self, value):
"""
:param value: path to the directory with templates
"""
if not Path(value).exists():
self.logger.critical('Directory with templates not found %s', value)
sys.exit(1)
else:
self._env = Environment(loader=FileSystemLoader(value))
@property
def get_version(self):
"""
:return: current version of Generator
"""
return InterfaceProducerCommon.version
def config_logging(self, verbose):
"""
Configure logging
:param verbose: boolean
"""
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m-%d %H:%M'))
if verbose:
handler.setLevel(logging.DEBUG)
self.logger.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.ERROR)
self.logger.setLevel(logging.ERROR)
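# Drop any handlers configured elsewhere so messages are not duplicated, then attach ours to the root logger.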
logging.getLogger().handlers.clear()
root_logger = logging.getLogger()
root_logger.addHandler(handler)
def evaluate_source_xml_xsd(self, xml, xsd):
"""
:param xml: path to MOBILE_API.xml file
:param xsd: path to .xsd file (optional)
:return: validated path to .xsd file
"""
if not Path(xml).exists():
self.logger.critical('File not found: %s', xml)
sys.exit(1)
if xsd and Path(xsd).exists():
return xsd
replace = xml.replace('.xml', '.xsd')
if xsd and not Path(xsd).exists():
self.logger.critical('File not found: %s', xsd)
sys.exit(1)
elif not xsd and not Path(replace).exists():
self.logger.critical('File not found: %s', replace)
sys.exit(1)
else:
return replace
def evaluate_output_directory(self, output_directory):
"""
:param output_directory: path to output_directory
:return: validated path to output_directory
"""
if output_directory.startswith('/'):
path = Path(output_directory).absolute().resolve()
else:
path = ROOT.joinpath(output_directory).resolve()
if not path.exists():
self.logger.warning('Directory not found: %s, trying to create it', path)
try:
path.mkdir(parents=True, exist_ok=True)
except OSError as message1:
self.logger.critical('Failed to create directory %s, %s', path.as_posix(), message1)
sys.exit(1)
return path
def get_parser(self):
"""
Parsing command-line arguments, or evaluating required Paths interactively.
:return: parsed command-line arguments (argparse.Namespace)
"""
if len(sys.argv) == 2 and sys.argv[1] in ('-v', '--version'):
print(self.get_version)
sys.exit(0)
Paths = namedtuple('Paths', 'name path')
xml = Paths('source_xml', ROOT.joinpath('rpc_spec/MOBILE_API.xml'))
required_source = not xml.path.exists()
out = Paths('output_directory', ROOT.parents[0].joinpath('base/src/main/java/'))
output_required = not out.path.exists()
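# The source xml and output directory only have to be passed on the command line
# when their conventional default paths do not exist.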
parser = ArgumentParser(description='Proxy Library RPC Generator')
parser.add_argument('-v', '--version', action='store_true', help='print the version and exit')
parser.add_argument('-xml', '--source-xml', '--input-file', required=required_source,
help='should point to MOBILE_API.xml')
parser.add_argument('-xsd', '--source-xsd', required=False)
parser.add_argument('-d', '--output-directory', required=output_required,
help='define the place where the generated output should be placed')
parser.add_argument('-t', '--templates-directory', nargs='?', default=ROOT.joinpath('templates').as_posix(),
help='path to directory with templates')
parser.add_argument('-r', '--regex-pattern', required=False,
help='only elements matched with defined regex pattern will be parsed and generated')
parser.add_argument('--verbose', action='store_true', help='display additional details like logs etc')
parser.add_argument('-e', '--enums', required=False, action='store_true',
help='only specified elements will be generated, if present')
parser.add_argument('-s', '--structs', required=False, action='store_true',
help='only specified elements will be generated, if present')
parser.add_argument('-m', '-f', '--functions', required=False, action='store_true',
help='only specified elements will be generated, if present')
parser.add_argument('-y', '--overwrite', action='store_true',
help='force overwriting of existing files in output directory, ignore confirmation message')
parser.add_argument('-n', '--skip', action='store_true',
help='skip overwriting of existing files in output directory, ignore confirmation message')
args, unknown = parser.parse_known_args()
if unknown:
self.logger.critical('found unknown arguments: %s', ' '.join(unknown))
parser.print_help(sys.stderr)
sys.exit(1)
if args.skip and args.overwrite:
self.logger.critical('please select only one option skip or overwrite')
sys.exit(1)
if not args.enums and not args.structs and not args.functions:
args.enums = args.structs = args.functions = True
for intermediate in (xml, out):
if not getattr(args, intermediate.name) and intermediate.path.exists():
while True:
try:
confirm = input('Confirm default path {} for {} Y/Enter = yes, N = no'
.format(intermediate.path, intermediate.name))
if confirm.lower() == 'y' or not confirm:
self.logger.warning('%s set to %s', intermediate.name, intermediate.path)
setattr(args, intermediate.name, intermediate.path.as_posix())
sleep(0.05)
break
if confirm.lower() == 'n':
self.logger.warning('provide argument %s', intermediate.name)
sys.exit(1)
except KeyboardInterrupt:
print('\nThe user interrupted the execution of the program')
sys.exit(1)
self.config_logging(args.verbose)
args.source_xsd = self.evaluate_source_xml_xsd(args.source_xml, args.source_xsd)
args.output_directory = self.evaluate_output_directory(args.output_directory)
self.env = args.templates_directory
self.logger.info('parsed arguments:\n%s', pformat((vars(args))))
return args
def versions_compatibility_validating(self):
"""version of generator script requires the same or lesser version of parser script.
if the parser script needs to fix a bug (and becomes, e.g. 1.0.1) and the generator script stays at 1.0.0.
As long as the generator script is the same or greater major version, it should be parsable.
This requires some level of backward compatibility. E.g. they have to be the same major version.
"""
regex = r'(\d+\.\d+).(\d)'
parser_origin = Parser().get_version
parser_split = re.findall(regex, parser_origin).pop()
generator_split = re.findall(regex, self.get_version).pop()
parser_major = float(parser_split[0])
generator_major = float(generator_split[0])
if parser_major > generator_major:
self.logger.critical('Generator (%s) requires the same or lesser version of Parser (%s)',
self.get_version, parser_origin)
sys.exit(1)
self.logger.info('Parser type: %s, version %s,\tGenerator version %s',
basename(getfile(Parser().__class__)), parser_origin, self.get_version)
def get_file_content(self, file_name: Path) -> list:
"""
:param file_name: Path to the file to read
:return: list of lines, or an empty list if the file does not exist
"""
try:
with file_name.open('r') as file:
content = file.readlines()
return content
except FileNotFoundError as message1:
self.logger.error(message1)
return []
def get_key_words(self, file_name=ROOT.joinpath('rpc_spec/RpcParser/RESERVED_KEYWORDS')):
"""
:param file_name: path to the file with reserved keywords
:return: tuple of normalized (casefolded) keywords, or an empty list on failure
"""
content = self.get_file_content(file_name)
content = tuple(map(lambda e: re.sub(r'\n', r'', e).strip().casefold(), content))
try:
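# Keep only real keywords: drop '#'-prefixed header lines and blank lines.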
content = tuple(filter(lambda e: not re.search(r'^#+\s+.+|^$', e), content))
self.logger.debug('key_words: %s', ', '.join(content))
return content
except (IndexError, ValueError, StopIteration) as error1:
self.logger.error('Error while getting key_words, %s %s', type(error1).__name__, error1)
return []
def get_paths(self, file_name=ROOT.joinpath('paths.ini')):
"""
:param file_name: path to file with Paths
:return: namedtuple with Paths to key elements
"""
fields = ('struct_class', 'request_class', 'response_class',
'notification_class', 'enums_package', 'structs_package', 'functions_package')
data = OrderedDict()
content = self.get_file_content(file_name)
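# Each non-comment line is expected in "key = value" form, one entry per field listed
# below, e.g. (hypothetical value): functions_package = com.example.rpc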
for line in content:
if line.startswith('#'):
self.logger.warning('commented property %s, which will be skipped', line.strip())
continue
if re.match(r'^(\w+)\s?=\s?(.+)', line):
if len(line.split('=')) > 2:
self.logger.critical('can not evaluate value, too many separators %s', str(line))
sys.exit(1)
name, var = line.partition('=')[::2]
if name.strip().lower() in data:
self.logger.critical('duplicate key %s', name)
sys.exit(1)
data[name.strip().lower()] = var.strip()
for line in fields:
if line not in data:
self.logger.critical('in %s missing field: %s', content, str(line))
sys.exit(1)
Paths = namedtuple('Paths', ' '.join(fields))
return Paths(**data)
def write_file(self, file_name, template, data):
"""
Apply the transformed dict (produced by the transformer) to the jinja2 template and write the render to the appropriate file.
:param file_name: output java file
:param template: name of template
:param data: transformed model ready for apply to Jinja2 template
"""
file_name.parents[0].mkdir(parents=True, exist_ok=True)
try:
render = self.env.get_template(template).render(data)
with file_name.open('w', encoding='utf-8') as file:
file.write(render)
except (TemplateNotFound, UndefinedError) as message1:
self.logger.error('skipping %s, template not found %s', file_name.as_posix(), message1)
def process(self, directory, skip, overwrite, items, transformer):
"""
Process each item from the initial Model. According to the provided arguments, skip it, overwrite it or ask what to do.
:param directory: output directory for writing output files
:param skip: if file exist skip it
:param overwrite: if file exist overwrite it
:param items: elements of the initial Model
:param transformer: producer/transformer instance
"""
directory.mkdir(parents=True, exist_ok=True)
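# The template name is derived from the model class of the items,
# e.g. items of a class named Enum would map to 'enum_template.java'.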
template = type(items[0]).__name__.lower() + '_template.java'
year = datetime.datetime.utcnow().year
for item in items:
if item.name == 'FunctionID':
self.logger.warning('%s will be skipped', item.name)
continue # Skip FunctionID generation
data = transformer.transform(item)
data['year'] = year
file = data['class_name'] + '.java'
file = directory.joinpath(data['package_name'].replace('.', '/')).joinpath(file)
if file.is_file():
if skip:
self.logger.info('Skipping %s', file)
continue
if overwrite:
self.logger.info('Overriding %s', file)
file.unlink()
self.write_file(file, template, data)
else:
while True:
try:
confirm = input('File already exists {}. Overwrite? Y/Enter = yes, N = no\n'.format(file))
if confirm.lower() == 'y' or not confirm:
self.logger.info('Overriding %s', file)
file.unlink()
self.write_file(file, template, data)
break
if confirm.lower() == 'n':
self.logger.info('Skipping %s', file)
break
except KeyboardInterrupt:
print('\nThe user interrupted the execution of the program')
sys.exit(1)
else:
self.logger.info('Writing new %s', file)
self.write_file(file, template, data)
def parser(self, xml, xsd, pattern=None):
"""
Validate the xml against the xsd, call the parser to get the Model from the xml and, if a pattern is provided, filter the Model.
:param xml: path to MOBILE_API.xml
:param xsd: path to MOBILE_API.xsd
:param pattern: regex pattern from the command-line arguments to filter elements of the initial Model
:return: initial Model
"""
self.logger.info('''Validating XML and generating model with following parameters:
Source xml : %s
Source xsd : %s''', xml, xsd)
try:
schema = XMLSchema(xsd)
if not schema.is_valid(xml):
raise GenerateError(schema.validate(xml))
interface = Parser().parse(xml)
except (InterfaceError, XMLSchemaError, GenerateError) as message1:
self.logger.critical('Invalid XML file content: %s, %s', xml, message1)
sys.exit(1)
enum_names = tuple(interface.enums.keys())
struct_names = tuple(interface.structs.keys())
if pattern:
intermediate = OrderedDict()
intermediate.update({'params': interface.params})
for kind, content in vars(interface).items():
if kind == 'params':
continue
for name, item in content.items():
if re.match(pattern, item.name):
self.logger.info('%s/%s match with %s', kind, item.name, pattern)
if kind in intermediate:
intermediate[kind].update({name: item})
else:
intermediate.update({kind: {name: item}})
interface = Interface(**intermediate)
self.logger.debug({'enums': tuple(interface.enums.keys()),
'structs': tuple(interface.structs.keys()),
'functions': tuple(map(lambda i: i.function_id.name, interface.functions.values())),
'params': interface.params})
return enum_names, struct_names, interface
def main(self):
"""
Entry point for parser and generator
:return: None
"""
args = self.get_parser()
self.versions_compatibility_validating()
enum_names, struct_names, interface = self.parser(xml=args.source_xml, xsd=args.source_xsd,
pattern=args.regex_pattern)
paths = self.get_paths()
key_words = self.get_key_words()
if args.enums and interface.enums:
self.process(args.output_directory, args.skip, args.overwrite, tuple(interface.enums.values()),
EnumsProducer(paths, key_words))
if args.structs and interface.structs:
self.process(args.output_directory, args.skip, args.overwrite, tuple(interface.structs.values()),
StructsProducer(paths, enum_names, struct_names, key_words))
if args.functions and interface.functions:
self.process(args.output_directory, args.skip, args.overwrite, tuple(interface.functions.values()),
FunctionsProducer(paths, enum_names, struct_names, key_words))
if __name__ == '__main__':
Generator().main()
|
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CommandCursor class to iterate over command results."""
import datetime
from collections import deque
from bson.py3compat import integer_types
from pymongo import helpers, monitoring
from pymongo.errors import AutoReconnect, NotMasterError, OperationFailure
from pymongo.message import _GetMore
class CommandCursor(object):
"""A cursor / iterator over command cursors.
"""
def __init__(self, collection, cursor_info, address, retrieved=0):
"""Create a new command cursor.
"""
self.__collection = collection
self.__id = cursor_info['id']
self.__address = address
self.__data = deque(cursor_info['firstBatch'])
self.__retrieved = retrieved
self.__batch_size = 0
self.__killed = (self.__id == 0)
if "ns" in cursor_info:
self.__ns = cursor_info["ns"]
else:
self.__ns = collection.full_name
def __del__(self):
if self.__id and not self.__killed:
self.__die()
def __die(self):
"""Closes this cursor.
"""
if self.__id and not self.__killed:
self.__collection.database.client.close_cursor(self.__id,
self.__address)
self.__killed = True
def close(self):
"""Explicitly close / kill this cursor. Required for PyPy, Jython and
other Python implementations that don't use reference counting
garbage collection.
"""
self.__die()
def batch_size(self, batch_size):
"""Limits the number of documents returned in one batch. Each batch
requires a round trip to the server. It can be adjusted to optimize
performance and limit data transfer.
.. note:: batch_size can not override MongoDB's internal limits on the
amount of data it will return to the client in a single batch (i.e
if you set batch size to 1,000,000,000, MongoDB will currently only
return 4-16MB of results per batch).
Raises :exc:`TypeError` if `batch_size` is not an integer.
Raises :exc:`ValueError` if `batch_size` is less than ``0``.
:Parameters:
- `batch_size`: The size of each batch of results requested.
"""
if not isinstance(batch_size, integer_types):
raise TypeError("batch_size must be an integer")
if batch_size < 0:
raise ValueError("batch_size must be >= 0")
# The server treats a batch size of 1 specially (it closes the cursor after
# a single batch), so a requested size of 1 is bumped to 2 to keep the cursor open.
self.__batch_size = 2 if batch_size == 1 else batch_size
return self
def __send_message(self, operation):
"""Send a getmore message and handle the response.
"""
client = self.__collection.database.client
try:
response = client._send_message_with_response(
operation, address=self.__address)
except AutoReconnect:
# Don't try to send kill cursors on another socket
# or to another server. It can cause a _pinValue
# assertion on some server releases if we get here
# due to a socket timeout.
self.__killed = True
raise
publish = monitoring.enabled()
cmd_duration = response.duration
rqst_id = response.request_id
if publish:
start = datetime.datetime.now()
try:
doc = helpers._unpack_response(response.data,
self.__id,
self.__collection.codec_options)
except OperationFailure as exc:
self.__killed = True
if publish:
duration = (datetime.datetime.now() - start) + cmd_duration
monitoring.publish_command_failure(
duration, exc.details, "getMore", rqst_id, self.__address)
raise
except NotMasterError as exc:
# Don't send kill cursors to another server after a "not master"
# error. It's completely pointless.
self.__killed = True
if publish:
duration = (datetime.datetime.now() - start) + cmd_duration
monitoring.publish_command_failure(
duration, exc.details, "getMore", rqst_id, self.__address)
client._reset_server_and_request_check(self.address)
raise
if publish:
duration = (datetime.datetime.now() - start) + cmd_duration
# Must publish in getMore command response format.
res = {"cursor": {"id": doc["cursor_id"],
"ns": self.__collection.full_name,
"nextBatch": doc["data"]},
"ok": 1}
monitoring.publish_command_success(
duration, res, "getMore", rqst_id, self.__address)
self.__id = doc["cursor_id"]
if self.__id == 0:
self.__killed = True
self.__retrieved += doc["number_returned"]
self.__data = deque(doc["data"])
def _refresh(self):
"""Refreshes the cursor with more data from the server.
Returns the length of self.__data after refresh. Will exit early if
self.__data is already non-empty. Raises OperationFailure when the
cursor cannot be refreshed due to an error on the query.
"""
if len(self.__data) or self.__killed:
return len(self.__data)
if self.__id: # Get More
self.__send_message(
_GetMore(self.__ns, self.__batch_size, self.__id))
else: # Cursor id is zero nothing else to return
self.__killed = True
return len(self.__data)
@property
def alive(self):
"""Does this cursor have the potential to return more data?
Even if :attr:`alive` is ``True``, :meth:`next` can raise
:exc:`StopIteration`. Best to use a for loop::
for doc in collection.aggregate(pipeline):
print(doc)
.. note:: :attr:`alive` can be True while iterating a cursor from
a failed server. In this case :attr:`alive` will return False after
:meth:`next` fails to retrieve the next batch of results from the
server.
"""
return bool(len(self.__data) or (not self.__killed))
@property
def cursor_id(self):
"""Returns the id of the cursor."""
return self.__id
@property
def address(self):
"""The (host, port) of the server used, or None.
.. versionadded:: 3.0
"""
return self.__address
def __iter__(self):
return self
def next(self):
"""Advance the cursor."""
if len(self.__data) or self._refresh():
coll = self.__collection
return coll.database._fix_outgoing(self.__data.popleft(), coll)
else:
raise StopIteration
__next__ = next
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__die()
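# Usage sketch (illustrative only; assumes ``collection`` is an existing
# :class:`~pymongo.collection.Collection`). ``aggregate`` returns a
# CommandCursor, ``batch_size`` can be chained, and the ``with`` block
# guarantees ``close()`` on Python implementations without
# reference-counting garbage collection::
#
#     with collection.aggregate(pipeline) as cursor:
#         for doc in cursor.batch_size(10):
#             print(doc)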
|
|
import asyncio
import sys
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import asyncio_socket
from engineio import exceptions
from engineio import packet
from engineio import payload
def AsyncMock(*args, **kwargs):
"""Return a mock asynchronous function."""
m = mock.MagicMock(*args, **kwargs)
async def mock_coro(*args, **kwargs):
return m(*args, **kwargs)
mock_coro.mock = m
return mock_coro
def _run(coro):
"""Run the given coroutine."""
return asyncio.get_event_loop().run_until_complete(coro)
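# The pattern used throughout these tests: wrap a coroutine stand-in with
# AsyncMock, drive it with _run, and assert against the wrapped ``.mock``
# attribute. A minimal sketch:
#
#     send = AsyncMock(return_value=None)
#     _run(send('4bar'))
#     send.mock.assert_called_once_with('4bar')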
@unittest.skipIf(sys.version_info < (3, 5), 'only for Python 3.5+')
class TestSocket(unittest.TestCase):
def _get_read_mock_coro(self, payload):
mock_input = mock.MagicMock()
mock_input.read = AsyncMock()
mock_input.read.mock.return_value = payload
return mock_input
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.2
mock_server.ping_interval = 0.2
mock_server.async_handlers = False
mock_server._async = {'asyncio': True,
'create_route': mock.MagicMock(),
'translate_request': mock.MagicMock(),
'make_response': mock.MagicMock(),
'websocket': 'w'}
mock_server._async['translate_request'].return_value = 'request'
mock_server._async['make_response'].return_value = 'response'
mock_server._trigger_event = AsyncMock()
def create_queue(*args, **kwargs):
queue = asyncio.Queue(*args, **kwargs)
queue.Empty = asyncio.QueueEmpty
return queue
mock_server.create_queue = create_queue
return mock_server
def test_create(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
self.assertEqual(s.server, mock_server)
self.assertEqual(s.sid, 'sid')
self.assertFalse(s.upgraded)
self.assertFalse(s.closed)
self.assertTrue(hasattr(s.queue, 'get'))
self.assertTrue(hasattr(s.queue, 'put'))
self.assertTrue(hasattr(s.queue, 'task_done'))
self.assertTrue(hasattr(s.queue, 'join'))
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
self.assertRaises(exceptions.QueueEmpty, _run, s.poll())
def test_poll(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
_run(s.send(pkt1))
_run(s.send(pkt2))
self.assertEqual(_run(s.poll()), [pkt1, pkt2])
def test_poll_none(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.queue.put(None))
self.assertEqual(_run(s.poll()), [])
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.receive(packet.Packet(packet.PING, data='abc')))
r = _run(s.poll())
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), b'3abc')
def test_message_sync_handler(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.receive(packet.Packet(packet.MESSAGE, data='foo')))
mock_server._trigger_event.mock.assert_called_once_with(
'message', 'sid', 'foo', run_async=False)
def test_message_async_handler(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
mock_server.async_handlers = True
_run(s.receive(packet.Packet(packet.MESSAGE, data='foo')))
mock_server._trigger_event.mock.assert_called_once_with(
'message', 'sid', 'foo', run_async=True)
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
self.assertRaises(exceptions.UnknownPacketError, _run,
s.receive(packet.Packet(packet.OPEN)))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = -6
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.last_ping = time.time() - 1
s.close = AsyncMock()
_run(s.send('packet'))
s.close.mock.assert_called_once_with(wait=False, abort=False)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
_run(s.send(pkt1))
_run(s.send(pkt2))
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
packets = _run(s.handle_get_request(environ))
self.assertEqual(packets, [pkt1, pkt2])
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
self.assertRaises(exceptions.QueueEmpty, _run,
s.handle_get_request(environ))
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
s.receive = AsyncMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p),
'wsgi.input': self._get_read_mock_coro(p)}
_run(s.handle_post_request(environ))
self.assertEqual(s.receive.mock.call_count, 2)
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
s.receive = AsyncMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p),
'wsgi.input': self._get_read_mock_coro(p)}
self.assertRaises(exceptions.ContentTooLongError, _run,
s.handle_post_request(environ))
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
s._upgrade_websocket = AsyncMock()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket'}
_run(s.handle_get_request(environ))
s._upgrade_websocket.mock.assert_called_once_with(environ)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = mock.MagicMock()
mock_ws = AsyncMock()
mock_server._async['websocket'].return_value = mock_ws
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
environ = "foo"
_run(s._upgrade_websocket(environ))
mock_server._async['websocket'].assert_called_once_with(
s._websocket_handler)
mock_ws.mock.assert_called_once_with(environ)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = mock.MagicMock()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.upgraded = True
environ = "foo"
self.assertRaises(IOError, _run, s._upgrade_websocket(environ))
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
_run(s.receive(packet.Packet(packet.UPGRADE)))
r = _run(s.poll())
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), packet.Packet(packet.NOOP).encode())
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
ws = mock.MagicMock()
ws.wait = AsyncMock()
ws.wait.mock.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False)
_run(s._websocket_handler(ws))
self.assertFalse(s.upgraded)
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
probe = six.text_type('probe')
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False)]
_run(s._websocket_handler(ws))
ws.send.mock.assert_called_once_with(packet.Packet(
packet.PONG, data=probe).encode(always_bytes=False))
self.assertEqual(_run(s.queue.get()).packet_type, packet.NOOP)
self.assertFalse(s.upgraded)
def test_upgrade_not_supported(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = None
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
environ = "foo"
_run(s._upgrade_websocket(environ))
mock_server._bad_request.assert_called_once_with()
def test_close_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.close = AsyncMock()
_run(s.receive(packet.Packet(packet.CLOSE)))
s.close.mock.assert_called_once_with(wait=False, abort=True)
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = False
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], None])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
_run(s._websocket_handler(ws))
self.assertTrue(s.connected)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.mock.call_count, 2)
mock_server._trigger_event.mock.assert_has_calls([
mock.call('message', 'sid', 'foo', run_async=False),
mock.call('disconnect', 'sid')])
ws.send.mock.assert_called_with('4bar')
def test_websocket_upgrade_read_write(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], exceptions.QueueEmpty])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
_run(s._websocket_handler(ws))
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.mock.call_count, 2)
mock_server._trigger_event.mock.assert_has_calls([
mock.call('message', 'sid', 'foo', run_async=False),
mock.call('disconnect', 'sid')])
ws.send.mock.assert_called_with('4bar')
def test_websocket_upgrade_with_payload(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
probe = six.text_type('probe')
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE, data=b'2').encode(
always_bytes=False)]
_run(s._websocket_handler(ws))
self.assertTrue(s.upgraded)
def test_websocket_upgrade_with_backlog(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
probe = six.text_type('probe')
foo = six.text_type('foo')
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE, data=b'2').encode(
always_bytes=False)]
s.upgrading = True
_run(s.send(packet.Packet(packet.MESSAGE, data=foo)))
_run(s._websocket_handler(ws))
self.assertTrue(s.upgraded)
self.assertFalse(s.upgrading)
self.assertEqual(s.packet_backlog, [])
ws.send.mock.assert_called_with('4foo')
def test_websocket_read_write_wait_fail(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = False
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)],
[packet.Packet(packet.MESSAGE, data=bar)], exceptions.QueueEmpty])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
RuntimeError]
ws.send.mock.side_effect = [None, RuntimeError]
_run(s._websocket_handler(ws))
self.assertEqual(s.closed, True)
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = False
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], exceptions.QueueEmpty])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
_run(s._websocket_handler(ws))
self.assertTrue(s.connected)
self.assertEqual(mock_server._trigger_event.mock.call_count, 2)
mock_server._trigger_event.mock.assert_has_calls([
mock.call('message', 'sid', foo, run_async=False),
mock.call('disconnect', 'sid')])
ws.send.mock.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.close(wait=False))
self.assertRaises(exceptions.SocketIsClosedError, _run,
s.send(packet.Packet(packet.NOOP)))
def test_close_after_close(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.close(wait=False))
self.assertTrue(s.closed)
self.assertEqual(mock_server._trigger_event.mock.call_count, 1)
mock_server._trigger_event.mock.assert_called_once_with('disconnect',
'sid')
_run(s.close())
self.assertEqual(mock_server._trigger_event.mock.call_count, 1)
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.queue.put = AsyncMock()
s.queue.join = AsyncMock()
_run(s.close(wait=True))
s.queue.join.mock.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.queue.put = AsyncMock()
s.queue.join = AsyncMock()
_run(s.close(wait=False))
self.assertEqual(s.queue.join.mock.call_count, 0)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_gtm_monitor_bigip
short_description: Manages F5 BIG-IP GTM BIG-IP monitors
description:
- Manages F5 BIG-IP GTM (now BIG-IP DNS) BIG-IP monitors. This monitor is used by GTM to monitor
BIG-IPs themselves.
version_added: "1.0.0"
options:
name:
description:
- Name of the monitor.
type: str
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(bigip)
parent on the C(Common) partition.
type: str
default: "/Common/bigip"
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, the default value will be
'*'.
type: str
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, the default value will be
'*'. Note that if an IP address is specified, the port must be a value between 1 and 65535.
type: str
interval:
description:
- Specifies, in seconds, the frequency at which the system issues the monitor
check when either the resource is down or the status of the resource is unknown.
- When creating a new monitor, if this parameter is not provided, the
default value will be C(30). This value B(must) be less than the C(timeout) value.
type: int
timeout:
description:
- Specifies the number of seconds the target has in which to respond to the
monitor request.
- If the target responds within the set time period, it is considered up.
- If the target does not respond within the set time period, it is considered down.
- When this value is set to 0 (zero), the system uses the interval from the parent monitor.
- When creating a new monitor, if this parameter is not provided,
the default value will be C(120).
type: int
ignore_down_response:
description:
- Specifies whether the monitor allows more than one probe attempt per interval.
- When C(yes), specifies the monitor ignores down responses for the duration of
the monitor timeout. Once the monitor timeout is reached without the system receiving
an up response, the system marks the object down.
- When C(no), specifies the monitor immediately marks an object down when it
receives a down response.
- When creating a new monitor, if this parameter is not provided, the default
value will be C(no).
type: bool
aggregate_dynamic_ratios:
description:
- Specifies how the system combines the module values to create the proportion
(score) for the load balancing operation.
- The score represents the module's estimated capacity for handling traffic.
- Averaged values are appropriate for downstream Web Accelerator or Application
Security Manager (ASM) virtual servers.
- When creating a new monitor, if this parameter is not specified, the default
of C(none) is used, meaning the system does not use the scores in the load
balancing operation.
- When C(none), specifies the monitor ignores the nodes and pool member scores.
- When C(average-nodes), specifies the system averages the dynamic ratios
on the nodes associated with the monitor's target virtual servers and returns
that average as the virtual servers' score.
- When C(sum-nodes), specifies the system adds together the scores of the
nodes associated with the monitor's target virtual servers and uses that value
in the load balancing operation.
- When C(average-members), specifies the system averages the dynamic ratios
on the pool members associated with the monitor's target virtual servers and
returns that average as the virtual servers' score.
- When C(sum-members), specifies the system adds together the scores of the
pool members associated with the monitor's target virtual servers and uses
that value in the load balancing operation.
type: str
choices:
- none
- average-nodes
- sum-nodes
- average-members
- sum-members
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures the monitor exists.
- When C(absent), ensures the monitor is removed.
type: str
choices:
- present
- absent
default: present
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create BIG-IP Monitor
bigip_gtm_monitor_bigip:
state: present
ip: 10.10.10.10
name: my_monitor
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Remove BIG-IP Monitor
bigip_gtm_monitor_bigip:
state: absent
name: my_monitor
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add BIG-IP monitor for all addresses, port 514
bigip_gtm_monitor_bigip:
port: 514
name: my_monitor
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
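# Illustrative example combining the timing options; interval must be less than timeout.
- name: Create BIG-IP Monitor with custom interval and timeout
  bigip_gtm_monitor_bigip:
    state: present
    name: my_timed_monitor
    interval: 20
    timeout: 60
    ignore_down_response: yes
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost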
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: str
sample: bigip
ip:
description: The new IP of IP/port definition.
returned: changed
type: str
sample: 10.12.13.14
interval:
description: The new interval at which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
aggregate_dynamic_ratios:
description: The new aggregate dynamic ratios setting of the monitor.
returned: changed
type: str
sample: sum-members
ignore_down_response:
description: Whether to ignore the down response or not.
returned: changed
type: bool
sample: True
'''
import os
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec
)
from ..module_utils.icontrol import (
module_provisioned, tmos_version
)
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'ignoreDownResponse': 'ignore_down_response',
'aggregateDynamicRatios': 'aggregate_dynamic_ratios',
}
api_attributes = [
'defaultsFrom',
'interval',
'timeout',
'destination',
'ignoreDownResponse',
'aggregateDynamicRatios',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'ignore_down_response',
'aggregate_dynamic_ratios',
]
updatables = [
'destination',
'interval',
'timeout',
'ignore_down_response',
'aggregate_dynamic_ratios',
]
@property
def interval(self):
if self._values['interval'] is None:
return None
if not 1 <= int(self._values['interval']) <= 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def type(self):
return 'bigip'
class ApiParameters(Parameters):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
return int(port)
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
if self._values['ignore_down_response'] == 'disabled':
return False
return True
class ModuleParameters(Parameters):
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@property
def parent(self):
if self._values['parent'] is None:
return None
if self._values['parent'].startswith('/'):
parent = os.path.basename(self._values['parent'])
result = '/{0}/{1}'.format(self.partition, parent)
else:
result = '/{0}/{1}'.format(self.partition, self._values['parent'])
return result
@property
def ip(self):
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
@property
def ignore_down_response(self):
if self._values['ignore_down_response']:
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 120})
if self.want.interval is None:
self.want.update({'interval': 30})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.want.ignore_down_response is None:
self.want.update({'ignore_down_response': False})
if self.want.aggregate_dynamic_ratios is None:
self.want.update({'aggregate_dynamic_ratios': 'none'})
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state in ["present", "disabled"]:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.module.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the monitor.")
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/bigip'),
ip=dict(),
port=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
ignore_down_response=dict(type='bool'),
aggregate_dynamic_ratios=dict(
choices=[
'none', 'average-nodes', 'sum-nodes', 'average-members', 'sum-members'
]
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Apply forces to particles."""
import hoomd
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.operation import _HOOMDBaseObject
from hoomd.logging import log
from hoomd.data.typeparam import TypeParameter
from hoomd.data.typeconverter import OnlyTypes
from hoomd.data.parameterdicts import ParameterDict, TypeParameterDict
from hoomd.filter import ParticleFilter
from hoomd.md.manifold import Manifold
import numpy
class _force: # noqa - This will be removed eventually. Needed to build docs.
pass
class Force(_HOOMDBaseObject):
"""Defines a force in HOOMD-blue.
Pair, angle, bond, and other forces are subclasses of this class.
Note:
:py:class:`Force` is the base class for all loggable forces.
Users should not instantiate this class directly.
Initializes some loggable quantities.
"""
@log(requires_run=True)
def energy(self):
"""float: Total contribution to the potential energy of the system \
:math:`[\\mathrm{energy}]`."""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.calcEnergySum()
@log(category="particle", requires_run=True)
def energies(self):
"""(*N_particles*, ) `numpy.ndarray` of ``float``: Energy \
contribution from each particle :math:`[\\mathrm{energy}]`.
Attention:
In MPI parallel execution, the array is available on rank 0 only.
`energies` is `None` on ranks >= 1.
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.getEnergies()
@log(requires_run=True)
def additional_energy(self):
"""float: Additional energy term not included in `energies` \
:math:`[\\mathrm{energy}]`."""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.getExternalEnergy()
@log(category="particle", requires_run=True)
def forces(self):
"""(*N_particles*, 3) `numpy.ndarray` of ``float``: The \
force applied to each particle :math:`[\\mathrm{force}]`.
Attention:
In MPI parallel execution, the array is available on rank 0 only.
`forces` is `None` on ranks >= 1.
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.getForces()
@log(category="particle", requires_run=True)
def torques(self):
"""(*N_particles*, 3) `numpy.ndarray` of ``float``: The torque applied \
to each particle :math:`[\\mathrm{force} \\cdot \\mathrm{length}]`.
Attention:
In MPI parallel execution, the array is available on rank 0 only.
`torques` is `None` on ranks >= 1.
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.getTorques()
@log(category="particle", requires_run=True)
def virials(self):
"""(*N_particles*, 6) `numpy.ndarray` of ``float``: Virial tensor \
contribution from each particle :math:`[\\mathrm{energy}]`.
The 6 elements form the upper-triangular virial tensor in the order:
xx, xy, xz, yy, yz, zz.
Attention:
To improve performance `Force` objects only compute virials when
needed. When not computed, `virials` is `None`. Virials are computed
on every step when using a `md.methods.NPT` or `md.methods.NPH`
integrator, on steps where a writer is triggered (such as
`write.GSD` which may log pressure or virials), or when
`Simulation.always_compute_pressure` is `True`.
Attention:
In MPI parallel execution, the array is available on rank 0 only.
`virials` is `None` on ranks >= 1.
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.getVirials()
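# Usage sketch (names are illustrative; assumes ``sim`` is a running
# ``hoomd.Simulation`` and ``force`` is an attached Force instance):
#
#     sim.always_compute_pressure = True   # make virials available every step
#     sim.run(1)
#     if force.virials is not None:        # populated on rank 0 only under MPI
#         total_virial = numpy.sum(force.virials, axis=0)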
@log(category="sequence", requires_run=True)
def additional_virial(self):
"""(1, 6) `numpy.ndarray` of ``float``: Additional virial tensor \
term not included in `virials` :math:`[\\mathrm{energy}]`."""
self._cpp_obj.compute(self._simulation.timestep)
virial = []
for i in range(6):
virial.append(self._cpp_obj.getExternalVirial(i))
return numpy.array(virial, dtype=numpy.float64)
class constant(Force): # noqa - this will be renamed when it is ported to v3
R"""Constant force.
Args:
fvec (tuple): force vector :math:`[force]`
tvec (tuple): torque vector :math:`[force \cdot length]`
fx (float): x component of force, retained for backwards compatibility
:math:`[\mathrm{force}]`
fy (float): y component of force, retained for backwards compatibility
:math:`[\mathrm{force}]`
fz (float): z component of force, retained for backwards compatibility
:math:`[\mathrm{force}]`
group (``hoomd.group``): Group for which the force will be set.
callback (`callable`): A python callback invoked every time the forces
are computed
:py:class:`constant` specifies that a constant force should be added to
every particle in the simulation or optionally to all particles in a group.
Note:
Forces are kept constant during the simulation. If a callback should
re-compute particle forces every time step, it needs to overwrite the
old forces of **all** particles with new values.
Note:
Per-particle forces take precedence over a particle group, which takes
precedence over constant forces for all particles.
Examples::
force.constant(fx=1.0, fy=0.5, fz=0.25)
const = force.constant(fvec=(0.4,1.0,0.5))
const = force.constant(fvec=(0.4,1.0,0.5),group=fluid)
const = force.constant(fvec=(0.4,1.0,0.5), tvec=(0,0,1) ,group=fluid)
def updateForces(timestep):
global const
const.setForce(tag=1, fvec=(1.0*timestep,2.0*timestep,3.0*timestep))
const = force.constant(callback=updateForces)
"""
def __init__(
self,
fx=None,
fy=None,
fz=None,
fvec=None,
tvec=None,
group=None,
callback=None,
):
if (fx is not None) and (fy is not None) and (fz is not None):
self.fvec = (fx, fy, fz)
elif fvec is not None:
self.fvec = fvec
else:
self.fvec = (0, 0, 0)
if tvec is not None:
self.tvec = tvec
else:
self.tvec = (0, 0, 0)
if (self.fvec == (0, 0, 0)) and (self.tvec == (0, 0, 0)
and callback is None):
hoomd.context.current.device.cpp_msg.warning(
"The constant force specified has no non-zero components\n")
# initialize the base class
Force.__init__(self)
# create the c++ mirror class
if group is not None:
self.cppForce = _hoomd.ConstForceCompute(
hoomd.context.current.system_definition,
group.cpp_group,
self.fvec[0],
self.fvec[1],
self.fvec[2],
self.tvec[0],
self.tvec[1],
self.tvec[2],
)
else:
self.cppForce = _hoomd.ConstForceCompute(
hoomd.context.current.system_definition,
self.fvec[0],
self.fvec[1],
self.fvec[2],
self.tvec[0],
self.tvec[1],
self.tvec[2],
)
if callback is not None:
self.cppForce.setCallback(callback)
hoomd.context.current.system.addCompute(self.cppForce, self.force_name)
R""" Change the value of the constant force.
Args:
fx (float) New x-component of the force :math:`[\mathrm{force}]`
fy (float) New y-component of the force :math:`[\mathrm{force}]`
fz (float) New z-component of the force :math:`[\mathrm{force}]`
fvec (tuple) New force vector
tvec (tuple) New torque vector
group Group for which the force will be set
tag (int) Particle tag for which the force will be set
.. versionadded:: 2.3
Using setForce() requires that you saved the created constant force in a
variable.
Examples:
const = force.constant(fx=0.4, fy=1.0, fz=0.5)
const.setForce(fx=0.2, fy=0.1, fz=-0.5)
const.setForce(fx=0.2, fy=0.1, fz=-0.5, group=fluid)
const.setForce(fvec=(0.2,0.1,-0.5), tvec=(0,0,1), group=fluid)
"""
def setForce( # noqa - this will be documented when it is ported to v3
self,
fx=None,
fy=None,
fz=None,
fvec=None,
tvec=None,
group=None,
tag=None,
):
if (fx is not None) and (fy is not None) and (fz is not None):
self.fvec = (fx, fy, fz)
elif fvec is not None:
self.fvec = fvec
else:
self.fvec = (0, 0, 0)
if tvec is not None:
self.tvec = tvec
else:
self.tvec = (0, 0, 0)
if (self.fvec == (0, 0, 0)) and (self.tvec == (0, 0, 0)):
hoomd.context.current.device.cpp_msg.warning(
"You are setting the constant force to have no non-zero "
"components\n")
self.check_initialization()
if group is not None:
self.cppForce.setGroupForce(
group.cpp_group,
self.fvec[0],
self.fvec[1],
self.fvec[2],
self.tvec[0],
self.tvec[1],
self.tvec[2],
)
elif tag is not None:
self.cppForce.setParticleForce(
tag,
self.fvec[0],
self.fvec[1],
self.fvec[2],
self.tvec[0],
self.tvec[1],
self.tvec[2],
)
else:
self.cppForce.setForce(
self.fvec[0],
self.fvec[1],
self.fvec[2],
self.tvec[0],
self.tvec[1],
self.tvec[2],
)
R""" Set a python callback to be called before the force is evaluated
Args:
callback (`callable`) The callback function
Examples:
const = force.constant(fx=0.4, fy=1.0, fz=0.5)
def updateForces(timestep):
global const
const.setForce(tag=1, fvec=(1.0*timestep,2.0*timestep,3.0*timestep))
const.set_callback(updateForces)
run(100)
# Reset the callback
const.set_callback(None)
"""
def set_callback(self, callback=None): # noqa - will be ported to v3
self.cppForce.setCallback(callback)
# there are no coeffs to update in the constant force compute
def update_coeffs(self): # noqa - will be ported to v3
pass
class Active(Force):
r"""Active force.
Args:
filter (:py:mod:`hoomd.filter`): Subset of particles on which to apply
active forces.
:py:class:`Active` specifies that an active force should be added to
particles selected by the filter. Obeys :math:`\delta {\bf r}_i
= \delta t v_0 \hat{p}_i`, where :math:`v_0` is the active velocity. In 2D
:math:`\hat{p}_i = (\cos \theta_i, \sin \theta_i)` is the active force
vector for particle :math:`i`. The active force and the active torque
vectors in the particle frame stay constant during the simulation. Hence,
the active forces in the system frame are composed of the forces in particle
frame and the current orientation of the particle.
Note:
To introduce rotational diffusion to the particle orientations, use
`create_diffusion_updater`.
.. seealso::
`hoomd.md.update.ActiveRotationalDiffusion`
Examples::
all = hoomd.filter.All()
active = hoomd.md.force.Active(
filter=hoomd.filter.All()
)
active.active_force['A','B'] = (1,0,0)
active.active_torque['A','B'] = (0,0,0)
rotational_diffusion_updater = active.create_diffusion_updater(
trigger=10)
sim.operations += rotational_diffusion_updater
Attributes:
filter (:py:mod:`hoomd.filter`): Subset of particles on which to apply
active forces.
.. py:attribute:: active_force
Active force vector in the local reference frame of the particle
:math:`[\mathrm{force}]`. It is defined per particle type and stays
constant during the simulation.
Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`,
`float`]]
.. py:attribute:: active_torque
Active torque vector in the local reference frame of the particle
:math:`[\mathrm{force} \cdot \mathrm{length}]`. It is defined per
particle type and stays constant during the simulation.
Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`,
`float`]]
"""
def __init__(self, filter):
# store metadata
param_dict = ParameterDict(filter=ParticleFilter)
param_dict["filter"] = filter
# set defaults
self._param_dict.update(param_dict)
active_force = TypeParameter(
"active_force",
type_kind="particle_types",
param_dict=TypeParameterDict((1.0, 0.0, 0.0), len_keys=1),
)
active_torque = TypeParameter(
"active_torque",
type_kind="particle_types",
param_dict=TypeParameterDict((0.0, 0.0, 0.0), len_keys=1),
)
self._extend_typeparam([active_force, active_torque])
def _add(self, simulation):
"""Add the operation to a simulation.
Active forces use RNGs. Warn the user if they did not set the seed.
"""
if isinstance(simulation, hoomd.Simulation):
simulation._warn_if_seed_unset()
super()._add(simulation)
def _attach(self):
# initialize the reflected c++ class
sim = self._simulation
if isinstance(sim.device, hoomd.device.CPU):
my_class = _md.ActiveForceCompute
else:
my_class = _md.ActiveForceComputeGPU
self._cpp_obj = my_class(sim.state._cpp_sys_def,
sim.state._get_group(self.filter))
# Attach param_dict and typeparam_dict
super()._attach()
def create_diffusion_updater(self, trigger, rotational_diffusion):
"""Create a rotational diffusion updater for this active force.
Args:
trigger (hoomd.trigger.Trigger): Select the timesteps to update
rotational diffusion.
rotational_diffusion (hoomd.variant.Variant or float): The
rotational diffusion as a function of time or a constant.
Returns:
hoomd.md.update.ActiveRotationalDiffusion:
The rotational diffusion updater.
"""
return hoomd.md.update.ActiveRotationalDiffusion(
trigger, self, rotational_diffusion)
class ActiveOnManifold(Active):
r"""Active force on a manifold.
Args:
filter (`hoomd.filter.ParticleFilter`): Subset of particles on which to
apply active forces.
manifold_constraint (`hoomd.md.manifold.Manifold`): Manifold constraint.
:py:class:`ActiveOnManifold` specifies that a constrained active force
should be added to particles selected by the filter similar to
:py:class:`Active`. The active force vector :math:`\hat{p}_i` is restricted
to the local tangent plane of the manifold constraint at point :math:`{\bf
r}_i`. For more information see :py:class:`Active`.
Hint:
Use `ActiveOnManifold` with a `md.methods.rattle` integration method
with the same manifold constraint.
Examples::
all = filter.All()
sphere = hoomd.md.manifold.Sphere(r=10)
active = hoomd.md.force.ActiveOnManifold(
filter=hoomd.filter.All(),
manifold_constraint=sphere
)
active.active_force['A','B'] = (1,0,0)
active.active_torque['A','B'] = (0,0,0)
Attributes:
filter (`hoomd.filter.ParticleFilter`): Subset of particles on which to
apply active forces.
manifold_constraint (`hoomd.md.manifold.Manifold`): Manifold constraint.
.. py:attribute:: active_force
Active force vector in the local reference frame of the particle
:math:`[\mathrm{force}]`. It is defined per particle type and stays
constant during the simulation.
Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`,
`float`]]
.. py:attribute:: active_torque
Active torque vector in local reference frame of the particle
:math:`[\mathrm{force} \cdot \mathrm{length}]`. It is defined per
particle type and stays constant during the simulation.
Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`,
`float`]]
"""
def __init__(self, filter, manifold_constraint):
# store metadata
super().__init__(filter)
param_dict = ParameterDict(
manifold_constraint=OnlyTypes(Manifold, allow_none=False))
param_dict["manifold_constraint"] = manifold_constraint
self._param_dict.update(param_dict)
def _getattr_param(self, attr):
if self._attached:
if attr == "manifold_constraint":
return self._param_dict["manifold_constraint"]
parameter = getattr(self._cpp_obj, attr)
return parameter
else:
return self._param_dict[attr]
def _setattr_param(self, attr, value):
if attr == "manifold_constraint":
raise AttributeError(
"Cannot set manifold_constraint after construction.")
super()._setattr_param(attr, value)
def _attach(self):
# initialize the reflected c++ class
sim = self._simulation
if not self.manifold_constraint._attached:
self.manifold_constraint._attach()
base_class_str = 'ActiveForceConstraintCompute'
base_class_str += self.manifold_constraint.__class__.__name__
if isinstance(sim.device, hoomd.device.GPU):
base_class_str += "GPU"
self._cpp_obj = getattr(
_md, base_class_str)(sim.state._cpp_sys_def,
sim.state._get_group(self.filter),
self.manifold_constraint._cpp_obj)
# Attach param_dict and typeparam_dict
super()._attach()
|
|
"""
Asynchronous Advantage Actor Critic (A3C) with Continuous Action Space.
Actor Critic History
----------------------
A3C > DDPG (for continuous action space) > AC
Advantage
----------
Trains faster and is more stable than AC.
Disadvantage
-------------
Has bias.
Reference
----------
Original Paper: https://arxiv.org/pdf/1602.01783.pdf
MorvanZhou's tutorial: https://morvanzhou.github.io/tutorials/
MorvanZhou's code: https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/experiments/Solve_BipedalWalker/A3C.py
Environment
-----------
BipedalWalker-v2 : https://gym.openai.com/envs/BipedalWalker-v2
Reward is given for moving forward, total 300+ points up to the far end.
If the robot falls, it gets -100. Applying motor torque costs a small amount of
points, more optimal agent will get better score. State consists of hull angle
speed, angular velocity, horizontal speed, vertical speed, position of joints
and joints angular speed, legs contact with ground, and 10 lidar rangefinder
measurements. There are no coordinates in the state vector.
Prerequisites
--------------
tensorflow 2.0.0a0
tensorflow-probability 0.6.0
tensorlayer 2.0.0
&&
pip install box2d box2d-kengz --user
To run
------
python tutorial_A3C.py --train/test
"""
import argparse
import multiprocessing
import os
import threading
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import tensorlayer as tl
tfd = tfp.distributions
tl.logging.set_verbosity(tl.logging.DEBUG)
# add arguments in command --train/test
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=True)
args = parser.parse_args()
##################### hyper parameters ####################
ENV_ID = 'BipedalWalker-v2' # BipedalWalkerHardcore-v2 BipedalWalker-v2 LunarLanderContinuous-v2
RANDOM_SEED = 2 # random seed, can be either an int number or None
RENDER = False # render while training
ALG_NAME = 'A3C'
N_WORKERS = multiprocessing.cpu_count() # number of workers according to number of cores in cpu
# N_WORKERS = 2 # manually set number of workers
MAX_GLOBAL_EP = 15000 # number of training episodes
TEST_EPISODES = 10  # number of testing episodes
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10  # push updates to the global nets every this many steps
GAMMA = 0.99 # reward discount factor
ENTROPY_BETA = 0.005 # factor for entropy boosted exploration
LR_A = 0.00005 # learning rate for actor
LR_C = 0.0001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0 # will increase during training, stop training when it >= MAX_GLOBAL_EP
################### Asynchronous Advantage Actor Critic (A3C) ####################################
class ACNet(object):
def __init__(self, scope):
self.scope = scope
w_init = tf.keras.initializers.glorot_normal(seed=None) # initializer, glorot=xavier
def get_actor(input_shape): # policy network
with tf.name_scope(self.scope):
ni = tl.layers.Input(input_shape, name='in')
nn = tl.layers.Dense(n_units=500, act=tf.nn.relu6, W_init=w_init, name='la')(ni)
nn = tl.layers.Dense(n_units=300, act=tf.nn.relu6, W_init=w_init, name='la2')(nn)
mu = tl.layers.Dense(n_units=N_A, act=tf.nn.tanh, W_init=w_init, name='mu')(nn)
sigma = tl.layers.Dense(n_units=N_A, act=tf.nn.softplus, W_init=w_init, name='sigma')(nn)
return tl.models.Model(inputs=ni, outputs=[mu, sigma], name=scope + '/Actor')
self.actor = get_actor([None, N_S])
self.actor.train() # train mode for Dropout, BatchNorm
def get_critic(input_shape):  # we use the state-value function V(s) here, not the Q-function
with tf.name_scope(self.scope):
ni = tl.layers.Input(input_shape, name='in')
nn = tl.layers.Dense(n_units=500, act=tf.nn.relu6, W_init=w_init, name='lc')(ni)
nn = tl.layers.Dense(n_units=300, act=tf.nn.relu6, W_init=w_init, name='lc2')(nn)
v = tl.layers.Dense(n_units=1, W_init=w_init, name='v')(nn)
return tl.models.Model(inputs=ni, outputs=v, name=scope + '/Critic')
self.critic = get_critic([None, N_S])
self.critic.train() # train mode for Dropout, BatchNorm
@tf.function  # compile the update into a TensorFlow graph; returns a tensor
def update_global(
self, buffer_s, buffer_a, buffer_v_target, globalAC
):  # refer to the global Actor-Critic network for updating it with samples
''' update the global critic '''
with tf.GradientTape() as tape:
self.v = self.critic(buffer_s)
self.v_target = buffer_v_target
td = tf.subtract(self.v_target, self.v, name='TD_error')
self.c_loss = tf.reduce_mean(tf.square(td))
self.c_grads = tape.gradient(self.c_loss, self.critic.trainable_weights)
OPT_C.apply_gradients(zip(self.c_grads, globalAC.critic.trainable_weights)) # local grads applies to global net
# del tape # Drop the reference to the tape
''' update the global actor '''
with tf.GradientTape() as tape:
self.mu, self.sigma = self.actor(buffer_s)
self.test = self.sigma[0]
self.mu, self.sigma = self.mu * A_BOUND[1], self.sigma + 1e-5
normal_dist = tfd.Normal(self.mu, self.sigma) # no tf.contrib for tf2.0
self.a_his = buffer_a # float32
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * td # td is from the critic part, no gradients for it
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
self.a_grads = tape.gradient(self.a_loss, self.actor.trainable_weights)
OPT_A.apply_gradients(zip(self.a_grads, globalAC.actor.trainable_weights)) # local grads applies to global net
return self.test # for test purpose
@tf.function
def pull_global(self, globalAC): # run by a local, pull weights from the global nets
for l_p, g_p in zip(self.actor.trainable_weights, globalAC.actor.trainable_weights):
l_p.assign(g_p)
for l_p, g_p in zip(self.critic.trainable_weights, globalAC.critic.trainable_weights):
l_p.assign(g_p)
def get_action(self, s, greedy=False): # run by a local
s = s[np.newaxis, :]
self.mu, self.sigma = self.actor(s)
with tf.name_scope('wrap_a_out'):
self.mu, self.sigma = self.mu * A_BOUND[1], self.sigma + 1e-5
if greedy:
return self.mu.numpy()[0]
normal_dist = tfd.Normal(self.mu, self.sigma) # for continuous action space
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), *A_BOUND)
return self.A.numpy()[0]
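# Illustrative note (not part of the original code): greedy=True returns the
# scaled mean mu directly, while the default path draws a sample from
# Normal(mu, sigma) and clips it into A_BOUND, e.g. a draw of 1.3 with
# A_BOUND = [-1, 1] becomes 1.0.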
def save(self): # save trained weights
path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID]))
if not os.path.exists(path):
os.makedirs(path)
tl.files.save_npz(self.actor.trainable_weights, name=os.path.join(path, 'model_actor.npz'))
tl.files.save_npz(self.critic.trainable_weights, name=os.path.join(path, 'model_critic.npz'))
def load(self): # load trained weights
path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID]))
tl.files.load_and_assign_npz(name=os.path.join(path, 'model_actor.npz'), network=self.actor)
tl.files.load_and_assign_npz(name=os.path.join(path, 'model_critic.npz'), network=self.critic)
class Worker(object):
def __init__(self, name):
self.env = gym.make(ENV_ID)
self.name = name
self.AC = ACNet(name)
# def work(self):
def work(self, globalAC):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
while True:
# visualize Worker_0 during training
if RENDER and self.name == 'Worker_0' and total_step % 30 == 0:
self.env.render()
s = s.astype('float32') # double to float
a = self.AC.get_action(s)
s_, r, done, _info = self.env.step(a)
s_ = s_.astype('float32') # double to float
# set robot falls reward to -2 instead of -100
if r == -100: r = -2
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = self.AC.critic(s_[np.newaxis, :])[0, 0] # reduce dim from 2 to 0
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
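# Worked example (illustrative values, not from the original code): with
# GAMMA = 0.99, buffer_r = [1.0, 2.0, 3.0] and bootstrap v_s_ = 10.0, the
# reversed accumulation yields 3 + 0.99*10 = 12.9, then 2 + 0.99*12.9 = 14.771,
# then 1 + 0.99*14.771 = 15.623, so buffer_v_target ends up as
# [15.623, 14.771, 12.9] after the reverse().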
buffer_s = tf.convert_to_tensor(np.vstack(buffer_s))
buffer_a = tf.convert_to_tensor(np.vstack(buffer_a))
buffer_v_target = tf.convert_to_tensor(np.vstack(buffer_v_target).astype('float32'))
# update gradients on global network
self.AC.update_global(buffer_s, buffer_a, buffer_v_target, globalAC)
buffer_s, buffer_a, buffer_r = [], [], []
# update local network from global network
self.AC.pull_global(globalAC)
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else: # moving average
GLOBAL_RUNNING_R.append(0.95 * GLOBAL_RUNNING_R[-1] + 0.05 * ep_r)
print('Training | {}, Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}' \
.format(self.name, GLOBAL_EP, MAX_GLOBAL_EP, ep_r, time.time() - T0))
GLOBAL_EP += 1
break
if __name__ == "__main__":
env = gym.make(ENV_ID)
# reproducible
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
A_BOUND[0] = A_BOUND[0].reshape(1, N_A)
A_BOUND[1] = A_BOUND[1].reshape(1, N_A)
with tf.device("/cpu:0"):
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
T0 = time.time()
if args.train:
# ============================= TRAINING ===============================
with tf.device("/cpu:0"):
OPT_A = tf.optimizers.RMSprop(LR_A, name='RMSPropA')
OPT_C = tf.optimizers.RMSprop(LR_C, name='RMSPropC')
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'Worker_%i' % i # worker name
workers.append(Worker(i_name))
COORD = tf.train.Coordinator()
# start TF threading
worker_threads = []
for worker in workers:
job = lambda: worker.work(GLOBAL_AC)
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
GLOBAL_AC.save()
plt.plot(GLOBAL_RUNNING_R)
if not os.path.exists('image'):
os.makedirs('image')
plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID])))
if args.test:
# ============================= EVALUATION =============================
GLOBAL_AC.load()
for episode in range(TEST_EPISODES):
s = env.reset()
episode_reward = 0
while True:
env.render()
s = s.astype('float32') # double to float
a = GLOBAL_AC.get_action(s, greedy=True)
s, r, d, _ = env.step(a)
episode_reward += r
if d:
break
print(
'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
episode + 1, TEST_EPISODES, episode_reward,
time.time() - T0
)
)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Nova Storage manages creating, attaching, detaching, and
destroying persistent storage volumes, a la EBS.
Currently uses ATA-over-Ethernet.
"""
import glob
import logging
import random
import socket
import subprocess
import time
from nova import vendor
from tornado import ioloop
from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
from nova import rpc
from nova import utils
from nova import validate
FLAGS = flags.FLAGS
flags.DEFINE_string('storage_dev', '/dev/sdb',
'Physical device to use for volumes')
flags.DEFINE_string('volume_group', 'nova-volumes',
'Name for the VG that will contain exported volumes')
flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('storage_name',
socket.gethostname(),
'name of this node')
flags.DEFINE_integer('shelf_id',
utils.last_octet(utils.get_my_ip()),
'AoE shelf_id for this node')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this node')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
# TODO(joshua) Index of volumes by project
def get_volume(volume_id):
""" Returns a redis-backed volume object """
volume_class = Volume
if FLAGS.fake_storage:
volume_class = FakeVolume
if datastore.Redis.instance().sismember('volumes', volume_id):
return volume_class(volume_id=volume_id)
raise exception.Error("Volume does not exist")
class BlockStore(object):
"""
There is one BlockStore running on each volume node.
However, each BlockStore can report on the state of
*all* volumes in the cluster.
"""
def __init__(self):
super(BlockStore, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
self.volume_class = FakeVolume
self._init_volume_group()
def report_state(self):
#TODO: aggregate the state of the system
pass
@validate.rangetest(size=(0, 100))
def create_volume(self, size, user_id, project_id):
"""
Creates an exported volume (fake or real),
restarts exports to make it available.
Volume at this point has size, owner, and zone.
"""
logging.debug("Creating volume of size: %s" % (size))
vol = self.volume_class.create(size, user_id, project_id)
datastore.Redis.instance().sadd('volumes', vol['volume_id'])
datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
self._restart_exports()
return vol['volume_id']
def by_node(self, node_id):
""" returns a list of volumes for a node """
for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)):
yield self.volume_class(volume_id=volume_id)
@property
def all(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers('volumes'):
yield self.volume_class(volume_id=volume_id)
def delete_volume(self, volume_id):
logging.debug("Deleting volume with id of: %s" % (volume_id))
vol = get_volume(volume_id)
if vol['status'] == "attached":
raise exception.Error("Volume is still attached")
if vol['node_name'] != FLAGS.storage_name:
raise exception.Error("Volume is not local to this node")
vol.destroy()
datastore.Redis.instance().srem('volumes', vol['volume_id'])
datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
return True
def _restart_exports(self):
if FLAGS.fake_storage:
return
utils.runthis("Setting exports to auto: %s", "sudo vblade-persist auto all")
utils.runthis("Starting all exports: %s", "sudo vblade-persist start all")
def _init_volume_group(self):
if FLAGS.fake_storage:
return
utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
class FakeBlockStore(BlockStore):
def __init__(self):
super(FakeBlockStore, self).__init__()
def _init_volume_group(self):
pass
def _restart_exports(self):
pass
class Volume(datastore.RedisModel):
object_type = 'volume'
def __init__(self, volume_id=None):
super(Volume, self).__init__(object_id=volume_id)
@classmethod
def create(cls, size, user_id, project_id):
volume_id = utils.generate_uid('vol')
vol = cls(volume_id=volume_id)
vol['volume_id'] = volume_id
vol['node_name'] = FLAGS.storage_name
vol['size'] = size
vol['user_id'] = user_id
vol['project_id'] = project_id
vol['availability_zone'] = FLAGS.storage_availability_zone
vol["instance_id"] = 'none'
vol["mountpoint"] = 'none'
vol['attach_time'] = 'none'
vol["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
vol['status'] = "creating" # creating | available | in-use
vol['attach_status'] = "detached" # attaching | attached | detaching | detached
vol['delete_on_termination'] = 'False'
vol.save()
vol.create_lv()
vol.setup_export()
# TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
vol['status'] = "available"
vol.save()
return vol
def start_attach(self, instance_id, mountpoint):
"""Mark the volume as attaching to the given instance."""
self['instance_id'] = instance_id
self['mountpoint'] = mountpoint
self['status'] = "in-use"
self['attach_status'] = "attaching"
self['attach_time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
self['delete_on_termination'] = 'False'
self.save()
def finish_attach(self):
"""Mark the volume as attached."""
self['attach_status'] = "attached"
self.save()
def start_detach(self):
"""Mark the volume as detaching."""
self['attach_status'] = "detaching"
self.save()
def finish_detach(self):
self['instance_id'] = None
self['mountpoint'] = None
self['status'] = "available"
self['attach_status'] = "detached"
self.save()
def destroy(self):
try:
self._remove_export()
except:
pass
self._delete_lv()
super(Volume, self).destroy()
def create_lv(self):
if str(self['size']) == '0':
sizestr = '100M'
else:
sizestr = '%sG' % self['size']
utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group))
def _delete_lv(self):
utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
def setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
self['shelf_id'] = shelf_id
self['blade_id'] = blade_id
self.save()
utils.runthis("Creating AOE export: %s",
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, self['volume_id']))
def _remove_export(self):
utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id']))
class FakeVolume(Volume):
def create_lv(self):
pass
def setup_export(self):
# TODO(???): This may not be good enough?
blade_id = ''.join([random.choice('0123456789') for x in xrange(3)])
self['shelf_id'] = FLAGS.shelf_id
self['blade_id'] = blade_id
self['aoe_device'] = "e%s.%s" % (FLAGS.shelf_id, blade_id)
self.save()
def _remove_export(self):
pass
def _delete_lv(self):
pass
def get_next_aoe_numbers():
aoes = glob.glob("/var/lib/vblade-persist/vblades/e*")
aoes.extend(['e0.0'])
blade_id = int(max([int(a.split('.')[1]) for a in aoes])) + 1
logging.debug("Next blade_id is %s" % (blade_id))
shelf_id = FLAGS.shelf_id
return (shelf_id, blade_id)
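# Minimal usage sketch (illustrative only, not part of the original module);
# it assumes a reachable Redis datastore and the flags defined above, and the
# instance id and mountpoint below are placeholders:
#
# bs = BlockStore() # or FakeBlockStore() when --fake_storage is set
# volume_id = bs.create_volume(10, 'user-1', 'project-1')
# vol = get_volume(volume_id)
# vol.start_attach('i-00000001', '/dev/vdc')
# vol.finish_attach()
# vol.start_detach()
# vol.finish_detach()
# bs.delete_volume(volume_id)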
|
|
#!/usr/bin/python
"""This module provides the :class:`Deck` object
"""
import deck_of_cards.card as card
import random
import logging
#: a logger object
LOGGER = logging.getLogger(__name__)
class Deck(object):
"""A Deck object
A new deck starts out ordered.
If jokers are included, contains (2 + 4 * 13) :class:`deck_of_cards.card.Card` objects
If no jokers are included, contains (4 * 13) :class:`deck_of_cards.card.Card` objects
"""
#: a boolean to represent if jokers exist in deck
_with_jokers = True
#: an array of unused :class:`deck_of_cards.card.Card` objects that are
#: waiting to be dealt
_cards = []
#: an array of discarded :class:`deck_of_cards.card.Card` objects
_discarded_cards = []
#: an array of :class:`deck_of_cards.card.Card` objects that have been dealt
_in_play_cards = []
def __init__(self, with_jokers=True):
"""
:param bool with_jokers: include jokers if True
"""
LOGGER.debug("Creating a new deck (with_jokers:%s)", with_jokers)
self._with_jokers = with_jokers
self._cards = []
self._discarded_cards = []
self._in_play_cards = []
# add jokers if necessary
if with_jokers:
for _ in xrange(2):
self._cards.append(card.Card(card.JOKER_RANK, card.JOKER_SUIT))
for suit in card.POSSIBLE_SUIT:
for rank in card.POSSIBLE_RANK:
self._cards.append(card.Card(rank, suit))
def __repr__(self):
"""
:returns: unambiguous string representation of the deck object
:rtype: str
"""
card_arrays_dict = {
'_cards' : self._cards,
'_discarded_cards' : self._discarded_cards,
'_in_play_cards' : self._in_play_cards,
}
repr_str = 'Deck('
for card_array_str, card_array in card_arrays_dict.iteritems():
repr_str += "%s=[" % card_array_str
if card_array:
for c_card in card_array:
repr_str += repr(c_card) + ', '
repr_str = repr_str[:-2]
repr_str += '], '
repr_str = repr_str[:-2] + ')'
return repr_str
def __str__(self):
"""
:returns: human-readable string representation of the deck object
:rtype: str
"""
card_arrays_dict = {
'_cards' : self._cards,
'_discarded_cards' : self._discarded_cards,
'_in_play_cards' : self._in_play_cards,
}
str_str = "Deck(\n\t"
for card_array_str, card_array in card_arrays_dict.iteritems():
str_str += "%s : [" % card_array_str
if card_array:
for c_card in card_array:
str_str += str(c_card) + ', '
str_str = str_str[:-2]
str_str += '],\n\t'
str_str = str_str[:-3] + "\n)"
return str_str
def shuffle(self):
"""Shuffle the unused set of cards in :attr:`_cards`
"""
LOGGER.debug("Shuffling deck")
random.shuffle(self._cards)
def deal(self):
"""Deals a single :class:`deck_of_cards.card.Card` from :attr:`_cards`
Raises an IndexError when :attr:`_cards` is empty
:returns: a single :class:`deck_of_cards.card.Card`
:rtype: :class:`deck_of_cards.card.Card`
:raises: IndexError
"""
LOGGER.debug("Number of cards left : %d", len(self._cards))
try:
# deal the last card from the unused _cards array
deal_card = self._cards.pop()
except IndexError:
raise IndexError('Trying to deal from an empty deck.')
# add the newly dealt card to the _in_play_cards array
self._in_play_cards.append(deal_card)
LOGGER.info("Dealing : %s", deal_card)
return deal_card
def discard(self, cards):
"""Remove `cards` from the :attr:`_in_play_cards` array and add them to
:attr:`_discarded_cards` array
Raises a ValueError when trying to discard a card that does not exist in
:attr:`_in_play_cards`.
:param array cards: an array of :class:`deck_of_cards.card.Card` objects
or a single :class:`deck_of_cards.card.Card`
:raises: ValueError
"""
if not isinstance(cards, list):
cards = [cards]
for discard_card in cards:
try:
self._in_play_cards.remove(discard_card)
LOGGER.info("Discarding %s", discard_card)
except ValueError:
raise ValueError("%s not found in self._in_play_cards" % discard_card)
self._discarded_cards.append(discard_card)
def is_empty(self):
"""This method returns true if the deck(:attr:`_cards`) is empty
:returns: True if deck is empty
:rtype: bool
"""
return not self._cards
def check_deck(self):
"""Check to make sure all the cards are accounted for
:returns: True if all cards are accounted for
:rtype: bool
"""
# start with a simple card count check
total_possible_cards = (13*4) + (2 if self._with_jokers else 0)
if total_possible_cards != (len(self._cards)
+ len(self._in_play_cards)
+ len(self._discarded_cards)):
return False
return_value = True
# go through all piles of cards and create a dictionary with
# [suit][rank] = number of occurrences of card
card_dict = {}
for pile in [self._cards, self._in_play_cards, self._discarded_cards]:
for c_card in pile:
suit = c_card.get_suit()
rank = c_card.get_rank()
if not suit in card_dict:
card_dict[suit] = {}
if not rank in card_dict[suit]:
card_dict[suit][rank] = 1
else:
card_dict[suit][rank] += 1
# go through the generated card_dict to make sure that each rank/suit
# combination occurs the expected number of times (once, or twice for jokers)
for suit in card_dict.keys():
for rank in card_dict[suit].keys():
if 2 == card_dict[suit][rank]:
# check for 2 jokers
if not (card.JOKER_SUIT == suit and card.JOKER_RANK == rank):
return_value = False
elif 1 != card_dict[suit][rank]:
LOGGER.info("Something is wrong with the %s", card.Card(rank, suit))
return_value = False
return return_value
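# Minimal usage sketch (illustrative, not part of the original module); it
# assumes this file is importable as deck_of_cards.deck next to card.py:
#
# deck = Deck(with_jokers=False)
# deck.shuffle()
# hand = [deck.deal() for _ in range(5)]
# deck.discard(hand)
# assert deck.check_deck() # every card is still accounted for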
|
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run Performance Test Bisect Tool
This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""
import errno
import json
import optparse
import os
import platform
import re
import shlex
import subprocess
import sys
import traceback
from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')
PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
BENCHMARKS_JSON_FILE = 'benchmarks.json'
class Goma(object):
def __init__(self, path_to_goma):
self._abs_path_to_goma = None
self._abs_path_to_goma_file = None
if not path_to_goma:
return
self._abs_path_to_goma = os.path.abspath(path_to_goma)
filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)
def __enter__(self):
if self._HasGomaPath():
self._SetupAndStart()
return self
def __exit__(self, *_):
if self._HasGomaPath():
self._Stop()
def _HasGomaPath(self):
return bool(self._abs_path_to_goma)
def _SetupEnvVars(self):
if os.name == 'nt':
os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
' cl.exe')
os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
' cl.exe')
else:
os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
os.environ['PATH']])
def _SetupAndStart(self):
"""Sets up goma environment variables and launches the goma service.
Raises:
RuntimeError: If goma fails to start."""
self._SetupEnvVars()
# Sometimes goma is lingering around if something went bad on a previous
# run. Stop it before starting a new process. Can ignore the return code
# since it will return an error if it wasn't running.
self._Stop()
if subprocess.call([self._abs_path_to_goma_file, 'start']):
raise RuntimeError('Goma failed to start.')
def _Stop(self):
subprocess.call([self._abs_path_to_goma_file, 'stop'])
def _LoadConfigFile(config_file_path):
"""Attempts to load the specified config file as a module
and grab the global config dict.
Args:
config_file_path: Path to the config file.
Returns:
If successful, returns the config dict loaded from the file. If no
such dictionary could be loaded, returns the empty dictionary.
"""
try:
local_vars = {}
execfile(config_file_path, local_vars)
return local_vars['config']
except Exception:
print
traceback.print_exc()
print
return {}
def _ValidateConfigFile(config_contents, required_parameters):
"""Validates the config file contents, checking whether all values are
non-empty.
Args:
config_contents: A config dictionary.
required_parameters: A list of parameters to check for.
Returns:
True if valid.
"""
for parameter in required_parameters:
if parameter not in config_contents:
return False
value = config_contents[parameter]
if not value or type(value) is not str:
return False
return True
def _ValidatePerfConfigFile(config_contents):
"""Validates the perf config file contents.
This is used when we're doing a perf try job, rather than a bisect.
The config file is called run-perf-test.cfg by default.
The parameters checked are the required parameters; any additional optional
parameters won't be checked and validation will still pass.
Args:
config_contents: A config dictionary.
Returns:
True if valid.
"""
return _ValidateConfigFile(config_contents, required_parameters=['command'])
def _ValidateBisectConfigFile(config_contents):
"""Validates the bisect config file contents.
The parameters checked are the required parameters; any additional optional
parameters won't be checked and validation will still pass.
Args:
config_contents: A config dictionary.
Returns:
True if valid.
"""
return _ValidateConfigFile(
config_contents,
required_parameters=['command', 'good_revision', 'bad_revision'])
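# Illustrative example (the command and revision values below are made up,
# not taken from a real bisect.cfg) of the minimal dict this validation accepts:
#
# config = {
# 'command': 'tools/perf/run_benchmark -v --browser=release sunspider',
# 'good_revision': '306475',
# 'bad_revision': '306478',
# }
# assert _ValidateBisectConfigFile(config)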
def _OutputFailedResults(text_to_print):
bisect_utils.OutputAnnotationStepStart('Results - Failed')
print
print text_to_print
print
bisect_utils.OutputAnnotationStepClosed()
def _CreateBisectOptionsFromConfig(config):
print config['command']
opts_dict = {}
opts_dict['command'] = config['command']
opts_dict['metric'] = config.get('metric')
if config['repeat_count']:
opts_dict['repeat_test_count'] = int(config['repeat_count'])
if config['truncate_percent']:
opts_dict['truncate_percent'] = int(config['truncate_percent'])
if config['max_time_minutes']:
opts_dict['max_time_minutes'] = int(config['max_time_minutes'])
if config.has_key('use_goma'):
opts_dict['use_goma'] = config['use_goma']
if config.has_key('goma_dir'):
opts_dict['goma_dir'] = config['goma_dir']
if config.has_key('improvement_direction'):
opts_dict['improvement_direction'] = int(config['improvement_direction'])
if config.has_key('target_arch'):
opts_dict['target_arch'] = config['target_arch']
if config.has_key('bug_id') and str(config['bug_id']).isdigit():
opts_dict['bug_id'] = config['bug_id']
opts_dict['build_preference'] = 'ninja'
opts_dict['output_buildbot_annotations'] = True
if '--browser=cros' in config['command']:
opts_dict['target_platform'] = 'cros'
if os.environ[CROS_BOARD_ENV] and os.environ[CROS_IP_ENV]:
opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
else:
raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
'BISECT_CROS_BOARD undefined.')
elif 'android' in config['command']:
if 'android-chrome-shell' in config['command']:
opts_dict['target_platform'] = 'android'
elif 'android-chrome' in config['command']:
opts_dict['target_platform'] = 'android-chrome'
else:
opts_dict['target_platform'] = 'android'
return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
def _ParseCloudLinksFromOutput(output):
html_results_pattern = re.compile(
r'\s(?P<VALUES>http://storage.googleapis.com/' +
'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
re.MULTILINE)
profiler_pattern = re.compile(
r'\s(?P<VALUES>https://console.developers.google.com/' +
'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
re.MULTILINE)
results = {
'html-results': html_results_pattern.findall(output),
'profiler': profiler_pattern.findall(output),
}
return results
def _ParseAndOutputCloudLinks(
results_without_patch, results_with_patch, annotations_dict):
cloud_links_without_patch = _ParseCloudLinksFromOutput(
results_without_patch[2])
cloud_links_with_patch = _ParseCloudLinksFromOutput(
results_with_patch[2])
cloud_file_link = (cloud_links_without_patch['html-results'][0]
if cloud_links_without_patch['html-results'] else '')
profiler_file_links_with_patch = cloud_links_with_patch['profiler']
profiler_file_links_without_patch = cloud_links_without_patch['profiler']
# Calculate the % difference in the means of the 2 runs.
percent_diff_in_means = None
std_err = None
if (results_with_patch[0].has_key('mean') and
results_with_patch[0].has_key('values')):
percent_diff_in_means = (results_with_patch[0]['mean'] /
max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
std_err = math_utils.PooledStandardError(
[results_with_patch[0]['values'], results_without_patch[0]['values']])
if percent_diff_in_means is not None and std_err is not None:
bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
(percent_diff_in_means, std_err))
print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
'Std. Error'.center(20, ' '))
print ' %s %s %s' % ('Patch'.center(10, ' '),
('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
print ' %s %s %s' % ('No Patch'.center(10, ' '),
('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
if cloud_file_link:
bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
bisect_utils.OutputAnnotationStepClosed()
elif cloud_file_link:
bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
if profiler_file_links_with_patch and profiler_file_links_without_patch:
for i in xrange(len(profiler_file_links_with_patch)):
bisect_utils.OutputAnnotationStepLink(
'%s[%d]' % (annotations_dict.get('profiler_link1'), i),
profiler_file_links_with_patch[i])
for i in xrange(len(profiler_file_links_without_patch)):
bisect_utils.OutputAnnotationStepLink(
'%s[%d]' % (annotations_dict.get('profiler_link2'), i),
profiler_file_links_without_patch[i])
def _ResolveRevisionsFromConfig(config):
if not 'good_revision' in config and not 'bad_revision' in config:
return (None, None)
bad_revision = source_control.ResolveToRevision(
config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
if not bad_revision:
raise RuntimeError('Failed to resolve [%s] to git hash.' %
config['bad_revision'])
good_revision = source_control.ResolveToRevision(
config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
if not good_revision:
raise RuntimeError('Failed to resolve [%s] to git hash.' %
config['good_revision'])
return (good_revision, bad_revision)
def _GetStepAnnotationStringsDict(config):
if 'good_revision' in config and 'bad_revision' in config:
return {
'build1': 'Building [%s]' % config['good_revision'],
'build2': 'Building [%s]' % config['bad_revision'],
'run1': 'Running [%s]' % config['good_revision'],
'run2': 'Running [%s]' % config['bad_revision'],
'sync1': 'Syncing [%s]' % config['good_revision'],
'sync2': 'Syncing [%s]' % config['bad_revision'],
'results_label1': config['good_revision'],
'results_label2': config['bad_revision'],
'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
}
else:
return {
'build1': 'Building With Patch',
'build2': 'Building Without Patch',
'run1': 'Running With Patch',
'run2': 'Running Without Patch',
'results_label1': 'Patch',
'results_label2': 'ToT',
'profiler_link1': 'With Patch - Profiler Data',
'profiler_link2': 'Without Patch - Profiler Data',
}
def _RunBuildStepForPerformanceTest(bisect_instance,
build_string,
sync_string,
revision):
if revision:
bisect_utils.OutputAnnotationStepStart(sync_string)
if not source_control.SyncToRevision(revision, 'gclient'):
raise RuntimeError('Failed [%s].' % sync_string)
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart(build_string)
if bisect_utils.RunGClient(['runhooks']):
raise RuntimeError('Failed to run gclient runhooks')
if not bisect_instance.ObtainBuild('chromium'):
raise RuntimeError('Patched version failed to build.')
bisect_utils.OutputAnnotationStepClosed()
def _RunCommandStepForPerformanceTest(bisect_instance,
opts,
reset_on_first_run,
upload_on_last_run,
results_label,
run_string):
bisect_utils.OutputAnnotationStepStart(run_string)
results = bisect_instance.RunPerformanceTestAndParseResults(
opts.command,
opts.metric,
reset_on_first_run=reset_on_first_run,
upload_on_last_run=upload_on_last_run,
results_label=results_label,
allow_flakes=False)
if results[1]:
raise RuntimeError('Patched version failed to run performance test.')
bisect_utils.OutputAnnotationStepClosed()
return results
def _RunPerformanceTest(config):
"""Runs a performance test with and without the current patch.
Args:
config: Contents of the config file, a dictionary.
Attempts to build and run the current revision with and without the
current patch, with the parameters passed in.
"""
# Bisect script expects to be run from the src directory
os.chdir(SRC_DIR)
opts = _CreateBisectOptionsFromConfig(config)
revisions = _ResolveRevisionsFromConfig(config)
annotations_dict = _GetStepAnnotationStringsDict(config)
b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
_RunBuildStepForPerformanceTest(b,
annotations_dict.get('build1'),
annotations_dict.get('sync1'),
revisions[0])
results_with_patch = _RunCommandStepForPerformanceTest(
b, opts, True, True, annotations_dict['results_label1'],
annotations_dict['run1'])
bisect_utils.OutputAnnotationStepStart('Reverting Patch')
# TODO: When this is re-written to recipes, this should use bot_update's
# revert mechanism to fully revert the client. But for now, since we know that
# the perf try bot currently only supports src/ and src/third_party/WebKit, we
# simply reset those two directories.
bisect_utils.CheckRunGit(['reset', '--hard'])
bisect_utils.CheckRunGit(['reset', '--hard'],
os.path.join('third_party', 'WebKit'))
bisect_utils.OutputAnnotationStepClosed()
_RunBuildStepForPerformanceTest(b,
annotations_dict.get('build2'),
annotations_dict.get('sync2'),
revisions[1])
results_without_patch = _RunCommandStepForPerformanceTest(
b, opts, False, True, annotations_dict['results_label2'],
annotations_dict['run2'])
# Find the link to the cloud stored results file.
_ParseAndOutputCloudLinks(
results_without_patch, results_with_patch, annotations_dict)
def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
"""Attempts to build and run the current revision with and without the
current patch, with the parameters passed in.
Args:
config: The config read from run-perf-test.cfg.
path_to_goma: Path to goma directory.
is_cq_tryjob: Whether or not the try job was initiated by commit queue.
Returns:
An exit code: 0 on success, otherwise 1.
"""
if platform.release() == 'XP':
print 'Windows XP is not supported for perf try jobs because it lacks '
print 'goma support. Please refer to crbug.com/330900.'
return 1
try:
with Goma(path_to_goma) as _:
config['use_goma'] = bool(path_to_goma)
if config['use_goma']:
config['goma_dir'] = os.path.abspath(path_to_goma)
if not is_cq_tryjob:
_RunPerformanceTest(config)
else:
return _RunBenchmarksForCommitQueue(config)
return 0
except RuntimeError, e:
bisect_utils.OutputAnnotationStepFailure()
bisect_utils.OutputAnnotationStepClosed()
_OutputFailedResults('Error: %s' % e.message)
return 1
def _RunBisectionScript(
config, working_directory, path_to_goma, path_to_extra_src, dry_run):
"""Attempts to execute the bisect script with the given parameters.
Args:
config: A dict containing the parameters to pass to the script.
working_directory: A working directory to provide to the bisect script,
where it will store its own copy of the depot.
path_to_goma: Path to goma directory.
path_to_extra_src: Path to extra source file.
dry_run: Do a dry run, skipping sync, build, and performance testing steps.
Returns:
An exit status code: 0 on success, otherwise 1.
"""
_PrintConfigStep(config)
# Construct the basic command with all necessary arguments.
cmd = [
'python',
os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
'--command', config['command'],
'--good_revision', config['good_revision'],
'--bad_revision', config['bad_revision'],
'--working_directory', working_directory,
'--output_buildbot_annotations'
]
# Add flags for any optional config parameters if given in the config.
options = [
('metric', '--metric'),
('repeat_count', '--repeat_test_count'),
('truncate_percent', '--truncate_percent'),
('max_time_minutes', '--max_time_minutes'),
('bisect_mode', '--bisect_mode'),
('improvement_direction', '--improvement_direction'),
('bug_id', '--bug_id'),
('builder_type', '--builder_type'),
('target_arch', '--target_arch'),
]
for config_key, flag in options:
if config.has_key(config_key):
cmd.extend([flag, config[config_key]])
cmd.extend(['--build_preference', 'ninja'])
# Possibly set the target platform name based on the browser name in a
# Telemetry command.
if 'android-chrome-shell' in config['command']:
cmd.extend(['--target_platform', 'android'])
elif 'android-chrome' in config['command']:
cmd.extend(['--target_platform', 'android-chrome'])
elif 'android' in config['command']:
cmd.extend(['--target_platform', 'android'])
if path_to_goma:
# For Windows XP platforms, goma service is not supported.
# Moreover, we don't compile Chrome when the gs_bucket flag is set; build
# archives are used instead, so the goma service is ignored on Windows XP.
# See http://crbug.com/330900.
if platform.release() == 'XP':
print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
'on Windows XP platform. Please refer to crbug.com/330900.')
path_to_goma = None
cmd.append('--use_goma')
cmd.append('--goma_dir')
cmd.append(os.path.abspath(path_to_goma))
if path_to_extra_src:
cmd.extend(['--extra_src', path_to_extra_src])
if dry_run:
cmd.extend([
'--debug_ignore_build',
'--debug_ignore_sync',
'--debug_ignore_perf_test'
])
cmd = [str(c) for c in cmd]
with Goma(path_to_goma) as _:
return_code = subprocess.call(cmd)
if return_code:
print ('Error: bisect_perf_regression.py returned with error %d\n'
% return_code)
return return_code
def _PrintConfigStep(config):
"""Prints out the given config, along with Buildbot annotations."""
bisect_utils.OutputAnnotationStepStart('Config')
print
for k, v in config.iteritems():
print ' %s : %s' % (k, v)
print
bisect_utils.OutputAnnotationStepClosed()
def _GetBrowserType(bot_platform):
"""Gets the browser type to be used in the run benchmark command."""
if bot_platform == 'android':
return 'android-chrome-shell'
elif 'x64' in bot_platform:
return 'release_x64'
return 'release'
def _GuessTelemetryTestCommand(bot_platform, test_name=None):
"""Creates a Telemetry benchmark command based on bot and test name."""
command = []
# On Windows, Python scripts should be prefixed with the python command.
if bot_platform == 'win':
command.append('python')
command.append('tools/perf/run_benchmark')
command.append('-v')
command.append('--browser=%s' % _GetBrowserType(bot_platform))
if test_name:
command.append(test_name)
return ' '.join(command)
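# Illustrative example (the benchmark name is hypothetical): for bot_platform
# 'android' and test_name 'smoothness.top_25' this returns
# 'tools/perf/run_benchmark -v --browser=android-chrome-shell smoothness.top_25'.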
def _GetConfigBasedOnPlatform(config, bot_name, test_name):
"""Generates required options to create BisectPerformanceMetrics instance."""
opts_dict = {
'command': _GuessTelemetryTestCommand(bot_name, test_name),
'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
'build_preference': 'ninja',
'output_buildbot_annotations': True,
'repeat_test_count': 1,
'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
}
if 'use_goma' in config:
opts_dict['use_goma'] = config['use_goma']
if 'goma_dir' in config:
opts_dict['goma_dir'] = config['goma_dir']
if 'android-chrome-shell' in opts_dict['command']:
opts_dict['target_platform'] = 'android'
return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
def _GetModifiedFilesFromPatch(cwd=None):
"""Gets list of files modified in the current patch."""
log_output = bisect_utils.CheckRunGit(
['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
modified_files = log_output.split()
return modified_files
def _GetAffectedBenchmarkModuleNames():
"""Gets list of modified benchmark files under tools/perf/benchmarks."""
all_affected_files = _GetModifiedFilesFromPatch()
modified_benchmarks = []
for affected_file in all_affected_files:
if affected_file.startswith(PERF_BENCHMARKS_PATH):
benchmark = os.path.basename(os.path.splitext(affected_file)[0])
modified_benchmarks.append(benchmark)
return modified_benchmarks
def _ListAvailableBenchmarks(bot_platform):
"""Gets all available benchmarks names as a list."""
browser_type = _GetBrowserType(bot_platform)
if os.path.exists(BENCHMARKS_JSON_FILE):
os.remove(BENCHMARKS_JSON_FILE)
command = []
if 'win' in bot_platform:
command.append('python')
command.append('tools/perf/run_benchmark')
command.extend([
'list',
'--browser',
browser_type,
'--json-output',
BENCHMARKS_JSON_FILE])
try:
output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
command=command, cwd=SRC_DIR)
if return_code:
raise RuntimeError('Something went wrong while listing benchmarks. '
'Please review the command line: %s.\nERROR: [%s]' %
(' '.join(command), output))
with open(BENCHMARKS_JSON_FILE) as tests_json:
tests_data = json.load(tests_json)
if tests_data.get('steps'):
return tests_data.get('steps').keys()
finally:
try:
if os.path.exists(BENCHMARKS_JSON_FILE):
os.remove(BENCHMARKS_JSON_FILE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return None
def _OutputOverallResults(results):
"""Creates results step and prints results on buildbot job."""
test_status = all(results.values())
bisect_utils.OutputAnnotationStepStart(
'Results - %s' % ('Passed' if test_status else 'Failed'))
print
print 'Results of benchmarks:'
print
for benchmark, result in results.iteritems():
print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
if not test_status:
bisect_utils.OutputAnnotationStepFailure()
bisect_utils.OutputAnnotationStepClosed()
# Returns 0 for success and 1 for failure.
return 0 if test_status else 1
def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
"""Runs a Telemetry benchmark."""
bisect_utils.OutputAnnotationStepStart(benchmark_name)
command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
# A value other than 0 indicates that the test couldn't be run, and results
# should also include an error message.
if return_code:
print ('Error: Something went wrong running the benchmark: %s. '
'Please review the command line: %s\n\n%s' %
(benchmark_name, command_to_run, output))
bisect_utils.OutputAnnotationStepFailure()
print output
bisect_utils.OutputAnnotationStepClosed()
# return_code is the exit status of the subprocess that ran the test
# command: 0 on a successful run, any non-zero value otherwise.
return return_code == 0
def _RunBenchmarksForCommitQueue(config):
"""Runs Telemetry benchmark for the commit queue."""
os.chdir(SRC_DIR)
# To determine the bot platform by reading buildbot name from environment
# variable.
bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
if not bot_name:
bot_name = sys.platform
bot_name = bot_name.split('_')[0]
affected_benchmarks = _GetAffectedBenchmarkModuleNames()
# Abort if the patch does not modify any existing benchmark files.
if not affected_benchmarks:
bisect_utils.OutputAnnotationStepStart('Results')
print
print ('There are no modifications to Telemetry benchmarks,'
' aborting the try job.')
bisect_utils.OutputAnnotationStepClosed()
return 0
# Bisect script expects to be run from the src directory
# Gets required options in order to create a BisectPerformanceMetrics instance.
# Since command is a required arg in BisectPerformanceMetrics, we just create
# a dummy command for now.
opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
annotations_dict = _GetStepAnnotationStringsDict(config)
b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
_RunBuildStepForPerformanceTest(b,
annotations_dict.get('build1'),
annotations_dict.get('sync1'),
None)
available_benchmarks = _ListAvailableBenchmarks(bot_name)
overall_results = {}
for affected_benchmark in affected_benchmarks:
for benchmark in available_benchmarks:
if (benchmark.startswith(affected_benchmark) and
not benchmark.endswith('reference')):
overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)
return _OutputOverallResults(overall_results)
def _OptionParser():
"""Returns the options parser for run-bisect-perf-regression.py."""
def ConvertJson(option, _, value, parser):
"""Provides an OptionParser callback to unmarshal a JSON string."""
setattr(parser.values, option.dest, json.loads(value))
usage = ('%prog [options] [-- chromium-options]\n'
'Used by a try bot to run the bisection script using the parameters'
' provided in the auto_bisect/bisect.cfg file.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
help='A working directory to supply to the bisection '
'script, which will use it as the location to checkout '
'a copy of the chromium depot.')
parser.add_option('-p', '--path_to_goma',
type='str',
help='Path to goma directory. If this is supplied, goma '
'builds will be enabled.')
parser.add_option('--path_to_config',
type='str',
help='Path to the config file to use. If this is supplied, '
'the bisect script will use this to override the default '
'config file path. The script will attempt to load it '
'as a bisect config first, then a perf config.')
parser.add_option('--extra_src',
type='str',
help='Path to extra source file. If this is supplied, '
'bisect script will use this to override default behavior.')
parser.add_option('--dry_run',
action="store_true",
help='The script will perform the full bisect, but '
'without syncing, building, or running the performance '
'tests.')
# This argument is passed by buildbot to supply build properties to the bisect
# script. Note: Don't change "--build-properties" property name.
parser.add_option('--build-properties', action='callback',
dest='build_properties',
callback=ConvertJson, type='string',
nargs=1, default={},
help='build properties in JSON format')
return parser
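# Illustrative invocation (the paths are placeholders, not taken from the
# original documentation):
#
# python run-bisect-perf-regression.py -w /tmp/bisect_work -p /opt/goma
#
# This reads auto_bisect/bisect.cfg and, if it holds a valid bisect config,
# hands the parameters over to bisect_perf_regression.py.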
def main():
"""Entry point for run-bisect-perf-regression.py.
Reads the config file, and then tries to either bisect a regression or
just run a performance test, depending on the particular config parameters
specified in the config file.
"""
parser = _OptionParser()
opts, _ = parser.parse_args()
# Use the default config file path unless one was specified.
config_path = BISECT_CONFIG_PATH
if opts.path_to_config:
config_path = opts.path_to_config
config = _LoadConfigFile(config_path)
# Check if the config is valid for running bisect job.
config_is_valid = _ValidateBisectConfigFile(config)
if config and config_is_valid:
if not opts.working_directory:
print 'Error: missing required parameter: --working_directory\n'
parser.print_help()
return 1
return _RunBisectionScript(
config, opts.working_directory, opts.path_to_goma, opts.extra_src,
opts.dry_run)
# If it wasn't valid for running a bisect, then maybe the user wanted
# to run a perf test instead of a bisect job. Try reading any possible
# perf test config files.
perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
for current_perf_cfg_file in perf_cfg_files:
if opts.path_to_config:
path_to_perf_cfg = opts.path_to_config
else:
path_to_perf_cfg = os.path.join(
os.path.abspath(os.path.dirname(sys.argv[0])),
current_perf_cfg_file)
config = _LoadConfigFile(path_to_perf_cfg)
config_is_valid = _ValidatePerfConfigFile(config)
if config and config_is_valid:
return _SetupAndRunPerformanceTest(config, opts.path_to_goma)
# If there are no changes to config file, then check if the request is
# from commit-bot, if so then run the modified Telemetry benchmarks for the
# patch.
if opts.build_properties.get('requester') == 'commit-bot@chromium.org':
return _SetupAndRunPerformanceTest(
config={}, path_to_goma=opts.path_to_goma, is_cq_tryjob=True)
print ('Error: Could not load config file. Double check your changes to '
'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
return 1
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/env python
#
# Copyright (C) 2013-2015 eNovance SAS <licensing@enovance.com>
# Author: Erwan Velu <erwan@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import getopt
import check
import utils
import compare_sets
import shutil
import numpy
import glob
def print_help():
print '''cardiff
-h --help : Print this help
-p <pattern> or --pattern <pattern> : A pattern in regexp to select input files
-o <dir> or --output_dir <dir> : Output directory if pattern is defined
this directory will report the diff files
if systems do not match
-l <level> or --log-level <level> : Show only the log levels selected
: level is a comma separated list of the
following levels
: INFO, ERROR, WARNING, SUMMARY, DETAIL
: SUMMARY is the default view
-g <group> or --group <group> : Select the target group for DETAIL level
(supports regexp)
-c <cat> or --category <cat> : Select the target category for DETAIL
level (supports regexp)
-i <item> or --item <item> : Select the item for select group with
DETAIL level (supports regexp)
-I <list> or --ignore <list> : Disable the grouping segregation on the
comma-separated list of components :
cpu, hpa, disk, firmware, memory,
network, system, megaraid, ahci, ipmi
-r <dir1>[,<dir2>,<dir3>, ...] : Perform the rampup analysis on directory
containing results from dahc.
In this mode, there is no need to provide
a pattern. Print the compared results
if several dirs are separated by
a comma
Examples:
$ cardiff.py -p 'sample/*.hw' -l DETAIL -g '1' -c 'loops_per_sec' \
-i 'logical_1.*'
$ cardiff.py -p 'sample/*.hw' -l DETAIL -g '1' -c 'standalone_rand.*_4k_IOps' \
-i 'sd.*'
$ cardiff.py -p 'sample/*.hw' -l DETAIL -g '0' -c '1G' -i '.*'
$ cardiff.py -p '*hw' -I disk,cpu -o plop
$ cardiff.py -r '/var/lib/edeploy/health/dahc/cpu_load/2014_09_15-12h17'
'''
def compare_disks(global_params, bench_values, unique_id, systems_groups):
systems = utils.find_sub_element(bench_values, unique_id, 'pdisk')
groups = check.physical_megaraid_disks(global_params, systems, unique_id)
compare_sets.compute_similar_hosts_list(
systems_groups,
compare_sets.get_hosts_list_from_result(groups))
systems = utils.find_sub_element(bench_values, unique_id, 'disk')
groups = check.physical_hpa_disks(global_params, systems, unique_id)
compare_sets.compute_similar_hosts_list(
systems_groups,
compare_sets.get_hosts_list_from_result(groups))
groups = check.logical_disks(global_params, systems, unique_id)
compare_sets.compute_similar_hosts_list(
systems_groups,
compare_sets.get_hosts_list_from_result(groups))
def compare_type(type_, check_func, global_params,
bench_values, unique_id, systems_groups):
systems = utils.find_sub_element(bench_values, unique_id, type_)
groups = check_func(global_params, systems, unique_id)
compare_sets.compute_similar_hosts_list(
systems_groups,
compare_sets.get_hosts_list_from_result(groups))
def group_systems(global_params, bench_values, unique_id,
systems_groups, ignore_list):
for name, func in (('hpa', check.hpa),
('megaraid', check.megaraid),
('ahci', check.ahci),
('ipmi', check.ipmi),
('system', check.systems),
('firmware', check.firmware),
('memory', check.memory_timing),
('network', check.network_interfaces),
('cpu', check.cpu)):
if name not in ignore_list:
compare_type(name, func, global_params, bench_values,
unique_id, systems_groups)
def compare_performance(bench_values, unique_id, systems_groups, detail,
rampup_value=0, current_dir=""):
for group in systems_groups:
systems = utils.find_sub_element(bench_values, unique_id,
'disk', group)
check.logical_disks_perf(systems, unique_id,
systems_groups.index(group),
detail, rampup_value, current_dir)
for group in systems_groups:
systems = utils.find_sub_element(bench_values, unique_id, 'cpu', group)
check.cpu_perf(systems, unique_id, systems_groups.index(group), detail,
rampup_value, current_dir)
for group in systems_groups:
systems = utils.find_sub_element(bench_values, unique_id, 'cpu', group)
check.memory_perf(systems, unique_id, systems_groups.index(group),
detail, rampup_value, current_dir)
for group in systems_groups:
systems = utils.find_sub_element(bench_values, unique_id, 'network',
group)
check.network_perf(systems, unique_id, systems_groups.index(group),
detail, rampup_value, current_dir)
def analyze_data(global_params, pattern, ignore_list, detail, rampup_value=0,
max_rampup_value=0, current_dir=""):
if rampup_value > 0:
pattern = pattern + "*.hw"
# Extracting regex and path
path = os.path.dirname(pattern)
if not path:
path = "."
else:
pattern = os.path.basename(pattern)
if not os.path.isdir(path):
print "Error: the path %s doesn't exist!" % path
sys.exit(2)
health_data_file = utils.find_file(path, pattern)
if len(health_data_file) == 0:
print "No log file found with pattern %s!" % pattern
sys.exit(1)
else:
if rampup_value == 0:
print "### %d files Selected with pattern '%s' ###" % \
(len(health_data_file), pattern)
else:
print "########## Rampup: %d / %d hosts #########" % \
(rampup_value, max_rampup_value)
# Extract data from the hw files
bench_values = []
for health in health_data_file:
bench_values.append(eval(open(health).read()))
if rampup_value > 0:
unique_id = 'uuid'
else:
unique_id = 'serial'
# Extracting the host list from the data to get
# the initial list of hosts. We have here a single group
# with all the servers
systems_groups = []
systems_groups.append(utils.get_hosts_list(bench_values, unique_id))
# Let's create groups of similar servers
if rampup_value == 0:
group_systems(global_params, bench_values, unique_id, systems_groups,
ignore_list)
compare_sets.print_systems_groups(systems_groups)
# It's time to compare performance in each group
compare_performance(bench_values, unique_id, systems_groups, detail,
rampup_value, current_dir)
print "##########################################"
print
return bench_values
def compute_deviance_percentage(metric):
# If we have a single item
# checking the variance is useless
array = numpy.array(metric)
if len(metric) == 1:
return 0
return numpy.std(array) / numpy.mean(array) * 100
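# Worked example (illustrative): for metric = [10.0, 12.0, 14.0], numpy.mean
# is 12.0 and the population standard deviation is sqrt(8/3) ~= 1.633, so the
# function returns roughly 13.6 (deviance as a percentage of the mean).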
def compute_metric(current_dir, rampup_value, metric, metric_name):
array = numpy.array(metric)
mean_group = numpy.mean(metric)
deviance_percentage = compute_deviance_percentage(metric)
deviance = numpy.std(array)
utils.write_gnuplot_file(current_dir+"/%s-mean.plot" % metric_name,
rampup_value, mean_group)
utils.write_gnuplot_file(
current_dir + "/%s-deviance_percentage.plot" % metric_name,
rampup_value, deviance_percentage)
utils.write_gnuplot_file(
current_dir + "/%s-deviance.plot" % metric_name,
rampup_value, deviance)
def compute_metrics(current_dir, rampup_value, metrics):
duration = []
start_lag = []
for value in metrics["duration"]:
duration.append(metrics["duration"][value])
for value in metrics["start_lag"]:
start_lag.append(float(metrics["start_lag"][value]) * 1000) # in ms
compute_metric(current_dir, rampup_value, duration, "job_duration")
compute_metric(current_dir, rampup_value, start_lag, "jitter")
def do_plot(current_dir, gpm_dir, main_title, subtitle, name, unit, titles,
titles_order, expected_value=""):
filename = current_dir+"/"+name+".gnuplot"
with open(filename, "a") as f:
shutil.copyfile("%s/graph2D.gpm" % gpm_dir,
"%s/graph2D.gpm" % current_dir)
with open("%s/graph2D.gpm" % current_dir, "a") as myfile:
column = 2
for title in titles_order:
if column == 2:
myfile.write(
"plot '$2' using %d:xtic(1) "
"with linespoints title '%s'" %
(column, titles[title]))
else:
myfile.write(",\\\n'$2' using %d:xtic(1) "
"with linespoints title '%s'" %
(column, titles[title]))
column = column + 1
if expected_value:
myfile.write(",\\\n %.2f w l ls 1 ti "
"'Expected value (%.2f)'" %
(expected_value, expected_value))
myfile.write("\nset output '$4-smooth.png'\n")
column = 2
for title in titles_order:
if column == 2:
myfile.write("plot '$2' using %d:xtic(1) "
"smooth csplines title '%s'" %
(column, titles[title]))
else:
myfile.write(",\\\n'$2' using %d:xtic(1) "
"smooth csplines title '%s'" %
(column, titles[title]))
column = column + 1
if expected_value:
myfile.write(",\\\n %.2f w l ls 1 ti "
"'Expected value (%.2f)'" %
(expected_value, expected_value))
column = 2
myfile.write("\nset output '$4-trend.png'\n")
for title in titles_order:
if column == 2:
myfile.write("plot '$2' using %d:xtic(1) "
"smooth bezier title '%s'" %
(column, titles[title]))
else:
myfile.write(",\\\n'$2' using %d:xtic(1) "
"smooth bezier title '%s'" %
(column, titles[title]))
column = column + 1
if expected_value:
myfile.write(",\\\n %.2f w l ls 1 ti "
"'Expected value (%.2f)'" %
(expected_value, expected_value))
myfile.write("\n")
f.write("call \'%s/graph2D.gpm\' \'%s' \'%s\' \'%s\' \'%s\' \'%s\' "
"\'%s\'\n" % (current_dir, main_title, subtitle,
current_dir+"/"+name+".plot",
name, current_dir+name, unit))
try:
os.system("gnuplot %s" % filename)
except:
pass
def extract_hw_info(hw, level1, level2, level3):
result = []
temp_level2 = level2
for entry in hw:
if level2 == '*':
temp_level2 = entry[1]
if (level1 == entry[0] and temp_level2 == entry[1] and
level3 == entry[2]):
result.append(entry[3])
return result
def is_virtualized(bench_values):
if "hypervisor" in extract_hw_info(bench_values[0],
'cpu', 'physical_0',
'flags')[0]:
return "virtualized"
return ""
def plot_results(current_dir, rampup_values, job, metrics, bench_values,
titles, titles_order):
gpm_dir = "./"
context = ""
bench_type = job
unit = {}
expected_value = {}
expected_value["job_duration-mean"] = metrics["bench"]["runtime"]
unit["job_duration-mean"] = "seconds (s)"
unit["job_duration-deviance"] = unit["job_duration-mean"]
unit["job_duration-deviance_percentage"] = "% of deviance (vs mean perf)"
unit["jitter-mean"] = "milliseconds (ms)"
unit["jitter-deviance"] = unit["jitter-mean"]
unit["jitter-deviance_percentage"] = "% of deviance (vs mean perf)"
if "cpu" in job:
unit["deviance"] = "loops_per_sec"
unit["deviance_percentage"] = "% of deviance (vs mean perf)"
unit["mean"] = unit["deviance"]
unit["sum"] = unit["deviance"]
context = "%d cpu load per host" % metrics["bench"]["cores"]
bench_type = "%s power" % job
if "memory" in job:
unit["deviance"] = "MB/sec"
unit["deviance_percentage"] = "% of deviance (vs mean perf)"
unit["mean"] = unit["deviance"]
unit["sum"] = unit["deviance"]
bench_type = "%s bandwidth" % job
context = "%d %s threads per host, blocksize=%s" % \
(metrics["bench"]["cores"],
metrics["bench"]["mode"],
metrics["bench"]["block-size"])
if "network" in job:
if metrics["bench"]["mode"] == "bandwidth":
unit["deviance"] = "Mbit/sec"
bench_type = "%s %s bandwidth" % \
(job, metrics["bench"]["connection"])
elif metrics["bench"]["mode"] == "latency":
unit["deviance"] = "RRQ/sec"
bench_type = "%s %s latency" % \
(job, metrics["bench"]["connection"])
unit["deviance_percentage"] = "% of deviance (vs mean perf)"
unit["mean"] = unit["deviance"]
unit["sum"] = unit["deviance"]
context = "%d %s threads per host, blocksize=%s" % \
(metrics["bench"]["cores"],
metrics["bench"]["mode"],
metrics["bench"]["block-size"])
if "storage" in job:
unit["deviance"] = "KB/sec"
unit["deviance_percentage"] = "% of deviance (vs mean perf)"
unit["mean"] = unit["deviance"]
unit["sum"] = unit["deviance"]
bench_type = "%s bandwidth" % job
context = "%d %s threads per host, blocksize=%s, " \
"mode=%s, access=%s" % \
(metrics["bench"]["cores"],
metrics["bench"]["mode"],
metrics["bench"]["block-size"],
metrics["bench"]["mode"],
metrics["bench"]["access"])
for kind in unit:
title_appendix = ""
if len(titles.keys()) > 1:
for key in titles_order:
if not title_appendix:
title_appendix = "\\n %s" % titles[key]
else:
title_appendix = "%s vs %s" % (title_appendix, titles[key])
else:
title_appendix = metrics["bench"]["title"]
title = "Study of %s %s from %d to %d hosts (step=%d) : %s" % \
(bench_type, kind, min(rampup_values),
max(rampup_values), metrics["bench"]["step-hosts"],
title_appendix)
total_disk_size = 0
for disk_size in extract_hw_info(bench_values[0][0], 'disk', '*',
'size'):
total_disk_size = total_disk_size + int(disk_size)
system = "HW per %s host: %s x %s CPUs, %d MB of RAM, %d " \
"disks : %d GB total, %d NICs\\n OS : %s running " \
"kernel %s, cpu_arch=%s" % \
(is_virtualized(bench_values[0]),
extract_hw_info(bench_values[0][0], 'cpu',
'physical', 'number')[0],
extract_hw_info(bench_values[0][0], 'cpu',
'physical_0', 'product')[0],
int(extract_hw_info(bench_values[0][0], 'memory',
'total', 'size')[0]) / 1024 / 1024,
int(extract_hw_info(bench_values[0][0], 'disk',
'logical', 'count')[0]),
total_disk_size,
len(extract_hw_info(bench_values[0][0], 'network',
'*', 'serial')),
extract_hw_info(bench_values[0][0], 'system',
'os', 'version')[0],
extract_hw_info(bench_values[0][0], 'system',
'kernel', 'version')[0],
extract_hw_info(bench_values[0][0], 'system',
'kernel', 'arch')[0])
subtitle = "\\nBenchmark setup : %s, runtime=%d seconds, %d "
"hypervisors with %s scheduling\\n%s" % \
(context, metrics["bench"]["runtime"],
len(metrics["affinity"]), metrics["bench"]["affinity"],
system)
if kind in expected_value:
do_plot(current_dir, gpm_dir, title, subtitle, kind, unit[kind],
titles, titles_order, expected_value[kind])
else:
do_plot(current_dir, gpm_dir, title, subtitle, kind, unit[kind],
titles, titles_order)
def main(argv):
pattern = ''
rampup = ""
rampup_dirs = []
rampup_values = ''
ignore_list = ''
detail = {'category': '', 'group': '', 'item': ''}
global_params = {}
try:
opts, args = getopt.getopt(argv[1:], "hp:l:g:c:i:I:r:o:",
['pattern', 'log-level', 'group',
'category', 'item', "ignore",
"rampup", "output_dir"])
except getopt.GetoptError:
print "Error: One of the options passed " \
"to the cmdline was not supported"
print "Please fix your command line or read the help (-h option)"
sys.exit(2)
utils.print_level = int(utils.Levels.SUMMARY)
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit(0)
elif opt in ("-p", "--pattern"):
pattern = arg
pattern = pattern.replace('\\', '')
elif opt in ("-r", "--rampup"):
rampup = arg
rampup = rampup.replace('\\', '')
elif opt in ("-l", "--log-level"):
if "list" in arg:
print_help()
sys.exit(2)
utils.print_level = 0
if utils.Levels.message[utils.Levels.INFO] in arg:
utils.print_level |= int(utils.Levels.INFO)
if utils.Levels.message[utils.Levels.WARNING] in arg:
utils.print_level |= int(utils.Levels.WARNING)
if utils.Levels.message[utils.Levels.ERROR] in arg:
utils.print_level |= int(utils.Levels.ERROR)
if utils.Levels.message[utils.Levels.SUMMARY] in arg:
utils.print_level |= int(utils.Levels.SUMMARY)
if utils.Levels.message[utils.Levels.DETAIL] in arg:
utils.print_level |= int(utils.Levels.DETAIL)
if utils.print_level == 0:
print "Error: The log level specified is not " \
"part of the supported list !"
print "Please check the usage of this tool and retry."
sys.exit(2)
elif opt in ("-g", "--group"):
detail['group'] = arg
elif opt in ("-c", "--category"):
detail['category'] = arg
elif opt in ("-i", "--item"):
detail['item'] = arg
elif opt in ("-I", "--ignore"):
ignore_list = arg
elif opt in ("-o", "--ouptut_dir"):
if os.path.exists(arg):
for filename in glob.glob("%s/*.diff" % arg):
os.remove(filename)
for filename in glob.glob("%s/*.def" % arg):
os.remove(filename)
else:
os.mkdir(arg)
global_params["output_dir"] = arg
if (utils.print_level & utils.Levels.DETAIL) == utils.Levels.DETAIL:
if (len(detail['group']) == 0 or len(detail['category']) == 0 or
len(detail['item']) == 0):
print "Error: The DETAIL output requires group, category & item " \
"options to be set"
sys.exit(2)
if not pattern and not rampup:
print "Error: Pattern option is mandatory"
print_help()
sys.exit(2)
if rampup:
for rampup_subdir in rampup.split(','):
rampup_dir = rampup_subdir.strip()
rampup_dirs.append(rampup_dir)
if not os.path.isdir(rampup_dir):
print "Rampup option shall point a directory"
print "Error: the path %s doesn't exists !" % rampup_dir
sys.exit(2)
if not os.path.isfile(rampup_dir + "/hosts"):
print "A valid rampup directory (%s) shall have a 'hosts'" \
" file in it" % rampup_dir
print "Exiting"
sys.exit(2)
current_dir = "%s/results/" % (rampup_dir)
try:
if os.path.exists(current_dir):
shutil.rmtree(current_dir)
except IOError as e:
print "Unable to delete directory %s" % current_dir
print e
sys.exit(2)
            temp_rampup_values = [int(name) for name in os.listdir(rampup_dir)
                                  if os.path.isdir(os.path.join(rampup_dir, name))]
if not rampup_values:
rampup_values = temp_rampup_values
if len(rampup_values) < 2:
print "A valid rampup directory (%s) shall have " \
"more than 1 output in it" % rampup_dir
print "Exiting"
sys.exit(2)
print "Found %d rampup tests to analyse (from %d " \
"host up to %d)" % (len(rampup_values),
min(rampup_values),
max(rampup_values))
else:
if rampup_values != temp_rampup_values:
print "Directory %s doesn't have the same rampup values " \
"than the previous ones !" % (rampup_dir)
print "Exiting"
sys.exit(2)
if rampup_values:
bench_values = []
for job in os.listdir("%s/%s" % (rampup_dir, rampup_values[0])):
print "Processing Job '%s'" % job
metrics = {}
titles = {}
for rampup_dir in rampup_dirs:
result_dir = rampup_dir
if len(rampup_dirs) > 1:
result_dir = "compared"
current_dir = "%s/results/%s/" % (result_dir, job)
try:
if not os.path.exists(current_dir):
os.makedirs(current_dir)
except:
print "Unable to create directory %s" % current_dir
sys.exit(2)
for rampup_value in sorted(rampup_values):
metrics = {}
metrics_file = (rampup_dir +
"/%d/%s/metrics" % (rampup_value, job))
if not os.path.isfile(metrics_file):
print "Missing metric file for rampup=%d (%s)" % \
(rampup_value, metrics_file)
print "Skipping %d" % rampup_value
continue
metrics = eval(open(metrics_file).read())
titles[rampup_dir] = metrics["bench"]["title"]
compute_metrics(current_dir, rampup_value, metrics)
bench_values.append(
analyze_data((global_params, rampup_dir + '/' +
str(rampup_value) + '/' + job + '/'),
ignore_list, detail,
rampup_value, max(rampup_values),
current_dir))
            plot_results(current_dir, rampup_values, job, metrics,
                         bench_values, titles, rampup_dirs)
if len(titles.keys()) > 1:
final_directory_name = ""
for key in titles.keys():
if not final_directory_name:
final_directory_name = "%s" % titles[key]
else:
final_directory_name = "%s_vs_%s" % \
(final_directory_name, titles[key])
if os.path.exists(final_directory_name):
shutil.rmtree(final_directory_name)
os.rename(result_dir, final_directory_name)
print "Output results can be found in directory '%s'" % \
final_directory_name
else:
analyze_data(global_params, pattern, ignore_list, detail)
# Main
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
"""
This module contains integer constants from a C header file named something
like gl.h.
"""
GL_DEPTH_BUFFER_BIT = 0x00000100
GL_STENCIL_BUFFER_BIT = 0x00000400
GL_COLOR_BUFFER_BIT = 0x00004000
GL_POINTS = 0x0000
GL_LINES = 0x0001
GL_LINE_LOOP = 0x0002
GL_LINE_STRIP = 0x0003
GL_TRIANGLES = 0x0004
GL_TRIANGLE_STRIP = 0x0005
GL_TRIANGLE_FAN = 0x0006
GL_NEVER = 0x0200
GL_LESS = 0x0201
GL_EQUAL = 0x0202
GL_LEQUAL = 0x0203
GL_GREATER = 0x0204
GL_NOTEQUAL = 0x0205
GL_GEQUAL = 0x0206
GL_ALWAYS = 0x0207
GL_SRC_COLOR = 0x0300
GL_ONE_MINUS_SRC_COLOR = 0x0301
GL_SRC_ALPHA = 0x0302
GL_ONE_MINUS_SRC_ALPHA = 0x0303
GL_DST_ALPHA = 0x0304
GL_ONE_MINUS_DST_ALPHA = 0x0305
GL_DST_COLOR = 0x0306
GL_ONE_MINUS_DST_COLOR = 0x0307
GL_SRC_ALPHA_SATURATE = 0x0308
GL_CLIP_PLANE0 = 0x3000
GL_CLIP_PLANE1 = 0x3001
GL_CLIP_PLANE2 = 0x3002
GL_CLIP_PLANE3 = 0x3003
GL_CLIP_PLANE4 = 0x3004
GL_CLIP_PLANE5 = 0x3005
GL_FRONT = 0x0404
GL_BACK = 0x0405
GL_FRONT_AND_BACK = 0x0408
GL_FOG = 0x0B60
GL_LIGHTING = 0x0B50
GL_TEXTURE_2D = 0x0DE1
GL_CULL_FACE = 0x0B44
GL_ALPHA_TEST = 0x0BC0
GL_BLEND = 0x0BE2
GL_COLOR_LOGIC_OP = 0x0BF2
GL_DITHER = 0x0BD0
GL_STENCIL_TEST = 0x0B90
GL_DEPTH_TEST = 0x0B71
GL_POINT_SMOOTH = 0x0B10
GL_LINE_SMOOTH = 0x0B20
GL_SCISSOR_TEST = 0x0C11
GL_COLOR_MATERIAL = 0x0B57
GL_NORMALIZE = 0x0BA1
GL_RESCALE_NORMAL = 0x803A
GL_POLYGON_OFFSET_FILL = 0x8037
GL_VERTEX_ARRAY = 0x8074
GL_NORMAL_ARRAY = 0x8075
GL_COLOR_ARRAY = 0x8076
GL_TEXTURE_COORD_ARRAY = 0x8078
GL_MULTISAMPLE = 0x809D
GL_SAMPLE_ALPHA_TO_COVERAGE = 0x809E
GL_SAMPLE_ALPHA_TO_ONE = 0x809F
GL_SAMPLE_COVERAGE = 0x80A0
GL_NO_ERROR = 0
GL_INVALID_ENUM = 0x0500
GL_INVALID_VALUE = 0x0501
GL_INVALID_OPERATION = 0x0502
GL_STACK_OVERFLOW = 0x0503
GL_STACK_UNDERFLOW = 0x0504
GL_OUT_OF_MEMORY = 0x0505
GL_INVALID_FRAMEBUFFER_OPERATION = 0x0506
GL_EXP = 0x0800
GL_EXP2 = 0x0801
GL_FOG_DENSITY = 0x0B62
GL_FOG_START = 0x0B63
GL_FOG_END = 0x0B64
GL_FOG_MODE = 0x0B65
GL_FOG_COLOR = 0x0B66
GL_CW = 0x0900
GL_CCW = 0x0901
GL_CURRENT_COLOR = 0x0B00
GL_CURRENT_NORMAL = 0x0B02
GL_CURRENT_TEXTURE_COORDS = 0x0B03
GL_POINT_SIZE = 0x0B11
GL_POINT_SIZE_MIN = 0x8126
GL_POINT_SIZE_MAX = 0x8127
GL_POINT_FADE_THRESHOLD_SIZE = 0x8128
GL_POINT_DISTANCE_ATTENUATION = 0x8129
GL_SMOOTH_POINT_SIZE_RANGE = 0x0B12
GL_LINE_WIDTH = 0x0B21
GL_SMOOTH_LINE_WIDTH_RANGE = 0x0B22
GL_ALIASED_POINT_SIZE_RANGE = 0x846D
GL_ALIASED_LINE_WIDTH_RANGE = 0x846E
GL_CULL_FACE_MODE = 0x0B45
GL_FRONT_FACE = 0x0B46
GL_SHADE_MODEL = 0x0B54
GL_DEPTH_RANGE = 0x0B70
GL_DEPTH_WRITEMASK = 0x0B72
GL_DEPTH_CLEAR_VALUE = 0x0B73
GL_DEPTH_FUNC = 0x0B74
GL_STENCIL_CLEAR_VALUE = 0x0B91
GL_STENCIL_FUNC = 0x0B92
GL_STENCIL_VALUE_MASK = 0x0B93
GL_STENCIL_FAIL = 0x0B94
GL_STENCIL_PASS_DEPTH_FAIL = 0x0B95
GL_STENCIL_PASS_DEPTH_PASS = 0x0B96
GL_STENCIL_REF = 0x0B97
GL_STENCIL_WRITEMASK = 0x0B98
GL_MATRIX_MODE = 0x0BA0
GL_VIEWPORT = 0x0BA2
GL_MODELVIEW_STACK_DEPTH = 0x0BA3
GL_PROJECTION_STACK_DEPTH = 0x0BA4
GL_TEXTURE_STACK_DEPTH = 0x0BA5
GL_MODELVIEW_MATRIX = 0x0BA6
GL_PROJECTION_MATRIX = 0x0BA7
GL_TEXTURE_MATRIX = 0x0BA8
GL_ALPHA_TEST_FUNC = 0x0BC1
GL_ALPHA_TEST_REF = 0x0BC2
GL_BLEND_DST = 0x0BE0
GL_BLEND_SRC = 0x0BE1
GL_LOGIC_OP_MODE = 0x0BF0
GL_SCISSOR_BOX = 0x0C10
GL_SCISSOR_TEST = 0x0C11
GL_COLOR_CLEAR_VALUE = 0x0C22
GL_COLOR_WRITEMASK = 0x0C23
GL_UNPACK_ALIGNMENT = 0x0CF5
GL_PACK_ALIGNMENT = 0x0D05
GL_MAX_LIGHTS = 0x0D31
GL_MAX_CLIP_PLANES = 0x0D32
GL_MAX_TEXTURE_SIZE = 0x0D33
GL_MAX_MODELVIEW_STACK_DEPTH = 0x0D36
GL_MAX_PROJECTION_STACK_DEPTH = 0x0D38
GL_MAX_TEXTURE_STACK_DEPTH = 0x0D39
GL_MAX_VIEWPORT_DIMS = 0x0D3A
GL_MAX_TEXTURE_UNITS = 0x84E2
GL_SUBPIXEL_BITS = 0x0D50
GL_RED_BITS = 0x0D52
GL_GREEN_BITS = 0x0D53
GL_BLUE_BITS = 0x0D54
GL_ALPHA_BITS = 0x0D55
GL_DEPTH_BITS = 0x0D56
GL_STENCIL_BITS = 0x0D57
GL_POLYGON_OFFSET_UNITS = 0x2A00
GL_POLYGON_OFFSET_FILL = 0x8037
GL_POLYGON_OFFSET_FACTOR = 0x8038
GL_TEXTURE_BINDING_2D = 0x8069
GL_VERTEX_ARRAY_SIZE = 0x807A
GL_VERTEX_ARRAY_TYPE = 0x807B
GL_VERTEX_ARRAY_STRIDE = 0x807C
GL_NORMAL_ARRAY_TYPE = 0x807E
GL_NORMAL_ARRAY_STRIDE = 0x807F
GL_COLOR_ARRAY_SIZE = 0x8081
GL_COLOR_ARRAY_TYPE = 0x8082
GL_COLOR_ARRAY_STRIDE = 0x8083
GL_TEXTURE_COORD_ARRAY_SIZE = 0x8088
GL_TEXTURE_COORD_ARRAY_TYPE = 0x8089
GL_TEXTURE_COORD_ARRAY_STRIDE = 0x808A
GL_VERTEX_ARRAY_POINTER = 0x808E
GL_NORMAL_ARRAY_POINTER = 0x808F
GL_COLOR_ARRAY_POINTER = 0x8090
GL_TEXTURE_COORD_ARRAY_POINTER = 0x8092
GL_SAMPLE_BUFFERS = 0x80A8
GL_SAMPLES = 0x80A9
GL_SAMPLE_COVERAGE_VALUE = 0x80AA
GL_SAMPLE_COVERAGE_INVERT = 0x80AB
GL_NUM_COMPRESSED_TEXTURE_FORMATS = 0x86A2
GL_COMPRESSED_TEXTURE_FORMATS = 0x86A3
GL_DONT_CARE = 0x1100
GL_FASTEST = 0x1101
GL_NICEST = 0x1102
GL_PERSPECTIVE_CORRECTION_HINT = 0x0C50
GL_POINT_SMOOTH_HINT = 0x0C51
GL_LINE_SMOOTH_HINT = 0x0C52
GL_FOG_HINT = 0x0C54
GL_GENERATE_MIPMAP_HINT = 0x8192
GL_LIGHT_MODEL_AMBIENT = 0x0B53
GL_LIGHT_MODEL_TWO_SIDE = 0x0B52
GL_AMBIENT = 0x1200
GL_DIFFUSE = 0x1201
GL_SPECULAR = 0x1202
GL_POSITION = 0x1203
GL_SPOT_DIRECTION = 0x1204
GL_SPOT_EXPONENT = 0x1205
GL_SPOT_CUTOFF = 0x1206
GL_CONSTANT_ATTENUATION = 0x1207
GL_LINEAR_ATTENUATION = 0x1208
GL_QUADRATIC_ATTENUATION = 0x1209
GL_BYTE = 0x1400
GL_UNSIGNED_BYTE = 0x1401
GL_SHORT = 0x1402
GL_UNSIGNED_SHORT = 0x1403
GL_FLOAT = 0x1406
GL_FIXED = 0x140C
GL_CLEAR = 0x1500
GL_AND = 0x1501
GL_AND_REVERSE = 0x1502
GL_COPY = 0x1503
GL_AND_INVERTED = 0x1504
GL_NOOP = 0x1505
GL_XOR = 0x1506
GL_OR = 0x1507
GL_NOR = 0x1508
GL_EQUIV = 0x1509
GL_INVERT = 0x150A
GL_OR_REVERSE = 0x150B
GL_COPY_INVERTED = 0x150C
GL_OR_INVERTED = 0x150D
GL_NAND = 0x150E
GL_SET = 0x150F
GL_EMISSION = 0x1600
GL_SHININESS = 0x1601
GL_AMBIENT_AND_DIFFUSE = 0x1602
GL_MODELVIEW = 0x1700
GL_PROJECTION = 0x1701
GL_TEXTURE = 0x1702
GL_ALPHA = 0x1906
GL_RGB = 0x1907
GL_RGBA = 0x1908
GL_LUMINANCE = 0x1909
GL_LUMINANCE_ALPHA = 0x190A
GL_UNPACK_ALIGNMENT = 0x0CF5
GL_PACK_ALIGNMENT = 0x0D05
GL_UNSIGNED_SHORT_4_4_4_4 = 0x8033
GL_UNSIGNED_SHORT_5_5_5_1 = 0x8034
GL_UNSIGNED_SHORT_5_6_5 = 0x8363
GL_FLAT = 0x1D00
GL_SMOOTH = 0x1D01
GL_KEEP = 0x1E00
GL_REPLACE = 0x1E01
GL_INCR = 0x1E02
GL_DECR = 0x1E03
GL_VENDOR = 0x1F00
GL_RENDERER = 0x1F01
GL_VERSION = 0x1F02
GL_EXTENSIONS = 0x1F03
GL_MODULATE = 0x2100
GL_DECAL = 0x2101
GL_ADD = 0x0104
GL_TEXTURE_ENV_MODE = 0x2200
GL_TEXTURE_ENV_COLOR = 0x2201
GL_TEXTURE_ENV = 0x2300
GL_NEAREST = 0x2600
GL_LINEAR = 0x2601
GL_NEAREST_MIPMAP_NEAREST = 0x2700
GL_LINEAR_MIPMAP_NEAREST = 0x2701
GL_NEAREST_MIPMAP_LINEAR = 0x2702
GL_LINEAR_MIPMAP_LINEAR = 0x2703
GL_TEXTURE_MAG_FILTER = 0x2800
GL_TEXTURE_MIN_FILTER = 0x2801
GL_TEXTURE_WRAP_S = 0x2802
GL_TEXTURE_WRAP_T = 0x2803
GL_GENERATE_MIPMAP = 0x8191
GL_TEXTURE0 = 0x84C0
GL_TEXTURE1 = 0x84C1
GL_TEXTURE2 = 0x84C2
GL_TEXTURE3 = 0x84C3
GL_TEXTURE4 = 0x84C4
GL_TEXTURE5 = 0x84C5
GL_TEXTURE6 = 0x84C6
GL_TEXTURE7 = 0x84C7
GL_TEXTURE8 = 0x84C8
GL_TEXTURE9 = 0x84C9
GL_TEXTURE10 = 0x84CA
GL_TEXTURE11 = 0x84CB
GL_TEXTURE12 = 0x84CC
GL_TEXTURE13 = 0x84CD
GL_TEXTURE14 = 0x84CE
GL_TEXTURE15 = 0x84CF
GL_TEXTURE16 = 0x84D0
GL_TEXTURE17 = 0x84D1
GL_TEXTURE18 = 0x84D2
GL_TEXTURE19 = 0x84D3
GL_TEXTURE20 = 0x84D4
GL_TEXTURE21 = 0x84D5
GL_TEXTURE22 = 0x84D6
GL_TEXTURE23 = 0x84D7
GL_TEXTURE24 = 0x84D8
GL_TEXTURE25 = 0x84D9
GL_TEXTURE26 = 0x84DA
GL_TEXTURE27 = 0x84DB
GL_TEXTURE28 = 0x84DC
GL_TEXTURE29 = 0x84DD
GL_TEXTURE30 = 0x84DE
GL_TEXTURE31 = 0x84DF
GL_ACTIVE_TEXTURE = 0x84E0
GL_CLIENT_ACTIVE_TEXTURE = 0x84E1
GL_REPEAT = 0x2901
GL_CLAMP_TO_EDGE = 0x812F
GL_LIGHT0 = 0x4000
GL_LIGHT1 = 0x4001
GL_LIGHT2 = 0x4002
GL_LIGHT3 = 0x4003
GL_LIGHT4 = 0x4004
GL_LIGHT5 = 0x4005
GL_LIGHT6 = 0x4006
GL_LIGHT7 = 0x4007
GL_ARRAY_BUFFER = 0x8892
GL_ELEMENT_ARRAY_BUFFER = 0x8893
GL_ARRAY_BUFFER_BINDING = 0x8894
GL_ELEMENT_ARRAY_BUFFER_BINDING = 0x8895
GL_VERTEX_ARRAY_BUFFER_BINDING = 0x8896
GL_NORMAL_ARRAY_BUFFER_BINDING = 0x8897
GL_COLOR_ARRAY_BUFFER_BINDING = 0x8898
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING = 0x889A
GL_STATIC_DRAW = 0x88E4
GL_DYNAMIC_DRAW = 0x88E8
GL_BUFFER_SIZE = 0x8764
GL_BUFFER_USAGE = 0x8765
GL_SUBTRACT = 0x84E7
GL_COMBINE = 0x8570
GL_COMBINE_RGB = 0x8571
GL_COMBINE_ALPHA = 0x8572
GL_RGB_SCALE = 0x8573
GL_ADD_SIGNED = 0x8574
GL_INTERPOLATE = 0x8575
GL_CONSTANT = 0x8576
GL_PRIMARY_COLOR = 0x8577
GL_PREVIOUS = 0x8578
GL_OPERAND0_RGB = 0x8590
GL_OPERAND1_RGB = 0x8591
GL_OPERAND2_RGB = 0x8592
GL_OPERAND0_ALPHA = 0x8598
GL_OPERAND1_ALPHA = 0x8599
GL_OPERAND2_ALPHA = 0x859A
GL_ALPHA_SCALE = 0x0D1C
GL_SRC0_RGB = 0x8580
GL_SRC1_RGB = 0x8581
GL_SRC2_RGB = 0x8582
GL_SRC0_ALPHA = 0x8588
GL_SRC1_ALPHA = 0x8589
GL_SRC2_ALPHA = 0x858A
GL_DOT3_RGB = 0x86AE
GL_DOT3_RGBA = 0x86AF
GL_IMPLEMENTATION_COLOR_READ_TYPE_OES = 0x8B9A
GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES = 0x8B9B
GL_PALETTE4_RGB8_OES = 0x8B90
GL_PALETTE4_RGBA8_OES = 0x8B91
GL_PALETTE4_R5_G6_B5_OES = 0x8B92
GL_PALETTE4_RGBA4_OES = 0x8B93
GL_PALETTE4_RGB5_A1_OES = 0x8B94
GL_PALETTE8_RGB8_OES = 0x8B95
GL_PALETTE8_RGBA8_OES = 0x8B96
GL_PALETTE8_R5_G6_B5_OES = 0x8B97
GL_PALETTE8_RGBA4_OES = 0x8B98
GL_PALETTE8_RGB5_A1_OES = 0x8B99
GL_POINT_SIZE_ARRAY_OES = 0x8B9C
GL_POINT_SIZE_ARRAY_TYPE_OES = 0x898A
GL_POINT_SIZE_ARRAY_STRIDE_OES = 0x898B
GL_POINT_SIZE_ARRAY_POINTER_OES = 0x898C
GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES = 0x8B9F
GL_POINT_SPRITE_OES = 0x8861
GL_COORD_REPLACE_OES = 0x8862
|
|
# -*- coding:utf8 -*-
# File : desc_a3cc_box2d_LunarLanderContinuousV2.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 5/31/17
#
# This file is part of TensorArtist.
"""
A3C-Continuous reproduction on the Lunar Lander game (OpenAI.Gym.Box2D.LunarLander).
This model does not follow the original settings in DeepMind's paper, which uses:
1. An LSTM model.
2. Episode-as-a-batch updates.
3. A Gaussian distribution for the policy.
In this model, we include several tricks for the training:
1. A truncated Laplacian distribution for the policy.
2. Positive-advantage-only updates.
Details can be found in the code.
"""
import collections
import functools
import os
import queue
import numpy as np
from tartist import random
from tartist.app import rl
from tartist.core import get_env, get_logger
from tartist.core.utils.cache import cached_result
from tartist.core.utils.naming import get_dump_directory
from tartist.data import flow
from tartist.nn import opr as O, optimizer, summary
logger = get_logger(__file__)
__envs__ = {
'dir': {
'root': get_dump_directory(__file__),
},
'a3c': {
'env_name': 'LunarLanderContinuous-v2',
'nr_history_frames': 4,
        'max_nr_steps': None,  # no length limit
# Action space used for exploration strategy sampling
# Instead of sampling from a truncated Laplacian distribution, we perform a simplified version via
# discretizing the action space.
'actor_space': np.array([
np.linspace(-1, 1, 11),
np.linspace(-1, 1, 11)
], dtype='float32'),
# gamma and TD steps in future_reward
'gamma': 0.99,
'nr_td_steps': 5,
# async training data collector
'nr_players': 50,
'nr_predictors': 2,
'predictor': {
'batch_size': 16,
'outputs_name': ['value', 'policy_explore', 'policy']
},
'inference': {
'nr_plays': 20,
},
'demo': {
'nr_plays': 5
}
},
'trainer': {
'learning_rate': 0.001,
'batch_size': 128,
'epoch_size': 200,
'nr_epochs': 100,
}
}
__trainer_cls__ = rl.train.A3CTrainer
__trainer_env_cls__ = rl.train.A3CTrainerEnv
# normal pdf, not used (instead, use Laplace distribution)
def normal_pdf(x, mu, var):
    exponent = ((x - mu) ** 2.) / (2. * var + 1e-4)
    prob = (1. / O.sqrt(2. * np.pi * var)) * O.exp(-exponent)
    return prob
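# Illustrative sketch (not used by the model) of the "truncated Laplacian via
# discretization" trick described in the module docstring: each action dimension is
# discretized on a grid, and the exploration policy is a categorical distribution
# proportional to exp(-|a - mu|), i.e. a Laplace density with scale 1 restricted to the
# grid. The names below are hypothetical.
def _laplace_policy_sketch(mu, grid):
    # mu:   (action_length,) predicted policy mean
    # grid: (action_length, nr_bins) discretized action space (cf. a3c.actor_space)
    logits = -np.abs(grid - mu[:, None])
    probs = np.exp(logits)
    probs /= probs.sum(axis=1, keepdims=True)  # softmax over the bins
    return probs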
def make_network(env):
is_train = env.phase is env.Phase.TRAIN
# device control: always use master device only for training session
if is_train:
slave_devices = env.slave_devices
env.set_slave_devices([])
with env.create_network() as net:
input_length, = get_input_shape()
action_length, = get_action_shape()
dpc = env.create_dpcontroller()
with dpc.activate():
def inputs():
state = O.placeholder('state', shape=(None, input_length))
return [state]
# forward policy network and value network separately (actor-critic)
def forward(x):
_ = x
_ = O.fc('fcp1', _, 512, nonlin=O.relu)
_ = O.fc('fcp2', _, 256, nonlin=O.relu)
dpc.add_output(_, name='feature_p')
_ = x
_ = O.fc('fcv1', _, 512, nonlin=O.relu)
_ = O.fc('fcv2', _, 256, nonlin=O.relu)
dpc.add_output(_, name='feature_v')
dpc.set_input_maker(inputs).set_forward_func(forward)
_ = dpc.outputs['feature_p']
# mu and std, assuming spherical covariance
policy_mu = O.fc('fc_policy_mu', _, action_length)
        # In this example, we do not use a learned variance; instead, we use a fixed value.
# policy_var = O.fc('fc_policy_var', _, 1, nonlin=O.softplus)
# policy_var = O.tile(policy_var, [1, action_length], name='policy_var')
# policy_std = O.sqrt(policy_var, name='policy_std')
actor_space = get_env('a3c.actor_space')
nr_bins = actor_space.shape[1]
        # Instead of using a normal distribution, we use a Laplacian distribution for the policy.
        # We also sample from a truncated Laplacian distribution (we only care about values
        # inside the action space). To simplify the computation, we discretize the action space.
actor_space = O.constant(actor_space)
actor_space = O.tile(actor_space.add_axis(0), [policy_mu.shape[0], 1, 1])
policy_mu3 = O.tile(policy_mu.add_axis(2), [1, 1, nr_bins])
# policy_std3 = O.tile(policy_std.add_axis(2), [1, 1, nr_bins])
# logits = O.abs(actor_space - policy_mu3) / (policy_std3 + 1e-2)
# Here, we force the std of the policy to be 1.
logits_explore = -O.abs(actor_space - policy_mu3)
policy_explore = O.softmax(logits_explore)
# Clip the policy for output
action_range = get_action_range()
action_range = tuple(map(O.constant, action_range))
action_range = tuple(map(lambda x: O.tile(x.add_axis(0), [policy_mu.shape[0], 1]), action_range))
policy_output = O.clip_by_value(policy_mu, *action_range)
_ = dpc.outputs['feature_v']
value = O.fc('fc_value', _, 1)
value = value.remove_axis(1, name='value')
# Note that, here the policy_explore is a discrete policy,
# and policy is actually the continuous one.
net.add_output(policy_explore, name='policy_explore')
net.add_output(policy_output, name='policy')
net.add_output(value, name='value')
if is_train:
action = O.placeholder('action', shape=(None, action_length), dtype='int64')
future_reward = O.placeholder('future_reward', shape=(None, ))
entropy_beta = O.scalar('entropy_beta', 0.1, trainable=False)
# Since we discretized the action space, use cross entropy here.
log_policy = O.log(policy_explore + 1e-4)
log_pi_a_given_s = (log_policy * O.one_hot(action, nr_bins)).sum(axis=2).sum(axis=1)
advantage = (future_reward - O.zero_grad(value)).rename('advantage')
            # Important trick: use only positive advantages to perform gradient ascent. This stabilizes the training.
advantage = advantage * O.zero_grad((advantage > 0.).astype('float32'))
policy_loss = O.identity(-(log_pi_a_given_s * advantage).mean(), name='policy_loss')
# As mentioned, there is no trainable variance.
# entropy_loss = O.identity(-entropy_beta * (policy_std ** 2.).sum(axis=1).mean(), name='entropy_loss')
value_loss = O.raw_smooth_l1_loss('raw_value_loss', future_reward, value).mean(name='value_loss')
            loss = O.add_n([policy_loss, value_loss], name='loss')
            net.set_loss(loss)
            for v in [policy_loss, value_loss,
value.mean(name='predict_value'), advantage.rms(name='rms_advantage'), loss]:
summary.scalar(v)
if is_train:
env.set_slave_devices(slave_devices)
def make_player(is_train=True, dump_dir=None):
p = rl.GymRLEnviron(get_env('a3c.env_name'), dump_dir=dump_dir)
p = rl.HistoryFrameProxyRLEnviron(p, get_env('a3c.nr_history_frames'))
p = rl.LimitLengthProxyRLEnviron(p, get_env('a3c.max_nr_steps'))
if is_train:
p = rl.AutoRestartProxyRLEnviron(p)
return p
def make_optimizer(env):
lr = optimizer.base.make_optimizer_variable('learning_rate', get_env('trainer.learning_rate'))
wrapper = optimizer.OptimizerWrapper()
wrapper.set_base_optimizer(optimizer.base.AdamOptimizer(lr, epsilon=1e-3))
wrapper.append_grad_modifier(optimizer.grad_modifier.LearningRateMultiplier([
('*/b', 2.0),
]))
# To make the training more stable, we use grad clip by value.
wrapper.append_grad_modifier(optimizer.grad_modifier.GlobalGradClip(0.001))
# wrapper.append_grad_modifier(optimizer.grad_modifier.GlobalGradClipByAvgNorm(0.1))
env.set_optimizer(wrapper)
def make_dataflow_train(env):
batch_size = get_env('trainer.batch_size')
df = flow.QueueDataFlow(env.data_queue)
df = flow.BatchDataFlow(df, batch_size, sample_dict={
'state': np.empty((batch_size, ) + get_input_shape(), dtype='float32'),
'action': np.empty((batch_size, ) + get_action_shape(), dtype='int64'),
'future_reward': np.empty((batch_size, ), dtype='float32')
})
return df
@cached_result
def get_action_shape():
p = make_player()
n = p.action_space.shape
del p
return tuple(n)
@cached_result
def get_action_range():
p = make_player()
l, h = p.action_space.low, p.action_space.high
del p
# Convert it to float32 to match the network's data type.
return l.astype('float32'), h.astype('float32')
@cached_result
def get_input_shape():
p = make_player()
p.restart()
input_shape = p.current_state.shape
del p
return input_shape
def sample_action(policy):
space = get_env('a3c.actor_space')
action = []
for i, s in enumerate(space):
a = random.choice(len(s), p=policy[i])
action.append(a)
return action
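# Illustrative usage (cf. player_func below): 'action' holds one bin index per action
# dimension; it is mapped back to a continuous action via
# actor_space[np.arange(len(action)), action].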
def player_func(pid, requester):
player = make_player()
actor_space = get_env('a3c.actor_space')
player.restart()
state = player.current_state
reward = 0
is_over = False
with requester.activate():
while True:
action = requester.query('data', (state, reward, is_over))
mapped_action = actor_space[np.arange(len(action)), action]
reward, is_over = player.action(mapped_action)
if len(player.stats['score']) > 0:
score = player.stats['score'][-1]
requester.query('stat', {'async/train/score': score}, do_recv=False)
player.clear_stats()
state = player.current_state
def _predictor_func(pid, router, task_queue, func, is_inference=False):
batch_size = get_env('a3c.predictor.batch_size')
batched_state = np.empty((batch_size, ) + get_input_shape(), dtype='float32')
while True:
callbacks = []
nr_total = 0
for i in range(batch_size):
if i == 0 or not is_inference:
identifier, inp, callback = task_queue.get()
else:
try:
identifier, inp, callback = task_queue.get_nowait()
except queue.Empty:
break
batched_state[i] = inp[0]
callbacks.append(callback)
nr_total += 1
out = func(state=batched_state[:nr_total])
for i in range(nr_total):
if is_inference:
action = out['policy'][i]
else:
action = sample_action(out['policy_explore'][i])
callbacks[i](action, out['value'][i])
def make_a3c_configs(env):
from common_a3c import on_data_func, on_stat_func
predictor_func = functools.partial(_predictor_func, is_inference=False)
env.player_master.player_func = player_func
env.player_master.predictor_func = predictor_func
env.player_master.on_data_func = on_data_func
env.player_master.on_stat_func = on_stat_func
env.players_history = collections.defaultdict(list)
def main_train(trainer):
from tartist.plugins.trainer_enhancer import summary
summary.enable_summary_history(trainer, extra_summary_types={
'async/train/score': 'async_scalar',
'async/inference/score': 'async_scalar',
})
summary.enable_echo_summary_scalar(trainer, summary_spec={
'async/train/score': ['avg', 'max'],
'async/inference/score': ['avg', 'max']
})
from tartist.plugins.trainer_enhancer import progress
progress.enable_epoch_progress(trainer)
from tartist.plugins.trainer_enhancer import snapshot
snapshot.enable_snapshot_saver(trainer, save_interval=5)
from tartist.core import register_event
from common_a3c import main_inference_play_multithread
def on_epoch_after(trainer):
if trainer.epoch > 0 and trainer.epoch % 2 == 0:
main_inference_play_multithread(trainer, make_player=make_player)
# This one should run before monitor.
register_event(trainer, 'epoch:after', on_epoch_after, priority=5)
trainer.train()
def main_demo(env, func):
dump_dir = get_env('dir.demo', os.path.join(get_env('dir.root'), 'demo'))
logger.info('Demo dump dir: {}'.format(dump_dir))
player = make_player(is_train=False, dump_dir=dump_dir)
repeat_time = get_env('a3c.demo.nr_plays', 1)
def get_action(inp, func=func):
action = func(state=inp[np.newaxis])['policy'][0].argmax()
return action
for i in range(repeat_time):
player.play_one_episode(get_action)
logger.info('#{} play score={}'.format(i, player.stats['score'][-1]))
|
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def sigmoid(x):
return numpy.tanh(x * 0.5) * 0.5 + 0.5
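# Note: sigmoid(x) = 1 / (1 + exp(-x)) = tanh(x / 2) / 2 + 1 / 2; the tanh form above
# avoids overflow of exp(-x) for large negative x.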
def _split(inputs, pos):
return inputs[:pos], inputs[pos:]
def _to_gpu(x):
if x is None:
return None
elif isinstance(x, list):
return [_to_gpu(xi) for xi in x]
else:
return cuda.to_gpu(x)
def _wrap_variable(x):
if isinstance(x, list):
return [_wrap_variable(xi) for xi in x]
else:
return chainer.Variable(x)
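# The check_forward reference below computes, per time step and per layer, the standard
# LSTM cell (w[0:4]/b[0:4] act on the input, w[4:8]/b[4:8] on the hidden state):
#   i = sigmoid(W_i x + R_i h + b_i),   f = sigmoid(W_f x + R_f h + b_f)
#   c_bar = tanh(W_c x + R_c h + b_c),  o = sigmoid(W_o x + R_o h + b_o)
#   c' = f * c + i * c_bar,             h' = o * tanh(c')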
class TestNStepLSTM(unittest.TestCase):
batches = [3, 2, 1]
length = len(batches)
in_size = 3
out_size = 2
n_layers = 2
dropout = 0.0
def setUp(self):
self.xs = [numpy.random.uniform(-1, 1, (b, self.in_size)).astype('f')
for b in self.batches]
h_shape = (self.n_layers, self.batches[0], self.out_size)
self.cx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.hx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.ws = []
self.bs = []
for i in range(self.n_layers):
weights = []
biases = []
for j in range(8):
if i == 0 and j < 4:
w_in = self.in_size
else:
w_in = self.out_size
weights.append(numpy.random.uniform(
-1, 1, (self.out_size, w_in)).astype('f'))
biases.append(numpy.random.uniform(
-1, 1, (self.out_size,)).astype('f'))
self.ws.append(weights)
self.bs.append(biases)
self.dys = [numpy.random.uniform(-1, 1, (b, self.out_size)).astype('f')
for b in self.batches]
self.dcy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.dhy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
def check_forward(
self, h_data, c_data, xs_data, ws_data, bs_data):
h = _wrap_variable(h_data)
c = _wrap_variable(c_data)
xs = _wrap_variable(xs_data)
ws = _wrap_variable(ws_data)
bs = _wrap_variable(bs_data)
hy, cy, ys = functions.n_step_lstm(
self.n_layers, self.dropout, h, c, ws, bs, xs)
e_hy = self.hx.copy()
e_cy = self.cx.copy()
for ind in range(self.length):
x = self.xs[ind]
batch = x.shape[0]
for layer in range(self.n_layers):
w = self.ws[layer]
b = self.bs[layer]
h_prev = e_hy[layer, :batch]
c_prev = e_cy[layer, :batch]
i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
c_bar = numpy.tanh(
x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
e_hy[layer, :batch] = e_h
e_cy[layer, :batch] = e_c
x = e_h
testing.assert_allclose(
ys[ind].data, x, rtol=1e-4, atol=1e-4)
testing.assert_allclose(hy.data, e_hy, rtol=1e-4, atol=1e-4)
testing.assert_allclose(cy.data, e_cy, rtol=1e-4, atol=1e-4)
def test_forward_cpu(self):
self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs)
def check_forward_gpu(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
self.check_forward(
_to_gpu(self.hx),
_to_gpu(self.cx),
_to_gpu(self.xs),
_to_gpu(self.ws),
_to_gpu(self.bs))
@attr.gpu
def test_forward_gpu_cudnn_always(self):
self.check_forward_gpu('always')
@attr.gpu
def test_forward_gpu_cudnn_auto(self):
self.check_forward_gpu('auto')
@attr.gpu
def test_forward_gpu_cudnn_never(self):
self.check_forward_gpu('never')
def check_backward(self, h_data, c_data, xs_data, ws_data, bs_data,
dhy_data, dcy_data, dys_data):
args = tuple([h_data, c_data] + sum(ws_data, []) + sum(bs_data, []) +
xs_data)
grads = tuple([dhy_data, dcy_data] + dys_data)
def f(*inputs):
(hx, cx), inputs = _split(inputs, 2)
ws = []
for i in range(self.n_layers):
weights, inputs = _split(inputs, 8)
ws.append(weights)
bs = []
for i in range(self.n_layers):
biases, inputs = _split(inputs, 8)
bs.append(biases)
xs = inputs
hy, cy, ys = functions.n_step_lstm(
self.n_layers, self.dropout, hx, cx, ws, bs, xs)
return (hy, cy) + ys
gradient_check.check_backward(
f, args, grads, eps=1e-2, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.hx, self.cx, self.xs, self.ws, self.bs,
self.dhy, self.dcy, self.dys)
@attr.gpu
def test_backward_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_backward(
_to_gpu(self.hx),
_to_gpu(self.cx),
_to_gpu(self.xs),
_to_gpu(self.ws),
_to_gpu(self.bs),
_to_gpu(self.dhy),
_to_gpu(self.dcy),
_to_gpu(self.dys))
def call_forward(self, train):
hx = _wrap_variable(_to_gpu(self.hx))
cx = _wrap_variable(_to_gpu(self.cx))
xs = _wrap_variable(_to_gpu(self.xs))
ws = _wrap_variable(_to_gpu(self.ws))
bs = _wrap_variable(_to_gpu(self.bs))
with chainer.using_config('enable_backprop', train), \
chainer.using_config('train', train):
return functions.n_step_lstm(
self.n_layers, self.dropout, hx, cx, ws, bs, xs)
def check_call_cudnn_forward_training(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
expect = chainer.should_use_cudnn('>=auto', 5000)
with testing.patch('cupy.cudnn.rnn_forward_training') as func:
self.call_forward(True)
assert func.called == expect
@attr.cudnn
def test_call_cudnn_forward_training(self):
self.check_call_cudnn_forward_training('always')
self.check_call_cudnn_forward_training('never')
self.check_call_cudnn_forward_training('auto')
def check_call_cudnn_forward_inference(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
expect = chainer.should_use_cudnn('>=auto', 5000)
with testing.patch('cupy.cudnn.rnn_forward_inference') as func:
self.call_forward(False)
assert func.called == expect
@attr.cudnn
def test_call_cudnn_forward_inference(self):
self.check_call_cudnn_forward_inference('always')
self.check_call_cudnn_forward_inference('never')
self.check_call_cudnn_forward_inference('auto')
def check_call_cudnn_backward(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
expect = chainer.should_use_cudnn('>=auto', 5000)
hy, cy, ys = self.call_forward(True)
hy.grad = _to_gpu(self.dhy)
with testing.patch('cupy.cudnn.rnn_backward_weights') as func:
hy.backward()
assert func.called == expect
@attr.cudnn
def test_call_cudnn_backward(self):
self.check_call_cudnn_backward('always')
self.check_call_cudnn_backward('never')
self.check_call_cudnn_backward('auto')
class TestNStepBiLSTM(unittest.TestCase):
batches = [3, 2, 1]
length = len(batches)
in_size = 3
out_size = 2
n_layers = 3
dropout = 0.0
def setUp(self):
self.xs = [numpy.random.uniform(-1, 1, (b, self.in_size)).astype('f')
for b in self.batches]
h_shape = (self.n_layers * 2, self.batches[0], self.out_size)
self.cx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.hx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.ws = []
self.bs = []
for i in range(self.n_layers):
for di in [0, 1]:
weights = []
biases = []
for j in range(8):
if i == 0 and j < 4:
w_in = self.in_size
elif i > 0 and j < 4:
w_in = self.out_size * 2
else:
w_in = self.out_size
weights.append(numpy.random.uniform(
-1, 1, (self.out_size, w_in)).astype('f'))
biases.append(numpy.random.uniform(
-1, 1, (self.out_size,)).astype('f'))
self.ws.append(weights)
self.bs.append(biases)
self.dys = [numpy.random.uniform(-1, 1, (b, self.out_size * 2))
.astype('f') for b in self.batches]
self.dcy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.dhy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
def check_forward(
self, h_data, c_data, xs_data, ws_data, bs_data):
h = _wrap_variable(h_data)
c = _wrap_variable(c_data)
xs = _wrap_variable(xs_data)
ws = _wrap_variable(ws_data)
bs = _wrap_variable(bs_data)
hy, cy, ys = functions.n_step_bilstm(
self.n_layers, self.dropout, h, c, ws, bs, xs)
xs_next = self.xs
e_hy = self.hx.copy()
e_cy = self.cx.copy()
for layer in range(self.n_layers):
# forward
di = 0
xf = []
layer_idx = layer * 2 + di
w = self.ws[layer_idx]
b = self.bs[layer_idx]
for ind in range(self.length):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
c_prev = e_cy[layer_idx, :batch]
i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
c_bar = numpy.tanh(
x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
e_hy[layer_idx, :batch] = e_h
e_cy[layer_idx, :batch] = e_c
xf.append(e_h)
# backward
di = 1
xb = []
layer_idx = layer * 2 + di
w = self.ws[layer_idx]
b = self.bs[layer_idx]
for ind in reversed(range(self.length)):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
c_prev = e_cy[layer_idx, :batch]
i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
c_bar = numpy.tanh(
x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
e_hy[layer_idx, :batch] = e_h
e_cy[layer_idx, :batch] = e_c
xb.append(e_h)
xb.reverse()
xs_next = [numpy.concatenate([hfi, hbi], axis=1) for (hfi, hbi) in
zip(xf, xb)]
for k, (ysi, xsi) in enumerate(zip(ys, xs_next)):
testing.assert_allclose(ysi.data, xsi, rtol=1e-4, atol=1e-4)
testing.assert_allclose(hy.data, e_hy, rtol=1e-4, atol=1e-4)
testing.assert_allclose(cy.data, e_cy, rtol=1e-4, atol=1e-4)
def test_forward_cpu(self):
self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs)
def check_forward_gpu(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
self.check_forward(
_to_gpu(self.hx),
_to_gpu(self.cx),
_to_gpu(self.xs),
_to_gpu(self.ws),
_to_gpu(self.bs))
@attr.gpu
def test_forward_gpu_cudnn_always(self):
self.check_forward_gpu('always')
@attr.gpu
def test_forward_gpu_cudnn_auto(self):
self.check_forward_gpu('auto')
@attr.gpu
def test_forward_gpu_cudnn_never(self):
self.check_forward_gpu('never')
def check_backward(self, h_data, c_data, xs_data, ws_data, bs_data,
dhy_data, dcy_data, dys_data):
args = tuple([h_data, c_data] + sum(ws_data, []) + sum(bs_data, []) +
xs_data)
grads = tuple([dhy_data, dcy_data] + dys_data)
def f(*inputs):
(hx, cx), inputs = _split(inputs, 2)
ws = []
for i in range(self.n_layers * 2):
weights, inputs = _split(inputs, 8)
ws.append(weights)
bs = []
for i in range(self.n_layers * 2):
biases, inputs = _split(inputs, 8)
bs.append(biases)
xs = inputs
hy, cy, ys = functions.n_step_bilstm(
self.n_layers, self.dropout, hx, cx, ws, bs, xs)
return (hy, cy) + ys
gradient_check.check_backward(
f, args, grads, eps=1e-2, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.hx, self.cx, self.xs, self.ws, self.bs,
self.dhy, self.dcy, self.dys)
@attr.gpu
    def test_backward_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_backward(
_to_gpu(self.hx),
_to_gpu(self.cx),
_to_gpu(self.xs),
_to_gpu(self.ws),
_to_gpu(self.bs),
_to_gpu(self.dhy),
_to_gpu(self.dcy),
_to_gpu(self.dys))
def call_forward(self, train):
hx = _wrap_variable(_to_gpu(self.hx))
cx = _wrap_variable(_to_gpu(self.cx))
xs = _wrap_variable(_to_gpu(self.xs))
ws = _wrap_variable(_to_gpu(self.ws))
bs = _wrap_variable(_to_gpu(self.bs))
with chainer.using_config('enable_backprop', train), \
chainer.using_config('train', train):
return functions.n_step_bilstm(
self.n_layers, self.dropout, hx, cx, ws, bs, xs)
def check_call_cudnn_forward_training(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
expect = chainer.should_use_cudnn('>=auto', 5000)
with testing.patch('cupy.cudnn.rnn_forward_training') as func:
self.call_forward(True)
assert func.called == expect
@attr.cudnn
def test_call_cudnn_forward_training(self):
self.check_call_cudnn_forward_training('always')
self.check_call_cudnn_forward_training('never')
self.check_call_cudnn_forward_training('auto')
def check_call_cudnn_forward_inference(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
expect = chainer.should_use_cudnn('>=auto', 5000)
with testing.patch('cupy.cudnn.rnn_forward_inference') as func:
self.call_forward(False)
assert func.called == expect
@attr.cudnn
def test_call_cudnn_forward_inference(self):
self.check_call_cudnn_forward_inference('always')
self.check_call_cudnn_forward_inference('never')
self.check_call_cudnn_forward_inference('auto')
def check_call_cudnn_backward(self, use_cudnn):
with chainer.using_config('use_cudnn', use_cudnn):
expect = chainer.should_use_cudnn('>=auto', 5000)
hy, cy, ys = self.call_forward(True)
hy.grad = _to_gpu(self.dhy)
with testing.patch('cupy.cudnn.rnn_backward_weights') as func:
hy.backward()
assert func.called == expect
@attr.cudnn
def test_call_cudnn_backward(self):
self.check_call_cudnn_backward('always')
self.check_call_cudnn_backward('never')
self.check_call_cudnn_backward('auto')
def _stack_weight(ws):
    # TODO(unno): Input of the current LSTM implementation is shuffled
w = functions.stack(ws, axis=1)
shape = w.shape
return functions.reshape(w, (shape[0] * shape[1],) + shape[2:])
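# Note: w[0:4]/b[0:4] act on the input and w[4:8]/b[4:8] on the hidden state, in the gate
# order (i, f, c_bar, o) used by n_step_lstm (see check_forward above), whereas
# chainer.functions.lstm expects gates ordered (a, i, f, o); hence the [2, 0, 1, 3]
# reordering in lstm_without_dropout below.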
def count_close(x, y, atol=1e-4):
assert x.shape == y.shape
return int(sum(abs(x - y) / abs(x) < atol))
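# count_close counts, per batch, how many elements of the dropout-free reference match
# the n_step_lstm output within a relative tolerance. In TestNStepLSTMDropout below, an
# element matches only when no dropout mask zeroed its path, which happens with
# probability (1 - dropout) per dropout application, hence the expected counts of
# total * (1 - dropout) ** k.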
def lstm_without_dropout(n_layer, dropout, hx, cx, ws, bs, xs):
xws = [_stack_weight([w[2], w[0], w[1], w[3]]) for w in ws]
hws = [_stack_weight([w[6], w[4], w[5], w[7]]) for w in ws]
xbs = [_stack_weight([b[2], b[0], b[1], b[3]]) for b in bs]
hbs = [_stack_weight([b[6], b[4], b[5], b[7]]) for b in bs]
xs = [xs[i] for i in range(3)]
ys = []
for x in xs:
cx_next = []
hx_next = []
for layer in range(n_layer):
c = cx[layer]
h = hx[layer]
if layer != 0:
                # Only multiply by the dropout scaling ratio (no mask is applied here)
x = x * (1 / (1.0 - dropout))
lstm_in = functions.linear(x, xws[layer], xbs[layer]) + \
functions.linear(h, hws[layer], hbs[layer])
c_new, h_new = functions.lstm(c, lstm_in)
cx_next.append(c_new)
hx_next.append(h_new)
x = h_new
cx = cx_next
hx = hx_next
ys.append(x)
cy = functions.stack(cx)
hy = functions.stack(hx)
return hy, cy, ys
def rand_vector(shape):
# return cuda.cupy.random.randint(-2, 2, shape).astype('f')
return cuda.cupy.random.uniform(-1, 1, shape).astype('f')
# return cuda.cupy.ones(shape).astype('f')
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
}))
@attr.cudnn
class TestNStepLSTMDropout(unittest.TestCase):
batch = 20
length = 3
in_size = 1
out_size = 1
n_layers = 2
dropout = 0.3
n_tests = 100
def setUp(self):
self.xs = [rand_vector((self.batch, self.in_size))
for _ in range(self.length)]
h_shape = (self.n_layers, self.batch, self.out_size)
self.cx = rand_vector(h_shape)
self.hx = rand_vector(h_shape)
self.ws = []
self.bs = []
for i in range(self.n_layers):
weights = []
biases = []
for j in range(8):
if i == 0 and j < 4:
w_in = self.in_size
else:
w_in = self.out_size
weights.append(rand_vector((self.out_size, w_in)))
biases.append(rand_vector((self.out_size,)))
self.ws.append(weights)
self.bs.append(biases)
def assert_count(self, actual, expect):
self.assertTrue(expect * 0.8 < actual < expect * 1.2)
@condition.retry(5)
def test_forward_dropout_count(self):
y_counts = [0] * self.length
h_counts = [0] * self.n_layers
c_counts = [0] * self.n_layers
for _ in range(self.n_tests):
hy1, cy1, ys1 = lstm_without_dropout(
self.n_layers, self.dropout, self.hx, self.cx, self.ws,
self.bs, self.xs)
with chainer.using_config('use_cudnn', self.use_cudnn):
hy2, cy2, ys2 = functions.n_step_lstm(
self.n_layers, self.dropout, self.hx, self.cx, self.ws,
self.bs, self.xs)
for i in range(self.length):
y_counts[i] += count_close(ys1[i].data, ys2[i].data)
for i in range(self.n_layers):
h_counts[i] += count_close(hy1[i].data, hy2[i].data)
c_counts[i] += count_close(cy1[i].data, cy2[i].data)
total = self.batch * self.n_tests
for i in range(self.length):
self.assert_count(
y_counts[i],
total * (1 - self.dropout) ** ((self.n_layers - 1) * (i + 1)))
for i in range(self.n_layers):
self.assert_count(
h_counts[i], total * (1 - self.dropout) ** (self.length * i))
self.assert_count(
c_counts[i], total * (1 - self.dropout) ** (self.length * i))
testing.run_module(__name__, __file__)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import pyarrow as pa
from pyarrow import fs
from pyarrow.filesystem import FileSystem, LocalFileSystem
from pyarrow.tests.parquet.common import parametrize_legacy_dataset
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import _read_table, _test_dataframe
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_incremental_file_build(tempdir, use_legacy_dataset):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
writer = pq.ParquetWriter(out, arrow_table.schema, version='2.6')
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
writer.close()
buf = out.getvalue()
result = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
def test_validate_schema_write_table(tempdir):
# ARROW-2926
simple_fields = [
pa.field('POS', pa.uint32()),
pa.field('desc', pa.string())
]
simple_schema = pa.schema(simple_fields)
    # simple_table's schema does not match simple_schema: pa.array([1]) is inferred
    # as int64, whereas simple_schema declares 'POS' as uint32.
simple_from_array = [pa.array([1]), pa.array(['bla'])]
simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])
path = tempdir / 'simple_validate_schema.parquet'
with pq.ParquetWriter(path, simple_schema,
version='2.6',
compression='snappy', flavor='spark') as w:
with pytest.raises(ValueError):
w.write_table(simple_table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_writer_context_obj(tempdir, use_legacy_dataset):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
with pq.ParquetWriter(out, arrow_table.schema, version='2.6') as writer:
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
buf = out.getvalue()
result = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_writer_context_obj_with_exception(
tempdir, use_legacy_dataset
):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
error_text = 'Artificial Error'
try:
with pq.ParquetWriter(out,
arrow_table.schema,
version='2.6') as writer:
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
if i == 5:
raise ValueError(error_text)
except Exception as e:
assert str(e) == error_text
buf = out.getvalue()
result = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
@pytest.mark.parametrize("filesystem", [
None,
LocalFileSystem._get_instance(),
fs.LocalFileSystem(),
])
def test_parquet_writer_filesystem_local(tempdir, filesystem):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
path = str(tempdir / 'data.parquet')
with pq.ParquetWriter(
path, table.schema, filesystem=filesystem, version='2.6'
) as writer:
writer.write_table(table)
result = _read_table(path).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.s3
def test_parquet_writer_filesystem_s3(s3_example_fs):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
fs, uri, path = s3_example_fs
with pq.ParquetWriter(
path, table.schema, filesystem=fs, version='2.6'
) as writer:
writer.write_table(table)
result = _read_table(uri).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.s3
def test_parquet_writer_filesystem_s3_uri(s3_example_fs):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
fs, uri, path = s3_example_fs
with pq.ParquetWriter(uri, table.schema, version='2.6') as writer:
writer.write_table(table)
result = _read_table(path, filesystem=fs).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.s3
def test_parquet_writer_filesystem_s3fs(s3_example_s3fs):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
fs, directory = s3_example_s3fs
path = directory + "/test.parquet"
with pq.ParquetWriter(
path, table.schema, filesystem=fs, version='2.6'
) as writer:
writer.write_table(table)
result = _read_table(path, filesystem=fs).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_parquet_writer_filesystem_buffer_raises():
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
filesystem = fs.LocalFileSystem()
# Should raise ValueError when filesystem is passed with file-like object
with pytest.raises(ValueError, match="specified path is file-like"):
pq.ParquetWriter(
pa.BufferOutputStream(), table.schema, filesystem=filesystem
)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_writer_with_caller_provided_filesystem(use_legacy_dataset):
out = pa.BufferOutputStream()
class CustomFS(FileSystem):
def __init__(self):
self.path = None
self.mode = None
def open(self, path, mode='rb'):
self.path = path
self.mode = mode
return out
fs = CustomFS()
fname = 'expected_fname.parquet'
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.6') \
as writer:
writer.write_table(table)
assert fs.path == fname
assert fs.mode == 'wb'
assert out.closed
buf = out.getvalue()
table_read = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df_read, df)
# Should raise ValueError when filesystem is passed with file-like object
with pytest.raises(ValueError) as err_info:
pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
expected_msg = ("filesystem passed but where is file-like, so"
" there is nothing to open with filesystem.")
assert str(err_info) == expected_msg
|
|
from __future__ import annotations
import os
from collections import OrderedDict
from collections.abc import Iterable, Sequence
from logging import ERROR, WARNING, getLogger
from typing import ClassVar, Optional
import git
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yaml
from IPython.display import HTML
from pandas import DataFrame
from pandas.core.series import Series
import riip.material
# from numpy.typing import ArrayLike
logger = getLogger(__package__)
_dirname = os.path.dirname(__file__)
_ri_database = os.path.join(_dirname, "data", "refractiveindex.info-database")
_db_directory = os.path.join(_ri_database, "database")
_my_db_directory = os.path.join(_dirname, "data", "my_database")
_catalog_file = os.path.join(_dirname, "data", "catalog.csv")
_raw_data_file = os.path.join(_dirname, "data", "raw_data.csv")
_grid_data_file = os.path.join(_dirname, "data", "grid_data.h5")
_ri_database_repo = (
"https://github.com/polyanskiy/" + "refractiveindex.info-database.git"
)
_ri_database_patch = os.path.join(_dirname, "data", "riid.patch")
class RiiDataFrame:
"""A class that provides a Pandas DataFrame for 'refractiveindex.info database'.
Attributes:
db_path: The path to the refractiveindex.info-database/database.
my_db_path: The path to my_database.
catalog: The catalog.
catalog_file: The csv filename to store the catalog.
raw_data: The experimental data.
raw_data_file: The csv filename to store the raw_data.
grid_data_file: The hdf5 filename to store the grid_data.
"""
_catalog_columns: ClassVar[OrderedDict] = OrderedDict(
(
("id", np.int32),
("shelf", str),
("shelf_name", str),
("division", str),
("book", str),
("book_name", str),
("section", str),
("page", str),
("path", str),
("formula", np.int32),
("tabulated", str),
("num_n", np.int32),
("num_k", np.int32),
("wl_n_min", np.float64),
("wl_n_max", np.float64),
("wl_k_min", np.float64),
("wl_k_max", np.float64),
("wl_min", np.float64),
("wl_max", np.float64),
)
)
_raw_data_columns: ClassVar[OrderedDict] = OrderedDict(
(
("id", np.int32),
("c", np.float64),
("wl_n", np.float64),
("n", np.float64),
("wl_k", np.float64),
("k", np.float64),
)
)
_grid_data_columns: ClassVar[OrderedDict] = OrderedDict(
(("id", np.int32), ("wl", np.float64), ("n", np.float64), ("k", np.float64))
)
def __init__(
self,
db_path: str = _db_directory,
catalog_file: str = _catalog_file,
raw_data_file: str = _raw_data_file,
grid_data_file: str = _grid_data_file,
my_db_path: str = _my_db_directory,
):
"""Initialize the RiiDataFrame.
Args:
db_path: The path to the refractiveindex.info-database/database.
my_db_path: The path to my_database.
catalog_file: The filename of the catalog csv file.
raw_data_file: The filename of the experimental data csv file.
            grid_data_file: The filename of the grid wl-nk data hdf5 file.
"""
self._db_path: str = db_path
self._my_db_path: str = my_db_path
self._ri_database: str = os.path.dirname(self._db_path)
self._catalog_file: str = catalog_file
self._raw_data_file: str = raw_data_file
self._grid_data_file: str = grid_data_file
_catalog, _raw_data = self._load_catalog_and_raw_data()
self.catalog: DataFrame = _catalog
self.raw_data: DataFrame = _raw_data
self.__book_page_order = self._create_book_page_order()
def _load_catalog_and_raw_data(self) -> tuple[DataFrame, DataFrame]:
# Create csv files
if not os.path.isfile(self._catalog_file):
logger.warning("Catalog file not found.")
if not os.path.isfile(os.path.join(self._db_path, "library.yml")):
logger.warning("Cloning Repository...")
# repo = git.Repo.clone_from(
# _ri_database_repo, self._ri_database, branch="master"
# )
# repo.git.apply(_ri_database_patch)
git.Repo.clone_from(
_ri_database_repo, self._ri_database, branch="master"
)
logger.warning("Done.")
logger.warning("Creating catalog file...")
catalog = self._add_my_db_to_catalog(self._create_catalog())
logger.warning("Done.")
# Preparing raw_data
logger.warning("Creating raw data file...")
raw_data, catalog = self._create_raw_data_and_modify_catalog(catalog)
logger.warning("Done.")
# Preparing grid_data
logger.warning("Updating grid data file...")
catalog = catalog.set_index("id")
raw_data = raw_data.set_index("id")
self._create_grid_data(catalog, raw_data)
logger.warning("Done.")
else:
catalog = pd.read_csv(
self._catalog_file,
dtype=self._catalog_columns,
index_col="id",
na_filter=False,
)
raw_data = pd.read_csv(
self._raw_data_file,
dtype=self._raw_data_columns,
index_col="id",
na_filter=False,
)
return catalog, raw_data
@staticmethod
def _extract_entry(db_path: str, start_id: int = 0) -> Iterable:
"""Yield a single data set."""
reference_path = os.path.normpath(db_path)
library_file = os.path.join(reference_path, "library.yml")
with open(library_file, "r", encoding="utf-8") as f:
library = yaml.safe_load(f)
idx = start_id
shelf = "main"
book = "Ag (Experimental data)"
page = "Johnson"
try:
for sh in library:
shelf = sh["SHELF"]
if shelf == "3d":
# This shelf does not seem to contain new data.
break
shelf_name = sh["name"]
division = None
for b in sh["content"]:
if "DIVIDER" in b:
division = b["DIVIDER"]
else:
if division is None:
raise Exception("'DIVIDER' is missing in 'library.yml'.")
if "DIVIDER" not in b["content"]:
section = ""
for p in b["content"]:
if "DIVIDER" in p:
# This DIVIDER specifies the phase of the
# material such as gas, liquid or solid, so it
# is added to the book and book_name with
# parentheses.
section = p["DIVIDER"]
else:
book = b["BOOK"]
book_name = b["name"]
page = p["PAGE"]
path = os.path.join(
reference_path, "data", os.path.normpath(p["data"])
)
logger.debug("{0} {1} {2}".format(idx, book, page))
row = [idx, shelf, shelf_name, division]
row += [book, book_name, section, page, path]
row += [0, "f", 0, 0, 0, 0, 0, 0, 0, 0]
yield row
idx += 1
except Exception as e:
message = (
"There seems to be some inconsistency in the library.yml "
+ "around id={}, shelf={}, book={}, page={}.".format(
idx, shelf, book, page
)
)
raise Exception(message) from e
def _create_catalog(self) -> DataFrame:
"""Create catalog DataFrame from library.yml."""
logger.info("Creating catalog...")
df = DataFrame(
list(self._extract_entry(self._db_path)),
columns=self._catalog_columns.keys(),
)
logger.info("Done.")
return df.astype(self._catalog_columns)
def _add_my_db_to_catalog(self, catalog: DataFrame) -> DataFrame:
"""Add data in my_database to catalog DataFrame."""
logger.info("Adding my_db to catalog...")
start_id = catalog["id"].values[-1] + 1
logger.debug(start_id)
df = DataFrame(
list(self._extract_entry(self._my_db_path, start_id)),
columns=self._catalog_columns.keys(),
)
df = pd.concat([catalog, df], ignore_index=True)
logger.info("Done.")
return df
def _create_book_page_order(self) -> Series:
"""Create [id, book+page string] array used to search id."""
cl = self.catalog
book_page = {
idx: f"{cl.loc[idx, 'book']}{cl.loc[idx, 'page']}" for idx in cl.index
}
return Series(book_page).sort_values()
def book_page_to_id(self, params: dict) -> int:
bp = params["book"] + params["page"]
ind = np.searchsorted(self.__book_page_order, bp)
if self.__book_page_order.iloc[ind] != bp:
raise ValueError(bp + " could not be found")
return self.__book_page_order.index[ind]
def _extract_raw_data(
self, idx: int, catalog: DataFrame
) -> tuple[DataFrame, DataFrame]:
"""Yield a single raw data set.
Some data are inserted into the catalog.
Args:
catalog: The catalog.
idx: The ID number of the data set.
"""
path = catalog.loc[idx, "path"]
with open(path, "r", encoding="utf-8") as f:
data_list = yaml.safe_load(f)["DATA"]
wl_n_min = wl_k_min = 0.0
wl_n_max = wl_k_max = np.inf
formula = 0
tabulated = ""
cs = []
wls_n = []
wls_k = []
ns = []
ks = []
num_n = num_k = 0
for data in data_list:
data_type, data_set = data["type"].strip().split()
# For tabulated data
if data_type == "tabulated":
if data_set == "nk":
tabulated += data_set
wls_n, ns, ks = np.array(
[
line.strip().split()
for line in data["data"].strip().split("\n")
],
dtype=float,
).T
wls_n, inds = np.unique(wls_n, return_index=True)
ns = ns[inds]
ks = ks[inds]
inds = np.argsort(wls_n)
wls_n = list(wls_n[inds])
wls_k = wls_n
ns = list(ns[inds])
ks = list(ks[inds])
wl_n_min = wl_k_min = wls_n[0]
wl_n_max = wl_k_max = wls_n[-1]
num_n = len(wls_n)
num_k = len(wls_k)
elif data_set == "n":
tabulated += data_set
wls_n, ns = np.array(
[
line.strip().split()
for line in data["data"].strip().split("\n")
],
dtype=float,
).T
wls_n, inds = np.unique(wls_n, return_index=True)
ns = ns[inds]
inds = np.argsort(wls_n)
wls_n = list(wls_n[inds])
ns = list(ns[inds])
wl_n_min = wls_n[0]
wl_n_max = wls_n[-1]
num_n = len(wls_n)
elif data_set == "k":
tabulated += data_set
wls_k, ks = np.array(
[
line.strip().split()
for line in data["data"].strip().split("\n")
],
dtype=float,
).T
wls_k, inds = np.unique(wls_k, return_index=True)
ks = ks[inds]
inds = np.argsort(wls_k)
wls_k = list(wls_k[inds])
ks = list(ks[inds])
wl_k_min = wls_k[0]
wl_k_max = wls_k[-1]
num_k = len(wls_k)
else:
raise Exception("DATA is broken.")
# For formulas
elif data_type == "formula":
formula = data_set
wl_n_min, wl_n_max = [
float(s) for s in data["wavelength_range"].strip().split()
]
cs = [float(s) for s in data["coefficients"].strip().split()]
else:
raise Exception("DATA has unknown contents {}".format(data_type))
if len(tabulated) > 2:
raise Exception("Too many tabulated data set are provided")
elif "nn" in tabulated or "kk" in tabulated:
raise Exception("There is redundancy in n or k.")
elif tabulated == "kn":
tabulated = "nk"
elif tabulated == "":
tabulated = "f"
if tabulated == "k" and formula != 0:
if wl_n_min < wl_k_min:
wls_k = [wl_n_min] + wls_k
ks = [min(ks)] + ks
num_k += 1
if wl_n_max > wl_k_max:
wls_k = wls_k + [wl_n_max]
ks = ks + [min(ks)]
num_k += 1
wl_k_min, wl_k_max = wl_n_min, wl_n_max
if "k" not in tabulated:
wl_k_min, wl_k_max = wl_n_min, wl_n_max
wl_min = max(wl_n_min, wl_k_min)
wl_max = min(wl_n_max, wl_k_max)
# The coefficients not included in the formula must be zero.
num_c = len(cs)
if formula != 0:
cs += [0.0] * (24 - num_c)
num_c = 24
# All the arrays must have the same length.
num = max(num_n, num_k, num_c)
_cs = np.array(cs + [0.0] * (num - num_c), dtype=np.float64)
_wls_n = np.array(wls_n + [0.0] * (num - num_n), dtype=np.float64)
_ns = np.array(ns + [0.0] * (num - num_n), dtype=np.float64)
_wls_k = np.array(wls_k + [0.0] * (num - num_k), dtype=np.float64)
_ks = np.array(ks + [0.0] * (num - num_k), dtype=np.float64)
# Rewrite catalog with the obtained data
catalog.loc[idx, "formula"] = formula
catalog.loc[idx, "tabulated"] = tabulated
catalog.loc[idx, "num_n"] = num_n
catalog.loc[idx, "num_k"] = num_k
catalog.loc[idx, "wl_n_min"] = wl_n_min
catalog.loc[idx, "wl_n_max"] = wl_n_max
catalog.loc[idx, "wl_k_min"] = wl_k_min
catalog.loc[idx, "wl_k_max"] = wl_k_max
catalog.loc[idx, "wl_min"] = wl_min
catalog.loc[idx, "wl_max"] = wl_max
df = DataFrame(
{
key: val
for key, val in zip(
self._raw_data_columns.keys(), [idx, _cs, _wls_n, _ns, _wls_k, _ks]
)
}
)
# Arrange the columns according to the order of _raw_data_columns
df = df.loc[:, self._raw_data_columns.keys()].astype(self._raw_data_columns)
return df, catalog
def _create_raw_data_and_modify_catalog(
self, catalog: DataFrame
) -> tuple[DataFrame, DataFrame]:
"""Create a DataFrame for experimental data."""
logger.info("Creating raw data...")
df = DataFrame(columns=self._raw_data_columns)
for idx in catalog.index:
logger.debug("{}: {}".format(idx, catalog.loc[idx, "path"]))
df_idx, catalog = self._extract_raw_data(idx, catalog)
df = pd.concat([df, df_idx], ignore_index=True)
df = df.astype(self._raw_data_columns)
catalog.to_csv(self._catalog_file, index=False, encoding="utf-8")
df.to_csv(self._raw_data_file, index=False, encoding="utf-8")
logger.info("Done.")
return df, catalog
def load_grid_data(self, id: Optional[int] = None) -> DataFrame:
"""Load grid data of (wl, n, k) for given id.
Args:
id (Optional[int]): ID number. If id is None, all the data is loaded.
Defaults to None.
Returns:
DataFrame: Grid data of (wl, n, k).
(wl, n, k) = (wavelength, refractive index, extinction coefficient).
"""
if not os.path.isfile(self._grid_data_file):
logger.warning("Grid data file not found.")
logger.warning("Creating grid data file...")
self._create_grid_data(self.catalog, self.raw_data)
logger.warning("Done.")
else:
logger.info("Grid data file found at {}".format(self._grid_data_file))
if id is None:
return pd.read_hdf(self._grid_data_file).set_index("id")
return pd.read_hdf(self._grid_data_file, where=f"id == {id}").set_index("id")
def _create_grid_data(self, catalog: DataFrame, raw_data: DataFrame) -> None:
"""Create a DataFrame for the wl-nk data."""
logger.info("Creating grid data...")
columns = self._grid_data_columns.keys()
df = DataFrame(columns=columns)
logger.setLevel(ERROR)
for idx in catalog.index:
material = riip.material.RiiMaterial(idx, catalog, raw_data)
wl_min = material.catalog.loc["wl_min"]
wl_max = material.catalog.loc["wl_max"]
wls = np.linspace(wl_min, wl_max, 200)
ns = material.n(wls)
ks = material.k(wls)
data = {key: val for key, val in zip(columns, [idx, wls, ns, ks])}
df = pd.concat([df, DataFrame(data).loc[:, columns]], ignore_index=True)
logger.setLevel(WARNING)
df = df.astype(self._grid_data_columns)
df.to_hdf(
self._grid_data_file,
"grid_data",
mode="w",
data_columns=["id"],
format="table",
)
logger.info("Done.")
def update_db(self) -> None:
"""Pull repository and update local database."""
if not os.path.isfile(os.path.join(self._db_path, "library.yml")):
logger.warning("Cloning Repository.")
git.Repo.clone_from(_ri_database_repo, self._ri_database, branch="master")
logger.warning("Done.")
else:
logger.warning("Pulling Repository...")
repo = git.Repo(self._ri_database)
repo.remotes.origin.pull()
logger.warning("Done.")
logger.warning("Updating catalog file...")
catalog = self._add_my_db_to_catalog(self._create_catalog())
logger.warning("Done.")
logger.warning("Updating raw data file...")
self.raw_data, self.catalog = self._create_raw_data_and_modify_catalog(catalog)
self.catalog = self.catalog.set_index("id")
self.raw_data = self.raw_data.set_index("id")
logger.warning("Done.")
logger.warning("Updating grid data file...")
self._create_grid_data(self.catalog, self.raw_data)
logger.warning("Done.")
logger.warning("All Done.")
"""."""
def search(self, name: str) -> DataFrame:
"""Search pages which contain the name of material.
Args:
name (str): Name of material
Returns:
DataFrame: Simplified catalog
"""
columns = [
"book",
"section",
"page",
"formula",
"tabulated",
"wl_min",
"wl_max",
]
df = self.catalog[
(
(self.catalog["book"].str.contains(name))
| (
self.catalog["book_name"]
.str.replace("<sub>", "")
.str.replace("</sub>", "")
.str.lower()
.str.contains(name.lower())
)
)
]
return df.loc[:, columns]
def select(self, cond: str) -> DataFrame:
"""Select pages that fullfil the condition.
Args:
cond (str): Query condition, such as '1.5 <= n <= 2 & 1.0 <= wl <= 2.0'
Returns:
DataFrame: Simplified catalog
"""
columns = [
"book",
"section",
"page",
"formula",
"tabulated",
"wl_min",
"wl_max",
]
gd = self.load_grid_data()
id_list = gd.query(cond).index.unique()
return self.catalog.loc[id_list, columns]
def show(self, id: int | Sequence[int]) -> DataFrame:
"""Summary of page(s) of ID (list of IDs).
Args:
id (Union[int, Sequence[int]]): ID number
Returns:
DataFrame: Simplified catalog
"""
columns = [
"book",
"section",
"page",
"formula",
"tabulated",
"wl_min",
"wl_max",
]
return self.catalog.loc[id, columns]
def material(self, params: dict) -> riip.material.Material:
"""Create instance of Material class associated with ID.
Args:
params (dict): Parameter dict that can contain the following values:
'id': ID number (int)
'book': book value in catalog of RiiDataFrame. (str)
'page': page value in catalog of RiiDataFrame. (str)
'RI': Constant refractive index. (complex)
'e': Constant permittivity. (complex)
'bound_check': True if bound check should be done. Defaults to True. (bool)
'im_factor': A magnification factor multiplied to the imaginary part of permittivity. Defaults to 1.0. (float)
Returns:
Material: A class that provides refractive index, extinction coefficient and dielectric function of the material
"""
return riip.material.Material(params, self)
def read(self, id: int, as_dict: bool = False):
"""Return contants of a page associated with the id.
Args:
id (int): ID number
as_dict (bool): If True, the page contents are returned as python dict
Returns:
Union[str, dict]: Page contents
"""
path = self.catalog.loc[id, "path"]
with open(path) as fd:
if as_dict:
contents = yaml.safe_load(fd)
else:
contents = fd.read()
return contents
def references(self, id: int) -> HTML:
"""Return REFERENCES as IPython.display.HTML class.
Args:
id (int): ID number
Returns:
HTML: REFERENCES as IPython.display.HTML class
"""
contents = self.read(id, as_dict=True)
return HTML(contents["REFERENCES"])
def plot(
self,
id: int,
comp: str = "n",
fmt1: Optional[str] = "-",
fmt2: Optional[str] = "--",
**kwargs,
):
"""Plot refractive index, extinction coefficient or permittivity.
Args:
id (int): ID number.
comp (str): 'n', 'k' or 'eps'.
fmt1 (Union[str, None]): Plot format for n and Re(eps).
fmt2 (Union[str, None]): Plot format for k and Im(eps).
"""
label = f"{self.catalog.loc[id, 'book']} {self.catalog.loc[id, 'page']}"
df = self.load_grid_data(id)
wls = df["wl"]
ns = df["n"]
ks = df["k"]
kwargs.setdefault("lw", 4)
kwargs.setdefault("ms", 8)
if comp == "n":
plt.plot(wls, ns, fmt1, label=label, **kwargs)
plt.ylabel(r"$n$")
elif comp == "k":
plt.plot(wls, ks, fmt2, label=label, **kwargs)
plt.ylabel(r"$k$")
elif comp == "eps":
eps_r = ns ** 2 - ks ** 2
eps_i = 2 * ns * ks
(line,) = plt.plot(wls, eps_r, fmt1, label=label, **kwargs)
color = line.get_color()
plt.plot(wls, eps_i, fmt2, color=color, **kwargs)
plt.ylabel(r"$\varepsilon$")
plt.xlabel(r"$\lambda$ $[\mathrm{\mu m}]$")
plt.legend()
if __name__ == "__main__":
from logging import DEBUG, Formatter, StreamHandler, getLogger
logger = getLogger("")
formatter = Formatter(fmt="%(levelname)s:[%(name)s.%(funcName)s]: %(message)s")
logger.setLevel(DEBUG)
stream_handler = StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(DEBUG)
logger.addHandler(stream_handler)
rii_df = RiiDataFrame()
rii_df.update_db()
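# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The helper below is
# never called automatically; it only illustrates the public API defined above.
# The "Ag"/"Johnson" book/page pair matches the defaults used in _extract_entry
# and is assumed to exist in the downloaded database.
def _example_usage():
    rii = RiiDataFrame()
    # Full-text search over the 'book' and 'book_name' columns.
    print(rii.search("Ag"))
    # Resolve a (book, page) pair to an integer id and show its summary row.
    idx = rii.book_page_to_id({"book": "Ag", "page": "Johnson"})
    print(rii.show(idx))
    # Build a Material for that id and plot its refractive index.
    silver = rii.material({"id": idx, "bound_check": True})
    rii.plot(idx, comp="n")
    return silver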
|
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006,2007,2008,2009 Frank Scholz <coherence@beebits.net>
from sets import Set
from twisted.internet import reactor, defer
from twisted.internet.task import LoopingCall
from twisted.python import failure
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core import DIDLLite
import string
import os
import platform
from StringIO import StringIO
import tokenize
import pygst
pygst.require('0.10')
import gst
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
from coherence import log
class Player(log.Loggable):
logCategory = 'gstreamer_player'
max_playbin_volume = 1.
def __init__(self, default_mimetype='audio/mpeg', audio_sink_name=None,
video_sink_name=None, audio_sink_options=None,
video_sink_options=None):
log.Loggable.__init__(self)
self.audio_sink_name = audio_sink_name or "autoaudiosink"
self.video_sink_name = video_sink_name or "autovideosink"
self.audio_sink_options = audio_sink_options or {}
self.video_sink_options = video_sink_options or {}
self.player = None
self.source = None
self.sink = None
self.bus = None
self.views = []
self.playing = False
self.duration = None
self.mimetype = default_mimetype
self.create_pipeline(self.mimetype)
def add_view(self, view):
self.views.append(view)
def remove_view(self, view):
self.views.remove(view)
def update(self, message=None):
for v in self.views:
v(message=message)
def _is_not_playbin2_friendly(self):
uname = platform.uname()[1]
result = False
if uname.startswith('Nokia'):
try:
device = uname.split("-")[1]
except:
device = "unknown"
result = device != "N900"
return result
def create_pipeline(self, mimetype):
self.debug("creating pipeline")
if self._is_not_playbin2_friendly():
self.bus = None
self.player = None
self.source = None
self.sink = None
if mimetype == 'application/ogg':
self.player = gst.parse_launch('gnomevfssrc name=source ! oggdemux ! ivorbisdec ! audioconvert ! dsppcmsink name=sink')
self.player.set_name('oggplayer')
self.set_volume = self.set_volume_dsp_pcm_sink
self.get_volume = self.get_volume_dsp_pcm_sink
elif mimetype == 'application/flac':
self.player = gst.parse_launch('gnomevfssrc name=source ! flacdemux ! flacdec ! audioconvert ! dsppcmsink name=sink')
self.player.set_name('flacplayer')
self.set_volume = self.set_volume_dsp_pcm_sink
self.get_volume = self.get_volume_dsp_pcm_sink
else:
self.player = gst.parse_launch('gnomevfssrc name=source ! id3lib ! dspmp3sink name=sink')
self.player.set_name('mp3player')
self.set_volume = self.set_volume_dsp_mp3_sink
self.get_volume = self.get_volume_dsp_mp3_sink
self.source = self.player.get_by_name('source')
self.sink = self.player.get_by_name('sink')
self.player_uri = 'location'
self.mute = self.mute_hack
self.unmute = self.unmute_hack
self.get_mute = self.get_mute_hack
else:
self.player = gst.element_factory_make('playbin2', 'player')
self.player_uri = 'uri'
self.source = self.sink = self.player
self.set_volume = self.set_volume_playbin
self.get_volume = self.get_volume_playbin
self.mute = self.mute_playbin
self.unmute = self.unmute_playbin
self.get_mute = self.get_mute_playbin
audio_sink = gst.element_factory_make(self.audio_sink_name)
self._set_props(audio_sink, self.audio_sink_options)
self.player.set_property("audio-sink", audio_sink)
video_sink = gst.element_factory_make(self.video_sink_name)
self._set_props(video_sink, self.video_sink_options)
self.player.set_property("video-sink", video_sink)
self.bus = self.player.get_bus()
self.player_clean = True
self.bus.connect('message', self.on_message)
self.bus.add_signal_watch()
self.update_LC = LoopingCall(self.update)
def _set_props(self, element, props):
for option, value in props.iteritems():
value = self._py_value(value)
element.set_property(option, value)
def _py_value(self, s):
value = None
g = tokenize.generate_tokens(StringIO(s).readline)
for toknum, tokval, _, _, _ in g:
if toknum == tokenize.NUMBER:
if '.' in tokval:
value = float(tokval)
else:
value = int(tokval)
elif toknum == tokenize.NAME:
value = tokval
if value is not None:
break
return value
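# Illustrative results of the token-based parsing above (not exhaustive):
#   _py_value("0.04") -> 0.04, _py_value("48000") -> 48000, _py_value("true") -> "true"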
def get_volume_playbin(self):
""" playbin volume is a double from 0.0 - 10.0
"""
volume = self.sink.get_property('volume')
return int((volume * 100) / self.max_playbin_volume)
def set_volume_playbin(self, volume):
volume = int(volume)
if volume < 0:
volume = 0
if volume > 100:
volume = 100
volume = (volume * self.max_playbin_volume) / 100.
self.sink.set_property('volume', volume)
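# Illustrative (with the default max_playbin_volume of 1.0): set_volume_playbin(50)
# sets the playbin 'volume' property to 0.5, and get_volume_playbin() then
# reports 50 again.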
def get_volume_dsp_mp3_sink(self):
""" dspmp3sink volume is a n in from 0 to 65535
"""
volume = self.sink.get_property('volume')
return int(volume * 100 / 65535)
def set_volume_dsp_mp3_sink(self, volume):
volume = int(volume)
if volume < 0:
volume = 0
if volume > 100:
volume = 100
self.sink.set_property('volume', volume * 65535 / 100)
def get_volume_dsp_pcm_sink(self):
""" dspmp3sink volume is a n in from 0 to 65535
"""
volume = self.sink.get_property('volume')
return int(volume * 100 / 65535)
def set_volume_dsp_pcm_sink(self, volume):
volume = int(volume)
if volume < 0:
volume = 0
if volume > 100:
volume = 100
self.sink.set_property('volume', volume * 65535 / 100)
def mute_playbin(self):
self.player.set_property('mute', True)
def unmute_playbin(self):
self.player.set_property('mute', False)
def get_mute_playbin(self):
return self.player.get_property('mute')
def mute_hack(self):
if hasattr(self, 'stored_volume'):
self.stored_volume = self.sink.get_property('volume')
self.sink.set_property('volume', 0)
else:
self.sink.set_property('mute', True)
def unmute_hack(self):
if hasattr(self, 'stored_volume'):
self.sink.set_property('volume', self.stored_volume)
else:
self.sink.set_property('mute', False)
def get_mute_hack(self):
if hasattr(self, 'stored_volume'):
muted = self.sink.get_property('volume') == 0
else:
try:
muted = self.sink.get_property('mute')
except TypeError:
if not hasattr(self, 'stored_volume'):
self.stored_volume = self.sink.get_property('volume')
muted = self.stored_volume == 0
except:
muted = False
self.warning("can't get mute state")
return muted
def get_state(self):
return self.player.get_state()
def get_uri(self):
""" playbin2 has an empty uri property after a
pipeline stops, as the uri is nowadays the next
track to play, not the current one
"""
if self.player.get_name() != 'player':
return self.source.get_property(self.player_uri)
else:
try:
return self.current_uri
except:
return None
def set_uri(self, uri):
self.source.set_property(self.player_uri, uri.encode('utf-8'))
if self.player.get_name() == 'player':
self.current_uri = uri.encode('utf-8')
def on_message(self, bus, message):
#print "on_message", message
#print "from", message.src.get_name()
t = message.type
#print t
if t == gst.MESSAGE_ERROR:
err, debug = message.parse_error()
self.warning("Gstreamer error: %s,%r", err.message, debug)
if self.playing == True:
self.seek('-0')
#self.player.set_state(gst.STATE_READY)
elif t == gst.MESSAGE_TAG:
for key in message.parse_tag().keys():
self.tags[key] = message.structure[key]
#print self.tags
elif t == gst.MESSAGE_STATE_CHANGED:
if message.src == self.player:
old, new, pending = message.parse_state_changed()
#print "player (%s) state_change:" %(message.src.get_path_string()), old, new, pending
if new == gst.STATE_PLAYING:
self.playing = True
self.update_LC.start(1, False)
self.update()
elif old == gst.STATE_PLAYING:
self.playing = False
try:
self.update_LC.stop()
except:
pass
self.update()
#elif new == gst.STATE_READY:
# self.update()
elif t == gst.MESSAGE_EOS:
self.debug("reached file end")
self.seek('-0')
self.update(message=gst.MESSAGE_EOS)
def query_position(self):
#print "query_position"
try:
position, format = self.player.query_position(gst.FORMAT_TIME)
except:
#print "CLOCK_TIME_NONE", gst.CLOCK_TIME_NONE
position = gst.CLOCK_TIME_NONE
position = 0
#print position
if self.duration == None:
try:
self.duration, format = self.player.query_duration(gst.FORMAT_TIME)
except:
self.duration = gst.CLOCK_TIME_NONE
self.duration = 0
#import traceback
#print traceback.print_exc()
#print self.duration
r = {}
if self.duration == 0:
self.duration = None
self.debug("duration unknown")
return r
r[u'raw'] = {u'position': unicode(str(position)), u'remaining': unicode(str(self.duration - position)), u'duration': unicode(str(self.duration))}
position_human = u'%d:%02d' % (divmod(position / 1000000000, 60))
duration_human = u'%d:%02d' % (divmod(self.duration / 1000000000, 60))
remaining_human = u'%d:%02d' % (divmod((self.duration - position) / 1000000000, 60))
r[u'human'] = {u'position': position_human, u'remaining': remaining_human, u'duration': duration_human}
r[u'percent'] = {u'position': position * 100 / self.duration, u'remaining': 100 - (position * 100 / self.duration)}
self.debug(r)
return r
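# Shape of the dict returned above, with illustrative values for a 60 second
# track 4 seconds in (not taken from a real run):
#   {u'raw':     {u'position': u'4000000000', u'remaining': u'56000000000', u'duration': u'60000000000'},
#    u'human':   {u'position': u'0:04', u'remaining': u'0:56', u'duration': u'1:00'},
#    u'percent': {u'position': 6, u'remaining': 94}}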
def load(self, uri, mimetype):
self.debug("load --> %r %r", uri, mimetype)
_, state, _ = self.player.get_state()
if(state == gst.STATE_PLAYING or state == gst.STATE_PAUSED):
self.stop()
#print "player -->", self.player.get_name()
if self.player.get_name() != 'player':
self.create_pipeline(mimetype)
self.player.set_state(gst.STATE_READY)
self.set_uri(uri)
self.player_clean = True
self.duration = None
self.mimetype = mimetype
self.tags = {}
#self.player.set_state(gst.STATE_PAUSED)
#self.update()
self.debug("load <--")
self.play()
def play(self):
uri = self.get_uri()
mimetype = self.mimetype
self.debug("play --> %r %r", uri, mimetype)
if self.player.get_name() != 'player':
if self.player_clean == False:
#print "rebuild pipeline"
self.player.set_state(gst.STATE_NULL)
self.create_pipeline(mimetype)
self.set_uri(uri)
self.player.set_state(gst.STATE_READY)
else:
self.player_clean = True
self.player.set_state(gst.STATE_PLAYING)
self.debug("play <--")
def pause(self):
self.debug("pause --> %r", self.get_uri())
self.player.set_state(gst.STATE_PAUSED)
self.debug("pause <--")
def stop(self):
self.debug("stop --> %r", self.get_uri())
self.seek('-0')
self.player.set_state(gst.STATE_READY)
self.update(message=gst.MESSAGE_EOS)
self.debug("stop <-- %r ", self.get_uri())
def seek(self, location):
"""
@param location: simple number = time to seek to, in seconds
+nL = relative seek forward n seconds
-nL = relative seek backwards n seconds
"""
_, state, _ = self.player.get_state()
if state != gst.STATE_PAUSED:
self.player.set_state(gst.STATE_PAUSED)
l = long(location) * 1000000000
p = self.query_position()
#print p['raw']['position'], l
if location[0] == '+':
l = long(p[u'raw'][u'position']) + (long(location[1:]) * 1000000000)
l = min(l, long(p[u'raw'][u'duration']))
elif location[0] == '-':
if location == '-0':
l = 0L
else:
l = long(p[u'raw'][u'position']) - (long(location[1:]) * 1000000000)
l = max(l, 0L)
self.debug("seeking to %r", l)
"""
self.player.seek( 1.0, gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
gst.SEEK_TYPE_SET, l,
gst.SEEK_TYPE_NONE, 0)
"""
event = gst.event_new_seek(1.0, gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_KEY_UNIT,
gst.SEEK_TYPE_SET, l,
gst.SEEK_TYPE_NONE, 0)
res = self.player.send_event(event)
if res:
pass
#print "setting new stream time to 0"
#self.player.set_new_stream_time(0L)
elif location != '-0':
print "seek to %r failed" % location
if location == '-0':
content_type, _ = self.mimetype.split("/")
try:
self.update_LC.stop()
except:
pass
if self.player.get_name() != 'player':
self.player.set_state(gst.STATE_NULL)
self.player_clean = False
elif content_type != "image":
self.player.set_state(gst.STATE_READY)
self.update()
else:
self.player.set_state(state)
if state == gst.STATE_PAUSED:
self.update()
class GStreamerPlayer(log.Loggable, Plugin):
""" a backend with a GStreamer based audio player
needs gnomevfssrc from gst-plugins-base
unfortunately gnomevfs has way too many dependencies
# not working -> http://bugzilla.gnome.org/show_bug.cgi?id=384140
# needs the neonhttpsrc plugin from gst-plugins-bad
# tested with CVS version
# and with this patch applied
# --> http://bugzilla.gnome.org/show_bug.cgi?id=375264
# not working
and id3demux from gst-plugins-good CVS too
"""
logCategory = 'gstreamer_player'
implements = ['MediaRenderer']
vendor_value_defaults = {'RenderingControl': {'A_ARG_TYPE_Channel': 'Master'},
'AVTransport': {'A_ARG_TYPE_SeekMode': ('ABS_TIME', 'REL_TIME', 'TRACK_NR')}}
vendor_range_defaults = {'RenderingControl': {'Volume': {'maximum': 100}}}
def __init__(self, device, **kwargs):
log.Loggable.__init__(self)
if(device.coherence.config.get('use_dbus', 'no') != 'yes' and
device.coherence.config.get('glib', 'no') != 'yes'):
raise Exception('this media renderer needs use_dbus or glib enabled in the configuration')
self.name = kwargs.get('name', 'GStreamer Audio Player')
audio_sink_name = kwargs.get("audio_sink_name")
audio_sink_options = kwargs.get("audio_sink_options")
video_sink_name = kwargs.get("video_sink_name")
video_sink_options = kwargs.get("video_sink_options")
self.player = Player(audio_sink_name=audio_sink_name,
video_sink_name=video_sink_name,
audio_sink_options=audio_sink_options,
video_sink_options=video_sink_options)
self.player.add_view(self.update)
self.metadata = None
self.duration = None
self.view = []
self.tags = {}
self.server = device
self.playcontainer = None
self.dlna_caps = ['playcontainer-0-1']
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def __repr__(self):
return str(self.__class__).split('.')[-1]
def update(self, message=None):
_, current, _ = self.player.get_state()
self.debug("update current %r", current)
connection_manager = self.server.connection_manager_server
av_transport = self.server.av_transport_server
conn_id = connection_manager.lookup_avt_id(self.current_connection_id)
if current == gst.STATE_PLAYING:
state = 'playing'
av_transport.set_variable(conn_id, 'TransportState', 'PLAYING')
elif current == gst.STATE_PAUSED:
state = 'paused'
av_transport.set_variable(conn_id, 'TransportState',
'PAUSED_PLAYBACK')
elif self.playcontainer != None and message == gst.MESSAGE_EOS and \
self.playcontainer[0] + 1 < len(self.playcontainer[2]):
state = 'transitioning'
av_transport.set_variable(conn_id, 'TransportState', 'TRANSITIONING')
next_track = ()
item = self.playcontainer[2][self.playcontainer[0] + 1]
infos = connection_manager.get_variable('SinkProtocolInfo')
local_protocol_infos = infos.value.split(',')
res = item.res.get_matching(local_protocol_infos,
protocol_type='internal')
if len(res) == 0:
res = item.res.get_matching(local_protocol_infos)
if len(res) > 0:
res = res[0]
infos = res.protocolInfo.split(':')
remote_protocol, remote_network, remote_content_format, _ = infos
didl = DIDLLite.DIDLElement()
didl.addItem(item)
next_track = (res.data, didl.toString(), remote_content_format)
self.playcontainer[0] = self.playcontainer[0] + 1
if len(next_track) == 3:
av_transport.set_variable(conn_id, 'CurrentTrack',
self.playcontainer[0] + 1)
self.load(next_track[0], next_track[1], next_track[2])
self.play()
else:
state = 'idle'
av_transport.set_variable(conn_id, 'TransportState', 'STOPPED')
elif message == gst.MESSAGE_EOS and \
len(av_transport.get_variable('NextAVTransportURI').value) > 0:
state = 'transitioning'
av_transport.set_variable(conn_id, 'TransportState', 'TRANSITIONING')
CurrentURI = av_transport.get_variable('NextAVTransportURI').value
metadata = av_transport.get_variable('NextAVTransportURIMetaData')
CurrentURIMetaData = metadata.value
av_transport.set_variable(conn_id, 'NextAVTransportURI', '')
av_transport.set_variable(conn_id, 'NextAVTransportURIMetaData', '')
r = self.upnp_SetAVTransportURI(self, InstanceID=0,
CurrentURI=CurrentURI,
CurrentURIMetaData=CurrentURIMetaData)
if r == {}:
self.play()
else:
state = 'idle'
av_transport.set_variable(conn_id, 'TransportState', 'STOPPED')
else:
state = 'idle'
av_transport.set_variable(conn_id, 'TransportState', 'STOPPED')
self.info("update %r", state)
self._update_transport_position(state)
def _update_transport_position(self, state):
connection_manager = self.server.connection_manager_server
av_transport = self.server.av_transport_server
conn_id = connection_manager.lookup_avt_id(self.current_connection_id)
position = self.player.query_position()
#print position
for view in self.view:
view.status(self.status(position))
if position.has_key(u'raw'):
if self.duration == None and 'duration' in position[u'raw']:
self.duration = int(position[u'raw'][u'duration'])
if self.metadata != None and len(self.metadata) > 0:
# FIXME: duration breaks client parsing MetaData?
elt = DIDLLite.DIDLElement.fromString(self.metadata)
for item in elt:
for res in item.findall('res'):
formatted_duration = self._format_time(self.duration)
res.attrib['duration'] = formatted_duration
self.metadata = elt.toString()
#print self.metadata
if self.server != None:
av_transport.set_variable(conn_id,
'AVTransportURIMetaData',
self.metadata)
av_transport.set_variable(conn_id,
'CurrentTrackMetaData',
self.metadata)
self.info("%s %d/%d/%d - %d%%/%d%% - %s/%s/%s", state,
string.atol(position[u'raw'][u'position']) / 1000000000,
string.atol(position[u'raw'][u'remaining']) / 1000000000,
string.atol(position[u'raw'][u'duration']) / 1000000000,
position[u'percent'][u'position'],
position[u'percent'][u'remaining'],
position[u'human'][u'position'],
position[u'human'][u'remaining'],
position[u'human'][u'duration'])
duration = string.atol(position[u'raw'][u'duration'])
formatted = self._format_time(duration)
av_transport.set_variable(conn_id, 'CurrentTrackDuration', formatted)
av_transport.set_variable(conn_id, 'CurrentMediaDuration', formatted)
position = string.atol(position[u'raw'][u'position'])
formatted = self._format_time(position)
av_transport.set_variable(conn_id, 'RelativeTimePosition', formatted)
av_transport.set_variable(conn_id, 'AbsoluteTimePosition', formatted)
def _format_time(self, time):
fmt = '%d:%02d:%02d'
try:
m, s = divmod(time / 1000000000, 60)
h, m = divmod(m, 60)
except:
h = m = s = 0
fmt = '%02d:%02d:%02d'
formatted = fmt % (h, m, s)
return formatted
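# Illustrative: _format_time(125 * 1000000000) -> '0:02:05'; if the division
# fails, the fallback '00:00:00' is returned.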
def load(self, uri, metadata, mimetype=None):
self.info("loading: %r %r ", uri, mimetype)
_, state, _ = self.player.get_state()
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
self.stop(silent=True) # the check whether a stop is really needed is done inside stop
if mimetype is None:
_, ext = os.path.splitext(uri)
if ext == '.ogg':
mimetype = 'application/ogg'
elif ext == '.flac':
mimetype = 'application/flac'
else:
mimetype = 'audio/mpeg'
self.player.load(uri, mimetype)
self.metadata = metadata
self.mimetype = mimetype
self.tags = {}
if self.playcontainer == None:
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURI', uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURIMetaData', metadata)
self.server.av_transport_server.set_variable(connection_id, 'NumberOfTracks', 1)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrack', 1)
else:
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURI', self.playcontainer[1])
self.server.av_transport_server.set_variable(connection_id, 'NumberOfTracks', len(self.playcontainer[2]))
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrack', self.playcontainer[0] + 1)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackURI', uri)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackMetaData', metadata)
#self.server.av_transport_server.set_variable(connection_id, 'TransportState', 'TRANSITIONING')
#self.server.av_transport_server.set_variable(connection_id, 'CurrentTransportActions','PLAY,STOP,PAUSE,SEEK,NEXT,PREVIOUS')
if uri.startswith('http://'):
transport_actions = Set(['PLAY,STOP,PAUSE'])
else:
transport_actions = Set(['PLAY,STOP,PAUSE,SEEK'])
if len(self.server.av_transport_server.get_variable('NextAVTransportURI').value) > 0:
transport_actions.add('NEXT')
if self.playcontainer != None:
if len(self.playcontainer[2]) - (self.playcontainer[0] + 1) > 0:
transport_actions.add('NEXT')
if self.playcontainer[0] > 0:
transport_actions.add('PREVIOUS')
self.server.av_transport_server.set_variable(connection_id, 'CurrentTransportActions', transport_actions)
if state == gst.STATE_PLAYING:
self.info("was playing...")
self.play()
self.update()
def status(self, position):
uri = self.player.get_uri()
if uri == None:
return {u'state': u'idle', u'uri': u''}
else:
r = {u'uri': unicode(uri),
u'position': position}
if self.tags != {}:
try:
r[u'artist'] = unicode(self.tags['artist'])
except:
pass
try:
r[u'title'] = unicode(self.tags['title'])
except:
pass
try:
r[u'album'] = unicode(self.tags['album'])
except:
pass
if self.player.get_state()[1] == gst.STATE_PLAYING:
r[u'state'] = u'playing'
elif self.player.get_state()[1] == gst.STATE_PAUSED:
r[u'state'] = u'paused'
else:
r[u'state'] = u'idle'
return r
def start(self, uri):
self.load(uri)
self.play()
def stop(self, silent=False):
self.info('Stopping: %r', self.player.get_uri())
if self.player.get_uri() == None:
return
if self.player.get_state()[1] in [gst.STATE_PLAYING, gst.STATE_PAUSED]:
self.player.stop()
if silent is True:
self.server.av_transport_server.set_variable(self.server.connection_manager_server.lookup_avt_id(self.current_connection_id), 'TransportState', 'STOPPED')
def play(self):
self.info("Playing: %r", self.player.get_uri())
if self.player.get_uri() == None:
return
self.player.play()
self.server.av_transport_server.set_variable(self.server.connection_manager_server.lookup_avt_id(self.current_connection_id), 'TransportState', 'PLAYING')
def pause(self):
self.info('Pausing: %r', self.player.get_uri())
self.player.pause()
self.server.av_transport_server.set_variable(self.server.connection_manager_server.lookup_avt_id(self.current_connection_id), 'TransportState', 'PAUSED_PLAYBACK')
def seek(self, location, old_state):
self.player.seek(location)
if old_state != None:
self.server.av_transport_server.set_variable(0, 'TransportState', old_state)
def mute(self):
self.player.mute()
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Mute', 'True')
def unmute(self):
self.player.unmute()
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Mute', 'False')
def get_mute(self):
return self.player.get_mute()
def get_volume(self):
return self.player.get_volume()
def set_volume(self, volume):
self.player.set_volume(volume)
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Volume', volume)
def playcontainer_browse(self, uri):
"""
dlna-playcontainer://uuid%3Afe814e3e-5214-4c24-847b-383fb599ff01?sid=urn%3Aupnp-org%3AserviceId%3AContentDirectory&cid=1441&fid=1444&fii=0&sc=&md=0
"""
from urllib import unquote
from cgi import parse_qs
from coherence.extern.et import ET
from coherence.upnp.core.utils import parse_xml
def handle_reply(r, uri, action, kw):
try:
next_track = ()
elt = DIDLLite.DIDLElement.fromString(r['Result'])
item = elt.getItems()[0]
local_protocol_infos = self.server.connection_manager_server.get_variable('SinkProtocolInfo').value.split(',')
res = item.res.get_matching(local_protocol_infos, protocol_type='internal')
if len(res) == 0:
res = item.res.get_matching(local_protocol_infos)
if len(res) > 0:
res = res[0]
remote_protocol, remote_network, remote_content_format, _ = res.protocolInfo.split(':')
didl = DIDLLite.DIDLElement()
didl.addItem(item)
next_track = (res.data, didl.toString(), remote_content_format)
""" a list with these elements:
the current track index
- will change during playback of the container items
the initial complete playcontainer-uri
a list of all the items in the playcontainer
the action methods to do the Browse call on the device
the kwargs for the Browse call
- kwargs['StartingIndex'] will be modified during further Browse requests
"""
self.playcontainer = [int(kw['StartingIndex']), uri, elt.getItems()[:], action, kw]
def browse_more(starting_index, number_returned, total_matches):
self.info("browse_more %s %s %s", starting_index, number_returned, total_matches)
try:
def handle_error(r):
pass
def handle_reply(r, starting_index):
elt = DIDLLite.DIDLElement.fromString(r['Result'])
self.playcontainer[2] += elt.getItems()[:]
browse_more(starting_index, int(r['NumberReturned']), int(r['TotalMatches']))
if((number_returned != 5 or
number_returned < (total_matches - starting_index)) and
(total_matches - number_returned) != starting_index):
self.info("seems we have been returned only a part of the result")
self.info("requested %d, starting at %d", 5, starting_index)
self.info("got %d out of %d", number_returned, total_matches)
self.info("requesting more starting now at %d", starting_index + number_returned)
self.playcontainer[4]['StartingIndex'] = str(starting_index + number_returned)
d = self.playcontainer[3].call(**self.playcontainer[4])
d.addCallback(handle_reply, starting_index + number_returned)
d.addErrback(handle_error)
except:
import traceback
traceback.print_exc()
browse_more(int(kw['StartingIndex']), int(r['NumberReturned']), int(r['TotalMatches']))
if len(next_track) == 3:
return next_track
except:
import traceback
traceback.print_exc()
return failure.Failure(errorCode(714))
def handle_error(r):
return failure.Failure(errorCode(714))
try:
udn, args = uri[21:].split('?')
udn = unquote(udn)
args = parse_qs(args)
type = args['sid'][0].split(':')[-1]
try:
sc = args['sc'][0]
except:
sc = ''
device = self.server.coherence.get_device_with_id(udn)
service = device.get_service_by_type(type)
action = service.get_action('Browse')
kw = {'ObjectID': args['cid'][0],
'BrowseFlag': 'BrowseDirectChildren',
'StartingIndex': args['fii'][0],
'RequestedCount': str(5),
'Filter': '*',
'SortCriteria': sc}
d = action.call(**kw)
d.addCallback(handle_reply, uri, action, kw)
d.addErrback(handle_error)
return d
except:
return failure.Failure(errorCode(714))
def upnp_init(self):
self.current_connection_id = None
self.server.connection_manager_server.set_variable(0, 'SinkProtocolInfo',
['internal:%s:audio/mpeg:*' % self.server.coherence.hostname,
'http-get:*:audio/mpeg:*',
'internal:%s:audio/mp4:*' % self.server.coherence.hostname,
'http-get:*:audio/mp4:*',
'internal:%s:application/ogg:*' % self.server.coherence.hostname,
'http-get:*:application/ogg:*',
'internal:%s:audio/ogg:*' % self.server.coherence.hostname,
'http-get:*:audio/ogg:*',
'internal:%s:video/ogg:*' % self.server.coherence.hostname,
'http-get:*:video/ogg:*',
'internal:%s:application/flac:*' % self.server.coherence.hostname,
'http-get:*:application/flac:*',
'internal:%s:audio/flac:*' % self.server.coherence.hostname,
'http-get:*:audio/flac:*',
'internal:%s:video/x-msvideo:*' % self.server.coherence.hostname,
'http-get:*:video/x-msvideo:*',
'internal:%s:video/mp4:*' % self.server.coherence.hostname,
'http-get:*:video/mp4:*',
'internal:%s:video/quicktime:*' % self.server.coherence.hostname,
'http-get:*:video/quicktime:*',
'internal:%s:image/gif:*' % self.server.coherence.hostname,
'http-get:*:image/gif:*',
'internal:%s:image/jpeg:*' % self.server.coherence.hostname,
'http-get:*:image/jpeg:*',
'internal:%s:image/png:*' % self.server.coherence.hostname,
'http-get:*:image/png:*',
'http-get:*:*:*'],
default=True)
self.server.av_transport_server.set_variable(0, 'TransportState', 'NO_MEDIA_PRESENT', default=True)
self.server.av_transport_server.set_variable(0, 'TransportStatus', 'OK', default=True)
self.server.av_transport_server.set_variable(0, 'CurrentPlayMode', 'NORMAL', default=True)
self.server.av_transport_server.set_variable(0, 'CurrentTransportActions', '', default=True)
self.server.rendering_control_server.set_variable(0, 'Volume', self.get_volume())
self.server.rendering_control_server.set_variable(0, 'Mute', self.get_mute())
def upnp_Play(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Speed = int(kwargs['Speed'])
self.play()
return {}
def upnp_Pause(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.pause()
return {}
def upnp_Stop(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.stop()
return {}
def upnp_Seek(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Unit = kwargs['Unit']
Target = kwargs['Target']
if InstanceID != 0:
return failure.Failure(errorCode(718))
if Unit in ['ABS_TIME', 'REL_TIME']:
old_state = self.server.av_transport_server.get_variable('TransportState').value
self.server.av_transport_server.set_variable(InstanceID, 'TransportState', 'TRANSITIONING')
sign = ''
if Target[0] == '+':
Target = Target[1:]
sign = '+'
if Target[0] == '-':
Target = Target[1:]
sign = '-'
h, m, s = Target.split(':')
seconds = int(h) * 3600 + int(m) * 60 + int(s)
self.seek(sign + str(seconds), old_state)
if Unit in ['TRACK_NR']:
if self.playcontainer == None:
NextURI = self.server.av_transport_server.get_variable('NextAVTransportURI', InstanceID).value
if NextURI != '':
self.server.av_transport_server.set_variable(InstanceID, 'TransportState', 'TRANSITIONING')
NextURIMetaData = self.server.av_transport_server.get_variable('NextAVTransportURIMetaData').value
self.server.av_transport_server.set_variable(InstanceID, 'NextAVTransportURI', '')
self.server.av_transport_server.set_variable(InstanceID, 'NextAVTransportURIMetaData', '')
r = self.upnp_SetAVTransportURI(self, InstanceID=InstanceID, CurrentURI=NextURI, CurrentURIMetaData=NextURIMetaData)
return r
else:
Target = int(Target)
if 0 < Target <= len(self.playcontainer[2]):
self.server.av_transport_server.set_variable(InstanceID, 'TransportState', 'TRANSITIONING')
next_track = ()
item = self.playcontainer[2][Target - 1]
local_protocol_infos = self.server.connection_manager_server.get_variable('SinkProtocolInfo').value.split(',')
res = item.res.get_matching(local_protocol_infos, protocol_type='internal')
if len(res) == 0:
res = item.res.get_matching(local_protocol_infos)
if len(res) > 0:
res = res[0]
remote_protocol, remote_network, remote_content_format, _ = res.protocolInfo.split(':')
didl = DIDLLite.DIDLElement()
didl.addItem(item)
next_track = (res.data, didl.toString(), remote_content_format)
self.playcontainer[0] = Target - 1
if len(next_track) == 3:
self.server.av_transport_server.set_variable(self.server.connection_manager_server.lookup_avt_id(self.current_connection_id), 'CurrentTrack', Target)
self.load(next_track[0], next_track[1], next_track[2])
self.play()
return {}
return failure.Failure(errorCode(711))
return {}
def upnp_Next(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
track_nr = self.server.av_transport_server.get_variable('CurrentTrack')
return self.upnp_Seek(self, InstanceID=InstanceID, Unit='TRACK_NR', Target=str(int(track_nr.value) + 1))
def upnp_Previous(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
track_nr = self.server.av_transport_server.get_variable('CurrentTrack')
return self.upnp_Seek(self, InstanceID=InstanceID, Unit='TRACK_NR', Target=str(int(track_nr.value) - 1))
def upnp_SetNextAVTransportURI(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
NextURI = kwargs['NextURI']
current_connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
NextMetaData = kwargs['NextURIMetaData']
self.server.av_transport_server.set_variable(current_connection_id, 'NextAVTransportURI', NextURI)
self.server.av_transport_server.set_variable(current_connection_id, 'NextAVTransportURIMetaData', NextMetaData)
if len(NextURI) == 0 and self.playcontainer == None:
transport_actions = self.server.av_transport_server.get_variable('CurrentTransportActions').value
transport_actions = Set(transport_actions.split(','))
try:
transport_actions.remove('NEXT')
self.server.av_transport_server.set_variable(current_connection_id, 'CurrentTransportActions', transport_actions)
except KeyError:
pass
return {}
transport_actions = self.server.av_transport_server.get_variable('CurrentTransportActions').value
transport_actions = Set(transport_actions.split(','))
transport_actions.add('NEXT')
self.server.av_transport_server.set_variable(current_connection_id, 'CurrentTransportActions', transport_actions)
return {}
def upnp_SetAVTransportURI(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
CurrentURI = kwargs['CurrentURI']
CurrentURIMetaData = kwargs['CurrentURIMetaData']
#print "upnp_SetAVTransportURI",InstanceID, CurrentURI, CurrentURIMetaData
if CurrentURI.startswith('dlna-playcontainer://'):
def handle_result(r):
self.load(r[0], r[1], mimetype=r[2])
return {}
def pass_error(r):
return r
d = defer.maybeDeferred(self.playcontainer_browse, CurrentURI)
d.addCallback(handle_result)
d.addErrback(pass_error)
return d
elif len(CurrentURIMetaData) == 0:
self.playcontainer = None
self.load(CurrentURI, CurrentURIMetaData)
return {}
else:
local_protocol_infos = self.server.connection_manager_server.get_variable('SinkProtocolInfo').value.split(',')
#print local_protocol_infos
elt = DIDLLite.DIDLElement.fromString(CurrentURIMetaData)
if elt.numItems() == 1:
item = elt.getItems()[0]
res = item.res.get_matching(local_protocol_infos, protocol_type='internal')
if len(res) == 0:
res = item.res.get_matching(local_protocol_infos)
if len(res) > 0:
res = res[0]
remote_protocol, remote_network, remote_content_format, _ = res.protocolInfo.split(':')
self.playcontainer = None
self.load(res.data, CurrentURIMetaData, mimetype=remote_content_format)
return {}
return failure.Failure(errorCode(714))
def upnp_SetMute(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Channel = kwargs['Channel']
DesiredMute = kwargs['DesiredMute']
if DesiredMute in ['TRUE', 'True', 'true', '1', 'Yes', 'yes']:
self.mute()
else:
self.unmute()
return {}
def upnp_SetVolume(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Channel = kwargs['Channel']
DesiredVolume = int(kwargs['DesiredVolume'])
self.set_volume(DesiredVolume)
return {}
if __name__ == '__main__':
import sys
p = Player(None)
if len(sys.argv) > 1:
reactor.callWhenRunning(p.start, sys.argv[1])
reactor.run()
|
|
#!/usr/bin/env python
import os
import shutil
import subprocess
import tempfile
import unittest
from mozprofile.prefs import Preferences
from mozprofile.profile import Profile
class PreferencesTest(unittest.TestCase):
"""test mozprofile"""
def run_command(self, *args):
"""
runs mozprofile;
returns (stdout, stderr, code)
"""
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout = stdout.strip()
stderr = stderr.strip()
return stdout, stderr, process.returncode
def compare_generated(self, _prefs, commandline):
"""
writes out to a new profile with mozprofile command line
reads the generated preferences with prefs.py
compares the results
cleans up
"""
profile, stderr, code = self.run_command(*commandline)
prefs_file = os.path.join(profile, 'user.js')
self.assertTrue(os.path.exists(prefs_file))
read = Preferences.read_prefs(prefs_file)
if isinstance(_prefs, dict):
read = dict(read)
self.assertEqual(_prefs, read)
shutil.rmtree(profile)
def test_basic_prefs(self):
_prefs = {"browser.startup.homepage": "http://planet.mozilla.org/"}
commandline = ["mozprofile"]
_prefs = _prefs.items()
for pref, value in _prefs:
commandline += ["--pref", "%s:%s" % (pref, value)]
self.compare_generated(_prefs, commandline)
def test_ordered_prefs(self):
"""ensure the prefs stay in the right order"""
_prefs = [("browser.startup.homepage", "http://planet.mozilla.org/"),
("zoom.minPercent", 30),
("zoom.maxPercent", 300),
("webgl.verbose", 'false')]
commandline = ["mozprofile"]
for pref, value in _prefs:
commandline += ["--pref", "%s:%s" % (pref, value)]
_prefs = [(i, Preferences.cast(j)) for i, j in _prefs]
self.compare_generated(_prefs, commandline)
def test_ini(self):
# write the .ini file
_ini = """[DEFAULT]
browser.startup.homepage = http://planet.mozilla.org/
[foo]
browser.startup.homepage = http://github.com/
"""
fd, name = tempfile.mkstemp(suffix='.ini')
os.write(fd, _ini)
os.close(fd)
commandline = ["mozprofile", "--preferences", name]
# test the [DEFAULT] section
_prefs = {'browser.startup.homepage': 'http://planet.mozilla.org/'}
self.compare_generated(_prefs, commandline)
# test a specific section
_prefs = {'browser.startup.homepage': 'http://github.com/'}
commandline[-1] = commandline[-1] + ':foo'
self.compare_generated(_prefs, commandline)
# cleanup
os.remove(name)
def test_reset_should_remove_added_prefs(self):
"""Check that when we call reset the items we expect are updated"""
profile = Profile()
prefs_file = os.path.join(profile.profile, 'user.js')
# we shouldn't have any initial preferences
initial_prefs = Preferences.read_prefs(prefs_file)
self.assertFalse(initial_prefs)
initial_prefs = file(prefs_file).read().strip()
self.assertFalse(initial_prefs)
# add some preferences
prefs1 = [("mr.t.quotes", "i aint getting on no plane!")]
profile.set_preferences(prefs1)
self.assertEqual(prefs1, Preferences.read_prefs(prefs_file))
lines = file(prefs_file).read().strip().splitlines()
self.assertTrue(bool([line for line in lines
if line.startswith('#MozRunner Prefs Start')]))
self.assertTrue(bool([line for line in lines
if line.startswith('#MozRunner Prefs End')]))
profile.reset()
self.assertNotEqual(prefs1, \
Preferences.read_prefs(os.path.join(profile.profile, 'user.js')),\
"I pity the fool who left my pref")
def test_magic_markers(self):
"""ensure our magic markers are working"""
profile = Profile()
prefs_file = os.path.join(profile.profile, 'user.js')
# we shouldn't have any initial preferences
initial_prefs = Preferences.read_prefs(prefs_file)
self.assertFalse(initial_prefs)
initial_prefs = file(prefs_file).read().strip()
self.assertFalse(initial_prefs)
# add some preferences
prefs1 = [("browser.startup.homepage", "http://planet.mozilla.org/"),
("zoom.minPercent", 30)]
profile.set_preferences(prefs1)
self.assertEqual(prefs1, Preferences.read_prefs(prefs_file))
lines = file(prefs_file).read().strip().splitlines()
self.assertTrue(bool([line for line in lines
if line.startswith('#MozRunner Prefs Start')]))
self.assertTrue(bool([line for line in lines
if line.startswith('#MozRunner Prefs End')]))
# add some more preferences
prefs2 = [("zoom.maxPercent", 300),
("webgl.verbose", 'false')]
profile.set_preferences(prefs2)
self.assertEqual(prefs1 + prefs2, Preferences.read_prefs(prefs_file))
lines = file(prefs_file).read().strip().splitlines()
self.assertTrue(len([line for line in lines
if line.startswith('#MozRunner Prefs Start')]) == 2)
self.assertTrue(len([line for line in lines
if line.startswith('#MozRunner Prefs End')]) == 2)
# now clean it up
profile.clean_preferences()
final_prefs = Preferences.read_prefs(prefs_file)
self.assertFalse(final_prefs)
lines = file(prefs_file).read().strip().splitlines()
self.assertTrue('#MozRunner Prefs Start' not in lines)
self.assertTrue('#MozRunner Prefs End' not in lines)
def test_preexisting_preferences(self):
"""ensure you don't clobber preexisting preferences"""
# make a pretend profile
tempdir = tempfile.mkdtemp()
try:
# make a user.js
contents = """
user_pref("webgl.enabled_for_all_sites", true);
user_pref("webgl.force-enabled", true);
"""
user_js = os.path.join(tempdir, 'user.js')
f = file(user_js, 'w')
f.write(contents)
f.close()
# make sure you can read it
prefs = Preferences.read_prefs(user_js)
original_prefs = [('webgl.enabled_for_all_sites', True), ('webgl.force-enabled', True)]
self.assertTrue(prefs == original_prefs)
# now read this as a profile
profile = Profile(tempdir, preferences={"browser.download.dir": "/home/jhammel"})
# make sure the new pref is now there
new_prefs = original_prefs[:] + [("browser.download.dir", "/home/jhammel")]
prefs = Preferences.read_prefs(user_js)
self.assertTrue(prefs == new_prefs)
# clean up the added preferences
profile.cleanup()
del profile
# make sure you have the original preferences
prefs = Preferences.read_prefs(user_js)
self.assertTrue(prefs == original_prefs)
except:
shutil.rmtree(tempdir)
raise
def test_json(self):
_prefs = {"browser.startup.homepage": "http://planet.mozilla.org/"}
json = '{"browser.startup.homepage": "http://planet.mozilla.org/"}'
# just repr it...could use the json module but we don't need it here
fd, name = tempfile.mkstemp(suffix='.json')
os.write(fd, json)
os.close(fd)
commandline = ["mozprofile", "--preferences", name]
self.compare_generated(_prefs, commandline)
if __name__ == '__main__':
unittest.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from importlib import import_module
import logging
import os
import pkgutil
from horizon.utils import file_discovery
from openstack_dashboard import theme_settings
def import_submodules(module):
"""Import all submodules and make them available in a dict."""
submodules = {}
for loader, name, ispkg in pkgutil.iter_modules(module.__path__,
module.__name__ + '.'):
try:
submodule = import_module(name)
except ImportError as e:
# FIXME: Make the errors non-fatal (do we want that?).
logging.warning("Error importing %s", name)
logging.exception(e)
else:
parent, child = name.rsplit('.', 1)
submodules[child] = submodule
return submodules
def import_dashboard_config(modules):
"""Imports configuration from all the modules and merges it."""
config = collections.defaultdict(dict)
for module in modules:
for submodule in import_submodules(module).values():
if hasattr(submodule, 'DASHBOARD'):
dashboard = submodule.DASHBOARD
config[dashboard].update(submodule.__dict__)
elif (hasattr(submodule, 'PANEL')
or hasattr(submodule, 'PANEL_GROUP')
or hasattr(submodule, 'FEATURE')):
config[submodule.__name__] = submodule.__dict__
else:
logging.warning("Skipping %s because it doesn't have DASHBOARD"
", PANEL, PANEL_GROUP, or FEATURE defined.",
submodule.__name__)
return sorted(config.items(),
key=lambda c: c[1]['__name__'].rsplit('.', 1)[1])
def update_dashboards(modules, horizon_config, installed_apps):
"""Imports dashboard and panel configuration from modules and applies it.
The submodules from specified modules are imported, and the configuration
for the specific dashboards is merged, with the later modules overriding
settings from the former. Then the configuration is applied to
horizon_config and installed_apps, in alphabetical order of files from
which the configurations were imported.
For example, given this setup:
| foo/__init__.py
| foo/_10_baz.py
| foo/_20_qux.py
| bar/__init__.py
| bar/_30_baz_.py
and being called with ``modules=[foo, bar]``, we will first have the
configuration from ``_10_baz`` and ``_30_baz`` merged, then the
configurations will be applied in order ``qux``, ``baz`` (``baz`` is
second, because the most recent file which contributed to it, ``_30_baz``,
comes after ``_20_qux``).
Panel-specific configurations are stored in horizon_config. Dashboards
from both plugins and openstack_dashboard must be registered before the
panel configuration can be applied. Changes to panels are therefore
deferred until horizon autodiscovery has completed; the configurations are
applied in alphabetical order of the files from which they were imported.
"""
config_dashboards = horizon_config.get('dashboards', [])
if config_dashboards or horizon_config.get('default_dashboard'):
logging.warning(
'"dashboards" and "default_dashboard" in (local_)settings is '
'DEPRECATED now and may be unsupported in some future release. '
'The preferred way to specify the order of dashboards and the '
'default dashboard is the pluggable dashboard mechanism (in %s).',
', '.join([os.path.abspath(module.__path__[0])
for module in modules])
)
enabled_dashboards = []
disabled_dashboards = []
exceptions = horizon_config.get('exceptions', {})
apps = []
angular_modules = []
js_files = []
js_spec_files = []
scss_files = []
panel_customization = []
update_horizon_config = {}
for key, config in import_dashboard_config(modules):
if config.get('DISABLED', False):
if config.get('DASHBOARD'):
disabled_dashboards.append(config.get('DASHBOARD'))
continue
_apps = config.get('ADD_INSTALLED_APPS', [])
apps.extend(_apps)
if config.get('AUTO_DISCOVER_STATIC_FILES', False):
for _app in _apps:
module = import_module(_app)
base_path = os.path.join(module.__path__[0], 'static/')
file_discovery.populate_horizon_config(horizon_config,
base_path)
add_exceptions = config.get('ADD_EXCEPTIONS', {}).items()
for category, exc_list in add_exceptions:
exceptions[category] = tuple(set(exceptions.get(category, ())
+ exc_list))
angular_modules.extend(config.get('ADD_ANGULAR_MODULES', []))
# avoid pulling in dashboard javascript dependencies multiple times
existing = set(js_files)
js_files.extend([f for f in config.get('ADD_JS_FILES', [])
if f not in existing])
js_spec_files.extend(config.get('ADD_JS_SPEC_FILES', []))
scss_files.extend(config.get('ADD_SCSS_FILES', []))
update_horizon_config.update(
config.get('UPDATE_HORIZON_CONFIG', {}))
if config.get('DASHBOARD'):
dashboard = key
enabled_dashboards.append(dashboard)
if config.get('DEFAULT', False):
horizon_config['default_dashboard'] = dashboard
elif config.get('PANEL') or config.get('PANEL_GROUP'):
config.pop("__builtins__", None)
panel_customization.append(config)
# Preserve the dashboard order specified in settings
dashboards = ([d for d in config_dashboards
if d not in disabled_dashboards] +
[d for d in enabled_dashboards
if d not in config_dashboards])
horizon_config['panel_customization'] = panel_customization
horizon_config['dashboards'] = tuple(dashboards)
horizon_config.setdefault('exceptions', {}).update(exceptions)
horizon_config.update(update_horizon_config)
horizon_config.setdefault('angular_modules', []).extend(angular_modules)
horizon_config.setdefault('js_files', []).extend(js_files)
horizon_config.setdefault('js_spec_files', []).extend(js_spec_files)
horizon_config.setdefault('scss_files', []).extend(scss_files)
# apps contains reference to applications declared in the enabled folder
# basically a list of applications that are internal and external plugins
# installed_apps contains reference to applications declared in settings
# such as django.contrib.*, django_pyscss, compressor, horizon, etc...
# for translation, we are only interested in the list of external plugins
# so we save the reference to it before we append to installed_apps
horizon_config.setdefault('plugins', []).extend(apps)
installed_apps[0:0] = apps
# Order matters: list the xstatic module name and the entry point file(s) for
# that module (this is often defined as the "main" in bower.json, and
# as the xstatic module MAIN variable in the very few compliant xstatic
# modules). If the xstatic module does define a MAIN then set the files
# list to None.
# This list is to be used as the base list which is potentially added to in
# local_settings.py before being passed to get_xstatic_dirs()
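# A hedged sketch (assuming a local_settings.py override; the extra module and
# file names are illustrative) of how this base list is extended and consumed:
#     XSTATIC_MODULES = BASE_XSTATIC_MODULES + [
#         ('xstatic.pkg.some_extra_lib', ['some-extra-lib.js']),
#     ]
#     STATICFILES_DIRS = get_xstatic_dirs(XSTATIC_MODULES, HORIZON_CONFIG)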
BASE_XSTATIC_MODULES = [
('xstatic.pkg.jquery', ['jquery.js']),
('xstatic.pkg.jquery_migrate', ['jquery-migrate.js']),
('xstatic.pkg.angular', [
'angular.js',
'angular-cookies.js',
'angular-sanitize.js',
'angular-route.js'
]),
('xstatic.pkg.angular_bootstrap', ['angular-bootstrap.js']),
('xstatic.pkg.angular_gettext', None),
('xstatic.pkg.angular_lrdragndrop', None),
('xstatic.pkg.angular_smart_table', None),
('xstatic.pkg.angular_fileupload', ['ng-file-upload-all.js']),
('xstatic.pkg.d3', ['d3.js']),
('xstatic.pkg.jquery_quicksearch', ['jquery.quicksearch.js']),
('xstatic.pkg.jquery_tablesorter', ['jquery.tablesorter.js']),
('xstatic.pkg.spin', ['spin.js', 'spin.jquery.js']),
('xstatic.pkg.jquery_ui', ['jquery-ui.js']),
('xstatic.pkg.bootstrap_scss', ['js/bootstrap.js']),
('xstatic.pkg.bootstrap_datepicker', ['bootstrap-datepicker.js']),
('xstatic.pkg.hogan', ['hogan.js']),
('xstatic.pkg.rickshaw', ['rickshaw.js']),
('xstatic.pkg.jsencrypt', None),
('xstatic.pkg.objectpath', ['ObjectPath.js']),
('xstatic.pkg.tv4', ['tv4.js']),
('xstatic.pkg.angular_schema_form', ['schema-form.js']),
# @imported in scss files directly
('xstatic.pkg.font_awesome', []),
('xstatic.pkg.bootswatch', []),
('xstatic.pkg.roboto_fontface', []),
('xstatic.pkg.mdi', []),
# testing only, not included in application
('xstatic.pkg.jasmine', []),
('xstatic.pkg.termjs', []),
]
def get_xstatic_dirs(XSTATIC_MODULES, HORIZON_CONFIG):
"""Discover static file configuration of the xstatic modules.
For each entry in the XSTATIC_MODULES list we determine the entry
point files (which may come from the xstatic MAIN var) and then
determine where in the Django static tree the xstatic package's contents
should be placed.
For jquery.bootstrap.wizard.js the module name is None; the static file is
actually a 3rd-party file but resides in the Horizon source tree rather than
in an xstatic package.
The xstatic.pkg.jquery_ui package had its contents moved by packagers so
it must be handled as a special case.
"""
STATICFILES_DIRS = []
HORIZON_CONFIG['xstatic_lib_files'] = []
for module_name, files in XSTATIC_MODULES:
module = import_module(module_name)
if module_name == 'xstatic.pkg.jquery_ui':
# determine the correct path for jquery-ui which packagers moved
if module.VERSION.startswith('1.10.'):
# The 1.10.x versions already contain 'ui' directory.
files = ['ui/' + files[0]]
STATICFILES_DIRS.append(
('horizon/lib/' + module.NAME, module.BASE_DIR)
)
# pull the file entry points from the xstatic package MAIN if possible
if hasattr(module, 'MAIN'):
files = module.MAIN
if not isinstance(files, list):
files = [files]
# just the Javascript files, please (don't <script> css, etc
# which is explicitly included in style/themes as appropriate)
files = [file for file in files if file.endswith('.js')]
# add to the list of files to link in the HTML
for file in files:
file = 'horizon/lib/' + module.NAME + '/' + file
HORIZON_CONFIG['xstatic_lib_files'].append(file)
return STATICFILES_DIRS
def find_static_files(
HORIZON_CONFIG,
AVAILABLE_THEMES,
THEME_COLLECTION_DIR,
ROOT_PATH):
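# Populate HORIZON_CONFIG with the static files discovered for horizon,
# openstack_dashboard, and each available theme (see the steps below).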
import horizon
import openstack_dashboard
os_dashboard_home_dir = openstack_dashboard.__path__[0]
horizon_home_dir = horizon.__path__[0]
# note the path must end in a '/' or the resultant file paths will have a
# leading "/"
file_discovery.populate_horizon_config(
HORIZON_CONFIG,
os.path.join(horizon_home_dir, 'static/')
)
# filter out non-angular javascript code and lib
HORIZON_CONFIG['js_files'] = ([f for f in HORIZON_CONFIG['js_files']
if not f.startswith('horizon/')])
# note the path must end in a '/' or the resultant file paths will have a
# leading "/"
file_discovery.populate_horizon_config(
HORIZON_CONFIG,
os.path.join(os_dashboard_home_dir, 'static/'),
sub_path='app/'
)
# Discover theme static resources, and in particular any
# static HTML (client-side) that the theme overrides
theme_static_files = {}
theme_info = theme_settings.get_theme_static_dirs(
AVAILABLE_THEMES,
THEME_COLLECTION_DIR,
ROOT_PATH)
for url, path in theme_info:
discovered_files = {}
# discover static files provided by the theme
file_discovery.populate_horizon_config(
discovered_files,
path
)
# Get the theme name from the theme url
theme_name = url.split('/')[-1]
# build a dictionary of this theme's static HTML templates.
# For each overridden template, strip off the '/templates/' part of the
# theme filename then use that name as the key, and the location in the
# theme directory as the value. This allows the quick lookup of
# theme path for any file overridden by a theme template
template_overrides = {}
for theme_file in discovered_files['external_templates']:
# Example:
# external_templates_dict[
# 'framework/widgets/help-panel/help-panel.html'
# ] = 'themes/material/templates/framework/widgets/\
# help-panel/help-panel.html'
(templates_part, override_path) = theme_file.split('/templates/')
template_overrides[override_path] = 'themes/' + \
theme_name + theme_file
discovered_files['template_overrides'] = template_overrides
# Save all of the discovered file info for this theme in our
# 'theme_files' object using the theme name as the key
theme_static_files[theme_name] = discovered_files
# Add the theme file info to the horizon config for use by template tags
HORIZON_CONFIG['theme_static_files'] = theme_static_files
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import age_range_view
from google.ads.googleads.v9.services.types import age_range_view_service
from .transports.base import AgeRangeViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AgeRangeViewServiceGrpcTransport
class AgeRangeViewServiceClientMeta(type):
"""Metaclass for the AgeRangeViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AgeRangeViewServiceTransport]]
_transport_registry["grpc"] = AgeRangeViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AgeRangeViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AgeRangeViewServiceClient(metaclass=AgeRangeViewServiceClientMeta):
"""Service to manage age range views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
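# Illustrative usage (the key file path is a placeholder):
#     client = AgeRangeViewServiceClient.from_service_account_file(
#         "/path/to/service-account.json")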
from_service_account_json = from_service_account_file
@property
def transport(self) -> AgeRangeViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
AgeRangeViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def age_range_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified age_range_view string."""
return "customers/{customer_id}/ageRangeViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
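# For example (IDs are placeholders), age_range_view_path("123", "456", "789")
# returns "customers/123/ageRangeViews/456~789".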
@staticmethod
def parse_age_range_view_path(path: str) -> Dict[str, str]:
"""Parse a age_range_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/ageRangeViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AgeRangeViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the age range view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AgeRangeViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AgeRangeViewServiceTransport):
# transport is an AgeRangeViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AgeRangeViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_age_range_view(
self,
request: Union[
age_range_view_service.GetAgeRangeViewRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> age_range_view.AgeRangeView:
r"""Returns the requested age range view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetAgeRangeViewRequest, dict]):
The request object. Request message for
[AgeRangeViewService.GetAgeRangeView][google.ads.googleads.v9.services.AgeRangeViewService.GetAgeRangeView].
resource_name (:class:`str`):
Required. The resource name of the
age range view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.AgeRangeView:
An age range view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an age_range_view_service.GetAgeRangeViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, age_range_view_service.GetAgeRangeViewRequest
):
request = age_range_view_service.GetAgeRangeViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_age_range_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
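# Illustrative end-to-end sketch (all IDs are placeholders; credentials are
# resolved from the environment as described in __init__):
#     client = AgeRangeViewServiceClient()
#     name = client.age_range_view_path("1234567890", "111", "503001")
#     view = client.get_age_range_view(resource_name=name)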
__all__ = ("AgeRangeViewServiceClient",)
|
|
import datetime
import json
import furl
import responses
from django.utils import timezone
from nose.tools import * # flake8: noqa
from framework.auth.core import Auth
from addons.github.models import GithubFolder
from addons.github.tests.factories import GitHubAccountFactory
from api.base.settings.defaults import API_BASE
from api.base.utils import waterbutler_api_url_for
from api_tests import utils as api_utils
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
PrivateLinkFactory
)
def prepare_mock_wb_response(
node=None,
provider='github',
files=None,
folder=True,
path='/',
method=responses.GET,
status_code=200
):
"""Prepare a mock Waterbutler response with responses library.
:param Node node: Target node.
:param str provider: Addon provider
:param list files: Optional list of files. You can specify partial data; missing values
will have defaults.
:param folder: True if mocking out a folder response, False if a file response.
:param path: Waterbutler path, passed to waterbutler_api_url_for.
:param str method: HTTP method.
:param int status_code: HTTP status.
"""
node = node
files = files or []
wb_url = waterbutler_api_url_for(node._id, provider=provider, _internal=True, path=path, meta=True, view_only=None)
default_file = {
u'contentType': None,
u'extra': {u'downloads': 0, u'version': 1},
u'kind': u'file',
u'modified': None,
u'name': u'NewFile',
u'path': u'/NewFile',
u'provider': provider,
u'size': None,
u'materialized': '/',
}
if len(files):
data = [dict(default_file, **each) for each in files]
else:
data = [default_file]
jsonapi_data = []
for datum in data:
jsonapi_data.append({'attributes': datum})
if not folder:
jsonapi_data = jsonapi_data[0]
responses.add(
responses.Response(
method,
wb_url,
json={u'data': jsonapi_data},
status=status_code,
content_type='application/json'
)
)
class TestNodeFilesList(ApiTestCase):
def setUp(self):
super(TestNodeFilesList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.private_url = '/{}nodes/{}/files/'.format(
API_BASE, self.project._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(creator=self.user, is_public=True)
self.public_url = '/{}nodes/{}/files/'.format(API_BASE, self.public_project._id)
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
def view_only_link(self):
private_link = PrivateLinkFactory(creator=self.user)
private_link.nodes.add(self.project)
private_link.save()
return private_link
def _prepare_mock_wb_response(self, node=None, **kwargs):
prepare_mock_wb_response(node=node or self.project, **kwargs)
def test_returns_public_files_logged_out(self):
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_returns_public_files_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
def test_returns_storage_addons_link(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_in('storage_addons', res.json['data'][0]['links'])
def test_returns_file_data(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_file('NewFile')
fobj.save()
res = self.app.get(
'{}osfstorage/{}'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(isinstance(res.json['data'], dict))
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['kind'], 'file')
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
def test_returns_osfstorage_folder_version_two(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/'.format(self.private_url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_returns_osf_storage_folder_version_two_point_two(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/?version=2.2'.format(self.private_url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_list_returns_folder_data(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFolder')
def test_returns_folder_data(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/{}/'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 0)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_returns_private_files_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_returns_private_files_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
def test_returns_private_files_logged_in_non_contributor(self):
res = self.app.get(
self.private_url,
auth=self.user_two.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_returns_addon_folders(self):
user_auth = Auth(self.user)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
providers = [item['attributes']['provider'] for item in data]
assert_equal(len(data), 2)
assert_in('github', providers)
assert_in('osfstorage', providers)
@responses.activate
def test_vol_node_files_list(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}])
self.add_github()
vol = self.view_only_link()
url = '/{}nodes/{}/files/github/?view_only={}'.format(
API_BASE, self.project._id, vol.key)
res = self.app.get(url, auth=self.user_two.auth)
wb_request = responses.calls[-1].request
url = furl.furl(wb_request.url)
assert_equal(url.query, 'meta=True&view_only={}'.format(unicode(vol.key, 'utf-8')))
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFile')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
assert_in(vol.key, res.json['data'][0]['links']['info'])
assert_in(vol.key, res.json['data'][0]['links']['move'])
assert_in(vol.key, res.json['data'][0]['links']['upload'])
assert_in(vol.key, res.json['data'][0]['links']['download'])
assert_in(vol.key, res.json['data'][0]['links']['delete'])
@responses.activate
def test_returns_node_files_list(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}])
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFile')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFile')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
@responses.activate
def test_returns_folder_metadata_not_children(self):
folder = GithubFolder(
name='Folder',
node=self.project,
path='/Folder/'
)
folder.save()
self._prepare_mock_wb_response(provider='github', files=[{'name': 'Folder'}], path='/Folder/')
self.add_github()
url = '/{}nodes/{}/files/github/Folder/'.format(API_BASE, self.project._id)
res = self.app.get(url, params={'info': ''}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data'][0]['attributes']['kind'], 'folder')
assert_equal(res.json['data'][0]['attributes']['name'], 'Folder')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
@responses.activate
def test_returns_node_file(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}],
folder=False, path='/file')
self.add_github()
url = '/{}nodes/{}/files/github/file'.format(
API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, headers={
'COOKIE': 'foo=bar;' # Webtests doesn't support cookies?
})
# test create
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
assert_equal(res.json['data']['attributes']['provider'], 'github')
# test get
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
assert_equal(res.json['data']['attributes']['provider'], 'github')
@responses.activate
def test_notfound_node_file_returns_folder(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}],
path='/file')
url = '/{}nodes/{}/files/github/file'.format(
API_BASE, self.project._id)
res = self.app.get(
url, auth=self.user.auth,
expect_errors=True,
headers={'COOKIE': 'foo=bar;'} # Webtests doesn't support cookies?
)
assert_equal(res.status_code, 404)
@responses.activate
def test_notfound_node_folder_returns_file(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}],
folder=False, path='/')
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(
url, auth=self.user.auth,
expect_errors=True,
headers={'COOKIE': 'foo=bar;'} # Webtests doesn't support cookies?
)
assert_equal(res.status_code, 404)
@responses.activate
def test_waterbutler_server_error_returns_503(self):
self._prepare_mock_wb_response(status_code=500)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(
url, auth=self.user.auth,
expect_errors=True,
headers={'COOKIE': 'foo=bar;'} # Webtests doesn't support cookies?
)
assert_equal(res.status_code, 503)
@responses.activate
def test_waterbutler_invalid_data_returns_503(self):
wb_url = waterbutler_api_url_for(self.project._id, _internal=True, provider='github', path='/', meta=True)
self.add_github()
responses.add(
responses.Response(
responses.GET,
wb_url,
body=json.dumps({}),
status=400
)
)
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 503)
@responses.activate
def test_handles_unauthenticated_waterbutler_request(self):
self._prepare_mock_wb_response(status_code=401)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@responses.activate
def test_handles_notfound_waterbutler_request(self):
invalid_provider = 'gilkjadsflhub'
self._prepare_mock_wb_response(
status_code=404, provider=invalid_provider)
url = '/{}nodes/{}/files/{}/'.format(API_BASE,
self.project._id, invalid_provider)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
def test_handles_request_to_provider_not_configured_on_project(self):
provider = 'box'
url = '/{}nodes/{}/files/{}/'.format(
API_BASE, self.project._id, provider)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_false(self.project.get_addon(provider))
assert_equal(res.status_code, 404)
assert_equal(
res.json['errors'][0]['detail'],
'The {} provider is not configured for this project.'.format(provider))
@responses.activate
def test_handles_bad_waterbutler_request(self):
wb_url = waterbutler_api_url_for(self.project._id, _internal=True, provider='github', path='/', meta=True)
responses.add(
responses.Response(
responses.GET,
wb_url,
json={'bad' : 'json'},
status=418
)
)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 503)
assert_in('detail', res.json['errors'][0])
def test_files_list_contains_relationships_object(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert 'relationships' in res.json['data'][0]
class TestNodeFilesListFiltering(ApiTestCase):
def setUp(self):
super(TestNodeFilesListFiltering, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
# Prep HTTP mocks
prepare_mock_wb_response(
node=self.project, provider='github',
files=[
{'name': 'abc', 'path': '/abc/', 'materialized': '/abc/', 'kind': 'folder'},
{'name': 'xyz', 'path': '/xyz', 'materialized': '/xyz', 'kind': 'file'},
]
)
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
@responses.activate
def test_node_files_are_filterable_by_name(self):
url = '/{}nodes/{}/files/github/?filter[name]=xyz'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'abc'
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'abc'
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
@responses.activate
def test_node_files_filter_by_name_case_insensitive(self):
url = '/{}nodes/{}/files/github/?filter[name]=XYZ'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# filters out 'abc', but finds 'xyz'
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# filters out 'abc', but finds 'xyz'
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
@responses.activate
def test_node_files_are_filterable_by_path(self):
url = '/{}nodes/{}/files/github/?filter[path]=abc'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
@responses.activate
def test_node_files_are_filterable_by_kind(self):
url = '/{}nodes/{}/files/github/?filter[kind]=folder'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
@responses.activate
def test_node_files_external_provider_can_filter_by_last_touched(self):
yesterday_stamp = timezone.now() - datetime.timedelta(days=1)
self.add_github()
url = '/{}nodes/{}/files/github/?filter[last_touched][gt]={}'.format(
API_BASE, self.project._id, yesterday_stamp.isoformat())
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
def test_node_files_osfstorage_cannot_filter_by_last_touched(self):
yesterday_stamp = timezone.now() - datetime.timedelta(days=1)
self.file = api_utils.create_test_file(self.project, self.user)
url = '/{}nodes/{}/files/osfstorage/?filter[last_touched][gt]={}'.format(
API_BASE, self.project._id, yesterday_stamp.isoformat())
# test create
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
# test get
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
class TestNodeFilesListPagination(ApiTestCase):
def setUp(self):
super(TestNodeFilesListPagination, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
def check_file_order(self, resp):
previous_file_name = 0
for file in resp.json['data']:
int_file_name = int(file['attributes']['name'])
assert int_file_name > previous_file_name, 'Files were not in order'
previous_file_name = int_file_name
@responses.activate
def test_node_files_are_sorted_correctly(self):
prepare_mock_wb_response(
node=self.project, provider='github',
files=[
{'name': '01', 'path': '/01/', 'materialized': '/01/', 'kind': 'folder'},
{'name': '02', 'path': '/02', 'materialized': '/02', 'kind': 'file'},
{'name': '03', 'path': '/03/', 'materialized': '/03/', 'kind': 'folder'},
{'name': '04', 'path': '/04', 'materialized': '/04', 'kind': 'file'},
{'name': '05', 'path': '/05/', 'materialized': '/05/', 'kind': 'folder'},
{'name': '06', 'path': '/06', 'materialized': '/06', 'kind': 'file'},
{'name': '07', 'path': '/07/', 'materialized': '/07/', 'kind': 'folder'},
{'name': '08', 'path': '/08', 'materialized': '/08', 'kind': 'file'},
{'name': '09', 'path': '/09/', 'materialized': '/09/', 'kind': 'folder'},
{'name': '10', 'path': '/10', 'materialized': '/10', 'kind': 'file'},
{'name': '11', 'path': '/11/', 'materialized': '/11/', 'kind': 'folder'},
{'name': '12', 'path': '/12', 'materialized': '/12', 'kind': 'file'},
{'name': '13', 'path': '/13/', 'materialized': '/13/', 'kind': 'folder'},
{'name': '14', 'path': '/14', 'materialized': '/14', 'kind': 'file'},
{'name': '15', 'path': '/15/', 'materialized': '/15/', 'kind': 'folder'},
{'name': '16', 'path': '/16', 'materialized': '/16', 'kind': 'file'},
{'name': '17', 'path': '/17/', 'materialized': '/17/', 'kind': 'folder'},
{'name': '18', 'path': '/18', 'materialized': '/18', 'kind': 'file'},
{'name': '19', 'path': '/19/', 'materialized': '/19/', 'kind': 'folder'},
{'name': '20', 'path': '/20', 'materialized': '/20', 'kind': 'file'},
{'name': '21', 'path': '/21/', 'materialized': '/21/', 'kind': 'folder'},
{'name': '22', 'path': '/22', 'materialized': '/22', 'kind': 'file'},
{'name': '23', 'path': '/23/', 'materialized': '/23/', 'kind': 'folder'},
{'name': '24', 'path': '/24', 'materialized': '/24', 'kind': 'file'},
]
)
self.add_github()
url = '/{}nodes/{}/files/github/?page[size]=100'.format(
API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth)
self.check_file_order(res)
class TestNodeProviderDetail(ApiTestCase):
def setUp(self):
super(TestNodeProviderDetail, self).setUp()
self.user = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True)
self.private_project = ProjectFactory(creator=self.user)
self.public_url = '/{}nodes/{}/files/providers/osfstorage/'.format(
API_BASE, self.public_project._id)
self.private_url = '/{}nodes/{}/files/providers/osfstorage/'.format(
API_BASE, self.private_project._id)
def test_can_view_if_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data']['id'],
'{}:osfstorage'.format(self.private_project._id)
)
def test_can_view_if_public(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data']['id'],
'{}:osfstorage'.format(self.public_project._id)
)
def test_cannot_view_if_private(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
|
|
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for volume types code
"""
import datetime
import time
from oslo_config import cfg
from oslo_log import log as logging
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as db_api
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.tests import conf_fixture
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class VolumeTypeTestCase(test.TestCase):
"""Test cases for volume type code."""
def setUp(self):
super(VolumeTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.vol_type1_name = str(int(time.time()))
self.vol_type1_specs = dict(type="physical drive",
drive_type="SAS",
size="300",
rpm="7200",
visible="True")
self.vol_type1_description = self.vol_type1_name + '_desc'
def test_volume_type_create_then_destroy(self):
"""Ensure volume types can be created and deleted."""
prev_all_vtypes = volume_types.get_all_types(self.ctxt)
# create
type_ref = volume_types.create(self.ctxt,
self.vol_type1_name,
self.vol_type1_specs,
description=self.vol_type1_description)
new = volume_types.get_volume_type_by_name(self.ctxt,
self.vol_type1_name)
LOG.info(_("Given data: %s"), self.vol_type1_specs)
LOG.info(_("Result data: %s"), new)
self.assertEqual(self.vol_type1_description, new['description'])
for k, v in self.vol_type1_specs.iteritems():
self.assertEqual(v, new['extra_specs'][k],
'one of fields does not match')
new_all_vtypes = volume_types.get_all_types(self.ctxt)
self.assertEqual(len(prev_all_vtypes) + 1,
len(new_all_vtypes),
'drive type was not created')
# update
new_type_name = self.vol_type1_name + '_updated'
new_type_desc = self.vol_type1_description + '_updated'
type_ref_updated = volume_types.update(self.ctxt,
type_ref.id,
new_type_name,
new_type_desc)
self.assertEqual(new_type_name, type_ref_updated['name'])
self.assertEqual(new_type_desc, type_ref_updated['description'])
# destroy
volume_types.destroy(self.ctxt, type_ref['id'])
new_all_vtypes = volume_types.get_all_types(self.ctxt)
self.assertEqual(prev_all_vtypes,
new_all_vtypes,
'drive type was not deleted')
def test_create_volume_type_with_invalid_params(self):
"""Ensure exception will be returned."""
vol_type_invalid_specs = "invalid_extra_specs"
self.assertRaises(exception.VolumeTypeCreateFailed,
volume_types.create, self.ctxt,
self.vol_type1_name,
vol_type_invalid_specs)
def test_get_all_volume_types(self):
"""Ensures that all volume types can be retrieved."""
session = db_api.get_session()
total_volume_types = session.query(models.VolumeTypes).count()
vol_types = volume_types.get_all_types(self.ctxt)
self.assertEqual(total_volume_types, len(vol_types))
def test_get_default_volume_type(self):
"""Ensures default volume type can be retrieved."""
volume_types.create(self.ctxt, conf_fixture.def_vol_type, {})
default_vol_type = volume_types.get_default_volume_type()
self.assertEqual(default_vol_type.get('name'),
conf_fixture.def_vol_type)
def test_default_volume_type_missing_in_db(self):
"""Ensures proper exception raised if default volume type
is not in database.
"""
default_vol_type = volume_types.get_default_volume_type()
self.assertEqual(default_vol_type, {})
def test_get_default_volume_type_under_non_default(self):
cfg.CONF.set_default('default_volume_type', None)
self.assertEqual({}, volume_types.get_default_volume_type())
def test_non_existent_vol_type_shouldnt_delete(self):
"""Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.VolumeTypeNotFound,
volume_types.destroy, self.ctxt, "sfsfsdfdfs")
def test_volume_type_with_volumes_shouldnt_delete(self):
"""Ensures volume type deletion with associated volumes fail."""
type_ref = volume_types.create(self.ctxt, self.vol_type1_name)
db.volume_create(self.ctxt,
{'id': '1',
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'available',
'volume_type_id': type_ref['id']})
self.assertRaises(exception.VolumeTypeInUse,
volume_types.destroy, self.ctxt, type_ref['id'])
def test_repeated_vol_types_shouldnt_raise(self):
"""Ensures that volume duplicates don't raise."""
new_name = self.vol_type1_name + "dup"
type_ref = volume_types.create(self.ctxt, new_name)
volume_types.destroy(self.ctxt, type_ref['id'])
type_ref = volume_types.create(self.ctxt, new_name)
def test_invalid_volume_types_params(self):
"""Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.InvalidVolumeType,
volume_types.destroy, self.ctxt, None)
self.assertRaises(exception.InvalidVolumeType,
volume_types.get_volume_type, self.ctxt, None)
self.assertRaises(exception.InvalidVolumeType,
volume_types.get_volume_type_by_name,
self.ctxt, None)
def test_volume_type_get_by_id_and_name(self):
"""Ensure volume types get returns same entry."""
volume_types.create(self.ctxt,
self.vol_type1_name,
self.vol_type1_specs)
new = volume_types.get_volume_type_by_name(self.ctxt,
self.vol_type1_name)
new2 = volume_types.get_volume_type(self.ctxt, new['id'])
self.assertEqual(new, new2)
def test_volume_type_search_by_extra_spec(self):
"""Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2"})
volume_types.create(self.ctxt, "type2", {"key2": "val2",
"key3": "val3"})
volume_types.create(self.ctxt, "type3", {"key3": "another_value",
"key4": "val4"})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key1": "val1"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 1)
self.assertIn("type1", vol_types.keys())
self.assertEqual(vol_types['type1']['extra_specs'],
{"key1": "val1", "key2": "val2"})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key2": "val2"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 2)
self.assertIn("type1", vol_types.keys())
self.assertIn("type2", vol_types.keys())
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key3": "val3"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 1)
self.assertIn("type2", vol_types.keys())
def test_volume_type_search_by_extra_spec_multiple(self):
"""Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2",
"key3": "val3"})
volume_types.create(self.ctxt, "type2", {"key2": "val2",
"key3": "val3"})
volume_types.create(self.ctxt, "type3", {"key1": "val1",
"key3": "val3",
"key4": "val4"})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key1": "val1",
"key3": "val3"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 2)
self.assertIn("type1", vol_types.keys())
self.assertIn("type3", vol_types.keys())
self.assertEqual(vol_types['type1']['extra_specs'],
{"key1": "val1", "key2": "val2", "key3": "val3"})
self.assertEqual(vol_types['type3']['extra_specs'],
{"key1": "val1", "key3": "val3", "key4": "val4"})
def test_is_encrypted(self):
volume_type = volume_types.create(self.ctxt, "type1")
volume_type_id = volume_type.get('id')
self.assertFalse(volume_types.is_encrypted(self.ctxt, volume_type_id))
encryption = {
'control_location': 'front-end',
'provider': 'fake_provider',
}
db_api.volume_type_encryption_create(self.ctxt, volume_type_id,
encryption)
self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id))
def test_add_access(self):
project_id = '456'
vtype = volume_types.create(self.ctxt, 'type1')
vtype_id = vtype.get('id')
volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id)
vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id)
self.assertIn(project_id, [a.project_id for a in vtype_access])
def test_remove_access(self):
project_id = '456'
vtype = volume_types.create(self.ctxt, 'type1', projects=['456'])
vtype_id = vtype.get('id')
volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id)
vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id)
self.assertNotIn(project_id, vtype_access)
def test_get_volume_type_qos_specs(self):
qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'k1': 'v1',
'k2': 'v2',
'k3': 'v3'})
type_ref = volume_types.create(self.ctxt, "type1", {"key2": "val2",
"key3": "val3"})
res = volume_types.get_volume_type_qos_specs(type_ref['id'])
self.assertIsNone(res['qos_specs'])
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
expected = {'qos_specs': {'id': qos_ref['id'],
'name': 'qos-specs-1',
'consumer': 'back-end',
'specs': {
'k1': 'v1',
'k2': 'v2',
'k3': 'v3'}}}
res = volume_types.get_volume_type_qos_specs(type_ref['id'])
self.assertDictMatch(expected, res)
def test_volume_types_diff(self):
# type_ref 1 and 2 have the same extra_specs, while 3 has different
keyvals1 = {"key1": "val1", "key2": "val2"}
keyvals2 = {"key1": "val0", "key2": "val2"}
type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1)
type_ref2 = volume_types.create(self.ctxt, "type2", keyvals1)
type_ref3 = volume_types.create(self.ctxt, "type3", keyvals2)
# Check equality with only extra_specs
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertTrue(same)
self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1'))
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref3['id'])
self.assertFalse(same)
self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val0'))
# qos_ref 1 and 2 have the same specs, while 3 has different
qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}
qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'}
qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1)
qos_ref2 = qos_specs.create(self.ctxt, 'qos-specs-2', qos_keyvals1)
qos_ref3 = qos_specs.create(self.ctxt, 'qos-specs-3', qos_keyvals2)
# Check equality with qos specs too
qos_specs.associate_qos_with_type(self.ctxt, qos_ref1['id'],
type_ref1['id'])
qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'],
type_ref2['id'])
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertTrue(same)
self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1'))
self.assertEqual(diff['qos_specs']['k1'], ('v1', 'v1'))
qos_specs.disassociate_qos_specs(self.ctxt, qos_ref2['id'],
type_ref2['id'])
qos_specs.associate_qos_with_type(self.ctxt, qos_ref3['id'],
type_ref2['id'])
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertFalse(same)
self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1'))
self.assertEqual(diff['qos_specs']['k1'], ('v1', 'v0'))
qos_specs.disassociate_qos_specs(self.ctxt, qos_ref3['id'],
type_ref2['id'])
qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'],
type_ref2['id'])
# And add encryption for good measure
enc_keyvals1 = {'cipher': 'c1', 'key_size': 256, 'provider': 'p1',
'control_location': 'front-end',
'encryption_id': 'uuid1'}
enc_keyvals2 = {'cipher': 'c1', 'key_size': 128, 'provider': 'p1',
'control_location': 'front-end',
'encryption_id': 'uuid2'}
db.volume_type_encryption_create(self.ctxt, type_ref1['id'],
enc_keyvals1)
db.volume_type_encryption_create(self.ctxt, type_ref2['id'],
enc_keyvals2)
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertFalse(same)
self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1'))
self.assertEqual(diff['qos_specs']['k1'], ('v1', 'v1'))
self.assertEqual(diff['encryption']['key_size'], (256, 128))
# Check diff equals type specs when one type is None
diff, same = volume_types.volume_types_diff(self.ctxt, None,
type_ref1['id'])
self.assertFalse(same)
self.assertEqual(diff['extra_specs'],
{'key1': (None, 'val1'), 'key2': (None, 'val2')})
self.assertEqual(diff['qos_specs'],
{'consumer': (None, 'back-end'),
'k1': (None, 'v1'),
'k2': (None, 'v2'),
'k3': (None, 'v3')})
self.assertEqual(diff['encryption'],
{'cipher': (None, 'c1'),
'control_location': (None, 'front-end'),
'deleted': (None, False),
'key_size': (None, 256),
'provider': (None, 'p1'),
'encryption_id': (None, 'uuid1')})
def test_encryption_create(self):
volume_type = volume_types.create(self.ctxt, "type1")
volume_type_id = volume_type.get('id')
encryption = {
'control_location': 'front-end',
'provider': 'fake_provider',
}
db_api.volume_type_encryption_create(self.ctxt, volume_type_id,
encryption)
self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id))
def test_get_volume_type_encryption(self):
volume_type = volume_types.create(self.ctxt, "type1")
volume_type_id = volume_type.get('id')
encryption = {
'control_location': 'front-end',
'provider': 'fake_provider',
}
db.volume_type_encryption_create(self.ctxt, volume_type_id,
encryption)
ret = volume_types.get_volume_type_encryption(self.ctxt,
volume_type_id)
self.assertIsNotNone(ret)
def test_get_volume_type_encryption_without_volume_type_id(self):
ret = volume_types.get_volume_type_encryption(self.ctxt, None)
self.assertIsNone(ret)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides the function :py:func:`summary` that is used for printing
an `execution summary
<https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py>`_
at the end of luigi invocations.
"""
import textwrap
import datetime
def _partition_tasks(worker):
"""
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
"""
task_history = worker._add_task_history
pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'}
set_tasks = {}
set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks}
set_tasks["already_done"] = {task for (task, status, ext) in task_history if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]}
set_tasks["failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'}
set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["failed"] and task not in set_tasks["completed"] and not ext}
set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["failed"] and task not in set_tasks["completed"] and ext}
set_tasks["run_by_other_worker"] = set()
set_tasks["upstream_failure"] = set()
set_tasks["upstream_missing_dependency"] = set()
set_tasks["upstream_run_by_other_worker"] = set()
set_tasks["unknown_reason"] = set()
return set_tasks
def _populate_unknown_statuses(set_tasks):
"""
Add the "upstream_*" and "unknown_reason" statuses my mutating set_tasks.
"""
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visited)
def _depth_first_search(set_tasks, current_task, visited):
"""
This dfs checks why tasks are still pending.
"""
visited.add(current_task)
if current_task in set_tasks["still_pending_not_ext"]:
upstream_failure = False
upstream_missing_dependency = False
upstream_run_by_other_worker = False
for task in current_task._requires():
if task not in visited:
_depth_first_search(set_tasks, task, visited)
if task in set_tasks["failed"] or task in set_tasks["upstream_failure"]:
set_tasks["upstream_failure"].add(current_task)
upstream_failure = True
if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]:
set_tasks["upstream_missing_dependency"].add(current_task)
upstream_missing_dependency = True
if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]:
set_tasks["upstream_run_by_other_worker"].add(current_task)
upstream_run_by_other_worker = True
if not upstream_failure and not upstream_missing_dependency and not upstream_run_by_other_worker and current_task not in set_tasks["run_by_other_worker"]:
set_tasks["unknown_reason"].add(current_task)
def _get_str(task_dict, extra_indent):
"""
This returns a string for each status
"""
lines = []
for task_family, tasks in task_dict.items():
row = ' '
if extra_indent:
row += ' '
if len(lines) >= 5:
"""
This is how many rows will be printed for each status. If you want fewer rows you can lower the limit.
"""
row += '...'
lines.append(row)
break
if len(tasks[0].get_params()) == 0:
row += '- {0} {1}()'.format(len(tasks), str(task_family))
elif _get_len_of_params(tasks[0]) > 60 or (len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)) or len(str(tasks[0])) > 200:
"""
This is to make sure that there is no really long task in the output
"""
row += '- {0} {1}(...)'.format(len(tasks), task_family)
elif len((tasks[0].get_params())) == 1:
attributes = sorted({getattr(task, tasks[0].get_params()[0][0]) for task in tasks})
row += '- {0} {1}({2}='.format(len(tasks), task_family, tasks[0].get_params()[0][0])
if _ranging_attributes(attributes, tasks[0].get_params()[0]) and len(attributes) > 3:
row += '{0}...{1}'.format(tasks[0].get_params()[0][1].serialize(attributes[0]), tasks[0].get_params()[0][1].serialize(attributes[-1]))
else:
row += '{0}'.format(_get_str_one_parameter(tasks))
row += ")"
else:
ranging = False
params = _get_set_of_params(tasks)
unique_param_keys = list(_get_unique_param_keys(params))
if len(unique_param_keys) == 1:
unique_param, = unique_param_keys
attributes = sorted(params[unique_param])
if _ranging_attributes(attributes, unique_param) and len(attributes) > 2:
ranging = True
row += '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(attributes, tasks, unique_param))
if not ranging:
if len(tasks) == 1:
row += '- {0} {1}'.format(len(tasks), tasks[0])
if len(tasks) == 2:
row += '- {0} and {1}'.format(tasks[0], tasks[1])
if len(tasks) > 2:
row += '- {0} and {1} other {2}'.format(tasks[0], len(tasks) - 1, task_family)
lines.append(row)
return '\n'.join(lines)
def _get_len_of_params(task):
return sum(len(param[0]) for param in task.get_params())
def _get_str_ranging_multiple_parameters(attributes, tasks, unique_param):
row = ''
str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(attributes[0]), unique_param[1].serialize(attributes[-1]))
for param in tasks[0].get_params():
row += '{0}='.format(param[0])
if param[0] == unique_param[0]:
row += '{0}'.format(str_unique_param)
else:
row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0])))
if param != tasks[0].get_params()[-1]:
row += ", "
row += ')'
return row
def _get_set_of_params(tasks):
params = {}
for param in tasks[0].get_params():
params[param] = {getattr(task, param[0]) for task in tasks}
return params
def _get_unique_param_keys(params):
for param_key, param_values in params.items():
if len(param_values) > 1:
yield param_key
def _ranging_attributes(attributes, unique_param):
"""
Checks if there is a continuous range
"""
if len(attributes) > 2:
if unique_param[1].next_in_enumeration(attributes[0]) is None:
return False
for i in range(1, len(attributes)):
if unique_param[1].next_in_enumeration(attributes[i - 1]) != attributes[i]:
return False
return True
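# Worked example (illustrative only, not part of the original module): with an
# IntParameter-like object whose next_in_enumeration(3) returns 4, the sorted
# attributes [3, 4, 5, 6] form a contiguous range and are rendered as "3...6"
# by _get_str, whereas [3, 5, 6] are not and get listed individually.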
def _get_str_one_parameter(tasks):
row = ''
count = 0
for task in tasks:
if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200:
row += '...'
break
row += '{0}'.format(getattr(task, task.get_params()[0][0]))
if count < len(tasks) - 1:
row += ','
count += 1
return row
def _serialize_first_param(task):
return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0]))
def _get_number_of_tasks_for(status, group_tasks):
if status == "still_pending":
return (_get_number_of_tasks(group_tasks["still_pending_ext"]) +
_get_number_of_tasks(group_tasks["still_pending_not_ext"]))
return _get_number_of_tasks(group_tasks[status])
def _get_number_of_tasks(task_dict):
return sum(len(tasks) for tasks in task_dict.values())
def _get_comments(group_tasks):
"""
Get the human readable comments and quantities for the task types.
"""
comments = {}
for status, human in _COMMENTS:
num_tasks = _get_number_of_tasks_for(status, group_tasks)
if num_tasks:
space = " " if status in _PENDING_SUB_STATUSES else ""
comments[status] = '{space}* {num_tasks} {human}:\n'.format(
space=space,
num_tasks=num_tasks,
human=human)
return comments
# Ordered in the sense that they'll be printed in this order
_ORDERED_STATUSES = (
"already_done",
"completed",
"failed",
"still_pending",
"still_pending_ext",
"run_by_other_worker",
"upstream_failure",
"upstream_missing_dependency",
"upstream_run_by_other_worker",
"unknown_reason",
)
_PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):])
_COMMENTS = set((
("already_done", 'present dependencies were encountered'),
("completed", 'ran successfully'),
("failed", 'failed'),
("still_pending", 'were left pending, among these'),
("still_pending_ext", 'were missing external dependencies'),
("run_by_other_worker", 'were being run by another worker'),
("upstream_failure", 'had failed dependencies'),
("upstream_missing_dependency", 'had missing external dependencies'),
("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
("unknown_reason", 'were left pending because of unknown reason'),
))
def _get_run_by_other_worker(worker):
"""
    This returns a set of the tasks that are being run by other workers
"""
worker_that_blocked_task = dict()
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_task:
worker_that_blocked_task[other_task] = other_worker_id
return set(worker_that_blocked_task.keys())
def _get_external_workers(worker):
"""
This returns a dict with a set of tasks for all of the other workers
"""
worker_that_blocked_task = dict()
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_task:
if other_worker_id not in worker_that_blocked_task.keys():
worker_that_blocked_task[other_worker_id] = set()
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task
def _group_tasks_by_name_and_status(task_dict):
"""
    Takes an iterable of tasks (typically the set for one status) and returns a dictionary grouping those tasks into lists keyed by task family
"""
group_status = {}
for task in task_dict:
if task.task_family not in group_status:
group_status[task.task_family] = []
group_status[task.task_family].append(task)
return group_status
def _summary_dict(worker):
set_tasks = _partition_tasks(worker)
set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker)
_populate_unknown_statuses(set_tasks)
return set_tasks
def _summary_format(set_tasks, worker):
group_tasks = {}
for status, task_dict in set_tasks.items():
group_tasks[status] = _group_tasks_by_name_and_status(task_dict)
str_tasks = {}
comments = _get_comments(group_tasks)
num_all_tasks = len(set_tasks["already_done"]) + len(set_tasks["completed"]) + len(set_tasks["failed"]) + len(set_tasks["still_pending_ext"]) + len(set_tasks["still_pending_not_ext"])
str_output = ''
str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks)
for status in _ORDERED_STATUSES:
if status not in comments:
continue
str_output += '{0}'.format(comments[status])
if status != 'still_pending':
str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES))
ext_workers = _get_external_workers(worker)
group_tasks_ext_workers = {}
for ext_worker, task_dict in ext_workers.items():
group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict)
if len(ext_workers) > 0:
str_output += "\nThe other workers were:\n"
count = 0
for ext_worker, task_dict in ext_workers.items():
if count > 3 and count < len(ext_workers) - 1:
str_output += " and {0} other workers".format(len(ext_workers) - count)
break
str_output += " - {0} ran {1} tasks\n".format(ext_worker, len(task_dict))
count += 1
str_output += '\n'
if num_all_tasks == len(set_tasks["already_done"]) + len(set_tasks["still_pending_ext"]) + len(set_tasks["still_pending_not_ext"]):
if len(ext_workers) == 0:
str_output += '\n'
str_output += 'Did not run any tasks'
smiley = ""
reason = ""
if len(set_tasks["failed"]):
smiley = ":("
reason = "there were failed tasks"
elif len(set_tasks["still_pending_ext"]):
smiley = ":|"
reason = "there were missing external dependencies"
else:
smiley = ":)"
reason = "there were no failed tasks or missing external dependencies"
str_output += "\nThis progress looks {0} because {1}".format(smiley, reason)
if num_all_tasks == 0:
str_output = 'Did not schedule any tasks'
return str_output
def _summary_wrap(str_output):
return textwrap.dedent("""
===== Luigi Execution Summary =====
{str_output}
===== Luigi Execution Summary =====
""").format(str_output=str_output)
def summary(worker):
"""
    Given a worker, return a human readable summary of what the worker has
    done.
"""
return _summary_wrap(_summary_format(_summary_dict(worker), worker))
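# Illustrative only (not part of the module): for a run with three scheduled
# tasks, the wrapped summary produced by summary(worker) looks roughly like
#
#     ===== Luigi Execution Summary =====
#     Scheduled 3 tasks of which:
#     * 2 ran successfully:
#         - 2 MyTask(num=1,2)
#     * 1 were left pending, among these:
#         * 1 were missing external dependencies:
#             - 1 SomeExternalData()
#
#     This progress looks :| because there were missing external dependencies
#     ===== Luigi Execution Summary =====
#
# where MyTask, SomeExternalData and the counts are hypothetical.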
|
|
#!/usr/bin/env python3
from strips import *
#def astar(p, s, a):
# start = (p, s, a)
# closed = []
# open = []
# gh = []
#
# def heuristic_cost_estimate(x): return 0
#
# def add_to_open(x, g, h):
# if x not in open:
# open.append(x)
# gh = (g, h)
#
# def find_next_best():
# current = None
# g, h = 0, 0
# for i in range(len(open)):
# if current is None or gh[i][0] + gh[i][1] < g + h:
# current = open[i]
# g, h = gh[i]
# return (current, g, h)
#
# def move_to_closed(x):
# if x in open:
# i = open.index(x)
# del open[i]
# del gh[i]
# if current not in closed:
# closed.append(x)
#
# def update_gh(x, g, h)
#
# add_to_open(start, 0, heuristic_cost_estimate(start))
#
# while open:
# current, g, h = find_next_best()
#
# p, s, a = current
# if p.final(s):
# yield current
#
# move_to_closed(current)
#
# for next1 in p.trans(s):
# if next1 in closed:
# continue
# p1, s1, a1 = next1
# g1 = g + 1 # 1 == dist_between(current, next1)
#
# if next1 not in open or g1 < gh[open.index(next1)][0]:
# i = open.index(next1)
# gh[next1][0] = g1
# if next1 not in open:
# open.add(next1)
def trans_star(p, s, a):
if p.final(s):
yield (p, s, a)
for p1, s1, a1 in p.trans(s):
yield from trans_star(p1, s1, a + a1)
def indigolog(p, s, a, exec_cb=lambda a: None, exog_cb=lambda s: s):
# at each step apply exogenous events if any:
s = exog_cb(s)
for p1, s1, a1 in p.trans(s):
# commit to the first step, since we are executing in an online fashion:
exec_cb(a1)
return indigolog(p1, s1, a + a1, exec_cb, exog_cb)
else: return p.final(s)
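# Usage sketch (illustrative, not part of the original file): execute a program
# online, printing each list of actions as it is committed and applying no
# exogenous events between steps:
#
#     indigolog(prog, initial_state, [], exec_cb=print)
#
# where `prog` and `initial_state` are a hypothetical program/state pair built
# from the constructs defined below.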
class Program:
pass
class Choose(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = Choose(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
yield from self.p1.trans(s)
yield from self.p2.trans(s)
def final(self, s):
return self.p1.final(s) or self.p2.final(s)
def __repr__(self): return 'Choose(%s, %s)' % (self.p1, self.p2)
class Empty(Program):
def trans(self, s):
yield from () # yield nothing
def final(self, s):
return True
def __repr__(self): return 'Empty()'
class Exec(Program):
def __init__(self, ground_action):
self.ground_action = ground_action
def trans(self, s):
try: yield (Empty(), self.ground_action.apply(s), [self.ground_action])
except UnsatisfiedPreconditions: pass
def final(self, s):
return False
def __repr__(self): return 'Exec(%s)' % (self.ground_action)
class If(Program):
def __init__(self, condition, p1, p2):
self.condition = condition
self.p1 = p1
self.p2 = p2
def trans(self, s):
if self.condition(s): yield from self.p1.trans(s)
else: yield from self.p2.trans(s)
def final(self, s):
if self.condition(s): return self.p1.final(s)
else: return self.p2.final(s)
def __repr__(self): return 'If(%s, %s, %s)' % (self.condition, self.p1, self.p2)
class Pick(Program):
def __init__(self, domain, p1):
self.domain = domain
self.p1 = p1
def trans(self, s):
for obj in Object.get_objects_of_type(self.domain):
yield from self.p1(obj).trans(s)
def final(self, s):
for obj in Object.get_objects_of_type(self.domain):
if self.p1(obj).final(s): return True
return False
def __repr__(self): return 'Pick(%s, %s)' % (self.domain.__name__, self.p1)
class Search(Program):
def __init__(self, p1):
self.p1 = p1
def trans(self, s):
yield from trans_star(self.p1, s, [])
def final(self, s):
return any(trans_star(self.p1, s, []))
def __repr__(self): return 'Search(%s)' % self.p1
class Sequence(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = Sequence(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
if self.p1.final(s):
yield from self.p2.trans(s)
for pn, sn, an in self.p1.trans(s):
yield (Sequence(pn, self.p2), sn, an)
def final(self, s):
return self.p1.final(s) and self.p2.final(s)
def __repr__(self): return 'Sequence(%s, %s)' % (self.p1, self.p2)
class Star(Program):
def __init__(self, p1):
self.p1 = p1
def trans(self, s):
for pn, sn, an in self.p1.trans(s):
yield (Sequence(pn, self), sn, an)
def final(self, s):
return True
def __repr__(self): return 'Star(%s)' % (self.p1)
class Test(Program):
def __init__(self, condition):
self.condition = condition
def trans(self, s):
if self.condition(s):
yield (Empty(), s, [])
def final(self, s):
return False
def __repr__(self): return 'Test(%s)' % self.condition
class While(Program):
def __init__(self, condition, p1):
self.condition = condition
self.p1 = p1
def trans(self, s):
if self.condition(s):
for pn, sn, an in self.p1.trans(s):
yield (Sequence(pn, self), sn, an)
def final(self, s):
return not self.condition(s) or self.p1.final(s)
def __repr__(self): return 'While(%s, %s)' % (self.condition, self.p1)
# ConGolog constructs:
class Conc(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = Conc(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
p1_trans = False
for pn, sn, an in self.p1.trans(s):
p1_trans = True
yield (Conc(pn, self.p2), sn, an)
if p1_trans: return
for pn, sn, an in self.p2.trans(s):
yield (Conc(self.p1, pn), sn, an)
def final(self, s):
return self.p1.final(s) and self.p2.final(s)
def __repr__(self): return 'Conc(%s, %s)' % (self.p1, self.p2)
class PConc(Program):
def __init__(self, p1, p2, *ps):
self.p1 = p1
self.p2 = PConc(p2, ps[0], *ps[1:]) if ps else p2
def trans(self, s):
p1_trans = False
for pn, sn, an in self.p1.trans(s):
p1_trans = True
yield (PConc(pn, self.p2), sn, an)
if p1_trans: return
for pn, sn, an in self.p2.trans(s):
yield (PConc(self.p1, pn), sn, an)
def final(self, s):
return self.p1.final(s) and self.p2.final(s)
def __repr__(self): return 'PConc(%s, %s)' % (self.p1, self.p2)
class IConc(Program):
def __init__(self, p1):
self.p1 = p1
def trans(self, s):
for pn, sn, an in self.p1.trans(s):
yield (Conc(pn, IConc(self.p1)), sn, an)
def final(self, s):
return True
def __repr__(self): return 'IConc(%s)' % (self.p1)
def interrupt(trigger, body):
return While(lambda s: True, If(trigger, body, Test(lambda s: False)))
def prioritized_interrupts(*args):
return PConc(*args)
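if __name__ == '__main__':
    # Minimal self-check sketch (not part of the original module). The constructs
    # only hand the state to the condition callables, so a plain dict stands in
    # for a real STRIPS state here; no ground actions are needed.
    demo_state = {'holding': None}
    demo_prog = Sequence(Test(lambda s: True), Choose(Empty(), Empty()))
    for final_prog, final_state, actions in trans_star(demo_prog, demo_state, []):
        print('final configuration:', final_prog, 'actions taken:', actions)
        break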
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A wrapper for the GStreamer Python bindings that exposes a simple
music player.
"""
from __future__ import division, absolute_import, print_function
import six
import sys
import time
from six.moves import _thread
import os
import copy
from six.moves import urllib
from beets import ui
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst # noqa: E402
Gst.init(None)
class QueryError(Exception):
pass
class GstPlayer(object):
"""A music player abstracting GStreamer's Playbin element.
Create a player object, then call run() to start a thread with a
runloop. Then call play_file to play music. Use player.playing
to check whether music is currently playing.
A basic play queue is also implemented (just a Python list,
player.queue, whose last element is next to play). To use it,
just call enqueue() and then play(). When a track finishes and
another is available on the queue, it is played automatically.
"""
def __init__(self, finished_callback=None):
"""Initialize a player.
If a finished_callback is provided, it is called every time a
track started with play_file finishes.
Once the player has been created, call run() to begin the main
runloop in a separate thread.
"""
# Set up the Gstreamer player. From the pygst tutorial:
# https://pygstdocs.berlios.de/pygst-tutorial/playbin.html (gone)
# https://brettviren.github.io/pygst-tutorial-org/pygst-tutorial.html
####
# Updated to GStreamer 1.0 with:
# https://wiki.ubuntu.com/Novacut/GStreamer1.0
self.player = Gst.ElementFactory.make("playbin", "player")
if self.player is None:
raise ui.UserError("Could not create playbin")
fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
if fakesink is None:
raise ui.UserError("Could not create fakesink")
self.player.set_property("video-sink", fakesink)
bus = self.player.get_bus()
bus.add_signal_watch()
bus.connect("message", self._handle_message)
# Set up our own stuff.
self.playing = False
self.finished_callback = finished_callback
self.cached_time = None
self._volume = 1.0
def _get_state(self):
"""Returns the current state flag of the playbin."""
# gst's get_state function returns a 3-tuple; we just want the
# status flag in position 1.
return self.player.get_state(Gst.CLOCK_TIME_NONE)[1]
def _handle_message(self, bus, message):
"""Callback for status updates from GStreamer."""
if message.type == Gst.MessageType.EOS:
# file finished playing
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
if self.finished_callback:
self.finished_callback()
elif message.type == Gst.MessageType.ERROR:
# error
self.player.set_state(Gst.State.NULL)
err, debug = message.parse_error()
print(u"Error: {0}".format(err))
self.playing = False
def _set_volume(self, volume):
"""Set the volume level to a value in the range [0, 1.5]."""
# And the volume for the playbin.
self._volume = volume
self.player.set_property("volume", volume)
def _get_volume(self):
"""Get the volume as a float in the range [0, 1.5]."""
return self._volume
volume = property(_get_volume, _set_volume)
def play_file(self, path):
"""Immediately begin playing the audio file at the given
path.
"""
self.player.set_state(Gst.State.NULL)
if isinstance(path, six.text_type):
path = path.encode('utf-8')
uri = 'file://' + urllib.parse.quote(path)
self.player.set_property("uri", uri)
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def play(self):
"""If paused, resume playback."""
if self._get_state() == Gst.State.PAUSED:
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def pause(self):
"""Pause playback."""
self.player.set_state(Gst.State.PAUSED)
def stop(self):
"""Halt playback."""
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
def run(self):
"""Start a new thread for the player.
Call this function before trying to play any music with
play_file() or play().
"""
# If we don't use the MainLoop, messages are never sent.
def start():
loop = GLib.MainLoop()
loop.run()
_thread.start_new_thread(start, ())
def time(self):
"""Returns a tuple containing (position, length) where both
values are integers in seconds. If no stream is available,
returns (0, 0).
"""
fmt = Gst.Format(Gst.Format.TIME)
try:
posq = self.player.query_position(fmt)
if not posq[0]:
raise QueryError("query_position failed")
pos = posq[1] / (10 ** 9)
lengthq = self.player.query_duration(fmt)
if not lengthq[0]:
raise QueryError("query_duration failed")
length = lengthq[1] / (10 ** 9)
self.cached_time = (pos, length)
return (pos, length)
except QueryError:
# Stream not ready. For small gaps of time, for instance
# after seeking, the time values are unavailable. For this
            # reason, we cache the most recent values.
if self.playing and self.cached_time:
return self.cached_time
else:
return (0, 0)
def seek(self, position):
"""Seeks to position (in seconds)."""
cur_pos, cur_len = self.time()
if position > cur_len:
self.stop()
return
fmt = Gst.Format(Gst.Format.TIME)
ns = position * 10 ** 9 # convert to nanoseconds
self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns)
# save new cached time
self.cached_time = (position, cur_len)
def block(self):
"""Block until playing finishes."""
while self.playing:
time.sleep(1)
def get_decoders(self):
return get_decoders()
def get_decoders():
"""Get supported audio decoders from GStreamer.
Returns a dict mapping decoder element names to the associated media types
and file extensions.
"""
# We only care about audio decoder elements.
filt = (Gst.ELEMENT_FACTORY_TYPE_DEPAYLOADER |
Gst.ELEMENT_FACTORY_TYPE_DEMUXER |
Gst.ELEMENT_FACTORY_TYPE_PARSER |
Gst.ELEMENT_FACTORY_TYPE_DECODER |
Gst.ELEMENT_FACTORY_TYPE_MEDIA_AUDIO)
decoders = {}
mime_types = set()
for f in Gst.ElementFactory.list_get_elements(filt, Gst.Rank.NONE):
for pad in f.get_static_pad_templates():
if pad.direction == Gst.PadDirection.SINK:
caps = pad.static_caps.get()
mimes = set()
for i in range(caps.get_size()):
struct = caps.get_structure(i)
mime = struct.get_name()
if mime == 'unknown/unknown':
continue
mimes.add(mime)
mime_types.add(mime)
if mimes:
decoders[f.get_name()] = (mimes, set())
    # Check all the TypeFindFactory plugin features from the registry. If they
# are associated with an audio media type that we found above, get the list
# of corresponding file extensions.
mime_extensions = {mime: set() for mime in mime_types}
for feat in Gst.Registry.get().get_feature_list(Gst.TypeFindFactory):
caps = feat.get_caps()
if caps:
for i in range(caps.get_size()):
struct = caps.get_structure(i)
mime = struct.get_name()
if mime in mime_types:
mime_extensions[mime].update(feat.get_extensions())
# Fill in the slot we left for file extensions.
for name, (mimes, exts) in decoders.items():
for mime in mimes:
exts.update(mime_extensions[mime])
return decoders
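# A small debugging helper follows; it is not part of the original module and
# only exists to illustrate the structure returned by get_decoders().
def print_decoders():
    """Print the decoder map built by get_decoders() so the media-type and
    file-extension pairing for each element is visible.
    """
    for name, (mimes, exts) in sorted(get_decoders().items()):
        print(u'{0}: {1} [{2}]'.format(name,
                                       ', '.join(sorted(mimes)),
                                       ', '.join(sorted(exts))))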
def play_simple(paths):
"""Play the files in paths in a straightforward way, without
using the player's callback function.
"""
p = GstPlayer()
p.run()
for path in paths:
p.play_file(path)
p.block()
def play_complicated(paths):
"""Play the files in the path one after the other by using the
callback function to advance to the next song.
"""
my_paths = copy.copy(paths)
def next_song():
my_paths.pop(0)
p.play_file(my_paths[0])
p = GstPlayer(next_song)
p.run()
p.play_file(my_paths[0])
while my_paths:
time.sleep(1)
if __name__ == '__main__':
# A very simple command-line player. Just give it names of audio
# files on the command line; these are all played in sequence.
paths = [os.path.abspath(os.path.expanduser(p))
for p in sys.argv[1:]]
# play_simple(paths)
play_complicated(paths)
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import time
import IECore
import Gaffer
import GafferScene
import GafferRenderMan
class InteractiveRenderManRenderTest( unittest.TestCase ) :
def __colorAtUV( self, image, uv ) :
e = IECore.ImagePrimitiveEvaluator( image )
r = e.createResult()
e.pointAtUV( uv, r )
return IECore.Color3f(
r.floatPrimVar( image["R"] ),
r.floatPrimVar( image["G"] ),
r.floatPrimVar( image["B"] ),
)
def testLights( self ) :
s = Gaffer.ScriptNode()
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "pointlight" )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
s["l"]["transform"]["translate"]["z"].setValue( 1 )
s["p"] = GafferScene.Plane()
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["l"]["out"] )
s["g"]["in1"].setInput( s["p"]["out"] )
s["g"]["in2"].setInput( s["c"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "matte" )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
# adjust a parameter, give it time to update, and check the output
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0.25, 0.5, 1 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
# pause it, adjust a parameter, wait, and check that nothing changed
s["r"]["state"].setValue( s["r"].State.Paused )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
# unpause it, wait, and check that the update happened
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
# turn off light updates, adjust a parameter, wait, and check nothing happened
s["r"]["updateLights"].setValue( False )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0.25, 0.5, 1 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
# turn light updates back on and check that it updates
s["r"]["updateLights"].setValue( True )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
# stop the render, tweak a parameter and check that nothing happened
s["r"]["state"].setValue( s["r"].State.Stopped )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
def testShaders( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["p"]["transform"]["translate"].setValue( IECore.V3f( -0.1, -0.1, 0 ) )
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "ambientlight" )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["p"]["out"] )
s["g"]["in1"].setInput( s["c"]["out"] )
s["g"]["in2"].setInput( s["l"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "checker" )
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
s["s"]["parameters"]["Ka"].setValue( 1 )
s["s"]["parameters"]["frequency"].setValue( 1 )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1, 0.5, 0.25 ) )
# adjust a shader parameter, wait, and check that it changed
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 1, 1, 1 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1 ) )
# turn off shader updates, do the same, and check that it hasn't changed
s["r"]["updateShaders"].setValue( False )
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 0.5 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1 ) )
# turn shader updates back on, and check that it updates
s["r"]["updateShaders"].setValue( True )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 0.5 ) )
def testScopesDontLeak( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["p"]["transform"]["translate"].setValue( IECore.V3f( -0.6, -0.1, 0 ) )
s["p1"] = GafferScene.Plane()
s["p1"]["transform"]["translate"].setValue( IECore.V3f( 0.6, 0.1, 0 ) )
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 2 )
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "ambientlight" )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["p"]["out"] )
s["g"]["in1"].setInput( s["p1"]["out"] )
s["g"]["in2"].setInput( s["c"]["out"] )
s["g"]["in3"].setInput( s["l"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "checker" )
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 1, 0, 0 ) )
s["s"]["parameters"]["Ka"].setValue( 1 )
s["s"]["parameters"]["frequency"].setValue( 1 )
s["f"] = GafferScene.PathFilter()
s["f"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["a"]["filter"].setInput( s["f"]["match"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlanes",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["options"]["renderResolution"]["value"].setValue( IECore.V2i( 512 ) )
s["o"]["options"]["renderResolution"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output.
# we should have a red plane on the left, and a facing ratio
# shaded plane on the right, because we attached no shader to the
# second plane.
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.25, 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1, 0, 0 ) )
c1 = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.75, 0.5 ),
)
self.assertTrue( c1[0] > 0.9 )
self.assertEqual( c1[0], c1[1] )
self.assertEqual( c1[0], c1[2] )
# adjust a shader parameter, wait, and check that the plane
# on the left changed. check that the plane on the right didn't
# change at all.
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 0, 1, 0 ) )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.25, 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 0, 1, 0 ) )
c1 = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.75, 0.5 ),
)
self.assertTrue( c1[0] > 0.9 )
self.assertEqual( c1[0], c1[1] )
self.assertEqual( c1[0], c1[2] )
def testContext( self ):
s = Gaffer.ScriptNode()
r = GafferRenderMan.InteractiveRenderManRender()
self.assertNotEqual( r.getContext(), None )
self.failIf( r.getContext().isSame( s.context() ) )
s["r"] = r
self.failUnless( r.getContext().isSame( s.context() ) )
s.removeChild( r )
self.failIf( r.getContext().isSame( s.context() ) )
if __name__ == "__main__":
unittest.main()
|
|
"""Test config utils."""
# pylint: disable=too-many-public-methods,protected-access
import os
import unittest
import unittest.mock as mock
import pytest
from voluptuous import MultipleInvalid
from homeassistant.core import DOMAIN, HomeAssistantError, Config
import homeassistant.config as config_util
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_UNIT_SYSTEM, CONF_NAME,
CONF_TIME_ZONE, CONF_ELEVATION, CONF_CUSTOMIZE, __version__,
CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL, CONF_TEMPERATURE_UNIT)
from homeassistant.util import location as location_util, dt as dt_util
from homeassistant.util.async import run_coroutine_threadsafe
from homeassistant.helpers.entity import Entity
from tests.common import (
get_test_config_dir, get_test_home_assistant)
CONFIG_DIR = get_test_config_dir()
YAML_PATH = os.path.join(CONFIG_DIR, config_util.YAML_CONFIG_FILE)
VERSION_PATH = os.path.join(CONFIG_DIR, config_util.VERSION_FILE)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
def create_file(path):
"""Create an empty file."""
with open(path, 'w'):
pass
class TestConfig(unittest.TestCase):
"""Test the configutils."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize a test Home Assistant instance."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Clean up."""
dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE
if os.path.isfile(YAML_PATH):
os.remove(YAML_PATH)
if os.path.isfile(VERSION_PATH):
os.remove(VERSION_PATH)
self.hass.stop()
def test_create_default_config(self):
"""Test creation of default config."""
config_util.create_default_config(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
def test_find_config_file_yaml(self):
"""Test if it finds a YAML config file."""
create_file(YAML_PATH)
self.assertEqual(YAML_PATH, config_util.find_config_file(CONFIG_DIR))
@mock.patch('builtins.print')
def test_ensure_config_exists_creates_config(self, mock_print):
"""Test that calling ensure_config_exists.
If not creates a new config file.
"""
config_util.ensure_config_exists(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
self.assertTrue(mock_print.called)
def test_ensure_config_exists_uses_existing_config(self):
"""Test that calling ensure_config_exists uses existing config."""
create_file(YAML_PATH)
config_util.ensure_config_exists(CONFIG_DIR, False)
with open(YAML_PATH) as f:
content = f.read()
        # Files created with create_file are empty
self.assertEqual('', content)
def test_load_yaml_config_converts_empty_files_to_dict(self):
"""Test that loading an empty file returns an empty dict."""
create_file(YAML_PATH)
self.assertIsInstance(
config_util.load_yaml_config_file(YAML_PATH), dict)
def test_load_yaml_config_raises_error_if_not_dict(self):
"""Test error raised when YAML file is not a dict."""
with open(YAML_PATH, 'w') as f:
f.write('5')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_malformed_yaml(self):
"""Test error raised if invalid YAML."""
with open(YAML_PATH, 'w') as f:
f.write(':')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_unsafe_yaml(self):
"""Test error raised if unsafe YAML."""
with open(YAML_PATH, 'w') as f:
f.write('hello: !!python/object/apply:os.system')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_preserves_key_order(self):
"""Test removal of library."""
with open(YAML_PATH, 'w') as f:
f.write('hello: 0\n')
f.write('world: 1\n')
self.assertEqual(
[('hello', 0), ('world', 1)],
list(config_util.load_yaml_config_file(YAML_PATH).items()))
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation', return_value=101)
@mock.patch('builtins.print')
def test_create_default_config_detect_location(self, mock_detect,
mock_elev, mock_print):
"""Test that detect location sets the correct config keys."""
config_util.ensure_config_exists(CONFIG_DIR)
config = config_util.load_yaml_config_file(YAML_PATH)
self.assertIn(DOMAIN, config)
ha_conf = config[DOMAIN]
expected_values = {
CONF_LATITUDE: 32.8594,
CONF_LONGITUDE: -117.2073,
CONF_ELEVATION: 101,
CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_METRIC,
CONF_NAME: 'Home',
CONF_TIME_ZONE: 'America/Los_Angeles'
}
assert expected_values == ha_conf
assert mock_print.called
@mock.patch('builtins.print')
def test_create_default_config_returns_none_if_write_error(self,
mock_print):
"""Test the writing of a default configuration.
        Non-existing folder returns None.
"""
self.assertIsNone(
config_util.create_default_config(
os.path.join(CONFIG_DIR, 'non_existing_dir/'), False))
self.assertTrue(mock_print.called)
def test_core_config_schema(self):
"""Test core config schema."""
for value in (
{CONF_UNIT_SYSTEM: 'K'},
{'time_zone': 'non-exist'},
{'latitude': '91'},
{'longitude': -181},
{'customize': 'bla'},
{'customize': {'invalid_entity_id': {}}},
{'customize': {'light.sensor': 100}},
):
with pytest.raises(MultipleInvalid):
config_util.CORE_CONFIG_SCHEMA(value)
config_util.CORE_CONFIG_SCHEMA({
'name': 'Test name',
'latitude': '-23.45',
'longitude': '123.45',
CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_METRIC,
'customize': {
'sensor.temperature': {
'hidden': True,
},
},
})
def test_entity_customization(self):
"""Test entity customization through configuration."""
config = {CONF_LATITUDE: 50,
CONF_LONGITUDE: 50,
CONF_NAME: 'Test',
CONF_CUSTOMIZE: {'test.test': {'hidden': True}}}
run_coroutine_threadsafe(
config_util.async_process_ha_core_config(self.hass, config),
self.hass.loop).result()
entity = Entity()
entity.entity_id = 'test.test'
entity.hass = self.hass
entity.update_ha_state()
self.hass.block_till_done()
state = self.hass.states.get('test.test')
assert state.attributes['hidden']
@mock.patch('homeassistant.config.shutil')
@mock.patch('homeassistant.config.os')
def test_remove_lib_on_upgrade(self, mock_os, mock_shutil):
"""Test removal of library on upgrade."""
ha_version = '0.7.0'
mock_os.path.isdir = mock.Mock(return_value=True)
mock_open = mock.mock_open()
with mock.patch('homeassistant.config.open', mock_open, create=True):
opened_file = mock_open.return_value
opened_file.readline.return_value = ha_version
self.hass.config.path = mock.Mock()
config_util.process_ha_config_upgrade(self.hass)
hass_path = self.hass.config.path.return_value
self.assertEqual(mock_os.path.isdir.call_count, 1)
self.assertEqual(
mock_os.path.isdir.call_args, mock.call(hass_path)
)
self.assertEqual(mock_shutil.rmtree.call_count, 1)
self.assertEqual(
mock_shutil.rmtree.call_args, mock.call(hass_path)
)
@mock.patch('homeassistant.config.shutil')
@mock.patch('homeassistant.config.os')
def test_not_remove_lib_if_not_upgrade(self, mock_os, mock_shutil):
"""Test removal of library with no upgrade."""
ha_version = __version__
mock_os.path.isdir = mock.Mock(return_value=True)
mock_open = mock.mock_open()
with mock.patch('homeassistant.config.open', mock_open, create=True):
opened_file = mock_open.return_value
opened_file.readline.return_value = ha_version
self.hass.config.path = mock.Mock()
config_util.process_ha_config_upgrade(self.hass)
assert mock_os.path.isdir.call_count == 0
assert mock_shutil.rmtree.call_count == 0
def test_loading_configuration(self):
"""Test loading core config onto hass object."""
self.hass.config = mock.Mock()
run_coroutine_threadsafe(
config_util.async_process_ha_core_config(self.hass, {
'latitude': 60,
'longitude': 50,
'elevation': 25,
'name': 'Huis',
CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_IMPERIAL,
'time_zone': 'America/New_York',
}), self.hass.loop).result()
assert self.hass.config.latitude == 60
assert self.hass.config.longitude == 50
assert self.hass.config.elevation == 25
assert self.hass.config.location_name == 'Huis'
assert self.hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL
assert self.hass.config.time_zone.zone == 'America/New_York'
def test_loading_configuration_temperature_unit(self):
"""Test backward compatibility when loading core config."""
self.hass.config = mock.Mock()
run_coroutine_threadsafe(
config_util.async_process_ha_core_config(self.hass, {
'latitude': 60,
'longitude': 50,
'elevation': 25,
'name': 'Huis',
CONF_TEMPERATURE_UNIT: 'C',
'time_zone': 'America/New_York',
}), self.hass.loop).result()
assert self.hass.config.latitude == 60
assert self.hass.config.longitude == 50
assert self.hass.config.elevation == 25
assert self.hass.config.location_name == 'Huis'
assert self.hass.config.units.name == CONF_UNIT_SYSTEM_METRIC
assert self.hass.config.time_zone.zone == 'America/New_York'
@mock.patch('homeassistant.util.location.detect_location_info',
autospec=True, return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation',
autospec=True, return_value=101)
def test_discovering_configuration(self, mock_detect, mock_elevation):
"""Test auto discovery for missing core configs."""
self.hass.config.latitude = None
self.hass.config.longitude = None
self.hass.config.elevation = None
self.hass.config.location_name = None
self.hass.config.time_zone = None
run_coroutine_threadsafe(
config_util.async_process_ha_core_config(
self.hass, {}), self.hass.loop
).result()
assert self.hass.config.latitude == 32.8594
assert self.hass.config.longitude == -117.2073
assert self.hass.config.elevation == 101
assert self.hass.config.location_name == 'San Diego'
assert self.hass.config.units.name == CONF_UNIT_SYSTEM_METRIC
assert self.hass.config.units.is_metric
assert self.hass.config.time_zone.zone == 'America/Los_Angeles'
@mock.patch('homeassistant.util.location.detect_location_info',
autospec=True, return_value=None)
@mock.patch('homeassistant.util.location.elevation', return_value=0)
def test_discovering_configuration_auto_detect_fails(self, mock_detect,
mock_elevation):
"""Test config remains unchanged if discovery fails."""
self.hass.config = Config()
run_coroutine_threadsafe(
config_util.async_process_ha_core_config(
self.hass, {}), self.hass.loop
).result()
blankConfig = Config()
assert self.hass.config.latitude == blankConfig.latitude
assert self.hass.config.longitude == blankConfig.longitude
assert self.hass.config.elevation == blankConfig.elevation
assert self.hass.config.location_name == blankConfig.location_name
assert self.hass.config.units == blankConfig.units
assert self.hass.config.time_zone == blankConfig.time_zone
|
|
# -*- coding: utf-8 -*-
import base64
from io import BytesIO
import logging
from urllib.parse import urlparse, urljoin
from lxml.html import HtmlElement
from PIL import Image as ImageLib, ImageFile
from scrapy.http import Request
from scrapy.pipelines.media import MediaPipeline
from mydm.util import is_url
logger = logging.getLogger(__name__)
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Image:
MAX_WIDTH = 1024
def __init__(self, data, type=None):
self._image = ImageLib.open(BytesIO(data))
if self._image.format.upper() == 'PNG':
buffer = BytesIO()
self._image.save(buffer, format='WebP')
self._image.close()
self._image = ImageLib.open(buffer)
@property
def size(self):
return self._image.size
@property
def type(self):
return self._image.format
def optimize(self, quality=75):
image = self._image
width, height = image.size
if width > self.MAX_WIDTH:
ratio = float(height) / float(width)
width = self.MAX_WIDTH
height = int(width * ratio)
image = image.resize(
(width, height),
ImageLib.ANTIALIAS
)
buffer = BytesIO()
image.save(
buffer,
format=self.type,
quality=quality,
)
return buffer.getvalue()
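# Illustrative helper (not part of the original pipeline): wrap raw bytes from a
# local file in Image and return the re-encoded, possibly downscaled payload.
# The path argument is hypothetical; the pipeline itself feeds response bodies.
def _optimize_image_file(path, quality=60):
    with open(path, 'rb') as f:
        image = Image(f.read())
    return image.optimize(quality=quality)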
class ImagesDlownloadPipeline(MediaPipeline):
MEDIA_NAME = 'image'
MAX_SIZE = 1024*256
def __init__(self, settings):
super().__init__(settings=settings)
self._category_filter = settings['IMAGE_OPTIMIZE_CATEGORY_FILTER']
self._invalid_img_element = [] # invalid img element list per item
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
pipe = cls(settings)
pipe.crawler = crawler
return pipe
@property
def spider(self):
return self.spiderinfo.spider
@property
def spider_name(self):
return self.spiderinfo.spider.name
@property
def spider_category(self):
return self.spiderinfo.spider.category
def need_optimize(self, size):
if self.spider_category in self._category_filter:
return False
if size < self.MAX_SIZE:
return False
return True
def get_media_requests(self, item, info):
self._invalid_img_element = []
doc = item['content']
assert isinstance(doc, HtmlElement)
attrs = {'src'}
img_attr = getattr(
self.spider,
'image_url_attr',
None,
)
if isinstance(img_attr, (list, tuple)):
attrs = attrs.union(img_attr)
elif img_attr:
attrs.add(img_attr)
urls = []
for e in doc.xpath('//img'):
def format_url(url, item):
url = url.strip('\r\n\t ')
if url.startswith('//'):
scheme = urlparse(item['link']).scheme
url = f'{scheme}:{url}'
elif url.startswith('/'):
url = urljoin(item['link'], url)
return url
if 'srcset' in e.attrib:
srcset = e.get('srcset')
url = srcset.split(',')[0].split(' ')[0]
url = format_url(url, item)
if is_url(url):
urls.append((url, e))
e.attrib.pop('srcset')
continue
for attr in attrs:
if attr not in e.attrib:
continue
url = e.get(attr)
url = format_url(url, item)
if not is_url(url):
continue
else:
urls.append((url, e))
break
else:
logger.error(
"spider[%s] can't find image link attribute",
self.spider_name
)
self._invalid_img_element.append(e)
requests = []
for url, e in urls:
if url.startswith('data'):
continue
try:
request = Request(url, meta={'image_xpath_node': e})
except ValueError:
logger.error(
'spider[%s] got invalid url[%s]',
self.spider_name,
url
)
else:
requests.append(request)
return requests
def media_failed(self, failure, request, info):
logger.error(
'spider[%s] download image[%s] failed',
self.spider_name,
request.url
)
def media_downloaded(self, response, request, info):
if not response.body:
logger.error(
'spider[%s] got size 0 image[%s]',
self.spider_name,
request.url
)
self._invalid_img_element.append(
response.meta['image_xpath_node']
)
return
image_xpath_node = response.meta['image_xpath_node']
src = response.url
data = response.body
image_size = len(data)
try:
image_type = response.headers['Content-Type'].split('/')[-1]
except Exception:
image_type = src.split('?')[0].split('.')[-1]
image_type = image_type.upper()
try:
image = Image(data, type=image_type)
except (OSError, IOError) as e:
logger.error(
'spider[%s] PILLOW open image[%s, %s] failed[%s]',
self.spider_name,
src,
image_type,
e
)
else:
if self.spider_category in self._category_filter:
width, _ = image.size
factor = 1
while True:
new_width = width // factor
if new_width <= 800:
width = new_width
break
factor = factor + 1
image_xpath_node.set('width', f'{width}px')
elif self.need_optimize(image_size):
data = image.optimize()
image_type = image.type.upper()
image_xpath_node.set('source', src)
data = base64.b64encode(data).decode('ascii')
if image_type == 'SVG':
type = 'SVG+xml'
else:
type = image_type
image_xpath_node.set(
'src',
f'data:image/{type};base64,{data}'
)
def item_completed(self, results, item, info):
for e in self._invalid_img_element:
e.drop_tree()
self._invalid_img_element = []
return item
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loader implementation for SavedModel with hermetic, language-neutral exports.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _parse_saved_model(export_dir):
"""Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`.
Args:
export_dir: Directory containing the SavedModel file.
Returns:
A `SavedModel` protocol buffer.
Raises:
IOError: If the file does not exist, or cannot be successfully parsed.
"""
# Build the path to the SavedModel in pbtxt format.
path_to_pbtxt = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
# Build the path to the SavedModel in pb format.
path_to_pb = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
# Parse the SavedModel protocol buffer.
saved_model = saved_model_pb2.SavedModel()
if file_io.file_exists(path_to_pb):
try:
file_content = file_io.FileIO(path_to_pb, "rb").read()
saved_model.ParseFromString(file_content)
return saved_model
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
elif file_io.file_exists(path_to_pbtxt):
try:
file_content = file_io.FileIO(path_to_pbtxt, "rb").read()
text_format.Merge(file_content.decode("utf-8"), saved_model)
return saved_model
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pbtxt, str(e)))
else:
raise IOError("SavedModel file does not exist at: %s/{%s|%s}" %
(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT,
constants.SAVED_MODEL_FILENAME_PB))
def _get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None):
"""Gets the asset tensors, if defined in the meta graph def to load.
Args:
export_dir: Directory where the SavedModel is located.
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
import_scope: Optional `string` -- if specified, prepend this followed by
'/' to all returned asset tensor names.
Returns:
A dictionary of asset tensors, keyed by the name of the asset tensor. The
value in the map corresponds to the absolute path of the asset file.
"""
# Collection-def that may contain the assets key.
collection_def = meta_graph_def_to_load.collection_def
asset_tensor_dict = {}
if constants.ASSETS_KEY in collection_def:
# Location of the assets for SavedModel.
assets_directory = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value
# Process each asset and add it to the asset tensor dictionary.
for asset_any_proto in assets_any_proto:
asset_proto = meta_graph_pb2.AssetFileDef()
asset_any_proto.Unpack(asset_proto)
tensor_name = asset_proto.tensor_info.name
if import_scope:
tensor_name = "%s/%s" % (import_scope, tensor_name)
asset_tensor_dict[tensor_name] = os.path.join(
compat.as_bytes(assets_directory),
compat.as_bytes(asset_proto.filename))
return asset_tensor_dict
def _get_main_op_tensor(
meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):
"""Gets the main op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
init_op_key: name of collection to check; should be one of MAIN_OP_KEY
or the deprecated LEGACY_INIT_OP_KEY
Returns:
The main op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the main op key has
other than exactly one tensor.
"""
collection_def = meta_graph_def_to_load.collection_def
main_op_tensor = None
if init_op_key in collection_def:
main_ops = collection_def[init_op_key].node_list.value
if len(main_ops) != 1:
raise RuntimeError("Expected exactly one SavedModel main op. "
"Found: {}".format(main_ops))
main_op_tensor = ops.get_collection(init_op_key)[0]
return main_op_tensor
@tf_export(v1=[
"saved_model.contains_saved_model",
"saved_model.maybe_saved_model_directory",
"saved_model.loader.maybe_saved_model_directory"
])
@deprecation.deprecated_endpoints(
"saved_model.loader.maybe_saved_model_directory")
def maybe_saved_model_directory(export_dir):
"""Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute string path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
"""
txt_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
pb_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
return file_io.file_exists(txt_path) or file_io.file_exists(pb_path)
@tf_export("saved_model.contains_saved_model", v1=[])
def contains_saved_model(export_dir):
"""Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute string path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
"""
return maybe_saved_model_directory(export_dir)
@tf_export(v1=["saved_model.load", "saved_model.loader.load"])
@deprecation.deprecated(
None,
"This function will only be available through the v1 compatibility "
"library as tf.compat.v1.saved_model.loader.load or "
"tf.compat.v1.saved_model.load. There will be a new function for importing "
"SavedModels in Tensorflow 2.0.")
def load(sess, tags, export_dir, import_scope=None, **saver_kwargs):
"""Loads the model from a SavedModel as specified by tags.
Args:
sess: The TensorFlow session to restore the variables.
tags: Set of string tags to identify the required MetaGraphDef. These should
correspond to the tags used when saving the variables using the
SavedModel `save()` API.
export_dir: Directory in which the SavedModel protocol buffer and variables
to be loaded are located.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: Optional keyword arguments passed through to Saver.
Returns:
The `MetaGraphDef` protocol buffer loaded in the provided session. This
can be used to further extract signature-defs, collection-defs, etc.
Raises:
RuntimeError: MetaGraphDef associated with the tags cannot be found.
"""
loader = SavedModelLoader(export_dir)
return loader.load(sess, tags, import_scope, **saver_kwargs)
class SavedModelLoader(object):
"""Load graphs and restore variable values from a `SavedModel`."""
def __init__(self, export_dir):
"""Creates a `SavedModelLoader`.
Args:
export_dir: Directory in which the SavedModel protocol buffer and
variables to be loaded are located.
"""
self._export_dir = export_dir
self._variables_path = saved_model_utils.get_variables_path(export_dir)
self._saved_model = _parse_saved_model(export_dir)
@property
def export_dir(self):
"""Directory containing the SavedModel."""
return self._export_dir
@property
def variables_path(self):
"""Path to variable checkpoint files."""
return self._variables_path
@property
def saved_model(self):
"""SavedModel object parsed from the export directory."""
return self._saved_model
def get_meta_graph_def_from_tags(self, tags):
"""Return MetaGraphDef with the exact specified tags.
Args:
tags: A list or set of string tags that identify the MetaGraphDef.
Returns:
MetaGraphDef with the same tags.
Raises:
RuntimeError: if no metagraphs were found with the associated tags.
"""
found_match = False
for meta_graph_def in self._saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(tags):
meta_graph_def_to_load = meta_graph_def
found_match = True
break
if not found_match:
raise RuntimeError(
"MetaGraphDef associated with tags " + str(tags).strip("[]") +
" could not be found in SavedModel. To inspect available tag-sets in"
" the SavedModel, please use the SavedModel CLI: `saved_model_cli`"
)
return meta_graph_def_to_load
def load_graph(self, graph, tags, import_scope=None, **saver_kwargs):
"""Load ops and nodes from SavedModel MetaGraph into graph.
Args:
graph: tf.Graph object.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
A tuple of
* Saver defined by the MetaGraph, which can be used to restore the
variable values.
* List of `Operation`/`Tensor` objects returned from
`tf.import_graph_def` (may be `None`).
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with graph.as_default():
return tf_saver._import_meta_graph_with_return_elements( # pylint: disable=protected-access
meta_graph_def, import_scope=import_scope, **saver_kwargs)
def restore_variables(self, sess, saver, import_scope=None):
"""Restore SavedModel variable values into the session.
Args:
sess: tf.Session to restore variable values.
saver: a tf.train.Saver object. Can be None if there are no variables in
graph. This may be the saver returned by the load_graph() function, or a
default `tf.train.Saver()`.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
Raises:
ValueError: if no saver was passed to the saver argument, and there are
variables in the graph.
"""
with sess.graph.as_default():
if (saver is None and
not variables._all_saveable_objects(scope=import_scope)): # pylint: disable=protected-access
tf_logging.info("The specified SavedModel has no variables; no "
"checkpoints were restored.")
elif isinstance(saver, tf_saver.Saver):
saver.restore(sess, self._variables_path)
else:
raise ValueError(
"No tf.train.Saver object was passed to the function "
"SavedModelLoader.restore_variables. Since there are variables in "
"the graph, a saver is required.")
def run_init_ops(self, sess, tags, import_scope=None):
"""Run initialization ops defined in the `MetaGraphDef`.
Args:
sess: tf.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with sess.graph.as_default():
# Get asset tensors, if any.
asset_tensors_dictionary = _get_asset_tensors(
self._export_dir, meta_graph_def, import_scope=import_scope)
main_op_tensor = (
_get_main_op_tensor(meta_graph_def, constants.MAIN_OP_KEY) or
_get_main_op_tensor(meta_graph_def, constants.LEGACY_INIT_OP_KEY))
if main_op_tensor is not None:
sess.run(fetches=[main_op_tensor], feed_dict=asset_tensors_dictionary)
def load(self, sess, tags, import_scope=None, **saver_kwargs):
"""Load the MetaGraphDef graph and restore variable values into the session.
Args:
sess: tf.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
`MetaGraphDef` proto of the graph that was loaded.
"""
with sess.graph.as_default():
saver, _ = self.load_graph(sess.graph, tags, import_scope,
**saver_kwargs)
self.restore_variables(sess, saver, import_scope)
self.run_init_ops(sess, tags, import_scope)
return self.get_meta_graph_def_from_tags(tags)
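# A minimal, hedged usage sketch of the loader above (the export path and tag
# set are placeholders; assumes a TF1-style graph/session workflow):
#
#   import tensorflow as tf
#
#   export_dir = "/tmp/my_saved_model"   # hypothetical path
#   if maybe_saved_model_directory(export_dir):
#       with tf.compat.v1.Session(graph=tf.Graph()) as sess:
#           meta_graph_def = load(sess, ["serve"], export_dir)
#           # signature_def / collection_def can now be read from meta_graph_def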
|
|
import pickle
import theano
import theano.tensor as T
from pylearn2.models import mlp
from pylearn2.training_algorithms import sgd
from pylearn2.training_algorithms.learning_rule import AdaGrad
from pylearn2.training_algorithms.learning_rule import Momentum
from pylearn2.training_algorithms.sgd import ExponentialDecay
from pylearn2.training_algorithms.sgd import LinearDecay
from pylearn2.training_algorithms.learning_rule import AdaDelta
from pylearn2.termination_criteria import EpochCounter
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.costs.cost import Cost
from pylearn2.costs.cost import DefaultDataSpecsMixin
import numpy as np
from random import randint
import os
import matplotlib.pyplot as plt
from features import logfbank
class XOR(DenseDesignMatrix):
def __init__(self):
self.class_names = ['0', '1']
X = [[randint(0, 1), randint(0, 1)] for _ in range(1000)]
y = []
for a, b in X:
if a + b == 1:
y.append([0, 1])
else:
y.append([1, 0])
X = np.array(X)
y = np.array(y)
super(XOR, self).__init__(X=X, y=y)
class NegativeLogLikelihoodCost(DefaultDataSpecsMixin, Cost):
supervised = True
def expr(self, model, data, **kwargs):
space, source = self.get_data_specs(model)
space.validate(data)
inputs, targets = data
outputs = model.fprop(inputs)
loss = -(targets * T.log(outputs)).sum(axis=1)
return loss.mean()
left_context = 10
right_context = 5
keywords = ["she", "had"]
def getDataForFrames(data, X, y):
fbanks = data[0]
segments = data[1]
feat_cnt = len(fbanks[0])
for start, end, class_id in segments:
if randint(0, 1) == 0:
for i in xrange(start, end):
features = np.empty(0)
for j in xrange(-left_context, right_context):
if i + j < 0 or i + j >= len(fbanks):
features = np.append(features, np.zeros(feat_cnt))
else:
features = np.append(features, fbanks[i + j])
result = np.zeros(len(keywords) + 1)
result[class_id] = 1.0
X.append(features)
y.append(result)
if randint(0, 1) == 0:
for i in xrange(start, end):
features = np.empty(0)
for j in xrange(-left_context, right_context):
if i + j < start or i + j >= end:
features = np.append(features, np.zeros(feat_cnt))
else:
features = np.append(features, fbanks[i + j])
result = np.zeros(len(keywords) + 1)
result[class_id] = 1.0
X.append(features)
y.append(result)
def getWindowedFeats(fbanks):
X = []
feat_cnt = len(fbanks[0])
for i in xrange(len(fbanks)):
features = np.empty(0)
for j in xrange(-left_context, right_context):
if i + j < 0 or i + j >= len(fbanks):
features = np.append(features, np.zeros(feat_cnt))
else:
features = np.append(features, fbanks[i + j])
X.append(features)
return X
def getDataFromPath(path, maxFiles = 1e9):
X = []
y = []
for file in os.listdir(path):
maxFiles -= 1
if maxFiles < 0:
break
data = np.load(path + "/" + file)
getDataForFrames(data, X, y)
X = np.array(X)
y = np.array(y)
return X, y
class SHEHAD(DenseDesignMatrix):
feat_cnt = 0
def __init__(self, path):
self.class_names = ["she", "had", "filler"]
X, y = getDataFromPath(path)
# print X.shape
self.feat_cnt = len(X[0])
super(SHEHAD, self).__init__(X=X, y=y)
def test(model):
X, y = getDataFromPath("/Users/evgeny/data/TEST")
confusion = np.zeros([3, 3])
ypred = np.log(model.fprop(theano.shared(np.array(X), name='inputs')).eval())
cnt = np.zeros(3)
for a, b in zip(ypred, y):
pos = np.argmax(b)
i = np.argmax(a)
#for i in xrange(3):
confusion[pos][i] += 1
# cnt[pos] += 1
for i in xrange(3):
for j in xrange(3):
print "%.0f" % confusion[i][j],
print # confusion[i][j] /= cnt[i]
print confusion
# create 128-unit rectified-linear hidden layers with small random initial
# weights (irange) and zero bias
rng = 0.001
modelName = "2x128relu50epochs-v7-momentum.mdl"
debug = True
if debug or not os.path.exists(modelName):
ds = SHEHAD("/Users/evgeny/data/TRAIN")
vds = SHEHAD("/Users/evgeny/data/TEST")
hidden_layer = mlp.RectifiedLinear(layer_name='hidden', dim=128, irange=0.001, init_bias=0)
hidden_layer2 = mlp.RectifiedLinear(layer_name='hidden2', dim=128, irange=0.01, init_bias=0)
hidden_layer3 = mlp.RectifiedLinear(layer_name='hidden3', dim=128, irange=0.01, init_bias=0)
# create Softmax output layer
output_layer = mlp.Softmax(3, 'output', irange=.1)
# create Stochastic Gradient Descent trainer that runs for at most 1000 epochs
cost = NegativeLogLikelihoodCost()
rule = Momentum(0.9)
# rule = Momentum(0.9, True)
# update_callbacks=ExponentialDecay(1 + 1e-5, 0.001)
trainer = sgd.SGD(learning_rate=0.01, cost=cost, batch_size=128, termination_criterion=EpochCounter(1000),
monitoring_dataset=vds, learning_rule=rule)
layers = [hidden_layer, hidden_layer2, output_layer]
# create neural net whose input dimension matches the windowed feature count
ann = mlp.MLP(layers, nvis=ds.feat_cnt)
trainer.setup(ann, ds)
print trainer.cost
# train neural net until the termination criterion is true
iteration = 0
while True:
trainer.train(dataset=ds)
ann.monitor.report_epoch()
ann.monitor()
if iteration % 10 == 0:
if not debug:
with open(modelName, 'wb') as f:
pickle.dump(ann, f)
if not trainer.continue_learning(ann):
break
iteration += 1
if not debug:
with open(modelName, 'wb') as f:
pickle.dump(ann, f)
else:
with open(modelName, 'rb') as f:
ann = pickle.load(f)
#test(ann)
#exit(0)
window = 0.025
step = 0.01
nfilt = 40
fftsize = 512
def extractLogFBank(rate, sig):
feats = logfbank(sig, rate, window, step, nfilt, fftsize, 0, None, 0)
return feats
#sph2pipe = "/Users/evgeny/kaldi3/tools/sph2pipe_v2.5/sph2pipe"
#os.system(sph2pipe + " -f wav " + "SA1.WAV" + " tmp.wav")
def computeFile(model, path):
# import extractFeats
import scipy.io.wavfile as wav
(rate, sig) = wav.read(path)
fbanks = extractLogFBank(rate, sig)
X = getWindowedFeats(fbanks)
ypred = np.log(model.fprop(theano.shared(np.array(X), name='inputs')).eval())
ypred = np.transpose(ypred)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
ax1.plot(ypred[0])
ax2.plot(ypred[1])
ax3.plot(ypred[2])
plt.show()
computeFile(ann, "tmp2.wav")
|
|
"""
Color markups
Contribution, Griatch 2017
Additional color markup styles for Evennia (extending or replacing the default |r, |234 etc).
Installation:
Import the desired style variables from this module into mygame/server/conf/settings.py and add them
to these settings variables. Each are specified as a list, and multiple such lists can be added to
each variable to support multiple formats. Note that list order affects which regexes are applied
first. You must restart both Portal and Server for color tags to update.
Assign to the following settings variables:
COLOR_ANSI_EXTRA_MAP - a mapping between regexes and ANSI colors
COLOR_XTERM256_EXTRA_FG - regex for defining XTERM256 foreground colors
COLOR_XTERM256_EXTRA_BG - regex for defining XTERM256 background colors
COLOR_XTERM256_EXTRA_GFG - regex for defining XTERM256 grayscale foreground colors
COLOR_XTERM256_EXTRA_GBG - regex for defining XTERM256 grayscale background colors
COLOR_ANSI_BRIGHT_BG_EXTRA_MAP = ANSI does not support bright backgrounds; we fake
this by mapping ANSI markup to matching bright XTERM256 backgrounds
COLOR_NO_DEFAULT - Set True/False. If False (default), extend the default markup, otherwise
replace it completely.
To add the {- "curly-bracket" style, add the following to your settings file, then reboot both
Server and Portal:
from evennia.contrib import color_markups
COLOR_ANSI_EXTRA_MAP = color_markups.CURLY_COLOR_ANSI_EXTRA_MAP
COLOR_XTERM256_EXTRA_FG = color_markups.CURLY_COLOR_XTERM256_EXTRA_FG
COLOR_XTERM256_EXTRA_BG = color_markups.CURLY_COLOR_XTERM256_EXTRA_BG
COLOR_XTERM256_EXTRA_GFG = color_markups.CURLY_COLOR_XTERM256_EXTRA_GFG
COLOR_XTERM256_EXTRA_GBG = color_markups.CURLY_COLOR_XTERM256_EXTRA_GBG
COLOR_ANSI_BRIGHT_BG_EXTRA_MAP = color_markups.CURLY_COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP
To add the %c- "mux/mush" style, add the following to your settings file, then reboot both Server
and Portal:
from evennia.contrib import color_markups
COLOR_ANSI_EXTRA_MAP = color_markups.MUX_COLOR_ANSI_EXTRA_MAP
COLOR_XTERM256_EXTRA_FG = color_markups.MUX_COLOR_XTERM256_EXTRA_FG
COLOR_XTERM256_EXTRA_BG = color_markups.MUX_COLOR_XTERM256_EXTRA_BG
COLOR_XTERM256_EXTRA_GFG = color_markups.MUX_COLOR_XTERM256_EXTRA_GFG
COLOR_XTERM256_EXTRA_GBG = color_markups.MUX_COLOR_XTERM256_EXTRA_GBG
COLOR_ANSI_BRIGHT_BG_EXTRA_MAP = color_markups.MUX_COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP
"""
# ANSI constants (copied from evennia.utils.ansi to avoid import)
_ANSI_BEEP = "\07"
_ANSI_ESCAPE = "\033"
_ANSI_NORMAL = "\033[0m"
_ANSI_UNDERLINE = "\033[4m"
_ANSI_HILITE = "\033[1m"
_ANSI_UNHILITE = "\033[22m"
_ANSI_BLINK = "\033[5m"
_ANSI_INVERSE = "\033[7m"
_ANSI_INV_HILITE = "\033[1;7m"
_ANSI_INV_BLINK = "\033[7;5m"
_ANSI_BLINK_HILITE = "\033[1;5m"
_ANSI_INV_BLINK_HILITE = "\033[1;5;7m"
# Foreground colors
_ANSI_BLACK = "\033[30m"
_ANSI_RED = "\033[31m"
_ANSI_GREEN = "\033[32m"
_ANSI_YELLOW = "\033[33m"
_ANSI_BLUE = "\033[34m"
_ANSI_MAGENTA = "\033[35m"
_ANSI_CYAN = "\033[36m"
_ANSI_WHITE = "\033[37m"
# Background colors
_ANSI_BACK_BLACK = "\033[40m"
_ANSI_BACK_RED = "\033[41m"
_ANSI_BACK_GREEN = "\033[42m"
_ANSI_BACK_YELLOW = "\033[43m"
_ANSI_BACK_BLUE = "\033[44m"
_ANSI_BACK_MAGENTA = "\033[45m"
_ANSI_BACK_CYAN = "\033[46m"
_ANSI_BACK_WHITE = "\033[47m"
# Formatting Characters
_ANSI_RETURN = "\r\n"
_ANSI_TAB = "\t"
_ANSI_SPACE = " "
#############################################################
#
# {- style MUD markup (old Evennia default). This is
# basically identical to the default |-style except using
# a curly bracket instead. This was removed because {}
# are used in Python string formatting.
#
# {r, {R - bright/dark red foreground
# {[r, {[R - bright/dark red background
# {500, {[500 - XTERM256 red foreground/background
# {=w, {[=w - XTERM256 greyscale foreground/background
#
#############################################################
CURLY_COLOR_ANSI_EXTRA_MAP = [
(r'{n', _ANSI_NORMAL), # reset
(r'{/', _ANSI_RETURN), # line break
(r'{-', _ANSI_TAB), # tab
(r'{_', _ANSI_SPACE), # space
(r'{*', _ANSI_INVERSE), # invert
(r'{^', _ANSI_BLINK), # blinking text (very annoying and not supported by all clients)
(r'{u', _ANSI_UNDERLINE), # underline
(r'{r', _ANSI_HILITE + _ANSI_RED),
(r'{g', _ANSI_HILITE + _ANSI_GREEN),
(r'{y', _ANSI_HILITE + _ANSI_YELLOW),
(r'{b', _ANSI_HILITE + _ANSI_BLUE),
(r'{m', _ANSI_HILITE + _ANSI_MAGENTA),
(r'{c', _ANSI_HILITE + _ANSI_CYAN),
(r'{w', _ANSI_HILITE + _ANSI_WHITE), # pure white
(r'{x', _ANSI_HILITE + _ANSI_BLACK), # dark grey
(r'{R', _ANSI_UNHILITE + _ANSI_RED),
(r'{G', _ANSI_UNHILITE + _ANSI_GREEN),
(r'{Y', _ANSI_UNHILITE + _ANSI_YELLOW),
(r'{B', _ANSI_UNHILITE + _ANSI_BLUE),
(r'{M', _ANSI_UNHILITE + _ANSI_MAGENTA),
(r'{C', _ANSI_UNHILITE + _ANSI_CYAN),
(r'{W', _ANSI_UNHILITE + _ANSI_WHITE), # light grey
(r'{X', _ANSI_UNHILITE + _ANSI_BLACK), # pure black
# hilight-able colors
(r'{h', _ANSI_HILITE),
(r'{H', _ANSI_UNHILITE),
(r'{!R', _ANSI_RED),
(r'{!G', _ANSI_GREEN),
(r'{!Y', _ANSI_YELLOW),
(r'{!B', _ANSI_BLUE),
(r'{!M', _ANSI_MAGENTA),
(r'{!C', _ANSI_CYAN),
(r'{!W', _ANSI_WHITE), # light grey
(r'{!X', _ANSI_BLACK), # pure black
# normal ANSI backgrounds
(r'{[R', _ANSI_BACK_RED),
(r'{[G', _ANSI_BACK_GREEN),
(r'{[Y', _ANSI_BACK_YELLOW),
(r'{[B', _ANSI_BACK_BLUE),
(r'{[M', _ANSI_BACK_MAGENTA),
(r'{[C', _ANSI_BACK_CYAN),
(r'{[W', _ANSI_BACK_WHITE), # light grey background
(r'{[X', _ANSI_BACK_BLACK), # pure black background
]
CURLY_COLOR_XTERM256_EXTRA_FG = [r'\{([0-5])([0-5])([0-5])'] # {123 - foreground colour
CURLY_COLOR_XTERM256_EXTRA_BG = [r'\{\[([0-5])([0-5])([0-5])'] # {[123 - background colour
CURLY_COLOR_XTERM256_EXTRA_GFG = [r'\{=([a-z])'] # {=a - greyscale foreground
CURLY_COLOR_XTERM256_EXTRA_GBG = [r'\{\[=([a-z])'] # {[=a - greyscale background
CURLY_COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP = [
(r'{[r', r'{[500'),
(r'{[g', r'{[050'),
(r'{[y', r'{[550'),
(r'{[b', r'{[005'),
(r'{[m', r'{[505'),
(r'{[c', r'{[055'),
(r'{[w', r'{[555'), # white background
(r'{[x', r'{[222'), # dark grey background
]
#############################################################
#
# %c - MUX/MUSH style markup. This was Evennia's first
# color markup style. It was phased out due to % being used
# in Python formatting operations.
#
# %ch%cr, %cr - bright/dark red foreground
# %ch%cR, %cR- bright/dark red background
# %c500, %c[500 - XTERM256 red foreground/background
# %c=w, %c[=w - XTERM256 greyscale foreground/background
#
#############################################################
MUX_COLOR_ANSI_EXTRA_MAP = [
(r'%cn', _ANSI_NORMAL), # reset
(r'%ch', _ANSI_HILITE), # highlight
(r'%r', _ANSI_RETURN), # line break
(r'%R', _ANSI_RETURN), #
(r'%t', _ANSI_TAB), # tab
(r'%T', _ANSI_TAB), #
(r'%b', _ANSI_SPACE), # space
(r'%B', _ANSI_SPACE),
(r'%cf', _ANSI_BLINK), # annoying and not supported by all clients
(r'%ci', _ANSI_INVERSE), # invert
(r'%cr', _ANSI_RED),
(r'%cg', _ANSI_GREEN),
(r'%cy', _ANSI_YELLOW),
(r'%cb', _ANSI_BLUE),
(r'%cm', _ANSI_MAGENTA),
(r'%cc', _ANSI_CYAN),
(r'%cw', _ANSI_WHITE),
(r'%cx', _ANSI_BLACK),
(r'%cR', _ANSI_BACK_RED),
(r'%cG', _ANSI_BACK_GREEN),
(r'%cY', _ANSI_BACK_YELLOW),
(r'%cB', _ANSI_BACK_BLUE),
(r'%cM', _ANSI_BACK_MAGENTA),
(r'%cC', _ANSI_BACK_CYAN),
(r'%cW', _ANSI_BACK_WHITE),
(r'%cX', _ANSI_BACK_BLACK)
]
MUX_COLOR_XTERM256_EXTRA_FG = [r'%c([0-5])([0-5])([0-5])'] # %c123 - foreground colour
MUX_COLOR_XTERM256_EXTRA_BG = [r'%c\[([0-5])([0-5])([0-5])'] # %c[123 - background colour
MUX_COLOR_XTERM256_EXTRA_GFG = [r'%c=([a-z])'] # %c=a - greyscale foreground
MUX_COLOR_XTERM256_EXTRA_GBG = [r'%c\[=([a-z])'] # %c[=a - greyscale background
MUX_COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP = [
(r'%ch%cR', r'%c[500'),
(r'%ch%cG', r'%c[050'),
(r'%ch%cY', r'%c[550'),
(r'%ch%cB', r'%c[005'),
(r'%ch%cM', r'%c[505'),
(r'%ch%cC', r'%c[055'),
(r'%ch%cW', r'%c[555'), # white background
(r'%ch%cX', r'%c[222'), # dark grey background
]
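# A minimal sketch (not Evennia's actual parser) of how the (markup, ANSI) pairs
# above are intended to translate a tagged string; it treats each markup key as a
# literal tag, which is sufficient for the plain ANSI entries in either map:
def _demo_convert(text, mapping=CURLY_COLOR_ANSI_EXTRA_MAP):
    for markup, ansi in mapping:
        text = text.replace(markup, ansi)
    return text
# e.g. _demo_convert("{rAlert!{n") -> "\033[1m\033[31mAlert!\033[0m"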
|
|
import numpy as np
from ..regularization import BaseSimilarityMeasure
from ..utils import eigenvalue_by_power_iteration
from ..optimization import IterationPrinters, StoppingCriteria
from .directives import InversionDirective, SaveEveryIteration
###############################################################################
# #
# Directives of joint inversion #
# #
###############################################################################
class SimilarityMeasureInversionPrinters:
betas = {
"title": "betas",
"value": lambda M: ["{:.2e}".format(elem) for elem in M.parent.betas],
"width": 26,
"format": "%s",
}
lambd = {
"title": "lambda",
"value": lambda M: M.parent.lambd,
"width": 10,
"format": "%1.2e",
}
phi_d_list = {
"title": "phi_d",
"value": lambda M: ["{:.2e}".format(elem) for elem in M.parent.phi_d_list],
"width": 26,
"format": "%s",
}
phi_m_list = {
"title": "phi_m",
"value": lambda M: ["{:.2e}".format(elem) for elem in M.parent.phi_m_list],
"width": 26,
"format": "%s",
}
phi_sim = {
"title": "phi_sim",
"value": lambda M: M.parent.phi_sim,
"width": 10,
"format": "%1.2e",
}
iterationCG = {
"title": "iterCG",
"value": lambda M: M.cg_count,
"width": 10,
"format": "%3d",
}
class SimilarityMeasureInversionDirective(InversionDirective):
"""
Directive for joint inversions coupling two models with a similarity measure.
Sets Printers and StoppingCriteria.
Notes
-----
Methods assume we are working with two models, and a single similarity measure.
Also, the SimilarityMeasure objective function must be the last regularization.
"""
printers = [
IterationPrinters.iteration,
SimilarityMeasureInversionPrinters.betas,
SimilarityMeasureInversionPrinters.lambd,
IterationPrinters.f,
SimilarityMeasureInversionPrinters.phi_d_list,
SimilarityMeasureInversionPrinters.phi_m_list,
SimilarityMeasureInversionPrinters.phi_sim,
SimilarityMeasureInversionPrinters.iterationCG,
]
def initialize(self):
if not isinstance(self.reg.objfcts[-1], BaseSimilarityMeasure):
raise TypeError(
f"The last regularization function must be an instance of "
f"BaseSimilarityMeasure, got {type(self.reg.objfcts[-1])}."
)
# define relevant attributes
self.betas = self.reg.multipliers[:-1]
self.lambd = self.reg.multipliers[-1]
self.phi_d_list = []
self.phi_m_list = []
self.phi_sim = 0.0
# pass attributes to invProb
self.invProb.betas = self.betas
self.invProb.num_models = len(self.betas)
self.invProb.lambd = self.lambd
self.invProb.phi_d_list = self.phi_d_list
self.invProb.phi_m_list = self.phi_m_list
self.invProb.phi_sim = self.phi_sim
self.opt.printers = self.printers
self.opt.stoppers = [StoppingCriteria.iteration]
def validate(self, directiveList):
# check that this directive is first in the DirectiveList
dList = directiveList.dList
self_ind = dList.index(self)
if self_ind != 0:
raise IndexError(
"The CrossGradientInversionDirective must be first in directive list."
)
return True
def endIter(self):
# compute attribute values
phi_d = []
for dmis in self.dmisfit.objfcts:
phi_d.append(dmis(self.opt.xc))
phi_m = []
for reg in self.reg.objfcts:
phi_m.append(reg(self.opt.xc))
# pass attributes values to invProb
self.invProb.phi_d_list = phi_d
self.invProb.phi_m_list = phi_m[:-1]
self.invProb.phi_sim = phi_m[-1]
self.invProb.betas = self.reg.multipliers[:-1]
# Assume last reg.objfct is the coupling
self.invProb.lambd = self.reg.multipliers[-1]
class SimilarityMeasureSaveOutputEveryIteration(SaveEveryIteration):
"""
SaveOutputEveryIteration for Joint Inversions.
Saves information on the tradeoff parameters, data misfits, regularizations,
coupling term, number of CG iterations, and value of cost function.
"""
header = None
save_txt = True
betas = None
phi_d = None
phi_m = None
phi_sim = None
phi = None
def initialize(self):
if self.save_txt is True:
print(
"CrossGradientSaveOutputEveryIteration will save your inversion "
"progress as: '###-{0!s}.txt'".format(self.fileName)
)
f = open(self.fileName + ".txt", "w")
self.header = " # betas lambda joint_phi_d joint_phi_m phi_sim iterCG phi \n"
f.write(self.header)
f.close()
# Create a list of each
self.betas = []
self.lambd = []
self.phi_d = []
self.phi_m = []
self.phi = []
self.phi_sim = []
def endIter(self):
self.betas.append(["{:.2e}".format(elem) for elem in self.invProb.betas])
self.phi_d.append(["{:.3e}".format(elem) for elem in self.invProb.phi_d_list])
self.phi_m.append(["{:.3e}".format(elem) for elem in self.invProb.phi_m_list])
self.lambd.append("{:.2e}".format(self.invProb.lambd))
self.phi_sim.append(self.invProb.phi_sim)
self.phi.append(self.opt.f)
if self.save_txt:
f = open(self.fileName + ".txt", "a")
i = self.opt.iter
f.write(
" {0:2d} {1} {2} {3} {4} {5:1.4e} {6:d} {7:1.4e}\n".format(
i,
self.betas[i - 1],
self.lambd[i - 1],
self.phi_d[i - 1],
self.phi_m[i - 1],
self.phi_sim[i - 1],
self.opt.cg_count,
self.phi[i - 1],
)
)
f.close()
def load_results(self):
results = np.loadtxt(self.fileName + str(".txt"), comments="#")
self.betas = results[:, 1]
self.lambd = results[:, 2]
self.phi_d = results[:, 3]
self.phi_m = results[:, 4]
self.phi_sim = results[:, 5]
self.f = results[:, 7]
class PairedBetaEstimate_ByEig(InversionDirective):
"""
Estimate the trade-off parameter, beta, between pairs of data misfit(s) and the
regularization(s) as a multiple of the ratio between the highest eigenvalue of the
data misfit term and the highest eigenvalue of the regularization.
The highest eigenvalues are estimated through power iterations and Rayleigh
quotient.
Notes
-----
This class assumes the order of the data misfits for each model parameter match
the order for the respective regularizations, i.e.
>>> data_misfits = [phi_d_m1, phi_d_m2, phi_d_m3]
>>> regs = [phi_m_m1, phi_m_m2, phi_m_m3]
In which case it will estimate regularization parameters for each respective pair.
"""
beta0_ratio = 1.0 #: the estimated ratio is multiplied by this to obtain beta
n_pw_iter = 4 #: number of power iterations for estimation.
seed = None #: Random seed for the directive
def initialize(self):
"""
The initial beta is calculated by comparing the estimated
eigenvalues of JtJ and WtW.
To estimate the dominant eigenvector of **A**, we use a few iterations
of the *Power Method*:
.. math::
\\mathbf{x_1 = A x_0}
Given this (very coarse) approximation of the eigenvector, we can
use the *Rayleigh quotient* to approximate the largest eigenvalue.
.. math::
\\lambda_0 = \\frac{\\mathbf{x^\\top A x}}{\\mathbf{x^\\top x}}
We will approximate the largest eigenvalue for both JtJ and WtW,
and use some ratio of the quotient to estimate beta0.
.. math::
\\beta_0 = \\gamma \\frac{\\mathbf{x^\\top J^\\top J x}}{\\mathbf{x^\\top W^\\top W x}}
:rtype: float
:return: beta0
"""
if self.seed is not None:
np.random.seed(self.seed)
if self.debug:
print("Calculating the beta0 parameter.")
m = self.invProb.model
dmis_eigenvalues = []
reg_eigenvalues = []
dmis_objs = self.dmisfit.objfcts
reg_objs = [
obj
for obj in self.reg.objfcts
if not isinstance(obj, BaseSimilarityMeasure)
]
if len(dmis_objs) != len(reg_objs):
raise ValueError(
f"There must be the same number of data misfit and regularizations."
f"Got {len(dmis_objs)} and {len(reg_objs)} respectively."
)
for dmis, reg in zip(dmis_objs, reg_objs):
dmis_eigenvalues.append(
eigenvalue_by_power_iteration(dmis, m, n_pw_iter=self.n_pw_iter,)
)
reg_eigenvalues.append(
eigenvalue_by_power_iteration(reg, m, n_pw_iter=self.n_pw_iter,)
)
self.ratios = np.array(dmis_eigenvalues) / np.array(reg_eigenvalues)
self.invProb.betas = self.beta0_ratio * self.ratios
self.reg.multipliers[:-1] = self.invProb.betas
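# A minimal NumPy sketch (distinct from SimPEG's eigenvalue_by_power_iteration
# helper used above) of the power-iteration / Rayleigh-quotient estimate the
# docstring describes, for a dense symmetric matrix A:
def _power_iteration_rayleigh_sketch(A, n_iter=4, seed=None):
    """Approximate the largest eigenvalue of a symmetric matrix A."""
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[0])
    for _ in range(n_iter):
        x = A @ x
        x /= np.linalg.norm(x)  # re-normalise to keep the iterate bounded
    return (x @ (A @ x)) / (x @ x)  # Rayleigh quotient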
class PairedBetaSchedule(InversionDirective):
"""
Directive for beta cooling schedule to determine the tradeoff
parameters when using paired data misfits and regularizations for a joint inversion.
"""
chifact_target = 1.0
beta_tol = 1e-1
update_beta = True
cooling_rate = 1
cooling_factor = 2
dmis_met = False
@property
def target(self):
if getattr(self, "_target", None) is None:
nD = np.array([survey.nD for survey in self.survey])
self._target = nD * 0.5 * self.chifact_target
return self._target
@target.setter
def target(self, val):
self._target = val
def initialize(self):
self.dmis_met = np.zeros_like(self.invProb.betas, dtype=bool)
def endIter(self):
# Check if target misfit has been reached, if so, set dmis_met to True
for i, phi_d in enumerate(self.invProb.phi_d_list):
self.dmis_met[i] = phi_d < self.target[i]
# check separately if misfits are within the tolerance,
# otherwise, scale beta individually
for i, phi_d in enumerate(self.invProb.phi_d_list):
if self.opt.iter > 0 and self.opt.iter % self.cooling_rate == 0:
target = self.target[i]
ratio = phi_d / target
if self.update_beta and ratio <= (1.0 + self.beta_tol):
if ratio <= 1:
ratio = np.maximum(0.75, ratio)
else:
ratio = np.minimum(1.5, ratio)
self.invProb.betas[i] /= ratio
elif ratio > 1.0:
self.invProb.betas[i] /= self.cooling_factor
self.reg.multipliers[:-1] = self.invProb.betas
class MovingAndMultiTargetStopping(InversionDirective):
r"""
Directive for setting stopping criteria for a joint inversion.
Ensures both that all target misfits are met and there is a small change in the
model. Computes the percentage change of the current model from the previous model.
.. math::
\frac {\| \mathbf{m_i} - \mathbf{m_{i-1}} \|} {\| \mathbf{m_{i-1}} \|}
"""
tol = 1e-5
beta_tol = 1e-1
chifact_target = 1.0
@property
def target(self):
if getattr(self, "_target", None) is None:
nD = []
for survey in self.survey:
nD += [survey.nD]
nD = np.array(nD)
self._target = nD * 0.5 * self.chifact_target
return self._target
@target.setter
def target(self, val):
self._target = val
def endIter(self):
for phi_d, target in zip(self.invProb.phi_d_list, self.target):
if np.abs(1.0 - phi_d / target) >= self.beta_tol:
return
if (
np.linalg.norm(self.opt.xc - self.opt.x_last)
/ np.linalg.norm(self.opt.x_last)
> self.tol
):
return
print(
"stopping criteria met: ",
np.linalg.norm(self.opt.xc - self.opt.x_last)
/ np.linalg.norm(self.opt.x_last),
)
self.opt.stopNextIteration = True
|
|
#!/usr/bin/env python
from sys import stdout, stderr, exit, maxint
from optparse import OptionParser
from itertools import product, combinations, izip
from os.path import basename, dirname, join, isfile
from random import shuffle
import logging
import csv
import re
MARKER_PAT = re.compile('^([^:]+):(\d+):(\d+)(\+$|-$|$)')
FONTSIZE_VIS = 20
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
def readMarkerSequences(data):
res = list()
isHeader = True
for _, segid, gene, _, _, in csv.reader(data, delimiter='\t'):
if isHeader:
isHeader = False
continue
if int(segid) > len(res):
res.append(set())
res[-1].add(gene)
return res
def readIadhoreConfig(data):
res = list()
for line in data:
if not line.strip():
continue
if line.find('=') >= 0:
k, v = line.split('=', 1)
res.append((k.strip(), v.strip()))
elif line.find(' ') >= 0 and len(res):
if len(res[-1]) < 3:
res[-1] = res[-1] + (list(), )
k, v = line.split(' ', 1)
res[-1][2].append((k.strip(), v.strip()))
else:
print >> stderr, ('Unable to parse line \"%s\" in i-AdHoRe ' + \
'config file. Exiting') %line.strip()
exit(1)
return res
def readBlastTable(data, gene2genome):
res = dict()
for g1i, g2k in csv.reader(data, delimiter='\t'):
G1 = gene2genome[g1i]
G2 = gene2genome[g2k]
if not res.has_key((G1, G2)):
res[(G1, G2)] = dict()
res[(G2, G1)] = dict()
if not res[(G1, G2)].has_key(g1i):
res[(G1, G2)][g1i] = set()
res[(G1, G2)][g1i].add(g2k)
if not res[(G2, G1)].has_key(g2k):
res[(G2, G1)][g2k] = set()
res[(G2, G1)][g2k].add(g1i)
return res
def readGenomes(iadhoreConfig, configPath):
genomes = dict()
gene2genome = dict()
for x in iadhoreConfig:
if len(x) != 3 or x[0] != 'genome':
continue
genomes[x[1]] = list()
for _, fPath in x[2]:
f = join(configPath, fPath)
for line in open(f):
gx = line.strip()
if gx[-1] not in ('+', '-'):
LOG.fatal(('Gene %s in file %s has unknown ' + \
'orientation. Exiting') %(gx, f))
exit(1)
genomes[x[1]].append(gx[:-1])
gene2genome[gx[:-1]] = x[1]
return genomes, gene2genome
def readSegments(data):
isHeader = True
res = dict()
c = 0
for line in csv.reader(data, delimiter='\t'):
if isHeader:
isHeader = False
continue
mid = int(line[1])
if not res.has_key(mid):
res[mid] = list()
res[mid].append((c, line[2], line[4], line[5]))
c += 1
return res
def weightedScore(segments, marker_seqs, blastMap, onlyAll=False):
res = list()
if onlyAll:
n = len(set(reduce(lambda x,y: x+y, blastMap.keys())))
for mid in sorted(segments.keys()):
if onlyAll and len(set(x for _, x, _, _ in segments[mid])) < n:
continue
c = 0.0
s = 0
for i in xrange(len(segments[mid])):
sid1, G1, _, _ = segments[mid][i]
s += len(marker_seqs[sid1])
for g1x in marker_seqs[sid1]:
hasHit = True
j = 0
while hasHit and j < len(segments[mid]):
j += 1
if i == j-1:
continue
sid2, G2, _, _ = segments[mid][j-1]
hasHit = blastMap.has_key((G1, G2)) and \
blastMap[(G1, G2)].has_key(g1x)
if not hasHit:
break
hasHit_i = False
for g2y in blastMap[(G1, G2)][g1x]:
hasHit_i = g2y in marker_seqs[sid2]
if hasHit_i:
break
hasHit = hasHit_i
if hasHit:
c += 1
res.append((mid, c/s))
return res
def relaxedScore(segments, marker_seqs, blastMap, onlyAll=False):
res = list()
if onlyAll:
# number of distinct genomes in the blast map, as in weightedScore()
n = len(set(reduce(lambda x,y: x+y, blastMap.keys())))
for mid in sorted(segments.keys()):
if onlyAll and len(set(x for _, x, _, _ in segments[mid])) < n:
continue
c = 0.0
s = 0
for i in xrange(len(segments[mid])):
sid1, G1, _, _ = segments[mid][i]
s += len(marker_seqs[sid1])
for g1x in marker_seqs[sid1]:
hasHit = False
j = 0
while not hasHit and j < len(segments[mid]):
j += 1
if i == j-1:
continue
sid2, G2, _, _ = segments[mid][j-1]
if not blastMap.has_key((G1, G2)) or \
not blastMap[(G1, G2)].has_key(g1x):
break
for g2y in blastMap[(G1, G2)][g1x]:
hasHit = g2y in marker_seqs[sid2]
if hasHit:
break
if hasHit:
c += 1
res.append((mid, c/s))
return res
def showHistogram(scores, stype, fileName):
try:
import matplotlib.pyplot as plt
except ImportError:
LOG.fatal('Unable to import matplotlib.pyplot. Not installed? Exiting.')
exit(1)
# the histogram of the data
n, bins, patches = plt.hist(map(lambda x: x[1], scores), 50,
facecolor='#b30000', edgecolor='none', alpha=0.75)
plt.xlabel(stype, fontsize=FONTSIZE_VIS)
plt.ylabel('count', fontsize=FONTSIZE_VIS)
#plt.axis([40, 160, 0, 0.03])
#plt.grid(True)
plt.xlim([0, 1.005])
plt.savefig(fileName, format='eps', transparent=True)
plt.show()
if __name__ == '__main__':
usage = 'usage: %prog [options] <I-ADHORE CONFIGURATION FILE> <SEGMENTS FILE>'
parser = OptionParser(usage=usage)
parser.add_option('-v', '--visualize', dest='visual', default=False,
action='store_true', help='Show histogram of scores. IMPORTANT: ' + \
'requires matplotlib.pyplot library! [default: %default]')
parser.add_option('-t', '--type', dest='type', default='relaxed', type=str,
help='Scoring type. [default: %default]', metavar=
'(relaxed|weighted)')
parser.add_option('-a', '--only_all', dest='onlyAll', default=False,
action='store_true', help='Consider only those multiplicons ' + \
'that span all genomes, not just a subset [default: ' + \
'%default]')
parser.add_option('-f', '--figure_name', dest='figName', default='(relaxed' + \
'|weighted)_scores.eps', type=str, help='Name of output file of ' + \
'histogram. Only applicable in combination with -v. [default:' + \
'%default]')
figNameOpt = parser.option_list[-1]
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
exit(1)
# setup logging
ch = logging.StreamHandler(stderr)
ch.setLevel(logging.ERROR)
ch.setFormatter(logging.Formatter('!! %(message)s'))
cf = logging.FileHandler('%s.synteny_scores.log' %(basename(args[0]).rsplit('.', 1)[0]), mode='w')
cf.setLevel(logging.INFO)
cf.setFormatter(logging.Formatter('%(levelname)s\t%(asctime)s\t++ %(message)s'))
LOG.addHandler(cf)
LOG.addHandler(ch)
#
# main
#
if figNameOpt.default == options.figName:
options.figName = '%s_scores.eps' %options.type
iadhoreConfig = readIadhoreConfig(open(args[0]))
iadhoreCMap = dict(x for x in iadhoreConfig if len(x) == 2)
if iadhoreCMap.has_key('table_type') and \
iadhoreCMap['table_type'] == 'family':
LOG.fatal(('Unable to parse file %s with family assignments: not ' + \
'implemented. Exiting.') %iadhoreCMap['blast_table'])
exit(1)
listElemFile = join(dirname(args[1]), 'list_elements.txt')
if not isfile(listElemFile):
LOG.fatal(('File %s is required, but does not exist. ' + \
'Exiting.') %listElemFile)
exit(1)
marker_seqs = readMarkerSequences(open(listElemFile))
_ , gene2genome = readGenomes(iadhoreConfig, dirname(args[0]))
segments = readSegments(open(args[1]))
blastMap = readBlastTable(open(join(dirname(args[0]),
iadhoreCMap['blast_table'])), gene2genome)
scores = None
if options.type == 'relaxed':
scores = relaxedScore(segments, marker_seqs, blastMap, options.onlyAll)
elif options.type == 'weighted':
scores = weightedScore(segments, marker_seqs, blastMap, options.onlyAll)
else:
LOG.fatal('Scoring type must be either relaxed or weighted. Exiting')
exit(1)
if options.visual:
showHistogram(scores, options.type + ' synteny score', options.figName)
else:
for s in scores:
print >> stdout, '%s\t%s' %s
# compute coverage
gNames = sorted(set(reduce(lambda x,y: x+y, blastMap.keys())))
covered_markers = dict((G1, set()) for G1 in gNames)
for seg in segments.values():
if options.onlyAll and len(set(x for _, x, _, _ in seg)) < len(gNames):
continue
for sid, G1, _, _ in seg:
covered_markers[G1].update(marker_seqs[sid])
res = dict()
for G1, markers in covered_markers.items():
res[G1] = 0
for g1i in markers:
m = MARKER_PAT.match(g1i)
_, start, end, _ = m.groups()
res[G1] += int(end)-int(start)+1
LOG.info('Coverage: \n%s' %('\n'.join(map(lambda x: '\t'.join(map(str, x)),
sorted(res.items())))))
LOG.info('finished')
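# Example invocation (file names are placeholders; the script name depends on
# how this file is saved):
#   python synteny_scores.py -t weighted -a -v dataset/iadhore.ini output/segments.txt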
|
|
# Copyright(c) 2014, The scLVM developers (Forian Buettner, Paolo Francesco Casale, Oliver Stegle)
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""Conditions CLVM code.
This is currently experimental and not supported
"""
import pdb
import scipy as SP
import scipy.linalg as LA
import copy
import sys
sys.path.append('./..')
from utils.misc import regressOut
import limix_legacy
import limix_legacy.deprecated as limix
class gpCLVM:#(object):
"""
Class for conditional gplvm
"""
def __init__(self,Y=None,X0=None,k=1,standardize=False,interaction=True):
"""
Y: data [NxG]
X0: known latent factors [Nxk0]
k: number of latent factors to infer
"""
assert Y is not None, 'gpCLVM:: set Y!'
assert X0 is not None, 'gpCLVM:: set X0!'
self.cache = {}
self.interaction = interaction
# read data
self._setY(Y,standardize=standardize)
self._setX0(X0)
self.k = k
# covariance for known latent factor
self.C0 = limix.CFixedCF(self.K0)
# covariance for unknown latent factor
self.C = limix.CProductCF()
self.Ca = limix.CLowRankCF(self.N,self.k)
self.C.addCovariance(self.Ca)
if self.interaction==True:
self.Cb1 = limix.CFixedCF(SP.ones((self.N,self.N)))
self.Cb1.setParamMask(SP.zeros(1))
self.Cb2 = limix.CFixedCF(self.K0)
self.Cb = limix.CSumCF()
self.Cb.addCovariance(self.Cb1)
self.Cb.addCovariance(self.Cb2)
self.C.addCovariance(self.Cb)
# total covariance
covar = limix.CSumCF()
covar.addCovariance(self.C0)
covar.addCovariance(self.C)
# likelihood
self.ll = limix.CLikNormalIso()
# init GP and hyper params
self.gp = limix.CGPbase(covar,self.ll)
self.gp.setY(self.Y)
def _setY(self,Y,standardize=False):
""" set phenotype """
Y -= Y.mean(0)
if standardize:
Y /= Y.std(0)
self.Y = Y
self.N,self.G=Y.shape
def _setX0(self,X0):
""" set X0 """
assert X0.shape[0]==self.N, 'gpCLVM:: dimension mismatch'
self.X0 = X0
self.k0 = X0.shape[1]
self.K0 = SP.dot(self.X0,self.X0.T)
self.K0 /= self.K0.diagonal().mean()
def initParams(self,method='fast',Ycc=None,X=None,varXX=None,varX0X0=None,nois=None):
"""
This takes care of parameter initialization
"""
if method=='fast':
rv = self._initParams_fast()
elif method=='regressOut':
assert Ycc is not None, 'provide Ycc'
assert X is not None, 'provide X'
assert varXX is not None, 'provide varXX'
rv = self._initParams_regressOut(Ycc,X,varXX)
elif method=='random':
rv = self._initParams_random()
elif method=='null':
assert varX0X0 is not None, 'provide varX0X0'
assert nois is not None, 'provide nois'
rv = self._initParams_null(varX0X0,nois)
return rv
def optimize(self,params0):
""" initialize """
self.gp.setParams(params0)
self.cache['params0'] = params0
self.cache['lml0'] = self.gp.LML()
self.cache['lmlGrad0'] = self.gp.LMLgrad()
""" optimize """
gpopt = limix.CGPopt(self.gp)
conv = gpopt.opt()
""" store stuff """
self.cache['lml'] = self.gp.LML()
self.cache['lmlGrad'] = self.gp.LMLgrad()
self.cache['params'] = self.gp.getParams()
self.cache['X'] = self.Ca.getParams().reshape((self.N,self.k),order='F')
self.cache['K'] = self.Ca.K()
if self.interaction:
self.cache['Ki'] = self.Ca.K()*self.Cb2.K()
else:
self.cache['Ki'] = None
self.cache['var'] = {}
self.cache['var']['K0'] = self.C0.getParams()[0]**2
self.cache['var']['K'] = self.cache['K'].diagonal().mean()
if self.interaction:
self.cache['var']['Ki'] = self.cache['Ki'].diagonal().mean()
self.cache['var']['noise'] = self.ll.getParams()[0]**2
self.cache['K'] /= self.cache['var']['K']
if self.interaction:
self.cache['Ki'] /= self.cache['var']['Ki']
return conv
def getX(self):
"""
return X
"""
return self.cache['X']
def getK(self):
"""
return K
"""
return self.cache['K']
def getKi(self):
"""
return Ki
"""
return self.cache['Ki']
def getVarianceComps(self):
"""
return variance components
"""
return self.cache['var']
def _initParams_fast(self):
"""
initialize the gp parameters
1) project Y on the known factor X0 -> Y0
average variance of Y0 is used to initialize the variance explained by X0
2) considers the residual Y1 = Y-Y0 (this is equivalent to regressing out X0)
3) perform PCA on cov(Y1) and considers the first k PC for initializing X
4) the variance of all other PCs is used to initialize the noise
5) the variance explained by interaction is set to a small random number
"""
Xd = LA.pinv(self.X0)
Y0 = self.X0.dot(Xd.dot(self.Y))
Y1 = self.Y-Y0
YY = SP.cov(Y1)
S,U = LA.eigh(YY)
X = U[:,-self.k:]*SP.sqrt(S[-self.k:])
a = SP.array([SP.sqrt(Y0.var(0).mean())])
b = 1e-3*SP.randn(1)
c = SP.array([SP.sqrt((YY-SP.dot(X,X.T)).diagonal().mean())])
# gp hyper params
params = limix.CGPHyperParams()
if self.interaction:
params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
else:
params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
params['lik'] = c
return params
def _initParams_regressOut(self,Ycc,X,varXX):
"""
initialize the gp parameters
1) the variance of Kcc is set to Ycc.var(0).mean()
2) X is initialized with the provided value, rescaled to match varXX
3) the interaction variance (if interaction is True) is set to ~0
4) the noise variance is set to the residual variance
"""
X *= SP.sqrt(varXX/(X**2).mean())
Y1 = self.Y-Ycc
a = SP.array([SP.sqrt(Ycc.var(0).mean())])
b = 1e-3*SP.ones(1)
c = Y1.var(0).mean()-varXX
c = SP.maximum(1e-1,c)
c = SP.array([SP.sqrt(c)])
# gp hyper params
params = limix.CGPHyperParams()
if self.interaction:
params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
else:
params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
params['lik'] = c
return params
def _initParams_random(self):
"""
initialize the gp parameters randomly
"""
# gp hyper params
params = limix.CGPHyperParams()
if self.interaction: params['covar'] = SP.concatenate([SP.randn(self.N*self.k+1),SP.ones(1),SP.randn(1)])
else: params['covar'] = SP.randn(self.N*self.k+1)
params['lik'] = SP.randn(1)
return params
def _initParams_null(self,varX0X0,nois):
"""
initialize from null model
"""
X = 1e-3*SP.randn(self.N,self.k)
a = SP.array([SP.sqrt(varX0X0)])
b = 1e-3*SP.ones(1)
c = SP.array([SP.sqrt(nois)])
# gp hyper params
params = limix.CGPHyperParams()
if self.interaction:
params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
else:
params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
params['lik'] = c
return params
def fix_a(self,flag=True):
"""
if flag==True: fix a
else: set a free
"""
if flag: self.C0.setParamMask(SP.zeros(1))
else: self.C0.setParamMask(SP.ones(1))
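# A hedged usage sketch (synthetic shapes only; requires limix_legacy to be
# importable, and all names below are placeholders):
#
#   N, G, k0 = 100, 50, 2
#   Y  = SP.randn(N, G)          # expression matrix
#   X0 = SP.randn(N, k0)         # known factor(s), e.g. cell-cycle scores
#   gp = gpCLVM(Y=Y, X0=X0, k=1)
#   params0 = gp.initParams(method='fast')
#   conv = gp.optimize(params0)
#   X_hidden = gp.getX()         # inferred hidden factor
#   var_comps = gp.getVarianceComps()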
|
|
__author__ = 'chris'
"""
Copyright (c) 2015 Chris Pacia
"""
import bitcoin
import random
from io import BytesIO
from random import shuffle
from protocol import PeerFactory
from twisted.internet import reactor, defer, task
from discovery import dns_discovery
from binascii import unhexlify
from extensions import BloomFilter
from bitcoin.core import CTransaction
from bitcoin.net import CInv
from bitcoin.messages import msg_inv
from bitcoin import base58
from blockchain import BlockDatabase
from log import *
from twisted.python import log, logfile
from zope.interface.verify import verifyObject
from zope.interface.exceptions import DoesNotImplement
from listeners import DownloadListener, PeerEventListener
class BitcoinClient(object):
def __init__(self, addrs, params="mainnet", blockchain=None, user_agent="/pyBitcoin:0.1/", max_connections=10, subscriptions=[], listeners=[]):
self.addrs = addrs
self.params = params
self.blockchain = blockchain
self.user_agent = user_agent
self.max_connections = max_connections
self.testnet = True if params == "testnet" else False
self.peers = []
self.inventory = {}
self.pending_txs = {}
self.subscriptions = {}
self.bloom_filter = BloomFilter(10, 0.001, random.getrandbits(32), BloomFilter.UPDATE_NONE)
self.download_listener = None
self.peer_event_listener = None
for s in subscriptions:
self.subscribe_address(s[0], s[1])
for l in listeners:
self.add_event_listener(l)
self._connect_to_peers()
if self.blockchain: self._start_chain_download()
bitcoin.SelectParams(params)
def add_event_listener(self, listener):
try:
verifyObject(DownloadListener, listener)
self.download_listener = listener
for peer in self.peers:
if peer.protocol is not None:
peer.protocol.download_listener = listener
except DoesNotImplement:
pass
try:
verifyObject(PeerEventListener, listener)
self.peer_event_listener = listener
except DoesNotImplement:
pass
def _connect_to_peers(self):
"""
Will attempt to connect to enough peers to get us up to `max_connections`. This should be called
again after we disconnect from a peer to maintain a stable number of peers.
"""
if len(self.peers) < self.max_connections:
shuffle(self.addrs)
for i in range(self.max_connections - len(self.peers)):
if len(self.addrs) > 0:
addr = self.addrs.pop(0)
peer = PeerFactory(self.params, self.user_agent, self.inventory, self.subscriptions,
self.bloom_filter, self._on_peer_disconnected, self.blockchain, self.download_listener)
reactor.connectTCP(addr[0], addr[1], peer)
self.peers.append(peer)
if self.peer_event_listener is not None:
self.peer_event_listener.on_peer_connected(addr, len(self.peers))
else:
# We ran out of addresses and need to hit up the seeds again.
self.addrs = dns_discovery(self.testnet)
self._connect_to_peers()
def get_peer_count(self):
return len(self.peers)
def _start_chain_download(self):
"""
Pick a single peer and download the headers/merkle blocks from it until we are at the tip of the chain.
If the peer isn't fully initialized yet, let's pause a second and try again.
"""
shuffle(self.peers)
if self.peers[0].protocol is None or self.peers[0].protocol.version is None:
return task.deferLater(reactor, 1, self._start_chain_download)
self.peers[0].protocol.download_blocks(self.check_for_more_blocks)
def check_for_more_blocks(self):
"""
After we finish downloading blocks from our download peer let's check to see if any of our other peers
know about any additional blocks. If so, let's download from them as well.
"""
for peer in self.peers:
if peer.protocol is not None and peer.protocol.version is not None:
if peer.protocol.version.nStartingHeight > self.blockchain.get_height():
peer.protocol.download_blocks(self.check_for_more_blocks)
break
def _on_peer_disconnected(self, peer):
if self.peer_event_listener is not None:
ip = (peer.protocol.transport.getPeer().host, peer.protocol.transport.getPeer().port)
self.peer_event_listener.on_peer_disconnected(ip, len(self.peers))
self.peers.remove(peer)
self._connect_to_peers()
def broadcast_tx(self, tx):
"""
Sends the tx to half our peers and waits for half of the remainder to
announce it via inv packets before calling back.
"""
def on_peer_announce(txid):
self.subscriptions[txid]["announced"] += 1
if self.subscriptions[txid]["announced"] >= self.subscriptions[txid]["ann_threshold"]:
if self.subscriptions[txid]["timeout"].active():
self.subscriptions[txid]["timeout"].cancel()
self.subscriptions[txid]["deferred"].callback(True)
d = defer.Deferred()
transaction = CTransaction.stream_deserialize(BytesIO(unhexlify(tx)))
txhash = transaction.GetHash()
self.inventory[txhash] = transaction
cinv = CInv()
cinv.type = 1
cinv.hash = txhash
inv_packet = msg_inv()
inv_packet.inv.append(cinv)
self.bloom_filter.insert(txhash)
self.subscriptions[txhash] = {
"announced": 0,
"ann_threshold": len(self.peers)/4,
"callback": on_peer_anncounce,
"confirmations": 0,
"in_blocks": [],
"deferred": d,
"timeout": reactor.callLater(10, d.callback, False)
}
for peer in self.peers[len(self.peers)/2:]:
peer.protocol.load_filter()
for peer in self.peers[:len(self.peers)/2]:
peer.protocol.send_message(inv_packet)
return d
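    # Illustrative sketch (not part of the original class): broadcast_tx returns a
    # Deferred that fires with True once enough peers have re-announced the
    # transaction via inv packets, or with False after the 10 second timeout.
    # A caller might use it roughly like this (raw_tx_hex is a hypothetical
    # hex-encoded transaction):
    #
    #     d = client.broadcast_tx(raw_tx_hex)
    #     d.addCallback(lambda accepted: log.msg("propagated" if accepted else "timed out"))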
def subscribe_address(self, address, callback):
"""
Listen on an address for transactions. Since we can't validate unconfirmed
txs we will only callback if the tx is announced by a majority of our peers
or included in a block.
"""
def on_peer_announce(txhash):
if self.subscriptions[txhash]["announced"] < self.subscriptions[txhash]["ann_threshold"] and self.subscriptions[txhash]["confirmations"] == 0:
self.subscriptions[txhash]["announced"] += 1
if self.subscriptions[txhash]["announced"] >= self.subscriptions[txhash]["ann_threshold"]:
callback(self.subscriptions[txhash]["tx"], self.subscriptions[txhash]["in_blocks"], self.subscriptions[txhash]["confirmations"])
self.subscriptions[txhash]["last_confirmation"] = self.subscriptions[txhash]["confirmations"]
elif self.subscriptions[txhash]["confirmations"] > self.subscriptions[txhash]["last_confirmation"]:
self.subscriptions[txhash]["last_confirmation"] = self.subscriptions[txhash]["confirmations"]
callback(self.subscriptions[txhash]["tx"], self.subscriptions[txhash]["in_blocks"], self.subscriptions[txhash]["confirmations"])
self.subscriptions[address] = (len(self.peers)/2, on_peer_announce)
self.bloom_filter.insert(base58.decode(address)[1:21])
for peer in self.peers:
if peer.protocol is not None:
peer.protocol.load_filter()
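    # Illustrative sketch (not part of the original class): the callback passed to
    # subscribe_address receives (tx, in_blocks, confirmations) and fires again as
    # the confirmation count grows. The address below is a made-up example:
    #
    #     def on_tx(tx, in_blocks, confirmations):
    #         log.msg("payment seen with %d confirmations" % confirmations)
    #     client.subscribe_address("mxqVExampleTestnetAddr", on_tx)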
def unsubscribe_address(self, address):
"""
Unsubscribe to an address. Will update the bloom filter to reflect its
state before the address was inserted.
"""
if address in self.subscriptions:
self.bloom_filter.remove(base58.decode(address)[1:21])
for peer in self.peers:
if peer.protocol is not None:
peer.protocol.load_filter()
del self.subscriptions[address]
if __name__ == "__main__":
# Connect to testnet
logFile = logfile.LogFile.fromFullPath("bitcoin.log", rotateLength=15000000, maxRotatedFiles=1)
log.addObserver(FileLogObserver(logFile).emit)
log.addObserver(FileLogObserver().emit)
bd = BlockDatabase("blocks.db", testnet=True)
BitcoinClient(dns_discovery(True), params="testnet", blockchain=bd)
reactor.run()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
    """The resource model definition for an Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class Configuration(ProxyResource):
"""Tenant configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param enforce_private_markdown_storage: When flag is set to true Markdown tile will require
external storage configuration (URI). The inline content configuration will be prohibited.
:type enforce_private_markdown_storage: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enforce_private_markdown_storage': {'key': 'properties.enforcePrivateMarkdownStorage', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(Configuration, self).__init__(**kwargs)
self.enforce_private_markdown_storage = kwargs.get('enforce_private_markdown_storage', None)
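# Illustrative sketch (not part of the generated models): a tenant configuration
# that forces markdown tiles to reference external storage could be built as
#
#     tenant_config = Configuration(enforce_private_markdown_storage=True)
#
# and then serialized by whichever operations client sends it to the service.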
class ConfigurationList(msrest.serialization.Model):
"""List of tenant configurations.
:param value: The array of tenant configurations.
:type value: list[~azure.mgmt.portal.models.Configuration]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Configuration]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConfigurationList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class Dashboard(msrest.serialization.Model):
"""The shared dashboard resource definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param lenses: The dashboard lenses.
:type lenses: list[~azure.mgmt.portal.models.DashboardLens]
:param metadata: The dashboard metadata.
:type metadata: dict[str, any]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'lenses': {'key': 'properties.lenses', 'type': '[DashboardLens]'},
'metadata': {'key': 'properties.metadata', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(Dashboard, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
self.lenses = kwargs.get('lenses', None)
self.metadata = kwargs.get('metadata', None)
class DashboardLens(msrest.serialization.Model):
"""A dashboard lens.
All required parameters must be populated in order to send to Azure.
:param order: Required. The lens order.
:type order: int
:param parts: Required. The dashboard parts.
:type parts: list[~azure.mgmt.portal.models.DashboardParts]
    :param metadata: The dashboard lens's metadata.
:type metadata: dict[str, any]
"""
_validation = {
'order': {'required': True},
'parts': {'required': True},
}
_attribute_map = {
'order': {'key': 'order', 'type': 'int'},
'parts': {'key': 'parts', 'type': '[DashboardParts]'},
'metadata': {'key': 'metadata', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(DashboardLens, self).__init__(**kwargs)
self.order = kwargs['order']
self.parts = kwargs['parts']
self.metadata = kwargs.get('metadata', None)
class DashboardListResult(msrest.serialization.Model):
"""List of dashboards.
    :param value: The array of dashboards.
:type value: list[~azure.mgmt.portal.models.Dashboard]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Dashboard]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DashboardListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class DashboardPartMetadata(msrest.serialization.Model):
"""A dashboard part metadata.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: MarkdownPartMetadata.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, any]
    :param type: Required. The type of dashboard part. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'Extension/HubsExtension/PartType/MarkdownPart': 'MarkdownPartMetadata'}
}
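    # Note: msrest consults this discriminator map during deserialization, so a
    # payload whose 'type' field is 'Extension/HubsExtension/PartType/MarkdownPart'
    # is materialized as MarkdownPartMetadata instead of this base class.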
def __init__(
self,
**kwargs
):
super(DashboardPartMetadata, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = 'DashboardPartMetadata' # type: str
class DashboardParts(msrest.serialization.Model):
"""A dashboard part.
All required parameters must be populated in order to send to Azure.
:param position: Required. The dashboard's part position.
:type position: ~azure.mgmt.portal.models.DashboardPartsPosition
:param metadata: The dashboard part's metadata.
:type metadata: ~azure.mgmt.portal.models.DashboardPartMetadata
"""
_validation = {
'position': {'required': True},
}
_attribute_map = {
'position': {'key': 'position', 'type': 'DashboardPartsPosition'},
'metadata': {'key': 'metadata', 'type': 'DashboardPartMetadata'},
}
def __init__(
self,
**kwargs
):
super(DashboardParts, self).__init__(**kwargs)
self.position = kwargs['position']
self.metadata = kwargs.get('metadata', None)
class DashboardPartsPosition(msrest.serialization.Model):
"""The dashboard's part position.
All required parameters must be populated in order to send to Azure.
:param x: Required. The dashboard's part x coordinate.
:type x: int
:param y: Required. The dashboard's part y coordinate.
:type y: int
:param row_span: Required. The dashboard's part row span.
:type row_span: int
:param col_span: Required. The dashboard's part column span.
:type col_span: int
:param metadata: The dashboard part's metadata.
:type metadata: dict[str, any]
"""
_validation = {
'x': {'required': True},
'y': {'required': True},
'row_span': {'required': True},
'col_span': {'required': True},
}
_attribute_map = {
'x': {'key': 'x', 'type': 'int'},
'y': {'key': 'y', 'type': 'int'},
'row_span': {'key': 'rowSpan', 'type': 'int'},
'col_span': {'key': 'colSpan', 'type': 'int'},
'metadata': {'key': 'metadata', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(DashboardPartsPosition, self).__init__(**kwargs)
self.x = kwargs['x']
self.y = kwargs['y']
self.row_span = kwargs['row_span']
self.col_span = kwargs['col_span']
self.metadata = kwargs.get('metadata', None)
class ErrorDefinition(msrest.serialization.Model):
"""Error definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Service specific error code which serves as the substatus for the HTTP error code.
:vartype code: int
:ivar message: Description of the error.
:vartype message: str
:ivar details: Internal error details.
:vartype details: list[~azure.mgmt.portal.models.ErrorDefinition]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDefinition]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDefinition, self).__init__(**kwargs)
self.code = None
self.message = None
self.details = None
class ErrorResponse(msrest.serialization.Model):
"""Error response.
:param error: The error details.
:type error: ~azure.mgmt.portal.models.ErrorDefinition
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDefinition'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class MarkdownPartMetadata(DashboardPartMetadata):
"""Markdown part metadata.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, any]
    :param type: Required. The type of dashboard part. Constant filled by server.
:type type: str
:param inputs: Input to dashboard part.
:type inputs: list[any]
:param settings: Markdown part settings.
:type settings: ~azure.mgmt.portal.models.MarkdownPartMetadataSettings
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[object]'},
'settings': {'key': 'settings', 'type': 'MarkdownPartMetadataSettings'},
}
def __init__(
self,
**kwargs
):
super(MarkdownPartMetadata, self).__init__(**kwargs)
self.type = 'Extension/HubsExtension/PartType/MarkdownPart' # type: str
self.inputs = kwargs.get('inputs', None)
self.settings = kwargs.get('settings', None)
class MarkdownPartMetadataSettings(msrest.serialization.Model):
"""Markdown part settings.
:param content: The content of markdown part.
:type content: ~azure.mgmt.portal.models.MarkdownPartMetadataSettingsContent
"""
_attribute_map = {
'content': {'key': 'content', 'type': 'MarkdownPartMetadataSettingsContent'},
}
def __init__(
self,
**kwargs
):
super(MarkdownPartMetadataSettings, self).__init__(**kwargs)
self.content = kwargs.get('content', None)
class MarkdownPartMetadataSettingsContent(msrest.serialization.Model):
"""The content of markdown part.
:param settings: The setting of the content of markdown part.
:type settings: ~azure.mgmt.portal.models.MarkdownPartMetadataSettingsContentSettings
"""
_attribute_map = {
'settings': {'key': 'settings', 'type': 'MarkdownPartMetadataSettingsContentSettings'},
}
def __init__(
self,
**kwargs
):
super(MarkdownPartMetadataSettingsContent, self).__init__(**kwargs)
self.settings = kwargs.get('settings', None)
class MarkdownPartMetadataSettingsContentSettings(msrest.serialization.Model):
"""The setting of the content of markdown part.
:param content: The content of the markdown part.
:type content: str
:param title: The title of the markdown part.
:type title: str
:param subtitle: The subtitle of the markdown part.
:type subtitle: str
:param markdown_source: The source of the content of the markdown part.
:type markdown_source: int
:param markdown_uri: The uri of markdown content.
:type markdown_uri: str
"""
_attribute_map = {
'content': {'key': 'content', 'type': 'str'},
'title': {'key': 'title', 'type': 'str'},
'subtitle': {'key': 'subtitle', 'type': 'str'},
'markdown_source': {'key': 'markdownSource', 'type': 'int'},
'markdown_uri': {'key': 'markdownUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MarkdownPartMetadataSettingsContentSettings, self).__init__(**kwargs)
self.content = kwargs.get('content', None)
self.title = kwargs.get('title', None)
self.subtitle = kwargs.get('subtitle', None)
self.markdown_source = kwargs.get('markdown_source', None)
self.markdown_uri = kwargs.get('markdown_uri', None)
class PatchableDashboard(msrest.serialization.Model):
"""The shared dashboard resource definition.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param lenses: The dashboard lenses.
:type lenses: list[~azure.mgmt.portal.models.DashboardLens]
:param metadata: The dashboard metadata.
:type metadata: dict[str, any]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'lenses': {'key': 'properties.lenses', 'type': '[DashboardLens]'},
'metadata': {'key': 'properties.metadata', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(PatchableDashboard, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.lenses = kwargs.get('lenses', None)
self.metadata = kwargs.get('metadata', None)
class ResourceProviderOperation(msrest.serialization.Model):
"""Supported operations of this resource provider.
:param name: Operation name, in format of {provider}/{resource}/{operation}.
:type name: str
:param is_data_action: Indicates whether the operation applies to data-plane.
:type is_data_action: str
:param display: Display metadata associated with the operation.
:type display: ~azure.mgmt.portal.models.ResourceProviderOperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'str'},
'display': {'key': 'display', 'type': 'ResourceProviderOperationDisplay'},
}
def __init__(
self,
**kwargs
):
super(ResourceProviderOperation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.display = kwargs.get('display', None)
class ResourceProviderOperationDisplay(msrest.serialization.Model):
"""Display metadata associated with the operation.
:param provider: Resource provider: Microsoft Custom Providers.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: Type of operation: get, read, delete, etc.
:type operation: str
:param description: Description of this operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceProviderOperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class ResourceProviderOperationList(msrest.serialization.Model):
"""Results of the request to list operations.
:param value: List of operations supported by this resource provider.
:type value: list[~azure.mgmt.portal.models.ResourceProviderOperation]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceProviderOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceProviderOperationList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class Violation(msrest.serialization.Model):
"""Violation information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Id of the item that violates tenant configuration.
:vartype id: str
:ivar user_id: Id of the user who owns violated item.
:vartype user_id: str
:ivar error_message: Error message.
:vartype error_message: str
"""
_validation = {
'id': {'readonly': True},
'user_id': {'readonly': True},
'error_message': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'user_id': {'key': 'userId', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Violation, self).__init__(**kwargs)
self.id = None
self.user_id = None
self.error_message = None
class ViolationsList(msrest.serialization.Model):
    """List of items that violate the tenant's configuration.
:param value: The array of violations.
:type value: list[~azure.mgmt.portal.models.Violation]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Violation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ViolationsList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
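# Illustrative sketch (not part of the generated code): assembling a minimal
# Dashboard model from the classes above. The location, titles, coordinates and
# markdown content are made-up example values.
def _example_dashboard():
    # Markdown tile settings nested as the serializer expects them.
    settings = MarkdownPartMetadataSettings(
        content=MarkdownPartMetadataSettingsContent(
            settings=MarkdownPartMetadataSettingsContentSettings(
                title="Welcome", content="# Hello"
            )
        )
    )
    # One part placed at the top-left corner of the dashboard grid.
    part = DashboardParts(
        position=DashboardPartsPosition(x=0, y=0, row_span=2, col_span=3),
        metadata=MarkdownPartMetadata(settings=settings),
    )
    lens = DashboardLens(order=0, parts=[part])
    return Dashboard(location="eastus", tags={"env": "demo"}, lenses=[lens])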
|
|
# This is a patched version of apparentlymart's python-nextbus
# library available on Google. All rights to it remain his. This
# is not a project of Matt Conway or Portland Transport.
from xml.etree import ElementTree
from urllib import urlencode
NEXTBUS_SERVICE_URL = "http://webservices.nextbus.com/service/publicXMLFeed"
def _autoinit(realinit = None):
def auto_init(self, **kwargs):
for k in kwargs:
self.__dict__[k] = kwargs[k]
if realinit is not None:
realinit(self)
return auto_init
_url_fetcher = None
def _init_fetcher():
global _url_fetcher
have_urllib2 = True
try:
import urllib2
    except ImportError:
have_urllib2 = False
if have_urllib2:
def urllib2_fetcher(url):
return urllib2.urlopen(url)
_url_fetcher = urllib2_fetcher
_init_fetcher()
_cache = None
def set_url_fetcher(func):
    global _url_fetcher
    _url_fetcher = func
def fetch_xml(url):
return ElementTree.parse(_url_fetcher(url))
def make_fetcher_method(url_func, target_class):
def meth(self):
if _url_fetcher is None:
raise RuntimeError("No configured url fetcher")
url = url_func(self)
        etree = fetch_xml(url)
        return target_class.from_etree(etree)
    return meth
def make_nextbus_url(command, a = None, *args):
real_args = []
real_args.append(('command', command))
if a is not None:
real_args.append(('a', a))
real_args.extend(args)
return '?'.join([NEXTBUS_SERVICE_URL, urlencode(real_args, True)])
def fetch_nextbus_url(*args, **kwargs):
url = make_nextbus_url(*args, **kwargs)
return fetch_xml(url)
def memoize_in_cache(key_name, expire_time):
def decorator(orig_func):
def func(*args):
if _cache is not None:
full_key_name = ":".join((key_name, ",".join(args)))
import pickle
cacheval = _cache.get(full_key_name)
if cacheval is not None:
return pickle.loads(cacheval)
ret = orig_func(*args)
if _cache is not None:
if ret is not None:
cacheval = pickle.dumps(ret, 2)
_cache.set(full_key_name, cacheval)
return ret
return func
return decorator
@memoize_in_cache("agencies", 604800)
def get_all_agencies():
"""
Get a list of all agencies supported by the NextBus public API.
Note that this does not include all agencies supported by NextBus.
Public data is not available for some agencies despite the fact
that they use NextBus. Hassle your local transit agency to
enable the public data feed.
"""
etree = fetch_nextbus_url("agencyList")
ret = []
for elem in etree.findall("agency"):
ret.append(Agency.from_elem(elem))
return ret
@memoize_in_cache("agency_routes", 604800)
def get_all_routes_for_agency(tag):
"""
Get a list of all routes for a given agency.
"""
etree = fetch_nextbus_url("routeList", tag)
ret = []
for elem in etree.findall("route"):
ret.append(Route.from_elem(elem))
return ret
@memoize_in_cache("route_config", 604800)
def get_route_config(agency_tag, route_tag):
"""
Get the route configuration for a given route with in a given agency.
"""
etree = fetch_nextbus_url("routeConfig", agency_tag, ('r', route_tag))
elem = etree.find("route")
return RouteConfig.from_elem(elem)
@memoize_in_cache("stop_predictions", 30)
def get_predictions_for_stop(agency_tag, stop_id):
"""
Get the current predictions for a particular stop across all routes.
"""
etree = fetch_nextbus_url("predictions", agency_tag, ('stopId', stop_id))
predictions = Predictions()
for predictions_elem in etree.findall("predictions"):
route = Route(tag=predictions_elem.get("routeTag"), title=predictions_elem.get("routeTitle"))
predictions.stop_title = predictions_elem.get("stopTitle")
no_predictions_direction_title = predictions_elem.get("dirTitleBecauseNoPredictions")
if no_predictions_direction_title:
# record the direction but no predictions
direction = TaglessDirection(title=no_predictions_direction_title, route=route)
predictions.directions.append(direction)
continue
for message_elem in predictions_elem.findall("message"):
predictions.messages.add(message_elem.get("text"))
for direction_elem in predictions_elem.findall("direction"):
direction = Direction()
direction.route = route
direction.title = direction_elem.get("title")
predictions.directions.append(direction)
for prediction_elem in direction_elem.findall("prediction"):
prediction = Prediction()
prediction.direction = direction
prediction.seconds = int(prediction_elem.get("seconds"))
prediction.minutes = int(prediction_elem.get("minutes"))
prediction.epoch_time = int(prediction_elem.get("epochTime"))
prediction.block = prediction_elem.get("block")
if prediction_elem.get("isDeparture") == "true":
prediction.is_departing = True
else:
prediction.is_departing = False
# For some reason NextBus returns the direction tag on
# each individual prediction rather than on the direction element.
direction.tag = prediction_elem.get("dirTag")
predictions.predictions.append(prediction)
predictions.predictions.sort(lambda a,b : int(a.epoch_time - b.epoch_time))
return predictions
@memoize_in_cache("all_vehicles", 30)
def get_all_vehicle_locations(agency_tag):
etree = fetch_nextbus_url("vehicleLocations", agency_tag, ('t', 0))
return map(lambda elem : Vehicle.from_elem(elem), etree.findall("vehicle"))
@memoize_in_cache("route_vehicles", 30)
def get_vehicle_locations_on_route(agency_tag, route_tag):
etree = fetch_nextbus_url("vehicleLocations", agency_tag, ('r', route_tag), ('t', 0))
return map(lambda elem : Vehicle.from_elem(elem), etree.findall("vehicle"))
def _standard_repr(self):
return "%s(%s)" % (self.__class__.__name__, self.__dict__)
class Agency:
tag = None
title = None
region_title = None
__repr__ = _standard_repr
__init__ = _autoinit()
@classmethod
def from_elem(cls, elem):
ret = cls()
ret.tag = elem.get("tag")
ret.title = elem.get("title")
ret.region_title = elem.get("regionTitle")
return ret
class Route:
tag = None
title = None
__repr__ = _standard_repr
__init__ = _autoinit()
@classmethod
def from_elem(cls, elem):
ret = cls()
ret.tag = elem.get("tag")
ret.title = elem.get("title")
return ret
class RouteConfig:
route = None
color = None
opposite_color = None
stops_dict = None
directions_dict = None
__repr__ = _standard_repr
@_autoinit
def __init__(self):
if self.stops_dict is None:
self.stops_dict = {}
if self.directions_dict is None:
self.directions_dict = {}
@classmethod
def from_elem(cls, elem):
self = cls()
self.route = Route.from_elem(elem)
self.color = elem.get("color")
self.opposite_color = elem.get("oppositeColor")
# We want to return the dict keyed on stop_id,
# but in order to build the directions we
# need to key on tag too. Also, some agencies
# don't have unique stop IDs, so they use stop tags instead.
self.stops_by_tag = {}
# For agencies that don't have unique Stop IDs, we need to
# have a list of all stops that is guaranteed to have them all
self.stops = []
for stop_elem in elem.findall("stop"):
stop = StopOnRoute.from_elem(stop_elem)
self.stops.append(stop)
self.stops_by_tag[stop.tag] = stop
self.stops_dict[stop.stop_id] = stop
for direction_elem in elem.findall("direction"):
direction = DirectionOnRoute()
direction.tag = direction_elem.get("tag")
direction.title = direction_elem.get("title")
direction.name = direction_elem.get("name")
if direction_elem.get("useForUI") == "true":
direction.use_for_ui = True
else:
direction.use_for_ui = False
self.directions_dict[direction.tag] = direction
for stop_elem in direction_elem.findall("stop"):
tag = stop_elem.get("tag")
try:
stop = self.stops_by_tag[tag]
direction.stops.append(stop)
except KeyError:
# For some reason sometimes NextBus
# references stops that it hasn't
# told us about. Not much we can do.
pass
return self
# stops is now just a list element, so we can be sure we get them all
# see above comment
#@property
#def stops(self):
# return self.stops_dict.values()
@property
def directions(self):
return self.directions_dict.values()
    def has_stop_id(self, stop_id):
        return stop_id in self.stops_dict
class Stop:
tag = None
title = None
latitude = None
longitude = None
stop_id = None
__repr__ = _standard_repr
__init__ = _autoinit()
@classmethod
def from_elem(cls, elem):
self = cls()
self.tag = elem.get("tag")
self.title = elem.get("title")
self.latitude = float(elem.get("lat"))
self.longitude = float(elem.get("lon"))
self.stop_id = elem.get("stopId")
return self
class StopOnRoute(Stop):
direction_tag = None
@classmethod
def from_elem(cls, elem):
stop = Stop.from_elem(elem)
self = StopOnRoute(**stop.__dict__)
self.direction_tag = elem.get("dirTag")
return self
class TaglessDirection:
"""
A direction that only has a display title and lacks a tag.
In the prediction output when a particular direction has no predictions
NextBus returns only the name of the direction and not its tag,
so this really stupid class is used to represent that situation.
"""
title = None
route = None
__repr__ = _standard_repr
__init__ = _autoinit()
class Direction(TaglessDirection):
tag = None
class DirectionOnRoute(Direction):
use_for_ui = None
stops = None
name = None
@_autoinit
def __init__(self):
if self.stops is None:
self.stops = []
class Predictions:
directions = None
messages = None
predictions = None
stop_title = None
__repr__ = _standard_repr
@_autoinit
def __init__(self):
if self.messages is None:
self.messages = set()
if self.directions is None:
self.directions = []
if self.predictions is None:
self.predictions = []
class Prediction:
direction = None
minutes = None
seconds = None
epoch_time = None
is_departing = None
block = None
__repr__ = _standard_repr
__init__ = _autoinit()
class Vehicle:
id = None
route_tag = None
direction_tag = None
latitude = None
longitude = None
seconds_since_report = None
predictable = None
heading = None
leading_vehicle_id = None
__repr__ = _standard_repr
__init__ = _autoinit()
@classmethod
def from_elem(cls, elem):
self = cls()
self.id = elem.get("id")
self.route_tag = elem.get("routeTag")
self.direction_tag = elem.get("dirTag")
self.latitude = float(elem.get("lat"))
self.longitude = float(elem.get("lon"))
self.seconds_since_report = int(elem.get("secsSinceReport"))
self.heading = float(elem.get("heading"))
self.leading_vehicle_id = elem.get("leadingVehicleId")
self.predictable = (elem.get("predictable") == "true")
if self.route_tag == "null":
self.route_tag = None
if self.direction_tag == "null":
self.direction_tag = None
return self
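# Illustrative sketch (not part of the original library): typical read-only usage
# of the module-level helpers. The agency tag and stop id are made-up example
# values; real tags come from get_all_agencies() and get_route_config().
def _example_usage():
    agencies = get_all_agencies()
    predictions = get_predictions_for_stop("sf-muni", "15553")
    # Collect (direction title, minutes until arrival) pairs for display.
    upcoming = [(p.direction.title, p.minutes) for p in predictions.predictions]
    return agencies, upcoming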
|
|
#!/usr/bin/env python
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import argparse
import os
import shutil
import signal
import subprocess
import sys
import warnings
from fusesoc import __version__
# Check if this is run from a local installation
fusesocdir = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
)
if os.path.exists(os.path.join(fusesocdir, "fusesoc")):
sys.path[0:0] = [fusesocdir]
import logging
from edalize import get_edatool
from fusesoc.config import Config
from fusesoc.coremanager import CoreManager, DependencyError
from fusesoc.edalizer import Edalizer
from fusesoc.librarymanager import Library
from fusesoc.utils import Launcher, setup_logging, yaml_fread
from fusesoc.vlnv import Vlnv
logger = logging.getLogger(__name__)
def _get_core(cm, name):
core = None
try:
core = cm.get_core(Vlnv(name))
except RuntimeError as e:
logger.error(str(e))
exit(1)
except DependencyError as e:
logger.error(
f"{name!r} or any of its dependencies requires {e.value!r}, but "
"this core was not found"
)
exit(1)
return core
def abort_handler(signal, frame):
print("")
logger.info("****************************")
logger.info("**** FuseSoC aborted ****")
logger.info("****************************")
print("")
sys.exit(0)
signal.signal(signal.SIGINT, abort_handler)
def pgm(cm, args):
warnings.warn(
"The 'pgm' subcommand has been removed. "
"Use 'fusesoc run --target=synth --run' instead."
)
def fetch(cm, args):
core = _get_core(cm, args.core)
try:
core.setup()
except RuntimeError as e:
logger.error("Failed to fetch '{}': {}".format(core.name, str(e)))
exit(1)
def init(cm, args):
warnings.warn(
"The 'init' subcommand to fetch the FuseSoC standard library has been "
"removed. Use 'fusesoc library add fusesoc_cores "
"https://github.com/fusesoc/fusesoc-cores' instead."
)
def list_paths(cm, args):
cores_root = [x.location for x in cm.get_libraries()]
print("\n".join(cores_root))
def add_library(cm, args):
sync_uri = vars(args)["sync-uri"]
if args.location:
location = args.location
elif vars(args).get("global", False):
location = os.path.join(cm._lm.library_root, args.name)
else:
location = os.path.join("fusesoc_libraries", args.name)
if "sync-type" in vars(args):
sync_type = vars(args)["sync-type"]
else:
sync_type = None
# Check if it's a dir. Otherwise fall back to git repo
if not sync_type:
if os.path.isdir(sync_uri):
sync_type = "local"
else:
sync_type = "git"
if sync_type == "local":
logger.info(
"Interpreting sync-uri '{}' as location for local provider.".format(
sync_uri
)
)
location = os.path.abspath(sync_uri)
auto_sync = not args.no_auto_sync
library = Library(args.name, location, sync_type, sync_uri, auto_sync)
if args.config:
config = Config(file=args.config)
elif vars(args)["global"]:
xdg_config_home = os.environ.get("XDG_CONFIG_HOME") or os.path.join(
os.path.expanduser("~"), ".config"
)
config_file = os.path.join(xdg_config_home, "fusesoc", "fusesoc.conf")
config = Config(path=config_file)
else:
config = Config(path="fusesoc.conf")
try:
config.add_library(library)
except RuntimeError as e:
logger.error("`add library` failed: " + str(e))
exit(1)
def library_list(cm, args):
lengths = [4, 8, 9, 8, 9]
for lib in cm.get_libraries():
lengths[0] = max(lengths[0], len(lib.name))
lengths[1] = max(lengths[1], len(lib.location))
lengths[2] = max(lengths[2], len(lib.sync_type))
lengths[3] = max(lengths[3], len(lib.sync_uri or ""))
print(
"{} : {} : {} : {} : {}".format(
"Name".ljust(lengths[0]),
"Location".ljust(lengths[1]),
"Sync type".ljust(lengths[2]),
"Sync URI".ljust(lengths[3]),
"Auto sync".ljust(lengths[4]),
)
)
for lib in cm.get_libraries():
print(
"{} : {} : {} : {} : {}".format(
lib.name.ljust(lengths[0]),
lib.location.ljust(lengths[1]),
lib.sync_type.ljust(lengths[2]),
(lib.sync_uri or "N/A").ljust(lengths[3]),
("y" if lib.auto_sync else "n").ljust(lengths[4]),
)
)
def list_cores(cm, args):
cores = cm.get_cores()
print("\nAvailable cores:\n")
if not cores:
cores_root = cm.get_libraries()
if cores_root:
logger.error("No cores found in any library")
else:
logger.error("No libraries registered")
exit(1)
maxlen = max(map(len, cores.keys()))
print("Core".ljust(maxlen) + " Cache status Description")
print("=" * 80)
for name in sorted(cores.keys()):
core = cores[name]
print(
name.ljust(maxlen)
+ " : "
+ core.cache_status().rjust(10)
+ " : "
+ (core.description or "<No description>")
)
def gen_list(cm, args):
cores = cm.get_generators()
if not cores:
print("\nNo available generators\n")
else:
print("\nAvailable generators:\n")
maxlen = max(map(len, cores.keys()))
print("Core".ljust(maxlen) + " Generator")
print("=" * (maxlen + 12))
for core in sorted(cores.keys()):
for generator_name, generator_data in cores[core].items():
print(
"{} : {} : {}".format(
core.ljust(maxlen),
generator_name,
generator_data.description or "<No description>",
)
)
def gen_show(cm, args):
cores = cm.get_generators()
for core in sorted(cores.keys()):
for generator_name, generator_data in cores[core].items():
if generator_name == args.generator:
print(
"""
Core : {}
Generator : {}
Description : {}
Usage :
{}""".format(
core,
generator_name,
generator_data.description or "<No description>",
generator_data.usage or "",
)
)
def core_info(cm, args):
core = _get_core(cm, args.core)
print(core.info())
def run(cm, args):
stages = (args.setup, args.build, args.run)
# Always run setup if build is true
args.setup |= args.build
# Run all stages by default if no stage flags are set
if stages == (False, False, False):
do_configure = True
do_build = True
do_run = True
elif stages == (True, False, True):
logger.error("Configure and run without build is invalid")
exit(1)
else:
do_configure = args.setup
do_build = args.build
do_run = args.run
flags = {"target": args.target or "default"}
if args.tool:
flags["tool"] = args.tool
for flag in args.flag:
if flag[0] == "+":
flags[flag[1:]] = True
elif flag[0] == "-":
flags[flag[1:]] = False
else:
flags[flag] = True
run_backend(
cm,
not args.no_export,
do_configure,
do_build,
do_run,
flags,
args.system_name,
args.system,
args.backendargs,
args.build_root,
args.verbose,
)
# Clean out old work root
def prepare_work_root(work_root):
if os.path.exists(work_root):
for f in os.listdir(work_root):
if os.path.isdir(os.path.join(work_root, f)):
shutil.rmtree(os.path.join(work_root, f))
else:
os.remove(os.path.join(work_root, f))
else:
os.makedirs(work_root)
def run_backend(
cm,
export,
do_configure,
do_build,
do_run,
flags,
system_name,
system,
backendargs,
build_root_arg,
verbose,
):
tool_error = (
"No tool was supplied on command line or found in '{}' core description"
)
core = _get_core(cm, system)
target = flags["target"]
try:
flags = dict(core.get_flags(target), **flags)
except SyntaxError as e:
logger.error(str(e))
exit(1)
tool = flags["tool"]
if not tool:
logger.error(tool_error.format(system))
exit(1)
build_root = build_root_arg or os.path.join(
cm.config.build_root, core.name.sanitized_name
)
logger.debug(f"Setting build_root to {build_root}")
if export:
export_root = os.path.join(build_root, "src")
else:
export_root = None
try:
work_root = os.path.join(build_root, f"{target}-{tool}")
except SyntaxError as e:
logger.error(e.msg)
exit(1)
edam_file = os.path.join(work_root, core.name.sanitized_name + ".eda.yml")
if not os.path.exists(edam_file):
do_configure = True
try:
backend_class = get_edatool(tool)
except ImportError:
logger.error(f"Backend {tool!r} not found")
exit(1)
edalizer = Edalizer(
toplevel=core.name,
flags=flags,
core_manager=cm,
work_root=work_root,
export_root=export_root,
system_name=system_name,
)
if do_configure:
try:
prepare_work_root(work_root)
edam = edalizer.run()
parsed_args = edalizer.parse_args(backend_class, backendargs, edam)
edalizer.add_parsed_args(backend_class, parsed_args)
except SyntaxError as e:
logger.error(e.msg)
exit(1)
except RuntimeError as e:
logger.error("Setup failed : {}".format(str(e)))
exit(1)
edalizer.to_yaml(edam_file)
else:
edam = yaml_fread(edam_file)
parsed_args = edalizer.parse_args(backend_class, backendargs, edam)
# Frontend/backend separation
try:
backend = backend_class(edam=edam, work_root=work_root, verbose=verbose)
except RuntimeError as e:
logger.error(str(e))
exit(1)
except FileNotFoundError as e:
logger.error(f'Could not find EDA API file "{e.filename}"')
exit(1)
if do_configure:
try:
backend.configure([])
print("")
except RuntimeError as e:
logger.error("Failed to configure the system")
logger.error(str(e))
exit(1)
if do_build:
try:
backend.build()
except RuntimeError as e:
logger.error("Failed to build {} : {}".format(str(core.name), str(e)))
exit(1)
if do_run:
try:
backend.run(parsed_args)
except RuntimeError as e:
logger.error("Failed to run {} : {}".format(str(core.name), str(e)))
exit(1)
def update(cm, args):
if "warn" in args:
logger.warning(args.warn)
cm._lm.update(args.libraries)
def init_logging(verbose, monochrome, log_file=None):
level = logging.DEBUG if verbose else logging.INFO
setup_logging(level, monochrome, log_file)
if verbose:
logger.debug("Verbose output")
else:
logger.debug("Concise output")
if monochrome:
logger.debug("Monochrome output")
else:
logger.debug("Colorful output")
def init_coremanager(config, args_cores_root):
logger.debug("Initializing core manager")
cm = CoreManager(config)
args_libs = [Library(acr, acr) for acr in args_cores_root]
# Add libraries from config file, env var and command-line
for library in config.libraries + args_libs:
try:
cm.add_library(library)
except (RuntimeError, OSError) as e:
_s = "Failed to register library '{}'"
logger.warning(_s.format(str(e)))
return cm
def get_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# Global actions
parser.add_argument(
"--version",
help="Display the FuseSoC version",
action="version",
version=__version__,
)
# Global options
parser.add_argument(
"--cores-root",
help="Add additional directories containing cores",
default=[],
action="append",
)
parser.add_argument(
"--config", help="Specify the config file to use", type=argparse.FileType("r")
)
parser.add_argument(
"--monochrome",
help="Don't use color for messages",
action="store_true",
default=not sys.stdout.isatty(),
)
parser.add_argument("--verbose", help="More info messages", action="store_true")
parser.add_argument("--log-file", help="Write log messages to file")
# init subparser
parser_init = subparsers.add_parser(
"init", help="Initialize the FuseSoC core libraries. DEPRECATED"
)
parser_init.add_argument(
"-y", action="store_true", help="Skip user input and use default settings"
)
parser_init.set_defaults(func=init)
# pgm subparser
parser_pgm = subparsers.add_parser(
"pgm",
help="Program an FPGA with a system configuration. DEPRECATED, use 'run' instead.",
)
parser_pgm.add_argument("system")
parser_pgm.add_argument("backendargs", nargs=argparse.REMAINDER)
parser_pgm.set_defaults(func=pgm)
# fetch subparser
parser_fetch = subparsers.add_parser(
"fetch", help="Fetch a remote core and its dependencies to local cache"
)
parser_fetch.add_argument("core")
parser_fetch.set_defaults(func=fetch)
# core subparser
parser_core = subparsers.add_parser(
"core", help="Subcommands for dealing with cores"
)
core_subparsers = parser_core.add_subparsers()
parser_core.set_defaults(subparser=parser_core)
# core list subparser
parser_core_list = core_subparsers.add_parser("list", help="List available cores")
parser_core_list.set_defaults(func=list_cores)
# core show subparser
parser_core_show = core_subparsers.add_parser(
"show", help="Show information about a core"
)
parser_core_show.add_argument("core", help="Name of the core to show")
parser_core_show.set_defaults(func=core_info)
# list-cores subparser
parser_list_cores = subparsers.add_parser("list-cores", help="List available cores")
parser_list_cores.set_defaults(func=list_cores)
# core-info subparser
parser_core_info = subparsers.add_parser(
"core-info", help="Display details about a core"
)
parser_core_info.add_argument("core")
parser_core_info.set_defaults(func=core_info)
# gen subparser
parser_gen = subparsers.add_parser(
"gen", help="Run or show information about generators"
)
parser_gen.set_defaults(subparser=parser_gen)
gen_subparsers = parser_gen.add_subparsers()
# gen list subparser
parser_gen_list = gen_subparsers.add_parser(
"list", help="List available generators"
)
parser_gen_list.set_defaults(func=gen_list)
# gen show subparser
parser_gen_show = gen_subparsers.add_parser(
"show", help="Show information about a generator"
)
parser_gen_show.add_argument("generator", help="Name of the generator to show")
parser_gen_show.set_defaults(func=gen_show)
# list-paths subparser
parser_list_paths = subparsers.add_parser(
"list-paths", help="Display the search order for core root paths"
)
parser_list_paths.set_defaults(func=list_paths)
# library subparser
parser_library = subparsers.add_parser(
"library", help="Subcommands for dealing with library management"
)
library_subparsers = parser_library.add_subparsers()
parser_library.set_defaults(subparser=parser_library)
# library add subparser
parser_library_add = library_subparsers.add_parser(
"add", help="Add new library to fusesoc.conf"
)
parser_library_add.add_argument("name", help="A friendly name for the library")
parser_library_add.add_argument(
"sync-uri", help="The URI source for the library (can be a file system path)"
)
parser_library_add.add_argument(
"--location",
help="The location to store the library into (defaults to $XDG_DATA_HOME/[name])",
)
parser_library_add.add_argument(
"--sync-type",
help="The provider type for the library. Defaults to 'git'.",
choices=["git", "local"],
)
parser_library_add.add_argument(
"--no-auto-sync",
action="store_true",
help="Disable automatic updates of the library",
)
parser_library_add.add_argument(
"--global",
action="store_true",
help="Use the global FuseSoC config file in $XDG_CONFIG_HOME/fusesoc/fusesoc.conf",
)
parser_library_add.set_defaults(func=add_library)
# library list subparser
parser_library_list = library_subparsers.add_parser(
"list", help="List core libraries"
)
parser_library_list.set_defaults(func=library_list)
# library update subparser
parser_library_update = library_subparsers.add_parser(
"update", help="Update the FuseSoC core libraries"
)
parser_library_update.add_argument(
"libraries", nargs="*", help="The libraries to update (defaults to all)"
)
parser_library_update.set_defaults(func=update)
# run subparser
parser_run = subparsers.add_parser("run", help="Start a tool flow")
parser_run.add_argument(
"--no-export",
action="store_true",
help="Reference source files from their current location instead of exporting to a build tree",
)
parser_run.add_argument(
"--build-root", help="Output directory for build. Defaults to build/$VLNV"
)
parser_run.add_argument("--setup", action="store_true", help="Execute setup stage")
parser_run.add_argument("--build", action="store_true", help="Execute build stage")
parser_run.add_argument("--run", action="store_true", help="Execute run stage")
parser_run.add_argument("--target", help="Override default target")
parser_run.add_argument("--tool", help="Override default tool for target")
parser_run.add_argument(
"--flag",
help="Set custom use flags. Can be specified multiple times",
action="append",
default=[],
)
parser_run.add_argument(
"--system-name", help="Override default VLNV name for system"
)
parser_run.add_argument("system", help="Select a system to operate on")
parser_run.add_argument(
"backendargs", nargs=argparse.REMAINDER, help="arguments to be sent to backend"
)
parser_run.set_defaults(func=run)
# update subparser
parser_update = subparsers.add_parser(
"update", help="Update the FuseSoC core libraries"
)
parser_update.add_argument(
"libraries",
nargs="*",
help="The libraries (or core roots) to update (defaults to all)",
)
parser_update.set_defaults(func=update)
parser_update.set_defaults(
warn="'fusesoc update' is deprecated. Use 'fusesoc library update' instead"
)
return parser
def parse_args(argv):
parser = get_parser()
args = parser.parse_args(argv)
if hasattr(args, "func"):
return args
if hasattr(args, "subparser"):
args.subparser.print_help()
else:
parser.print_help()
return None
def fusesoc(args):
init_logging(args.verbose, args.monochrome, args.log_file)
config = Config(file=args.config)
cm = init_coremanager(config, args.cores_root)
# Run the function
args.func(cm, args)
def main():
args = parse_args(sys.argv[1:])
if not args:
exit(0)
logger.debug("Command line arguments: " + str(sys.argv))
fusesoc(args)
if __name__ == "__main__":
main()
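# Illustrative command-line examples (library, core and tool names below are
# placeholders, not part of this repository):
#
#     fusesoc library add my_lib https://github.com/example/my_cores
#     fusesoc core list
#     fusesoc run --target=sim --tool=icarus example:lib:core:1.0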
|
|
"""Tests for distutils.dir_util."""
import unittest
import os
import stat
import sys
from unittest.mock import patch
from distutils import dir_util, errors
from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree,
ensure_relative)
from distutils import log
from distutils.tests import support
from test.support import run_unittest
class DirUtilTestCase(support.TempdirManager, unittest.TestCase):
def _log(self, msg, *args):
if len(args) > 0:
self._logs.append(msg % args)
else:
self._logs.append(msg)
def setUp(self):
super(DirUtilTestCase, self).setUp()
self._logs = []
tmp_dir = self.mkdtemp()
self.root_target = os.path.join(tmp_dir, 'deep')
self.target = os.path.join(self.root_target, 'here')
self.target2 = os.path.join(tmp_dir, 'deep2')
self.old_log = log.info
log.info = self._log
def tearDown(self):
log.info = self.old_log
super(DirUtilTestCase, self).tearDown()
def test_mkpath_remove_tree_verbosity(self):
mkpath(self.target, verbose=0)
wanted = []
self.assertEqual(self._logs, wanted)
remove_tree(self.root_target, verbose=0)
mkpath(self.target, verbose=1)
wanted = ['creating %s' % self.root_target,
'creating %s' % self.target]
self.assertEqual(self._logs, wanted)
self._logs = []
remove_tree(self.root_target, verbose=1)
wanted = ["removing '%s' (and everything under it)" % self.root_target]
self.assertEqual(self._logs, wanted)
@unittest.skipIf(sys.platform.startswith('win'),
"This test is only appropriate for POSIX-like systems.")
def test_mkpath_with_custom_mode(self):
# Get and set the current umask value for testing mode bits.
umask = os.umask(0o002)
os.umask(umask)
mkpath(self.target, 0o700)
self.assertEqual(
stat.S_IMODE(os.stat(self.target).st_mode), 0o700 & ~umask)
mkpath(self.target2, 0o555)
self.assertEqual(
stat.S_IMODE(os.stat(self.target2).st_mode), 0o555 & ~umask)
def test_create_tree_verbosity(self):
create_tree(self.root_target, ['one', 'two', 'three'], verbose=0)
self.assertEqual(self._logs, [])
remove_tree(self.root_target, verbose=0)
wanted = ['creating %s' % self.root_target]
create_tree(self.root_target, ['one', 'two', 'three'], verbose=1)
self.assertEqual(self._logs, wanted)
remove_tree(self.root_target, verbose=0)
def test_copy_tree_verbosity(self):
mkpath(self.target, verbose=0)
copy_tree(self.target, self.target2, verbose=0)
self.assertEqual(self._logs, [])
remove_tree(self.root_target, verbose=0)
mkpath(self.target, verbose=0)
a_file = os.path.join(self.target, 'ok.txt')
with open(a_file, 'w') as f:
f.write('some content')
wanted = ['copying %s -> %s' % (a_file, self.target2)]
copy_tree(self.target, self.target2, verbose=1)
self.assertEqual(self._logs, wanted)
remove_tree(self.root_target, verbose=0)
remove_tree(self.target2, verbose=0)
def test_copy_tree_skips_nfs_temp_files(self):
mkpath(self.target, verbose=0)
a_file = os.path.join(self.target, 'ok.txt')
nfs_file = os.path.join(self.target, '.nfs123abc')
for f in a_file, nfs_file:
with open(f, 'w') as fh:
fh.write('some content')
copy_tree(self.target, self.target2)
self.assertEqual(os.listdir(self.target2), ['ok.txt'])
remove_tree(self.root_target, verbose=0)
remove_tree(self.target2, verbose=0)
def test_ensure_relative(self):
if os.sep == '/':
self.assertEqual(ensure_relative('/home/foo'), 'home/foo')
self.assertEqual(ensure_relative('some/path'), 'some/path')
else: # \\
self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo')
self.assertEqual(ensure_relative('home\\foo'), 'home\\foo')
def test_copy_tree_exception_in_listdir(self):
"""
An exception in listdir should raise a DistutilsFileError
"""
with patch("os.listdir", side_effect=OSError()), \
self.assertRaises(errors.DistutilsFileError):
src = self.tempdirs[-1]
dir_util.copy_tree(src, None)
def test_suite():
return unittest.makeSuite(DirUtilTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import StringIO
import sys
import fixtures
import mock
from nova.cmd import manage
from nova import context
from nova import db
from nova import exception
from nova.i18n import _
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests.objects import test_network
class FixedIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FixedIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = manage.FixedIpCommands()
def test_reserve(self):
self.commands.reserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], True)
def test_reserve_nonexistent_address(self):
self.assertEqual(2, self.commands.reserve('55.55.55.55'))
def test_unreserve(self):
self.commands.unreserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], False)
def test_unreserve_nonexistent_address(self):
self.assertEqual(2, self.commands.unreserve('55.55.55.55'))
def test_list(self):
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO.StringIO()))
self.commands.list()
self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
def test_list_just_one_host(self):
def fake_fixed_ip_get_by_host(*args, **kwargs):
return [db_fakes.fixed_ip_fields]
self.useFixture(fixtures.MonkeyPatch(
'nova.db.fixed_ip_get_by_host',
fake_fixed_ip_get_by_host))
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO.StringIO()))
self.commands.list('banana')
self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
class FloatingIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FloatingIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = manage.FloatingIpCommands()
def test_address_to_hosts(self):
def assert_loop(result, expected):
for ip in result:
self.assertIn(str(ip), expected)
address_to_hosts = self.commands.address_to_hosts
# /32 and /31
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/32')
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/31')
# /30
expected = ["192.168.100.%s" % i for i in range(1, 3)]
result = address_to_hosts('192.168.100.0/30')
self.assertEqual(2, len(list(result)))
assert_loop(result, expected)
# /29
expected = ["192.168.100.%s" % i for i in range(1, 7)]
result = address_to_hosts('192.168.100.0/29')
self.assertEqual(6, len(list(result)))
assert_loop(result, expected)
# /28
expected = ["192.168.100.%s" % i for i in range(1, 15)]
result = address_to_hosts('192.168.100.0/28')
self.assertEqual(14, len(list(result)))
assert_loop(result, expected)
# /16
result = address_to_hosts('192.168.100.0/16')
self.assertEqual(65534, len(list(result)))
# NOTE(dripton): I don't test /13 because it makes the test take 3s.
# /12 gives over a million IPs, which is ridiculous.
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/12')
class NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NetworkCommandsTestCase, self).setUp()
self.commands = manage.NetworkCommands()
self.net = {'id': 0,
'label': 'fake',
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::/64',
'multi_host': False,
'gateway_v6': 'dead:beef::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '8.8.8.8',
'dns2': '8.8.4.4',
'vlan': 200,
'vlan_start': 201,
'vpn_public_address': '10.0.0.2',
'vpn_public_port': '2222',
'vpn_private_address': '192.168.0.2',
'dhcp_start': '192.168.0.3',
'project_id': 'fake_project',
'host': 'fake_host',
'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
def fake_network_get_by_cidr(context, cidr):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(cidr, self.fake_net['cidr'])
return db_fakes.FakeModel(dict(test_network.fake_network,
**self.fake_net))
def fake_network_get_by_uuid(context, uuid):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(uuid, self.fake_net['uuid'])
return db_fakes.FakeModel(dict(test_network.fake_network,
**self.fake_net))
def fake_network_update(context, network_id, values):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.assertEqual(values, self.fake_update_value)
self.fake_network_get_by_cidr = fake_network_get_by_cidr
self.fake_network_get_by_uuid = fake_network_get_by_uuid
self.fake_network_update = fake_network_update
def test_create(self):
def fake_create_networks(obj, context, **kwargs):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(kwargs['label'], 'Test')
self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
self.assertEqual(kwargs['multi_host'], False)
self.assertEqual(kwargs['num_networks'], 1)
self.assertEqual(kwargs['network_size'], 256)
self.assertEqual(kwargs['vlan'], 200)
self.assertEqual(kwargs['vlan_start'], 201)
self.assertEqual(kwargs['vpn_start'], 2000)
self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
self.assertEqual(kwargs['gateway'], '10.2.0.1')
self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
self.assertEqual(kwargs['bridge'], 'br200')
self.assertEqual(kwargs['bridge_interface'], 'eth0')
self.assertEqual(kwargs['dns1'], '8.8.8.8')
self.assertEqual(kwargs['dns2'], '8.8.4.4')
self.flags(network_manager='nova.network.manager.VlanManager')
from nova.network import manager as net_manager
self.stubs.Set(net_manager.VlanManager, 'create_networks',
fake_create_networks)
self.commands.create(
label='Test',
cidr='10.2.0.0/24',
num_networks=1,
network_size=256,
multi_host='F',
vlan=200,
vlan_start=201,
vpn_start=2000,
cidr_v6='fd00:2::/120',
gateway='10.2.0.1',
gateway_v6='fd00:2::22',
bridge='br200',
bridge_interface='eth0',
dns1='8.8.8.8',
dns2='8.8.4.4',
uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
def test_list(self):
def fake_network_get_all(context):
return [db_fakes.FakeModel(self.net)]
self.stubs.Set(db, 'network_get_all', fake_network_get_all)
output = StringIO.StringIO()
sys.stdout = output
self.commands.list()
sys.stdout = sys.__stdout__
result = output.getvalue()
_fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
"%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
"%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
head = _fmt % {'id': _('id'),
'cidr': _('IPv4'),
'cidr_v6': _('IPv6'),
'dhcp_start': _('start address'),
'dns1': _('DNS1'),
'dns2': _('DNS2'),
'vlan': _('VlanID'),
'project_id': _('project'),
'uuid': _("uuid")}
body = _fmt % {'id': self.net['id'],
'cidr': self.net['cidr'],
'cidr_v6': self.net['cidr_v6'],
'dhcp_start': self.net['dhcp_start'],
'dns1': self.net['dns1'],
'dns2': self.net['dns2'],
'vlan': self.net['vlan'],
'project_id': self.net['project_id'],
'uuid': self.net['uuid']}
answer = '%s\n%s\n' % (head, body)
self.assertEqual(result, answer)
def test_delete(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_uuid',
self.fake_network_get_by_uuid)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(uuid=self.fake_net['uuid'])
def test_delete_by_cidr(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(fixed_range=self.fake_net['cidr'])
def _test_modify_base(self, update_value, project, host, dis_project=None,
dis_host=None):
self.fake_net = self.net
self.fake_update_value = update_value
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
self.stubs.Set(db, 'network_update', self.fake_network_update)
self.commands.modify(self.fake_net['cidr'], project=project, host=host,
dis_project=dis_project, dis_host=dis_host)
def test_modify_associate(self):
self._test_modify_base(update_value={'project_id': 'test_project',
'host': 'test_host'},
project='test_project', host='test_host')
def test_modify_unchanged(self):
self._test_modify_base(update_value={}, project=None, host=None)
def test_modify_disassociate(self):
self._test_modify_base(update_value={'project_id': None, 'host': None},
project=None, host=None, dis_project=True,
dis_host=True)
class NeutronV2NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NeutronV2NetworkCommandsTestCase, self).setUp()
self.flags(network_api_class='nova.network.neutronv2.api.API')
self.commands = manage.NetworkCommands()
def test_create(self):
self.assertEqual(2, self.commands.create())
def test_list(self):
self.assertEqual(2, self.commands.list())
def test_delete(self):
self.assertEqual(2, self.commands.delete())
def test_modify(self):
self.assertEqual(2, self.commands.modify('192.168.0.1'))
class ProjectCommandsTestCase(test.TestCase):
def setUp(self):
super(ProjectCommandsTestCase, self).setUp()
self.commands = manage.ProjectCommands()
def test_quota(self):
output = StringIO.StringIO()
sys.stdout = output
self.commands.quota(project_id='admin',
key='instances',
value='unlimited',
)
sys.stdout = sys.__stdout__
result = output.getvalue()
print_format = "%-36s %-10s" % ('instances', 'unlimited')
self.assertEqual((print_format in result), True)
def test_quota_update_invalid_key(self):
self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
class DBCommandsTestCase(test.TestCase):
def setUp(self):
super(DBCommandsTestCase, self).setUp()
self.commands = manage.DbCommands()
def test_archive_deleted_rows_negative(self):
self.assertEqual(1, self.commands.archive_deleted_rows(-1))
class ServiceCommandsTestCase(test.TestCase):
def setUp(self):
super(ServiceCommandsTestCase, self).setUp()
self.commands = manage.ServiceCommands()
def test_service_enable_invalid_params(self):
self.assertEqual(2, self.commands.enable('nohost', 'noservice'))
def test_service_disable_invalid_params(self):
self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
class CellCommandsTestCase(test.TestCase):
def setUp(self):
super(CellCommandsTestCase, self).setUp()
self.commands = manage.CellCommands()
def test_create_transport_hosts_multiple(self):
"""Test the _create_transport_hosts method
when broker_hosts is set.
"""
brokers = "127.0.0.1:5672,127.0.0.2:5671"
thosts = self.commands._create_transport_hosts(
'guest', 'devstack',
broker_hosts=brokers)
self.assertEqual(2, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(5672, thosts[0].port)
self.assertEqual('127.0.0.2', thosts[1].hostname)
self.assertEqual(5671, thosts[1].port)
def test_create_transport_hosts_single(self):
"""Test the _create_transport_hosts method when hostname is passed."""
thosts = self.commands._create_transport_hosts('guest', 'devstack',
hostname='127.0.0.1',
port=80)
self.assertEqual(1, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(80, thosts[0].port)
def test_create_transport_hosts_single_broker(self):
"""Test the _create_transport_hosts method for single broker_hosts."""
thosts = self.commands._create_transport_hosts(
'guest', 'devstack',
broker_hosts='127.0.0.1:5672')
self.assertEqual(1, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(5672, thosts[0].port)
def test_create_transport_hosts_both(self):
"""Test the _create_transport_hosts method when both broker_hosts
and hostname/port are passed.
"""
thosts = self.commands._create_transport_hosts(
'guest', 'devstack',
broker_hosts='127.0.0.1:5672',
hostname='127.0.0.2', port=80)
self.assertEqual(1, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(5672, thosts[0].port)
def test_create_transport_hosts_wrong_val(self):
"""Test the _create_transport_hosts method when broker_hosts
is wrongly specified
"""
self.assertRaises(ValueError,
self.commands._create_transport_hosts,
'guest', 'devstack',
broker_hosts='127.0.0.1:5672,127.0.0.1')
def test_create_transport_hosts_wrong_port_val(self):
"""Test the _create_transport_hosts method when port in
broker_hosts is wrongly specified
"""
self.assertRaises(ValueError,
self.commands._create_transport_hosts,
'guest', 'devstack',
broker_hosts='127.0.0.1:')
def test_create_transport_hosts_wrong_port_arg(self):
"""Test the _create_transport_hosts method when port
argument is wrongly specified
"""
self.assertRaises(ValueError,
self.commands._create_transport_hosts,
'guest', 'devstack',
hostname='127.0.0.1', port='ab')
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(db, 'cell_create')
def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt):
"""Test the create function when broker_hosts is
passed
"""
cell_tp_url = "fake://guest:devstack@127.0.0.1:5432"
cell_tp_url += ",guest:devstack@127.0.0.2:9999/"
ctxt = mock.sentinel
mock_ctxt.return_value = mock.sentinel
self.commands.create("test",
broker_hosts='127.0.0.1:5432,127.0.0.2:9999',
woffset=0, wscale=0,
username="guest", password="devstack")
exp_values = {'name': "test",
'is_parent': False,
'transport_url': cell_tp_url,
'weight_offset': 0.0,
'weight_scale': 0.0}
mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(db, 'cell_create')
def test_create_hostname(self, mock_db_cell_create, mock_ctxt):
"""Test the create function when hostname and port is
passed
"""
cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/"
ctxt = mock.sentinel
mock_ctxt.return_value = mock.sentinel
self.commands.create("test",
hostname='127.0.0.1', port="9999",
woffset=0, wscale=0,
username="guest", password="devstack")
exp_values = {'name': "test",
'is_parent': False,
'transport_url': cell_tp_url,
'weight_offset': 0.0,
'weight_scale': 0.0}
mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import re
from oslo.config import cfg
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images
from nova.virt.powervm import command
from nova.virt.powervm import common
from nova.virt.powervm import constants
from nova.virt.powervm import exception
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PowerVMDiskAdapter(object):
"""PowerVM disk adapter interface
Provides a contract to implement multiple ways to generate
and attach volumes to virtual machines using local and/or
external storage
"""
def create_volume(self, size):
"""Creates a volume with a minimum size
:param size: size of the volume in bytes
:returns: string -- the name of the disk device.
"""
pass
def delete_volume(self, volume_info):
"""Removes the disk and its associated vSCSI connection
:param volume_info: dictionary with volume info including name of
disk device in /dev/
"""
pass
def create_volume_from_image(self, context, instance, image_id):
"""Creates a Volume and copies the specified image to it
:param context: nova context used to retrieve image from glance
:param instance: instance to create the volume for
:param image_id: image_id reference used to locate image in glance
:returns: dictionary with the name of the created
disk device in 'device_name' key
"""
pass
def create_image_from_volume(self, device_name, context,
image_id, image_meta):
"""Capture the contents of a volume and upload to glance
:param device_name: device in /dev/ to capture
:param context: nova context for operation
:param image_id: image reference to pre-created image in glance
:param image_meta: metadata for new image
"""
pass
def migrate_volume(self, lv_name, src_host, dest, image_path,
instance_name=None):
"""Copy a logical volume to file, compress, and transfer
:param lv_name: volume device name
:param src_host: source IP or DNS name.
:param dest: destination IP or DNS name
:param image_path: path to remote image storage directory
:param instance_name: name of instance that is being migrated
:returns: file path on destination of image file that was moved
"""
pass
def attach_volume_to_host(self, *args, **kargs):
"""
Attaches volume to host using info passed in *args and **kargs
"""
pass
def detach_volume_from_host(self, *args, **kargs):
"""
Detaches volume from host using info passed in *args and **kargs
"""
pass
class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
"""Default block device providor for PowerVM
This disk adapter uses logical volumes on the hosting VIOS
to provide backing block devices for instances/LPARs
"""
def __init__(self, connection):
super(PowerVMLocalVolumeAdapter, self).__init__()
self.command = command.IVMCommand()
self._connection = None
self.connection_data = connection
def _set_connection(self):
if self._connection is None:
self._connection = common.ssh_connect(self.connection_data)
def create_volume(self, size):
"""Creates a logical volume with a minimum size
:param size: size of the logical volume in bytes
:returns: string -- the name of the new logical volume.
:raises: PowerVMNoSpaceLeftOnVolumeGroup
"""
return self._create_logical_volume(size)
def delete_volume(self, volume_info):
"""Removes the Logical Volume and its associated vSCSI connection
:param volume_info: Dictionary with volume info including name of
Logical Volume device in /dev/ via device_name key
"""
disk_name = volume_info["device_name"]
LOG.debug(_("Removing the logical volume '%s'") % disk_name)
self._remove_logical_volume(disk_name)
def create_volume_from_image(self, context, instance, image_id):
"""Creates a Logical Volume and copies the specified image to it
:param context: nova context used to retrieve image from glance
:param instance: instance to create the volume for
:param image_id: image_id reference used to locate image in glance
:returns: dictionary with the name of the created
Logical Volume device in 'device_name' key
"""
file_name = '.'.join([image_id, 'gz'])
file_path = os.path.join(CONF.powervm_img_local_path,
file_name)
if not os.path.isfile(file_path):
LOG.debug(_("Fetching image '%s' from glance") % image_id)
images.fetch(context, image_id, file_path,
instance['user_id'],
instance['project_id'])
else:
LOG.debug((_("Using image found at '%s'") % file_path))
LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
remote_path = CONF.powervm_img_remote_path
remote_file_name, size = self._copy_image_file(file_path, remote_path)
# calculate root device size in bytes
# we respect the minimum root device size in constants
size_gb = max(instance['instance_type']['root_gb'],
constants.POWERVM_MIN_ROOT_GB)
size = size_gb * 1024 * 1024 * 1024
try:
LOG.debug(_("Creating logical volume of size %s bytes") % size)
disk_name = self._create_logical_volume(size)
LOG.debug(_("Copying image to the device '%s'") % disk_name)
self._copy_file_to_device(remote_file_name, disk_name)
except Exception:
LOG.error(_("Error while creating logical volume from image. "
"Will attempt cleanup."))
# attempt cleanup of logical volume before re-raising exception
with excutils.save_and_reraise_exception():
try:
self.delete_volume(disk_name)
except Exception:
msg = _('Error while attempting cleanup of failed '
'deploy to logical volume.')
LOG.exception(msg)
return {'device_name': disk_name}
def create_image_from_volume(self, device_name, context,
image_id, image_meta):
"""Capture the contents of a volume and upload to glance
:param device_name: device in /dev/ to capture
:param context: nova context for operation
:param image_id: image reference to pre-created image in glance
:param image_meta: metadata for new image
"""
# do the disk copy
dest_file_path = common.aix_path_join(CONF.powervm_img_remote_path,
image_id)
self._copy_device_to_file(device_name, dest_file_path)
# compress and copy the file back to the nova-compute host
snapshot_file_path = self._copy_image_file_from_host(
dest_file_path, CONF.powervm_img_local_path,
compress=True)
# get glance service
glance_service, image_id = glance.get_remote_image_service(
context, image_id)
# upload snapshot file to glance
with open(snapshot_file_path, 'r') as img_file:
glance_service.update(context,
image_id,
image_meta,
img_file)
LOG.debug(_("Snapshot added to glance."))
# clean up local image file
try:
os.remove(snapshot_file_path)
except OSError as ose:
LOG.warn(_("Failed to clean up snapshot file "
"%(snapshot_file_path)s") % locals())
def migrate_volume(self, lv_name, src_host, dest, image_path,
instance_name=None):
"""Copy a logical volume to file, compress, and transfer
:param lv_name: logical volume device name
:param dest: destination IP or DNS name
:param image_path: path to remote image storage directory
:param instance_name: name of instance that is being migrated
:returns: file path on destination of image file that was moved
"""
if instance_name:
file_name = ''.join([instance_name, '_rsz'])
else:
file_name = ''.join([lv_name, '_rsz'])
file_path = os.path.join(image_path, file_name)
self._copy_device_to_file(lv_name, file_path)
cmds = 'gzip %s' % file_path
self.run_vios_command_as_root(cmds)
file_path = file_path + '.gz'
# If destination is not same host
# transfer file to destination VIOS system
if (src_host != dest):
with common.vios_to_vios_auth(self.connection_data.host,
dest,
self.connection_data) as key_name:
cmd = ' '.join(['scp -o "StrictHostKeyChecking no"',
('-i %s' % key_name),
file_path,
'%s@%s:%s' % (self.connection_data.username,
dest,
image_path)
])
# do the remote copy
self.run_vios_command(cmd)
# cleanup local file only if transferring to remote system
# otherwise keep the file to boot from locally and clean up later
cleanup_cmd = 'rm %s' % file_path
self.run_vios_command_as_root(cleanup_cmd)
return file_path
def attach_volume_to_host(self, *args, **kargs):
pass
def detach_volume_from_host(self, *args, **kargs):
pass
def _create_logical_volume(self, size):
"""Creates a logical volume with a minimum size.
:param size: size of the logical volume in bytes
:returns: string -- the name of the new logical volume.
:raises: PowerVMNoSpaceLeftOnVolumeGroup
"""
vgs = self.run_vios_command(self.command.lsvg())
cmd = self.command.lsvg('%s -field vgname freepps -fmt :' %
' '.join(vgs))
output = self.run_vios_command(cmd)
found_vg = None
# If it's not a multiple of 1MB we get the next
# multiple and use it as the megabyte_size.
megabyte = 1024 * 1024
if (size % megabyte) != 0:
megabyte_size = int(size / megabyte) + 1
else:
megabyte_size = size / megabyte
# Search for a volume group with enough free space for
# the new logical volume.
for vg in output:
# Returned output example: 'rootvg:396 (25344 megabytes)'
match = re.search(r'^(\w+):\d+\s\((\d+).+$', vg)
if match is None:
continue
vg_name, avail_size = match.groups()
if megabyte_size <= int(avail_size):
found_vg = vg_name
break
if not found_vg:
LOG.error(_('Could not create logical volume. '
'No space left on any volume group.'))
raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
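# the requested size is converted from bytes to 512-byte blocks before being passed to mklv with the 'B' suffix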
cmd = self.command.mklv('%s %sB' % (found_vg, size / 512))
lv_name = self.run_vios_command(cmd)[0]
return lv_name
def _remove_logical_volume(self, lv_name):
"""Removes the lv and the connection between its associated vscsi.
:param lv_name: a logical volume name
"""
cmd = self.command.rmvdev('-vdev %s -rmlv' % lv_name)
self.run_vios_command(cmd)
def _copy_file_to_device(self, source_path, device, decompress=True):
"""Copy file to device.
:param source_path: path to input source file
:param device: output device name
:param decompress: if True (default) the file will be decompressed
on the fly while being copied to the drive
"""
if decompress:
cmd = ('gunzip -c %s | dd of=/dev/%s bs=1024k' %
(source_path, device))
else:
cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)
self.run_vios_command_as_root(cmd)
def _copy_device_to_file(self, device_name, file_path):
"""Copy a device to a file using dd
:param device_name: device name to copy from
:param file_path: output file path
"""
cmd = 'dd if=/dev/%s of=%s bs=1024k' % (device_name, file_path)
self.run_vios_command_as_root(cmd)
def _md5sum_remote_file(self, remote_path):
# AIX6/VIOS cannot md5sum files with sizes greater than ~2GB
cmd = ("perl -MDigest::MD5 -e 'my $file = \"%s\"; open(FILE, $file); "
"binmode(FILE); "
"print Digest::MD5->new->addfile(*FILE)->hexdigest, "
"\" $file\n\";'" % remote_path)
output = self.run_vios_command_as_root(cmd)
return output[0]
def _copy_image_file(self, source_path, remote_path, decompress=False):
"""Copy file to VIOS, decompress it, and return its new size and name.
:param source_path: source file path
:param remote_path: remote file path
:param decompress: if True, decompresses the file after copying;
if False (default), just copies the file
"""
# Calculate source image checksum
hasher = hashlib.md5()
block_size = 0x10000
img_file = file(source_path, 'r')
buf = img_file.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = img_file.read(block_size)
source_cksum = hasher.hexdigest()
comp_path = os.path.join(remote_path, os.path.basename(source_path))
uncomp_path = comp_path.rstrip(".gz")
if not decompress:
final_path = comp_path
else:
final_path = uncomp_path
# Check whether the image is already on IVM
output = self.run_vios_command("ls %s" % final_path,
check_exit_code=False)
# If the image does not exist already
if not output:
# Copy file to IVM
common.ftp_put_command(self.connection_data, source_path,
remote_path)
# Verify image file checksums match
output = self._md5sum_remote_file(final_path)
if not output:
LOG.error(_("Unable to get checksum"))
raise exception.PowerVMFileTransferFailed()
if source_cksum != output.split(' ')[0]:
LOG.error(_("Image checksums do not match"))
raise exception.PowerVMFileTransferFailed()
if decompress:
# Unzip the image
cmd = "/usr/bin/gunzip %s" % comp_path
output = self.run_vios_command_as_root(cmd)
# Remove existing image file
cmd = "/usr/bin/rm -f %s.*" % uncomp_path
output = self.run_vios_command_as_root(cmd)
# Rename unzipped image
cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
output = self.run_vios_command_as_root(cmd)
# Remove compressed image file
cmd = "/usr/bin/rm -f %s" % comp_path
output = self.run_vios_command_as_root(cmd)
else:
LOG.debug(_("Image found on host at '%s'") % final_path)
# Calculate file size in multiples of 512 bytes
output = self.run_vios_command("ls -o %s|awk '{print $4}'" %
final_path, check_exit_code=False)
if output:
size = int(output[0])
else:
LOG.error(_("Uncompressed image file not found"))
raise exception.PowerVMFileTransferFailed()
if (size % 512 != 0):
size = (int(size / 512) + 1) * 512
return final_path, size
def _copy_image_file_from_host(self, remote_source_path, local_dest_dir,
compress=False):
"""
Copy a file from IVM to the nova-compute host,
and return the location of the copy
:param remote_source_path: remote source file path
:param local_dest_dir: local destination directory
:param compress: if True, compress the file before transfer;
if False (default), copy the file as is
"""
temp_str = common.aix_path_join(local_dest_dir,
os.path.basename(remote_source_path))
local_file_path = temp_str + '.gz'
if compress:
copy_from_path = remote_source_path + '.gz'
else:
copy_from_path = remote_source_path
if compress:
# Gzip the file
cmd = "/usr/bin/gzip %s" % remote_source_path
self.run_vios_command_as_root(cmd)
# Cleanup uncompressed remote file
cmd = "/usr/bin/rm -f %s" % remote_source_path
self.run_vios_command_as_root(cmd)
# Get file checksum
output = self._md5sum_remote_file(copy_from_path)
if not output:
LOG.error(_("Unable to get checksum"))
msg_args = {'file_path': copy_from_path}
raise exception.PowerVMFileTransferFailed(**msg_args)
else:
source_chksum = output.split(' ')[0]
# Copy file to host
common.ftp_get_command(self.connection_data,
copy_from_path,
local_file_path)
# Calculate copied image checksum
with open(local_file_path, 'r') as image_file:
hasher = hashlib.md5()
block_size = 0x10000
buf = image_file.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = image_file.read(block_size)
dest_chksum = hasher.hexdigest()
# do comparison
if source_chksum and dest_chksum != source_chksum:
LOG.error(_("Image checksums do not match"))
raise exception.PowerVMFileTransferFailed()
# Cleanup transferred remote file
cmd = "/usr/bin/rm -f %s" % copy_from_path
output = self.run_vios_command_as_root(cmd)
return local_file_path
def run_vios_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
:param cmd: String with the command to run.
"""
self._set_connection()
stdout, stderr = utils.ssh_execute(self._connection, cmd,
check_exit_code=check_exit_code)
return stdout.strip().splitlines()
def run_vios_command_as_root(self, command, check_exit_code=True):
"""Run a remote command as root using an active ssh connection.
:param command: List of commands.
"""
self._set_connection()
stdout, stderr = common.ssh_command_as_root(
self._connection, command, check_exit_code=check_exit_code)
return stdout.read().splitlines()
|
|
from sympy import sympify, Add, ImmutableMatrix as Matrix
from sympy.core.compatibility import u, unicode
from .printing import (VectorLatexPrinter, VectorPrettyPrinter,
VectorStrPrinter)
__all__ = ['Dyadic']
class Dyadic(object):
"""A Dyadic object.
See:
http://en.wikipedia.org/wiki/Dyadic_tensor
Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
A more powerful way to represent a rigid body's inertia. While it is more
complex, by choosing Dyadic components to be in body fixed basis vectors,
the resulting matrix is equivalent to the inertia tensor.
"""
def __init__(self, inlist):
"""
Just like Vector's init, you shouldn't call this unless creating a
zero dyadic.
zd = Dyadic(0)
Stores a Dyadic as a list of lists; the inner list has the measure
number and the two unit vectors; the outer list holds each unique
unit vector pair.
"""
self.args = []
if inlist == 0:
inlist = []
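# fold together incoming terms that share the same pair of unit vectors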
while len(inlist) != 0:
added = 0
for i, v in enumerate(self.args):
if ((str(inlist[0][1]) == str(self.args[i][1])) and
(str(inlist[0][2]) == str(self.args[i][2]))):
self.args[i] = (self.args[i][0] + inlist[0][0],
inlist[0][1], inlist[0][2])
inlist.remove(inlist[0])
added = 1
break
if added != 1:
self.args.append(inlist[0])
inlist.remove(inlist[0])
i = 0
# This code is to remove empty parts from the list
while i < len(self.args):
if ((self.args[i][0] == 0) | (self.args[i][1] == 0) |
(self.args[i][2] == 0)):
self.args.remove(self.args[i])
i -= 1
i += 1
def __add__(self, other):
"""The add operator for Dyadic. """
other = _check_dyadic(other)
return Dyadic(self.args + other.args)
def __and__(self, other):
"""The inner product operator for a Dyadic and a Dyadic or Vector.
Parameters
==========
other : Dyadic or Vector
The other Dyadic or Vector to take the inner product with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> D1 = outer(N.x, N.y)
>>> D2 = outer(N.y, N.y)
>>> D1.dot(D2)
(N.x|N.y)
>>> D1.dot(N.y)
N.x
"""
from sympy.physics.vector.vector import Vector, _check_vector
if isinstance(other, Dyadic):
other = _check_dyadic(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
for i2, v2 in enumerate(other.args):
ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2])
else:
other = _check_vector(other)
ol = Vector(0)
for i, v in enumerate(self.args):
ol += v[0] * v[1] * (v[2] & other)
return ol
def __div__(self, other):
"""Divides the Dyadic by a sympifyable expression. """
return self.__mul__(1 / other)
__truediv__ = __div__
def __eq__(self, other):
"""Tests for equality.
Is currently weak; needs stronger comparison testing
"""
if other == 0:
other = Dyadic(0)
other = _check_dyadic(other)
if (self.args == []) and (other.args == []):
return True
elif (self.args == []) or (other.args == []):
return False
return set(self.args) == set(other.args)
def __mul__(self, other):
"""Multiplies the Dyadic by a sympifyable expression.
Parameters
==========
other : Sympifyable
The scalar to multiply this Dyadic with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> 5 * d
5*(N.x|N.x)
"""
newlist = [v for v in self.args]
for i, v in enumerate(newlist):
newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1],
newlist[i][2])
return Dyadic(newlist)
def __ne__(self, other):
return not self.__eq__(other)
def __neg__(self):
return self * -1
def _latex(self, printer=None):
ar = self.args # just to shorten things
if len(ar) == 0:
return str(0)
ol = [] # output list, to be concatenated to a string
mlp = VectorLatexPrinter()
for i, v in enumerate(ar):
# if the coef of the dyadic is 1, we skip the 1
if ar[i][0] == 1:
ol.append(' + ' + mlp.doprint(ar[i][1]) + r"\otimes " +
mlp.doprint(ar[i][2]))
# if the coef of the dyadic is -1, we skip the 1
elif ar[i][0] == -1:
ol.append(' - ' +
mlp.doprint(ar[i][1]) +
r"\otimes " +
mlp.doprint(ar[i][2]))
# If the coefficient of the dyadic is not 1 or -1,
# we might wrap it in parentheses, for readability.
elif ar[i][0] != 0:
arg_str = mlp.doprint(ar[i][0])
if isinstance(ar[i][0], Add):
arg_str = '(%s)' % arg_str
if arg_str.startswith('-'):
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + mlp.doprint(ar[i][1]) +
r"\otimes " + mlp.doprint(ar[i][2]))
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def _pretty(self, printer=None):
e = self
class Fake(object):
baseline = 0
def render(self, *args, **kwargs):
ar = e.args # just to shorten things
settings = printer._settings if printer else {}
if printer:
use_unicode = printer._use_unicode
else:
from sympy.printing.pretty.pretty_symbology import (
pretty_use_unicode)
use_unicode = pretty_use_unicode()
mpp = printer if printer else VectorPrettyPrinter(settings)
if len(ar) == 0:
return unicode(0)
bar = u("\N{CIRCLED TIMES}") if use_unicode else "|"
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
# if the coef of the dyadic is 1, we skip the 1
if ar[i][0] == 1:
ol.extend([u(" + "),
mpp.doprint(ar[i][1]),
bar,
mpp.doprint(ar[i][2])])
# if the coef of the dyadic is -1, we skip the 1
elif ar[i][0] == -1:
ol.extend([u(" - "),
mpp.doprint(ar[i][1]),
bar,
mpp.doprint(ar[i][2])])
# If the coefficient of the dyadic is not 1 or -1,
# we might wrap it in parentheses, for readability.
elif ar[i][0] != 0:
if isinstance(ar[i][0], Add):
arg_str = mpp._print(
ar[i][0]).parens()[0]
else:
arg_str = mpp.doprint(ar[i][0])
if arg_str.startswith(u("-")):
arg_str = arg_str[1:]
str_start = u(" - ")
else:
str_start = u(" + ")
ol.extend([str_start, arg_str, u(" "),
mpp.doprint(ar[i][1]),
bar,
mpp.doprint(ar[i][2])])
outstr = u("").join(ol)
if outstr.startswith(u(" + ")):
outstr = outstr[3:]
elif outstr.startswith(" "):
outstr = outstr[1:]
return outstr
return Fake()
def __rand__(self, other):
"""The inner product operator for a Vector or Dyadic, and a Dyadic
This is for: Vector dot Dyadic
Parameters
==========
other : Vector
The vector we are dotting with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dot, outer
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> dot(N.x, d)
N.x
"""
from sympy.physics.vector.vector import Vector, _check_vector
other = _check_vector(other)
ol = Vector(0)
for i, v in enumerate(self.args):
ol += v[0] * v[2] * (v[1] & other)
return ol
def __rsub__(self, other):
return (-1 * self) + other
def __rxor__(self, other):
"""For a cross product in the form: Vector x Dyadic
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, cross
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> cross(N.y, d)
- (N.z|N.x)
"""
from sympy.physics.vector.vector import _check_vector
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
ol += v[0] * ((other ^ v[1]) | v[2])
return ol
def __str__(self, printer=None):
"""Printing method. """
ar = self.args # just to shorten things
if len(ar) == 0:
return str(0)
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
# if the coef of the dyadic is 1, we skip the 1
if ar[i][0] == 1:
ol.append(' + (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')')
# if the coef of the dyadic is -1, we skip the 1
elif ar[i][0] == -1:
ol.append(' - (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')')
# If the coefficient of the dyadic is not 1 or -1,
# we might wrap it in parentheses, for readability.
elif ar[i][0] != 0:
arg_str = VectorStrPrinter().doprint(ar[i][0])
if isinstance(ar[i][0], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + '*(' + str(ar[i][1]) +
'|' + str(ar[i][2]) + ')')
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def __sub__(self, other):
"""The subtraction operator. """
return self.__add__(other * -1)
def __xor__(self, other):
"""For a cross product in the form: Dyadic x Vector.
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, cross
>>> N = ReferenceFrame('N')
>>> d = outer(N.x, N.x)
>>> cross(d, N.y)
(N.x|N.z)
"""
from sympy.physics.vector.vector import _check_vector
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
ol += v[0] * (v[1] | (v[2] ^ other))
return ol
_sympystr = __str__
_sympyrepr = _sympystr
__repr__ = __str__
__radd__ = __add__
__rmul__ = __mul__
def express(self, frame1, frame2=None):
"""Expresses this Dyadic in alternate frame(s)
The first frame is the list side expression, the second frame is the
right side; if Dyadic is in form A.x|B.y, you can express it in two
different frames. If no second frame is given, the Dyadic is
expressed in only one frame.
Calls the global express function
Parameters
==========
frame1 : ReferenceFrame
The frame to express the left side of the Dyadic in
frame2 : ReferenceFrame
If provided, the frame to express the right side of the Dyadic in
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> d = outer(N.x, N.x)
>>> d.express(B, N)
cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
"""
from sympy.physics.vector.functions import express
return express(self, frame1, frame2)
def to_matrix(self, reference_frame, second_reference_frame=None):
"""Returns the matrix form of the dyadic with respect to one or two
reference frames.
Parameters
----------
reference_frame : ReferenceFrame
The reference frame that the rows and columns of the matrix
correspond to. If a second reference frame is provided, this
only corresponds to the rows of the matrix.
second_reference_frame : ReferenceFrame, optional, default=None
The reference frame that the columns of the matrix correspond
to.
Returns
-------
matrix : ImmutableMatrix, shape(3,3)
The matrix that gives the 2D tensor form.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> Vector.simp = True
>>> from sympy.physics.mechanics import inertia
>>> Ixx, Iyy, Izz, Ixy, Iyz, Ixz = symbols('Ixx, Iyy, Izz, Ixy, Iyz, Ixz')
>>> N = ReferenceFrame('N')
>>> inertia_dyadic = inertia(N, Ixx, Iyy, Izz, Ixy, Iyz, Ixz)
>>> inertia_dyadic.to_matrix(N)
Matrix([
[Ixx, Ixy, Ixz],
[Ixy, Iyy, Iyz],
[Ixz, Iyz, Izz]])
>>> beta = symbols('beta')
>>> A = N.orientnew('A', 'Axis', (beta, N.x))
>>> inertia_dyadic.to_matrix(A)
Matrix([
[ Ixx, Ixy*cos(beta) + Ixz*sin(beta), -Ixy*sin(beta) + Ixz*cos(beta)],
[ Ixy*cos(beta) + Ixz*sin(beta), Iyy*cos(2*beta)/2 + Iyy/2 + Iyz*sin(2*beta) - Izz*cos(2*beta)/2 + Izz/2, -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2],
[-Ixy*sin(beta) + Ixz*cos(beta), -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2, -Iyy*cos(2*beta)/2 + Iyy/2 - Iyz*sin(2*beta) + Izz*cos(2*beta)/2 + Izz/2]])
"""
if second_reference_frame is None:
second_reference_frame = reference_frame
return Matrix([i.dot(self).dot(j) for i in reference_frame for j in
second_reference_frame]).reshape(3, 3)
def doit(self, **hints):
"""Calls .doit() on each term in the Dyadic"""
return sum([Dyadic([(v[0].doit(**hints), v[1], v[2])])
for v in self.args], Dyadic(0))
def dt(self, frame):
"""Take the time derivative of this Dyadic in a frame.
This function calls the global time_derivative method
Parameters
==========
frame : ReferenceFrame
The frame to take the time derivative in
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> d = outer(N.x, N.x)
>>> d.dt(B)
- q'*(N.y|N.x) - q'*(N.x|N.y)
"""
from sympy.physics.vector.functions import time_derivative
return time_derivative(self, frame)
def simplify(self):
"""Returns a simplified Dyadic."""
out = Dyadic(0)
for v in self.args:
out += Dyadic([(v[0].simplify(), v[1], v[2])])
return out
def subs(self, *args, **kwargs):
"""Substituion on the Dyadic.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> s = Symbol('s')
>>> a = s * (N.x|N.x)
>>> a.subs({s: 2})
2*(N.x|N.x)
"""
return sum([Dyadic([(v[0].subs(*args, **kwargs), v[1], v[2])])
for v in self.args], Dyadic(0))
def applyfunc(self, f):
"""Apply a function to each component of a Dyadic."""
if not callable(f):
raise TypeError("`f` must be callable.")
out = Dyadic(0)
for a, b, c in self.args:
out += f(a) * (b|c)
return out
dot = __and__
cross = __xor__
def _check_dyadic(other):
if not isinstance(other, Dyadic):
raise TypeError('A Dyadic must be supplied')
return other
|
|
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2015 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'LGPL'
__version__ = '0.8.8'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import string
import unicodedata
try:
if sys.version >= '2.3':
import textwrap
elif sys.version >= '2.2':
from optparse import textwrap
else:
from optik import textwrap
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
if sys.version >= '2.7':
from functools import reduce
if sys.version >= '3.0':
unicode_type = str
bytes_type = bytes
else:
unicode_type = unicode
bytes_type = str
def obj2unicode(obj):
"""Return a unicode representation of a python object
"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj)
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
unicode_data = obj2unicode(iterable)
if hasattr(unicodedata, 'east_asian_width'):
w = unicodedata.east_asian_width
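# East Asian Wide ('W') and Fullwidth ('F') characters occupy two columns; everything else counts as one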
return sum([w(c) in 'WF' and 2 or 1 for c in unicode_data])
else:
return unicode_data.__len__()
else:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
if max_width <= 0:
max_width = False
self._max_width = max_width
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combination of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either "a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a two-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on two-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
try:
f = float(x)
except:
return obj2unicode(x)
n = self._precision
dtype = self._dtype[i]
if dtype == 'i':
return str(int(round(f)))
elif dtype == 'f':
return '%.*f' % (n, f)
elif dtype == 'e':
return '%.*e' % (n, f)
elif dtype == 't':
return obj2unicode(x)
else:
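# automatic dtype: whole numbers print as ints, magnitudes above 1e8 fall back to exponential notation, the rest print as floats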
if f - round(f) == 0:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return str(int(round(f)))
else:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return '%.*f' % (n, f)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
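# the [horiz, corner][bool] indexing picks the corner character only when vertical lines are enabled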
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
items = len(maxi)
length = sum(maxi)
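# Rescaling sketch for the branch below: with _max_width = 40 and natural
# widths [30, 30], the decorated length is 60 + 2 * 3 + 1 = 67 > 40, so
# each width is scaled by 40 / 67, giving [18, 18].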
if self._max_width and length + items * 3 + 1 > self._max_width:
maxi = [
int(round(self._max_width / (length + items * 3 + 1) * n))
for n in maxi
]
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = "c"
if align == "r":
out += "%s " % (fill * space + cell_line)
elif align == "c":
out += "%s " % (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += "%s " % (cell_line + fill * space)
if length < len(line):
out += "%s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrap.wrap(c, width))
line_wrapped.append(array)
max_cell_lines = max(len(c) for c in line_wrapped)
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
if __name__ == '__main__':
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
["Mr\nBaptiste\nClement", 1, "Baby"],
["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
print(table.draw() + "\n")
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
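# A third, illustrative table (a minimal sketch, assuming the precision
# setter defined above is named set_precision): decoration flags can be
# OR-ed together, matching the _has_border()/_has_header() bit tests.
table = Texttable()
table.set_deco(Texttable.BORDER | Texttable.HEADER)
table.set_precision(2)
table.set_cols_dtype(['t', 'f'])
table.add_rows([["name", "value"],
["pi", 3.14159],
["e", 2.71828]])
print(table.draw())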
|
|
from u2fval import app, exc
from u2fval.model import db, Client
from .soft_u2f_v2 import SoftU2FDevice, CERT
from six.moves.urllib.parse import quote
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
import unittest
import json
class RestApiTest(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
app.config['ALLOW_UNTRUSTED'] = True
db.session.close()
db.drop_all()
db.create_all()
db.session.add(Client('fooclient', 'https://example.com',
['https://example.com']))
db.session.commit()
self.app = app.test_client()
def test_call_without_client(self):
resp = self.app.get('/')
self.assertEqual(resp.status_code, 400)
err = json.loads(resp.data.decode('utf8'))
self.assertEqual(err['errorCode'], exc.BadInputException.code)
def test_call_with_invalid_client(self):
resp = self.app.get('/', environ_base={'REMOTE_USER': 'invalid'})
self.assertEqual(resp.status_code, 404)
err = json.loads(resp.data.decode('utf8'))
self.assertEqual(err['errorCode'], exc.BadInputException.code)
def test_get_trusted_facets(self):
resp = json.loads(
self.app.get('/', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertIn('https://example.com', resp['trustedFacets'][0]['ids'])
def test_list_empty_devices(self):
resp = json.loads(
self.app.get('/foouser', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(resp, [])
def test_begin_auth_without_devices(self):
resp = self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'})
self.assertEqual(resp.status_code, 400)
err = json.loads(resp.data.decode('utf8'))
self.assertEqual(err['errorCode'], exc.NoEligibleDevicesException.code)
def test_register(self):
device = SoftU2FDevice()
self.do_register(device, {'foo': 'bar'})
def test_sign(self):
device = SoftU2FDevice()
self.do_register(device, {'foo': 'bar', 'baz': 'one'})
descriptor = self.do_sign(device, {'baz': 'two'})
self.assertEqual(descriptor['properties'],
{'foo': 'bar', 'baz': 'two'})
def test_get_properties(self):
device = SoftU2FDevice()
descriptor = self.do_register(device, {'foo': 'bar', 'baz': 'foo'})
descriptor2 = json.loads(
self.app.get('/foouser/' + descriptor['handle'],
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(descriptor2['properties'],
{'foo': 'bar', 'baz': 'foo'})
def test_update_properties(self):
device = SoftU2FDevice()
desc = self.do_register(device,
{'foo': 'one', 'bar': 'one', 'baz': 'one'})
self.assertEqual({
'foo': 'one',
'bar': 'one',
'baz': 'one'
}, desc['properties'])
desc2 = json.loads(self.app.post(
'/foouser/' + desc['handle'],
environ_base={'REMOTE_USER': 'fooclient'},
data=json.dumps({'bar': 'two', 'baz': None})
).data.decode('utf8'))
self.assertEqual({
'foo': 'one',
'bar': 'two'
}, desc2['properties'])
desc3 = json.loads(self.app.get(
'/foouser/' + desc['handle'],
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(desc2['properties'], desc3['properties'])
def test_get_devices(self):
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
resp = json.loads(
self.app.get('/foouser', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(resp), 3)
def test_get_device_descriptor_and_cert(self):
desc = self.do_register(SoftU2FDevice())
desc2 = json.loads(
self.app.get('/foouser/' + desc['handle'],
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(desc, desc2)
cert = x509.load_pem_x509_certificate(self.app.get(
'/foouser/' + desc['handle'] + '/certificate',
environ_base={'REMOTE_USER': 'fooclient'}
).data, default_backend())
self.assertEqual(CERT, cert.public_bytes(Encoding.DER))
def test_get_invalid_device(self):
resp = self.app.get('/foouser/' + ('ab' * 16),
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(resp.status_code, 404)
self.do_register(SoftU2FDevice())
resp = self.app.get('/foouser/' + ('ab' * 16),
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(resp.status_code, 404)
resp = self.app.get('/foouser/InvalidHandle',
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(resp.status_code, 400)
def test_delete_user(self):
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
self.app.delete('/foouser',
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(resp, [])
def test_delete_devices(self):
d1 = self.do_register(SoftU2FDevice())
d2 = self.do_register(SoftU2FDevice())
d3 = self.do_register(SoftU2FDevice())
self.app.delete('/foouser/' + d2['handle'],
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(resp), 2)
self.app.delete('/foouser/' + d1['handle'],
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(resp), 1)
self.assertEqual(d3, resp[0])
self.app.delete('/foouser/' + d3['handle'],
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(resp, [])
def test_set_properties_during_register(self):
device = SoftU2FDevice()
reg_req = json.loads(self.app.get(
'/foouser/register?properties=' + quote(json.dumps(
{'foo': 'one', 'bar': 'one'})),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
reg_resp = device.register('https://example.com', reg_req['appId'],
reg_req['registerRequests'][0]).json
desc = json.loads(self.app.post(
'/foouser/register',
data=json.dumps({
'registerResponse': reg_resp,
'properties': {'baz': 'two', 'bar': 'two'}
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual({'foo': 'one', 'bar': 'two', 'baz': 'two'},
desc['properties'])
def test_set_properties_during_sign(self):
device = SoftU2FDevice()
self.do_register(device, {'foo': 'one', 'bar': 'one', 'baz': 'one'})
aut_req = json.loads(self.app.get(
'/foouser/sign?properties=' + quote(json.dumps(
{'bar': 'two', 'boo': 'two'})),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_resp = device.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
desc = json.loads(self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp,
'properties': {'baz': 'three', 'boo': None}
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual({
'foo': 'one',
'bar': 'two',
'baz': 'three',
}, desc['properties'])
def test_register_and_sign_with_custom_challenge(self):
device = SoftU2FDevice()
reg_req = json.loads(self.app.get(
'/foouser/register?challenge=ThisIsAChallenge',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(reg_req['registerRequests'][0]['challenge'],
'ThisIsAChallenge')
reg_resp = device.register('https://example.com', reg_req['appId'],
reg_req['registerRequests'][0]).json
desc1 = json.loads(self.app.post(
'/foouser/register',
data=json.dumps({
'registerResponse': reg_resp
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_req = json.loads(self.app.get(
'/foouser/sign?challenge=ThisIsAChallenge',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(aut_req['challenge'], 'ThisIsAChallenge')
aut_resp = device.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
desc2 = json.loads(self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(desc1['handle'], desc2['handle'])
def test_sign_with_handle_filtering(self):
dev = SoftU2FDevice()
h1 = self.do_register(dev)['handle']
h2 = self.do_register(dev)['handle']
self.do_register(dev)['handle']
aut_req = json.loads(
self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(aut_req['registeredKeys']), 3)
self.assertEqual(len(aut_req['descriptors']), 3)
aut_req = json.loads(
self.app.get('/foouser/sign?handle=' + h1,
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(aut_req['registeredKeys']), 1)
self.assertEqual(aut_req['descriptors'][0]['handle'], h1)
aut_req = json.loads(
self.app.get(
'/foouser/sign?handle=' + h1 + '&handle=' + h2,
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(aut_req['registeredKeys']), 2)
self.assertIn(aut_req['descriptors'][0]['handle'], [h1, h2])
self.assertIn(aut_req['descriptors'][1]['handle'], [h1, h2])
def test_sign_with_invalid_handle(self):
dev = SoftU2FDevice()
self.do_register(dev)
resp = self.app.get('/foouser/sign?handle=foobar',
environ_base={'REMOTE_USER': 'fooclient'})
self.assertEqual(resp.status_code, 400)
def test_device_compromised_on_counter_error(self):
dev = SoftU2FDevice()
self.do_register(dev)
self.do_sign(dev)
self.do_sign(dev)
self.do_sign(dev)
dev.counter = 1
aut_req = json.loads(
self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_resp = dev.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
resp = self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp
}),
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(400, resp.status_code)
self.assertEqual(12, json.loads(resp.data.decode('utf8'))['errorCode'])
resp = self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'})
self.assertEqual(400, resp.status_code)
self.assertEqual(11, json.loads(resp.data.decode('utf8'))['errorCode'])
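# Helper round-trips used by the tests above: do_register fetches a
# registration request from GET /<user>/register, answers it with the
# soft U2F device, and POSTs the registerResponse (plus optional
# properties) back; do_sign does the same with GET /<user>/sign and
# POST /<user>/sign using getAssertion.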
def do_register(self, device, properties=None):
reg_req = json.loads(
self.app.get('/foouser/register',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(reg_req['registeredKeys']),
len(reg_req['descriptors']))
reg_resp = device.register('https://example.com', reg_req['appId'],
reg_req['registerRequests'][0]).json
if properties is None:
properties = {}
descriptor = json.loads(self.app.post(
'/foouser/register',
data=json.dumps({
'registerResponse': reg_resp,
'properties': properties
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(descriptor['properties'], properties)
return descriptor
def do_sign(self, device, properties=None):
aut_req = json.loads(
self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_resp = device.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
if properties is None:
properties = {}
return json.loads(self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp,
'properties': properties
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import shutil
import tempfile
import threading
import unittest
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class KerasCallbacksTest(test.TestCase):
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
@test_util.run_v1_only('b/120545219')
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
model = make_model()
# This should reduce the LR after the first epoch (due to the large min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)),
0.01,
atol=1e-4)
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
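# The generator below yields real (x, y) batches until roughly
# 3 * len(x_train) batches have been produced, then switches to all-NaN
# batches, so TerminateOnNaN stops training mid-run while CSVLogger is
# still writing rows.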
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
# On Windows, due to \r\n line endings we may end up reading empty lines
# after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@test_util.run_v1_only('b/120545219')
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=temp_dir, histogram_freq=1, write_images=True,
write_grads=True, batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
# histogram_freq must be zero
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
tsb.histogram_freq = 1
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
@test_util.run_v1_only('b/120545219')
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@test_util.run_v1_only('b/120545219')
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = summary_pb2.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, summary_pb2.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
keras.callbacks.TensorBoard._init_writer = _init_writer
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
@test_util.run_v1_only('b/120545219')
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation generator
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
validation_steps=2,
callbacks=cbks,
verbose=0)
with self.assertRaises(ValueError):
# fit with validation generator but no
# validation_steps
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
callbacks=cbks,
verbose=0)
self.assertTrue(os.path.exists(tmpdir))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.cached_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
keras.callbacks.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
@test_util.run_deprecated_v1
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summary = (step, summary)
elif 'epoch_' in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0})
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_epoch_end(0, {'acc': 10.0})
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_util.run_in_graph_and_eager_modes
def test_Tensorboard_eager(self):
temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy',
optimizer=adam.AdamOptimizer(0.01),
metrics=['accuracy'])
cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertTrue(os.path.exists(temp_dir))
@test_util.run_deprecated_v1
def test_TensorBoard_update_freq(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batch_summaries = []
self.epoch_summaries = []
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summaries.append((step, summary))
elif 'epoch_' in summary.value[0].tag:
self.epoch_summaries.append((step, summary))
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Epoch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(tb_cbk.writer.batch_summaries, [])
tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
# Batch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
# Integer mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq=20)
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertFalse(tb_cbk.writer.batch_summaries)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
@test_util.run_deprecated_v1
def test_fit_generator_with_callback(self):
class TestCallback(keras.callbacks.Callback):
def set_model(self, model):
# Check the model's graph for the optimizer operations that
# _make_train_function adds under a named scope for the
# optimizer. This ensures the full model is populated before the
# set_model callback is called.
optimizer_name_scope = 'training/' + model.optimizer.__class__.__name__
graph_def = ops.get_default_graph().as_graph_def()
for node in graph_def.node:
if node.name.startswith(optimizer_name_scope):
return
raise RuntimeError('The optimizer operations are not present in the '
'model graph when the Callback.set_model function '
'is called')
np.random.seed(1337)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=1,
validation_data=generator(),
validation_steps=2,
callbacks=[TestCallback()],
verbose=0)
if __name__ == '__main__':
test.main()
|
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
import warnings
import numpy as np
from .exceptions import ChangedBehaviorWarning
from scipy import linalg
from scipy.special import expit
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .utils.extmath import softmax
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or float type')
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
def _class_cov(X, y, priors, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
cov = np.zeros(shape=(X.shape[1], X.shape[1]))
for idx, group in enumerate(classes):
Xg = X[y == group, :]
cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage))
return cov
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional (default=None)
Number of components (<= min(n_classes - 1, n_features)) for
dimensionality reduction. If None, will be set to
min(n_classes - 1, n_features).
store_covariance : bool, optional
Additionally compute class covariance matrix (default False), used
only in 'svd' solver.
.. versionadded:: 0.17
tol : float, optional, (default 1.0e-4)
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
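A brief added sketch of the dimensionality reduction described above
(here min(n_classes - 1, n_features) = 1):
>>> clf.transform(X).shape
(6, 1)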
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within-class) scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
# FIXME: Future warning to be removed in 0.23
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self,
dtype=[np.float64, np.float32])
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueError("The number of samples must be more "
"than the number of classes.")
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if not np.isclose(self.priors_.sum(), 1.0):
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(len(self.classes_) - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
warnings.warn(
"n_components cannot be larger than min(n_features, "
"n_classes - 1). Using min(n_features, "
"n_classes - 1) = min(%d, %d - 1) = %d components."
% (X.shape[1], len(self.classes_), max_components),
ChangedBehaviorWarning)
future_msg = ("In version 0.23, setting n_components > min("
"n_features, n_classes - 1) will raise a "
"ValueError. You should set n_components to None"
" (default), or a value smaller or equal to "
"min(n_features, n_classes - 1).")
warnings.warn(future_msg, FutureWarning)
self._max_components = max_components
else:
self._max_components = self.n_components
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2,
dtype=X.dtype)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1, dtype=X.dtype)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self, 'classes_')
decision = self.decision_function(X)
if self.classes_.size == 2:
proba = expit(decision)
return np.vstack([1-proba, proba]).T
else:
return softmax(decision)
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
store_covariance : boolean
If True the covariance matrices are computed and stored in the
`self.covariance_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Attributes
----------
covariance_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
QuadraticDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
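A minimal sketch of the effect of ``reg_param`` on the same toy data
(``clf_reg`` is an illustrative name):
>>> clf_reg = QuadraticDiscriminantAnalysis(reg_param=0.1)
>>> print(clf_reg.fit(X, y).predict([[-0.8, -1]]))
[1]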
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariance=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariance = store_covariance
self.tol = tol
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('The number of classes has to be greater than'
' one; got %d class' % (n_classes))
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
store_covariance = self.store_covariance
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in range(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariance or store_covariance:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariance or store_covariance:
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
|
|
import numpy as np
from os import path
from pandas import read_csv
from lib.Astro_Libraries.spectrum_fitting.plot_tools import MCMC_printer
from collections import OrderedDict
from lib.Astro_Libraries.spectrum_fitting.import_functions import ImportModelData, parseObjData, make_folder
from lib.Astro_Libraries.spectrum_fitting.starContinuum_functions import SspFitter, CCM89_Bal07
from lib.Astro_Libraries.spectrum_fitting.gasContinuum_functions import NebularContinuaCalculator
from lib.Astro_Libraries.spectrum_fitting.gasEmission_functions import TOIII_TSIII_relation, EmissionComponents
from lib.Astro_Libraries.spectrum_fitting.extinction_tools import ReddeningLaws
class ModelIngredients(ImportModelData, SspFitter, NebularContinuaCalculator, EmissionComponents, ReddeningLaws, MCMC_printer):
def __init__(self):
# Load the default configuration
ImportModelData.__init__(self, path.dirname(path.realpath(__file__)))
# Load tools for spectra calculation
SspFitter.__init__(self)
EmissionComponents.__init__(self, self.config['temp_grid'], self.config['den_grid'])
NebularContinuaCalculator.__init__(self)
# Import extinction classes
ReddeningLaws.__init__(self, self.config['R_v'], self.config['reddenig_curve'])
# For generating graphs
MCMC_printer.__init__(self)
def gen_synth_obs(self, obs_name, output_folder,
obj_properties_file=None, obj_lines_file = None,
wavelengh_limits = None, resample_inc = None, norm_interval = None,
ssp_lib_type = None, ssp_folder = None, ssp_file = None,
obj_ssp_coeffs_file = None, error_stellarContinuum = None, error_lines = None,
atomic_data=None, ftau_coeffs=None):
# Dictionary to store the data
self.obj_data = {}
# Store input files:
self.obj_data['obj_properties_file'] = obj_properties_file
self.obj_data['obj_lines_file'] = obj_lines_file
self.obj_data['obj_ssp_coeffs_file'] = obj_ssp_coeffs_file
self.obj_data['output_folder'] = output_folder
# Read simulation data from file
obj_prop_df = read_csv(obj_properties_file, delim_whitespace=True, header=0, index_col=0)
# Read lines file
obj_lines_df = read_csv(obj_lines_file, delim_whitespace=True, header=0, index_col=0)
# Import the stellar library and use it to generate the observation continuum
self.ssp_lib = self.load_ssp_library(ssp_lib_type, ssp_folder, ssp_file, wavelengh_limits, resample_inc, norm_interval)
# Declare wavelength for the object
z_obj, obj_WaveRest = obj_prop_df.loc['z_obj'][0], self.ssp_lib['wave_resam']
obj_WaveObs = obj_WaveRest * (1.0 + z_obj)
# Generate masks for the object from the lines log
self.generate_object_mask(obj_lines_df, obj_WaveRest, obj_lines_df.index.values)
# ---------------------------------------- Emission lines data -------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# Store input data
self.obj_data['flux_hbeta'] = obj_prop_df.loc['flux_hbeta'][0]
# Load atomic data
self.import_atomic_data(atomic_data, ftau_coeffs, self.config['temp_grid'], self.config['den_grid'])
# Prepare data from emission line file (trick to put all the lines)
self.import_emission_line_data(obj_lines_df, obj_lines_df.index.values)
# Reddening parameters
self.obj_data['lineFlambda'] = self.gasExtincParams(self.obj_data['lineWaves'], self.config['R_v'], self.config['reddenig_curve'])
# # Create or load emissivity grids
# self.emis_dict = self.computeEmissivityDict(self.obj_data['linePynebCode'], self.obj_data['lineIons'],
# self.obj_data['lineLabels'], output_folder)
#
# # Fit emissivity grids to a surface
# self.fitEmissivityPlane(self.obj_data['lineIons'], self.obj_data['lineLabels'], self.configFolder)
#
# # Plot fits of emissivity grids
# self.plot_emisFits(self.obj_data['lineLabels'], self.emisCoeffs, self.emis_dict, output_folder)
# # Create or emissivity diagnostic ratios
# self.diagnosRatios, self.diagnosGrid = self.computeDiagnosGrids(self.obj_data['linePynebCode'], self.diagnosDict, self.emis_grid)
#
# # Fit emissivity ratios a surface
# self.emisRatioCoeffs = self.fitEmissivityDiagnosPlane(self.diagnosRatios, self.diagnosGrid)
#
# Plot fits of emissivity ratios
# self.plot_emisRatioFits(self.diagnosRatios, self.emisRatioCoeffs, self.diagnosGrid, self.paths_dict['emisGridsPath'])
# Reddening parameters
self.obj_data['lineFlambda'] = self.gasExtincParams(self.obj_data['lineWaves'], self.Rv_model,
self.reddedning_curve_model)
# Variables to make the iterations simpler
self.obj_data['T_low_true'], self.obj_data['n_e_true'] = obj_prop_df.loc['T_low_true'][0], obj_prop_df.loc['n_e_true'][0]
self.gasSamplerVariables(self.obj_data['lineIons'], self.config['high_temp_ions'],
lineLabels=self.obj_data['lineLabels'], lineFlambda=self.obj_data['lineFlambda'])
#Dictionary with synthetic abundances
abund_df = obj_prop_df[obj_prop_df.index.str.contains('_abund')]
abund_keys = abund_df.index.str.replace('_abund', '').astype(str).values
abund_values = abund_df.variable_magnitude.values
self.abund_dict = dict(zip(abund_keys, abund_values.T))
#Gas physical parameters
T_low = obj_prop_df.loc['T_low_true'][0]
n_e = obj_prop_df.loc['n_e_true'][0]
tau = obj_prop_df.loc['tau'][0]
cHbeta = obj_prop_df.loc['cHbeta'][0]
#Calculate T_high assuming we get T_low
T_high = TOIII_TSIII_relation(T_low)
# # Prepare gas data # TODO update the prepare_gas_data to use run in this part
# self.lineLabels = self.obj_data['lineLabels']
# self.lineIons = self.obj_data['lineIons']
# self.lineFlambda = self.obj_data['lineFlambda']
# Compute lines flux
self.lineLabels = self.obj_data['lineLabels']
lineFluxes = self.calcEmFluxes(T_low, T_high, n_e, cHbeta, tau, self.abund_dict, self.emFluxTensors, np.zeros(len(self.obj_data['lineLabels'])))
self.obj_data['lineFluxes'] = lineFluxes
# Use general error if this is provided
self.obj_data['lineErr'] = self.obj_data['lineFluxes'] * error_lines
# Store data to synthetic observation files
idx_lines = (obj_lines_df.index != 'H1_4861A')
obj_lines_df.loc[idx_lines, 'obs_flux'] = self.obj_data['lineFluxes']
obj_lines_df.loc[idx_lines, 'obs_fluxErr'] = self.obj_data['lineErr']
obj_lines_df.loc['H1_4861A', 'obs_flux'] = 1.0
obj_lines_df.loc['H1_4861A', 'obs_fluxErr'] = 1.0 * error_lines
# Assign line region
obj_lines_df['w3'] = np.floor(obj_lines_df['obs_wavelength'].values * 0.998)
obj_lines_df['w4'] = np.ceil(obj_lines_df['obs_wavelength'].values * 1.002)
# Create txt lines log
synth_lines_log = '{}{}_lineslog.txt'.format(output_folder, obs_name)
with open(synth_lines_log, 'w') as f:
f.write(obj_lines_df.loc[:, :'blended_label'].to_string(float_format=lambda x: "{:15.8f}".format(x), index=True, index_names=False))
# ---------------------------------------- Continuum calculation -----------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# Get Halpha flux to calibrate
idx_Halpha = (self.obj_data['lineLabels'] == 'H1_6563A')
flux_halpha = self.obj_data['lineFluxes'][idx_Halpha][0] * obj_prop_df.loc['flux_hbeta'][0]
# Reddening parameters for the nebular continuum
self.obj_data['nebFlambda'] = self.gasExtincParams(obj_WaveRest, self.config['R_v'], self.config['reddenig_curve'])
# Calculate the nebular continua
self.obj_data['nebularFlux'] = self.nebFluxCont(obj_WaveRest,
obj_prop_df.loc['cHbeta'][0], self.obj_data['nebFlambda'],
obj_prop_df.loc['T_low_true'][0],
obj_prop_df.loc['He1r_abund'][0], obj_prop_df.loc['He2r_abund'][0],
flux_halpha)
# Save input conditions:
Av_star = obj_prop_df.loc['Av_star'][0]
sigma_star = obj_prop_df.loc['sigma_star'][0]
flux_hbeta = obj_prop_df.loc['flux_hbeta'][0]
eqw_hbeta = obj_prop_df.loc['eqw_hbeta'][0]
#Get populations for the stellar continua
bases_idx, bases_coeff, bases_coeff_err = np.loadtxt(self.obj_data['obj_ssp_coeffs_file'], usecols=[0, 1, 2], unpack=True)
# SSP library flux at modelled Av, z_star and sigma_star
ssp_grid_model_norm = self.physical_SED_model(self.ssp_lib['wave_resam'], obj_WaveObs, self.ssp_lib['flux_norm'],
Av_star, z_obj, sigma_star, Rv_coeff=self.config['R_v'])
# Normalized object flux
stellarFluxNorm = ssp_grid_model_norm.dot(bases_coeff)
# Instead of denormalizing the grid we use the Hbeta flux and equivalent width
# To get a better shape of the nebular and stellar continua
flux_hbeta, eqw_hbeta = obj_prop_df.loc['flux_hbeta'][0], obj_prop_df.loc['eqw_hbeta'][0]
cont_hbeta = flux_hbeta / eqw_hbeta
stellarFlux = stellarFluxNorm * cont_hbeta
# Generate synth error # TODO We should play with this error
sigma_err = error_stellarContinuum * np.median(stellarFlux)
self.obj_data['stellarFlux'] = stellarFlux + np.random.normal(0.0, sigma_err, stellarFlux.size)
#Save synthetic continua according to the observations
if 'nebularFlux' not in self.obj_data:
self.obj_data['obsFlux'] = self.obj_data['stellarFlux']
else:
self.obj_data['obsFlux'] = self.obj_data['nebularFlux'] + self.obj_data['stellarFlux']
# Store the spectrum as a text file
synth_spectrum_address = '{}{}_spectrum.txt'.format(output_folder, obs_name)
np.savetxt(synth_spectrum_address, np.transpose(np.array([obj_WaveObs, self.obj_data['obsFlux']])), fmt="%7.1f %10.4e")
#Store synthetic object data log # TODO Add here the comments of the function and make it automatic
obj_dict = OrderedDict()
obj_dict['address_lines_log'] = synth_lines_log
obj_dict['address_spectrum'] = synth_spectrum_address
obj_dict['flux_hbeta'] = flux_hbeta
obj_dict['flux_halpha'] = flux_halpha
obj_dict['Normalized_by_Hbeta'] = True
obj_dict['eqw_hbeta'] = eqw_hbeta
obj_dict['sigma_gas'] = obj_prop_df.loc['sigma_gas'][0]
obj_dict['T_low'] = T_low
obj_dict['T_high'] = T_high
obj_dict['n_e'] = n_e
obj_dict['cHbeta'] = cHbeta
obj_dict['Av_star'] = Av_star
obj_dict['sigma_star'] = sigma_star
obj_dict['z_obj'] = z_obj
obj_dict['continuum_sigma'] = error_stellarContinuum
obj_dict['resample_inc'] = resample_inc
obj_dict['wavelengh_limits'] = wavelengh_limits
obj_dict['norm_interval'] = norm_interval
obj_dict['Te_prior'] = [15000.0, 2500.0]
obj_dict['ne_prior'] = [450, 100]
obj_dict['redening_check'] = True
obj_dict['Thigh_check'] = True
obj_dict['T_low_true'] = T_low
obj_dict['T_high_true'] = T_high
obj_dict['n_e_true'] = n_e
obj_dict['cHbeta_true'] = cHbeta
obj_dict['tau_true'] = tau
obj_dict['He1r_true'] = self.abund_dict['He1r']
obj_dict['He2r_true'] = self.abund_dict['He2r']
obj_dict['S2_true'] = self.abund_dict['S2']
obj_dict['S3_true'] = self.abund_dict['S3']
obj_dict['Ar3_true'] = self.abund_dict['Ar3']
obj_dict['Ar4_true'] = self.abund_dict['Ar4']
obj_dict['O2_true'] = self.abund_dict['O2']
obj_dict['O3_true'] = self.abund_dict['O3']
obj_dict['N2_true'] = self.abund_dict['N2']
#Save the data into an "ini" configuration file
conf_address = '{}{}_objParams.txt'.format(output_folder, obs_name)
parseObjData(conf_address, obs_name, obj_dict)
return
def ready_simulation(self, output_folder, obs_data, ssp_data, fitting_components, overwrite_grids=False,
input_lines=None, wavelengh_limits = None, resample_inc=None, norm_interval=None):
# Dictionary to store the data
self.obj_data = {}
self.obj_data = obs_data.copy()
# Prepare emission data
if 'emission' in fitting_components:
# Read log with observational features and masks
obj_lines_df = read_csv(obs_data['obj_lines_file'], delim_whitespace=True, header=0, index_col=0)
# Load atomic data
self.import_atomic_data() # TODO Do we need this one for the nebular continuum?
# Prepare data from emission line file (trick to put all the lines)
self.import_emission_line_data(obj_lines_df, input_lines=input_lines)
# Reddening parameters
self.obj_data['lineFlambda'] = self.gasExtincParams(self.obj_data['lineWaves'], self.Rv_model, self.reddedning_curve_model)
# # Create or load emissivity grids
# self.emis_dict = self.computeEmissivityDict(self.obj_data['linePynebCode'], self.obj_data['lineIons'],
# self.obj_data['lineLabels'], output_folder)
#
# # Fit emissivity grids to a surface
# self.fitEmissivityPlane(self.obj_data['lineIons'], self.obj_data['lineLabels'], self.configFolder)
#
# # Plot fits of emissivity grids
# self.plot_emisFits(self.obj_data['lineLabels'], self.emisCoeffs, self.emis_dict, output_folder)
#
# # Create or emissivity diagnostic ratios
# self.diagnosRatios, self.diagnosGrid = self.computeDiagnosGrids(self.obj_data['linePynebCode'], self.diagnosDict, self.emis_grid)
#
# # Fit emissivity ratios a surface
# self.emisRatioCoeffs = self.fitEmissivityDiagnosPlane(self.diagnosRatios, self.diagnosGrid)
#
# # Plot fits of emissivity ratios
# self.plot_emisRatioFits(self.diagnosRatios, self.emisRatioCoeffs, self.diagnosGrid, self.paths_dict['emisGridsPath'])
# Trim, resample and normalize according to the input object
if 'obs_wavelength' in self.obj_data: # TODO this is a bit dirty
self.treat_input_spectrum(self.obj_data, self.obj_data['obs_wavelength'], self.obj_data['obs_flux'], wavelengh_limits, resample_inc, norm_interval)
# Reddening parameters for the nebular continuum
if 'nebular' in fitting_components:
obj_wave_rest = self.obj_data['wave_resam'] / (1.0 + self.obj_data['z_obj'])
self.obj_data['nebFlambda'] = self.gasExtincParams(obj_wave_rest, self.Rv_model, self.reddedning_curve_model)
# Declare stellar data
if 'stellar' in fitting_components:
self.ssp_lib = {}
self.ssp_lib = ssp_data.copy()
# Generate object masks
if ('wave_resam' in self.obj_data) and ('lineLabels' in self.obj_data): # TODO this is a bit dirty
self.generate_object_mask(obj_lines_df, self.obj_data['wave_resam'], self.obj_data['lineLabels'])
return
def prepareSimulation(self, obs_data, ssp_data=None, output_folder = None, storage_folder = None,
spectra_components=None, input_lines='all', normalized_by_Hbeta=True,
excludeReddening = False, T_high_prior = False, prefit_ssp = True,
wavelengh_limits = None, resample_inc = None, norm_interval=None):
# Store components fit
self.spectraComponents = spectra_components
# Folders to store inputs and outputs
self.input_folder = output_folder + 'input_data/' # TODO sure?
self.output_folder = output_folder + 'output_data/'
self.dataFolder = self.output_folder if storage_folder is None else storage_folder # TODO sure?
self.configFile = obs_data['obsFile'] #TODO this one has to go into the configuration
self.objName = str(obs_data['objName'])
self.prefit_db = '{}{}_sspPrefitDB'.format(self.dataFolder, self.objName)
self.sspCoeffsPrefit_file = '{}{}_prefitSSPpopulations.txt'.format(self.input_folder, self.objName)
self.sspCoeffs_file = '{}{}_SSPpopulations.txt'.format(self.input_folder, self.objName)
# Create them if not available
make_folder(self.input_folder)
make_folder(self.output_folder)
# Prepare spectrum components for fitting
self.emissionCheck, self.stellarCheck = False, False
# Pre-analysis emission spectrum
if 'emission' in self.spectraComponents:
# Get emission data from input files
self.ready_simulation(output_folder, obs_data, ssp_data, spectra_components, input_lines=input_lines,
wavelengh_limits=wavelengh_limits, resample_inc=resample_inc, norm_interval=norm_interval)
# Declare gas sampler variables
self.gasSamplerVariables(self.obj_data['lineIons'], self.config['high_temp_ions'],
self.obj_data['lineFluxes'], self.obj_data['lineErr'],
self.obj_data['lineLabels'], self.obj_data['lineFlambda'],
normalized_by_Hbeta, self.config['linesMinimumError'])
# Priors definition # TODO this must go to a special section
self.priorsDict = {'T_low': self.obj_data['Te_prior'], 'n_e': self.obj_data['ne_prior']}
# Confirm inputs are valid
self.emissionCheck = True
# Prefit stellar continua
if 'stellar' in self.spectraComponents:
self.stellarCheck = True
# Perform a new SSP synthesis, otherwise use available data
if prefit_ssp:
# Compute nebular continuum using normalised Halpha and standard conditions
self.computeDefaultNeb(self.nebDefault['Te_neb'], self.obj_data['nebFlambda'], self.nebDefault['cHbeta_neb'],
self.nebDefault['He1_neb'], self.nebDefault['He2_neb'],
self.nebDefault['flux_halpha'] / self.obj_data['normFlux_coeff'], self.nebDefault['z_neb'])
# Ready continuum data
self.prepareContinuaData(self.ssp_lib['wave_resam'], self.ssp_lib['flux_norm'], self.ssp_lib['normFlux_coeff'],
self.obj_data['wave_resam'], self.obj_data['flux_norm'], self.obj_data['continuum_sigma'],
self.int_mask, nebularFlux=self.nebDefault['synth_neb_flux'])
# Select model
self.select_inference_model('stelar_prefit')
# Plot input simulation data
self.plotInputSSPsynthesis()
# Run stellar continua prefit and store/print the results
#self.run_pymc(self.prefit_db, iterations=8000, variables_list=['Av_star', 'sigma_star'], prefit = True)
self.savePrefitData(self.sspCoeffsPrefit_file, self.prefit_db)
# Compute nebular continuum using prior physical data
self.computeDefaultNeb(self.nebDefault['Te_neb'], self.obj_data['nebFlambda'], self.nebDefault['cHbeta_neb'],
self.nebDefault['He1_neb'], self.nebDefault['He2_neb'],
self.obj_data['flux_halpha'] / self.obj_data['normFlux_coeff'], self.nebDefault['z_neb'])
# Compute nebular continuum using normalised Halpha and standard conditions
# TODO I think I need to remove nebular continuum here if I want to add it later
self.prepareContinuaData(self.ssp_lib['wave_resam'], self.ssp_lib['flux_norm'], self.ssp_lib['normFlux_coeff'],
self.obj_data['wave_resam'], self.obj_data['flux_norm'],
self.obj_data['continuum_sigma'],
self.int_mask,
nebularFlux=None,#self.nebDefault['synth_neb_flux'],
mainPopulationsFile=self.sspCoeffsPrefit_file)
return
def prepareContinuaData(self, basesWave, basesFlux, basesFluxCoeffs, obsWave, obsFlux, obsFluxEr, objMask, nebularFlux = None, mainPopulationsFile = None):
# Remove nebular contribution from observed continuum
if nebularFlux is not None:
inputContinuum = obsFlux - nebularFlux
else:
inputContinuum = obsFlux
# Trim the total SSP library to the main populations stored in a text file
if mainPopulationsFile is not None:
# Three column file with idcs populations, weight and mainPopulationsFile
bases_idx, bases_coeff, bases_coeff_err = np.loadtxt(mainPopulationsFile, usecols=[0, 1, 2], unpack=True)
# Include SSPs above the minimum value. TODO we should check this limit; currently it must be 0.001
idx_populations = bases_coeff >= self.lowlimit_sspContribution
# Data for the analysis # TODO not sure if this one should be here
self.stellarAv_prior = self.obj_data['Av_prefit'][0], self.obj_data['Av_prefit'][1]
self.stellarSigma_prior = self.obj_data['sigma_star_prefit'][0], self.obj_data['sigma_star_prefit'][1]
else:
bases_idx = np.arange(basesFlux.shape[0])
bases_coeff = np.ones(basesFlux.shape[0], dtype=bool)
bases_coeff_err = np.zeros(basesFlux.shape[0], dtype=bool)
idx_populations = np.ones(basesFlux.shape[0], dtype=bool)
# Input Object data
self.obsFluxNorm = inputContinuum
self.inputContinuum = inputContinuum * objMask
self.inputContinuumEr = obsFluxEr # TODO this will not work if the error is a scalar.. need to rethink
self.inputWave = obsWave
# Populations parameters
self.sspPrefitIdcs = np.where(idx_populations)[0]
self.sspPrefitCoeffs = bases_coeff[idx_populations]
self.sspPrefitErr = bases_coeff_err[idx_populations]
self.sspPrefitLimits = np.vstack((self.sspPrefitCoeffs * 0.8, self.sspPrefitCoeffs * 1.2)).T # Theoretical limits
#Bases parameters
neglected_populations = np.where(~idx_populations)
self.onBasesWave = basesWave
self.onBasesFluxNorm = np.delete(basesFlux, neglected_populations, axis=0)
self.onBasesFluxNormCoeffs = np.delete(basesFluxCoeffs, neglected_populations, axis=0)
self.nBases = self.onBasesFluxNorm.shape[0] #self.onBasesFlux.shape[0]
self.range_bases = np.arange(self.nBases)
# Limit for bases
self.zMin_SspLimit = np.around((obsWave[-1] / basesWave[-1]), decimals=2 - 1)
self.zMax_SspLimit = np.around((obsWave[0] / basesWave[0]), decimals=2 - 1)
# Static reddening curve for the stellar continuum
self.Xx_stellar = CCM89_Bal07(self.Rv_model, basesWave)
return
def savePrefitData(self, sspCoeffs_file, ssp_db_file):
# Read input data
stellarPrefit_db, stat_db_dict = self.load_pymc_database_manual(ssp_db_file, burning=5000,
params_list=['Av_star', 'sigma_star', 'ssp_coefficients'])
# Get mean and uncertainty values
Av_mean, Av_std = stat_db_dict['Av_star']['trace'].mean(axis=0), stat_db_dict['Av_star']['trace'].std(axis=0)
sigma_mean, sigma_std = stat_db_dict['sigma_star']['trace'].mean(axis=0), stat_db_dict['sigma_star']['trace'].std(axis=0)
coeffs_mean, coeffs_std = stat_db_dict['ssp_coefficients']['trace'].mean(axis=0), stat_db_dict['ssp_coefficients']['trace'].std(axis=0)
# File for saving the population coefficients # TODO Default files names should go into the configuration
pops_vector = np.arange(coeffs_mean.size, dtype=int)
np.savetxt(sspCoeffs_file, np.transpose(np.array([pops_vector, coeffs_mean, coeffs_std])), fmt="%4i %10.8f %10.8f")
# Add results to object config file
sectionName = self.objName + '_results'
objData = {'Av_prefit':[Av_mean,Av_std], 'sigma_star_prefit':[sigma_mean, sigma_std], 'coeffsPop_prefit':coeffs_mean, 'coeffsPopErr_prefit':coeffs_std}
parseObjData(self.configFile, sectionName, objData)
# Compute mean output spectrum
ssp_grid_i_norm = self.physical_SED_model(self.onBasesWave, self.inputWave, self.onBasesFluxNorm,
Av_mean, 0.0, sigma_mean, self.Rv_model)
obj_ssp_fit_flux = ssp_grid_i_norm.dot(coeffs_mean)
# Print prefit output
self.plotOutputSSPsynthesis(stellarPrefit_db, stat_db_dict, obj_ssp_fit_flux, coeffs_mean)
return
def load_prefit_data(self, obj_wave):
# Mean parameter values #TODO we need to add the redshift here
self.sspPrefit_Coeffs = stat_db_dict['ssp_coefficients']['trace'].mean(axis=0)
self.sspPrefit_err = stat_db_dict['ssp_coefficients']['trace'].std(axis=0)
self.ssp_lib['Av_sspPrefit'] = stat_db_dict['Av_star']['mean']
self.ssp_lib['sigma_sspPrefit'] = stat_db_dict['sigma_star']['mean']
return
def computeDefaultNeb(self, Te, nebFlambda, cHbeta, He1_abund, He2_abund, fluxHalpha_norm, z_obj):
# Generate synthetic nebular emission to remove from object
self.nebDefault['wave_neb'] = self.obj_data['wave_resam'] / (1 + z_obj)
self.nebDefault['synth_neb_flux'] = self.nebFluxCont(self.nebDefault['wave_neb'], cHbeta, nebFlambda, Te, He1_abund, He2_abund, fluxHalpha_norm)
return
|
|
from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.shortcuts import render
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Callable, Dict, List, Optional, Set, Text, \
Tuple, Type, Union
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render(request,
'analytics/stats.html',
context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
if chart_name == 'number_of_humans':
stat = COUNT_STATS['active_users:is_bot:day']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True]
elif chart_name == 'messages_sent_over_time':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount, UserCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True, False]
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroups = ['public_stream', 'private_stream', 'private_message']
labels = ['Public Streams', 'Private Streams', 'PMs & Group PMs']
labels_sort_function = lambda data: sort_by_totals(data['realm'])
include_empty_subgroups = [True, True]
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
# these are further re-written by client_label_map
labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
labels_sort_function = sort_client_labels
include_empty_subgroups = [False, False]
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None and end is not None and start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
realm = user_profile.realm
if start is None:
start = realm.date_created
if end is None:
end = last_successful_fill(stat.property)
if end is None or start > end:
logging.warning("User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation time of realm) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?" % (realm.string_id, start, end))
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
for table, include_empty_subgroups_ in zip(tables, include_empty_subgroups):
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups_)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups_)
if labels_sort_function is not None:
data['display_order'] = labels_sort_function(data)
else:
data['display_order'] = None
return json_success(data=data)
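# Illustrative (hypothetical) shape of the data dict assembled above for
# chart_name == 'number_of_humans' (values are made up):
#   {'end_times': [...], 'frequency': ..., 'interval': ...,
#    'realm': {'human': [1, 2, 3], 'bot': [0, 0, 1]},
#    'display_order': None}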
def sort_by_totals(value_arrays):
# type: (Dict[str, List[int]]) -> List[str]
totals = []
for label, values in value_arrays.items():
totals.append((label, sum(values)))
totals.sort(key=lambda label_total: label_total[1], reverse=True)
return [label for label, total in totals]
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data):
# type: (Dict[str, Dict[str, List[int]]]) -> List[str]
realm_order = sort_by_totals(data['realm'])
user_order = sort_by_totals(data['user'])
label_sort_values = {} # type: Dict[str, float]
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i-.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(),
key=lambda x: x[1])]
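# Worked example with hypothetical data:
#   data = {'realm': {'website': [5, 5], 'API': [1, 0]},
#           'user':  {'API': [3, 3], 'website': [0, 1]}}
#   realm_order == ['website', 'API'], user_order == ['API', 'website']
#   label_sort_values: 'website' -> min(0.9, 0) = 0, 'API' -> min(-0.1, 1) = -0.1
#   => sort_client_labels(data) == ['API', 'website']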
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise AssertionError("Unknown table: %s" % (table,))
def client_label_map(name):
# type: (str) -> str
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipAndroid":
return "Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "New iOS app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip"):-len("Webhook")] + " webhook"
# Clients in the dev environment's autogenerated data start with _ so
# that it's easy to manually drop them without affecting other data.
if settings.DEVELOPMENT and name.startswith("_"):
return name[1:]
return name
def rewrite_client_arrays(value_arrays):
# type: (Dict[str, List[int]]) -> Dict[str, List[int]]
mapped_arrays = {} # type: Dict[str, List[int]]
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
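# Worked example with hypothetical input:
#   {'desktop app Linux': [1, 0], 'desktop app Mac': [2, 1], 'website': [3, 3]}
# both desktop labels map to 'Old desktop app' and are summed, giving
#   {'Old desktop app': [3, 1], 'Website': [3, 3]}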
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], List[str], bool) -> Dict[str, List[int]]
if len(subgroups) != len(labels):
raise AssertionError("subgroups and labels have lengths %s and %s, which are different." %
(len(subgroups), len(labels)))
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in zip(subgroups, labels):
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS['messages_sent:client:day']:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
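# Illustrative (hypothetical) result: for a cursor whose description names the
# columns ('string_id', 'age', 'cnt') and whose rows are [('zulip', 0, 42)],
# dictfetchall(cursor) returns [{'string_id': 'zulip', 'age': 0, 'cnt': 42}].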
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title, is_home=True),
)
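# Fetch UserActivity rows for active users (humans or bots) of a realm, ordered
# by email and most recent visit, limited to the fields the reports need.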
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
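# Collapse a user's activity records into a dict keyed by "action" (client name,
# 'use', 'send', 'pointer', ...), each entry holding a cumulative count and the
# most recent last_visit.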
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
#: `Any` used above should be `Union[int, datetime]`.
#: However, the current version of `Union` does not work inside other functions.
#: We could use something like
#: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`,
#: but that would require carrying this long `Union` throughout the inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
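# Flatten per-user summaries into one "Clients" table with a row per
# (user, client) pair, skipping the synthetic keys that are not real clients.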
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
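# Group activity records by email, summarize each user, and render the realm
# "Summary" table, sorted by most recent 'Heard from' time; admins and recently
# active users get extra CSS row classes.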
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = timezone.now() - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Any]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render(
request,
'analytics/activity.html',
context=dict(data=data, realm_link=realm_link, title=title),
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title),
)
|
|
"""Common test objects."""
import copy
from datetime import datetime
import json
from unittest.mock import ANY, patch
import yaml
from homeassistant import config as hass_config
from homeassistant.components import mqtt
from homeassistant.components.mqtt import debug_info
from homeassistant.components.mqtt.const import MQTT_DISCONNECTED
from homeassistant.components.mqtt.mixins import MQTT_ATTRIBUTES_BLOCKED
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
SERVICE_RELOAD,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
from tests.common import async_fire_mqtt_message, mock_registry
DEFAULT_CONFIG_DEVICE_INFO_ID = {
"identifiers": ["helloworld"],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"suggested_area": "default_area",
"configuration_url": "http://example.com",
}
DEFAULT_CONFIG_DEVICE_INFO_MAC = {
"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"suggested_area": "default_area",
"configuration_url": "http://example.com",
}
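# The helpers below are shared by the per-platform MQTT test modules. Each takes
# the `hass` and `mqtt_mock` test fixtures plus the platform domain and a base
# config dict, deep-copies the config, and exercises one mixin behaviour
# (availability, JSON attributes, discovery updates, device registry, debug_info,
# encoding, reload). A rough usage sketch -- the switch platform and its
# DEFAULT_CONFIG name are illustrative assumptions, not part of this module:
#
#     async def test_availability_without_topic(hass, mqtt_mock):
#         await help_test_availability_without_topic(
#             hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
#         )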
async def help_test_availability_when_connection_lost(hass, mqtt_mock, domain, config):
"""Test availability after MQTT disconnection."""
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
mqtt_mock.connected = False
async_dispatcher_send(hass, MQTT_DISCONNECTED)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async def help_test_availability_without_topic(hass, mqtt_mock, domain, config):
"""Test availability without defined availability topic."""
assert "availability_topic" not in config[domain]
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_topic"] = "availability-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_all(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_mode"] = "all"
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_any(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_mode"] = "any"
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async def help_test_default_availability_list_single(
hass,
mqtt_mock,
caplog,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability list and availability_topic are mutually exclusive.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability"] = [
{"topic": "availability-topic1"},
]
config[domain]["availability_topic"] = "availability-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is None
assert (
"Invalid config for [sensor.mqtt]: two or more values in the same group of exclusion 'availability'"
in caplog.text
)
async def help_test_custom_availability_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by custom payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_topic"] = "availability-topic"
config[domain]["payload_available"] = "good"
config[domain]["payload_not_available"] = "nogood"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic", "nogood")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_discovery_update_availability(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test update of discovered MQTTAvailability.
This is a test helper for the MQTTAvailability mixin.
"""
# Add availability settings to config
config1 = copy.deepcopy(config)
config1[domain]["availability_topic"] = "availability-topic1"
config2 = copy.deepcopy(config)
config2[domain]["availability"] = [
{"topic": "availability-topic2"},
{"topic": "availability-topic3"},
]
config3 = copy.deepcopy(config)
config3[domain]["availability_topic"] = "availability-topic4"
data1 = json.dumps(config1[domain])
data2 = json.dumps(config2[domain])
data3 = json.dumps(config3[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Change availability_topic
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "availability-topic3", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Change availability_topic
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data3)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "availability-topic3", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "availability-topic4", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, domain, config
):
"""Test the setting of attribute via MQTT with JSON payload.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "100"
async def help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, domain, config, extra_blocked_attributes
):
"""Test the setting of blocked attribute via MQTT with JSON payload.
This is a test helper for the MqttAttributes mixin.
"""
extra_blocked_attributes = extra_blocked_attributes or []
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
data = json.dumps(config[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
val = "abc123"
for attr in MQTT_ATTRIBUTES_BLOCKED:
async_fire_mqtt_message(hass, "attr-topic", json.dumps({attr: val}))
state = hass.states.get(f"{domain}.test")
assert state.attributes.get(attr) != val
for attr in extra_blocked_attributes:
async_fire_mqtt_message(hass, "attr-topic", json.dumps({attr: val}))
state = hass.states.get(f"{domain}.test")
assert state.attributes.get(attr) != val
async def help_test_setting_attribute_with_template(hass, mqtt_mock, domain, config):
"""Test the setting of attribute via MQTT with JSON payload.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
config[domain]["json_attributes_template"] = "{{ value_json['Timer1'] | tojson }}"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "attr-topic", json.dumps({"Timer1": {"Arm": 0, "Time": "22:18"}})
)
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("Arm") == 0
assert state.attributes.get("Time") == "22:18"
async def help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, domain, config
):
"""Test attributes get extracted from a JSON result.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, domain, config
):
"""Test JSON validation of attributes.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def help_test_discovery_update_attr(hass, mqtt_mock, caplog, domain, config):
"""Test update of discovered MQTTAttributes.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config1 = copy.deepcopy(config)
config1[domain]["json_attributes_topic"] = "attr-topic1"
config2 = copy.deepcopy(config)
config2[domain]["json_attributes_topic"] = "attr-topic2"
data1 = json.dumps(config1[domain])
data2 = json.dumps(config2[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "75"
async def help_test_unique_id(hass, mqtt_mock, domain, config):
"""Test unique id option only creates one entity per unique_id."""
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(domain)) == 1
async def help_test_discovery_removal(hass, mqtt_mock, caplog, domain, data):
"""Test removal of discovered component.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is not None
assert state.name == "test"
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is None
async def help_test_discovery_update(
hass,
mqtt_mock,
caplog,
domain,
discovery_config1,
discovery_config2,
state_data1=None,
state_data2=None,
):
"""Test update of discovered component.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
# Add some future configuration to the configurations
config1 = copy.deepcopy(discovery_config1)
config1["some_future_option_1"] = "future_option_1"
config2 = copy.deepcopy(discovery_config2)
config2["some_future_option_2"] = "future_option_2"
discovery_data1 = json.dumps(config1)
discovery_data2 = json.dumps(config2)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", discovery_data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is not None
assert state.name == "Beer"
if state_data1:
for (mqtt_messages, expected_state, attributes) in state_data1:
for (topic, data) in mqtt_messages:
async_fire_mqtt_message(hass, topic, data)
state = hass.states.get(f"{domain}.beer")
if expected_state:
assert state.state == expected_state
if attributes:
for (attr, value) in attributes:
assert state.attributes.get(attr) == value
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", discovery_data2)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is not None
assert state.name == "Milk"
if state_data2:
for (mqtt_messages, expected_state, attributes) in state_data2:
for (topic, data) in mqtt_messages:
async_fire_mqtt_message(hass, topic, data)
state = hass.states.get(f"{domain}.beer")
if expected_state:
assert state.state == expected_state
if attributes:
for (attr, value) in attributes:
assert state.attributes.get(attr) == value
state = hass.states.get(f"{domain}.milk")
assert state is None
async def help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, domain, data1, discovery_update
):
"""Test update of discovered component without changes.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
assert not discovery_update.called
async def help_test_discovery_broken(hass, mqtt_mock, caplog, domain, data1, data2):
"""Test handling of bad discovery message."""
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is None
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get(f"{domain}.beer")
assert state is None
async def help_test_encoding_subscribable_topics(
hass,
mqtt_mock,
caplog,
domain,
config,
topic,
value,
attribute=None,
attribute_value=None,
init_payload=None,
skip_raw_test=False,
):
"""Test handling of incoming encoded payload."""
async def _test_encoding(
hass,
entity_id,
topic,
encoded_value,
attribute,
init_payload_topic,
init_payload_value,
):
state = hass.states.get(entity_id)
if init_payload_value:
# Sometimes a device needs an initialization payload, e.g. to switch the device on.
async_fire_mqtt_message(hass, init_payload_topic, init_payload_value)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
async_fire_mqtt_message(hass, topic, encoded_value)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
if attribute:
return state.attributes.get(attribute)
return state.state if state else None
init_payload_value_utf8 = None
init_payload_value_utf16 = None
# setup test1 default encoding
config1 = copy.deepcopy(config)
if domain == "device_tracker":
config1["unique_id"] = "test1"
else:
config1["name"] = "test1"
config1[topic] = "topic/test1"
# setup test2 alternate encoding
config2 = copy.deepcopy(config)
if domain == "device_tracker":
config2["unique_id"] = "test2"
else:
config2["name"] = "test2"
config2["encoding"] = "utf-16"
config2[topic] = "topic/test2"
# setup test3 raw encoding
config3 = copy.deepcopy(config)
if domain == "device_tracker":
config3["unique_id"] = "test3"
else:
config3["name"] = "test3"
config3["encoding"] = ""
config3[topic] = "topic/test3"
if init_payload:
config1[init_payload[0]] = "topic/init_payload1"
config2[init_payload[0]] = "topic/init_payload2"
config3[init_payload[0]] = "topic/init_payload3"
init_payload_value_utf8 = init_payload[1].encode("utf-8")
init_payload_value_utf16 = init_payload[1].encode("utf-16")
await hass.async_block_till_done()
assert await async_setup_component(
hass, domain, {domain: [config1, config2, config3]}
)
await hass.async_block_till_done()
expected_result = attribute_value or value
# test1 default encoding
assert (
await _test_encoding(
hass,
f"{domain}.test1",
"topic/test1",
value.encode("utf-8"),
attribute,
"topic/init_payload1",
init_payload_value_utf8,
)
== expected_result
)
# test2 alternate encoding
assert (
await _test_encoding(
hass,
f"{domain}.test2",
"topic/test2",
value.encode("utf-16"),
attribute,
"topic/init_payload2",
init_payload_value_utf16,
)
== expected_result
)
# test3 raw encoded input
if skip_raw_test:
return
try:
result = await _test_encoding(
hass,
f"{domain}.test3",
"topic/test3",
value.encode("utf-16"),
attribute,
"topic/init_payload3",
init_payload_value_utf16,
)
assert result != expected_result
except (AttributeError, TypeError, ValueError):
pass
async def help_test_entity_device_info_with_identifier(hass, mqtt_mock, domain, config):
"""Test device registry integration.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
assert device.suggested_area == "default_area"
assert device.configuration_url == "http://example.com"
async def help_test_entity_device_info_with_connection(hass, mqtt_mock, domain, config):
"""Test device registry integration.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_MAC)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
)
assert device is not None
assert device.connections == {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
assert device.suggested_area == "default_area"
assert device.configuration_url == "http://example.com"
async def help_test_entity_device_info_remove(hass, mqtt_mock, domain, config):
"""Test device registry remove."""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
dev_registry = dr.async_get(hass)
ent_registry = er.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = dev_registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique")
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", "")
await hass.async_block_till_done()
device = dev_registry.async_get_device({("mqtt", "helloworld")})
assert device is None
assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique")
async def help_test_entity_device_info_update(hass, mqtt_mock, domain, config):
"""Test device registry update.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.name == "Milk"
async def help_test_entity_id_update_subscriptions(
hass, mqtt_mock, domain, config, topics=None
):
"""Test MQTT subscriptions are managed when entity_id is updated."""
# Add unique_id to config
config = copy.deepcopy(config)
config[domain]["unique_id"] = "TOTALLY_UNIQUE"
if topics is None:
# Add default topics to config
config[domain]["availability_topic"] = "avty-topic"
config[domain]["state_topic"] = "test-topic"
topics = ["avty-topic", "test-topic"]
assert len(topics) > 0
registry = mock_registry(hass, {})
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is not None
assert mqtt_mock.async_subscribe.call_count == len(topics)
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
mqtt_mock.async_subscribe.reset_mock()
registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is None
state = hass.states.get(f"{domain}.milk")
assert state is not None
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
async def help_test_entity_id_update_discovery_update(
hass, mqtt_mock, domain, config, topic=None
):
"""Test MQTT discovery update after entity_id is updated."""
# Add unique_id to config
config = copy.deepcopy(config)
config[domain]["unique_id"] = "TOTALLY_UNIQUE"
if topic is None:
# Add default topic to config
config[domain]["availability_topic"] = "avty-topic"
topic = "avty-topic"
ent_registry = mock_registry(hass, {})
data = json.dumps(config[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, topic, "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, topic, "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
ent_registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
await hass.async_block_till_done()
config[domain]["availability_topic"] = f"{topic}_2"
data = json.dumps(config[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(domain)) == 1
async_fire_mqtt_message(hass, f"{topic}_2", "online")
state = hass.states.get(f"{domain}.milk")
assert state.state != STATE_UNAVAILABLE
async def help_test_entity_debug_info(hass, mqtt_mock, domain, config):
"""Test debug_info.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
async def help_test_entity_debug_info_max_messages(hass, mqtt_mock, domain, config):
"""Test debug_info message overflow.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
for i in range(0, debug_info.STORED_MESSAGES + 1):
async_fire_mqtt_message(hass, "test-topic", f"{i}")
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert (
len(debug_info_data["entities"][0]["subscriptions"][0]["messages"])
== debug_info.STORED_MESSAGES
)
messages = [
{
"payload": f"{i}",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "test-topic",
}
for i in range(1, debug_info.STORED_MESSAGES + 1)
]
assert {"topic": "test-topic", "messages": messages} in debug_info_data["entities"][
0
]["subscriptions"]
async def help_test_entity_debug_info_message(
hass, mqtt_mock, domain, config, topic=None, payload=None
):
"""Test debug_info message overflow.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
if topic is None:
# Add default topic to config
config["state_topic"] = "state-topic"
topic = "state-topic"
if payload is None:
payload = "ON"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": topic, "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, topic, payload)
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {
"topic": topic,
"messages": [
{
"payload": payload,
"qos": 0,
"retain": False,
"time": start_dt,
"topic": topic,
}
],
} in debug_info_data["entities"][0]["subscriptions"]
async def help_test_entity_debug_info_remove(hass, mqtt_mock, domain, config):
"""Test debug_info.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.test"
entity_id = debug_info_data["entities"][0]["entity_id"]
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", "")
await hass.async_block_till_done()
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 0
assert entity_id not in hass.data[debug_info.DATA_MQTT_DEBUG_INFO]["entities"]
async def help_test_entity_debug_info_update_entity_id(hass, mqtt_mock, domain, config):
"""Test debug_info.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
dev_registry = dr.async_get(hass)
ent_registry = mock_registry(hass, {})
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = dev_registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.test"
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
ent_registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
await hass.async_block_till_done()
await hass.async_block_till_done()
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.milk"
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
assert (
f"{domain}.test" not in hass.data[debug_info.DATA_MQTT_DEBUG_INFO]["entities"]
)
async def help_test_entity_disabled_by_default(hass, mqtt_mock, domain, config):
"""Test device registry remove."""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["enabled_by_default"] = False
config["unique_id"] = "veryunique1"
dev_registry = dr.async_get(hass)
ent_registry = er.async_get(hass)
# Discover a disabled entity
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla1/config", data)
await hass.async_block_till_done()
entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique1")
assert not hass.states.get(entity_id)
assert dev_registry.async_get_device({("mqtt", "helloworld")})
# Discover an enabled entity, tied to the same device
config["enabled_by_default"] = True
config["unique_id"] = "veryunique2"
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla2/config", data)
await hass.async_block_till_done()
entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique2")
assert hass.states.get(entity_id)
# Remove the enabled entity, both entities and the device should be removed
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla2/config", "")
await hass.async_block_till_done()
assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique1")
assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique2")
assert not dev_registry.async_get_device({("mqtt", "helloworld")})
async def help_test_entity_category(hass, mqtt_mock, domain, config):
"""Test device registry remove."""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
ent_registry = er.async_get(hass)
# Discover an entity without entity category
unique_id = "veryunique1"
config["unique_id"] = unique_id
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/{unique_id}/config", data)
await hass.async_block_till_done()
entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, unique_id)
assert hass.states.get(entity_id)
entry = ent_registry.async_get(entity_id)
assert entry.entity_category is None
# Discover an entity with entity category set to "config"
unique_id = "veryunique2"
config["entity_category"] = "config"
config["unique_id"] = unique_id
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/{unique_id}/config", data)
await hass.async_block_till_done()
entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, unique_id)
assert hass.states.get(entity_id)
entry = ent_registry.async_get(entity_id)
assert entry.entity_category == "config"
# Discover an entity with entity category set to "no_such_category"
unique_id = "veryunique3"
config["entity_category"] = "no_such_category"
config["unique_id"] = unique_id
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/{unique_id}/config", data)
await hass.async_block_till_done()
assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, unique_id)
async def help_test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
domain,
config,
service,
topic,
parameters,
payload,
template,
tpl_par="value",
tpl_output=None,
):
"""Test a service with publishing MQTT payload with different encoding."""
# prepare config for tests
test_config = {
"test1": {"encoding": None, "cmd_tpl": False},
"test2": {"encoding": "utf-16", "cmd_tpl": False},
"test3": {"encoding": "", "cmd_tpl": False},
"test4": {"encoding": "invalid", "cmd_tpl": False},
"test5": {"encoding": "", "cmd_tpl": True},
}
setup_config = []
service_data = {}
for test_id, test_data in test_config.items():
test_config_setup = copy.deepcopy(config)
test_config_setup.update(
{
topic: f"cmd/{test_id}",
"name": f"{test_id}",
}
)
if test_data["encoding"] is not None:
test_config_setup["encoding"] = test_data["encoding"]
if test_data["cmd_tpl"]:
test_config_setup[
template
] = f"{{{{ (('%.1f'|format({tpl_par}))[0] if is_number({tpl_par}) else {tpl_par}[0]) | ord | pack('b') }}}}"
setup_config.append(test_config_setup)
# setup service data
service_data[test_id] = {ATTR_ENTITY_ID: f"{domain}.{test_id}"}
if parameters:
service_data[test_id].update(parameters)
# setup test entities
assert await async_setup_component(
hass,
domain,
{domain: setup_config},
)
await hass.async_block_till_done()
# 1) test with default encoding
await hass.services.async_call(
domain,
service,
service_data["test1"],
blocking=True,
)
mqtt_mock.async_publish.assert_any_call("cmd/test1", str(payload), 0, False)
mqtt_mock.async_publish.reset_mock()
# 2) test with utf-16 encoding
await hass.services.async_call(
domain,
service,
service_data["test2"],
blocking=True,
)
mqtt_mock.async_publish.assert_any_call(
"cmd/test2", str(payload).encode("utf-16"), 0, False
)
mqtt_mock.async_publish.reset_mock()
# 3) test with no encoding set should fail if payload is a string
await hass.services.async_call(
domain,
service,
service_data["test3"],
blocking=True,
)
assert (
f"Can't pass-through payload for publishing {payload} on cmd/test3 with no encoding set, need 'bytes'"
in caplog.text
)
# 4) test with invalid encoding set should fail
await hass.services.async_call(
domain,
service,
service_data["test4"],
blocking=True,
)
assert (
f"Can't encode payload for publishing {payload} on cmd/test4 with encoding invalid"
in caplog.text
)
# 5) test with command template and raw encoding if specified
if not template:
return
await hass.services.async_call(
domain,
service,
service_data["test5"],
blocking=True,
)
mqtt_mock.async_publish.assert_any_call(
"cmd/test5", tpl_output or str(payload)[0].encode("utf-8"), 0, False
)
mqtt_mock.async_publish.reset_mock()
async def help_test_reloadable(hass, mqtt_mock, caplog, tmp_path, domain, config):
"""Test reloading an MQTT platform."""
# Create and test an old config of 2 entities based on the config supplied
old_config_1 = copy.deepcopy(config)
old_config_1["name"] = "test_old_1"
old_config_2 = copy.deepcopy(config)
old_config_2["name"] = "test_old_2"
assert await async_setup_component(
hass, domain, {domain: [old_config_1, old_config_2]}
)
await hass.async_block_till_done()
assert hass.states.get(f"{domain}.test_old_1")
assert hass.states.get(f"{domain}.test_old_2")
assert len(hass.states.async_all(domain)) == 2
# Create a temporary configuration.yaml based on the supplied config and test a reload with this new config
new_config_1 = copy.deepcopy(config)
new_config_1["name"] = "test_new_1"
new_config_2 = copy.deepcopy(config)
new_config_2["name"] = "test_new_2"
new_config_3 = copy.deepcopy(config)
new_config_3["name"] = "test_new_3"
new_yaml_config_file = tmp_path / "configuration.yaml"
new_yaml_config = yaml.dump({domain: [new_config_1, new_config_2, new_config_3]})
new_yaml_config_file.write_text(new_yaml_config)
assert new_yaml_config_file.read_text() == new_yaml_config
with patch.object(hass_config, "YAML_CONFIG_FILE", new_yaml_config_file):
await hass.services.async_call(
"mqtt",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert "<Event event_mqtt_reloaded[L]>" in caplog.text
assert len(hass.states.async_all(domain)) == 3
assert hass.states.get(f"{domain}.test_new_1")
assert hass.states.get(f"{domain}.test_new_2")
assert hass.states.get(f"{domain}.test_new_3")
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__='ar'
import re
import sys
import os
import glob
import time
import json
import numpy as np
import skimage.io as io
import skimage.color as skcolor
import skimage.transform as sktransform
import matplotlib.pyplot as plt
from keras import backend as K
import keras
from keras.models import Sequential
from keras.layers import Convolution1D, Convolution2D, Convolution3D,\
MaxPooling1D, MaxPooling2D, MaxPooling3D,\
AveragePooling1D,AveragePooling2D, AveragePooling3D,\
InputLayer, Flatten, Merge, Activation, Dense, Dropout
# from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD
from keras.models import model_from_json
from keras.optimizers import Optimizer
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax
from app.backend.core import utils as dlsutils
from batcher_image2d import BatcherImage2DLMDB
# from flow_parser import getKerasOptimizerName
from cfg import CFG_MODEL_TRAIN, CFG_SOLVER
#########################
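# Map the optimizer name from the JSON solver config to a Keras optimizer
# instance, and (below) back from an optimizer instance to its name.
# Unsupported names/types return None.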
def getOptimizerJson2Keras(strOpt, parLR=0.1):
# FIXME: only the learning rate is passed through; other optimizer-specific fields are left at their Keras defaults...
if strOpt == "SGD":
return SGD(lr=parLR)
elif strOpt == "RMSprop":
return RMSprop(lr=parLR)
elif strOpt == "Adagrad":
return Adagrad(lr=parLR)
elif strOpt == "Adadelta":
return Adadelta(lr=parLR)
elif strOpt == "Adam":
return Adam(lr=parLR)
elif strOpt == "Adamax":
return Adamax(lr=parLR)
elif strOpt == "Nadam":
return None
else:
return None
def getKerasOptimizerName(optObj):
if isinstance(optObj, SGD):
return 'SGD'
elif isinstance(optObj, RMSprop):
return 'RMSprop'
elif isinstance(optObj, Adagrad):
return 'Adagrad'
elif isinstance(optObj, Adadelta):
return 'Adadelta'
elif isinstance(optObj, Adam):
return 'Adam'
elif isinstance(optObj, Adamax):
return 'Adamax'
else:
return None
#########################
def split_list_by_blocks(lst, psiz):
"""
Split a list into chunks of fixed size psiz (the last chunk may be shorter).
:param lst: input list
:param psiz: chunk size
:return: list of chunks
"""
tret = [lst[x:x + psiz] for x in xrange(0, len(lst), psiz)]
return tret
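# Return the index of the last layer of the given type (searching from the end
# of model.layers), or -1 if no such layer exists.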
def findLayerFromEndByType(model, layerType):
for ii,ll in enumerate(model.layers[::-1]):
if isinstance(ll, layerType):
return (len(model.layers) - ii - 1)
return -1
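# Create a fresh, unbuilt copy of a supported layer from its config (weights are
# not copied); returns None for unsupported layer types.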
def cloneLayerFromLayer(pLayer):
if isinstance(pLayer, Convolution1D):
return Convolution1D.from_config(pLayer.get_config())
elif isinstance(pLayer, Convolution2D):
return Convolution2D.from_config(pLayer.get_config())
elif isinstance(pLayer, Convolution3D):
return Convolution3D.from_config(pLayer.get_config())
# Max-Pooling:
elif isinstance(pLayer, MaxPooling1D):
return MaxPooling1D.from_config(pLayer.get_config())
elif isinstance(pLayer, MaxPooling2D):
return MaxPooling2D.from_config(pLayer.get_config())
elif isinstance(pLayer, MaxPooling3D):
return MaxPooling3D.from_config(pLayer.get_config())
# Average-Pooling
elif isinstance(pLayer, AveragePooling1D):
return AveragePooling1D.from_config(pLayer.get_config())
elif isinstance(pLayer, AveragePooling2D):
return AveragePooling2D.from_config(pLayer.get_config())
elif isinstance(pLayer, AveragePooling3D):
return AveragePooling3D.from_config(pLayer.get_config())
#
elif isinstance(pLayer, Flatten):
return Flatten.from_config(pLayer.get_config())
elif isinstance(pLayer, Merge):
return Merge.from_config(pLayer.get_config())
elif isinstance(pLayer, Activation):
return Activation.from_config(pLayer.get_config())
elif isinstance(pLayer, Dropout):
return Dropout.from_config(pLayer.get_config())
#
elif isinstance(pLayer, Dense):
return Dense.from_config(pLayer.get_config())
return None
#########################
class KerasTrainer:
extModelWeights = 'h5kerasmodel'
extJsonTrainConfig = '_trainconfig.json'
extJsonSolverState = '_solverstate.json'
modelPrefix=''
batcherLMDB = None
pathModelConfig=None
model=None
outputDir=None
sizeBatch=32
numEpoch=1
numIterPerEpoch=0
intervalSaveModel=1
intervalValidation=1
currentIter=0
currentEpoch=0
printInterval=20
modelName="Unknown"
deviceType='cpu'
def __init__(self):
self.cleanResults()
@staticmethod
def adjustModelInputOutput2DBData(parModel, parLMDB, isFixOutputLayer = True):
# (1) check LMDB is object instance or path to DB
if isinstance(parLMDB, BatcherImage2DLMDB):
ptrLMDB = parLMDB
elif (isinstance(parLMDB, str) or isinstance(parLMDB, unicode)):
ptrLMDB = BatcherImage2DLMDB(parLMDB, 1)
else:
raise Exception("Unknown parLMDB instance")
# (2) Build Sequential model (currently only Sequential models supported)
retModel = Sequential()
tmpL0 = parModel.layers[0]
# (3) if InputLayer is present - skip it
if isinstance(tmpL0, InputLayer):
idxStart=1
else:
idxStart=0
# (4) Recreate new InputShape layer with DB input shape
retModel.add(InputLayer(input_shape=ptrLMDB.shapeImg))
#FIXME: check this code - is implicit layer resizing really a good idea?
# (5) find the output Dense layer to automatically adjust its output size to the DB output
idxDense = -1
if isFixOutputLayer:
idxDense = findLayerFromEndByType(parModel, keras.layers.Dense)
if idxDense<0:
raise Exception('Models without a Dense layer are currently not supported!')
listLayers = parModel.layers[idxStart:idxDense]
else:
listLayers = parModel.layers[idxStart:]
# (6) Re-create model layers
for ll in listLayers:
ll.inbound_nodes = []
# print ('\tadd [%s]' % (ll.__str__()))
tmpLayer = cloneLayerFromLayer(ll)
retModel.add(tmpLayer)
# (7) fix output dimension
if isFixOutputLayer and idxDense>0:
#FIXME: hack for the classification model task
tmpLayer = parModel.layers[idxDense]
tmpLayer.inbound_nodes = []
tmpLayerConfig = tmpLayer.get_config()
#FIXME: check the Keras 'output_dim' parameter
tmpLayerConfig['output_dim'] = ptrLMDB.numLbl
retModel.add(Dense.from_config(tmpLayerConfig))
for ll in parModel.layers[idxDense+1:]:
ll.inbound_nodes = []
tmpLayer = cloneLayerFromLayer(ll)
retModel.add(tmpLayer)
#
# tmpL0 = parModel.layers[0]
# tmpL0cfg = tmpL0.get_config()
# if re.match(r'dense_input*', tmpL0.input.name) is not None:
# tmpShapeImageSize = np.prod(ptrLMDB.shapeImg)
# retModel = Sequential()
# retModel.add(
# Dense(tmpL0cfg['output_dim'], input_dim=tmpShapeImageSize, init=tmpL0cfg['init']))
# for ll in parModel.layers[1:]:
# retModel.add(ll)
# elif re.match(r'convolution2d_input*', tmpL0.input.name) is not None:
# retModel = Sequential()
# retModel.add(
# Convolution2D(tmpL0cfg['nb_filter'], tmpL0cfg['nb_col'], tmpL0cfg['nb_row'],
# border_mode=tmpL0cfg['border_mode'],
# subsample=tmpL0cfg['subsample'],
# input_shape=ptrLMDB.shapeImg,
# init=tmpL0cfg['init']))
# for ll in parModel.layers[1:]:
# ll.inbound_nodes=[]
# print (ll)
# retModel.add(ll)
# else:
# retModel = parModel
# FIXME: check this point (automatic output layer size). SoftMax to config in feature
# if isFixOutputLayer:
# retModel.add(Dense(ptrLMDB.numLbl, activation='softmax'))
return retModel
def buildModel(self, pathLMDBJob, pathModelConfig,
sizeBatch, numEpoch, intervalSaveModel=1, intervalValidation=1,
outputDir=None, modelPrefixName='keras_model', isResizeInputLayerToImageShape=True):
if self.isOk():
self.cleanModel()
self.loadBatcherLMDB(pathLMDBJob, sizeBatch)
with open(pathModelConfig, 'r') as f:
modelJSON = f.read()
modelFromCfg = model_from_json(modelJSON)
if modelFromCfg is not None:
self.pathModelConfig = pathModelConfig
self.sizeBatch = sizeBatch
self.numEpoch = numEpoch
self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
self.intervalSaveModel = intervalSaveModel
self.intervalValidation = intervalValidation
self.modelPrefix = modelPrefixName
self.cleanResults()
if outputDir is None:
self.outputDir = os.getcwd()
else:
if os.path.isdir(outputDir):
self.outputDir = outputDir
else:
strErr = "Directory not found [%s]" % outputDir
self.printError(strErr)
raise Exception(strErr)
# FIXME: check this point: need more accurate logic to sync Data-Shape and Model-Input-Shape
# if isResizeInputLayerToImageShape:
# tmpL0 = modelFromCfg.layers[0]
# tmpL0cfg = tmpL0.get_config()
# if re.match(r'dense_input*', tmpL0.input.name) is not None:
# tmpShapeImageSize = np.prod(self.lmdbReader.shapeImg)
# self.model = Sequential()
# self.model.add(
# Dense(tmpL0cfg['output_dim'], input_dim=tmpShapeImageSize, init=tmpL0cfg['init']))
# for ll in modelFromCfg.layers[1:]:
# self.model.add(ll)
# else:
# self.model = modelFromCfg
# else:
# self.model = modelFromCfg
# FIXME: check this point (automatic output layer size). SoftMax to config in feature
# self.model.add(Dense(self.lmdbReader.numLbl))
# self.model.add(Activation('softmax'))
self.model = KerasTrainer.adjustModelInputOutput2DBData(modelFromCfg, self.batcherLMDB)
# TODO: make the settings below configurable (optimizer, loss function, metrics)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
def buildModelFromConfigs(self, paramBatcherLMDB, modelConfig,
sizeBatch, numEpoch,
modelOptimizer=None,
intervalSaveModel=1, intervalValidation=1,
outputDir=None, modelPrefixName='keras_model',
isAppendOutputLayer = True):
self.batcherLMDB = paramBatcherLMDB
modelFromCfg = modelConfig
if modelFromCfg is not None:
self.pathModelConfig = None
self.sizeBatch = sizeBatch
self.numEpoch = numEpoch
self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
self.intervalSaveModel = intervalSaveModel
self.intervalValidation = intervalValidation
self.modelPrefix = modelPrefixName
self.cleanResults()
if outputDir is None:
self.outputDir = os.getcwd()
else:
if os.path.isdir(outputDir):
self.outputDir = outputDir
else:
strErr = "Directory not found [%s]" % outputDir
self.printError(strErr)
raise Exception(strErr)
self.model = KerasTrainer.adjustModelInputOutput2DBData(modelFromCfg, self.batcherLMDB, isFixOutputLayer=isAppendOutputLayer)
# TODO: make the settings below configurable (optimizer, loss function, metrics)
if modelOptimizer is None:
opt = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
else:
opt = modelOptimizer
self.model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
def isOk(self):
return ((self.batcherLMDB is not None) and (self.model is not None))
def loadBatcherLMDB(self, dbJobID, sizeBatch):
dirDataset=dlsutils.getPathForDatasetDir()
pathLMDBJob = os.path.join(dirDataset, dbJobID)
self.batcherLMDB = BatcherImage2DLMDB(pathLMDBJob, sizeBatch)
self.sizeBatch = sizeBatch
if not self.batcherLMDB.isOk():
strErr = "[KERAS-TRAINER] Incorrect LMDB-data in [%s]" % dbJobID
self.printError(strErr)
raise Exception(strErr)
def cleanResults(self):
self.trainLog={'epoch':[], 'iter':[], 'lossTrain':[], 'accTrain':[], 'lossVal':[], 'accVal':[]}
self.currentIter=0
self.currentEpoch=0
def cleanModel(self):
if self.isOk():
self.cleanResults()
self.model = None
self.batcherLMDB.close()
self.batcherLMDB = None
self.pathModelConfig = None
def printError(self, strError):
print("keras-error#%s" % strError)
def trainOneIter(self):
modelInputShape = list(self.model.input_shape)
dataX, dataY = self.batcherLMDB.getBatchTrain(reshape2Shape=modelInputShape)
tlossTrain = self.model.train_on_batch(dataX, dataY)
isNeedPrintInfo = False
if (self.currentIter % self.printInterval == 0):
dataXval, dataYval = self.batcherLMDB.getBatchVal(reshape2Shape=modelInputShape)
tlossVal = self.model.test_on_batch(dataXval, dataYval)
self.trainLog['epoch'].append(self.currentEpoch)
self.trainLog['iter'].append(self.currentIter)
self.trainLog['lossTrain'].append(float(tlossTrain[0]))
self.trainLog['accTrain'].append(float(tlossTrain[1]))
self.trainLog['lossVal'].append(float(tlossVal[0]))
self.trainLog['accVal'].append(float(tlossVal[1]))
print(("keras-info#%s#%s#%d|%d|%0.5f|%0.5f|%0.5f|%0.5f") % (
'I',
time.strftime('%Y.%m.%d-%H:%M:%S'),
self.currentEpoch,
self.currentIter,
self.trainLog['lossTrain'][-1],
self.trainLog['accTrain'][-1],
self.trainLog['lossVal'][-1],
self.trainLog['accVal'][-1]
))
sys.stdout.flush()
isNeedPrintInfo = True
self.currentIter += 1
return isNeedPrintInfo
def trainOneEpoch(self):
if not self.isOk():
strErr='KerasTrainer is not correctly initialized'
self.printError(strErr)
raise Exception(strErr)
modelInputShape = list(self.model.input_shape)
for ii in xrange(self.numIterPerEpoch):
dataX, dataY = self.batcherLMDB.getBatchTrain(reshape2Shape=modelInputShape)
tlossTrain = self.model.train_on_batch(dataX, dataY)
if (self.currentIter%self.printInterval==0):
dataXval, dataYval = self.batcherLMDB.getBatchVal(reshape2Shape=modelInputShape)
tlossVal = self.model.test_on_batch(dataXval, dataYval)
self.trainLog['epoch'].append(self.currentEpoch)
self.trainLog['iter'].append(self.currentIter)
self.trainLog['lossTrain'].append(tlossTrain[0])
self.trainLog['accTrain'].append(tlossTrain[1])
self.trainLog['lossVal'].append(tlossVal[0])
self.trainLog['accVal'].append(tlossVal[1])
print(("keras-info#%s#%s#%d|%d|%0.5f|%0.5f|%0.5f|%0.5f") % (
'I',
time.strftime('%Y.%m.%d-%H:%M:%S'),
self.currentEpoch,
self.currentIter,
self.trainLog['lossTrain'][-1],
self.trainLog['accTrain'][-1],
self.trainLog['lossVal'][-1],
self.trainLog['accVal'][-1]
))
sys.stdout.flush()
self.currentIter +=1
self.currentEpoch += 1
def convertImgUint8ToDBImage(self, pimg):
#FIXME: the shape could be taken from the Batcher or from model.layers...
if len(self.batcherLMDB.shapeImg) < 3:
numCh = 1
else:
# FIXME: check this point, the number of channels may be the last element of the array...
numCh = self.batcherLMDB.shapeImg[0]
# check #channels of input image
if len(pimg.shape) < 3:
numChImg = 1
else:
numChImg = 3
# if the number of channels in the input image differs from the training database, convert the image to the database shape
if numCh != numChImg:
if numCh == 1:
# FIXME: this works around a potential bug: rgb2gray automatically rescales the value range from (0, 255) to (0, 1)
pimg = skcolor.rgb2gray(pimg.astype(np.float))
else:
pimg = skcolor.gray2rgb(pimg)
timg = sktransform.resize(pimg.astype(np.float32) * self.batcherLMDB.scaleFactor, self.batcherLMDB.shapeImg[1:])
if numCh==1:
timg = timg.reshape([1] + list(timg.shape))
else:
timg = timg.transpose((2, 0, 1))
if self.batcherLMDB.isRemoveMean:
# FIXME: check this point: the mean-removal method should come from a single config (for both train and inference stages)
timg -= self.batcherLMDB.meanChImage
return timg
def inferListImagePath(self, listPathToImages, batchSizeInfer=None):
if not self.isOk():
strError = 'KerasTrainer class is not initialized to call inference()'
self.printError(strError)
raise Exception(strError)
if batchSizeInfer is None:
batchSizeInfer = self.sizeBatch
splListPathToImages = split_list_by_blocks(listPathToImages, batchSizeInfer)
retProb = None
for idxBatch,lstPath in enumerate(splListPathToImages):
modelInputShape = list(self.model.input_shape)
# Fit batchSize to current number of images in list (lstPath)
tmpBatchSize = len(lstPath)
tdataX=None
for ppi,ppath in enumerate(lstPath):
timg = io.imread(ppath)
if timg is None:
strError = 'Cannot read input image [%s]; the image may be corrupt' % ppath
self.printError(strError)
raise Exception(strError)
timg = self.convertImgUint8ToDBImage(timg)
# Delayed initialization of Batch of Input-Data
if tdataX is None:
tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
tdataX = np.zeros(tsizeX, np.float32)
tdataX[ppi] = timg
#FIXME: check this point; this code was tested on fully-connected NNs and still needs tests for convolutional neural networks
tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
# tprob = self.model.predict(tdataX, batch_size=tmpBatchSize)
tprob = self.model.predict(tdataX)
# Delayed initialization of returned classification probability
if retProb is None:
retProb = tprob
else:
retProb = np.concatenate((retProb, tprob))
idxMax = np.argmax(retProb, axis=1)
retLbl = np.array(self.batcherLMDB.lbl)[idxMax]
retVal = np.max(retProb, axis=1)
ret = {
'prob' : retProb,
'label' : retLbl,
'val' : retVal
}
return ret
def inferOneImageU8_DebugActivations(self, imgu8):
# [BEGIN] this code is cloned from self.inferOneImageU8()
timg = self.convertImgUint8ToDBImage(imgu8)
tmpBatchSize = 1
tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
# FIXME: [1] check data type! [float32/float64]
tdataX = np.zeros(tsizeX, np.float32)
tdataX[0] = timg
modelInputShape = list(self.model.input_shape)
tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
# [END] this code is cloned from self.inferOneImageU8()
lstLayerForK=[]
for ii in xrange(len(self.model.layers)):
lstLayerForK.append(self.model.layers[ii].output)
localGetActivations = K.function([self.model.layers[0].input], lstLayerForK)
dataActivations = localGetActivations([tdataX])
return dataActivations
def inferOneImageU8(self, imgu8):
timg = self.convertImgUint8ToDBImage(imgu8)
tmpBatchSize = 1
tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
# FIXME: [1] check data type! [float32/float64]
tdataX = np.zeros(tsizeX, np.float32)
tdataX[0] = timg
modelInputShape = list(self.model.input_shape)
tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
tprob = self.model.predict(tdataX, batch_size=1)
posMax = np.argmax(tprob[0])
tlbl = self.batcherLMDB.lbl[posMax]
tval = tprob[0][posMax]
tret = {
'prob': tprob,
'label': tlbl,
'val': tval
}
return tret
def inferOneImagePath(self, pathToImage):
if not self.isOk():
strError = 'KerasTrainer class is not initialized to call inference()'
self.printError(strError)
raise Exception(strError)
if not os.path.isfile(pathToImage):
strError='Cant find input image [%s]' % pathToImage
self.printError(strError)
raise Exception(strError)
timgu8 = io.imread(pathToImage)
if timgu8 is None:
strError = 'Cannot read input image [%s]; the image may be corrupt' % pathToImage
self.printError(strError)
raise Exception(strError)
return self.inferOneImageU8(timgu8)
def inferOneImagePathSorted(self, pathToImage):
tret = self.inferOneImagePath(pathToImage)
tarrProb=tret['prob'][0]
sortedIdx = np.argsort(-tarrProb)
sortedLbl = np.array(self.batcherLMDB.lbl)[sortedIdx]
sortedProb = tarrProb[sortedIdx]
tmp = [(ll,pp) for ll,pp in zip(sortedLbl,sortedProb)]
ret = {
'best': {
'label': tret['label'],
'prob': tret['val']
},
'distrib': tmp
}
return ret
def saveModelState(self, parOutputDir=None, isSaveWeights=True):
if parOutputDir is not None:
if not os.path.isdir(parOutputDir):
strError = "Cant find directory [%s]" % parOutputDir
self.printError(strError)
raise Exception(strError)
self.outputDir = parOutputDir
foutModelCfg=os.path.join(self.outputDir,"%s%s" % (self.modelPrefix, self.extJsonTrainConfig))
foutSolverCfg=os.path.join(self.outputDir,"%s%s" % (self.modelPrefix, self.extJsonSolverState))
foutModelWeights=os.path.join(self.outputDir,'%s_iter_%06d.%s' % (self.modelPrefix,self.currentIter,self.extModelWeights))
#
#FIXME: this is a temporary solution, fix it in the future!
tmpOptimizerCfg = self.model.optimizer.get_config()
tmpOptimizerCfg['name'] = getKerasOptimizerName(self.model.optimizer)
jsonSolverState={
'optimizer' : tmpOptimizerCfg,
'loss' : self.model.loss,
'metrics' : self.model.metrics_names,
'dataset-id' : self.batcherLMDB.cfg.dbId,
'pathModelConfig' : "%s" % os.path.basename(self.pathModelConfig),
'sizeBatch' : self.sizeBatch,
'numEpoch' : self.numEpoch,
'currentIter' : self.currentIter,
'intervalSaveModel' : self.intervalSaveModel,
'intervalValidation': self.intervalValidation,
'printInterval' : self.printInterval,
'modelPrefix' : "%s" % self.modelPrefix,
'modelName' : self.modelName,
'deviceType' : self.deviceType
}
# FIXME: check whether the [pathModelConfig] item is really needed
txtJsonSolverState = json.dumps(jsonSolverState, indent=4)
with open(foutSolverCfg, 'w') as fslv:
fslv.write(txtJsonSolverState)
#
with open(foutModelCfg, 'w') as fcfg:
fcfg.write(self.model.to_json(sort_keys=True, indent=4, separators=(',', ': ')))
if isSaveWeights:
self.model.save_weights(foutModelWeights, overwrite=True)
# Print message when model saved (for Digits)
print(("keras-savestate#%s#%s#%s|%s|%s") % (
'I',
time.strftime('%Y.%m.%d-%H:%M:%S'),
os.path.abspath(foutModelCfg),
os.path.abspath(foutSolverCfg),
os.path.abspath(foutModelWeights)
))
def getTrainingStatesInDir(self, pathTrainDir, isReturnAllWeightsPath=False):
"""
Explore a directory with training output and return the paths to the saved state files.
:param pathTrainDir: path to directory with training-output
:return: None or list [pathModelConfigJson, pathSolverStateJson, pathModelWeights]
"""
if not os.path.isdir(pathTrainDir):
strError = "Cant find directory [%s]" % pathTrainDir
self.printError(strError)
return None
lstModelConfig = glob.glob('%s/*%s' % (pathTrainDir, self.extJsonTrainConfig))
lstSolverStates = glob.glob('%s/*%s' % (pathTrainDir, self.extJsonSolverState))
lstModelWeights = glob.glob('%s/*_iter_[0-9]*.%s' % (pathTrainDir, self.extModelWeights))
if len(lstModelConfig)<1:
strError = 'Cant find ModelConfig [%s] files in directory [%s]' % (self.extJsonTrainConfig, pathTrainDir)
self.printError(strError)
return None
if len(lstSolverStates)<1:
strError = 'Cant find Solver-States [%s] files in directory [%s]' % (self.extJsonSolverState, pathTrainDir)
self.printError(strError)
return None
if len(lstModelWeights) < 1:
strError = 'Cant find Model-Weights [%s] files in directory [%s]' % (self.extModelWeights, pathTrainDir)
self.printError(strError)
return None
lstModelConfig = sorted(lstModelConfig)
lstSolverStates = sorted(lstSolverStates)
lstModelWeights = sorted(lstModelWeights)
pathModelConfig = lstModelConfig[-1]
pathSolverState = lstSolverStates[-1]
if not isReturnAllWeightsPath:
pathModelWeight = lstModelWeights[-1]
else:
pathModelWeight = lstModelWeights
return [pathModelConfig, pathSolverState, pathModelWeight]
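# With the default prefix the returned list would typically look like (illustrative
# paths only):
#   ['<dir>/keras_model_trainconfig.json',
#    '<dir>/keras_model_solverstate.json',
#    '<dir>/keras_model_iter_000100.h5kerasmodel']
# where the weights entry is the lexicographically last match unless
# isReturnAllWeightsPath is True, in which case all weight paths are returned.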
def loadModelFromTrainingStateInDir(self, pathTrainDir, isLoadLMDBReader=True):
self.cleanModel()
stateConfigs = self.getTrainingStatesInDir(pathTrainDir)
if stateConfigs is None:
strError = 'Cannot find a saved model state in directory [%s]' % pathTrainDir
self.printError(strError)
raise Exception(strError)
pathModelConfig = stateConfigs[0]
pathSolverState = stateConfigs[1]
pathModelWeight = stateConfigs[2]
self.loadModelFromTrainingState(pathModelConfig=pathModelConfig,
pathSolverState=pathSolverState,
pathModelWeight=pathModelWeight,
isLoadLMDBReader=isLoadLMDBReader)
def loadModelFromTaskModelDir(self, pathTaskDir):
pathConfigModel = os.path.join(pathTaskDir, CFG_MODEL_TRAIN)
pathConfigSolver = os.path.join(pathTaskDir, CFG_SOLVER)
self.loadModelFromTrainingState(pathModelConfig=pathConfigModel,
pathSolverState=pathConfigSolver)
self.outputDir = pathTaskDir
def loadModelFromTrainingState(self, pathModelConfig, pathSolverState,
pathModelWeight=None, pathLMDBDataset=None, isLoadLMDBReader=True):
"""
Load a Keras model from a trained state (when a path to model weights is given),
or from the initial config only.
:param pathModelConfig: path to Model Config in JSON format
:param pathSolverState: path to SolverState Config in JSON format
:param pathModelWeight: path to Model Weights as binary Keras dump
:param pathLMDBDataset: path to LMDB-Dataset, if None -> skip
:param isLoadLMDBReader: load or not LMDBReader from SolverState Config
:return: None
"""
self.cleanModel()
# (1) Load Model Config from Json:
with open(pathModelConfig, 'r') as fModelConfig:
tmpStr = fModelConfig.read()
self.model = keras.models.model_from_json(tmpStr)
if self.model is None:
strError = 'Invalid Model config in file [%s]' % pathModelConfig
self.printError(strError)
raise Exception(strError)
# (2) Load SolverState Config from Json:
with open(pathSolverState) as fSolverState:
tmpStr = fSolverState.read()
configSolverState = json.loads(tmpStr)
if configSolverState is None:
strError = 'Invalid SolverState config in file [%s]' % pathSolverState
self.printError(strError)
raise Exception(strError)
if pathLMDBDataset is not None:
configSolverState['dataset-id'] = pathLMDBDataset
# (3) Load Model Weights:
if pathModelWeight is not None:
self.model.load_weights(pathModelWeight)
# (4) Reconfigure Model State:
self.intervalSaveModel = configSolverState['intervalSaveModel']
self.intervalValidation = configSolverState['intervalValidation']
self.numEpoch = configSolverState['numEpoch']
self.currentIter = configSolverState['currentIter']
self.sizeBatch = configSolverState['sizeBatch']
self.modelPrefix = configSolverState['modelPrefix']
if 'modelName' in configSolverState.keys():
self.modelName = configSolverState['modelName']
if 'deviceType' in configSolverState.keys():
self.deviceType = configSolverState['deviceType']
if isLoadLMDBReader:
self.loadBatcherLMDB(configSolverState['dataset-id'], self.sizeBatch)
self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
self.currentEpoch = np.floor(self.currentIter / self.numIterPerEpoch)
else:
self.numIterPerEpoch = 1
self.currentEpoch = 0
self.pathModelConfig = pathModelConfig
# (5) Configure Loss, Solver, Metrics and compile model
tmpCfgOptimizer = configSolverState['optimizer'].copy()
parOptimizer = keras.optimizers.get(tmpCfgOptimizer)
parLoss = configSolverState['loss']
# parMetrics = configSolverState['metrics']
#TODO: this looks like a bug or a design flaw in Keras: 'loss' is not a known metric, so this is a temporary fix
parMetrics = []
if 'acc' in configSolverState['metrics']:
parMetrics.append('accuracy')
self.model.compile(optimizer=parOptimizer, loss=parLoss, metrics=parMetrics)
def runTrain(self, paramNumEpoch=-1):
if not self.isOk():
strErr = 'KerasTrainer is not correctly initialized'
self.printError(strErr)
raise Exception(strErr)
if paramNumEpoch>0:
self.numEpoch = paramNumEpoch
for ei in xrange(self.numEpoch):
self.trainOneEpoch()
if (ei%self.intervalSaveModel)==0:
self.saveModelState()
if (ei%self.intervalValidation)==0:
pass
#########################
if __name__ == '__main__':
pass
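# Minimal usage sketch (assumption: 'my-db-job' and 'model.json' below are
# placeholders for a real LMDB job id and a Keras model-config JSON; kept
# commented out because training requires an actual dataset):
# trainer = KerasTrainer()
# trainer.buildModel(pathLMDBJob='my-db-job', pathModelConfig='model.json',
#                    sizeBatch=32, numEpoch=5, outputDir='/tmp')
# trainer.runTrain()
# trainer.saveModelState()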
|
|
#! /usr/bin/env python
# encoding: utf-8
#
# Copyright (C) 2011 Serge Monkewitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Serge Monkewitz, IPAC/Caltech
#
# Work on this project has been sponsored by LSST and SLAC/DOE.
#
from __future__ import with_statement
import itertools
import glob
import optparse
import os
import re
import string
import subprocess
import sys
import tempfile
import traceback
try:
import xml.etree.cElementTree as etree
except:
import xml.etree.ElementTree as etree
_have_mako = True
try:
from mako.template import Template
from mako.lookup import TemplateLookup
except:
_have_mako = False
# -- Helper functions ----
def _extract_content(elt, maybe_empty=False):
i = len(elt.tag)
s = etree.tostring(elt, "utf-8")
i = s.find(">") + 1
j = s.rfind("<")
s = s[i:j]
if not maybe_empty and len(s.strip()) == 0:
raise RuntimeError('<%s> is empty or contains only whitespace' % elt.tag)
return s
def _check_attrib(elt, attrib):
for key in elt.keys():
if key not in attrib:
raise RuntimeError('<%s> has illegal attribute %s' % (elt.tag, key))
def _find_one(parent, tag, required=True, attrib=[]):
elts = parent.findall(tag)
if elts == None or len(elts) == 0:
if required:
raise RuntimeError('<%s> must contain exactly one <%s> child' % (parent.tag, tag))
return None
elif len(elts) != 1:
raise RuntimeError('<%s> contains multiple <%s> children' % (parent.tag, tag))
_check_attrib(elts[0], set(attrib))
return elts[0]
def _find_many(parent, tag, required=True, attrib=[]):
elts = parent.findall(tag)
if elts == None or len(elts) == 0:
if required:
raise RuntimeError('<%s> must contain at least one <%s> child' % (parent.tag, tag))
return []
attrib = set(attrib)
for elt in elts:
_check_attrib(elt, attrib)
return elts
def _validate_children(parent, child_tags):
child_tags = set(child_tags)
for child in parent:
if child.tag not in child_tags:
raise RuntimeError('<%s> cannot contain <%s> children' % (parent.tag, child.tag))
# -- DOM classes for scisql documentation ----
class Description(object):
"""A description of a UDF or stored procedure, extracted from
the contents of a <desc> tag. The following attributes are available:
full: Full UDF/stored procedure description, stored as a unicode string
containing an XHTML fragment.
brief: The first sentence of the full description
"""
def __init__(self, parent):
desc = _find_one(parent, 'desc')
self.full = _extract_content(desc).strip()
i = self.full.find(".")
if i == -1: self.brief = re.sub(r'\s+', ' ', self.full)
else: self.brief = re.sub(r'\s+', ' ', self.full[:i + 1])
class Note(object):
"""A note, e.g. about UDF usage. The following attributes are available:
clazz: The kind of note, e.g. "warning", "info", "error"...
note: The note itself, stored as a UTF-8 string containing
an XHTML fragment.
"""
def __init__(self, elt):
self.clazz = elt.get('class', '')
self.content = _extract_content(elt).strip()
class Example(object):
"""A source code example, e.g. of UDF usage. The following attributes are available:
lang: The language of the example source code, typically 'sql' or 'bash'.
test: True if the source should be run during example verification.
source: The example source code, stored as a string containing an
XHTML fragment.
"""
def __init__(self, elt):
self.lang = elt.get('lang', 'sql')
self.test = elt.get('test', 'true') == 'true'
s = _extract_content(elt)
# dedent by the amount of leading whitespace in the first
# line containing non-whitespace characters
lines = s.split('\n')
trim = None
for i in xrange(len(lines)):
line = lines[i]
if len(line.strip()) == 0:
lines[i] = ''
continue
if trim == None:
trim = re.match(r'\s*', line).group(0)
if not line.startswith(trim):
raise RuntimeError('inconsistent leading whitespace in <example> source code')
lines[i] = line[len(trim):]
self.source = '\n'.join(lines)
class Argument(object):
"""An argument for a UDF or stored procedure. The following attributes are available:
kind: One of 'IN', 'INOUT', or 'OUT' (always 'IN' for a UDF)
name: Argument name.
type: SQL argument type.
units: Expected units, may be None
brief: Brief argument description
description: Full argument description
"""
def __init__(self, elt):
self.kind = elt.get('kind', 'IN').upper()
if self.kind not in ('IN', 'INOUT', 'OUT'):
raise RuntimeError('<%s> kind attribute value must be "IN", "INOUT", or "OUT"' % elt.tag)
attr = elt.get('name')
if attr == None or len(attr.strip()) == 0:
raise RuntimeError('<%s> missing name attribute' % elt.tag)
self.name = attr.strip()
attr = elt.get('type')
if attr == None or len(attr.strip()) == 0:
raise RuntimeError('<%s> missing type attribute' % elt.tag)
self.type = attr.strip()
self.units = elt.get('units', '')
self.description = _extract_content(elt).strip()
class ArgumentList(object):
"""An argument list, e.g. for a UDF. The following attributes are available:
varargs: True if the argument list has a variable number of arguments
args: A list of Argument objects
"""
def __init__(self, elt, attrib):
_validate_children(elt, ['arg'])
self.varargs = elt.get('varargs', 'false') == 'true'
self.args = map(Argument, _find_many(elt, 'arg', required=False, attrib=attrib))
class Udf(object):
"""Documentation for a UDF. The following attributes are available:
aggregate: True if this an aggregate UDF
internal: True if this UDF is not intended for direct use
name: The name of the UDF
return_type: The return type of the UDF
section: The name of the section (category, group) the UDF belongs to
arglists: A list of ArgumentList objects for the UDF.
description: A Description for the UDF.
examples: A list of usage Example objects, may be empty.
notes: A list of Note objects, may be empty.
"""
def __init__(self, elt):
_check_attrib(elt, ['aggregate', 'internal', 'name', 'return_type', 'section'])
_validate_children(elt, ['desc','notes','args','example'])
self.aggregate = elt.get('aggregate', 'false') == 'true'
self.internal = elt.get('internal', 'false') == 'true'
attr = elt.get('name')
if attr == None or len(attr.strip()) == 0:
raise RuntimeError('<udf> element has missing or empty name attribute')
self.name = attr.strip()
attr = elt.get('return_type')
if attr == None or len(attr.strip()) == 0:
raise RuntimeError('<udf> element has missing or empty return_type attribute')
self.return_type = attr.strip()
self.section = elt.get('section', 'misc')
self.arglists = map(lambda x: ArgumentList(x, ['name', 'type', 'units']),
_find_many(elt, 'args', attrib=['varargs']))
self.description = Description(elt)
self.examples = map(Example, _find_many(elt, 'example', required=False, attrib=['lang', 'test']))
notes = _find_one(elt, 'notes', required=False)
if notes == None:
self.notes = []
else:
self.notes = map(Note, _find_many(notes, 'note', attrib=['class']))
class Proc(object):
"""Documentation for a stored procedure. The following attributes are available:
internal: True if this procedure is not intended for direct use
name: The name of the procedure
section: The name of the section (category, group) the procedure belongs to
args: A list of Argument objects for the procedure
description: A Description for the procedure.
examples: A list of usage Example objects, may be empty.
notes: A list of Note objects, may be empty.
"""
def __init__(self, elt):
_check_attrib(elt, ['internal', 'name', 'section'])
_validate_children(elt, ['desc','notes','args','example'])
self.internal = elt.get('internal', 'false') == 'true'
attr = elt.get('name')
if attr == None or len(attr.strip()) == 0:
raise RuntimeError('<proc> element has missing or empty name attribute')
self.name = attr.strip()
self.section = elt.get('section', 'misc')
args = _find_one(elt, 'args', required=False)
if args == None:
self.args = []
else:
self.args = ArgumentList(args, ['kind', 'name', 'type', 'units']).args
self.description = Description(elt)
self.examples = map(Example, _find_many(elt, 'example', required=False, attrib=['lang', 'test']))
notes = _find_one(elt, 'notes', required=False)
if notes == None:
self.notes = []
else:
self.notes = map(Note, _find_many(notes, 'note', attrib=['class']))
class Section(object):
"""A documentation section; contains information about a group/category of UDFs,
possibly including worked examples. The following attributes are available:
name: Section name, must not contain spaces
title: Section title
content: XHTML section content in string form.
examples: A list of Example objects in the section content, in order of
occurrence.
"""
def __init__(self, elt):
attr = elt.get('name')
if attr == None or len(attr.strip()) == 0:
raise RuntimeError('<section> element has missing or empty name attribute')
self.name = attr.strip()
attr = elt.get('title')
if attr == None or len(attr.strip()) == 0:
raise RuntimeError('<section> element has missing or empty title attribute')
self.title = attr.strip()
self.udfs = []
self.procs = []
# Extract example source code
exlist = list(elt.getiterator('example'))
self.examples = map(Example, exlist)
# Turn <example> tags into <pre> tags with the appropriate prettify attributes
for ex in exlist:
ex.tag = 'pre'
lang = ex.get('lang', 'sql')
for k in ex.keys():
del ex.attrib[k]
ex.set('class', 'prettyprint lang-%s linenums' % lang)
self.content = _extract_content(elt)
# -- Extracting documentation from source code
def ast(elt):
if elt.tag == 'udf':
return Udf(elt)
elif elt.tag == 'proc':
return Proc(elt)
else:
raise RuntimeError('Unrecognized XML element <%s>' % elt.tag)
def extract_docs_from_c(filename):
with open(filename, 'rb') as f:
text = f.read()
# Extract comment blocks from file - note that nested comment blocks
# are not dealt with properly
comments = []
beg = text.find("/**")
while beg != -1:
end = text.find("*/", beg + 3)
if end == -1:
break
comments.append(text[beg + 3: end].strip())
beg = text.find("/**", end + 2)
docs = []
for block in comments:
if block.find("</udf>") == -1 and block.find("</proc>") == -1:
continue
# Strip leading * from each line in block
lines = block.split('\n')
stripped_lines = []
for line in lines:
m = re.match(r'\s*\*', line)
if m != None:
line = line[len(m.group(0)):]
stripped_lines.append(string.Template(line).safe_substitute(os.environ))
xml = '\n'.join(stripped_lines)
try:
elt = etree.XML(xml)
docs.append(ast(elt))
except:
print >>sys.stderr, "Failed to parse documentation block:\n\n%s\n\n" % xml
print >>sys.stderr, traceback.format_exception_only(sys.exc_type, sys.exc_value)
return docs
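# Illustrative input (an assumption, not taken from a real source file): a C comment
# block of the form
#   /**
#    * <udf name="example_udf" return_type="DOUBLE" section="misc">
#    *   <desc>One-sentence summary. Longer description follows.</desc>
#    *   <args><arg name="x" type="DOUBLE" units="deg">An angle.</arg></args>
#    * </udf>
#    */
# would be parsed by extract_docs_from_c() into a single Udf object.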
def extract_docs_from_sql(filename):
comments = []
with open(filename, 'rb') as f:
block = ''
for line in f:
m = re.match(r'\s*--', line)
if m != None:
block += line[len(m.group(0)):]
else:
if len(block) > 0:
comments.append(block)
block = ''
docs = []
for xml in comments:
if xml.find("</udf>") == -1 and xml.find("</proc>") == -1:
continue
try:
elt = etree.XML(string.Template(xml).safe_substitute(os.environ))
docs.append(ast(elt))
except:
print >>sys.stderr, "Failed to parse documentation block:\n\n%s\n\n" % xml
print >>sys.stderr, traceback.format_exception_only(sys.exc_type, sys.exc_value)
return docs
def extract_sections(filename):
with open(filename, 'rb') as f:
xml = f.read()
elt = etree.XML(string.Template(xml).safe_substitute(os.environ))
if elt.tag != 'sections':
raise RuntimeError('Root element of a section documentation file must be <sections>!')
return map(Section, _find_many(elt, 'section', attrib=['name', 'title']))
def extract_docs(root):
nodes = []
for file in glob.glob(os.path.join(root, 'src', 'udfs', '*.c')):
nodes.extend(extract_docs_from_c(file))
for file in glob.glob(os.path.join(root, 'template', '*.mysql')):
nodes.extend(extract_docs_from_sql(file))
sections = extract_sections(os.path.join(root, 'tools', 'templates', 'sections.xml'))
secdict = dict((x.name, x) for x in sections)
for x in nodes:
if isinstance(x, Udf):
secdict[x.section].udfs.append(x)
elif isinstance(x, Proc):
secdict[x.section].procs.append(x)
for sec in sections:
sec.udfs.sort(key=lambda x: x.name)
sec.procs.sort(key = lambda x: x.name)
return sections
# -- Testing examples in documentation ----
def _test(obj):
nfail = 0
for ex in obj.examples:
if not ex.test or ex.lang not in ('sql', 'bash'):
continue
with tempfile.TemporaryFile() as source:
if ex.lang == 'sql':
source.write('USE scisql_demo;\n\n')
args = [ os.environ['MYSQL'], '--defaults-file=%s' % os.environ['MYSQL_CNF'] ]
else:
args = [ '/bin/bash' ]
source.write(ex.source)
source.flush()
source.seek(0)
try:
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(args, shell=False, stdin=source, stdout=devnull)
except:
print >>sys.stderr, "Failed to run documentation example:\n\n%s\n\n" % ex.source
nfail += 1
return nfail
def run_doc_examples(sections):
"""Runs all examples marked as testable in the sciSQL documentation.
"""
nfail = 0
for sec in sections:
nfail += _test(sec)
for elt in itertools.chain(sec.udfs, sec.procs):
nfail += _test(elt)
return nfail
# -- Documentation generation ----
def gen_docs(root, sections, html=True):
"""Generates documentation for sciSQL, either in HTML or as a set of
MySQL tables (for the LSST schema browser).
"""
lookup = TemplateLookup(directories=[os.path.join(root, 'tools', 'templates')])
if html:
template = lookup.get_template('index.mako')
with open(os.path.join(root, 'doc', 'index.html'), 'wb') as f:
f.write(template.render(sections=sections,
SCISQL_VERSION=os.environ['SCISQL_VERSION']))
else:
template = lookup.get_template('lsst_schema_browser.mako')
with open('metadata_scisql.sql', 'wb') as f:
f.write(template.render(sections=sections,
SCISQL_VERSION=os.environ['SCISQL_VERSION']))
# -- Usage and command line processing
usage = """
%prog --help
Display usage information.
%prog
%prog test_docs
Make sure code samples in the documentation actually run.
%prog html_docs
Generate HTML documentation for sciSQL in doc/index.html.
%prog lsst_docs
Generate documentation in LSST schema browser format in
metadata_scisql.sql
"""
def main():
parser = optparse.OptionParser(usage=usage)
opts, args = parser.parse_args()
if len(args) > 1 or (len(args) == 1 and args[0] not in ('test_docs', 'html_docs', 'lsst_docs')):
parser.error("Too many arguments or illegal command")
root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sections = extract_docs(root)
if len(args) == 0 or args[0] == 'test_docs':
nfail = run_doc_examples(sections)
# The following files may have been created by the test procedure
for f in ['scisql_demo_htmid10.tsv', 'scisql_demo_ccds.tsv']:
filename = os.path.join("tmp", f)
if os.path.exists(filename):
os.remove(filename)
if nfail != 0:
sys.exit(1)
else:
if not _have_mako:
parser.error("You must install mako 0.4.x to generate documentation")
gen_docs(root, sections, html=(args[0] == 'html_docs'))
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import six
CustomType = namedtuple('CustomType', 'customize reset')
MetaInfo = namedtuple('MetaInfo', 'readonly is_typed source_name')
class SourceMeta(type):
"""Initialize subclasses and source base class"""
def __new__(self, name, bases, dct):
if all([not '_read' in dct,
name != 'Source',
not name.endswith('Mixin')]):
msg = '%s is missing the required "_read" method' % name
raise NotImplementedError(msg)
dct['_meta'] = MetaInfo(
readonly='_write' not in dct,
source_name=name,
is_typed=dct.get('_is_typed', True)
)
return super(SourceMeta, self).__new__(self, name, bases, dct)
def __call__(cls, *args, **kwargs):
instance = super(SourceMeta, cls).__call__(*args, **kwargs)
instance._initialized = True
return instance
@six.add_metaclass(SourceMeta)
class AbstractSource(object):
"""Source object"""
_initialized = False
def __init__(self, **kwargs):
# _parent is the parent object
# _parent_key is the key on the parent that led to this object
self._parent, self._parent_key = kwargs.pop('parent', (None, None))
# kwargs.get would override the metaclass settings
# so only change it if it's really given.
if 'meta' in kwargs:
self._meta = kwargs['meta']
def is_writable(self):
return not self._meta.readonly
def get(self, name, default=None):
try:
return self[name]
except KeyError:
return default
def setdefault(self, name, value):
try:
return self[name]
except KeyError:
self[name] = value
return value
def items(self):
return six.iteritems(self._get_data())
def update(self, *others):
self._check_writable()
data = self._get_data()
for other in others:
if isinstance(other, Source):
data.update(other.dump())
else:
data.update(other)
self._set_data(data)
def dump(self):
return self._get_data()
def is_typed(self):
return self._meta.is_typed
def _read(self):
raise NotImplementedError
def _write(self, data):
raise NotImplementedError
def _get_data(self):
"""Proxies the underlying data source
Using double underscores should prevent name clashes with
user defined keys.
"""
try:
return self._read()
except NotImplementedError:
return self._parent._get_data()[self._parent_key]
def _set_data(self, data):
self._check_writable()
try:
self._write(data)
except NotImplementedError:
result = self._parent._get_data()
result[self._parent_key] = data
self._parent._set_data(result)
def _check_writable(self):
if self._meta.readonly:
raise TypeError('%s is a read-only source' % self._meta.source_name)
def __getattr__(self, name):
# although the key was accessed with attribute style
# let's keep raising a KeyError to distinguish between
# internal and user data.
return self[name]
def __setattr__(self, attr, value):
self[attr] = value
def __getitem__(self, key):
attr = self._get_data()[key]
if isinstance(attr, dict):
return Source(parent=(self, key),
meta=self._meta,
)
return attr
def __setitem__(self, key, value):
if any([self._initialized is False,
key == '_initialized',
key in self.__dict__,
key in self.__class__.__dict__]):
super(AbstractSource, self).__setattr__(key, value)
else:
self._check_writable()
data = self._get_data()
data[key] = value
self._set_data(data)
def __delattr__(self, name):
del self[name]
def __delitem__(self, key):
self._check_writable()
data = self._get_data()
del data[key]
self._set_data(data)
def __len__(self):
return len(self._get_data().keys())
def __iter__(self):
return iter(self._get_data().keys())
def __eq__(self, other):
return self._get_data() == other
def __repr__(self):
return repr(self._get_data())
class LockedSourceMixin(AbstractSource):
def __init__(self, *args, **kwargs):
# user additions
self._locked = kwargs.pop('readonly', False)
super(LockedSourceMixin, self).__init__(*args, **kwargs)
def is_writable(self):
is_writable = super(LockedSourceMixin, self).is_writable()
return is_writable and not self._locked
def _check_writable(self):
super(LockedSourceMixin, self)._check_writable()
if self._locked:
raise TypeError('%s is locked and cannot be changed' % self._meta.source_name)
class CacheMixin(AbstractSource):
def __init__(self, *args, **kwargs):
# this mixin is also applied to child classes used as sublevel sources,
# which do not need caching, so caching defaults to off.
self._use_cache = kwargs.pop('cached', False)
self._cache = None
super(CacheMixin, self).__init__(*args, **kwargs)
def write_cache(self):
self._check_writable()
try:
self._write(self._cache)
except NotImplementedError:
self._parent.write_cache()
def _get_data(self):
if self._use_cache:
if not self._cache:
self._cache = self._read()
return self._cache
return super(CacheMixin, self)._get_data()
def _set_data(self, data):
self._check_writable()
if self._use_cache:
self._cache = data
else:
return super(CacheMixin, self)._set_data(data)
class CustomTypeMixin(AbstractSource):
def __init__(self, *args, **kwargs):
# the type map defaults to empty; sublevel sources receive the parent's
# type map explicitly via __getitem__.
self._custom_types = kwargs.pop('type_map', {})
super(CustomTypeMixin, self).__init__(*args, **kwargs)
def dump(self, with_custom_types=False):
if with_custom_types is False:
return super(CustomTypeMixin, self).dump()
def iter_dict(data):
for key, value in data.items():
if isinstance(value, dict):
yield key, dict(iter_dict(value))
else:
yield key, self._to_custom_type(key, value)
return dict(iter_dict(self._get_data()))
def _to_custom_type(self, key, value):
converter = self._custom_types.get(key)
return converter.customize(value) if converter else value
def _to_original_type(self, key, value):
converter = self._custom_types[key]
return converter.reset(value) if converter else value
def __getitem__(self, key):
attr = super(CustomTypeMixin, self).__getitem__(key)
if isinstance(attr, Source):
attr._custom_types = self._custom_types
return attr
return self._to_custom_type(key, attr)
def __setitem__(self, key, value):
if self._initialized:
if key in self._custom_types:
value = self._to_original_type(key, value)
super(CustomTypeMixin, self).__setitem__(key, value)
class Source(CacheMixin,
CustomTypeMixin,
LockedSourceMixin,
AbstractSource
):
"""Source class with all features enabled"""
|
|
#!/usr/bin/env python
"""
test2.py
[--log_file PATH]
[--verbose]
"""
################################################################################
#
# test2
#
#
# Copyright (c) 7/16/2010 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
import sys, os
# add self to search path for testing
if __name__ == '__main__':
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
module_name = os.path.split(sys.argv[0])[1]
module_name = os.path.splitext(module_name)[0];
else:
module_name = __name__
# Use import path from <<../python_modules>>
if __name__ == '__main__':
sys.path.insert(0, os.path.abspath(os.path.join(exe_path,"../..")))
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
from optparse import OptionParser
import StringIO
parser = OptionParser(version="%prog 1.0", usage = "\n\n %prog [options]")
parser.add_option("-i", "--input_file", dest="input_file",
metavar="FILE",
type="string",
help="Name and path of input file. "
"Defaults to reading from STDIN.")
#
# general options: verbosity / logging
#
parser.add_option("-v", "--verbose", dest = "verbose",
action="count", default=0,
help="Print more verbose messages for each additional verbose level.")
parser.add_option("-L", "--log_file", dest="log_file",
metavar="FILE",
type="string",
help="Name and path of log file")
parser.add_option("--skip_parameter_logging", dest="skip_parameter_logging",
action="store_true", default=False,
help="Do not print program parameters to log.")
parser.add_option("--debug", dest="debug",
action="count", default=0,
help="Set default program parameters in debugging mode.")
#
# pipeline
#
parser.add_option("-t", "--target_tasks", dest="target_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Target task(s) of pipeline.")
parser.add_option("-j", "--jobs", dest="jobs",
default=1,
metavar="N",
type="int",
help="Allow N jobs (commands) to run simultaneously.")
parser.add_option("-n", "--just_print", dest="just_print",
action="store_true", default=False,
help="Don't actually run any commands; just print the pipeline.")
parser.add_option("--flowchart", dest="flowchart",
metavar="FILE",
type="string",
help="Don't actually run any commands; just print the pipeline "
"as a flowchart.")
#
# Less common pipeline options
#
parser.add_option("--key_legend_in_graph", dest="key_legend_in_graph",
action="store_true", default=False,
help="Print out legend and key for dependency graph.")
parser.add_option("--draw_graph_horizontally", dest="draw_horizontally",
action="store_true", default=False,
help="Draw horizontal dependency graph.")
parser.add_option("--forced_tasks", dest="forced_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Pipeline task(s) which will be included even if they are up to date.")
# get help string
f =StringIO.StringIO()
parser.print_help(f)
helpstr = f.getvalue()
original_args = " ".join(sys.argv)
(options, remaining_args) = parser.parse_args()
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# #
# Debug: Change these #
# #
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
options.flowchart = "front_page_flowchart.png"
options.key_legend_in_graph = True
if options.debug:
options.log_file = os.path.join("test2.log")
options.verbose = 5
options.log_parameters = True
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# #
# Debug: Change these #
# #
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# mandatory options
#
mandatory_options = []
def check_mandatory_options (options, mandatory_options, helpstr):
"""
Check that the specified mandatory options have been defined
"""
missing_options = []
for o in mandatory_options:
if not getattr(options, o):
missing_options.append("--" + o)
if not len(missing_options):
return
raise Exception("Missing mandatory parameter%s: %s.\n\n%s\n\n" %
("s" if len(missing_options) > 1 else "",
", ".join(missing_options),
helpstr))
check_mandatory_options (options, mandatory_options, helpstr)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
from ruffus import *
from ruffus.ruffus_exceptions import JobSignalledBreak
#from json import dumps
#from collections import defaultdict
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Logger
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
import logging
import logging.handlers
MESSAGE = 15
logging.addLevelName(MESSAGE, "MESSAGE")
def setup_std_logging (logger, log_file, verbose):
"""
set up logging using programme options
"""
class debug_filter(logging.Filter):
"""
Ignore INFO messages
"""
def filter(self, record):
return logging.INFO != record.levelno
class NullHandler(logging.Handler):
"""
for when there is no logging
"""
def emit(self, record):
pass
# We are interested in all messages
logger.setLevel(logging.DEBUG)
has_handler = False
# log to file if that is specified
if log_file:
handler = logging.FileHandler(log_file, delay=False)
handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)6s - %(message)s"))
handler.setLevel(MESSAGE)
logger.addHandler(handler)
has_handler = True
# log to stderr if verbose
if verbose:
stderrhandler = logging.StreamHandler(sys.stderr)
stderrhandler.setFormatter(logging.Formatter(" %(message)s"))
stderrhandler.setLevel(logging.DEBUG)
if log_file:
stderrhandler.addFilter(debug_filter())
logger.addHandler(stderrhandler)
has_handler = True
# no logging
if not has_handler:
logger.addHandler(NullHandler())
#
# set up log
#
logger = logging.getLogger(module_name)
setup_std_logging(logger, options.log_file, options.verbose)
#
# Allow logging across Ruffus pipeline
#
def get_logger (logger_name, args):
return logger
from ruffus.proxy_logger import *
(logger_proxy,
logging_mutex) = make_shared_logger_and_proxy (get_logger,
module_name,
{})
#
# log programme parameters
#
if not options.skip_parameter_logging:
programme_name = os.path.split(sys.argv[0])[1]
logger.info("%s %s" % (programme_name, original_args))
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Pipeline
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
@files(None, "a.1")
def task1(input_file, output_file):
open(output_file, "w")
@transform(task1, suffix("1"), "2")
def task2(input_file, output_file):
open(output_file, "w")
@transform(task2, suffix("2"), "3")
def task3(input_file, output_file):
open(output_file, "w")
@transform(task3, suffix("3"), "4")
def task4(input_file, output_file):
open(output_file, "w")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import time
open("a.2", "w")
time.sleep(1)
open("a.1", "w")
time.sleep(1)
open("a.3", "w")
pipeline_printout_graph ( open(options.flowchart, "w"),
os.path.splitext(options.flowchart)[1][1:],
[task4],
no_key_legend = not options.key_legend_in_graph,
user_colour_scheme = {"colour_scheme_index":0},
pipeline_name = "Pipeline Flowchart:",
size = (6,5),
dpi = 72,
)
os.unlink("a.1")
os.unlink("a.2")
os.unlink("a.3")
#pipeline_run(options.target_tasks, options.forced_tasks,
# multiprocess = options.jobs,
# logger = stderr_logger,
# verbose = options.verbose)
|
|
import binascii
import bsdiff4
import os
import tempfile
import unittest
from fuse import FUSE
from basefs import exceptions
from basefs.keys import Key
from basefs.logs import LogEntry
from basefs.views import View
from . import utils
class ViewTests(unittest.TestCase):
def setUp(self):
__, self.logpath = tempfile.mkstemp()
self.log, self.root_key = utils.bootstrap(self.logpath)
self.log.load()
def tearDown(self):
os.remove(self.logpath)
def rebuild(self, view):
prev = view.paths
prev_str = str(view.root)
view.build()
for path, node in view.paths.items():
self.assertEqual(prev.pop(path).entry, node.entry)
self.assertEqual({}, prev)
self.assertEqual(len(prev_str), len(str(view.root)))  # len() is used because of random dict ordering
def test_build(self):
view = View(self.log, self.root_key)
view.build()
path = os.path.join(os.sep, '.cluster')
self.assertEqual(b'127.0.0.1\n', view.get(path).content)
def test_mkdir(self):
view = View(self.log, self.root_key)
view.build()
home_path = os.sep + utils.random_ascii()
view.mkdir(home_path)
self.assertEqual(LogEntry.MKDIR, view.paths.get(home_path).entry.action)
user_path = os.path.join(home_path, utils.random_ascii())
view.mkdir(user_path)
self.assertEqual(LogEntry.MKDIR, view.get(user_path).entry.action)
not_path = os.path.join(os.sep, utils.random_ascii(), utils.random_ascii())
with self.assertRaises(exceptions.DoesNotExist):
view.mkdir(not_path)
with self.assertRaises(exceptions.DoesNotExist):
view.get(not_path)
self.rebuild(view)
self.assertEqual(LogEntry.MKDIR, view.get(home_path).entry.action)
self.assertEqual(LogEntry.MKDIR, view.get(user_path).entry.action)
with self.assertRaises(exceptions.DoesNotExist):
view.get(not_path)
# TODO same path name nested
def test_permission(self):
key = Key.generate()
view = View(self.log, key)
view.build()
path = os.path.join(os.sep, utils.random_ascii())
with self.assertRaises(exceptions.PermissionDenied):
view.mkdir(path)
def test_write(self):
view = View(self.log, self.root_key)
view.build()
path = os.path.join(os.sep, utils.random_ascii())
content = utils.random_ascii()
view.write(path, content)
self.assertEqual(LogEntry.WRITE, view.get(path).entry.action)
self.assertEqual(content.encode(), view.get(path).content)
self.rebuild(view)
self.assertEqual(content.encode(), view.get(path).content)
with self.assertRaises(exceptions.Exists):
view.mkdir(path)
view.delete(path)
view.mkdir(path)
path = os.path.join(path, 'content-%s' % utils.random_ascii())
alt_content = utils.random_ascii()
view.write(path, alt_content)
self.assertEqual(alt_content.encode(), view.get(path).content)
alt_content += utils.random_ascii()
view.write(path, alt_content)
self.assertEqual(alt_content.encode(), view.get(path).content)
alt_content = utils.random_ascii(512**2)
view.write(path, alt_content)
self.assertEqual(alt_content.encode(), view.get(path).content)
view.delete(path)
self.assertEqual(LogEntry.DELETE, view.get(path).entry.action)
view.mkdir(path)
self.assertEqual(LogEntry.MKDIR, view.get(path).entry.action)
view.delete(path)
self.assertEqual(LogEntry.DELETE, view.get(path).entry.action)
view.write(path, alt_content)
self.assertEqual(alt_content.encode(), view.get(path).content)
def test_delete(self):
view = View(self.log, self.root_key)
view.build()
# Delete File
file_path = os.path.join(os.sep, utils.random_ascii())
content = utils.random_ascii()
view.write(file_path, content)
self.assertEqual(LogEntry.WRITE, view.get(file_path).entry.action)
self.assertEqual(content.encode(), view.get(file_path).content)
view.delete(file_path)
self.assertEqual(LogEntry.DELETE, view.get(file_path).entry.action)
# Reload
self.rebuild(view)
self.assertEqual(LogEntry.DELETE, view.get(file_path).entry.action)
# Delete Dir
home_path = os.path.join(os.sep, utils.random_ascii())
view.mkdir(home_path)
self.assertEqual(LogEntry.MKDIR, view.paths.get(home_path).entry.action)
view.delete(home_path)
self.assertEqual(LogEntry.DELETE, view.get(home_path).entry.action)
def test_deleted_nested_dir(self):
view = View(self.log, self.root_key)
view.build()
home_path = os.path.join(os.sep, 'home-' + utils.random_ascii())
user_path = os.path.join(home_path, 'user-' + utils.random_ascii())
view.mkdir(home_path)
view.mkdir(user_path)
view.delete(home_path)
self.assertEqual(LogEntry.DELETE, view.get(home_path).entry.action)
with self.assertRaises(exceptions.DoesNotExist):
view.get(user_path)
self.rebuild(view)
self.assertEqual(LogEntry.DELETE, view.get(home_path).entry.action)
with self.assertRaises(exceptions.DoesNotExist):
view.get(user_path)
def test_recreate_deleted(self):
view = View(self.log, self.root_key)
view.build()
home_path = os.path.join(os.sep, 'home-' + utils.random_ascii())
user_path = os.path.join(home_path, 'user-' + utils.random_ascii())
view.mkdir(home_path)
view.mkdir(user_path)
file_path = os.path.join(user_path, utils.random_ascii())
file_content = utils.random_ascii()
view.write(file_path, file_content)
self.assertEqual(file_content.encode(), view.get(file_path).content)
view.delete(home_path)
with self.assertRaises(exceptions.DoesNotExist):
view.get(file_path)
view.mkdir(home_path)
with self.assertRaises(exceptions.DoesNotExist):
view.get(file_path)
with self.assertRaises(exceptions.DoesNotExist):
view.get(user_path)
# File
view.mkdir(user_path)
new_file_content = utils.random_ascii()
view.write(file_path, new_file_content)
self.assertEqual(new_file_content.encode(), view.get(file_path).content)
# Reload
self.rebuild(view)
self.assertEqual(new_file_content.encode(), view.get(file_path).content)
view.get(user_path)
view.delete(file_path)
new_file_content = utils.random_ascii()
view.write(file_path, new_file_content)
self.assertEqual(new_file_content.encode(), view.get(file_path).content)
def test_grant(self):
view = View(self.log, self.root_key)
view.build()
home_path = os.path.join(os.sep, utils.random_ascii())
view.mkdir(home_path)
key = Key.generate()
view.grant(home_path, 'user', key)
prev = str(view.root)
view.build()
self.assertEqual(len(prev), len(str(view.root)))
# Change key
view = View(self.log, key)
view.build()
content = utils.random_ascii()
file_path = os.path.join(os.sep, utils.random_ascii())
with self.assertRaises(exceptions.PermissionDenied):
view.write(file_path, content)
user_path = os.path.join(home_path, utils.random_ascii())
view.mkdir(user_path)
self.assertEqual(LogEntry.MKDIR, view.get(user_path).entry.action)
file_path = os.path.join(user_path, utils.random_ascii())
content = utils.random_ascii()
view.write(file_path, content)
self.assertEqual(content.encode(), view.get(file_path).content)
view = View(self.log, self.root_key)
view.build()
view.write(file_path, content)
self.assertEqual(content.encode(), view.get(file_path).content)
file_path = os.path.join(user_path, utils.random_ascii())
content = utils.random_ascii()
view.write(file_path, content)
self.assertEqual(content.encode(), view.get(file_path).content)
self.rebuild(view)
# grant/revoke to files
def test_revoke(self):
root_view = View(self.log, self.root_key)
root_view.build()
home_path = os.path.join(os.sep, 'home-' + utils.random_ascii())
root_view.mkdir(home_path)
key = Key.generate()
root_view.grant(home_path, 'user', key)
view = View(self.log, key)
view.build()
user_path = os.path.join(home_path, 'user-' + utils.random_ascii())
view.mkdir(user_path)
root_view.build()
file_path = os.path.join(user_path, 'file2-' + utils.random_ascii())
file_content = ('content1-' + utils.random_ascii(1024))*32
root_view.write(file_path, file_content)
self.assertEqual(file_content.encode(), root_view.get(file_path).content)
root_view.revoke(home_path, 'user')
self.assertEqual(file_content.encode(), root_view.get(file_path).content)
self.assertEqual(file_path, root_view.get(file_path).path)
view.build()
# with open(self.logpath, 'r') as r:
# print(r.read())
print(self.log.print_tree(view=view, color=True))
root_view.build()
# TODO tree eq after build, except the revoke branch
# TODO test maintain current state (file written by revoked user)
print(self.log.print_tree(view=root_view, color=True))
alt_file_content = 'content2-' + utils.random_ascii()
with self.assertRaises(exceptions.DoesNotExist):
view.write(file_path, alt_file_content)
def test_dir_file_exists_conflict(self):
view = View(self.log, self.root_key)
view.build()
path = os.path.join(os.sep, utils.random_ascii())
content = utils.random_ascii()
view.write(path, content)
with self.assertRaises(exceptions.Exists):
view.mkdir(path)
def test_branch_conflict(self):
view = View(self.log, self.root_key)
view.build()
home_path = os.path.join(os.sep, 'home-' + utils.random_ascii())
view.mkdir(home_path)
key = Key.generate()
view.grant(home_path, 'user', key)
view = View(self.log, key)
view.build()
parent_node = view.get(home_path)
user_path = os.path.join(home_path, 'user-' + utils.random_ascii())
max_hash = None
enc_content = ''
for ix in range(12):
content = 'content-' + utils.random_ascii(32)
prev = enc_content
enc_content = bsdiff4.diff(enc_content, content)
entry = self.log.write(parent_node.entry, user_path, key, attachment=enc_content)
max_hash = max(max_hash, entry.hash) if max_hash else entry.hash
view = View(self.log, self.root_key)
view.build()
self.assertEqual(bsdiff4.patch(prev, self.log.entries[max_hash].get_content()), view.get(user_path).content)
# Admin branch more power
admin_content = 'content-' + utils.random_ascii(32)
content = bsdiff4.diff(enc_content, admin_content)
self.log.write(parent_node.entry, user_path, self.root_key, attachment=content)
view.build()
self.assertEqual(admin_content.encode(), view.get(user_path).content)
alt_content = bsdiff4.diff(content, ('content-' + utils.random_ascii(32)).encode())
self.log.write(parent_node.entry, user_path, key, attachment=alt_content)
self.assertEqual(admin_content.encode(), view.get(user_path).content)
# Grant consistency with prev state
view.grant(os.sep, 'user', key)
self.assertEqual(admin_content.encode(), view.get(user_path).content)
view.build()
self.assertEqual(admin_content.encode(), view.get(user_path).content)
# Test prints
self.log.print_tree(view=view, color=True)
self.log.print_tree(view=view, ascii=True)
# TODO test non state branch weight
def test_symlink(self):
view = View(self.log, self.root_key)
view.build()
view.symlink('/kakas', '/rata')
print(view.get('/kakas').content)
def test_hardlink(self):
view = View(self.log, self.root_key)
view.build()
rata_node = view.mkdir('/rata')
view.link('/home', rata_node.entry.hash)
print(view.get('/home') == rata_node)
kakas_node = view.write('/kakas', b'hola')
view.link('/kakas_link', kakas_node.entry.hash)
self.assertEqual(b'hola', view.get('/kakas_link').content)
view.delete('/kakas')
self.assertEqual(b'hola', view.get('/kakas_link').content)
print(view.get('/kakas').content)
def test_revert(self):
view = View(self.log, self.root_key)
view.build()
rata_node = view.mkdir('/rata')
# TODO
|
|
'''``TotoService`` can be used to write general processes that take advantage of the process creation/management features
used by ``TotoServer`` and ``TotoWorker`` - the two built in subclasses of ``TotoService``. ``TotoService`` subclasses can be
run with the ``--start`` (or ``--stop``) and ``--processes`` options
to start the service as a daemon process or run multiple instances simultaneously.
To run a subclass of ``TotoService`` create a script like this::
from toto.service import TotoService
class MyServiceSubclass(TotoService):
def main_loop(self):
while 1:
#run some job continuously
MyServiceSubclass('conf_file.conf').run()
'''
import os
import tornado
import logging
from tornado.options import define, options
from multiprocessing import Process, cpu_count
from time import sleep
define("daemon", metavar='start|stop|restart', help="Start, stop or restart this script as a daemon process. Use this setting in conf files, the shorter start, stop, restart aliases as command line arguments. Requires the multiprocessing module.")
define("processes", default=1, help="The number of daemon processes to run")
define("pidfile", default="toto.daemon.pid", help="The path to the pidfile for daemon processes will be named <path>.<num>.pid (toto.daemon.pid -> toto.daemon.0.pid)")
define("start", default=False, help="Alias for daemon=start for command line usage - overrides daemon setting.")
define("stop", default=False, help="Alias for daemon=start for command line usage - overrides daemon setting.")
define("restart", default=False, help="Alias for daemon=start for command line usage - overrides daemon setting.")
define("nodaemon", default=False, help="Alias for daemon='' for command line usage - overrides daemon setting.")
define("debug", default=False, help="Set this to true to prevent Toto from nicely formatting generic errors. With debug=True, errors will print to the command line")
#convert the pidfile path to an absolute path and insert ".i" before the last "." or at the end of the path
def pid_path(i):
'''Used to generate PID files for daemonized TotoServices. Child processes with PID files
matching the paths returned by this function will be killed with SIGTERM when the server daemon process is stopped using the
``--stop`` or ``--daemon=stop`` arguments::
proc = Process()
proc.start()
with open(pid_path(process_count() + 1), 'wb') as f:
f.write(str(proc.pid))
Note that ``i`` is normally the integer index of a child process, although other suffixes (such as ``'master'``) are also used internally.
'''
(d, f) = os.path.split(os.path.abspath(options.pidfile))
components = f.rsplit('.', 1)
f = '%s.%s' % (components[0], i)
if len(components) > 1:
f += "." + components[1]
return os.path.join(d, f)
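# Added illustration (not part of the original module): with the default
# --pidfile=toto.daemon.pid the filename is split on its last ".", the index is
# inserted and the extension re-appended, so pid_path(0) resolves to
# <abs-dir>/toto.daemon.0.pid and pid_path('master') to
# <abs-dir>/toto.daemon.master.pid.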
def process_count():
'''Returns the number of service processes that will run with the current configuration. This will match
the ``--processes=n`` option if n >= 0. Otherwise ``multiprocessing.cpu_count()`` will be used.
'''
return options.processes if options.processes >= 0 else cpu_count()
class TotoService(object):
'''Subclass ``TotoService`` to create a service that can be easily daemonised or
run in multiple processes simultaneously.
'''
def _load_options(self, conf_file=None, final=True, **kwargs):
for k in kwargs:
setattr(options, k, kwargs[k])
if conf_file:
tornado.options.parse_config_file(conf_file, final=False)
tornado.options.parse_command_line(final=final)
if options.start:
setattr(options, 'daemon', 'start')
elif options.stop:
setattr(options, 'daemon', 'stop')
elif options.restart:
setattr(options, 'daemon', 'restart')
elif options.nodaemon:
setattr(options, 'daemon', '')
def __init__(self, conf_file=None, **kwargs):
if options.log_file_prefix:
root_logger = logging.getLogger()
for handler in [h for h in root_logger.handlers]:
root_logger.removeHandler(handler)
self._load_options(conf_file, **kwargs)
def __run_service(self, pidfile=None):
def start_server_process(pidfile, service_id=0):
self.service_id = service_id
self.main_loop()
if pidfile:
os.remove(pidfile)
count = process_count()
processes = []
pidfiles = options.daemon and [pid_path(i) for i in xrange(1, count + 1)] or []
self.prepare()
for i in xrange(count):
proc = Process(target=start_server_process, args=(pidfiles and pidfiles[i], i))
proc.daemon = True
processes.append(proc)
proc.start()
else:
print "Starting %s %s process%s." % (count, self.__class__.__name__, count > 1 and 'es' or '')
if options.daemon:
i = 1
for proc in processes:
with open(pidfiles[i - 1], 'w') as f:
f.write(str(proc.pid))
i += 1
for proc in processes:
proc.join()
self.finish()
if pidfile:
os.remove(pidfile)
def run(self):
'''Start the service. Depending on the initialization options, this may run more than one
service process.
'''
if options.daemon:
import multiprocessing
import signal, re
pattern = pid_path(r'\d+').replace('.', r'\.')
piddir = os.path.dirname(pattern).replace('\\.', '.')
master_pidfile = pid_path('master')
if options.daemon == 'stop' or options.daemon == 'restart':
existing_pidfiles = [pidfile for pidfile in (os.path.join(piddir, fn) for fn in os.listdir(piddir)) if re.match(pattern, pidfile)]
try:
with open(master_pidfile, 'rb') as f:
master_pid = int(f.read())
except:
master_pid = 0
for pidfile in existing_pidfiles:
try:
with open(pidfile, 'r') as f:
pid = int(f.read())
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != 3:
raise
print "Stopped %s %s" % (self.__class__.__name__, pid)
os.remove(pidfile)
except (OSError, IOError) as e:
if e.errno != 2:
raise
if not existing_pidfiles and master_pid:
try:
os.kill(master_pid, signal.SIGTERM)
except OSError as e:
if e.errno != 3:
raise
os.remove(master_pidfile)
print 'Force stopped %s %s' % (self.__class__.__name__, master_pid)
else:
while os.path.exists(master_pidfile):
sleep(0.01)
if options.daemon == 'start' or options.daemon == 'restart':
existing_pidfiles = [pidfile for pidfile in (os.path.join(piddir, fn) for fn in os.listdir(piddir)) if re.match(pattern.replace(r'\d', r'[\w\d]'), pidfile)]
if existing_pidfiles:
print "Not starting %s, pidfile%s exist%s at %s" % (self.__class__.__name__, len(existing_pidfiles) > 1 and 's' or '', len(existing_pidfiles) == 1 and 's' or '', ', '.join(existing_pidfiles))
return
#fork and only continue on child process
if not os.fork():
#detach from controlling terminal
os.setsid()
#fork again and write pid to pidfile from parent, run server on child
pid = os.fork()
if pid:
with open(master_pidfile, 'w') as f:
f.write(str(pid))
else:
self.__run_service(master_pidfile)
if options.daemon not in ('start', 'stop', 'restart'):
print "Invalid daemon option: " + options.daemon
else:
self.__run_service()
def prepare(self):
'''Override this method in a ``TotoService`` subclass and it will be called before any service processes
are created. You can set instance variables here and they will be available in ``main_loop()`` but be
careful that any retained objects are safe to access across processes'''
pass
def main_loop(self):
'''Subclass ``TotoService`` and override ``main_loop()`` with your desired functionality.'''
raise NotImplementedError()
def finish(self):
'''Override this method in a ``TotoService`` subclass and it will be called after all service processes
have exited (after each ``main_loop()`` has returned).
Note: This method will only be called once and only after all child processes have finished.'''
pass
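# Added usage sketch (the script name is an assumption): a TotoService subclass
# script written as in the module docstring can then be managed through the
# options defined above, e.g.:
#
#   python my_service.py --daemon=start --processes=4   # fork 4 daemonised workers
#   python my_service.py --stop                         # SIGTERM the processes in the pidfiles
#   python my_service.py --nodaemon                     # run in the foreground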
|
|
import json
from abc import ABCMeta
from collections import defaultdict
from django.template.loader import render_to_string
from django.utils import six
from django.utils.functional import Promise
from cms.utils.compat.dj import force_unicode
from cms.constants import RIGHT, LEFT, REFRESH_PAGE, URL_CHANGE
class ItemSearchResult(object):
def __init__(self, item, index):
self.item = item
self.index = index
def __add__(self, other):
return ItemSearchResult(self.item, self.index + other)
def __sub__(self, other):
return ItemSearchResult(self.item, self.index - other)
def __int__(self):
return self.index
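# Added illustration (item names are assumptions): ItemSearchResult pairs an
# item with its index so results from find_first()/find_items() can be passed
# to add_item(position=...) directly or offset arithmetically, e.g.:
#
#   result = toolbar.find_first(LinkItem, name='History')
#   if result:
#       toolbar.add_item(my_item, position=result + 1)  # insert just after it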
def may_be_lazy(thing):
if isinstance(thing, Promise):
return thing._proxy____args[0]
else:
return thing
class ToolbarAPIMixin(six.with_metaclass(ABCMeta)):
REFRESH_PAGE = REFRESH_PAGE
URL_CHANGE = URL_CHANGE
LEFT = LEFT
RIGHT = RIGHT
def __init__(self):
self.items = []
self.menus = {}
self._memo = defaultdict(list)
def _memoize(self, item):
self._memo[item.__class__].append(item)
def _unmemoize(self, item):
self._memo[item.__class__].remove(item)
def _item_position(self, item):
return self.items.index(item)
def _add_item(self, item, position):
if position is not None:
self.items.insert(position, item)
else:
self.items.append(item)
def _remove_item(self, item):
if item in self.items:
self.items.remove(item)
else:
raise KeyError("Item %r not found" % item)
def get_item_count(self):
return len(self.items)
def add_item(self, item, position=None):
if not isinstance(item, BaseItem):
raise ValueError("Items must be subclasses of cms.toolbar.items.BaseItem, %r isn't" % item)
if isinstance(position, ItemSearchResult):
position = position.index
elif isinstance(position, BaseItem):
position = self._item_position(position)
elif not (position is None or isinstance(position, (int,))):
raise ValueError("Position must be None, an integer, an item or an ItemSearchResult, got %r instead" % position)
self._add_item(item, position)
self._memoize(item)
return item
def find_items(self, item_type, **attributes):
results = []
attr_items = attributes.items()
notfound = object()
for candidate in self._memo[item_type]:
if all(may_be_lazy(getattr(candidate, key, notfound)) == value for key, value in attr_items):
results.append(ItemSearchResult(candidate, self._item_position(candidate)))
return results
def find_first(self, item_type, **attributes):
try:
return self.find_items(item_type, **attributes)[0]
except IndexError:
return None
#
# This will only work if it is used to determine the insert position for
# all items in the same menu.
#
def get_alphabetical_insert_position(self, new_menu_name, item_type,
default=0):
results = self.find_items(item_type)
# No items yet? Use the default value provided
if not len(results):
return default
last_position = 0
for result in sorted(results, key=lambda x: x.item.name):
if result.item.name > new_menu_name:
return result.index
if result.index > last_position:
last_position = result.index
else:
return last_position + 1
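# Added illustration of the comment above (menu names are assumptions): with
# existing items named "Apps" and "Users", inserting "Pages" returns the index
# of "Users", the first name that sorts after it; if every existing name sorts
# before the new one, the position after the last match (last_position + 1) is
# returned instead.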
def remove_item(self, item):
self._remove_item(item)
self._unmemoize(item)
def add_sideframe_item(self, name, url, active=False, disabled=False,
extra_classes=None, on_close=None, side=LEFT, position=None):
item = SideframeItem(name, url,
active=active,
disabled=disabled,
extra_classes=extra_classes,
on_close=on_close,
side=side,
)
self.add_item(item, position=position)
return item
def add_modal_item(self, name, url, active=False, disabled=False,
extra_classes=None, on_close=REFRESH_PAGE, side=LEFT, position=None):
item = ModalItem(name, url,
active=active,
disabled=disabled,
extra_classes=extra_classes,
on_close=on_close,
side=side,
)
self.add_item(item, position=position)
return item
def add_link_item(self, name, url, active=False, disabled=False,
extra_classes=None, side=LEFT, position=None):
item = LinkItem(name, url,
active=active,
disabled=disabled,
extra_classes=extra_classes,
side=side
)
self.add_item(item, position=position)
return item
def add_ajax_item(self, name, action, active=False, disabled=False,
extra_classes=None, data=None, question=None,
side=LEFT, position=None, on_success=None):
item = AjaxItem(name, action, self.csrf_token,
active=active,
disabled=disabled,
extra_classes=extra_classes,
data=data,
question=question,
side=side,
on_success=on_success,
)
self.add_item(item, position=position)
return item
class BaseItem(six.with_metaclass(ABCMeta)):
template = None
def __init__(self, side=LEFT):
self.side = side
@property
def right(self):
return self.side is RIGHT
def render(self):
return render_to_string(self.template, self.get_context())
def get_context(self):
return {}
class TemplateItem(BaseItem):
def __init__(self, template, extra_context=None, side=LEFT):
super(TemplateItem, self).__init__(side)
self.template = template
self.extra_context = extra_context
def get_context(self):
if self.extra_context:
return self.extra_context
return {}
class SubMenu(ToolbarAPIMixin, BaseItem):
template = "cms/toolbar/items/menu.html"
sub_level = True
active = False
def __init__(self, name, csrf_token, side=LEFT):
ToolbarAPIMixin.__init__(self)
BaseItem.__init__(self, side)
self.name = name
self.csrf_token = csrf_token
def __repr__(self):
return '<Menu:%s>' % force_unicode(self.name)
def add_break(self, identifier=None, position=None):
item = Break(identifier)
self.add_item(item, position=position)
return item
def get_items(self):
return self.items
def get_context(self):
return {
'active': self.active,
'items': self.get_items(),
'title': self.name,
'sub_level': self.sub_level
}
class Menu(SubMenu):
sub_level = False
def get_or_create_menu(self, key, verbose_name, side=LEFT, position=None):
if key in self.menus:
return self.menus[key]
menu = SubMenu(verbose_name, self.csrf_token, side=side)
self.menus[key] = menu
self.add_item(menu, position=position)
return menu
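# Added usage sketch (key and labels are assumptions): submenus are created
# lazily and cached by key, so repeated calls return the same SubMenu.
#
#   submenu = menu.get_or_create_menu('reports', 'Reports')
#   submenu.add_link_item('Overview', url='/admin/reports/')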
class LinkItem(BaseItem):
template = "cms/toolbar/items/item_link.html"
def __init__(self, name, url, active=False, disabled=False, extra_classes=None, side=LEFT):
super(LinkItem, self).__init__(side)
self.name = name
self.url = url
self.active = active
self.disabled = disabled
self.extra_classes = extra_classes or []
def __repr__(self):
return '<LinkItem:%s>' % force_unicode(self.name)
def get_context(self):
return {
'url': self.url,
'name': self.name,
'active': self.active,
'disabled': self.disabled,
'extra_classes': self.extra_classes,
}
class SideframeItem(BaseItem):
template = "cms/toolbar/items/item_sideframe.html"
def __init__(self, name, url, active=False, disabled=False, extra_classes=None, on_close=None, side=LEFT):
super(SideframeItem, self).__init__(side)
self.name = "%s ..." % force_unicode(name)
self.url = url
self.active = active
self.disabled = disabled
self.extra_classes = extra_classes or []
self.on_close = on_close
def __repr__(self):
return '<SideframeItem:%s>' % force_unicode(self.name)
def get_context(self):
return {
'url': self.url,
'name': self.name,
'active': self.active,
'disabled': self.disabled,
'extra_classes': self.extra_classes,
'on_close': self.on_close,
}
class ModalItem(SideframeItem):
template = "cms/toolbar/items/item_modal.html"
def __repr__(self):
return '<ModalItem:%s>' % force_unicode(self.name)
class AjaxItem(BaseItem):
template = "cms/toolbar/items/item_ajax.html"
def __init__(self, name, action, csrf_token, data=None, active=False,
disabled=False, extra_classes=None,
question=None, side=LEFT, on_success=None):
super(AjaxItem, self).__init__(side)
self.name = name
self.action = action
self.active = active
self.disabled = disabled
self.csrf_token = csrf_token
self.data = data or {}
self.extra_classes = extra_classes or []
self.question = question
self.on_success = on_success
def __repr__(self):
return '<AjaxItem:%s>' % force_unicode(self.name)
def get_context(self):
data = {}
data.update(self.data)
data['csrfmiddlewaretoken'] = self.csrf_token
data = json.dumps(data)
return {
'action': self.action,
'name': self.name,
'active': self.active,
'disabled': self.disabled,
'extra_classes': self.extra_classes,
'data': data,
'question': self.question,
'on_success': self.on_success
}
class Break(BaseItem):
template = "cms/toolbar/items/break.html"
def __init__(self, identifier=None):
self.identifier = identifier
class BaseButton(six.with_metaclass(ABCMeta)):
template = None
def render(self):
return render_to_string(self.template, self.get_context())
def get_context(self):
return {}
class Button(BaseButton):
template = "cms/toolbar/items/button.html"
def __init__(self, name, url, active=False, disabled=False,
extra_classes=None):
self.name = name
self.url = url
self.active = active
self.disabled = disabled
self.extra_classes = extra_classes or []
def __repr__(self):
return '<Button:%s>' % force_unicode(self.name)
def get_context(self):
return {
'name': self.name,
'url': self.url,
'active': self.active,
'disabled': self.disabled,
'extra_classes': self.extra_classes,
}
class ModalButton(Button):
template = "cms/toolbar/items/button_modal.html"
def __init__(self, name, url, active=False, disabled=False, extra_classes=None, on_close=None):
self.name = name
self.url = url
self.active = active
self.disabled = disabled
self.extra_classes = extra_classes or []
self.on_close = on_close
def __repr__(self):
return '<ModalButton:%s>' % force_unicode(self.name)
def get_context(self):
return {
'name': self.name,
'url': self.url,
'active': self.active,
'disabled': self.disabled,
'extra_classes': self.extra_classes,
'on_close': self.on_close,
}
class SideframeButton(ModalButton):
template = "cms/toolbar/items/button_sideframe.html"
def __repr__(self):
return '<SideframeButton:%s>' % force_unicode(self.name)
class ButtonList(BaseItem):
template = "cms/toolbar/items/button_list.html"
def __init__(self, identifier=None, extra_classes=None, side=LEFT):
super(ButtonList, self).__init__(side)
self.extra_classes = extra_classes or []
self.buttons = []
self.identifier = identifier
def __repr__(self):
return '<ButtonList:%s>' % self.identifier
def add_item(self, item):
if not isinstance(item, Button):
raise ValueError("Expected instance of cms.toolbar.items.Button, got %r instead" % item)
self.buttons.append(item)
def add_button(self, name, url, active=False, disabled=False,
extra_classes=None):
item = Button(name, url,
active=active,
disabled=disabled,
extra_classes=extra_classes
)
self.buttons.append(item)
return item
def add_modal_button(self, name, url, active=False, disabled=False, extra_classes=None, on_close=REFRESH_PAGE):
item = ModalButton(name, url,
active=active,
disabled=disabled,
extra_classes=extra_classes,
on_close=on_close,
)
self.buttons.append(item)
return item
def add_sideframe_button(self, name, url, active=False, disabled=False, extra_classes=None, on_close=None):
item = SideframeButton(name, url,
active=active,
disabled=disabled,
extra_classes=extra_classes,
on_close=on_close,
)
self.buttons.append(item)
return item
def get_context(self):
return {
'buttons': self.buttons,
'extra_classes': self.extra_classes
}
|
|
#!/usr/bin/env python
from collections import OrderedDict
import inspect
import json
import os
import re
import shutil
import io
from subprocess import call, Popen, PIPE
import sys, getopt
import pkg_resources
import subprocess
from jinja2 import Environment, FileSystemLoader
from drafter_postprocessing.json_processing import postprocess_drafter_json
from apib_extra_parse_utils import preprocess_apib_parameters_lines, start_apib_section, get_indentation
def print_api_spec_title_to_extra_file(input_file_path, extra_sections_file_path):
"""Extracts the title of the API specification and writes it to the extra sections file.
Arguments:
input_file_path -- File with the API specification
extra_sections_file_path -- File where we will write the extra sections
"""
with open(input_file_path, 'rU') as input_file, open(extra_sections_file_path, 'w') as extra_sections_file:
line = input_file.readline()
while (line != "" and not line.startswith("# ")):
line = input_file.readline()
extra_sections_file.write( line )
def separate_extra_sections_and_api_blueprint(input_file_path, extra_sections_file_path, API_blueprint_file_path):
"""Divides a Fiware API specification into extra sections and its API blueprint.
Arguments:
input_file_path -- A Fiware API specification file.
extra_sections_file_path -- Resulting file containing extra information about the API specification.
API_blueprint_file_path -- Resulting file containing the API blueprint of the Fiware API.
"""
print_api_spec_title_to_extra_file(input_file_path, extra_sections_file_path)
with open(input_file_path, 'rU') as input_file, open(extra_sections_file_path, 'a') as extra_sections_file, open(API_blueprint_file_path, 'w') as API_blueprint_file:
line_counter = 0
title_line_end = -1
apib_line_start = -1
metadata_section = True
apib_part = False
title_section = False
parameters_section = False
data_structures_section = 0
for line in input_file:
line_counter += 1
copy = False
if metadata_section and len(line.split(':')) == 1:
metadata_section = False
title_section = True
if metadata_section:
copy = False
else:
if title_section and line.startswith('##'):
title_section = False
if title_section:
copy = False
else:
if not apib_part:
apib_part = start_apib_section(line)
if title_line_end < 0:
title_line_end = line_counter
if not apib_part:
copy = True
else:
copy = False
if apib_line_start < 0:
apib_line_start = line_counter
if copy:
extra_sections_file.write(line)
else:
line = line.replace('\t',' ')
(line, parameters_section, data_structures_section) = preprocess_apib_parameters_lines(line,
parameters_section,
data_structures_section)
API_blueprint_file.write(line)
return (title_line_end, apib_line_start)
def convert_message_error_lines(drafter_output, title_line_end, apib_line_start):
"""Convert the error lines to match the extended FIWARE APIB file format
Arguments:
drafter_output -- Text with drafter postprocessing output
title_line_end -- Line where the specification title ends
apib_line_start -- Line where the specification of the API starts
"""
line_error_regex = re.compile( "line (\d+)," )
line_error_matches = line_error_regex.findall(drafter_output)
if line_error_matches:
line_error_set = set(line_error_matches)
for line_error in line_error_set:
if int(line_error) >= apib_line_start:
line_error_substitute = int(line_error) - title_line_end + apib_line_start
drafter_output = drafter_output.replace("line {},".format(line_error), "line {},".format(line_error_substitute))
return drafter_output
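# Added worked example (numbers are illustrative): if the extra sections
# stripped out of the blueprint spanned original lines 10-24 (title_line_end=10,
# apib_line_start=25), a drafter message "line 30," about the generated .apib
# file becomes "line 45," (30 - 10 + 25), i.e. shifted by the 15 removed lines
# so that it points at the original specification file.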
def parse_api_blueprint_with_drafter(API_blueprint_file_path, API_blueprint_JSON_file_path, title_line_end, apib_line_start):
"""Parse the API Blueprint file with the API specification and save the output to a JSON file
Arguments:
API_blueprint_file_path -- An API Blueprint definition file
API_blueprint_JSON_file_path -- Path to JSON file
title_line_end -- Line where the specification title ends. Needed to reconvert error messages from drafter.
apib_line_start -- Line where the specification of the API starts. Needed to reconvert error messages from drafter.
"""
command_call = ["drafter", API_blueprint_file_path, "--output", API_blueprint_JSON_file_path, "--format", "json", "--use-line-num"]
[_, execution_error_output] = Popen(command_call, stderr=PIPE).communicate()
print convert_message_error_lines(execution_error_output, title_line_end, apib_line_start)
def generate_metadata_dictionary(metadata_section):
"""Generates a metadata section as a dictionary from a non-dictionary section
Arguments:
metadata_section -- Source metadata section
"""
metadata_section_dict = {}
metadata_section_dict['id'] = metadata_section['id']
metadata_section_dict['name'] = metadata_section['name']
metadata_section_dict['body'] = metadata_section['body']
metadata_section_dict['subsections'] = OrderedDict()
for subsection in metadata_section['subsections']:
metadata_section_dict['subsections'][subsection['name']] = generate_metadata_dictionary(subsection)
return metadata_section_dict
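# Added sketch (section content is an assumption): a parsed section such as
#   {'id': 'intro', 'name': 'Introduction', 'body': '...',
#    'subsections': [{'id': 'scope', 'name': 'Scope', 'body': '...', 'subsections': []}]}
# becomes the same data with subsections keyed by name:
#   {'id': 'intro', 'name': 'Introduction', 'body': '...',
#    'subsections': OrderedDict([('Scope', {...})])}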
def copy_static_files(template_dir_path, dst_dir_path):
"""Copies the static files used by the resulting rendered site
Arguments:
template_dir_path -- path to the template directory
dst_dir_path -- destination directory
"""
subdirectories = ['/css', '/js', '/img', '/font']
for subdirectory in subdirectories:
if os.path.exists(dst_dir_path + subdirectory):
shutil.rmtree(dst_dir_path + subdirectory)
shutil.copytree(template_dir_path + subdirectory, dst_dir_path + subdirectory, ignore=shutil.ignore_patterns('*.pyc', '*.py'))
def render_api_blueprint(template_file_path, context_file_path, dst_dir_path):
"""Renders an API Blueprint context file with a Jinja2 template.
Arguments:
template_file_path -- The Jinja2 template path
context_file_path -- Path to the context file
dst_dir_path -- Path to save the compiled site
"""
env = Environment(extensions=["jinja2.ext.do",], loader=FileSystemLoader(os.path.dirname(template_file_path)))
env.filters['sort_payload_parameters'] = sort_payload_parameters
template = env.get_template(os.path.basename(template_file_path))
output = ""
with open(context_file_path, "rU") as contextFile:
output = template.render(json.load(contextFile))
rendered_HTML_filename = os.path.splitext(os.path.basename(context_file_path))[0]
rendered_HTML_path = os.path.join(dst_dir_path, rendered_HTML_filename + ".html")
with open(rendered_HTML_path, 'w') as output_file:
output_file.write(output.encode('utf-8'))
copy_static_files(os.path.dirname(template_file_path), dst_dir_path)
def create_directory_if_not_exists(dir_path):
"""Creates a directory with the given path if it doesn't exists yet"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def clear_directory(dir_path):
"""Removes all the files on a directory given its path"""
for file in os.listdir(dir_path):
file_path = os.path.join(dir_path, file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
print e
def compare_payload_parameter(paramA, paramB):
"""Returns a boolean indicating whether paramA < paramB (alphabetically)
Arguments:
paramA - first operand of the comparison
paramB - second operand of the comparison"""
if( paramA['class'] == "property" and
paramB['class'] == "property"
):
if( paramA['content']['name']['literal'] < paramB['content']['name']['literal'] ):
return -1
else:
return 1
else:
return 0
def sort_payload_parameters(parameters_list):
"""Jinja2 custom filter for ordering a list of parameters
Arguments:
parameters_list - list of payload parameters given by Drafter"""
return sorted(parameters_list, cmp=compare_payload_parameter)
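# Added usage sketch (the dicts are reduced examples of drafter output):
# "property" entries are ordered alphabetically by their literal name, anything
# else keeps its relative position.
#
#   params = [{'class': 'property', 'content': {'name': {'literal': 'zip'}}},
#             {'class': 'property', 'content': {'name': {'literal': 'city'}}}]
#   sort_payload_parameters(params)  # -> the 'city' entry first, then 'zip'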
def render_api_specification(API_specification_path, template_path, dst_dir_path, clear_temporal_dir=True, cover=None):
"""Renders an API specification using a template and saves it to destination directory.
Arguments:
API_specification_path -- Path to API Blueprint specification
template_path -- The Jinja2 template path
dst_dir_path -- Path to save the compiled site
clear_temporal_dir -- Flag to clear temporary files generated by the script
"""
temp_dir_path = "/var/tmp/fiware_api_blueprint_renderer_tmp"
API_specification_file_name = os.path.splitext(os.path.basename(API_specification_path))[0]
API_extra_sections_file_path = os.path.join(temp_dir_path, API_specification_file_name + '.extras')
API_blueprint_file_path = os.path.join(temp_dir_path, API_specification_file_name + '.apib')
API_blueprint_JSON_file_path = os.path.join(temp_dir_path, API_specification_file_name + '.json')
create_directory_if_not_exists(temp_dir_path)
(title_line_end, apib_line_start) = separate_extra_sections_and_api_blueprint(API_specification_path,
API_extra_sections_file_path,
API_blueprint_file_path)
parse_api_blueprint_with_drafter(API_blueprint_file_path, API_blueprint_JSON_file_path, title_line_end, apib_line_start)
is_PDF = cover is not None
postprocess_drafter_json(API_blueprint_JSON_file_path,API_blueprint_file_path,API_extra_sections_file_path, is_PDF)
render_api_blueprint(template_path, API_blueprint_JSON_file_path, dst_dir_path)
if is_PDF: #cover needed for pdf
cover_json_path = os.path.join(dst_dir_path, 'cover.json')
shutil.move(API_blueprint_JSON_file_path, cover_json_path)
render_api_blueprint( cover, cover_json_path, dst_dir_path )
shutil.move(cover_json_path, API_blueprint_JSON_file_path)
return
if clear_temporal_dir:
clear_directory( temp_dir_path )
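# Added call sketch (paths are assumptions, not files shipped with the package):
# render a plain HTML site for one specification with a given theme template.
#
#   render_api_specification('specs/example.apib',
#                            'themes/default_theme/api-specification.tpl',
#                            'out/example')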
def print_package_dependencies():
"""Print the dependencies of package Fabre"""
print "\nPIP dependencies\n"
dependencies_matrix = [["Package", "Required version", "Installed version"]]
for package in pkg_resources.get_distribution("fiware_api_blueprint_renderer").requires():
package_header = str(package).split('>=')
package_name = package_header[0]
package_required_version = ">= " + package_header[1]
package_installed_info = subprocess.check_output(['pip', 'show', package_name])
version_regex = re.compile("Version: (.*)")
package_installed_version = version_regex.search(package_installed_info).group(1)
dependencies_matrix.append([package_name, package_required_version, package_installed_version])
pretty_print_matrix(dependencies_matrix)
system_dependencies_matrix = [["Package", "Required version", "Installed version"]]
system_dependencies = [('drafter', 'v0.1.9'), ('wkhtmltopdf', '0.12.2.1 (with patched qt)')]
for (package_name, package_required_version) in system_dependencies:
row = []
row.append(package_name)
row.append(package_required_version)
if package_name != 'wkhtmltopdf':
row.append(subprocess.check_output([package_name, '--version'])[0:-1])
else:
row.append(subprocess.check_output([package_name, '--version'])[0:-1].split(' ',1)[1])
system_dependencies_matrix.append(row)
print "\nSystem dependencies\n"
pretty_print_matrix(system_dependencies_matrix)
print "\n"
def pretty_print_matrix(matrix):
"""Pretty print the given matrix (as a table)"""
# Retrieve the size of the matrix longest element
longest_matrix_string_size = 0
for row in matrix:
longest_row_string_size = len(max(row, key=len))
if longest_row_string_size > longest_matrix_string_size:
longest_matrix_string_size = longest_row_string_size
# Print the matrix as a table
row_format = "{:<%i}" % (longest_matrix_string_size + 2)
row_format = row_format * len(matrix[0])
for row in matrix:
print "\t" + row_format.format(*row)
def main():
usage = "Usage: \n\t" + sys.argv[0] + " -i <api-spec-path> -o <dst-dir> [--pdf] [--no-clear-temp-dir] [--template]"
version = "fabre " + pkg_resources.require("fiware_api_blueprint_renderer")[0].version
default_theme = os.path.dirname(__file__)+"/../themes/default_theme/api-specification.tpl"
pdf_template_path= os.path.dirname(__file__)+"/../themes/default_theme/api-specification.tpl"
cover_template_path= os.path.dirname(__file__)+"/../themes/default_theme/cover.tpl"
template_path= default_theme
clear_temporal_dir = True
API_specification_path = None
dst_dir_path = None
temp_pdf_path = "/var/tmp/fiware_api_blueprint_renderer_tmp_pdf/"
pdf = False
try:
opts, args = getopt.getopt(sys.argv[1:],"hvi:o:ct:",["version","ifile=","odir=","no-clear-temp-dir","template=","pdf","version-dependencies"])
except getopt.GetoptError:
print usage
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print usage
sys.exit()
elif opt in ("-v", "--version"):
print version
sys.exit()
elif opt == '--version-dependencies':
print version
print_package_dependencies()
sys.exit()
elif opt in ("-i", "--input"):
API_specification_path = arg
elif opt in ("-o", "--output"):
dst_dir_path = arg
elif opt in ("-t", "--template"):
template_path = arg
elif opt in ("-c", "--no-clear-temp-dir"):
clear_temporal_dir = False
elif opt in ("--pdf"):
pdf = True
#if no template is specified, uses the default pdf template
if not ('-t' in zip(*opts)[0] or '--template' in zip(*opts)[0]):
template_path = pdf_template_path
if API_specification_path is None:
print "API specification file must be specified"
print usage
sys.exit(3)
if dst_dir_path is None:
print "Destination directory must be specified"
print usage
sys.exit(4)
if pdf:
create_directory_if_not_exists(temp_pdf_path)
rendered_HTML_filename = os.path.splitext(os.path.basename(API_specification_path))[0]
rendered_HTML_path = os.path.join(temp_pdf_path, rendered_HTML_filename + ".html")
rendered_HTML_cover = os.path.join(temp_pdf_path, "cover" + ".html")
if ".pdf" not in dst_dir_path:
create_directory_if_not_exists(dst_dir_path)
dst_dir_path = os.path.join(dst_dir_path, rendered_HTML_filename + ".pdf")
render_api_specification(API_specification_path, template_path, temp_pdf_path, clear_temporal_dir, cover_template_path)
call( ["wkhtmltopdf", '-d', '125', '--page-size','A4', "page", "file://"+rendered_HTML_cover ,"toc" ,"page", "file://"+rendered_HTML_path, '--footer-center', "Page [page]",'--footer-font-size', '8', '--footer-spacing', '3','--run-script', "setInterval(function(){if(document.readyState=='complete') window.status='done';},100)", "--window-status", "done", dst_dir_path ])
else:
create_directory_if_not_exists( dst_dir_path )
render_api_specification( API_specification_path, template_path, dst_dir_path, clear_temporal_dir, None)
sys.exit(0)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of acl command for cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import SetAclExceptionHandler
from gslib.command import SetAclFuncWrapper
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
from gslib.storage_url import UrlsAreForSingleProvider
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils import acl_helper
from gslib.utils.constants import NO_MAX
from gslib.utils.retry_util import Retry
_SET_SYNOPSIS = """
gsutil acl set [-f] [-r] [-a] <file-or-canned_acl_name> url...
"""
_GET_SYNOPSIS = """
gsutil acl get url
"""
_CH_SYNOPSIS = """
gsutil acl ch [-f] [-r] <grant>... url...
where each <grant> is one of the following forms:
-u <id>|<email>:<permission>
-g <id>|<email>|<domain>|All|AllAuth:<permission>
-p (viewers|editors|owners)-<project number>:<permission>
-d <id>|<email>|<domain>|All|AllAuth|(viewers|editors|owners)-<project number>
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "acl get" command gets the ACL text for a bucket or object, which you can
save and edit for the acl set command.
"""
_SET_DESCRIPTION = """
<B>SET</B>
The "acl set" command allows you to set an Access Control List on one or
more buckets and objects. The file-or-canned_acl_name parameter names either
a canned ACL or the path to a file that contains ACL text. The simplest way
to use the "acl set" command is to specify one of the canned ACLs, e.g.,:
gsutil acl set private gs://bucket
If you want to make an object or bucket publicly readable or writable, it is
recommended to use "acl ch", to avoid accidentally removing OWNER permissions.
See the "acl ch" section for details.
See `Predefined ACLs
<https://cloud.google.com/storage/docs/access-control/lists#predefined-acl>`_
for a list of canned ACLs.
If you want to define more fine-grained control over your data, you can
retrieve an ACL using the "acl get" command, save the output to a file, edit
the file, and then use the "acl set" command to set that ACL on the buckets
and/or objects. For example:
gsutil acl get gs://bucket/file.txt > acl.txt
Make changes to acl.txt such as adding an additional grant, then:
gsutil acl set acl.txt gs://cats/file.txt
Note that you can set an ACL on multiple buckets or objects at once. For
example, to set ACLs on all .jpg files found in a bucket:
gsutil acl set acl.txt gs://bucket/**.jpg
If you have a large number of ACLs to update you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
update:
gsutil -m acl set acl.txt gs://bucket/**.jpg
Note that multi-threading/multi-processing is only done when the named URLs
refer to objects, which happens either if you name specific objects or
if you enumerate objects by using an object wildcard or specifying
the acl -r flag.
<B>SET OPTIONS</B>
The "set" sub-command has the following options
-R, -r Performs "acl set" request recursively, to all objects under
the specified URL.
-a Performs "acl set" request on all object versions.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. If some of the ACLs
couldn't be set, gsutil's exit status will be non-zero even if
this flag is set. This option is implicitly set when running
"gsutil -m acl...".
"""
_CH_DESCRIPTION = """
<B>CH</B>
The "acl ch" (or "acl change") command updates access control lists, similar
in spirit to the Linux chmod command. You can specify multiple access grant
additions and deletions in a single command run; all changes will be made
atomically to each object in turn. For example, if the command requests
deleting one grant and adding a different grant, the ACLs being updated will
never be left in an intermediate state where one grant has been deleted but
the second grant not yet added. Each change specifies a user or group grant
to add or delete, and for grant additions, one of R, W, O (for the
permission to be granted). A more formal description is provided in a later
section; below we provide examples.
<B>CH EXAMPLES</B>
Examples for "ch" sub-command:
Grant anyone on the internet READ access to the object example-object:
gsutil acl ch -u AllUsers:R gs://example-bucket/example-object
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. If you need to
ensure that updates become visible immediately, you should set a
Cache-Control header of "Cache-Control:private, max-age=0, no-transform" on
such objects. For help doing this, see "gsutil help setmeta".
Grant anyone on the internet WRITE access to the bucket example-bucket:
WARNING: this is not recommended as you will be responsible for the content
gsutil acl ch -u AllUsers:W gs://example-bucket
Grant the user john.doe@example.com WRITE access to the bucket
example-bucket:
gsutil acl ch -u john.doe@example.com:WRITE gs://example-bucket
Grant the group admins@example.com OWNER access to all jpg files in
example-bucket:
gsutil acl ch -g admins@example.com:O gs://example-bucket/**.jpg
Grant the owners of project example-project WRITE access to the bucket
example-bucket:
gsutil acl ch -p owners-example-project:W gs://example-bucket
NOTE: You can replace 'owners' with 'viewers' or 'editors' to grant access
to a project's viewers/editors respectively.
Remove access to the bucket example-bucket for the viewers of project number
12345:
gsutil acl ch -d viewers-12345 gs://example-bucket
NOTE: You cannot remove the project owners group from ACLs of gs:// buckets in
the given project. Attempts to do so will appear to succeed, but the service
will add the project owners group into the new set of ACLs before applying it.
Note that removing a project requires you to reference the project by
its number (which you can see with the acl get command) as opposed to its
project ID string.
Grant the user with the specified canonical ID READ access to all objects
in example-bucket that begin with folder/:
gsutil acl ch -r \\
-u 84fac329bceSAMPLE777d5d22b8SAMPLE785ac2SAMPLE2dfcf7c4adf34da46:R \\
gs://example-bucket/folder/
Grant the service account foo@developer.gserviceaccount.com WRITE access to
the bucket example-bucket:
gsutil acl ch -u foo@developer.gserviceaccount.com:W gs://example-bucket
Grant all users from the `G Suite
<https://www.google.com/work/apps/business/>`_ domain my-domain.org READ
access to the bucket gcs.my-domain.org:
gsutil acl ch -g my-domain.org:R gs://gcs.my-domain.org
Remove any current access by john.doe@example.com from the bucket
example-bucket:
gsutil acl ch -d john.doe@example.com gs://example-bucket
If you have a large number of objects to update, enabling multi-threading
with the gsutil -m flag can significantly improve performance. The
following command adds OWNER for admin@example.org using
multi-threading:
gsutil -m acl ch -r -u admin@example.org:O gs://example-bucket
Grant READ access to everyone from my-domain.org and to all authenticated
users, and grant OWNER to admin@mydomain.org, for the buckets
my-bucket and my-other-bucket, with multi-threading enabled:
gsutil -m acl ch -r -g my-domain.org:R -g AllAuth:R \\
-u admin@mydomain.org:O gs://my-bucket/ gs://my-other-bucket
<B>CH ROLES</B>
You may specify the following roles with either their shorthand or
their full name:
R: READ
W: WRITE
O: OWNER
For more information on these roles and the access they grant, see the
permissions section of the `Access Control Lists page
<https://cloud.google.com/storage/docs/access-control/lists#permissions>`_.
<B>CH ENTITIES</B>
There are four different entity types: Users, Groups, All Authenticated Users,
and All Users.
Users are added with -u and a plain ID or email address, as in
"-u john-doe@gmail.com:r". Note: Service Accounts are considered to be users.
Groups are like users, but specified with the -g flag, as in
"-g power-users@example.com:O". Groups may also be specified as a full
domain, as in "-g my-company.com:r".
AllAuthenticatedUsers and AllUsers are specified directly, as
in "-g AllUsers:R" or "-g AllAuthenticatedUsers:O". These are case
insensitive, and may be shortened to "all" and "allauth", respectively.
Removing roles is specified with the -d flag and an ID, email
address, domain, or one of AllUsers or AllAuthenticatedUsers.
Many entities' roles can be specified on the same command line, allowing
bundled changes to be executed in a single run. This will reduce the number of
requests made to the server.
<B>CH OPTIONS</B>
The "ch" sub-command has the following options
-d Remove all roles associated with the matching entity.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. With this option the
gsutil exit status will be 0 even if some ACLs couldn't be
changed.
-g Add or modify a group entity's role.
-p Add or modify a project viewers/editors/owners role.
-R, -r Performs acl ch request recursively, to all objects under the
specified URL.
-u Add or modify a user entity's role.
"""
_SYNOPSIS = (_SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') +
_CH_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = ("""
The acl command has three sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION, _CH_DESCRIPTION]))
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_ch_help_text = CreateHelpText(_CH_SYNOPSIS, _CH_DESCRIPTION)
def _ApplyExceptionHandler(cls, exception):
cls.logger.error('Encountered a problem: %s', exception)
cls.everything_set_okay = False
def _ApplyAclChangesWrapper(cls, url_or_expansion_result, thread_state=None):
cls.ApplyAclChanges(url_or_expansion_result, thread_state=thread_state)
class AclCommand(Command):
"""Implementation of gsutil acl command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'acl',
command_name_aliases=['getacl', 'setacl', 'chacl'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='afRrg:u:d:p:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'set': [
CommandArgument.MakeFileURLOrCannedACLArgument(),
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
'get': [CommandArgument.MakeNCloudURLsArgument(1)],
'ch': [CommandArgument.MakeZeroOrMoreCloudURLsArgument()],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='acl',
help_name_aliases=['getacl', 'setacl', 'chmod', 'chacl'],
help_type='command_help',
help_one_line_summary='Get, set, or change bucket and/or object ACLs',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
'ch': _ch_help_text
},
)
def _CalculateUrlsStartArg(self):
if not self.args:
self.RaiseWrongNumberOfArgumentsException()
if (self.args[0].lower() == 'set') or (self.command_alias_used == 'setacl'):
return 1
else:
return 0
def _SetAcl(self):
"""Parses options and sets ACLs on the specified buckets/objects."""
self.continue_on_error = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
try:
self.SetAclCommandHelper(SetAclFuncWrapper, SetAclExceptionHandler)
except AccessDeniedException as unused_e:
self._WarnServiceAccounts()
raise
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _ChAcl(self):
"""Parses options and changes ACLs on the specified buckets/objects."""
self.parse_versions = True
self.changes = []
self.continue_on_error = False
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-f':
self.continue_on_error = True
elif o == '-g':
if 'gserviceaccount.com' in a:
raise CommandException(
'Service accounts are considered users, not groups; please use '
'"gsutil acl ch -u" instead of "gsutil acl ch -g"')
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.GROUP))
elif o == '-p':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.PROJECT))
elif o == '-u':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.USER))
elif o == '-d':
self.changes.append(acl_helper.AclDel(a))
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
if not self.changes:
raise CommandException('Please specify at least one access change '
'with the -g, -u, or -d flags')
if (not UrlsAreForSingleProvider(self.args) or
StorageUrlFromString(self.args[0]).scheme != 'gs'):
raise CommandException(
'The "{0}" command can only be used with gs:// URLs'.format(
self.command_name))
self.everything_set_okay = True
self.ApplyAclFunc(_ApplyAclChangesWrapper,
_ApplyExceptionHandler,
self.args,
object_fields=['acl', 'generation', 'metageneration'])
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _RaiseForAccessDenied(self, url):
self._WarnServiceAccounts()
raise CommandException('Failed to set acl for %s. Please ensure you have '
'OWNER-role access to this resource.' % url)
@Retry(ServiceException, tries=3, timeout_secs=1)
def ApplyAclChanges(self, name_expansion_result, thread_state=None):
"""Applies the changes in self.changes to the provided URL.
Args:
name_expansion_result: NameExpansionResult describing the target object.
thread_state: If present, gsutil Cloud API instance to apply the changes.
"""
if thread_state:
gsutil_api = thread_state
else:
gsutil_api = self.gsutil_api
url = name_expansion_result.expanded_storage_url
if url.IsBucket():
bucket = gsutil_api.GetBucket(url.bucket_name,
provider=url.scheme,
fields=['acl', 'metageneration'])
current_acl = bucket.acl
elif url.IsObject():
gcs_object = encoding.JsonToMessage(apitools_messages.Object,
name_expansion_result.expanded_result)
current_acl = gcs_object.acl
if not current_acl:
self._RaiseForAccessDenied(url)
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
try:
if url.IsBucket():
preconditions = Preconditions(meta_gen_match=bucket.metageneration)
bucket_metadata = apitools_messages.Bucket(acl=current_acl)
gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
preconditions=preconditions,
provider=url.scheme,
fields=['id'])
else: # Object
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
object_metadata = apitools_messages.Object(acl=current_acl)
try:
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=url.generation,
fields=['id'])
except PreconditionException as e:
# Special retry case where we want to do an additional step, the read
# of the read-modify-write cycle, to fetch the correct object
# metadata before reattempting ACL changes.
self._RefetchObjectMetadataAndApplyAclChanges(url, gsutil_api)
self.logger.info('Updated ACL on %s', url)
except BadRequestException as e:
# Don't retry on bad requests, e.g. invalid email address.
raise CommandException('Received bad request from server: %s' % str(e))
except AccessDeniedException:
self._RaiseForAccessDenied(url)
except PreconditionException as e:
# For objects, retry attempts should have already been handled.
if url.IsObject():
raise CommandException(str(e))
# For buckets, raise PreconditionException and continue to next retry.
raise e
@Retry(PreconditionException, tries=3, timeout_secs=1)
def _RefetchObjectMetadataAndApplyAclChanges(self, url, gsutil_api):
"""Reattempts object ACL changes after a PreconditionException."""
gcs_object = gsutil_api.GetObjectMetadata(
url.bucket_name,
url.object_name,
provider=url.scheme,
fields=['acl', 'generation', 'metageneration'])
current_acl = gcs_object.acl
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
object_metadata = apitools_messages.Object(acl=current_acl)
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=gcs_object.generation,
fields=['id'])
def _ApplyAclChangesAndReturnChangeCount(self, storage_url, acl_message):
modification_count = 0
for change in self.changes:
modification_count += change.Execute(storage_url, acl_message, 'acl',
self.logger)
return modification_count
def RunCommand(self):
"""Command entry point for the acl command."""
action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
self.def_acl = False
if action_subcommand == 'get':
metrics.LogCommandParams(subcommands=[action_subcommand])
self.GetAndPrintAcl(self.args[0])
elif action_subcommand == 'set':
metrics.LogCommandParams(subcommands=[action_subcommand])
self._SetAcl()
elif action_subcommand in ('ch', 'change'):
metrics.LogCommandParams(subcommands=[action_subcommand])
self._ChAcl()
else:
raise CommandException(
('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help acl".') % (action_subcommand, self.command_name))
return 0
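# --- Illustrative sketch (not part of gsutil): the read-modify-write pattern ---
# ApplyAclChanges above guards metadata writes with generation/metageneration
# preconditions: if another writer updates the resource between our read and our
# patch, the server rejects the patch with a PreconditionException and the
# @Retry-decorated helper re-reads the metadata before reapplying the ACL changes.
# A minimal, hypothetical version of that loop (GetObjectMetadata,
# PatchObjectMetadata, Preconditions and PreconditionException are the names used
# above; apply_local_changes/new_metadata_with are placeholders):
#
#   for _ in range(3):
#       obj = api.GetObjectMetadata(bucket, name,
#                                   fields=['acl', 'generation', 'metageneration'])
#       apply_local_changes(obj.acl)
#       try:
#           api.PatchObjectMetadata(
#               bucket, name, new_metadata_with(obj.acl),
#               preconditions=Preconditions(gen_match=obj.generation,
#                                           meta_gen_match=obj.metageneration))
#           break
#       except PreconditionException:
#           continue  # someone else won the race; re-read and retry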
|
|
from django.contrib.auth.models import Permission
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.text import capfirst
from wagtail.admin.edit_handlers import FieldPanel, ObjectList, TabbedInterface
from wagtail.contrib.settings.registry import SettingMenuItem
from wagtail.contrib.settings.views import get_setting_edit_handler
from wagtail.core import hooks
from wagtail.core.models import Page, Site
from wagtail.tests.testapp.models import (
FileUploadSetting, IconSetting, PanelSettings, TabbedSettings, TestSetting)
from wagtail.tests.utils import WagtailTestUtils
class TestSettingMenu(TestCase, WagtailTestUtils):
def login_only_admin(self):
""" Log in with a user that only has permission to access the admin """
user = self.create_user(
username='test', password='password')
user.user_permissions.add(Permission.objects.get_by_natural_key(
codename='access_admin', app_label='wagtailadmin', model='admin'))
self.login(username='test', password='password')
return user
def test_menu_item_in_admin(self):
self.login()
response = self.client.get(reverse('wagtailadmin_home'))
self.assertContains(response, capfirst(TestSetting._meta.verbose_name))
self.assertContains(response, reverse('wagtailsettings:edit', args=('tests', 'testsetting')))
def test_menu_item_no_permissions(self):
self.login_only_admin()
response = self.client.get(reverse('wagtailadmin_home'))
self.assertNotContains(response, TestSetting._meta.verbose_name)
self.assertNotContains(response, reverse('wagtailsettings:edit', args=('tests', 'testsetting')))
def test_menu_item_icon(self):
menu_item = SettingMenuItem(IconSetting, icon='tag', classnames='test-class')
self.assertEqual(menu_item.icon_name, 'tag')
self.assertEqual(menu_item.classnames, 'test-class')
def test_menu_item_icon_fontawesome(self):
menu_item = SettingMenuItem(IconSetting, icon='fa-suitcase', classnames='test-class')
self.assertEqual(menu_item.icon_name, '')
self.assertEqual(set(menu_item.classnames.split(' ')), {'icon', 'icon-fa-suitcase', 'test-class'})
class BaseTestSettingView(TestCase, WagtailTestUtils):
def get(self, site_pk=1, params={}, setting=TestSetting):
url = self.edit_url(setting=setting, site_pk=site_pk)
return self.client.get(url, params)
def post(self, site_pk=1, post_data={}, setting=TestSetting):
url = self.edit_url(setting=setting, site_pk=site_pk)
return self.client.post(url, post_data)
def edit_url(self, setting, site_pk=1):
args = [setting._meta.app_label, setting._meta.model_name, site_pk]
return reverse('wagtailsettings:edit', args=args)
class TestSettingCreateView(BaseTestSettingView):
def setUp(self):
self.login()
def test_get_edit(self):
response = self.get()
self.assertEqual(response.status_code, 200)
# there should be a menu item highlighted as active
self.assertContains(response, "menu-active")
def test_edit_invalid(self):
response = self.post(post_data={'foo': 'bar'})
self.assertContains(response, "The setting could not be saved due to errors.")
self.assertContains(response, """<p class="error-message"><span>This field is required.</span></p>""",
count=2, html=True)
self.assertContains(response, "This field is required", count=2)
def test_edit(self):
response = self.post(post_data={'title': 'Edited site title',
'email': 'test@example.com'})
self.assertEqual(response.status_code, 302)
default_site = Site.objects.get(is_default_site=True)
setting = TestSetting.objects.get(site=default_site)
self.assertEqual(setting.title, 'Edited site title')
self.assertEqual(setting.email, 'test@example.com')
def test_file_upload_multipart(self):
response = self.get(setting=FileUploadSetting)
# Ensure the form supports file uploads
self.assertContains(response, 'enctype="multipart/form-data"')
class TestSettingEditView(BaseTestSettingView):
def setUp(self):
default_site = Site.objects.get(is_default_site=True)
self.test_setting = TestSetting()
self.test_setting.title = 'Site title'
self.test_setting.email = 'initial@example.com'
self.test_setting.site = default_site
self.test_setting.save()
self.login()
def test_get_edit(self):
response = self.get()
self.assertEqual(response.status_code, 200)
# there should be a menu item highlighted as active
self.assertContains(response, "menu-active")
def test_non_existent_model(self):
response = self.client.get(reverse('wagtailsettings:edit', args=['test', 'foo', 1]))
self.assertEqual(response.status_code, 404)
def test_edit_invalid(self):
response = self.post(post_data={'foo': 'bar'})
self.assertContains(response, "The setting could not be saved due to errors.")
self.assertContains(response, """<p class="error-message"><span>This field is required.</span></p>""",
count=2, html=True)
self.assertContains(response, "This field is required", count=2)
def test_edit(self):
response = self.post(post_data={'title': 'Edited site title',
'email': 'test@example.com'})
self.assertEqual(response.status_code, 302)
default_site = Site.objects.get(is_default_site=True)
setting = TestSetting.objects.get(site=default_site)
self.assertEqual(setting.title, 'Edited site title')
self.assertEqual(setting.email, 'test@example.com')
def test_get_edit_current_site(self):
url = reverse('wagtailsettings:edit', args=('tests', 'testsetting'))
default_site = Site.objects.get(is_default_site=True)
response = self.client.get(url)
self.assertRedirects(response, status_code=302, expected_url='%s%s/' % (url, default_site.pk))
def test_get_edit_current_site_invalid(self):
Site.objects.all().delete()
url = reverse('wagtailsettings:edit', args=('tests', 'testsetting'))
response = self.client.get(url)
self.assertRedirects(response, status_code=302, expected_url='/admin/')
@override_settings(ALLOWED_HOSTS=['testserver', 'example.com', 'noneoftheabove.example.com'])
class TestMultiSite(BaseTestSettingView):
def setUp(self):
self.default_site = Site.objects.get(is_default_site=True)
self.other_site = Site.objects.create(hostname='example.com', root_page=Page.objects.get(pk=2))
self.login()
def test_redirect_to_default(self):
"""
Should redirect to the setting for the default site.
"""
start_url = reverse('wagtailsettings:edit', args=[
'tests', 'testsetting'])
dest_url = reverse('wagtailsettings:edit', args=[
'tests', 'testsetting', self.default_site.pk])
response = self.client.get(start_url, follow=True)
self.assertRedirects(response, dest_url, status_code=302, fetch_redirect_response=False)
def test_redirect_to_current(self):
"""
Should redirect to the setting for the current site taken from the URL,
by default
"""
start_url = reverse('wagtailsettings:edit', args=[
'tests', 'testsetting'])
dest_url = reverse('wagtailsettings:edit', args=[
'tests', 'testsetting', self.other_site.pk])
response = self.client.get(start_url, follow=True, HTTP_HOST=self.other_site.hostname)
self.assertRedirects(response, dest_url, status_code=302, fetch_redirect_response=False)
def test_with_no_current_site(self):
"""
Redirection should not break if the current request does not correspond to a site
"""
self.default_site.is_default_site = False
self.default_site.save()
start_url = reverse('wagtailsettings:edit', args=[
'tests', 'testsetting'])
response = self.client.get(start_url, follow=True, HTTP_HOST="noneoftheabove.example.com")
self.assertEqual(302, response.redirect_chain[0][1])
def test_switcher(self):
""" Check that the switcher form exists in the page """
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'id="settings-site-switch"')
def test_unknown_site(self):
""" Check that unknown sites throw a 404 """
response = self.get(site_pk=3)
self.assertEqual(response.status_code, 404)
def test_edit(self):
"""
Check that editing settings in multi-site mode edits the correct
setting, and leaves the other ones alone
"""
TestSetting.objects.create(
title='default',
email='default@example.com',
site=self.default_site)
TestSetting.objects.create(
title='other',
email='other@example.com',
site=self.other_site)
response = self.post(site_pk=self.other_site.pk, post_data={
'title': 'other-new', 'email': 'other-other@example.com'})
self.assertEqual(response.status_code, 302)
# Check that the correct setting was updated
other_setting = TestSetting.for_site(self.other_site)
self.assertEqual(other_setting.title, 'other-new')
self.assertEqual(other_setting.email, 'other-other@example.com')
# Check that the other setting was not updated
default_setting = TestSetting.for_site(self.default_site)
self.assertEqual(default_setting.title, 'default')
self.assertEqual(default_setting.email, 'default@example.com')
class TestAdminPermission(TestCase, WagtailTestUtils):
def test_registered_permission(self):
permission = Permission.objects.get_by_natural_key(
app_label='tests', model='testsetting', codename='change_testsetting')
for fn in hooks.get_hooks('register_permissions'):
if permission in fn():
break
else:
self.fail('Change permission for tests.TestSetting not registered')
class TestEditHandlers(TestCase):
def setUp(self):
get_setting_edit_handler.cache_clear()
def test_default_model_introspection(self):
handler = get_setting_edit_handler(TestSetting)
self.assertIsInstance(handler, ObjectList)
self.assertEqual(len(handler.children), 2)
first = handler.children[0]
self.assertIsInstance(first, FieldPanel)
self.assertEqual(first.field_name, 'title')
second = handler.children[1]
self.assertIsInstance(second, FieldPanel)
self.assertEqual(second.field_name, 'email')
def test_with_custom_panels(self):
handler = get_setting_edit_handler(PanelSettings)
self.assertIsInstance(handler, ObjectList)
self.assertEqual(len(handler.children), 1)
first = handler.children[0]
self.assertIsInstance(first, FieldPanel)
self.assertEqual(first.field_name, 'title')
def test_with_custom_edit_handler(self):
handler = get_setting_edit_handler(TabbedSettings)
self.assertIsInstance(handler, TabbedInterface)
self.assertEqual(len(handler.children), 2)
|
|
__author__ = ["Nurendra Choudhary <nurendrachoudhary31@gmail.com>", "Anoop Kunchukuttan <anoop.kunchukuttan@gmail.com>"]
__license__ = "GPLv3"
# Indic NLP Library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Indic NLP Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indic NLP Library. If not, see <http://www.gnu.org/licenses/>.
#
## language codes
LC_TA='ta'
SCRIPT_RANGES={
'pa':[0x0a00,0x0a7f] ,
'gu':[0x0a80,0x0aff] ,
'or':[0x0b00,0x0b7f] ,
'ta':[0x0b80,0x0bff] ,
'te':[0x0c00,0x0c7f] ,
'kn':[0x0c80,0x0cff] ,
'ml':[0x0d00,0x0d7f] ,
'si':[0x0d80,0x0dff] ,
'hi':[0x0900,0x097f] ,
'mr':[0x0900,0x097f] ,
'kK':[0x0900,0x097f] ,
'sa':[0x0900,0x097f] ,
'ne':[0x0900,0x097f] ,
'sd':[0x0900,0x097f] ,
'bn':[0x0980,0x09ff] ,
'as':[0x0980,0x09ff] ,
}
URDU_RANGES=[
[0x0600,0x06ff],
[0x0750,0x077f],
[0xfb50,0xfdff],
[0xfe70,0xfeff],
]
COORDINATED_RANGE_START_INCLUSIVE=0
COORDINATED_RANGE_END_INCLUSIVE=0x6f
NUMERIC_OFFSET_START=0x66
NUMERIC_OFFSET_END=0x6f
HALANTA_OFFSET=0x4d
AUM_OFFSET=0x50
NUKTA_OFFSET=0x3c
RUPEE_SIGN=0x20b9
DANDA=0x0964
DOUBLE_DANDA=0x0965
#TODO: add missing fricatives and approximants
VELAR_RANGE=[0x15,0x19]
PALATAL_RANGE=[0x1a,0x1e]
RETROFLEX_RANGE=[0x1f,0x23]
DENTAL_RANGE=[0x24,0x29]
LABIAL_RANGE=[0x2a,0x2e]
# verify
VOICED_LIST=[0x17,0x18,0x1c,0x1d,0x21,0x22,0x26,0x27,0x2c,0x2d]
UNVOICED_LIST=[0x15,0x16,0x1a,0x1b,0x1f,0x20,0x24,0x25,0x2a,0x2b] #TODO: add sibilants/sonorants
ASPIRATED_LIST=[0x16,0x18,0x1b,0x1d,0x20,0x22,0x25,0x27,0x2b,0x2d]
UNASPIRATED_LIST=[0x15,0x17,0x1a,0x1c,0x1f,0x21,0x24,0x26,0x2a,0x2c]
NASAL_LIST=[0x19,0x1e,0x23,0x28,0x29,0x2d]
FRICATIVE_LIST=[0x36,0x37,0x38]
APPROXIMANT_LIST=[0x2f,0x30,0x31,0x32,0x33,0x34,0x35]
#TODO: ha has to be properly categorized
def get_offset(c,lang):
"""
Applicable to Brahmi derived Indic scripts
"""
return ord(c)-SCRIPT_RANGES[lang][0]
def offset_to_char(c,lang):
"""
Applicable to Brahmi derived Indic scripts
"""
return chr(c+SCRIPT_RANGES[lang][0])
def in_coordinated_range(c_offset):
"""
Applicable to Brahmi derived Indic scripts
"""
return (c_offset>=COORDINATED_RANGE_START_INCLUSIVE and c_offset<=COORDINATED_RANGE_END_INCLUSIVE)
def is_indiclang_char(c,lang):
"""
Applicable to Brahmi derived Indic scripts
"""
o=get_offset(c,lang)
return (o>=0 and o<=0x7f) or ord(c)==DANDA or ord(c)==DOUBLE_DANDA
def is_vowel(c,lang):
"""
Is the character a vowel
"""
o=get_offset(c,lang)
return (o>=0x04 and o<=0x14)
def is_vowel_sign(c,lang):
"""
Is the character a vowel sign (maatraa)
"""
o=get_offset(c,lang)
return (o>=0x3e and o<=0x4c)
def is_halanta(c,lang):
"""
Is the character the halanta character
"""
o=get_offset(c,lang)
return (o==HALANTA_OFFSET)
def is_nukta(c,lang):
"""
Is the character the nukta character
"""
o=get_offset(c,lang)
return (o==NUKTA_OFFSET)
def is_aum(c,lang):
"""
Is the character the Aum (Om) sign
"""
o=get_offset(c,lang)
return (o==AUM_OFFSET)
def is_consonant(c,lang):
"""
Is the character a consonant
"""
o=get_offset(c,lang)
return (o>=0x15 and o<=0x39)
def is_velar(c,lang):
"""
Is the character a velar
"""
o=get_offset(c,lang)
return (o>=VELAR_RANGE[0] and o<=VELAR_RANGE[1])
def is_palatal(c,lang):
"""
Is the character a palatal
"""
o=get_offset(c,lang)
return (o>=PALATAL_RANGE[0] and o<=PALATAL_RANGE[1])
def is_retroflex(c,lang):
"""
Is the character a retroflex
"""
o=get_offset(c,lang)
return (o>=RETROFLEX_RANGE[0] and o<=RETROFLEX_RANGE[1])
def is_dental(c,lang):
"""
Is the character a dental
"""
o=get_offset(c,lang)
return (o>=DENTAL_RANGE[0] and o<=DENTAL_RANGE[1])
def is_labial(c,lang):
"""
Is the character a labial
"""
o=get_offset(c,lang)
return (o>=LABIAL_RANGE[0] and o<=LABIAL_RANGE[1])
def is_voiced(c,lang):
"""
Is the character a voiced consonant
"""
o=get_offset(c,lang)
return o in VOICED_LIST
def is_unvoiced(c,lang):
"""
Is the character an unvoiced consonant
"""
o=get_offset(c,lang)
return o in UNVOICED_LIST
def is_aspirated(c,lang):
"""
Is the character an aspirated consonant
"""
o=get_offset(c,lang)
return o in ASPIRATED_LIST
def is_unaspirated(c,lang):
"""
Is the character an unaspirated consonant
"""
o=get_offset(c,lang)
return o in UNASPIRATED_LIST
def is_nasal(c,lang):
"""
Is the character a nasal consonant
"""
o=get_offset(c,lang)
return o in NASAL_LIST
def is_fricative(c,lang):
"""
Is the character a fricative consonant
"""
o=get_offset(c,lang)
return o in FRICATIVE_LIST
def is_approximant(c,lang):
"""
Is the character an approximant consonant
"""
o=get_offset(c,lang)
return o in APPROXIMANT_LIST
def is_number(c,lang):
"""
Is the character a number
"""
o=get_offset(c,lang)
return (o>=0x66 and o<=0x6f)
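# --- Usage sketch (not part of the original module) ---
# All predicates work on the script-block offset of a character, so the same
# tests apply across the Brahmi-derived scripts listed in SCRIPT_RANGES.
# For Devanagari ('hi'):
#
# >>> get_offset(u'\u0915', 'hi')                # DEVANAGARI LETTER KA
# 21                                             # == 0x15, start of the velar range
# >>> is_consonant(u'\u0915', 'hi'), is_velar(u'\u0915', 'hi')
# (True, True)
# >>> is_vowel(u'\u0906', 'hi')                  # DEVANAGARI LETTER AA (offset 0x06)
# True
# >>> is_number(u'\u0966', 'hi')                 # DEVANAGARI DIGIT ZERO (offset 0x66)
# True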
|
|
#!/usr/bin/env python
# standard library
import itertools
import random
# third party
import dendropy as dpy
import numpy as np
from tree_distance import PhyloTree
# treeCl
from .errors import optioncheck, SupportValueError  # SupportValueError is raised in collapse() below
from .constants import ISPY3
from .utils import fileIO, weighted_choice
from .utils.decorators import lazyprop
from .utils.math import truncated_exponential
import logging
logger = logging.getLogger(__name__)
def cast(dendropy_tree):
""" Cast dendropy.Tree instance as Tree instance """
return Tree(dendropy_tree.as_string('newick', suppress_rooting=True) + ';')
def _infinite_labels_generator(labels, start=2, shuffle=True):
l = len(labels)
loop1 = random.sample(labels, l) if shuffle else labels
return itertools.chain.from_iterable([loop1, ('{}{}'.format(x, y) for x, y in
zip(itertools.cycle(labels),
itertools.chain.from_iterable(
itertools.repeat(i, len(loop1)) for i
in
itertools.count(start, 1))))])
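# Example (illustrative, not from the original source): with shuffle=False and the
# default start=2, _infinite_labels_generator(['a', 'b'], shuffle=False) yields
# 'a', 'b', 'a2', 'b2', 'a3', 'b3', ... -- the original labels once, then
# numbered copies forever.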
def edge_length_check(length, edge):
""" Raises error if length is not in interval [0, edge.length] """
try:
assert 0 <= length <= edge.length
except AssertionError:
if length < 0:
raise TreeError('Negative edge-lengths are disallowed')
raise TreeError(
'This edge isn\'t long enough to prune at length {0}\n'
'(Edge length = {1})'.format(length, edge.length))
def rootcheck(edge, msg='This is the root edge'):
""" Raises error if edge is the root edge (has no tail node) """
if not edge.tail_node:
raise TreeError(msg)
def logn_correlated_rate(parent_rate, branch_length, autocorrel_param, size=1):
"""
The log of the descendant rate, ln(Rd), is ~ N(mu, bl*ac), where
the variance = bl*ac = branch_length * autocorrel_param, and mu is set
so that E[Rd] = Rp:
E[X] where ln(X) ~ N(mu, sigma^2) = exp(mu+(1/2)*sigma_sq)
so Rp = exp(mu+(1/2)*bl*ac),
ln(Rp) = mu + (1/2)*bl*ac,
ln(Rp) - (1/2)*bl*ac = mu,
so ln(Rd) ~ N(ln(Rp) - (1/2)*bl*ac, bl*ac)
(NB: Var[Rd] = Rp^2 * (exp(bl*ac)-1),
Std[Rd] = Rp * sqrt(exp(bl*ac)-1))
See: H Kishino, J L Thorne, and W J Bruno (2001)
"""
if autocorrel_param <= 0:
raise Exception('Autocorrelation parameter must be greater than 0')
variance = branch_length * autocorrel_param
stdev = np.sqrt(variance)
ln_descendant_rate = np.random.normal(np.log(parent_rate) - 0.5 * variance,
scale=stdev, size=size)
descendant_rate = np.exp(ln_descendant_rate)
return float(descendant_rate) if size == 1 else descendant_rate
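# Hedged sanity-check sketch (not part of the original source): the derivation
# above says the autocorrelated rate is mean-preserving, E[Rd] = Rp. The helper
# name below is illustrative only, not part of the treeCl API.
def _check_logn_rate_mean_preserving(parent_rate=2.0, branch_length=0.5,
                                     autocorrel_param=0.1, n=200000):
    """Return (empirical mean of descendant rates, parent rate); they should agree."""
    draws = logn_correlated_rate(parent_rate, branch_length, autocorrel_param, size=n)
    return float(np.mean(draws)), parent_rate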
class TreeError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class SPR(object):
""" Subtree prune and regraft functionality """
def __init__(self, tree):
self.tree = tree
def _check_single_outgroup(self):
"""
If only one (or none) of the seed node children is not a leaf node
it is not possible to prune that edge and make a topology-changing
regraft.
"""
root_child_nodes = self.tree._tree.seed_node.child_nodes()
not_leaves = np.logical_not([n.is_leaf() for n in root_child_nodes])
if not_leaves[not_leaves].size <= 1:
return [root_child_nodes[np.where(not_leaves)[0]].edge]
return []
def prune(self, edge, length=None):
""" Prunes a subtree from the main Tree, retaining an edge length
specified by length (defaults to entire length). The length is sanity-
checked by edge_length_check, to ensure it is within the bounds
[0, edge.length].
Returns the basal node of the pruned subtree. """
length = length or edge.length
edge_length_check(length, edge)
n = edge.head_node
self.tree._tree.prune_subtree(n, suppress_unifurcations=False)
n.edge_length = length
self.tree._dirty = True
return n
def regraft(self, edge, node, length=None):
""" Grafts a node onto an edge of the Tree, at a point specified by
length (defaults to middle of edge). """
rootcheck(edge, 'SPR regraft is not allowed on the root edge')
length = length or edge.length / 2. # Length measured from head to tail
edge_length_check(length, edge)
t = edge.tail_node
h = edge.head_node
new = t.new_child(edge_length=edge.length - length)
t.remove_child(h)
new.add_child(h)
h.edge.length=length
new.add_child(node)
self.tree._dirty = True
self.tree._tree.encode_bipartitions(suppress_unifurcations=True)
def spr(self, prune_edge, length1, regraft_edge, length2):
assert (regraft_edge.head_node
not in prune_edge.head_node.preorder_iter())
node = self.prune(prune_edge, length1)
self.regraft(regraft_edge, node, length2)
self.tree._dirty = True
def rspr(self, disallow_sibling_sprs=False,
keep_entire_edge=False, rescale=False):
""" Random SPR, with prune and regraft edges chosen randomly, and
lengths drawn uniformly from the available edge lengths.
N1: disallow_sibling_sprs prevents sprs that don't alter the topology
of the tree """
starting_length = self.tree._tree.length()
excl = [self.tree._tree.seed_node.edge] # exclude r
if disallow_sibling_sprs:
excl.extend(self._check_single_outgroup())
prune_edge, l1 = self.tree.map_event_onto_tree(excl)
if keep_entire_edge:
l1 = prune_edge.length
prune_edge_child_nodes = prune_edge.head_node.preorder_iter()
excl.extend([node.edge for node in prune_edge_child_nodes])
if disallow_sibling_sprs:
sibs = [node.edge for node in prune_edge.head_node.sister_nodes()]
par = prune_edge.tail_node.edge
sibs.append(par)
for edge in sibs:
if edge not in excl:
excl.append(edge)
if set(self.tree._tree.preorder_edge_iter()) - set(excl) == set([]):
print(repr(self.tree))
print(self.tree._tree.as_ascii_plot())
# print(edges[prune_edge])
raise Exception('No non-sibling sprs available')
regraft_edge, l2 = self.tree.map_event_onto_tree(excl)
# edges, nodes, redges, rnodes = self.tree._name_things()
# print(edges[prune_edge], l1, edges[regraft_edge], l2)
self.spr(prune_edge, l1, regraft_edge, l2)
if rescale:
self.tree.scale(starting_length / self.tree.length())
self.tree._dirty = True
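# Usage sketch (illustrative, not part of the original module): apply a single
# random, topology-changing SPR to a copy of a tree. The newick string and the
# helper name are assumptions for demonstration only.
def _demo_random_spr():
    tree = Tree('((A:1,B:1):1,(C:1,D:1):1);')  # Tree is defined later in this module
    spr = SPR(tree.copy())
    spr.rspr(disallow_sibling_sprs=True)
    return spr.tree
# The Tree class exposes the same operation as a convenience wrapper: tree.rspr(times=1, ...)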
class LGT(object):
def __init__(self, tree):
self.SPR = SPR(tree)
self.tree = self.SPR.tree
try:
self.tree._tree.calc_node_ages()
except:
raise Exception('Tree is not ultrametric')
def get_time(self, *args):
e, l = self.tree.map_event_onto_tree(*args)
time = l + e.head_node.age
return time
def matching_edges(self, time):
def edge_matches_time(edge):
if edge.tail_node is None:
return False
return edge.head_node.age < time < edge.tail_node.age
matching_edges = self.tree._tree.preorder_edge_iter(edge_matches_time)
return list(matching_edges)
def rlgt(self, time=None, disallow_sibling_lgts=False):
self.tree._tree.calc_node_ages()
excl = [self.tree._tree.seed_node.edge]
if time is None:
if disallow_sibling_lgts:
self.add_single_node()
children = self.tree._tree.seed_node.child_nodes()
excl.extend([n.edge for n in children])
time = self.get_time(excl)
print('time = {0}'.format(time))
self.tree._tree.encode_bipartitions()
self.tree._dirty = True
else:
time = self.get_time(excl)
print('time = {0}'.format(time))
matching_edges = self.matching_edges(time)
donor = random.sample(matching_edges, 1)[0]
matching_edges.remove(donor)
if disallow_sibling_lgts:
sibs = donor.head_node.sister_nodes()
for sib in sibs:
if sib.edge in matching_edges:
matching_edges.remove(sib.edge)
receiver = random.sample(matching_edges, 1)[0]
l1 = time - receiver.head_node.age
l2 = time - donor.head_node.age
self.SPR.spr(receiver, l1, donor, l2)
self.tree._tree.calc_node_ages()
def add_single_node(self):
cn = self.tree._tree.seed_node.child_nodes()
el = lambda n: n.edge_length
sh = min(cn, key=el)
lo = max(cn, key=el)
new = self.tree._tree.seed_node.new_child(edge_length=sh.edge_length)
self.tree._tree.prune_subtree(lo, suppress_unifurcations=False)
lo.edge_length -= sh.edge_length
new.add_child(lo)
self.tree._tree.encode_bipartitions(suppress_unifurcations=False)
self.tree._tree.calc_node_ages()
self.tree._dirty = True
class NNI(object):
def __init__(self, tree):
self.tree = tree
def _validate(self):
excludes = [self.tree._tree.seed_node] + self.tree._tree.leaf_nodes()
if self.tree.rooted:
child_a, child_b = self.tree._tree.seed_node.child_nodes()
if child_a in excludes:
excludes.append(child_b)
if child_b in excludes:
excludes.append(child_a)
self.valid_nodes = set([n for n in self.tree._tree.nodes() if not n in excludes])
def choose_node(self, use_weighted_choice=False, transform=None):
self._validate()
if use_weighted_choice:
weights = np.array([n.edge.length for n in self.valid_nodes])
if any(weight is None for weight in weights):
logger.debug('Not all weights were valid: {}'.format(weights))
weights = np.array([1.0 for n in self.valid_nodes])
logger.debug('Weights (weighted choice=True): {}'.format(weights))
if transform is not None:
weights = transform(weights)
logger.debug('Weights (transform=not None): {}'.format(weights))
else:
weights = np.array([1.0 for n in self.valid_nodes])
logger.debug('Weights (weighted choice=False): {}'.format(weights))
return weighted_choice(list(zip(self.valid_nodes, weights)))
def get_exchangeable_nodes(self, n):
"""
A C | Subtrees A, B, C and D are the exchangeable nodes
\ / | around the edge headed by n
-->n | The NNI exchanges either A or B with either C or D
/ \
B D
A C C A | Subtree A is exchanged
\ / +NNI(A,C) \ / | with subtree C.
-->n ==========> -->n
/ \ / \
B D B D
"""
parent = n.parent_node
a, b = random.sample(n.child_nodes(), 2)
if parent.parent_node is None:
if self.tree.rooted:
c, d = random.sample(n.sister_nodes()[0].child_nodes(), 2)
else:
c, d = random.sample(n.sister_nodes(), 2)
else:
c = random.choice(n.sister_nodes())
d = random.choice(parent.sister_nodes())
return a, b, c, d
def do_nni(self, node_1, node_2):
parent_1 = node_1.parent_node
parent_2 = node_2.parent_node
parent_1.remove_child(node_1)
parent_2.remove_child(node_2)
parent_1.add_child(node_2)
parent_2.add_child(node_1)
self.tree._tree.encode_bipartitions()
def rnni(self, use_weighted_choice=False, transform=None):
n = self.choose_node(use_weighted_choice, transform)
a, b, c, d = self.get_exchangeable_nodes(n)
self.do_nni(random.choice([a, b]), random.choice([c, d]))
def collapse(t, threshold=None, keep_lengths=True, support_key=None, length_threshold=0.0):
to_collapse = []
for node in t._tree.postorder_node_iter():
if node.is_leaf():
if node.edge_length < length_threshold:
node.edge_length = 0
continue
if node is t.seed_node:
continue
if threshold is not None:
try:
if support_key:
support = float(node.annotations.get_value(support_key))
node.label = support
else:
support = float(node.label)
except TypeError as e:
raise SupportValueError('Inner node with length {} has no support value'.format(node.edge_length), e)
except ValueError as e:
raise SupportValueError(
'Inner node with length {} has a non-numeric support value {}'.format(node.edge_length, node.label), e)
if support < threshold:
to_collapse.append(node.edge)
if node.edge_length < length_threshold:
to_collapse.append(node.edge)
for edge in to_collapse:
if keep_lengths:
for child in edge.head_node.child_nodes():
child.edge.length += edge.length
edge.collapse()
return t
class UltrametricNNI(NNI):
def __init__(self, tree):
super(UltrametricNNI, self).__init__(tree)
def _make_ultrametric(self):
leaves = list(self.tree._tree.leaf_node_iter())
root_tip_dists = [leaf.distance_from_root() for leaf in leaves]
mean_tip_dist = np.mean(root_tip_dists)
for dist, leaf in zip(root_tip_dists, leaves):
leaf.edge.length += (mean_tip_dist - dist)
def _validate(self):
super(UltrametricNNI, self)._validate()
self._make_ultrametric()
self.tree._tree.calc_node_ages()
def do_nni(self, node1, node2, node3, node4):
pass
class ILS(object):
def __init__(self, tree):
self.minlen=0
self.tree = tree
self._validate()
def _make_ultrametric(self):
leaves = list(self.tree._tree.leaf_node_iter())
root_tip_dists = [leaf.distance_from_root() for leaf in leaves]
mean_tip_dist = np.mean(root_tip_dists)
for dist, leaf in zip(root_tip_dists, leaves):
leaf.edge.length += (mean_tip_dist - dist)
def _break_ties(self):
collapse(self.tree, keep_lengths=True, length_threshold=self.minlen)
self.tree._tree.resolve_polytomies()
def _validate(self):
for edge in self.tree._tree.preorder_edge_iter():
if np.isnan(edge.length) or edge.length <= self.minlen:
edge.length = self.minlen
self._break_ties()
self._make_ultrametric()
self.tree._tree.calc_node_ages()
excludes = [self.tree._tree.seed_node] + self.tree._tree.seed_node.child_nodes() + self.tree._tree.leaf_nodes()
self.valid_nodes = self.tree._tree.nodes(filter_fn=lambda x: not x in excludes)
def choose_node(self, use_weighted_choice=False, transform=None):
self._validate()
if use_weighted_choice:
weights = np.array([n.edge.length for n in self.valid_nodes])
if any(weight is None for weight in weights):
logger.debug('Not all weights were valid: {}'.format(weights))
weights = np.array([1.0 for n in self.valid_nodes])
logger.debug('Weights (weighted choice=True): {}'.format(weights))
if transform is not None:
weights = transform(weights)
logger.debug('Weights (transform=not None): {}'.format(weights))
else:
weights = np.array([1.0 for n in self.valid_nodes])
logger.debug('Weights (weighted choice=False): {}'.format(weights))
return weighted_choice(list(zip(self.valid_nodes, weights)))
def get_matching_edge(self, starting_node, time):
def edge_matches_time(edge):
if edge.tail_node is None:
return False
return edge.head_node.age < time < edge.tail_node.age
if time > starting_node.parent_node.age:
for node in starting_node.parent_node.ancestor_iter():
if edge_matches_time(node.edge):
return node.edge
else:
sister = starting_node.sister_nodes()[0].edge
if edge_matches_time(sister):
return sister
else:
raise ValueError('No matching edge was found')
def ils(self, node, sorting_times=None, force_topology_change=True):
"""
A constrained approximation of ILS using nearest-neighbour interchange
Process
-------
A node with at least three descendants is selected from an ultrametric tree
(node '2', below)
---0--... ---0--... ---0--...
| | | | --1-- |
| R --1-- R | | R
age | | | -2- |
^ | | | | | |
| --1-- -2- | | | |
| | | or | | | or | | |
| | | | | | | | |
| -2- | | | | | | |
| | | | | | | | | |
| A B C C B A A C B
Nodes 'A', 'B' and 'C' are rearranged into one of the three configurations
[(A, B), C], [A, (B, C)], [(A, C), B]
Nodes 1 and 2 are slid further up the tree, but no further than node 0
(this is why it's a constrained version), by an amount drawn from a
truncated exponential distribution.
This approximately corresponds to the case where A and B failed to
coalesce in the branch 1->2, so they coalesce with C in the branch
0 -> 1 instead
"""
# node = '2', par = '1', gpar = '0' -- in above diagram
n_2 = node
n_1 = n_2.parent_node
if n_1 == self.tree._tree.seed_node:
logger.warn('Node 1 is the root - calling again on child')
self.ils(n_2.child_nodes())
n_0 = n_1.parent_node
a, b = node.child_nodes()
c, = node.sister_nodes()
ages = [a.age, b.age, c.age, n_2.age, n_1.age, n_0.age]
# Do topology changes
if force_topology_change:
swap_mode = random.choice([1, 2])
else:
swap_mode = random.choice([0, 1, 2])
if swap_mode == 1:
# Exchange 'a' and 'c'
n_2.remove_child(a)
n_1.remove_child(c)
n_2.add_child(c)
n_1.add_child(a)
elif swap_mode == 2:
# Exchange 'b' and 'c'
n_2.remove_child(b)
n_1.remove_child(c)
n_2.add_child(c)
n_1.add_child(b)
# Do branch length adjustments
# Bounds - between node 0 (upper) and node 1 (lower)
min_unsorted_age = n_1.age
max_unsorted_age = n_0.age
if sorting_times is None:
sorting_times = truncated_exponential(max_unsorted_age-min_unsorted_age,
scale=0.1*(max_unsorted_age-min_unsorted_age),
sample_size=2) # E(t) = n(n-1)/2, n = 3
sorting_times += min_unsorted_age
sorting_times = np.array([min_unsorted_age, ages[3]])
# Adjust node 1 edge length
new_n1_age = max(sorting_times)
prev_age = ages[4]
slide = (new_n1_age - prev_age)
if slide < 1e-6:
slide = 0
new_n1_age = prev_age
n_1.edge.length -= slide
n_2.edge.length += slide
# Adjust node 2 edge length
new_n2_age = min(sorting_times)
prev_age = ages[3]
slide = (new_n2_age - prev_age)
if slide < 1e-6:
slide = 0
new_n2_age = prev_age
n_2.edge.length -= slide
# Adjust a, b and c edge lengths
if swap_mode == 0:
a.edge.length = (new_n2_age - ages[0])
b.edge.length = (new_n2_age - ages[1])
c.edge.length = (new_n1_age - ages[2])
elif swap_mode == 1:
a.edge.length = (new_n1_age - ages[0])
b.edge.length = (new_n2_age - ages[1])
c.edge.length = (new_n2_age - ages[2])
else:
a.edge.length = (new_n2_age - ages[0])
b.edge.length = (new_n1_age - ages[1])
c.edge.length = (new_n2_age - ages[2])
# used to be .reindex_taxa() before dendropy 4;
# migrate_taxon_namespace is the recommended replacement,
# though it may no longer be needed here.
self.tree._tree.migrate_taxon_namespace(self.tree._tree.taxon_namespace)
self.tree._tree.encode_bipartitions()
self._validate()
logger.debug(self.tree)
def rils(self, use_weighted_choice=True, transform=None):
n = self.choose_node(use_weighted_choice, transform)
logger.debug('Chosen node = {} age = {} parent age = {}'.format([leaf.taxon.label for leaf in n.leaf_nodes()], n.age, n.parent_node.age))
self.ils(n)
# def ils_(self, node, sorting_times=None):
# unsorted_descendants = node.child_nodes()
# logger.info('Child 1 = {} age = {}'.format([leaf.taxon.label for leaf in unsorted_descendants[0].leaf_nodes()], unsorted_descendants[0].age))
# logger.info('Child 1 = {} age = {}'.format([leaf.taxon.label for leaf in unsorted_descendants[1].leaf_nodes()], unsorted_descendants[1].age))
# min_unsorted_age = max(node.age, node.sister_nodes()[0].age)
# logger.debug('Node age = {}, sister age = {}'.format(node.age, node.sister_nodes()[0].age))
# max_unsorted_age = self.tree.seed_node.age
# if sorting_times is None:
# sorting_times = truncated_exponential(max_unsorted_age-min_unsorted_age,
# scale=0.5*(max_unsorted_age-min_unsorted_age),
# sample_size=2) # E(t) = n(n-1)/2, n = 2
# sorting_times += min_unsorted_age
# if np.any(sorting_times > max_unsorted_age): logger.error('Sorting times too large: {}'.format(sorting_times))
# logger.info('Min/Max ages = {} {}'.format(min_unsorted_age, max_unsorted_age))
# logger.info('Sorting occurs = {} {}'.format(*sorting_times))
# random.shuffle(unsorted_descendants)
# c1, c2 = unsorted_descendants
# time1 = max(sorting_times)
# time2 = min(sorting_times)
# donor1 = self.get_matching_edge(node, time1)
# donor2 = self.get_matching_edge(node, time2)
# logger.info('Stage 0 - initial tree')
# logger.info('Stage 1 - remove c1')
# self.tree.print_plot(plot_metric='length')
# node.remove_child(c1)
# logger.info('Stage 2 - remove c2')
# self.tree.print_plot(plot_metric='length')
# node.remove_child(c2)
# c1.edge.length = time1 - c1.age
# c2.edge.length = time2 - c2.age
# logger.info('Stage 3 - regraft c1 at time={}'.format(time1))
# self.tree.print_plot(plot_metric='length')
# self.SPR.regraft(donor1, c1, time1 - donor1.head_node.age)
# logger.info('Stage 4 - regraft c2 at time={}'.format(time2))
# self.tree.print_plot(plot_metric='length')
# self.SPR.regraft(donor2, c2, time2 - donor2.head_node.age)
# self.tree.print_plot(plot_metric='length')
# self.tree.prune_subtree(node, suppress_unifurcations=True)
# logger.info('Stage Final')
# self.tree.print_plot(plot_metric='length')
# self.tree.encode_bipartitions()
# self.tree.reindex_taxa()
# self.tree.calc_node_ages()
# self._validate()
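# Usage sketch (illustrative, not part of the original module): ILS expects an
# ultrametric tree, which Tree.new_coal() produces. rils() picks an internal node
# (optionally weighted by branch length) and applies the constrained NNI-style
# rearrangement documented in ils() above. The helper name is an assumption.
def _demo_random_ils():
    tree = Tree.new_coal(nspecies=8)  # Tree is defined later in this module
    ils = ILS(tree.copy())
    ils.rils(use_weighted_choice=True)
    return ils.tree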
class NNI2(object):
def __init__(self, tree):
self.tree = tree
if tree.rooted:
self.reroot = True
self.rooting_info = self.tree.reversible_deroot()
else:
self.reroot = False
self.rooting_info = None
def get_children(self, inner_edge):
""" Given an edge in the tree, returns the child nodes of the head and
the tail nodes of the edge, for instance:
A C | A, B, C and D are the children of the edge --->,
\ / | C and D are the head node children, and A and B
t--->h | are the tail node children.
/ \
B D | Output: {'head': [<C>, <D>], 'tail': [<A>, <B>]}
N1: Edges are directional in dendropy trees. The head node of an
edge is automatically a child of the tail node, but we don't want this.
"""
h = inner_edge.head_node
t = inner_edge.tail_node
if not self.tree._tree.seed_node == t:
original_seed = self.tree._tree.seed_node
self.tree._tree.reseed_at(t)
else:
original_seed = None
head_children = h.child_nodes()
tail_children = list(set(t.child_nodes()) - {h}) # See N1
if original_seed:
self.tree._tree.reseed_at(original_seed)
return {'head': head_children, 'tail': tail_children}
def nni(
self,
edge,
head_subtree,
tail_subtree,
):
""" *Inplace* Nearest-neighbour interchange (NNI) operation.
An edge in the tree has two or more subtrees at each end (ends are
designated 'head' and 'tail'). The NNI operation exchanges one of the
head subtrees for one of the tail subtrees, as follows:
A C C A | Subtree A is exchanged
\ / +NNI(A,C) \ / | with subtree C.
---> ==========> ---> |
/ \ / \ |
B D B D
"""
# This implementation works on unrooted Trees. If the input Tree is
# rooted, the ReversibleDeroot decorator will temporarily unroot the
# tree while the NNI is carried out
original_seed = self.tree._tree.seed_node
head = edge.head_node
tail = edge.tail_node
self.tree._tree.reseed_at(tail)
try:
assert head_subtree.parent_node == head
assert tail_subtree.parent_node == tail
except:
print(head, tail, head_subtree, tail_subtree)
raise
head.remove_child(head_subtree)
tail.remove_child(tail_subtree)
head.add_child(tail_subtree)
tail.add_child(head_subtree)
self.tree._tree.reseed_at(original_seed)
self.tree._tree.encode_bipartitions()
self.tree._dirty = True
def reroot_tree(self):
if self.reroot and self.rooting_info is not None:
self.tree._tree.reroot_at_edge(*self.rooting_info)
self.tree._tree.encode_bipartitions()
self.tree._dirty = True
return self.tree
def rnni(self, use_weighted_choice=False, invert_weights=False):
"""
Apply a random NNI operation at a randomly selected edge
The edge can be chosen uniformly, or weighted by length --
invert_weights favours short edges.
"""
if use_weighted_choice:
leaves = list(self.tree._tree.leaf_edge_iter())
e, _ = self.tree.map_event_onto_tree(excluded_edges=leaves, invert_weights=invert_weights)
else:
e = random.choice(self.tree.get_inner_edges())
children = self.get_children(e)
h = random.choice(children['head'])
t = random.choice(children['tail'])
self.nni(e, h, t)
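# Usage sketch (illustrative, not part of the original module): NNI2 operates on
# unrooted trees; a rooted input is temporarily derooted in __init__ and
# reroot_tree() restores the original root afterwards. The helper name is an assumption.
def _demo_random_nni():
    tree = Tree('((A:1,B:1):0.5,(C:1,D:1):0.5);')  # Tree is defined just below
    nni = NNI2(tree.copy())
    nni.rnni(use_weighted_choice=False)
    return nni.reroot_tree()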
class Tree(object):
""" Tree data structure, wraps dendropy Tree class
"""
def __init__(
self,
newick=None,
name=None,
**kwargs
):
if newick:
self._tree = dpy.Tree.get_from_string(newick, 'newick', preserve_underscores=True, **kwargs)
if self.rooted:
self._tree.is_rooted = True
self._tree.encode_bipartitions()
else:
self._tree = dpy.Tree(**kwargs)
self.name = name
self._phylotree = None
self._dirty = False
def __repr__(self):
return '{0}{1}'.format(self.__class__.__name__,
(self.newick if self.newick else '(None)'))
def __str__(self):
""" Represents the object's information inside a newick comment, so is
still interpretable by a (good) newick parser """
s = 'Tree Object: {}\n'.format(self.name)
s += self.newick
return s
def __len__(self):
""" Number of leaves on the Tree. For total branch length use
self.length()"""
return len(self._tree.leaf_nodes())
def __and__(self, other):
""" Overloads & operator:
'self & other' is equivalent to 'self.intersection(other)''
"""
return self.intersection(other)
def __xor__(self, other):
return self.labels ^ other.labels
@property
def labels(self):
""" Returns the taxon set of the tree (same as the label- or
leaf-set) """
return set([n.taxon.label for n in self._tree.leaf_nodes()])
def sample_labels(self, n):
""" Returns a set of n labels sampled from the labels of the tree
:param n: Number of labels to sample
:return: set of randomly sampled labels
"""
if n >= len(self):
return self.labels
sample = random.sample(self.labels, n)
return set(sample)
@property
def newick(self):
"""
For more control the dendropy method self.as_string('newick', **kwargs)
can be used.
KWargs include:
suppress_internal_node_labels [True/False]
- turn on/off bootstrap labels
suppress_rooting [True/False]
- turn on/off [&U] or [&R] rooting
state labels
edge_label_compose_func
- function to convert edge lengths:
takes edge as arg, returns string
"""
n = self._tree.as_string('newick',
suppress_rooting=True,
suppress_internal_node_labels=True)
if n:
return n.strip(';\n') + ';'
return n
@property
def phylotree(self):
"""
Get the c++ PhyloTree object corresponding to this tree.
:return: PhyloTree instance
"""
if not self._phylotree or self._dirty:
try:
if ISPY3:
self._phylotree = PhyloTree(self.newick.encode(), self.rooted)
else:
self._phylotree = PhyloTree(self.newick, self.rooted)
except ValueError:
logger.error('Couldn\'t convert to C++ PhyloTree -- are there bootstrap values?')
self._dirty = False
return self._phylotree
@property
def seed_node(self):
return self._tree.seed_node
@newick.setter
def newick(self, newick_string):
if self.newick:
print('Newick string already loaded: {0}'.format(self.newick))
return
self._tree = dpy.Tree.get_from_string(newick_string, 'newick')
@property
def rooted(self):
""" Predicate testing for rootedness by checking for a bifurcation
at the root. """
return len(self._tree.seed_node.child_nodes()) == 2 if self.newick else None
@classmethod
def bifurcate_base(cls, newick):
""" Rewrites a newick string so that the base is a bifurcation
(rooted tree) """
t = cls(newick)
t._tree.resolve_polytomies()
return t.newick
@classmethod
def trifurcate_base(cls, newick):
""" Rewrites a newick string so that the base is a trifurcation
(usually means an unrooted tree) """
t = cls(newick)
t._tree.deroot()
return t.newick
def copy(self):
""" Returns an independent copy of self """
return self.__class__(self.newick)
def deroot(self):
""" Unroot the tree, inplace """
self._tree.deroot()
def get_inner_edges(self):
""" Returns a list of the internal edges of the tree. """
inner_edges = [e for e in self._tree.preorder_edge_iter() if e.is_internal()
and e.head_node and e.tail_node]
return inner_edges
def get_nonroot_edges(self):
return [e for e in self._tree.preorder_edge_iter()
if e.head_node and e.tail_node]
def intersection(self, other):
""" Returns the intersection of the taxon sets of two Trees """
taxa1 = self.labels
taxa2 = other.labels
return taxa1 & taxa2
def map_event_onto_tree(self, excluded_edges=None, invert_weights=False):
edge_list = list(self._tree.preorder_edge_iter())
if excluded_edges is not None:
if not isinstance(excluded_edges, list):
excluded_edges = [excluded_edges]
for excl in excluded_edges:
try:
edge_list.remove(excl)
except ValueError:
print('Excluded_edges list includes some things')
print('that aren\'t in the tree')
print('like this one:', excl)
lengths = np.array([edge.length for edge in edge_list])
if invert_weights:
lengths = 1/lengths
cumulative_lengths = lengths.cumsum()
rnum = np.random.random() * cumulative_lengths[-1]
index = cumulative_lengths.searchsorted(rnum)
chosen_edge = edge_list[index]
from_head_length = cumulative_lengths[index] - rnum
return chosen_edge, from_head_length
def multifurcate(self, threshold=1e-06, update_splits=True):
for edge in self._tree.postorder_edge_iter():
if edge.is_internal():
if edge.length <= threshold:
edge.collapse()
self._dirty = True
if update_splits:
self._tree.encode_bipartitions()
def ntaxa(self):
return len(self)
def pairdist(self, taxon_label1, taxon_label2):
if self.patristic is None:
print('Error calculating patristic distances - maybe this '
'tree has no branch lengths?')
return
leaf1 = self._tree.find_node_with_taxon_label(taxon_label1)
leaf2 = self._tree.find_node_with_taxon_label(taxon_label2)
if leaf1:
taxon1 = leaf1.taxon
else:
print('Couldn\'t find {0} on the tree'.format(taxon_label1))
return
if leaf2:
taxon2 = leaf2.taxon
else:
print('Couldn\'t find {0} on the tree'.format(taxon_label2))
return
return self.patristic(taxon1, taxon2)
@lazyprop
def patristic(self):
try:
pdm = dpy.calculate.treemeasure.PatristicDistanceMatrix(self._tree)
except TypeError:
pdm = None
return pdm
def postorder(self, skip_seed=False):
"""
Return a generator that yields the nodes of the tree in postorder.
If skip_seed=True then the root node is not included.
"""
for node in self._tree.postorder_node_iter():
if skip_seed and node is self._tree.seed_node:
continue
yield node
def preorder(self, skip_seed=False):
"""
Return a generator that yields the nodes of the tree in preorder.
If skip_seed=True then the root node is not included.
"""
for node in self._tree.preorder_node_iter():
if skip_seed and node is self._tree.seed_node:
continue
yield node
def prune_to_subset(self, subset, inplace=False):
""" Prunes the Tree to just the taxon set given in `subset` """
if not subset.issubset(self.labels):
print('"subset" is not a subset')
return
if not inplace:
t = self.copy()
else:
t = self
t._tree.retain_taxa_with_labels(subset)
t._tree.encode_bipartitions()
t._dirty = True
return t
def randomise_branch_lengths(
self,
i=(1, 1),
l=(1, 1),
distribution_func=random.gammavariate,
inplace=False,
):
""" Replaces branch lengths with values drawn from the specified
distribution_func. Parameters of the distribution are given in the
tuples i and l, for interior and leaf nodes respectively. """
if not inplace:
t = self.copy()
else:
t = self
for n in t._tree.preorder_node_iter():
if n.is_internal():
n.edge.length = max(0, distribution_func(*i))
else:
n.edge.length = max(0, distribution_func(*l))
t._dirty = True
return t
def randomise_labels(
self,
inplace=False,
):
""" Shuffles the leaf labels, but doesn't alter the tree structure """
if not inplace:
t = self.copy()
else:
t = self
names = list(t.labels)
random.shuffle(names)
for l in t._tree.leaf_node_iter():
l.taxon._label = names.pop()
t._dirty = True
return t
def reversible_deroot(self):
""" Stores info required to restore rootedness to derooted Tree. Returns
the edge that was originally rooted, the length of e1, and the length
of e2.
Dendropy Derooting Process:
In a rooted tree the root node is bifurcating. Derooting makes it
trifurcating.
Call the two edges leading out of the root node e1 and e2.
Derooting with Tree.deroot() deletes one of e1 and e2 (let's say e2),
and stretches the other to the sum of their lengths. Call this e3.
Rooted tree: Derooted tree:
A A B
|_ B \ /
/ |
/e1 |e3 (length = e1+e2; e2 is deleted)
Root--o ===> |
\e2 Root--o _ C
\ _ C |
| D
D
Reverse this with Tree.reroot_at_edge(edge, length1, length2, ...)
"""
root_edge = self._tree.seed_node.edge
lengths = dict([(edge, edge.length) for edge
in self._tree.seed_node.incident_edges() if edge is not root_edge])
self._tree.deroot()
reroot_edge = (set(self._tree.seed_node.incident_edges())
& set(lengths.keys())).pop()
self._tree.encode_bipartitions()
self._dirty = True
return (reroot_edge, reroot_edge.length - lengths[reroot_edge],
lengths[reroot_edge])
def autocorrelated_relaxed_clock(self, root_rate, autocorrel,
distribution='lognormal'):
"""
Attaches rates to each node according to autocorrelated lognormal
model from Kishino et al.(2001), or autocorrelated exponential
"""
optioncheck(distribution, ['exponential', 'lognormal'])
if autocorrel == 0:
for node in self._tree.preorder_node_iter():
node.rate = root_rate
return
for node in self._tree.preorder_node_iter():
if node == self._tree.seed_node:
node.rate = root_rate
else:
parent_rate = node.parent_node.rate
bl = node.edge_length
if distribution == 'lognormal':
node.rate = logn_correlated_rate(parent_rate, bl,
autocorrel)
else:
node.rate = np.random.exponential(parent_rate)
def uncorrelated_relaxed_clock(self, root_rate, variance,
distribution='lognormal'):
optioncheck(distribution, ['exponential', 'lognormal'])
for node in self._tree.preorder_node_iter():
if node == self._tree.seed_node:
node.rate = root_rate
else:
if distribution == 'lognormal':
mu = np.log(root_rate) - 0.5 * variance
node.rate = np.random.lognormal(mu, variance)
else:
node.rate = np.random.exponential(root_rate)
def rlgt(self, time=None, times=1,
disallow_sibling_lgts=False):
""" Uses class LGT to perform random lateral gene transfer on
ultrametric tree """
lgt = LGT(self.copy())
for _ in range(times):
lgt.rlgt(time, disallow_sibling_lgts)
return lgt.tree
def rnni(self, times=1, **kwargs):
""" Applies a NNI operation on a randomly chosen edge.
keyword args: use_weighted_choice (True/False) weight the random edge selection by edge length
transform (callable) transforms the edges using this function, prior to weighted selection
"""
nni = NNI(self.copy())
for _ in range(times):
nni.rnni(**kwargs)
# nni.reroot_tree()
return nni.tree
def rspr(self, times=1, **kwargs):
""" Random SPR, with prune and regraft edges chosen randomly, and
lengths drawn uniformly from the available edge lengths.
N1: disallow_sibling_sprs prevents sprs that don't alter the topology
of the tree """
spr = SPR(self.copy())
for _ in range(times):
spr.rspr(**kwargs)
return spr.tree
def scale(self, factor, inplace=True):
""" Multiplies all branch lengths by factor. """
if not inplace:
t = self.copy()
else:
t = self
t._tree.scale_edges(factor)
t._dirty = True
return t
def strip(self, inplace=False):
""" Sets all edge lengths to None """
if not inplace:
t = self.copy()
else:
t = self
for e in t._tree.preorder_edge_iter():
e.length = None
t._dirty = True
return t
def translate(self, dct):
"""
Translate leaf names using a dictionary of names
:param dct: Dictionary of current names -> updated names
:return: Copy of tree with names changed
"""
new_tree = self.copy()
for leaf in new_tree._tree.leaf_node_iter():
curr_name = leaf.taxon.label
leaf.taxon.label = dct.get(curr_name, curr_name)
return new_tree
def _name_things(self):
""" Easy names for debugging """
edges = {}
nodes = {None: 'root'}
for n in self._tree.postorder_node_iter():
nodes[n] = '.'.join([str(x.taxon) for x in n.leaf_nodes()])
for e in self._tree.preorder_edge_iter():
edges[e] = ' ---> '.join([nodes[e.tail_node], nodes[e.head_node]])
r_edges = {value: key for key, value in edges.items()}
r_nodes = {value: key for key, value in nodes.items()}
return edges, nodes, r_edges, r_nodes
@classmethod
def new_iterative_rtree(cls, nspecies, **kwargs):
return RandomTree.new(nspecies, **kwargs)
@classmethod
def new_rtree(cls, nspecies=16, zero_root_height=True, **kwargs):
tg = TreeGen(nspecies, **kwargs)
tree = tg.rtree()
if zero_root_height:
tree._tree.seed_node.edge_length = 0.0
return tree
@classmethod
def new_coal(cls, nspecies=16, zero_root_height=True, **kwargs):
tg = TreeGen(nspecies, **kwargs)
tree = tg.coal()
if zero_root_height:
tree._tree.seed_node.edge_length = 0.0
return tree
@classmethod
def new_yule(cls, nspecies=16, zero_root_height=True, **kwargs):
tg = TreeGen(nspecies, **kwargs)
tree = tg.yule()
if zero_root_height:
tree._tree.seed_node.edge_length = 0.0
return tree
def sample_gene_tree(self, **kwargs):
tg = TreeGen(template=self)
return tg.gene_tree(**kwargs)['gene_tree']
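# Usage sketch (illustrative, not part of the original module): basic Tree operations.
def _demo_tree_basics():
    t = Tree('((A:1,B:1):1,(C:1,D:1):1);')
    assert t.labels == {'A', 'B', 'C', 'D'}
    assert len(t) == 4                     # number of leaves
    doubled = t.scale(2.0, inplace=False)  # copy with all branch lengths doubled
    shuffled = t.rnni(times=2)             # copy after two random NNI moves
    return doubled, shuffled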
class RandomTree(object):
def __init__(self, names=None, rooted=False):
if names is None:
self.label_generator = itertools.chain(_infinite_labels_generator(['l'], start=1))
next(self.label_generator)
else:
self.label_generator = itertools.chain(_infinite_labels_generator(names, start=2))
if rooted:
self.tree = Tree('({}:1,{}:1):0;'.format(self.next_label(), self.next_label()))
else:
self.tree = Tree('({}:1,{}:1,{}:1):0;'.format(self.next_label(), self.next_label(), self.next_label()))
def next_label(self):
return next(self.label_generator)
def new_taxon_object(self):
lab = self.next_label()
tax = dpy.Taxon(label=lab)
return tax
def add(self, edge):
tail = edge.tail_node
head = edge.head_node
tail.remove_child(head)
new_taxon = self.new_taxon_object()
new_inner = tail.new_child(edge_length=1.0)
new_inner.new_child(taxon=new_taxon, edge_length=1.0)
new_inner.add_child(head)
head.edge_length=1.0
def select(self):
e, _ = self.tree.map_event_onto_tree()
return e
@classmethod
def new(cls, n, names=None, rooted=False):
rt = cls(names, rooted)
present = 2 if rooted else 3
for _ in range(n - present):
e = rt.select()
rt.add(e)
return rt.tree
class TreeGen(object):
def __init__(
self,
nspecies=16,
names=None,
template=None,
cf=False,
):
""" Generates a new Tree using a coalescent process (coal method), a
Yule pure-birth process (yule method), a random tree (rtree), or by
sampling a gene tree from a template species tree using a constrained
Kingman coalescent.
nspecies = number of taxa in the tree
names = a list of leaf names (names will be generated if not supplied)
template = a template species tree for drawing gene trees
cf = set to true to generate leaf names from the list of character names
from Cannon Fodder """
self.nspecies = nspecies
if names is not None:
g = _infinite_labels_generator(names, shuffle=False)
self.names = list(itertools.islice(g, nspecies))
elif cf:
g = _infinite_labels_generator(cfnames)
self.names = list(itertools.islice(g, nspecies))
else:
g = itertools.chain(_infinite_labels_generator(['Sp'], start=1))
next(g)
self.names = list(itertools.islice(g, nspecies))
if template and not isinstance(template, Tree):
raise TypeError('template should be \'Tree\' object. Got',
type(template))
self.template = template
def coal(self):
taxon_set = dpy.TaxonNamespace(self.names)
return cast(dpy.simulate.treesim.pure_kingman_tree(taxon_set))
def gene_tree(
self,
scale_to=None,
population_size=1,
trim_names=True,
):
""" Using the current tree object as a species tree, generate a gene
tree using the constrained Kingman coalescent process from dendropy. The
species tree should probably be a valid, ultrametric tree, generated by
some pure birth, birth-death or coalescent process, but no checks are
made. Optional kwargs are: -- scale_to, which is a floating point value
to scale the total tree tip-to-root length to, -- population_size, which
is a floating point value which all branch lengths will be divided by to
convert them to coalescent units, and -- trim_names, boolean, defaults
to true, trims off the number which dendropy appends to the sequence
name """
tree = self.template or self.yule()
for leaf in tree._tree.leaf_node_iter():
leaf.num_genes = 1
dfr = tree._tree.seed_node.distance_from_root()
dft = tree._tree.seed_node.distance_from_tip()
tree_height = dfr + dft
if scale_to:
population_size = tree_height / scale_to
for edge in tree._tree.preorder_edge_iter():
edge.pop_size = population_size
gene_tree = dpy.simulate.treesim.constrained_kingman_tree(tree._tree)[0]
if trim_names:
for leaf in gene_tree.leaf_node_iter():
leaf.taxon.label = leaf.taxon.label.replace('\'', '').split('_')[0]
# Dendropy changed its API
return {'gene_tree': tree.__class__(gene_tree.as_string('newick', suppress_rooting=True).strip(';\n') + ';'),
'species_tree': tree}
def rtree(self):
m = self.yule()
m.randomise_labels()
return m.randomise_branch_lengths()
def yule(self):
taxon_set = dpy.TaxonNamespace(self.names)
return cast(dpy.simulate.treesim.uniform_pure_birth_tree(taxon_set))
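# Usage sketch (illustrative, not part of the original module): simulate a Yule
# species tree and sample a constrained-coalescent gene tree from it. The helper
# name is an assumption.
def _demo_treegen():
    tg = TreeGen(nspecies=8)
    result = tg.gene_tree(scale_to=1.0)  # uses a fresh Yule species tree when no template is set
    return result['species_tree'], result['gene_tree']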
cfnames = [
'Jools', 'Jops', 'Stoo', 'Rj', 'Ubik', 'Cj', 'Chris', 'Pete',
'Tadger', 'Hector', 'Elroy', 'Softy', 'Mac', 'Bomber', 'Stan', 'Tosh',
'Brains', 'Norm', 'Buster', 'Spike', 'Browny', 'Murphy', 'Killer', 'Abdul',
'Spotty', 'Goofy', 'Donald', 'Windy', 'Nifta', 'Denzil', 'Cedric', 'Alf',
'Marty', 'Cecil', 'Wally', 'Pervy', 'Jason', 'Roy', 'Peewee', 'Arnie',
'Lofty', 'Tubby', 'Porky', 'Norris', 'Bugsy', 'Greg', 'Gus', 'Ginger',
'Eddy', 'Steve', 'Hugo', 'Zippy', 'Sonny', 'Willy', 'Mario', 'Luigi',
'Bo', 'Johan', 'Colin', 'Queeny', 'Morgan', 'Reg', 'Peter', 'Brett',
'Matt', 'Vic', 'Hut', 'Bud', 'Brad', 'Ashley', 'Les', 'Rex',
'Louis', 'Pedro', 'Marco', 'Leon', 'Ali', 'Tyson', 'Tiger', 'Frank',
'Reuben', 'Leyton', 'Josh', 'Judas', 'Aj', 'Lex', 'Butch', 'Bison',
'Gary', 'Luther', 'Kermit', 'Brian', 'Ray', 'Freak', 'Leroy', 'Lee',
'Banjo', 'Beaker', 'Basil', 'Bonzo', 'Kelvin', 'Ronnie', 'Rupert', 'Roo',
'Dan', 'Jimmy', 'Bob', 'Don', 'Tommy', 'Eddie', 'Ozzy', 'Paddy',
'Arnold', 'Tony', 'Teddy', 'Dom', 'Theo', 'Martin', 'Chunky', 'Jon',
'Ben', 'Girly', 'Julian', 'Pizza', 'Ciaran', 'Jock', 'Gravy', 'Trendy',
'Neil', 'Derek', 'Ed', 'Biff', 'Paul', 'Stuart', 'Randy', 'Loreta',
'Suzie', 'Pumpy', 'Urmer', 'Roger', 'Pussy', 'Meat', 'Beefy', 'Harry',
'Tiny', 'Howard', 'Morris', 'Thor', 'Rev', 'Duke', 'Micky', 'Chas',
'Melony', 'Craig', 'Sidney', 'Parson', 'Rowan', 'Smelly', 'Dok', 'Stew',
'Adrian', 'Pat', 'Iceman', 'Goose', 'Dippy', 'Viv', 'Fags', 'Bunty',
'Noel', 'Bono', 'Edge', 'Robbie', 'Sean', 'Miles', 'Jimi', 'Gordon',
'Val', 'Hobo', 'Fungus', 'Toilet', 'Lampy', 'Marcus', 'Pele', 'Hubert',
'James', 'Tim', 'Saul', 'Andy', 'Silky', 'Simon', 'Handy', 'Sid',
'George', 'Joff', 'Barry', 'Dick', 'Gil', 'Nick', 'Ted', 'Phil',
'Woody', 'Wynn', 'Alan', 'Pip', 'Mickey', 'Justin', 'Karl', 'Maddog',
'Horace', 'Harold', 'Gazza', 'Spiv', 'Foxy', 'Ned', 'Bazil', 'Oliver',
'Rett', 'Scot', 'Darren', 'Moses', 'Noah', 'Seth', 'Buddah', 'Mary',
'Pilot', 'Mcbeth', 'Mcduff', 'Belly', 'Mathew', 'Mark', 'Luke', 'John',
'Aslam', 'Ham', 'Shem', 'Joshua', 'Jacob', 'Esaw', 'Omar', 'Enoch',
'Obadia', 'Daniel', 'Samuel', 'Robbo', 'Joebed', 'Ismael', 'Isreal', 'Isabel',
'Isarat', 'Monk', 'Blip', 'Bacon', 'Danube', 'Friend', 'Darryl', 'Izzy',
'Crosby', 'Stills', 'Nash', 'Young', 'Cheese', 'Salami', 'Prawn', 'Radish',
'Egbert', 'Edwy', 'Edgar', 'Edwin', 'Edred', 'Eggpie', 'Bros', 'Sonic',
'Ziggy', 'Alfred', 'Siggy', 'Hilda', 'Snell', 'Sparks', 'Spook', 'Topcat',
'Benny', 'Dibble', 'Benker', 'Dosey', 'Beaky', 'Joist', 'Pivot', 'Tree',
'Bush', 'Grass', 'Seedy', 'Tin', 'Rollo', 'Zippo', 'Nancy', 'Larry',
'Iggy', 'Nigel', 'Jamie', 'Jesse', 'Leo', 'Virgo', 'Garth', 'Fidel',
'Idi', 'Che', 'Kirk', 'Spock', 'Maccoy', 'Chekov', 'Uhura', 'Bones',
'Vulcan', 'Fester', 'Jethro', 'Jimbob', 'Declan', 'Dalek', 'Hickey', 'Chocco',
'Goch', 'Pablo', 'Renoir', 'Rolf', 'Dali', 'Monet', 'Manet', 'Gaugin',
'Chagal', 'Kid', 'Hully', 'Robert', 'Piers', 'Raith', 'Jeeves', 'Paster',
'Adolf', 'Deiter', 'Deni', 'Zark', 'Wizkid', 'Wizard', 'Iain', 'Kitten',
'Gonner', 'Waster', 'Loser', 'Fodder',
]
|
|
"""
Code base for : The Holy Quran, translations and discussions
@author Abdullah Al Zakir Hossain, Email: aazhbd@yahoo.com
@copyright Copyright (c)2009-2016 ArticulateLogic Labs
"""
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.template.context import RequestContext
from django.db.models import Q, Count, Min, Max, Sum
from django.contrib.auth import authenticate, login, logout
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.models import User
from quran.models import *
import unicodedata
import json
from django.views import generic
class HomeView(generic.ListView):
model = Chapter
context_object_name = 'chapters'
template_name = 'home.html'
class InfoView(generic.TemplateView):
template_name = 'info.html'
def viewDiscuss(request):
context = RequestContext(request)
comments = Comment.objects.filter(enabled=True).order_by('-date_published')
context.update({'comments': comments, })
if request.method == "POST":
uemail = request.POST.get('email', '').strip()
password = request.POST.get('password', '').strip()
try:
user = User.objects.get(email=uemail)
except:
user = None
if user is not None and user.check_password(password):
if user.is_active:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
context.update({'msg_body': "Login is successful.", })
else:
context.update({'msg_body': "The account is currently inactive, contact administrator.", })
else:
context.update({'msg_body': "The username and password were incorrect.", })
return render_to_response("login.html", context_instance=context)
context.update({'msg_body': "Recent Discussions", })
return render_to_response("discuss.html", context_instance=context)
def viewChapter(request, **Args):
context = RequestContext(request)
chapterNum = str(Args.get('chap')).strip('/')
cNum = int(chapterNum)
context.update({'cnum': cNum, })
try:
chName = Chapter.objects.get(pk=cNum)
context.update({
'msg_body': "Chapter " + chapterNum + ": " + chName.transliteration + " " + chName.arabic_name + " (" + chName.english_name + ")", })
except:
context.update({'msg_body': "Invalid chapter number", })
v = Q(chapter=cNum) & Q(author__name='Original Text')
full_chap = Verse.objects.filter(v).order_by('number')
for content in full_chap:
content.vtext = unicodedata.normalize('NFC', content.vtext)
context.update({'full_chap': full_chap, })
auths = Verse.objects.filter(chapter=cNum).values('author').distinct()
authors = []
for a in auths:
authors.append(Author.objects.get(pk=a['author']))
context.update({'authors': authors, })
return render_to_response("chapter.html", context_instance=context)
def viewVerse(request, **Args):
context = RequestContext(request)
c_success = None
chapterNum = str(Args.get('chap')).strip('/')
verseNum = str(Args.get('verse')).strip('/')
cNum = str(chapterNum)
vNum = str(verseNum)
if request.method == "POST":
ctext = request.POST.get('comment', "")
comment_type = request.POST.get('comment_type', "")
cuser = request.user
captcha_val = request.POST.get('g-recaptcha-response', "")
if ctext != "" and comment_type != "" and captcha_val != "":
try:
c = Comment(user=cuser, vnum=vNum, cnum=cNum, ctext=ctext, comment_type=comment_type)
c.save()
context.update({'messages': ['Your comment has been posted successfully'], 'cSuccess': c_success, })
except:
context.update({'messages': ["Invalid request, comment couldn't be saved"], 'cSuccess': c_success, })
raise
else:
context.update({'messages': ["Invalid request, comment couldn't be saved"], 'cSuccess': c_success, })
context.update({'cnum': cNum, 'vnum': vNum})
try:
chName = Chapter.objects.get(pk=cNum)
context.update({
'msg_body': "Verse " + vNum + " of Chapter " + cNum + " : " + chName.transliteration + " " + chName.arabic_name + " (" + chName.english_name + ")", })
except:
context.update({'msg_body': "Chapter " + cNum + " Verse " + vNum, })
verse = Verse.objects.filter(Q(chapter=cNum) & Q(number=vNum) & Q(author__name='Original Text'))
for v in verse:
v.vtext = unicodedata.normalize('NFC', v.vtext)
context.update({'verse': verse, })
english_verses = Verse.objects.filter(Q(chapter=cNum) & Q(number=vNum) & Q(author__name='Shakir'))
for v in english_verses:
v.vtext = unicodedata.normalize('NFC', v.vtext)
context.update({'english_verses': english_verses, })
auths = Verse.objects.filter(chapter=cNum).values('author').distinct()
authors = []
for a in auths:
authors.append(Author.objects.get(pk=a['author']))
context.update({'authors': authors, })
comments = Comment.objects.filter(Q(cnum=cNum) & Q(vnum=vNum) & Q(enabled=True))
context.update({'comments': comments, })
cdetail = Chapter.objects.get(pk=chapterNum)
if cdetail.total_verses:
total_verse = cdetail.total_verses
pnext = False
if int(verseNum) < int(total_verse):
pnext = True
pprevious = False
if int(verseNum) > 1:
pprevious = True
context.update({'pnext': pnext, 'pprevious': pprevious, })
else:
context.update({'pnext': False, 'pprevious': False, })
return render_to_response("verse.html", context_instance=context)
def viewSearch(request, **Args):
context = RequestContext(request)
try:
search = request.POST.get('search', Args.get('search'))
except:
search = ""
if search is None:
search = ""
search = search.strip()
try:
page = str(Args.get('page', 1)).strip('/')
except:
page = 1
pageNum = int(page)
pageSize = 40
titleresult = []
verseresult = []
commentresult = []
if search and search != "":
titlesearch = Q(english_name__icontains=search) | Q(arabic_name__icontains=search) | Q(
transliteration__icontains=search)
versesearch = Q(vtext__icontains=search)
commentsearch = Q(ctext__icontains=search)
try:
titleresult = Paginator(Chapter.objects.filter(titlesearch), pageSize).page(pageNum)
except:
titleresult = []
try:
verseresult = Paginator(Verse.objects.filter(versesearch), pageSize).page(pageNum)
except:
verseresult = []
try:
commentresult = Paginator(Comment.objects.filter(commentsearch), pageSize).page(pageNum)
except:
commentresult = []
totalentries = sum(
getattr(x, 'paginator', Paginator([], 0)).count for x in [titleresult, verseresult, commentresult])
totalpages = int(totalentries / pageSize)
context.update({
'titleresult': titleresult,
'verseresult': verseresult,
'commentresult': commentresult,
'searchkey': search,
'pageNum': pageNum,
'pageSize': pageSize,
'totalresult': totalentries,
'totalpages': totalpages
})
return render_to_response("search.html", context_instance=context)
def viewLogin(request):
context = RequestContext(request)
if request.method == "POST":
name = request.POST.get('name', None)
email = request.POST.get('email', None)
upass = request.POST.get('password', None)
rpass = request.POST.get('rpass', None)
captcha = request.POST.get('hiddenRecaptcha', None)
if email and upass:
try:
existing = User.objects.get(email=email)
except:
existing = None
if existing is not None:
context.update({'msg_body': "The sign up information were invalid, already exists " + str(email), })
return render_to_response("signup.html", context_instance=context)
else:
user = User.objects.create_user(email, email, upass)
user.first_name = name
user.save()
context.update({
'msg_body': "Congratulations, the signup was successful, You can now login to share your expertise or ask questions on any verse to get answers from Scholars and Enthusiasts", })
else:
context.update({'msg_body': "The sign up information were invalid. " + str(email), })
return render_to_response("signup.html", context_instance=context)
context.update({'msg_body': "Login", })
return render_to_response("login.html", context_instance=context)
def viewLogout(request):
context = RequestContext(request)
logout(request)
context.update({'msg_body': "You have been logged out.", })
return render_to_response("login.html", context_instance=context)
def viewSignup(request):
context = RequestContext(request)
context.update({'msg_body': "Signup", })
return render_to_response("signup.html", context_instance=context)
def getChapter(request):
try:
authorName = request.POST.get('authorName', False)
chapterNum = request.POST.get('chapterNum', False)
except:
raise
verses = Verse.objects.filter(Q(chapter=chapterNum) & Q(author__name=authorName))
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
h = HTMLParser()
results = []
for v in verses:
results.append({
'verseNum': v.number,
'vtext': unicodedata.normalize('NFC', h.unescape(v.vtext)),
'author': v.author.name,
'authorid': v.author.id,
'lang': v.author.alang.name,
'iso_lang': v.author.alang.iso_code
})
return HttpResponse(json.dumps(results), content_type="application/json")
def getVerse(request):
try:
authorName = request.POST.get('authorName', False)
chapterNum = request.POST.get('chapterNum', False)
verseNum = request.POST.get('verseNum', False)
except:
raise
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
h = HTMLParser()
verses = Verse.objects.filter(Q(chapter=chapterNum) & Q(number=verseNum) & Q(author__name=authorName))
results = []
for v in verses:
results.append({
'verseNum': v.number,
'vtext': unicodedata.normalize('NFC', h.unescape(v.vtext)),
'author': v.author.name,
'authorid': v.author.id,
'lang': v.author.alang.name,
'iso_lang': v.author.alang.iso_code
})
return HttpResponse(json.dumps(results), content_type="application/json")
|
|
"""
Utilities to extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD
import numpy as np
from scipy import sparse
from ..utils.fixes import in1d
from ..utils import check_random_state
from ..utils.fixes import product
from ..base import BaseEstimator
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
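# Small worked example, computed by hand from the code above (not part of the
# original file): for a 2 x 2 grid with the default n_z=1,
#   _make_edges_3d(2, 2)
# returns
#   array([[0, 2, 0, 1],
#          [1, 3, 2, 3]])
# i.e. the within-row edges 0-1 and 2-3 plus the within-column edges 0-2 and 1-3.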
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] / (n_y * n_z),
(edges[0] % (n_y * n_z)) / n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] / (n_y * n_z),
(edges[1] % (n_y * n_z)) / n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(in1d(edges[0], inds),
in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.todense()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Parameters
===========
img: ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
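# Quick sanity-check sketch (follows from _to_graph above; not part of the
# original file): for a 2 x 2 image the result is a 4 x 4 adjacency matrix whose
# diagonal holds the pixel values and whose off-diagonal entries hold the
# absolute gradients between 4-connected neighbours, e.g.
#   img = np.arange(4).reshape((2, 2))
#   graph = img_to_graph(img)      # sparse COO matrix, graph.shape == (4, 4)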
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
===========
n_x: int
Dimension in x axis
n_y: int
Dimension in y axis
n_z: int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: dtype, optional, default int
The data of the returned sparse matrix. By default it is int
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
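# Likewise, a connectivity-only sketch: because no image is supplied, all edge
# weights are 1, so grid_to_graph(2, 2) yields a symmetric 4 x 4 matrix with ones
# on the diagonal and on every pair of 4-connected voxels.
#   grid_to_graph(2, 2).toarray()   # 4 x 4 array with entries in {0, 1}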
###############################################################################
# From an image to a set of small image patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Parameters
----------
image: array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> patches.shape
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
image = np.atleast_2d(image)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, int) and max_patches < all_patches:
n_patches = max_patches
elif isinstance(max_patches, float) and 0 < max_patches < 1:
n_patches = int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
rng = check_random_state(random_state)
patches = np.empty((n_patches, p_h, p_w, n_colors), dtype=image.dtype)
i_s = rng.randint(n_h, size=n_patches)
j_s = rng.randint(n_w, size=n_patches)
for p, i, j in zip(patches, i_s, j_s):
p[:] = image[i:i + p_h, j:j + p_w, :]
else:
n_patches = all_patches
patches = np.empty((n_patches, p_h, p_w, n_colors), dtype=image.dtype)
for p, (i, j) in zip(patches, product(xrange(n_h), xrange(n_w))):
p[:] = image[i:i + p_h, j:j + p_w, :]
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Parameters
----------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size: tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image: array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(xrange(n_h), xrange(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in xrange(i_h):
for j in xrange(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
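# Hedged round-trip check (not in the original file): patches extracted from a
# single image with extract_patches_2d reconstruct that image exactly, because
# each pixel is divided by precisely the number of patches that cover it.
#   one_image = np.arange(16).reshape((4, 4)).astype(float)
#   patches = extract_patches_2d(one_image, (2, 2))
#   np.allclose(reconstruct_from_patches_2d(patches, (4, 4)), one_image)  # True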
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Parameters
----------
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.max_patches:
n_patches = self.max_patches
else:
p_h, p_w = self.patch_size
n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches_shape = (n_images * n_patches,) + self.patch_size
if n_channels > 1:
patches_shape += (n_channels,)
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, self.patch_size, self.max_patches, self.random_state)
return patches
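# Hedged usage sketch of the estimator above (shapes follow from transform):
#   X = np.arange(2 * 4 * 4).reshape((2, 4, 4))          # two 4x4 "images"
#   pe = PatchExtractor(patch_size=(2, 2), max_patches=2, random_state=0)
#   pe.fit(X).transform(X).shape                          # (4, 2, 2)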
|
|
import logging
__author__ = 'tjhunter'
'''
Created on Nov 28, 2011
@author: tjhunter
Plotting script.
PLOT_EXPORT=../papers/2011_PathInference/images/ PYTHONPATH=. MM_DATA_DIR="/home/tjhunter/tmp/high_frequency/" python mm/path_inference_private/plot_final.py
PLOT_EXPORT="/home/tjhunter/work/mm/mm_code/mobilemillennium/arterial/paper/IEEE TITS 2010/figs-gen/" PYTHONPATH=. python mm/path_inference_private/plot_final.py
PLOT_EXPORT="/home/tjhunter/work/mm/mm_code/mobilemillennium/arterial/paper/IEEE TITS 2010/figs-gen/" ipython -pylab
PLOT_EXPORT="/home/tjhunter/work/phd-code/papers/2011_PathInference/IEEE TITS 2010/figs-gen/" ipython -pylab
'''
# Import all the hacks
import build
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
from collections import defaultdict
import cPickle as pickle
from mm.path_inference_private.proj_templates import get_learned_parameter_file
from mm.path_inference_private.evaluation import LEARNING_METHOD_IDX, METRIC_NAME_IDX, STRAT_NAME_IDX
from matplotlib.ticker import MaxNLocator
import random
import math
random.seed(1)
from mm.path_inference_private.plot_utils import *
font_size = 25
figures_width = 10
logging.info("Loading data...")
all_evals = {}
learned_parameters = {}
all_res = [1, 10, 30, 60, 90, 120]
for res in all_res:
all_evals[res] = get_all_eval(res=res)
learned_parameters[res] = []
for batch_idx in range(10):
try:
data = pickle.load(get_learned_parameter_file(res=res, batch_idx=batch_idx))
learned_parameters[res].append(data)
except IOError:
pass
colors = {'most_likely_simple': 'red', \
'em_simple': 'magenta', \
'most_likely_fancy': 'orange', \
'em_fancy': 'k', \
'shortest_path': 'blue', \
'closest_point': 'green', \
}
strat_c = {'ONLINE': 'red', 'LAGGED1': 'orange', 'LAGGED2': 'green', 'OFFLINE': 'blue'}
learning_display = {'most_likely_simple': 'MaxLL - simple', \
'em_simple': 'EM - simple', \
'most_likely_fancy': 'MaxLL - complex', \
'em_fancy': 'EM - complex', \
'shortest_path': 'Shortest path', \
'closest_point': 'Closest point', \
}
''' TRUE POINTS.
'''
''' First plot: simply the percentage of wrong paths for a given strategy,
with some error bars.
'''
#fig = pl.figure(1)
#ax = fig.gca()
#ys = [percent_wrong(get_true_point_by_strat(all_eval=all_evals[res], learning_method='most_likely_simple')['LAGGED2']) for res in all_res]
#ax.plot(all_res, ys)
#fig.show()
print ">> True points:"
fig = pl.figure(3)
fig.clf()
ax = fig.gca()
ax.hold(True)
strategy = 'VITERBI'
learning_methods = ['em_simple', 'most_likely_simple', 'most_likely_fancy', 'shortest_path', 'closest_point']
num_boot_samples = 100
for learning in learning_methods:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_true_point_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
stat_dis = bootstrap_percent_wrong(data, num_boot_samples)
median_vals.append(stat_dis[int(num_boot_samples * 0.5)])
mean_vals.append(np.mean(stat_dis))
lower_percentile.append(stat_dis[int(num_boot_samples * 0.05)])
upper_percentile.append(stat_dis[int(num_boot_samples * 0.95)])
# ax.errorbar(all_res, median_vals, (lower_percentile, upper_percentile), label=learning)
print learning
ax.plot(all_res, mean_vals, c=colors[learning], label=learning_display[learning])
ax.plot(all_res, median_vals, 's', c=colors[learning])
ax.plot(all_res, lower_percentile, '-.', c=colors[learning])
ax.plot(all_res, upper_percentile, '-.', c=colors[learning])
strategy = 'ONLINE'
learning_methods = ['closest_point']
num_boot_samples = 100
for learning in learning_methods:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_true_point_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
stat_dis = bootstrap_percent_wrong(data, num_boot_samples)
median_vals.append(stat_dis[int(num_boot_samples * 0.5)])
mean_vals.append(np.mean(stat_dis))
lower_percentile.append(stat_dis[int(num_boot_samples * 0.05)])
upper_percentile.append(stat_dis[int(num_boot_samples * 0.95)])
c = 'k'
ax.plot(all_res, mean_vals, c=c, label='Hard closest point')
ax.plot(all_res, median_vals, 's', c=c)
ax.plot(all_res, lower_percentile, '-.', c=c)
ax.plot(all_res, upper_percentile, '-.', c=c)
ax.set_xlabel("Sampling period (seconds)", fontsize=font_size)
ax.set_ylabel("Proportion of false point assignments", fontsize=font_size)
#ax.set_ylim([0,.2])
ax.set_xlim([0, max(all_res) + 10])
ax.set_xticks(all_res)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
leg = ax.legend(ncol=2, loc=1)
ltext = leg.get_texts()
plt.setp(ltext, fontsize=font_size * .7)
fig.set_size_inches(figures_width, 9)
#fig.show()
logging.info("true_points_percentage")
build.save_figure(fig, "figures-pif/true_points_percentage")
#print "%s/true_points_percentage.pdf" % saving_dir()
#fig.savefig("%s/true_points_percentage.pdf" % saving_dir(), bbox_inches='tight')
''' TRUE PATHS.
'''
fig = pl.figure(3)
fig.clf()
ax = fig.gca()
ax.hold(True)
strategy = 'VITERBI'
learning_methods = ['em_simple', 'most_likely_simple', 'most_likely_fancy', 'shortest_path', 'closest_point']
num_boot_samples = 200
for learning in learning_methods:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_true_path_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
stat_dis = bootstrap_percent_wrong(data, num_boot_samples)
median_vals.append(stat_dis[int(num_boot_samples * 0.5)])
mean_vals.append(np.mean(stat_dis))
lower_percentile.append(stat_dis[int(num_boot_samples * 0.05)])
upper_percentile.append(stat_dis[int(num_boot_samples * 0.95)])
# ax.errorbar(all_res, median_vals, (lower_percentile, upper_percentile), label=learning)
print learning
ax.plot(all_res, mean_vals, c=colors[learning], label=learning_display[learning])
ax.plot(all_res, median_vals, 's', c=colors[learning])
ax.plot(all_res, lower_percentile, '-.', c=colors[learning])
ax.plot(all_res, upper_percentile, '-.', c=colors[learning])
# Add the plot corresponding to closest point / hard
strategy = 'ONLINE'
learning_methods = ['closest_point']
num_boot_samples = 200
for learning in learning_methods:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_true_path_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
stat_dis = bootstrap_percent_wrong(data, num_boot_samples)
median_vals.append(stat_dis[int(num_boot_samples * 0.5)])
mean_vals.append(np.mean(stat_dis))
lower_percentile.append(stat_dis[int(num_boot_samples * 0.05)])
upper_percentile.append(stat_dis[int(num_boot_samples * 0.95)])
c = 'k'
ax.plot(all_res, mean_vals, c=c, label='Hard closest point')
ax.plot(all_res, median_vals, 's', c=c)
ax.plot(all_res, lower_percentile, '-.', c=c)
ax.plot(all_res, upper_percentile, '-.', c=c)
ax.set_ylabel("Proportion of false path assignments", fontsize=font_size)
ax.set_xlabel("Sampling period (seconds)", fontsize=font_size)
ax.set_xticks(all_res)
ax.set_xlim([0, max(all_res) + 10])
#ax.legend(bbox_to_anchor=(0., -0., 1, 0.1), loc=4, \
# ncol=2, borderaxespad=0.)
fig.set_size_inches(figures_width, 9)
# Size adjustments
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
leg = ax.legend(ncol=2, loc=1)
ltext = leg.get_texts()
plt.setp(ltext, fontsize=font_size * .7)
#fig.show()
logging.info("true_paths_percentage")
build.save_figure(fig, "figures-pif/true_paths_percentage")
#fig.savefig("%s/true_paths_percentage.pdf" % saving_dir(), bbox_inches='tight')
# True points, complex strategy:
def get_points_ll_by_strat(all_eval, learning_method):
ll_point_data = dict([(key, all_eval[key]) for key in all_eval \
if key[METRIC_NAME_IDX] == 'POINT_LL' \
and key[LEARNING_METHOD_IDX] == learning_method])
ll_point_by_strat = defaultdict(list)
for key, vals in ll_point_data.iteritems():
for vs in vals:
for vs_ in vs:
ll_point_by_strat[key[STRAT_NAME_IDX]] += vs_
return ll_point_by_strat
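# Note (inferred from the filtering and flattening above, not stated in the
# original source): all_eval is assumed to be a dict keyed by tuples in which
# METRIC_NAME_IDX, LEARNING_METHOD_IDX and STRAT_NAME_IDX pick out the metric
# name, the learning method and the strategy; each value is a list of lists of
# per-observation metric values, which these helpers flatten into a single list
# per strategy.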
''' LL Paths
'''
ll_paths_font_size = 15
def get_paths_ll_by_strat(all_eval, learning_method):
ll_point_data = dict([(key, all_eval[key]) for key in all_eval \
if key[METRIC_NAME_IDX] == 'PATH_LL' \
and key[LEARNING_METHOD_IDX] == learning_method])
ll_point_by_strat = defaultdict(list)
for key, vals in ll_point_data.iteritems():
for vs in vals:
for vs_ in vs:
ll_point_by_strat[key[STRAT_NAME_IDX]] += [-u for u in vs_]
return ll_point_by_strat
fig = pl.figure(3)
fig.clf()
strategies = ['ONLINE', 'LAGGED1', 'LAGGED2', 'OFFLINE']
learning_methods = ['most_likely_simple', 'most_likely_fancy', 'em_simple']
max_vals_plot = [10, 10, 100]
for learning_idx in range(len(learning_methods)):
learning = learning_methods[learning_idx]
ax = fig.add_subplot(len(learning_methods), 1, learning_idx + 1)
for strategy in strategies:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_paths_ll_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data) * 0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data) * 0.25)])
upper_percentile.append(data[int(len(data) * 0.75)])
ax.plot(all_res, median_vals, c=strat_c[strategy], label="%s" % (strategy))
ax.plot(all_res, mean_vals, 's', c=strat_c[strategy])
ax.plot(all_res, lower_percentile, '-.', c=strat_c[strategy])
ax.plot(all_res, upper_percentile, '-.', c=strat_c[strategy])
ax.set_ylabel("Log-likelihood \nof true paths\n%s" % learning_display[learning], multialignment='center',
fontsize=ll_paths_font_size)
ax.set_xticks([])
ax.set_ylim([0, max_vals_plot[learning_idx]])
ax.set_xlim([0, max(all_res)])
# Size adjustments
ax.yaxis.set_major_locator(MaxNLocator(5))
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(ll_paths_font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(ll_paths_font_size)
# Add a legend for the last one:
ax.set_xlabel("Sampling period (seconds)", fontsize=ll_paths_font_size)
ax.set_xticks(all_res)
leg = ax.legend(bbox_to_anchor=(0., -0.5, 1, 0.1), loc=3, \
ncol=4, mode="expand", borderaxespad=1.)
fig.set_size_inches(figures_width, 9)
#fig.show()
logging.info("ll_paths")
build.save_figure(fig, "figures-pif/ll_paths")
#fig.savefig("%s/ll_paths.pdf" % (saving_dir()),
# bbox_inches=mpl.transforms.Bbox.from_bounds(0, 0, figures_width - .6, 8.4))
""" Entropy over paths.
"""
entropy_path_font_size = 15
def get_paths_entropy_by_strat(all_eval, learning_method):
ll_point_data = dict([(key, all_eval[key]) for key in all_eval \
if key[METRIC_NAME_IDX] == 'PATH_ENTROPY' \
and key[LEARNING_METHOD_IDX] == learning_method])
ll_point_by_strat = defaultdict(list)
for key, vals in ll_point_data.iteritems():
for vs in vals:
for vs_ in vs:
ll_point_by_strat[key[STRAT_NAME_IDX]] += vs_
return ll_point_by_strat
fig = pl.figure(3)
fig.clf()
strategies = ['ONLINE', 'LAGGED1', 'LAGGED2', 'OFFLINE']
learning_methods = ['most_likely_simple', 'most_likely_fancy', 'em_simple']
for learning_idx in range(len(learning_methods)):
learning = learning_methods[learning_idx]
ax = fig.add_subplot(len(learning_methods), 1, learning_idx + 1)
for strategy in strategies:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_paths_entropy_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data) * 0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data) * 0.05)])
upper_percentile.append(data[int(len(data) * 0.95)])
ax.plot(all_res, median_vals, c=strat_c[strategy], label="%s" % (strategy))
ax.plot(all_res, mean_vals, 's', c=strat_c[strategy])
ax.plot(all_res, lower_percentile, '-.', c=strat_c[strategy])
ax.plot(all_res, upper_percentile, '-.', c=strat_c[strategy])
ax.set_ylabel("Entropy of paths\n(%s)" % learning_display[learning], multialignment='center',
fontsize=entropy_path_font_size)
ax.set_xticks([])
ax.set_xlim([0, max(all_res) + 5])
# Size adjustments
ax.yaxis.set_major_locator(MaxNLocator(5))
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(entropy_path_font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(entropy_path_font_size)
# Add a legend for the last one:
ax.set_xlabel("Sampling period (seconds)", fontsize=entropy_path_font_size)
ax.set_xticks(all_res)
ax.legend(bbox_to_anchor=(0., -0.5, 1, 0.1), loc=3, \
ncol=4, mode="expand", borderaxespad=1.)
fig.set_size_inches(figures_width, 9)
#fig.show()
logging.info("entropy_paths")
build.save_figure(fig, "figures-pif/entropy_paths")
#fig.savefig("%s/entropy_paths.pdf" % (saving_dir()),
# bbox_inches=mpl.transforms.Bbox.from_bounds(0, 0, figures_width - .5, 8.5))
''' ENTROPY POINTS.
'''
entropy_point_font_size = 15
def get_points_entropy_by_strat(all_eval, learning_method):
ll_point_data = dict([(key, all_eval[key]) for key in all_eval \
if key[METRIC_NAME_IDX] == 'POINT_ENTROPY' \
and key[LEARNING_METHOD_IDX] == learning_method])
ll_point_by_strat = defaultdict(list)
for key, vals in ll_point_data.iteritems():
for vs in vals:
for vs_ in vs:
ll_point_by_strat[key[STRAT_NAME_IDX]] += vs_
return ll_point_by_strat
fig = pl.figure(3)
fig.clf()
strategies = ['ONLINE', 'LAGGED1', 'LAGGED2', 'OFFLINE']
learning_methods = ['most_likely_simple', 'most_likely_fancy', 'em_simple']
for learning_idx in range(len(learning_methods)):
learning = learning_methods[learning_idx]
ax = fig.add_subplot(len(learning_methods), 1, learning_idx + 1)
for strategy in strategies:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_points_entropy_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data) * 0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data) * 0.05)])
upper_percentile.append(data[int(len(data) * 0.95)])
ax.plot(all_res, median_vals, c=strat_c[strategy], label="%s" % (strategy))
ax.plot(all_res, mean_vals, 's', c=strat_c[strategy])
ax.plot(all_res, lower_percentile, '-.', c=strat_c[strategy])
ax.plot(all_res, upper_percentile, '-.', c=strat_c[strategy])
ax.set_ylabel("Entropy of points\n(%s)" % learning_display[learning], multialignment='center',
fontsize=entropy_point_font_size)
ax.set_xticks([])
ax.set_xlim([0, max(all_res) + 10])
# Size adjustments
ax.yaxis.set_major_locator(MaxNLocator(5))
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(entropy_point_font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(entropy_point_font_size)
# Add a legend for the last one:
ax.set_xlabel("Sampling period (seconds)", fontsize=entropy_point_font_size)
ax.set_xticks(all_res)
ax.legend(bbox_to_anchor=(0., -0.5, 1, 0.1), loc=3, \
ncol=4, mode="expand", borderaxespad=1.)
fig.set_size_inches(figures_width, 9)
# Size adjustments
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(entropy_point_font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(entropy_point_font_size)
leg = ax.legend(ncol=2, loc=1)
ltext = leg.get_texts()
plt.setp(ltext, fontsize=entropy_point_font_size)
#fig.show()
logging.info("entropy_points")
build.save_figure(fig, "figures-pif/entropy_points")
#fig.savefig("%s/entropy_points.pdf" % (saving_dir()),
# bbox_inches=mpl.transforms.Bbox.from_bounds(0, 0, figures_width - .5, 8.5))
''' PATH COVERAGE
'''
coverage_font_size = 15
def get_paths_coverage_by_strat(all_eval, learning_method):
ll_point_data = dict([(key, all_eval[key]) for key in all_eval \
if key[METRIC_NAME_IDX] == 'PATH_COVERAGE' \
and key[LEARNING_METHOD_IDX] == learning_method])
ll_point_by_strat = defaultdict(list)
for key, vals in ll_point_data.iteritems():
for vs in vals:
for vs_ in vs:
ll_point_by_strat[key[STRAT_NAME_IDX]] += vs_
return ll_point_by_strat
fig = pl.figure(3)
fig.clf()
strategies = ['ONLINE', 'LAGGED1', 'LAGGED2', 'OFFLINE']
learning_methods = ['most_likely_simple', 'most_likely_fancy', 'em_simple']
for learning_idx in range(len(learning_methods)):
learning = learning_methods[learning_idx]
ax = fig.add_subplot(len(learning_methods), 1, learning_idx + 1)
for strategy in strategies:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_paths_coverage_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data) * 0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data) * 0.10)])
upper_percentile.append(data[int(len(data) * 0.90)])
ax.plot(all_res, median_vals, c=strat_c[strategy], label="%s" % (strategy))
ax.plot(all_res, mean_vals, 's', c=strat_c[strategy])
ax.plot(all_res, lower_percentile, '-.', c=strat_c[strategy])
ax.plot(all_res, upper_percentile, '-.', c=strat_c[strategy])
ax.set_ylabel("Path coverage (m)\n%s" % learning_display[learning], multialignment='center',
fontsize=coverage_font_size)
ax.set_xlim([0, max(all_res) + 10])
# Size adjustments
ax.yaxis.set_major_locator(MaxNLocator(5))
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(coverage_font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(coverage_font_size)
# Add a legend for the last one:
ax.set_xlabel("Sampling period (seconds)", fontsize=coverage_font_size)
ax.set_xticks(all_res)
ax.legend(bbox_to_anchor=(0., -0.5, 1, 0.1), loc=3, \
ncol=4, mode="expand", borderaxespad=1.)
fig.set_size_inches(figures_width, 9)
leg = ax.legend(ncol=2, loc=1)
ltext = leg.get_texts()
plt.setp(ltext, fontsize=coverage_font_size)
#fig.show()
logging.info("coverage_paths")
build.save_figure(fig, "figures-pif/coverage_paths")
#fig.savefig("%s/coverage_paths.pdf" % (saving_dir()),
# bbox_inches=mpl.transforms.Bbox.from_bounds(0, 0, figures_width - .5, 8.5))
''' PATH RELATIVE COVERAGE.
'''
def get_paths_relative_coverage_by_strat(all_eval, learning_method):
ll_point_data = dict([(key, all_eval[key]) for key in all_eval \
if key[METRIC_NAME_IDX] == 'PATH_RELATIVE_COVERAGE' \
and key[LEARNING_METHOD_IDX] == learning_method])
ll_point_by_strat = defaultdict(list)
for key, vals in ll_point_data.iteritems():
for vs in vals:
for vs_ in vs:
ll_point_by_strat[key[STRAT_NAME_IDX]] += [1 - x for x in vs_]
return ll_point_by_strat
relative_coverage_font_size = 15
fig = pl.figure(3)
fig.clf()
strategies = ['ONLINE', 'LAGGED1', 'LAGGED2', 'OFFLINE']
learning_methods = ['most_likely_simple', 'most_likely_fancy', 'em_simple']
for learning_idx in range(len(learning_methods)):
learning = learning_methods[learning_idx]
ax = fig.add_subplot(len(learning_methods), 1, learning_idx + 1)
for strategy in strategies:
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for res in all_res:
data = get_paths_relative_coverage_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data) * 0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data) * 0.20)])
upper_percentile.append(data[int(len(data) * 0.80)])
ax.plot(all_res, median_vals, c=strat_c[strategy], label="%s" % (strategy))
ax.plot(all_res, mean_vals, 's', c=strat_c[strategy])
ax.plot(all_res, lower_percentile, '-.', c=strat_c[strategy])
ax.plot(all_res, upper_percentile, '-.', c=strat_c[strategy])
ax.set_ylabel("Relative coverage\n(%s)" % learning_display[learning], multialignment='center',
fontsize=relative_coverage_font_size)
ax.set_ylim([0, 1])
ax.set_xticks([])
ax.set_xlim([0, max(all_res) + 10])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(relative_coverage_font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(relative_coverage_font_size)
# Add a legend for the last one:
ax.set_xlabel("Sampling period (seconds)")
ax.set_xticks(all_res)
ax.legend(bbox_to_anchor=(0., -0.5, 1, 0.1), loc=3, \
ncol=4, mode="expand", borderaxespad=1.)
fig.set_size_inches(figures_width, 9)
# Size adjustments
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(relative_coverage_font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(relative_coverage_font_size)
leg = ax.legend(ncol=2, loc=1)
ltext = leg.get_texts()
plt.setp(ltext, fontsize=relative_coverage_font_size)
#fig.show()
#fig.savefig("%s/relative_coverage_paths.pdf"%(saving_dir()))
logging.info("relative_coverage_paths")
build.save_figure(fig, "figures-pif/relative_coverage_paths")
#fig.savefig("%s/relative_coverage_paths.pdf" % (saving_dir()),
# bbox_inches=mpl.transforms.Bbox.from_bounds(0, 0, figures_width - .5, 8.5))
''' Analysis of parameters
'''
carac_length_distr = []
mean = []
dev_upper = []
dev_lower = []
for res in all_res:
data = [-1 / v['most_likely_simple'][0] for v in learned_parameters[res]]
data.sort()
carac_length_distr.append(data)
mean.append(np.mean(data))
dev_lower.append(-data[int(len(data) * 0)] + np.mean(data))
dev_upper.append(data[int(len(data) * 0.99)] - np.mean(data))
font_size_ = int(.8 * font_size)
fig = pl.figure(3)
fig.clf()
ax = fig.gca()
ax.errorbar(all_res, mean, [dev_lower, dev_upper], c='k')
ax.set_yscale('log')
ax.set_xlim([0, max(all_res) + 10])
#ax.set_xlabel("Sampling period (seconds)", fontsize=font_size_)
ax.set_ylabel("Learned proper length (m)", fontsize=font_size_)
ax.set_xticks(all_res)
fig.set_size_inches(figures_width, 5)
# Size adjustments
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
# There is a bug somewhere in mpl, need to use png
logging.info("proper_length")
build.save_figure(fig, "figures-pif/proper_length")
#fig.savefig("%s/proper_length.png" % (saving_dir()), dpi=300)
fig = pl.figure(3)
fig.clf()
ax = fig.gca()
std_dev_distr = []
mean = []
dev_upper = []
dev_lower = []
for res in all_res:
data = [1 / math.sqrt(v['most_likely_simple'][-1]) for v in learned_parameters[res]]
data.sort()
print data
std_dev_distr.append(data)
mean.append(np.mean(data))
dev_lower.append(-data[int(len(data) * 0.0)] + np.mean(data))
dev_upper.append(data[int(len(data) * 0.99)] - np.mean(data))
ax.errorbar(all_res, mean, [dev_lower, dev_upper], c='k')
mean = []
dev_upper = []
dev_lower = []
for res in all_res:
data = [1 / math.sqrt(v['em_simple'][-1]) for v in learned_parameters[res]]
data.sort()
print data
mean.append(np.mean(data))
dev_lower.append(-data[int(len(data) * 0.0)] + np.mean(data))
dev_upper.append(data[int(len(data) * 0.99)] - np.mean(data))
ax.errorbar(all_res, mean, [dev_lower, dev_upper], c='r')
font_size_ = int(.8 * font_size)
ax.set_xlim([0, max(all_res) + 10])
#ax.set_xlabel("Sampling period (seconds)", fontsize=font_size)
ax.set_ylabel("Learned standard deviation (m)", fontsize=font_size_)
ax.set_xticks(all_res)
ax.set_ylim([0, 9])
ax.yaxis.set_major_locator(MaxNLocator(5))
# Size adjustments
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
#fig.show()
fig.set_size_inches(figures_width, 5)
logging.info("proper_std_dev")
build.save_figure(fig, "figures-pif/proper_std_dev")
#fig.savefig("%s/proper_std_dev.pdf" % (saving_dir()))
fig = pl.figure(3)
fig.clf()
ax = fig.gca()
left_turn_distr = []
mean = []
dev_upper = []
dev_lower = []
for res in all_res:
data = [v['most_likely_fancy'][3] for v in learned_parameters[res]]
data.sort()
left_turn_distr.append(data)
mean.append(np.mean(data))
dev_lower.append(-data[int(len(data) * 0.0)] + np.mean(data))
dev_upper.append(data[int(len(data) * 0.99)] - np.mean(data))
ax.errorbar(all_res, mean, [dev_lower, dev_upper], c='b', label="Right")
right_turn_distr = []
mean = []
dev_upper = []
dev_lower = []
for res in all_res:
data = [v['most_likely_fancy'][4] for v in learned_parameters[res]]
data.sort()
right_turn_distr.append(data)
mean.append(np.mean(data))
dev_lower.append(-data[int(len(data) * 0.0)] + np.mean(data))
dev_upper.append(data[int(len(data) * 0.99)] - np.mean(data))
ax.errorbar(all_res, mean, [dev_lower, dev_upper], c='g', label="Left")
ax.plot(all_res, [0 for _ in all_res], c='k')
ax.set_xlim([2, max(all_res) + 10])
#ax.set_xlabel("Sampling period (seconds)")
ax.set_ylabel("Weight", fontsize=font_size)
ax.set_xticks(all_res)
ax.set_ylim([-1, 1])
# Matplotlib 1.2.0 has a bug in the font system, so that the minus sign is incorrectly rendered.
# Manually creating the ticks as a workaround.
yts = ax.get_yticks()
ax.set_yticklabels([str(yt) for yt in yts])
leg = ax.legend()
# Size adjustments
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
#leg = ax.legend(ncol=2, loc=1)
ltext = leg.get_texts()
plt.setp(ltext, fontsize=font_size)
#fig.show()
fig.set_size_inches(figures_width, 5)
logging.info("left_right")
build.save_figure(fig, "figures-pif/left_right")
#fig.savefig("%s/left_right.pdf" % (saving_dir()))
|