gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import getopt
import os, glob, shutil, logging
import pexpect as p
import time
from pyraf import iraf
from pyraf import iraffunctions
import astropy.io.fits
from nifsUtils import datefmt, writeList, listit
def start(obsDirList, use_pq_offsets, im3dtran, over=""):
    """MERGE

    This module contains all the functions needed to merge
    the final data cubes.

    NOTE: If you wish to shift the cubes manually in QFits View
    you can combine them in this script by making sure that you
    attach the prefix "shif" to each shifted image and save them
    in the observation directory (ie. obs108). This is necessary
    for very faint objects.

    INPUT:
        - Reference data cubes
        - A list of paths where final data cubes are located
        - Transformed integral field spectra

    OUTPUT:
        - Merged cubes for each observation (ie. DATE_obs##(#).fits)
        - One final merged cube from entire observation program

    Parameters:
        obsDirList (list of str): science observation directories to merge.
        use_pq_offsets (bool): if True, derive cube shifts from the POFFSET/
            QOFFSET header keywords; if False, pause and ask the user to
            supply an offsets.txt file by hand.
        im3dtran (bool): if True, transpose cubes with iraf.im3dtran before
            combining and transpose the result back afterwards.
        over: truthy to overwrite existing outputs ("" means don't).
    """
    # Store the current working directory so we can find our way back later on.
    path = os.getcwd()

    # Load the IRAF packages used below.
    iraf.gemini()
    iraf.nifs()
    iraf.gnirs()
    iraf.gemtools()

    # Unlearn the used tasks.
    iraf.unlearn(iraf.gemini,iraf.gemtools,iraf.gnirs,iraf.nifs)

    # Prepare the package for NIFS
    iraf.nsheaders("nifs",logfile="Nifty.log")
    iraf.set(stdimage='imt2048')
    user_clobber=iraf.envget("clobber")
    iraf.reset(clobber='yes')

    # Set the default logfile for iraf tasks.
    # TODO: Set the logfile for all iraf tasks! Right now it is not logging their output because of im3dtran...
    # It seems im3dtran doesn't have a "log" parameter.
    log = "Nifty.log"

    # Change to the directory in iraf.
    iraffunctions.chdir(path)

    # Create some lists here.
    listsOfCubes = []  # List of lists of cubes (one list for each science observation directory).
    mergedCubes = []   # List of Merged cubes (one merged cube for each science observation directory).
    obsidlist = []     # List of science observation id s.

    # Pixel scale in arcseconds/pixel.
    pixScale = 0.05

    # TODO(ncomeau[*AT*]uvic.ca): implement a way to read and save cubelists to textfiles. It would be nice for users to
    # be able to edit the list of cubes to merge by hand.
    # If no Merged directory exists that contains a textfile list of cubes:
    # Go to each science directory and copy cubes from there to a new directory called Merged.
    for obsDir in obsDirList:
        # Get date, obsid and obsPath by splitting each science directory name.
        # Eg: directory name is ""/Users/ncomeau/research/newer-nifty/hd165459/20160705/H/obs13", then:
        # temp1 == ('/Users/ncomeau/research/newer-nifty/hd165459/20160705/H', 'obs13')
        # temp2 == ('/Users/ncomeau/research/newer-nifty/hd165459/20160705', 'H')
        # temp3 == ('/Users/ncomeau/research/newer-nifty/hd165459', '20160705')
        # temp4 == ('/Users/ncomeau/research/newer-nifty', 'hd165459')
        # TODO: make this clearer.
        temp1 = os.path.split(obsDir)
        temp2 = os.path.split(temp1[0])
        temp3 = os.path.split(temp2[0])
        temp4 = os.path.split(temp3[0])
        objname = temp4[1]
        date = temp3[1]
        obsid = temp1[1]
        obsPath = temp3[0]
        os.chdir(obsDir)
        obsidlist.append(date+'_'+obsid)

        # Create a directory called Merged and copy all the data cubes to this directory.
        if not os.path.exists(obsPath+'/Merged/'):
            os.mkdir(obsPath+'/Merged/')
            logging.info('I am creating a directory called Merged')
        Merged = obsPath+'/Merged'

        if not os.path.exists(Merged+'/'+date+'_'+obsid):
            os.mkdir(Merged+'/'+date+'_'+obsid)
            logging.info('I am creating a directory with date and abs ID inside Merged ')

        # If a list called shiftedcubes already exists then just merge those shifted cubes and continue.
        # NOTE(review): the glob pattern here is "shift*" while gemcube below
        # matches "shif*" (the prefix the docstring asks for) — confirm which
        # prefix manually shifted cubes are expected to carry.
        if glob.glob("./shift*.fits"):
            if over:
                if os.path.exists('./'+obsid+'_merged.fits'):
                    os.remove('./'+obsid+'_merged.fits')
                iraf.gemcube(input="shif*.fits[SCI]", output=obsid+'_merged', logfile = log)
            elif not os.path.exists('./'+obsid+'_merged.fits'):
                iraf.gemcube(input="shif*.fits[SCI]", output=obsid+'_merged', logfile = log)
            else:
                logging.info("Output exists and -over- not set - shifted cubes are not being merged")
            shutil.copy('./'+obsid+'_merged.fits', Merged)
            if obsDir==obsDirList[-1]:
                return
            else:
                continue

        # Create a list called cubes, which stores all the cubes from a particular night.
        # Store all the cubes lists in a list of lists called listsOfCubes.
        # TODO: syntax is fairly ugly; there may be a better way to do this.
        cubes = glob.glob('catfbrsnN*.fits')  # Cubes order at this point is arbitrary so we need to sort.
        cubes.sort(key=lambda x: x[-8:-5])    # Sort cubes in increasing order by last three digits.
        if cubes:
            listsOfCubes.append(cubes)
        else:
            cubes = glob.glob('cptfbrsnN*.fits')
            if cubes:
                cubes.sort(key=lambda x: x[-8:-5])  # Sort cubes in increasing order by last three digits.
                listsOfCubes.append(cubes)
            else:
                cubes = glob.glob('ctfbrsnN*.fits')
                if cubes:
                    cubes.sort(key=lambda x: x[-8:-5])  # Sort cubes in increasing order by last three digits.
                    listsOfCubes.append(cubes)
                else:
                    logging.info("\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                    logging.info("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                    logging.info("")
                    logging.info("     ERROR in merge: no cubes found!")
                    logging.info("")
                    logging.info("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                    logging.info("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
                    raise SystemExit

        # Copy cubes to their respective data_obsid directory within Merged.
        for cube in cubes:
            shutil.copy(cube, Merged+'/'+date+'_'+obsid)

    os.chdir(Merged)

    n = 0
    for cubes in listsOfCubes:
        shiftlist = []
        os.chdir(Merged+'/'+obsidlist[n])
        iraffunctions.chdir(Merged+'/'+obsidlist[n])

        if use_pq_offsets:
            # Set the zero point p and q offsets to the p and q offsets of the first cube in each list of cubes.
            header = astropy.io.fits.open(cubes[0])
            p0 = header[0].header['POFFSET']
            q0 = header[0].header['QOFFSET']
            # Close the FITS handle now that the keywords are read (was leaked before).
            header.close()
            foff = open('offsets.txt', 'w')
            foff.write('%d %d %d\n' % (0, 0, 0))
            foff.close()

        suffix = cubes[0][-8:-5]
        if im3dtran:
            # Record the prefix here so it is defined even when this list has
            # only one cube (previously it was only set inside the i>=1 loop,
            # causing a NameError at the imcombine step for single-cube lists).
            prefix = 'transcube'
            if os.path.exists('transcube'+suffix+'.fits'):
                if not over:
                    logging.info('Output already exists and -over- not set - skipping im3dtran')
                if over:
                    os.remove('transcube'+suffix+'.fits')
                    iraf.im3dtran(input = cubes[0]+'[SCI][*,*,-*]', new_x=1, new_y=3, new_z=2, output = 'transcube'+suffix)
            else:
                iraf.im3dtran(input = cubes[0]+'[SCI][*,*,-*]', new_x=1, new_y=3, new_z=2, output = 'transcube'+suffix)
        else:
            prefix = 'NONtranscube'
            iraf.imcopy(cubes[0]+'[SCI][*,*,*]', 'NONtranscube'+suffix+'.fits')
        shiftlist.append('cube'+suffix+'.fits')
        iraffunctions.chdir(os.getcwd())

        for i in range(len(cubes)):
            # Skip the first cube!
            if i == 0:
                continue
            header2 = astropy.io.fits.open(cubes[i])
            suffix = cubes[i][-8:-5]

            # If user wants to merge using p and q offsets, grab those from .fits headers.
            if use_pq_offsets:
                # find the p and q offsets of the other cubes in the sequence.
                xoff = header2[0].header['POFFSET']
                yoff = header2[0].header['QOFFSET']
                # calculate the difference between the zero point offsets and the offsets of the other cubes and convert that to pixels
                xShift = round((xoff - p0)/pixScale)
                yShift = round((yoff - q0)/pixScale)
                # write all offsets to a text file (keep in mind that the x and y offsets use different pixel scales)
                foff = open('offsets.txt', 'a')
                if im3dtran:
                    # If we swap the y and lambda axis we must also write the offsets in x, lambda, y.
                    foff.write('%d %d %d\n' % (int(xShift), 0, int(yShift)))
                else:
                    # Write offsets in regular x, y, lambda.
                    foff.write('%d\t%d\t%d\n' % (xShift, yShift, 0.))
                foff.close()
            # Close the FITS handle (was leaked before).
            header2.close()

            if im3dtran:
                prefix = 'transcube'
                if os.path.exists('transcube'+suffix+'.fits'):
                    if not over:
                        logging.info('Output already exists and -over- not set - skipping im3dtran')
                    if over:
                        os.remove('transcube'+suffix+'.fits')
                        iraf.im3dtran(input = cubes[i]+'[SCI][*,*,-*]', new_x=1, new_y=3, new_z=2, output = 'transcube'+suffix)
                else:
                    iraf.im3dtran(input = cubes[i]+'[SCI][*,*,-*]', new_x=1, new_y=3, new_z=2, output = 'transcube'+suffix)
            else:
                prefix = 'NONtranscube'
                iraf.imcopy(cubes[i]+'[SCI][*,*,*]', prefix+suffix+'.fits')
            shiftlist.append('cube'+suffix+'.fits')

        if not use_pq_offsets:
            # Before we combine make sure a suitable offsets.txt file exists.
            # raw_input takes a single prompt string; the old two-argument call
            # raised TypeError before the user was ever prompted.
            a = raw_input("\nPaused. Please provide a suitable offsets.txt file in " + Merged+'/'+obsidlist[n])
            while not os.path.exists('offsets.txt'):
                a = raw_input("No offsets.txt file found. Please try again.")
            # Use a %s placeholder so the logging module can format the obsid
            # (the old call had an argument but no placeholder).
            logging.info('offsets.txt found successfully for %s', obsidlist[n])

        if os.path.exists('cube_merged.fits'):
            if over:
                os.remove('cube_merged.fits')
                iraf.imcombine(prefix+'*', output = 'cube_merged.fits', combine = 'median', offsets = 'offsets.txt')
            else:
                logging.info('Output already exists and -over- not set - skipping imcombine')
        else:
            iraf.imcombine(prefix+'*', output = 'cube_merged.fits', combine = 'median', offsets = 'offsets.txt')

        if im3dtran:
            # Transpose the cube back to x, y, lambda.
            if os.path.exists('out.fits'):
                if over:
                    os.remove('out.fits')
                    iraf.im3dtran(input='cube_merged[*,-*,*]', new_x=1, new_y=3, new_z=2, output = 'out.fits')
                else:
                    logging.info('Output already exists and -over- not set - skipping final im3dtran')
            else:
                iraf.im3dtran(input='cube_merged[*,-*,*]', new_x=1, new_y=3, new_z=2, output = 'out.fits')
            iraf.fxcopy(input=cubes[0]+'[0], out.fits', output = obsidlist[n]+'_merged.fits')
        else:
            iraf.fxcopy(input=cubes[0]+'[0], cube_merged.fits', output = obsidlist[n]+'_merged.fits')
        mergedCubes.append(obsidlist[n]+'_merged.fits')
        n += 1

    os.chdir(Merged)
    # Copy the merged observation sequence data cubes to the Merged directory.
    for i in range(len(mergedCubes)):
        shutil.copy(Merged+'/'+obsidlist[i]+'/'+mergedCubes[i], './')

    # Merge all the individual merged observation sequence data cubes.
    # TODO: test. Still untested.
    """
    if len(mergedCubes)>1:
        os.chdir(Merged)
        iraffunctions.chdir(Merged)
        gratlist = []
        for i in range(len(mergedCubes)):
            cubeheader = astropy.io.fits.open(mergedCubes[i])
            grat = cubeheader[0].header['GRATING']
            gratlist.append(grat)
        print "gratlist is: ", gratlist
        for n in range(len(gratlist)): # For each unique grating
            # Grab the indices of the cubes associated with that grating.
            indices = [k for k, x in enumerate(gratlist) if x==gratlist[n]]
            print n
            print "indices are: ", indices
            newcubelist = []
            for ind in indices:
                newcubelist.append(mergedCubes[ind])
            waveshift(newcubelist, grat)
            print newcubelist
            for i in range(len(newcubelist)):
                # Build an input string containing all the cubes to combine.
                if i==0:
                    inputstring = newcubelist[i]+'[1]'
                else:
                    inputstring += ','+newcubelist[i]+'[1]'
            print "input string is ", inputstring
            if os.path.exists('temp_merged'+gratlist[n][0]+'.fits'):
                if over:
                    iraf.delete('temp_merged'+gratlist[n][0]+'.fits')
                    iraf.imcombine(inputstring, output = 'temp_merged'+gratlist[n][0]+'.fits', combine = 'median', offsets = 'waveoffsets'+grat[0]+'.txt')
                    iraf.fxcopy(input=newcubelist[0]+'[0], temp_merged'+gratlist[n][0]+'.fits', output = 'TOTAL_merged'+gratlist[0][0]+'.fits')
                else:
                    logging.info('Output exists and -over- not set - skipping final cube merge')
            else:
                iraf.imcombine(inputstring, output = 'temp_merged'+gratlist[n][0]+'.fits', combine = 'median', offsets = 'waveoffsets'+grat[0]+'.txt')
                iraf.fxcopy(input=newcubelist[0]+'[0], temp_merged'+gratlist[n][0]+'.fits', output = 'TOTAL_merged'+gratlist[n][0]+'.fits')
    """
#####################################################################################
# FUNCTIONS #
#####################################################################################
def waveshift(cubelist, grat):
    """Write waveoffsets<band>.txt with one wavelength offset line per cube.

    The offset for each cube is the difference between the first cube's
    starting wavelength (CRVAL3) and that cube's CRVAL3, converted to pixels
    with the cube's dispersion (CD3_3), for use as an imcombine offsets file.

    Parameters:
        cubelist (list of str): merged cube filenames, first cube is the
            wavelength zero point.
        grat (str): grating name; its first character tags the output file.
    """
    cubeheader0 = astropy.io.fits.open(cubelist[0])
    wstart0 = cubeheader0[1].header['CRVAL3']
    cubeheader0.close()  # close the handle once the keyword is read
    fwave = open('waveoffsets{0}.txt'.format(grat[0]), 'w')
    # The first cube defines the zero point, so its offset line is 0 0 0.
    fwave.write('%d %d %d\n' % (0, 0, 0))
    # Start at 1: the old loop started at 0 and wrote the first cube's
    # (zero) offset a second time, giving the file len(cubelist)+1 lines.
    for i in range(1, len(cubelist)):
        cubeheader = astropy.io.fits.open(cubelist[i])
        wstart = cubeheader[1].header['CRVAL3']
        wdelt = cubeheader[1].header['CD3_3']
        cubeheader.close()
        waveoff = int(round((wstart0-wstart)/wdelt))
        fwave.write('%d %d %d\n' % (waveoff, 0, 0))
    fwave.close()
#---------------------------------------------------------------------------------------------------------------------------------------#
if __name__ == '__main__':
    # No standalone command-line behaviour: this module is imported by the
    # Nifty pipeline driver, which calls start() directly.
    pass
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from mock import call
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
from neutron.agent.linux.iptables_firewall import IptablesFirewallDriver
from neutron.tests import base
from neutron.tests.unit import test_api_v2
# Shortcut to the shared test-uuid factory.
_uuid = test_api_v2._uuid
# CIDR prefixes used as the source_ip_prefix of generated rules.
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
               'IPv6': 'fe80::/48'}
# Fixed IP addresses assigned to the fake port under test.
FAKE_IP = {'IPv4': '10.0.0.1',
           'IPv6': 'fe80::1'}
class IptablesFirewallTestCase(base.BaseTestCase):
def setUp(self):
    """Patch out shell execution and IptablesManager, then build a driver
    whose ipv4/ipv6 filter tables are plain mocks we can assert against."""
    super(IptablesFirewallTestCase, self).setUp()
    cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
    # Prevent any real command execution during the tests.
    self.utils_exec_p = mock.patch(
        'neutron.agent.linux.utils.execute')
    self.utils_exec = self.utils_exec_p.start()
    self.addCleanup(self.utils_exec_p.stop)
    # Replace IptablesManager so chain/rule manipulation goes to mocks.
    self.iptables_cls_p = mock.patch(
        'neutron.agent.linux.iptables_manager.IptablesManager')
    iptables_cls = self.iptables_cls_p.start()
    self.addCleanup(self.iptables_cls_p.stop)
    self.iptables_inst = mock.Mock()
    self.v4filter_inst = mock.Mock()
    self.v6filter_inst = mock.Mock()
    # The driver reaches its rule tables via iptables.ipv4/ipv6 dicts.
    self.iptables_inst.ipv4 = {'filter': self.v4filter_inst}
    self.iptables_inst.ipv6 = {'filter': self.v6filter_inst}
    iptables_cls.return_value = self.iptables_inst
    self.firewall = IptablesFirewallDriver()
    self.firewall.iptables = self.iptables_inst
def _fake_port(self):
    """Build a minimal fake port dict with one IPv4 and one IPv6 fixed IP."""
    fixed_ips = [FAKE_IP['IPv4'], FAKE_IP['IPv6']]
    return {
        'device': 'tapfake_dev',
        'mac_address': 'ff:ff:ff:ff',
        'fixed_ips': fixed_ips,
    }
def test_prepare_port_filter_with_no_sg(self):
    """A port with no security_group_rules gets only the base wiring:
    sg-fallback DROP chain, per-device i/o/s chains, physdev jumps, the
    anti-spoof source chain and the DHCP allow/deny rules — with both
    directions finally jumping to sg-fallback."""
    port = self._fake_port()
    self.firewall.prepare_port_filter(port)
    # Exact expected call sequence on the IPv4 filter table mock.
    calls = [call.add_chain('sg-fallback'),
             call.add_rule('sg-fallback', '-j DROP'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule('FORWARD',
                           '-m physdev --physdev-out tapfake_dev '
                           '--physdev-is-bridged '
                           '-j $sg-chain'),
             call.add_rule('sg-chain',
                           '-m physdev --physdev-out tapfake_dev '
                           '--physdev-is-bridged '
                           '-j $ifake_dev'),
             call.add_rule(
                 'ifake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule('ifake_dev', '-j $sfake_dev'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN'),
             call.add_rule('ifake_dev', '-j $sg-fallback'),
             call.add_chain('ofake_dev'),
             call.add_rule('FORWARD',
                           '-m physdev --physdev-in tapfake_dev '
                           '--physdev-is-bridged '
                           '-j $sg-chain'),
             call.add_rule('sg-chain',
                           '-m physdev --physdev-in tapfake_dev '
                           '--physdev-is-bridged '
                           '-j $ofake_dev'),
             call.add_rule('INPUT',
                           '-m physdev --physdev-in tapfake_dev '
                           '--physdev-is-bridged '
                           '-j $ofake_dev'),
             call.add_chain('sfake_dev'),
             call.add_rule(
                 'sfake_dev', '-m mac --mac-source ff:ff:ff:ff '
                 '-s 10.0.0.1 -j RETURN'),
             call.add_rule('sfake_dev', '-d 10.0.0.1 -j RETURN'),
             call.add_rule('sfake_dev', '-j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 68 --dport 67 -j RETURN'),
             call.add_rule('ofake_dev', '-j $sfake_dev'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 67 --dport 68 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN'),
             call.add_rule('ofake_dev', '-j $sg-fallback'),
             call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
# --- IPv4 ingress rule translation -----------------------------------------
# Each test feeds one security-group rule dict through
# _test_prepare_port_filter and asserts the exact iptables rule text the
# driver must emit on the ingress ('ifake_dev') chain.
def test_filter_ipv4_ingress(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress'}
    ingress = call.add_rule('ifake_dev', '-j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'source_ip_prefix': prefix}
    ingress = call.add_rule('ifake_dev', '-s %s -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_tcp(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'tcp'}
    ingress = call.add_rule('ifake_dev', '-p tcp -m tcp -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_tcp_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'tcp',
            'source_ip_prefix': prefix}
    ingress = call.add_rule('ifake_dev',
                            '-s %s -p tcp -m tcp -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_icmp(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'icmp'}
    ingress = call.add_rule('ifake_dev', '-p icmp -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_icmp_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'icmp',
            'source_ip_prefix': prefix}
    ingress = call.add_rule(
        'ifake_dev', '-s %s -p icmp -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_tcp_port(self):
    # Equal min/max collapses to a single --dport match.
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = call.add_rule('ifake_dev',
                            '-p tcp -m tcp --dport 10 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_tcp_mport(self):
    # A real range uses the multiport extension with a dports span.
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100}
    ingress = call.add_rule(
        'ifake_dev',
        '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_tcp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    ingress = call.add_rule(
        'ifake_dev',
        '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp'}
    ingress = call.add_rule('ifake_dev', '-p udp -m udp -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'source_ip_prefix': prefix}
    ingress = call.add_rule('ifake_dev',
                            '-s %s -p udp -m udp -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_port(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = call.add_rule('ifake_dev',
                            '-p udp -m udp --dport 10 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_mport(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100}
    ingress = call.add_rule(
        'ifake_dev',
        '-p udp -m udp -m multiport --dports 10:100 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_ingress_udp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    ingress = call.add_rule(
        'ifake_dev',
        '-s %s -p udp -m udp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)
# --- IPv4 egress rule translation ------------------------------------------
# Mirrors the ingress cases above, but the expected rule lands on the
# egress ('ofake_dev') chain and ingress is expected to be untouched.
def test_filter_ipv4_egress(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress'}
    egress = call.add_rule('ofake_dev', '-j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'source_ip_prefix': prefix}
    egress = call.add_rule('ofake_dev', '-s %s -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_tcp(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'tcp'}
    egress = call.add_rule('ofake_dev', '-p tcp -m tcp -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_tcp_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'tcp',
            'source_ip_prefix': prefix}
    egress = call.add_rule('ofake_dev',
                           '-s %s -p tcp -m tcp -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_icmp(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'icmp'}
    egress = call.add_rule('ofake_dev', '-p icmp -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_icmp_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'icmp',
            'source_ip_prefix': prefix}
    egress = call.add_rule(
        'ofake_dev', '-s %s -p icmp -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_tcp_port(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 10}
    egress = call.add_rule('ofake_dev',
                           '-p tcp -m tcp --dport 10 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_tcp_mport(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100}
    egress = call.add_rule(
        'ofake_dev',
        '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_tcp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    egress = call.add_rule(
        'ofake_dev',
        '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_udp(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'udp'}
    egress = call.add_rule('ofake_dev', '-p udp -m udp -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_udp_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'udp',
            'source_ip_prefix': prefix}
    egress = call.add_rule('ofake_dev',
                           '-s %s -p udp -m udp -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_udp_port(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 10}
    egress = call.add_rule('ofake_dev',
                           '-p udp -m udp --dport 10 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_udp_mport(self):
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100}
    egress = call.add_rule(
        'ofake_dev',
        '-p udp -m udp -m multiport --dports 10:100 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv4_egress_udp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv4']
    rule = {'ethertype': 'IPv4',
            'direction': 'egress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    egress = call.add_rule(
        'ofake_dev',
        '-s %s -p udp -m udp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)
# --- IPv6 ingress rule translation -----------------------------------------
# Same matrix as IPv4 ingress; note 'icmp' rules translate to the
# '-p icmpv6' protocol match for the IPv6 ethertype.
def test_filter_ipv6_ingress(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress'}
    ingress = call.add_rule('ifake_dev', '-j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'source_ip_prefix': prefix}
    ingress = call.add_rule('ifake_dev', '-s %s -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_tcp(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'tcp'}
    ingress = call.add_rule('ifake_dev', '-p tcp -m tcp -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_tcp_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'tcp',
            'source_ip_prefix': prefix}
    ingress = call.add_rule('ifake_dev',
                            '-s %s -p tcp -m tcp -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_tcp_port(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = call.add_rule('ifake_dev',
                            '-p tcp -m tcp --dport 10 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_icmp(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'icmp'}
    ingress = call.add_rule('ifake_dev', '-p icmpv6 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_icmp_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'icmp',
            'source_ip_prefix': prefix}
    ingress = call.add_rule(
        'ifake_dev', '-s %s -p icmpv6 -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_tcp_mport(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100}
    ingress = call.add_rule(
        'ifake_dev',
        '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_tcp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    ingress = call.add_rule(
        'ifake_dev',
        '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_udp(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'udp'}
    ingress = call.add_rule('ifake_dev', '-p udp -m udp -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_udp_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'udp',
            'source_ip_prefix': prefix}
    ingress = call.add_rule('ifake_dev',
                            '-s %s -p udp -m udp -j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_udp_port(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 10}
    ingress = call.add_rule('ifake_dev',
                            '-p udp -m udp --dport 10 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_udp_mport(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100}
    ingress = call.add_rule(
        'ifake_dev',
        '-p udp -m udp -m multiport --dports 10:100 -j RETURN')
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_ingress_udp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'ingress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    ingress = call.add_rule(
        'ifake_dev',
        '-s %s -p udp -m udp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)
# --- IPv6 egress rule translation ------------------------------------------
# Same matrix as IPv4 egress, with 'icmp' mapped to '-p icmpv6'.
def test_filter_ipv6_egress(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress'}
    egress = call.add_rule('ofake_dev', '-j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'source_ip_prefix': prefix}
    egress = call.add_rule('ofake_dev', '-s %s -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_tcp(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'tcp'}
    egress = call.add_rule('ofake_dev', '-p tcp -m tcp -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_tcp_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'tcp',
            'source_ip_prefix': prefix}
    egress = call.add_rule('ofake_dev',
                           '-s %s -p tcp -m tcp -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_icmp(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'icmp'}
    egress = call.add_rule('ofake_dev', '-p icmpv6 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_icmp_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'icmp',
            'source_ip_prefix': prefix}
    egress = call.add_rule(
        'ofake_dev', '-s %s -p icmpv6 -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_tcp_port(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 10}
    egress = call.add_rule('ofake_dev',
                           '-p tcp -m tcp --dport 10 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_tcp_mport(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100}
    egress = call.add_rule(
        'ofake_dev',
        '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_tcp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'tcp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    egress = call.add_rule(
        'ofake_dev',
        '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_udp(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'udp'}
    egress = call.add_rule('ofake_dev', '-p udp -m udp -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_udp_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'udp',
            'source_ip_prefix': prefix}
    egress = call.add_rule('ofake_dev',
                           '-s %s -p udp -m udp -j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_udp_port(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 10}
    egress = call.add_rule('ofake_dev',
                           '-p udp -m udp --dport 10 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_udp_mport(self):
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100}
    egress = call.add_rule(
        'ofake_dev',
        '-p udp -m udp -m multiport --dports 10:100 -j RETURN')
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ipv6_egress_udp_mport_prefix(self):
    prefix = FAKE_PREFIX['IPv6']
    rule = {'ethertype': 'IPv6',
            'direction': 'egress',
            'protocol': 'udp',
            'port_range_min': 10,
            'port_range_max': 100,
            'source_ip_prefix': prefix}
    egress = call.add_rule(
        'ofake_dev',
        '-s %s -p udp -m udp -m multiport --dports 10:100 '
        '-j RETURN' % prefix)
    ingress = None
    self._test_prepare_port_filter(rule, ingress, egress)
    def _test_prepare_port_filter(self,
                                  rule,
                                  ingress_expected_call=None,
                                  egress_expected_call=None):
        """Prepare a fake port carrying one SG rule and verify the calls.

        Builds the expected sequence of iptables-manager mock calls for
        a port whose only security group rule is ``rule`` and asserts
        it against the filter mock matching the rule's ethertype.

        :param rule: security group rule dict; its 'ethertype' selects
            the IPv4 or IPv6 filter mock.
        :param ingress_expected_call: expected recorded call for the
            rule in the ingress chain ('ifake_dev'), or None.
        :param egress_expected_call: expected recorded call for the
            rule in the egress chain ('ofake_dev'), or None.
        """
        port = self._fake_port()
        ethertype = rule['ethertype']
        prefix = FAKE_IP[ethertype]
        filter_inst = self.v4filter_inst
        # DHCP discovery is let out for IPv4; for IPv6 the equivalent
        # allowance is ICMPv6.
        dhcp_rule = call.add_rule(
            'ofake_dev',
            '-p udp -m udp --sport 68 --dport 67 -j RETURN')
        if ethertype == 'IPv6':
            filter_inst = self.v6filter_inst
            dhcp_rule = call.add_rule('ofake_dev', '-p icmpv6 -j RETURN')
        sg = [rule]
        port['security_group_rules'] = sg
        self.firewall.prepare_port_filter(port)
        # Common preamble: fallback chain plus the per-device ingress
        # chain wired into FORWARD and sg-chain.
        calls = [call.add_chain('sg-fallback'),
                 call.add_rule('sg-fallback', '-j DROP'),
                 call.ensure_remove_chain('sg-chain'),
                 call.add_chain('sg-chain'),
                 call.add_chain('ifake_dev'),
                 call.add_rule('FORWARD',
                               '-m physdev --physdev-out tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $sg-chain'),
                 call.add_rule('sg-chain',
                               '-m physdev --physdev-out tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $ifake_dev'),
                 ]
        if ethertype == 'IPv6':
            # NOTE(review): assumes every ICMPV6_ALLOWED_TYPES entry is
            # permitted in before the state rules -- confirm against
            # the firewall driver.
            for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
                calls.append(
                    call.add_rule('ifake_dev',
                                  '-p icmpv6 --icmpv6-type %s -j RETURN' %
                                  icmp6_type))
        if ethertype == 'IPv4':
            calls += [call.add_rule('ifake_dev',
                                    '-m state --state INVALID -j DROP'),
                      call.add_rule('ifake_dev',
                                    '-j $sfake_dev'),
                      call.add_rule('ifake_dev',
                                    '-m state --state RELATED,ESTABLISHED '
                                    '-j RETURN')]
        else:
            # Note: no '-j $sfake_dev' jump on the IPv6 ingress side.
            calls += [call.add_rule('ifake_dev',
                                    '-m state --state INVALID -j DROP'),
                      call.add_rule('ifake_dev',
                                    '-m state --state RELATED,ESTABLISHED '
                                    '-j RETURN')]
        if ingress_expected_call:
            calls.append(ingress_expected_call)
        # Egress chain setup, anti-spoofing chain and DHCP handling.
        calls += [call.add_rule('ifake_dev', '-j $sg-fallback'),
                  call.add_chain('ofake_dev'),
                  call.add_rule('FORWARD',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain'),
                  call.add_rule('sg-chain',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $ofake_dev'),
                  call.add_rule('INPUT',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $ofake_dev'),
                  call.add_chain('sfake_dev'),
                  call.add_rule(
                      'sfake_dev',
                      '-m mac --mac-source ff:ff:ff:ff -s %s -j RETURN'
                      % prefix),
                  call.add_rule(
                      'sfake_dev',
                      '-d %s -j RETURN' % prefix),
                  call.add_rule('sfake_dev', '-j DROP'),
                  dhcp_rule,
                  call.add_rule('ofake_dev', '-j $sfake_dev')]
        if ethertype == 'IPv4':
            # A port must never act as a DHCP server.
            calls.append(call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 67 --dport 68 -j DROP'))
        calls += [call.add_rule(
                      'ofake_dev', '-m state --state INVALID -j DROP'),
                  call.add_rule(
                      'ofake_dev',
                      '-m state --state RELATED,ESTABLISHED -j RETURN')]
        if egress_expected_call:
            calls.append(egress_expected_call)
        calls += [call.add_rule('ofake_dev', '-j $sg-fallback'),
                  call.add_rule('sg-chain', '-j ACCEPT')]
        filter_inst.assert_has_calls(calls)
def test_update_delete_port_filter(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}]
self.firewall.prepare_port_filter(port)
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'egress'}]
self.firewall.update_port_filter(port)
self.firewall.update_port_filter({'device': 'no-exist-device'})
self.firewall.remove_port_filter(port)
self.firewall.remove_port_filter({'device': 'no-exist-device'})
calls = [call.add_chain('sg-fallback'),
call.add_rule('sg-fallback', '-j DROP'),
call.ensure_remove_chain('sg-chain'),
call.add_chain('sg-chain'),
call.add_chain('ifake_dev'),
call.add_rule(
'FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $sg-chain'),
call.add_rule(
'sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $ifake_dev'),
call.add_rule(
'ifake_dev', '-m state --state INVALID -j DROP'),
call.add_rule(
'ifake_dev', '-j $sfake_dev'),
call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN'),
call.add_rule('ifake_dev', '-j RETURN'),
call.add_rule('ifake_dev', '-j $sg-fallback'),
call.add_chain('ofake_dev'),
call.add_rule(
'FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $sg-chain'),
call.add_rule(
'sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev'),
call.add_rule(
'INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev'),
call.add_chain('sfake_dev'),
call.add_rule(
'sfake_dev',
'-m mac --mac-source ff:ff:ff:ff -s 10.0.0.1 '
'-j RETURN'),
call.add_rule(
'sfake_dev',
'-d 10.0.0.1 -j RETURN'),
call.add_rule('sfake_dev', '-j DROP'),
call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN'),
call.add_rule('ofake_dev', '-j $sfake_dev'),
call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP'),
call.add_rule(
'ofake_dev', '-m state --state INVALID -j DROP'),
call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN'),
call.add_rule('ofake_dev', '-j $sg-fallback'),
call.add_rule('sg-chain', '-j ACCEPT'),
call.ensure_remove_chain('ifake_dev'),
call.ensure_remove_chain('ofake_dev'),
call.ensure_remove_chain('sfake_dev'),
call.ensure_remove_chain('sg-chain'),
call.add_chain('sg-chain'),
call.add_chain('ifake_dev'),
call.add_rule(
'FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $sg-chain'),
call.add_rule(
'sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $ifake_dev'),
call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP'),
call.add_rule('ifake_dev', '-j $sfake_dev'),
call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN'),
call.add_rule('ifake_dev', '-j $sg-fallback'),
call.add_chain('ofake_dev'),
call.add_rule(
'FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $sg-chain'),
call.add_rule(
'sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev'),
call.add_rule(
'INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev'),
call.add_chain('sfake_dev'),
call.add_rule(
'sfake_dev',
'-m mac --mac-source ff:ff:ff:ff:ff:ff -s 10.0.0.1 '
'-j RETURN'),
call.add_rule(
'sfake_dev',
'-d %s -j RETURN' % prefix),
call.add_rule('sfake_dev', '-j DROP'),
call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN'),
call.add_rule('ofake_dev', '-j $sfake_dev'),
call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP'),
call.add_rule(
'ofake_dev', '-m state --state INVALID -j DROP'),
call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN'),
call.add_rule('ofake_dev', '-j RETURN'),
call.add_rule('ofake_dev', '-j $sg-fallback'),
call.add_rule('sg-chain', '-j ACCEPT'),
call.ensure_remove_chain('ifake_dev'),
call.ensure_remove_chain('ofake_dev'),
call.ensure_remove_chain('sfake_dev'),
call.ensure_remove_chain('sg-chain'),
call.add_chain('sg-chain')]
self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# checking no exception occures
self.v4filter_inst.assert_has_calls([])
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([call.defer_apply_on(),
call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([call.defer_apply_on(),
call.defer_apply_off()])
    def _mock_chain_applies(self):
        """Replace the firewall's chain-apply hooks with recording mocks.

        Returns a mock whose ``setup``/``remove`` children capture deep
        copies of their arguments, so later assertions see the values
        as they were at call time.
        """
        class CopyingMock(mock.MagicMock):
            """Copies arguments so mutable arguments can be asserted on.

            Copied verbatim from unittest.mock documentation.
            """
            def __call__(self, *args, **kwargs):
                args = copy.deepcopy(args)
                kwargs = copy.deepcopy(kwargs)
                return super(CopyingMock, self).__call__(*args, **kwargs)
        # Need to use CopyingMock because _{setup,remove}_chains_apply are
        # usually called with a dict that's modified between calls (i.e.,
        # self.firewall.filtered_ports).
        chain_applies = CopyingMock()
        self.firewall._setup_chains_apply = chain_applies.setup
        self.firewall._remove_chains_apply = chain_applies.remove
        return chain_applies
def test_mock_chain_applies(self):
chain_applies = self._mock_chain_applies()
port_prepare = {'device': 'd1', 'mac_address': 'prepare'}
port_update = {'device': 'd1', 'mac_address': 'update'}
self.firewall.prepare_port_filter(port_prepare)
self.firewall.update_port_filter(port_update)
self.firewall.remove_port_filter(port_update)
chain_applies.assert_has_calls([call.remove({}),
call.setup({'d1': port_prepare}),
call.remove({'d1': port_prepare}),
call.setup({'d1': port_update}),
call.remove({'d1': port_update}),
call.setup({})])
def test_defer_chain_apply_need_pre_defer_copy(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
device2port = {port['device']: port}
self.firewall.prepare_port_filter(port)
with self.firewall.defer_apply():
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([call.remove({}),
call.setup(device2port),
call.remove(device2port),
call.setup({})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([call.remove({}), call.setup({})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1'}
port2 = {'device': 'd2', 'mac_address': 'mac2'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([call.remove({}),
call.setup(device2port)])
def test_ip_spoofing_filter_with_multiple_ips(self):
port = {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff',
'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
self.firewall.prepare_port_filter(port)
calls = [call.add_chain('sg-fallback'),
call.add_rule('sg-fallback', '-j DROP'),
call.ensure_remove_chain('sg-chain'),
call.add_chain('sg-chain'),
call.add_chain('ifake_dev'),
call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain'),
call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev'),
call.add_rule(
'ifake_dev', '-m state --state INVALID -j DROP'),
call.add_rule('ifake_dev', '-j $sfake_dev'),
call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN'),
call.add_rule('ifake_dev', '-j $sg-fallback'),
call.add_chain('ofake_dev'),
call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain'),
call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $ofake_dev'),
call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $ofake_dev'),
call.add_chain('sfake_dev'),
call.add_rule(
'sfake_dev',
'-m mac --mac-source ff:ff:ff:ff:ff:ff -s 10.0.0.1 '
'-j RETURN'),
call.add_rule('sfake_dev', '-d 10.0.0.1 -j RETURN'),
call.add_rule(
'sfake_dev',
'-m mac --mac-source ff:ff:ff:ff:ff:ff -s 10.0.0.2 '
'-j RETURN'),
call.add_rule(
'sfake_dev',
'-d 10.0.0.2 -j RETURN'),
call.add_rule('sfake_dev', '-j DROP'),
call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN'),
call.add_rule('ofake_dev', '-j $sfake_dev'),
call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP'),
call.add_rule(
'ofake_dev', '-m state --state INVALID -j DROP'),
call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN'),
call.add_rule('ofake_dev', '-j $sg-fallback'),
call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
    def test_ip_spoofing_no_fixed_ips(self):
        """With no fixed IPs, the spoofing chain only pins the mac address."""
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff',
                'fixed_ips': []}
        self.firewall.prepare_port_filter(port)
        calls = [call.add_chain('sg-fallback'),
                 call.add_rule('sg-fallback', '-j DROP'),
                 call.ensure_remove_chain('sg-chain'),
                 call.add_chain('sg-chain'),
                 call.add_chain('ifake_dev'),
                 call.add_rule('FORWARD',
                               '-m physdev --physdev-out tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $sg-chain'),
                 call.add_rule('sg-chain',
                               '-m physdev --physdev-out tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $ifake_dev'),
                 call.add_rule(
                     'ifake_dev', '-m state --state INVALID -j DROP'),
                 call.add_rule('ifake_dev', '-j $sfake_dev'),
                 call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN'),
                 call.add_rule('ifake_dev', '-j $sg-fallback'),
                 call.add_chain('ofake_dev'),
                 call.add_rule('FORWARD',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $sg-chain'),
                 call.add_rule('sg-chain',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $ofake_dev'),
                 call.add_rule('INPUT',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $ofake_dev'),
                 call.add_chain('sfake_dev'),
                 # Only the mac can be verified when no IPs are assigned.
                 call.add_rule(
                     'sfake_dev',
                     '-m mac --mac-source ff:ff:ff:ff -j RETURN'),
                 call.add_rule('sfake_dev', '-j DROP'),
                 call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN'),
                 call.add_rule('ofake_dev', '-j $sfake_dev'),
                 call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP'),
                 call.add_rule(
                     'ofake_dev', '-m state --state INVALID -j DROP'),
                 call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN'),
                 call.add_rule('ofake_dev', '-j $sg-fallback'),
                 call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
| |
# -*- coding: utf-8 -*-
"""
File search utilities.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import os
import re
# -----------------------------------------------------------------------------
def find_files(root,
               prefix = None,
               suffix = None,
               dirname = None,
               direxcl = None):
    """
    Find files under root, optionally filtered by name components.

    root    - directory where the search starts.
    prefix  - if given, match only filenames starting with this text.
    suffix  - if given, match only filenames ending with this text
              (e.g. an extension such as '.py').
    dirname - if given, match only files directly inside a directory
              with this literal name.
    direxcl - list of regexes naming directories to prune from the
              walk; defaults to excluding hidden (dot) directories.

    Returns a generator of matching filepaths.
    """
    if direxcl is None:
        direxcl = [r'^\..*$']
    pathincl_regex = r'^.*'
    if dirname is not None:
        # re.escape makes the whole name (and the path separator on
        # any platform) match literally, not just the '.' character.
        pathincl_regex += re.escape(dirname + os.sep)
    if prefix is not None:
        pathincl_regex += re.escape(prefix) + r'.*'
    if suffix is not None:
        pathincl_regex += re.escape(suffix)
    pathincl_regex += r'$'
    pathincl = [pathincl_regex]
    return filtered_filepath_generator(
                                root     = root,
                                direxcl  = direxcl,
                                pathincl = pathincl,
                                pathexcl = None)
# -----------------------------------------------------------------------------
def filtered_dirpath_generator(root,
                               direxcl = None,
                               pathincl = None,
                               pathexcl = None):
    """
    Return generator of dirpaths from root, filtered using regex lists.
    """
    walk = _dirname_filtered_os_walk_gen(root, direxcl = direxcl)
    return _dirpath_from_os_walk_filter(os_walk  = walk,
                                        pathincl = pathincl,
                                        pathexcl = pathexcl)
# -----------------------------------------------------------------------------
def filtered_filepath_generator(root,
                                direxcl = None,
                                pathincl = None,
                                pathexcl = None):
    """
    Return generator of filepaths from root, filtered using regex lists.
    """
    walk = _dirname_filtered_os_walk_gen(root, direxcl = direxcl)
    return _filepath_from_os_walk_filter(os_walk  = walk,
                                         pathincl = pathincl,
                                         pathexcl = pathexcl)
# -----------------------------------------------------------------------------
def _dirpath_from_os_walk_filter(os_walk,
                                 pathincl = None,
                                 pathexcl = None):
    """
    Return filter of dirpaths, adapted to take an iterable of os.walk tuples.
    """
    dirpaths = _adapt_os_walk_to_dirpath(os_walk)
    return _filepath_regex_filter(dirpaths, pathincl, pathexcl)
# -----------------------------------------------------------------------------
def _filepath_from_os_walk_filter(os_walk,
                                  pathincl = None,
                                  pathexcl = None):
    """
    Return filter of filepaths, adapted to take an iterable of os.walk tuples.
    """
    filepaths = _adapt_os_walk_to_filepath(os_walk)
    return _filepath_regex_filter(filepaths, pathincl, pathexcl)
# -----------------------------------------------------------------------------
def _adapt_os_walk_to_dirpath(os_walk):
"""
Return adapter converting os.walk tuple iterable into dirpath iterable.
Intended to process the output of os_walk and
dirname_filter functions.
"""
for (current_path, dir_list, _) in os_walk:
for dir_name in dir_list:
yield os.path.join(current_path, dir_name)
# -----------------------------------------------------------------------------
def _adapt_os_walk_to_filepath(os_walk):
"""
Return adapter converting os.walk tuple iterable into filepath iterable.
Intended to process the output of os_walk and
dirname_filter functions.
"""
for (current_path, _, file_list) in os_walk:
for file_name in file_list:
yield os.path.join(current_path, file_name)
# -----------------------------------------------------------------------------
def _dirname_filtered_os_walk_gen(root,
                                  direxcl = None,
                                  onerror = None,
                                  followlinks = False):
    """
    Return generator of os.walk tuples, filtered using regex lists.
    """
    # topdown=True is required so the dirname filter can prune the
    # walk by mutating the subdirectory lists in place.
    raw_walk = os.walk(root,
                       topdown = True,
                       onerror = onerror,
                       followlinks = followlinks)
    return _dirname_regex_filter(os_walk = raw_walk, excl = direxcl)
# -----------------------------------------------------------------------------
def _dirname_regex_filter(os_walk,
                          excl = None):
    """
    Filter tuples generated by os.walk. Recursion limited by directory name.

    Directory names matching any regex in excl are dropped, which
    prevents recursion into the corresponding subtrees.
    """
    keep_dirname = _get_dual_regex_indicator_fcn(excl = excl)
    return _dirname_filter(os_walk, keep_dirname)
# -----------------------------------------------------------------------------
def _dirname_filter(os_walk, dirname_indicator_func):
"""
Filter tuples generated by os.walk. Recursion limited by directory name.
The supplied indicator function is used to
decide if a directory subtree should be
recursed into or not.
"""
for (current_path, subdir_list, file_list) in os_walk:
if not subdir_list:
subdir_list[:] = []
else:
subdir_list[:] = (
path for path in subdir_list if dirname_indicator_func(path))
yield (current_path, subdir_list, file_list)
# -----------------------------------------------------------------------------
def _filepath_regex_filter(iter_filepaths,
                           incl = None,
                           excl = None):
    """
    Filter for filepaths, filtering specified using regex lists.
    """
    keep_filepath = _get_dual_regex_indicator_fcn(incl, excl)
    return filter(keep_filepath, iter_filepaths)
# -----------------------------------------------------------------------------
def _get_dual_regex_indicator_fcn(incl=None, excl=None):
    """
    Indicator function for strings based on a pair of compiled regexes.

    - Returns True if incl matches and excl does not.
    - If incl is not specified or None, it always matches (always include).
    - If excl is not specified or None, it never matches (never exclude).
    """
    matches_incl = _get_regex_indicator_fcn(incl, default = True)
    matches_excl = _get_regex_indicator_fcn(excl, default = False)
    def indicator(item):
        return matches_incl(item) and not matches_excl(item)
    return indicator
# -----------------------------------------------------------------------------
def _get_regex_indicator_fcn(regex_list=None, default=False):
    """
    Return an indicator function defined by the specified regular expression.

    With a non-empty regex_list the indicator reports whether an item
    matches; otherwise it ignores the item and returns default.
    """
    if not regex_list:
        return lambda item: default
    compiled = _compile_regex_list(regex_list)
    def indicator(item):
        return compiled.match(item) is not None
    return indicator
# -----------------------------------------------------------------------------
def _compile_regex_list(regex_list):
"""
Compile a list of regex strings into a regular expression object.
"""
combined = "(" + ")|(".join(regex_list) + ")"
compiled = re.compile(combined)
return compiled
# -----------------------------------------------------------------------------
def find_ancestor_dir_containing(dir_path, marker_name, allow_dir=True):
    """
    Search for an ancestor directory of dir_path that contains a marker.

    This function identifies the closest (deepest)
    ancestor directory of dir_path that contains a
    file or directory named as specified by the
    marker_name parameter.

    It works by visiting each ancestor directory
    in dir_path in turn, starting at dir_path and
    proceeding up towards the root of the file-
    system hierarchy. At each step, it checks to
    see if a file or directory with the specified
    name exists. If it finds this marker, it
    returns the containing directory. If not, it
    continues towards the root. If, after reaching
    the root, no marker has been found, a
    RuntimeError exception is raised.

    If allow_dir is False, only a regular file
    counts as a marker; a directory with the
    marker name is ignored.

    This function was created to help identify
    the root of the local working copy, given a
    path within it. It should also be possible
    to use this function to help identify the
    root of various other filesystem hierarchies,
    given the emplacement of suitable marker files
    or directories.
    """
    # realpath resolves symlinks so the walk follows the physical tree.
    for dir_path in _walk_towards_root_generator(os.path.realpath(dir_path)):
        path_marker = os.path.join(dir_path, marker_name)
        is_file = os.path.isfile(path_marker)
        if allow_dir:
            is_dir = os.path.isdir(path_marker)
            is_marker = is_file or is_dir
        else:
            is_marker = is_file
        if is_marker:
            return dir_path
    raise RuntimeError("Could not find marker {name}".format(
                                                name = marker_name))
# -----------------------------------------------------------------------------
def _walk_towards_root_generator(dir_path):
"""
Iterate over ancestor directories from dir_path up to the filesystem root.
This function generates a sequence of directory
paths starting with dir_path and progressively
returning the parent directory of each until
the filesystem root directory is reached.
"""
prev_path = None
while dir_path != prev_path:
yield dir_path
prev_path = dir_path
dir_path = os.path.dirname(dir_path)
| |
# Iterable moved to collections.abc; the old alias in `collections`
# was removed in Python 3.10.
from collections.abc import Iterable
from numbers import Number
from types import FunctionType
import random

import numpy as np
from skimage.transform import warp, resize, AffineTransform
from skimage.exposure import equalize_hist
def chain_augmentations(*augmentations, augment_x=True, augment_y=False):
    """
    Chain multiple augmentations.

    Example:

    .. code-block:: python

        aug = chain_augmentations(NoiseAugmentation(),
                                  WarpAugmentation())
        Xa, ya = aug((X, y))

    Args:
        augmentations (Augmentation or functions):
            Can be a Augmentation object or any callable object. The leftmost
            augmentations is applied first.
        augment_x (bool): should augment data X
        augment_y (bool): should augment label y

    Returns:
        A function that takes a minibatch as input and applies the augmentations.
        The minibatch can either be a numpy array or tuple of (X, y)
        where X are the data and y the labels.
    """
    def get_transformation(aug, shape):
        # Augmentation objects draw a fresh random transformation per
        # sample; plain callables are used as the transformation itself.
        if issubclass(type(aug), Augmentation):
            return aug.get_transformation(shape)
        elif hasattr(aug, '__call__'):
            return aug
        else:
            raise Exception("Must be a subclass of Augmentation or callable. "
                            "But got {}".format(aug))

    def wrapper(batch):
        if isinstance(batch, tuple):
            X, Y = batch
            if len(X) != len(Y):
                raise Exception("Got tuple but arrays have different size. "
                                "Got X= {} and y={}".format(X.shape, Y.shape))
            Y_aug = []
        elif isinstance(batch, np.ndarray):
            X = batch
            Y = None
            Y_aug = None
        else:
            # Fail early with a clear message instead of hitting a
            # confusing NameError on X below.
            raise TypeError("batch must be a np.ndarray or an (X, y) "
                            "tuple. Got {}".format(type(batch)))
        X_aug = []
        for i in range(len(X)):
            x = X[i]
            y = Y[i] if Y is not None else None
            # One fresh transformation per augmentation per sample,
            # applied to data and (optionally) the matching label.
            for aug in augmentations:
                transformation = get_transformation(aug, X[i].shape)
                if augment_x:
                    x = transformation(x)
                if augment_y and y is not None:
                    y = transformation(y)
            X_aug.append(x)
            if y is not None:
                Y_aug.append(y)
        X_aug = np.stack(X_aug)
        if Y_aug is not None:
            return X_aug, np.stack(Y_aug)
        elif Y is not None:
            return X_aug, Y
        else:
            return X_aug
    return wrapper
class Augmentation:
    """
    Augmentation super class. Subclasses must implement the ``get_transformation``
    method.
    """
    def get_transformation(self, shape):
        """
        Returns a transformation. A transformation can be a function
        or other callable object (``__call__``). It must map image
        to the augmented image. See :py:meth:`.WarpAugmentation.transformation`
        for an example.
        """
        raise NotImplementedError()

    def __call__(self, batch):
        """
        Applies random augmentations to each sample in the batch.
        The first axis of the batch must be the samples.

        Args:
            batch (np.ndarray): 4-dim data minibatch
        """
        transformed = [self.get_transformation(sample.shape)(sample)
                       for sample in batch]
        return np.stack(transformed)
def _parse_parameter(param):
if isinstance(param, Iterable):
if len(param) != 2:
raise ValueError('lower and upper bound required')
lower, upper = param
return lambda: np.random.uniform(lower, upper)
elif isinstance(param, Number):
return lambda: param
elif isinstance(param, FunctionType):
return param
else:
raise TypeError('parameters must either be bounds for a uniform distribution,' +
'a single value or a value generating function')
class CropAugmentation(Augmentation):
    """Randomly translated crops of a fixed output shape."""

    def __init__(self, translation, crop_shape):
        # translation may be a constant, (lower, upper) bounds, or a
        # generator function; it is drawn per transformation.
        self.translation = _parse_parameter(translation)
        self.crop_shape = crop_shape

    def get_transformation(self, shape):
        """Draw one random (row, column) offset and build a crop."""
        offsets = [int(self.translation()), int(self.translation())]
        return CropTransformation(offsets, self.crop_shape)
class CropTransformation:
    """
    Crop the last two axes of an array to a fixed shape.

    The crop window is centered on the array center plus a
    (row, column) translation and is clamped to the array bounds, so
    the output always has exactly ``crop_shape`` in its last two
    dimensions.
    """
    def __init__(self, translation, crop_shape):
        """
        Args:
            translation: int or (row, column) pair of integer offsets.
            crop_shape: (height, width) of the crop window.
        """
        if type(translation) == int:
            translation = (translation, translation)
        if type(translation[0]) != int or type(translation[1]) != int:
            raise Exception("Translation must be an integer! But got {}".format(translation))
        self.translation = translation
        # Normalize to a tuple so the comparison against a numpy shape
        # tuple works even when a list was passed in.
        self.crop_shape = tuple(crop_shape)

    def __call__(self, data):
        if len(data.shape) <= 1:
            raise Exception("Shape must be at least 2-dimensional. Got {}."
                            .format(data.shape))
        crop_shp = self.crop_shape
        if data.shape[-2:] != crop_shp:
            h, w = data.shape[-2:]
            assert h >= crop_shp[0] and w >= crop_shp[1]
            hc = h // 2 + self.translation[0]
            wc = w // 2 + self.translation[1]
            # Clamp the window to the image bounds so the output keeps
            # the full crop size even for large translations (the
            # previous code could silently return a smaller array).
            hb = min(max(hc - crop_shp[0] // 2, 0), h - crop_shp[0])
            he = hb + crop_shp[0]
            wb = min(max(wc - crop_shp[1] // 2, 0), w - crop_shp[1])
            we = wb + crop_shp[1]
            return data[..., hb:he, wb:we]
        else:
            return data
class HistEqualization:
    """
    Performs histogram equalization. See ``skimage.expose.equalize_hist``.
    The returned data is scaled to ``[-1, 1]``.
    """
    def __call__(self, data):
        equalized = equalize_hist(data)
        return 2 * equalized - 1
class ChannelScaleShiftAugmentation(Augmentation):
    """
    Augments a image by scaling and shifts its channels.
    """
    def __init__(self, scale, shift, min=-1, max=1, per_channel=True):
        self.scale = _parse_parameter(scale)
        self.shift = _parse_parameter(shift)
        self.min = min
        self.max = max
        self.per_channel = per_channel

    def get_transformation(self, shape):
        """Draw per-channel (or shared) scale/shift values for one image."""
        if len(shape) != 3:
            raise Exception("Shape must be 3-dimensional. Got {}.".format(shape))
        nb_channels = shape[0]
        if self.per_channel:
            # Independent draw for every channel (shifts first, then
            # scales, matching the established draw order).
            shifts = [self.shift() for _ in range(nb_channels)]
            scales = [self.scale() for _ in range(nb_channels)]
        else:
            # Single draw shared by all channels.
            shift_value = self.shift()
            scale_value = self.scale()
            shifts = [shift_value] * nb_channels
            scales = [scale_value] * nb_channels
        return ChannelScaleShiftTransformation(scales, shifts, self.min, self.max)
class ChannelScaleShiftTransformation():
    """Apply a per-channel affine transform followed by clipping."""

    def __init__(self, scale, shift, min=-1, max=1):
        self.scale = scale
        self.shift = shift
        self.min = min
        self.max = max

    def __call__(self, x):
        transformed = []
        for idx, channel in enumerate(x):
            scaled = self.scale[idx] * channel + self.shift[idx]
            transformed.append(np.clip(scaled, self.min, self.max))
        return np.stack(transformed)
class WarpAugmentation(Augmentation):
    """
    Perform random warping transformation on the input data.

    Parameters can be either constant values, a list/tuple containing the lower and upper bounds
    for a uniform distribution or a value generating function:

    Examples:

    * WarpAugmentation(rotation=0.5 * np.pi)
    * WarpAugmentation(rotation=(-0.25 * np.pi, 0.25 * np.pi))
    * WarpAugmentation(rotation=lambda: np.random.normal(0, np.pi))

    Example Usage:

    .. code-block:: python

        aug = WarpAugmentation(rotation=(0.2 * np.pi))
        # apply aug to each sample of the batch
        batch_aug = aug(batch)
        # get a transformation
        trans = aug.get_transformation(batch.shape[1:])
        # value of the rotation is available
        rot = trans.rotation
        # transform first sample in batch
        x_aug = trans(batch[0])

    Sensible starting values for parameter tuning:

    * fliph_probability = 0.5
    * flipv_probability = 0.5
    * translation = (-5, 5)
    * rotation = (-np.pi / 8, np.pi / 8)
    * scale= (0.9, 1.1)
    * shear = (-0.1 * np.pi, 0.1 * np.pi)
    * diffeomorphism = [(8, .75)]

    Args:
        fliph_probability: probability of random flips on horizontal (first) axis
        flipv_probability: probability of random flips on vertical (second) axis
        translation: translation of image data among all axis
        rotation: rotation angle in counter-clockwise direction as radians
        scale: scale as proportion of input size
        shear: shear angle in counter-clockwise direction as radians.
        diffeomorphism: list of diffeomorphism parameters. Elements must
            be of ``(scale, intensity)``.
        diff_fix_border: fix_border parameter of diffeomorphism augmentation
        fill_mode (default 'edge'): one of corresponds to numpy.pad mode
    """
    def __init__(self,
                 fliph_probability=0.,
                 flipv_probability=0.,
                 translation=(0, 0),
                 rotation=(0, 0),
                 scale=(1, 1),
                 shear=(0, 0),
                 diffeomorphism=None,
                 diff_fix_border=False,
                 fill_mode='edge',
                 ):
        # Use a None sentinel instead of a shared mutable default list.
        if diffeomorphism is None:
            diffeomorphism = []
        self.fliph_probability = _parse_parameter(fliph_probability)
        self.flipv_probability = _parse_parameter(flipv_probability)
        self.translation = _parse_parameter(translation)
        self.rotation = _parse_parameter(rotation)
        self.scale = _parse_parameter(scale)
        self.shear = _parse_parameter(shear)
        self.diffeomorphism = [(_parse_parameter(s), _parse_parameter(i))
                               for s, i in diffeomorphism]
        self.diff_fix_border = _parse_parameter(diff_fix_border)
        self.fill_mode = fill_mode

    def get_transformation(self, shape):
        """Draw one random transformation for an image of ``shape``."""
        scale = self.scale()
        # Check the value that was actually drawn (the old code drew a
        # second, unused random value here) and expand a scalar scale
        # to an isotropic (sx, sy) pair; isinstance also covers
        # np.float64, which subclasses float.
        if isinstance(scale, (float, int)):
            scale = (scale, scale)
        diffeomorphism = [(diff_scale(), diff_intensity())
                          for diff_scale, diff_intensity in self.diffeomorphism]
        # WarpTransformation expects (fliph, flipv, ...): draw in that
        # order so each probability controls its own axis (previously
        # the two draws were passed swapped).
        return WarpTransformation(
            bool(random.random() < self.fliph_probability()),
            bool(random.random() < self.flipv_probability()),
            (self.translation(), self.translation()),
            self.rotation(),
            scale, self.shear(),
            diffeomorphism, self.diff_fix_border(),
            self.fill_mode,
            shape)
class WarpTransformation:
    """
    Transformation produced by ::py::class:`.WarpAugmentation`.
    You can access the values of the transformation. E.g.
    WarpTransformation.translation will hold the translations of this transformation.
    """
    def __init__(self, fliph, flipv, translation, rotation, scale, shear,
                 diffeomorphism, diff_fix_border, fill_mode, shape):
        self.fliph = fliph
        self.flipv = flipv
        self.translation = translation
        self.rotation = rotation
        self.scale = scale
        self.shear = shear
        self.diffeomorphism = diffeomorphism
        self.diff_fix_border = diff_fix_border
        self.fill_mode = fill_mode
        # Only the spatial part (rows, cols) of the shape is relevant.
        self.shape = shape[-2:]
        self.affine_transformation = self._get_affine()
        if self.diffeomorphism:
            # Superpose the displacement fields of all diffeomorphisms.
            self.diffeomorphism_map = sum([
                self._get_diffeomorphism_map(
                    self.shape, diff_scale, diff_intensity, self.diff_fix_border)
                for diff_scale, diff_intensity in self.diffeomorphism])
        else:
            self.diffeomorphism_map = None
        self.warp = self._warp_factory(self.diffeomorphism_map, self.affine_transformation,
                                       self.flipv, self.fliph)
    def __call__(self, img, order=3):
        """Warp ``img``: 2-D arrays directly, 3-D arrays channel by channel.

        Raises:
            Exception: if ``img`` is neither 2-D nor 3-D.
        """
        if img.ndim == 3:
            img_warped = []
            for channel in img:
                img_warped.append(warp(channel, self.warp, order=order, mode=self.fill_mode))
            return np.stack(img_warped)
        elif img.ndim == 2:
            return warp(img, self.warp, order=order, mode=self.fill_mode)
        else:
            raise Exception("Wrong number of dimensions. Expected 2 or 3. "
                            "Got {} with shape {}.".format(img.ndim, img.shape))
    @staticmethod
    def _center_transform(transform, shape):
        # Conjugate the transform with translations so it acts around the
        # image center instead of the (0, 0) origin.
        # NOTE(review): the translation uses shape[0] (rows) for the first
        # coordinate; exact only for square images if skimage expects
        # (cols, rows) order -- confirm before changing.
        center_transform = AffineTransform(translation=(-shape[0] // 2, -shape[1] // 2))
        uncenter_transform = AffineTransform(translation=(shape[0] // 2, shape[1] // 2))
        return center_transform + transform + uncenter_transform
    @staticmethod
    def _get_frame(shape, line_width, blur=8):
        """Return a float mask with a ``line_width``-pixel border of ones.

        ``blur`` is currently unused; kept for interface compatibility.
        """
        frame = np.zeros(shape)
        frame[:, :line_width] = 1
        frame[:, -line_width:] = 1
        frame[:line_width, :] = 1
        frame[-line_width:, :] = 1
        return frame
    def _get_affine(self):
        # Combine scale / rotation / shear / translation into one affine
        # transform, applied around the image center.
        t = AffineTransform(scale=self.scale,
                            rotation=self.rotation,
                            shear=self.shear,
                            translation=self.translation)
        return self._center_transform(t, self.shape)
    @staticmethod
    def _get_diffeomorphism_map(shape, scale=30, intensity=1., fix_border=True,
                                random=np.random.uniform):
        """
        Returns a diffeomorphism mapping that can be used with ``_warp_factory``.
        Args:
            shape (tuple): Shape of the image
            scale: Scale of the diffeomorphism in pixels.
            intensity (float): Intensity of the diffeomorphism. Must be between 0 and 1
            fix_border (boolean): If true the border of the resulting image stay constant.
            random: Function to draw the randomness. Will be called with
                ``random(-intensity, intensity, shape)``.
        """
        h, w = shape
        # Build a coarse (dh, dw, 2) displacement grid whose smaller side is
        # `scale` pixels, then upsample it to full resolution below.
        if h == min(h, w):
            dh = int(scale)
            dw = int(scale / h * w)
        else:
            dw = int(scale)
            dh = int(scale / w * h)
        rel_scale = scale / min(h, w)
        intensity = 0.25 * intensity * 1 / rel_scale
        diff_map = np.clip(random(-intensity, intensity, (dh, dw, 2)), -intensity, intensity)
        if fix_border:
            # Zero the displacement on a 1-pixel border so edges stay put.
            frame = WarpTransformation._get_frame((dh, dw), 1)
            for i in (0, 1):
                diff_map[:, :, i] = diff_map[:, :, i] * (1 - frame)
        diff_map = resize(diff_map, (h, w, 2), order=3)
        return diff_map
    def _warp_factory(self, diff_map=None, transform=None, flipv=False, fliph=False):
        """Build the coordinate-mapping function for ``skimage.transform.warp``."""
        def f(xy):
            # BUG FIX: ``np.int`` is a deprecated alias removed in NumPy
            # 1.24; the builtin ``int`` is the documented replacement.
            xi = xy[:, 0].astype(int)
            yi = xy[:, 1].astype(int)
            if diff_map is not None:
                # Displace every coordinate by the diffeomorphism field.
                off = xy + diff_map[yi, xi]
            else:
                off = xy
            if transform is not None:
                off = transform(off)
            if flipv:
                off = off[::-1, :]
            if fliph:
                off = off[:, ::-1]
            return off
        return f
def random_std(loc, scale):
    """
    Return a zero-argument sampler of stds from N(loc, scale).

    Each call of the returned function draws a fresh sample, clamped to be
    strictly positive (at least the smallest representable float) so it is
    always a valid standard deviation.
    """
    def _draw():
        sample = np.random.normal(loc=loc, scale=scale)
        return np.clip(sample, np.finfo(float).eps, np.inf)
    return _draw
class NoiseAugmentation(Augmentation):
    """
    Add gaussian noise with variable stds.
    Args:
        std (function): Returns variable std
    """
    # NOTE: the default is evaluated once at class-definition time, but
    # random_std returns a sampler *function*, so each self.std() call
    # still draws a fresh std.
    def __init__(self, std=random_std(0.03, 0.01)):
        self.std = std
    def get_transformation(self, shape):
        # Draw one std for this transformation; the actual noise field is
        # sampled inside GaussNoiseTransformation.
        return GaussNoiseTransformation(self.std(), shape)
class GaussNoiseTransformation:
    """Additive gaussian noise, drawn once and reused on every call.

    The noise field is sampled in ``__init__``, so applying the same
    transformation twice adds the identical noise pattern.
    """
    def __init__(self, std, shape):
        self.std = std
        self.shape = shape
        # Sample the noise field once; __call__ only adds it.
        self.noise = np.random.normal(loc=0.0, scale=std, size=shape)
    def __call__(self, arr):
        """Add the stored noise and clamp the result to [-1, 1]."""
        noisy = arr + self.noise
        return np.clip(noisy, -1, 1)
class LambdaAugmentation(Augmentation):
    """
    Wrap an arbitrary function as an augmentation.
    Args:
        func: Called as ``func(arr, **params)`` by the transformation.
        **params: Parameters for ``func``; each value is run through
            ``_parse_parameter`` so both constants and random-parameter
            functions are accepted.
    """
    def __init__(self, func, **params):
        self.func = func
        self.params = {k: _parse_parameter(v) for k, v in params.items()}
    def get_transformation(self, shape):
        # Draw concrete values for every parameter and freeze them into
        # the returned transformation.
        transformation_params = {k: param() for k, param in self.params.items()}
        return LambdaTransformation(self.func, transformation_params)
class LambdaTransformation:
    """Apply a fixed function with pre-drawn keyword parameters."""
    def __init__(self, func, params):
        self.params = params
        self.func = func
    def __call__(self, arr):
        """Return ``func(arr, **params)``."""
        kwargs = dict(self.params)
        return self.func(arr, **kwargs)
| |
import html.entities
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils.encoding import force_text
from django.utils.functional import (
SimpleLazyObject, keep_lazy, keep_lazy_text, lazy,
)
from django.utils.safestring import SafeText, mark_safe
from django.utils.translation import gettext as _, gettext_lazy, pgettext
@keep_lazy_text
def capfirst(x):
    """Capitalize the first letter of a string."""
    if not x:
        return x
    text = force_text(x)
    return text[0].upper() + text[1:]
# Set up regular expressions
# re_words: an HTML tag, or (group 1) a run of word chars / entities.
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.S)
# re_chars: an HTML tag, or (group 1) any single character.
re_chars = re.compile(r'<.*?>|(.)', re.S)
# re_tag: decomposes a tag into (closing slash, name, self-closing slash).
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r')  # Used in normalize_newlines
# re_camel_case: positions where a CamelCase word should be split.
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks. Expects that
    existing line breaks are posix newlines.
    Preserve all white space except added line breaks consume the space on
    which they break the line.
    Don't wrap long words, thus the output text may have lines longer than
    ``width``.
    """
    def _generator():
        for line in text.splitlines(True):  # True keeps trailing linebreaks
            # NOTE(review): min(..., width) makes max_width always equal
            # width; the endswith('\n') branch looks vestigial -- confirm
            # before simplifying.
            max_width = min((line.endswith('\n') and width + 1 or width), width)
            while len(line) > max_width:
                # Break at the last space that fits within max_width.
                space = line[:max_width + 1].rfind(' ') + 1
                if space == 0:
                    # No space fits: fall back to the first space anywhere
                    # (long words are not split).
                    space = line.find(' ') + 1
                    if space == 0:
                        # No spaces at all: emit the long line unbroken.
                        yield line
                        line = ''
                        break
                # The space the line breaks on is consumed (space - 1).
                yield '%s\n' % line[:space - 1]
                line = line[space:]
                max_width = min((line.endswith('\n') and width + 1 or width), width)
            if line:
                yield line
    return ''.join(_generator())
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.

    The wrapped value is coerced lazily via force_text, so lazy
    (e.g. translatable) strings are supported.
    """
    def __init__(self, text):
        super().__init__(lambda: force_text(text))
    def add_truncation_text(self, text, truncate=None):
        # Append the truncation indicator (default: a translatable '...'),
        # honouring a '%(truncated_text)s' placeholder when present.
        if truncate is None:
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s...')
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)
    def chars(self, num, truncate=None, html=False):
        """
        Return the text truncated to be no longer than the specified number
        of characters.
        `truncate` specifies what should be used to notify that the string has
        been truncated, defaulting to a translatable string of an ellipsis
        (...).
        """
        self._setup()
        length = int(num)
        text = unicodedata.normalize('NFC', self._wrapped)
        # Calculate the length to truncate to (max length - end_text length)
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            # Combining characters don't count towards the visible length.
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        if html:
            return self._truncate_html(length, truncate, text, truncate_len, False)
        return self._text_chars(length, truncate, text, truncate_len)
    def _text_chars(self, length, truncate, text, truncate_len):
        """Truncate a string after a certain number of chars."""
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)
        # Return the original string since no truncation was necessary
        return text
    def words(self, num, truncate=None, html=False):
        """
        Truncate a string after a certain number of words. `truncate` specifies
        what should be used to notify that the string has been truncated,
        defaulting to ellipsis (...).
        """
        self._setup()
        length = int(num)
        if html:
            return self._truncate_html(length, truncate, self._wrapped, length, True)
        return self._text_words(length, truncate)
    def _text_words(self, length, truncate):
        """
        Truncate a string after a certain number of words.
        Strip newlines in the string.
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)
    def _truncate_html(self, length, truncate, text, truncate_len, words):
        """
        Truncate HTML to a certain number of chars (not counting tags and
        comments), or, if words is True, then to a certain number of words.
        Close opened tags if they were correctly closed in the given HTML.
        Preserve newlines in the HTML.
        """
        if words and length <= 0:
            return ''
        # Elements with no closing tag in HTML 4.
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )
        # Count non-HTML chars/words and keep note of open tags
        pos = 0
        end_text_pos = 0
        current_len = 0
        open_tags = []
        regex = re_words if words else re_chars
        while current_len <= length:
            m = regex.search(text, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m.group(1):
                # It's an actual non-HTML word or char
                current_len += 1
                if current_len == truncate_len:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m.group(0))
            if not tag or current_len >= truncate_len:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)
        if current_len <= length:
            # Nothing was cut off: return the input unchanged.
            return text
        out = text[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
@keep_lazy_text
def get_valid_filename(s):
    """
    Return the given string converted to a string that can be used for a clean
    filename. Remove leading and trailing spaces; convert other spaces to
    underscores; and remove anything that is not an alphanumeric, dash,
    underscore, or dot.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    cleaned = force_text(s).strip().replace(' ', '_')
    # Drop every character that is not a word char, dash, or dot.
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy('or')):
    """
    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    if not list_:
        return ''
    if len(list_) == 1:
        return force_text(list_[0])
    # Translators: This string is used as a separator between list elements
    separator = _(', ')
    head = separator.join(force_text(i) for i in list_[:-1])
    return '%s %s %s' % (head, force_text(last_word), force_text(list_[-1]))
@keep_lazy_text
def normalize_newlines(text):
    """Normalize CRLF and CR newlines to just LF."""
    return re_newlines.sub('\n', force_text(text))
@keep_lazy_text
def phone2numeric(phone):
    """Convert a phone number with letters into its numeric equivalent."""
    # Standard telephone keypad mapping; characters outside the table
    # (digits, punctuation) pass through unchanged.
    keypad = {
        'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4',
        'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6',
        'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8',
        'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
    }
    return phone.lower().translate(str.maketrans(keypad))
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Gzip-compress the bytestring *s* (mtime pinned to 0 for determinism)."""
    buf = BytesIO()
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as gz:
        gz.write(s)
    return buf.getvalue()
class StreamingBuffer:
    """Minimal file-like sink that accumulates writes until read() drains it."""
    def __init__(self):
        self.vals = []
    def write(self, val):
        self.vals.append(val)
    def read(self):
        """Return everything written since the last read, emptying the buffer."""
        if not self.vals:
            return b''
        data = b''.join(self.vals)
        self.vals = []
        return data
    def flush(self):
        # No-op: data is only released through read().
        return
    def close(self):
        return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
    """Lazily gzip an iterable of bytestrings, yielding compressed chunks."""
    stream = StreamingBuffer()
    with GzipFile(mode='wb', compresslevel=6, fileobj=stream, mtime=0) as zfile:
        # First yield: the gzip header written when GzipFile was created.
        yield stream.read()
        for item in sequence:
            zfile.write(item)
            chunk = stream.read()
            if chunk:
                yield chunk
    # Closing the GzipFile flushed the trailer (CRC + length).
    yield stream.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).
    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    for token in smart_split_re.finditer(force_text(text)):
        yield token.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return chr(c)
except ValueError:
return match.group(0)
else:
try:
return chr(html.entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
# Matches an HTML entity reference: named (&amp;), decimal (&#65;) or
# hexadecimal (&#x41;).
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
    """Replace HTML entity references in *text* with their characters."""
    return _entity_re.sub(_replace_entity, force_text(text))
@keep_lazy_text
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::
        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"
    """
    if s[0] not in "\"'" or s[-1] != s[0]:
        raise ValueError("Not a string literal: %r" % s)
    quote = s[0]
    inner = s[1:-1]
    # Unescape the quote character first, then backslashes themselves.
    return inner.replace('\\' + quote, quote).replace('\\\\', '\\')
@keep_lazy(str, SafeText)
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.
    """
    value = force_text(value)
    normal_form = 'NFKC' if allow_unicode else 'NFKD'
    value = unicodedata.normalize(normal_form, value)
    if not allow_unicode:
        # Drop everything that does not survive an ASCII round-trip.
        value = value.encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', value))
def camel_case_to_spaces(value):
    """
    Split CamelCase and convert to lower case. Strip surrounding whitespace.
    """
    spaced = re_camel_case.sub(r' \1', value)
    return spaced.strip().lower()
def _format_lazy(format_string, *args, **kwargs):
    """
    Apply str.format() on 'format_string' where format_string, args,
    and/or kwargs might be lazy.
    """
    return format_string.format(*args, **kwargs)
# Lazy wrapper: lazy() defers the _format_lazy call until the result is
# actually used as a str.
format_lazy = lazy(_format_lazy, str)
| |
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import contextlib
import os
import re
import sys
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.common import config
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
# Register iptables-related config options (e.g. comment_iptables_rules,
# read below by comment_rule) on the global config object.
config.register_iptables_opts(cfg.CONF)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
#             add up to 12 characters to binary_name which is used as a prefix,
#             so we limit it to 16 characters.
#             (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
    """Grab the name of the binary we're running in."""
    prog = os.path.basename(sys.argv[0])
    # Truncate to 16 chars and make the name safe for chain prefixes.
    return prog[:16].replace(' ', '_')
# Computed once at import time; used as the default wrap prefix below.
binary_name = get_binary_name()
# A length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes
# a failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def comment_rule(rule, comment):
    """Append an iptables comment match to *rule* when comments are enabled."""
    if cfg.CONF.AGENT.comment_iptables_rules and comment:
        return '%s -m comment --comment "%s"' % (rule, comment)
    return rule
def get_chain_name(chain_name, wrap=True):
    """Truncate *chain_name* to the length limit for (un)wrapped chains."""
    limit = MAX_CHAIN_LEN_WRAP if wrap else MAX_CHAIN_LEN_NOWRAP
    return chain_name[:limit]
class IptablesRule(object):
    """An iptables rule.
    You shouldn't need to use this class directly, it's only used by
    IptablesManager.
    """
    def __init__(self, chain, rule, wrap=True, top=False,
                 binary_name=binary_name, tag=None, comment=None):
        # Chain name, truncated to the applicable length limit.
        self.chain = get_chain_name(chain, wrap)
        self.rule = rule
        self.wrap = wrap
        self.top = top
        # Prefix prepended to wrapped chain names; capped at 16 chars.
        self.wrap_name = binary_name[:16]
        self.tag = tag
        self.comment = comment
    def __eq__(self, other):
        # tag and comment are not compared: removal via list.remove() in
        # IptablesTable matches on the rule itself only.
        return ((self.chain == other.chain) and
                (self.rule == other.rule) and
                (self.top == other.top) and
                (self.wrap == other.wrap))
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        # Render as an iptables-restore '-A' line, applying the wrap
        # prefix and the optional comment.
        if self.wrap:
            chain = '%s-%s' % (self.wrap_name, self.chain)
        else:
            chain = self.chain
        return comment_rule('-A %s %s' % (chain, self.rule), self.comment)
class IptablesTable(object):
    """An iptables table."""
    def __init__(self, binary_name=binary_name):
        # Rules currently wanted in this table.
        self.rules = []
        # Rules queued for removal on the next apply.
        self.remove_rules = []
        # Wrapped chain names (stored without the wrap prefix).
        self.chains = set()
        # Chain names used verbatim (shared across components).
        self.unwrapped_chains = set()
        # Unwrapped chains queued for removal on the next apply.
        self.remove_chains = set()
        # Prefix used to namespace wrapped chains, capped at 16 chars.
        self.wrap_name = binary_name[:16]
    def add_chain(self, name, wrap=True):
        """Adds a named chain to the table.
        The chain name is wrapped to be unique for the component creating
        it, so different components of Nova can safely create identically
        named chains without interfering with one another.
        At the moment, its wrapped name is <binary name>-<chain name>,
        so if neutron-openvswitch-agent creates a chain named 'OUTPUT',
        it'll actually end up being named 'neutron-openvswi-OUTPUT'.
        """
        name = get_chain_name(name, wrap)
        if wrap:
            self.chains.add(name)
        else:
            self.unwrapped_chains.add(name)
    def _select_chain_set(self, wrap):
        # Wrapped and unwrapped chains live in separate sets.
        if wrap:
            return self.chains
        else:
            return self.unwrapped_chains
    def remove_chain(self, name, wrap=True):
        """Remove named chain.
        This removal "cascades". All rule in the chain are removed, as are
        all rules in other chains that jump to it.
        If the chain is not found, this is merely logged.
        """
        name = get_chain_name(name, wrap)
        chain_set = self._select_chain_set(wrap)
        if name not in chain_set:
            LOG.debug('Attempted to remove chain %s which does not exist',
                      name)
            return
        chain_set.remove(name)
        if not wrap:
            # non-wrapped chains and rules need to be dealt with specially,
            # so we keep a list of them to be iterated over in apply()
            self.remove_chains.add(name)
            # first, add rules to remove that have a matching chain name
            self.remove_rules += [r for r in self.rules if r.chain == name]
        # next, remove rules from list that have a matching chain name
        self.rules = [r for r in self.rules if r.chain != name]
        if not wrap:
            jump_snippet = '-j %s' % name
            # next, add rules to remove that have a matching jump chain
            self.remove_rules += [r for r in self.rules
                                  if jump_snippet in r.rule]
        else:
            jump_snippet = '-j %s-%s' % (self.wrap_name, name)
        # finally, remove rules from list that have a matching jump chain
        self.rules = [r for r in self.rules
                      if jump_snippet not in r.rule]
    def add_rule(self, chain, rule, wrap=True, top=False, tag=None,
                 comment=None):
        """Add a rule to the table.
        This is just like what you'd feed to iptables, just without
        the '-A <chain name>' bit at the start.
        However, if you need to jump to one of your wrapped chains,
        prepend its name with a '$' which will ensure the wrapping
        is applied correctly.
        """
        chain = get_chain_name(chain, wrap)
        if wrap and chain not in self.chains:
            raise LookupError(_('Unknown chain: %r') % chain)
        if '$' in rule:
            # Expand '$chain' tokens to the wrapped chain names.
            rule = ' '.join(
                self._wrap_target_chain(e, wrap) for e in rule.split(' '))
        self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
                                       tag, comment))
    def _wrap_target_chain(self, s, wrap):
        # '$name' refers to one of our own wrapped chains: add the prefix.
        if s.startswith('$'):
            s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
        return s
    def remove_rule(self, chain, rule, wrap=True, top=False, comment=None):
        """Remove a rule from a chain.
        Note: The rule must be exactly identical to the one that was added.
        You cannot switch arguments around like you can with the iptables
        CLI tool.
        """
        chain = get_chain_name(chain, wrap)
        try:
            if '$' in rule:
                rule = ' '.join(
                    self._wrap_target_chain(e, wrap) for e in rule.split(' '))
            # Relies on IptablesRule.__eq__ (tag/comment are not compared).
            self.rules.remove(IptablesRule(chain, rule, wrap, top,
                                           self.wrap_name,
                                           comment=comment))
            if not wrap:
                self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
                                                      self.wrap_name,
                                                      comment=comment))
        except ValueError:
            LOG.warn(_LW('Tried to remove rule that was not there:'
                         ' %(chain)r %(rule)r %(wrap)r %(top)r'),
                     {'chain': chain, 'rule': rule,
                      'top': top, 'wrap': wrap})
    def _get_chain_rules(self, chain, wrap):
        # All rules whose chain and wrap mode both match.
        chain = get_chain_name(chain, wrap)
        return [rule for rule in self.rules
                if rule.chain == chain and rule.wrap == wrap]
    def empty_chain(self, chain, wrap=True):
        """Remove all rules from a chain."""
        chained_rules = self._get_chain_rules(chain, wrap)
        for rule in chained_rules:
            self.rules.remove(rule)
    def clear_rules_by_tag(self, tag):
        # Drop every rule carrying *tag*; no-op when tag is falsy.
        if not tag:
            return
        rules = [rule for rule in self.rules if rule.tag == tag]
        for rule in rules:
            self.rules.remove(rule)
class IptablesManager(object):
    """Wrapper for iptables.
    See IptablesTable for some usage docs
    A number of chains are set up to begin with.
    First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT.
    Its name is not wrapped, so it's shared between the various neutron
    workers. It's intended for rules that need to live at the top of the
    FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables.
    For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
    are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
    the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
    "local" which is jumped to from neutron-filter-top.
    For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
    there's a snat chain that is applied after the POSTROUTING chain.
    """
def __init__(self, _execute=None, state_less=False, use_ipv6=False,
namespace=None, binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various neutron components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'mangle': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update(
{'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT',
'POSTROUTING']})
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
self.ipv4.update(
{'raw': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'raw': ['PREROUTING',
'OUTPUT']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various neutron components. We set it as the
# last chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False,
comment=ic.SNAT_OUT)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
# Add a mark chain to mangle PREROUTING chain. It is used to
# identify ingress packets from a certain interface.
self.ipv4['mangle'].add_chain('mark')
self.ipv4['mangle'].add_rule('PREROUTING', '-j $mark')
def get_chain(self, table, chain, ip_version=4, wrap=True):
try:
requested_table = {4: self.ipv4, 6: self.ipv6}[ip_version][table]
except KeyError:
return []
return requested_table._get_chain_rules(chain, wrap)
def is_chain_empty(self, table, chain, ip_version=4, wrap=True):
return not self.get_chain(table, chain, ip_version, wrap)
    @contextlib.contextmanager
    def defer_apply(self):
        """Defer apply context."""
        self.defer_apply_on()
        try:
            yield
        finally:
            try:
                self.defer_apply_off()
            except Exception:
                # Re-raise as a neutron exception so callers can treat
                # apply failures uniformly.
                msg = _LE('Failure applying iptables rules')
                LOG.exception(msg)
                raise n_exc.IpTablesApplyException(msg)
    def defer_apply_on(self):
        """Start deferring: apply() becomes a no-op until defer_apply_off()."""
        self.iptables_apply_deferred = True
    def defer_apply_off(self):
        """Stop deferring and flush the accumulated state to iptables."""
        self.iptables_apply_deferred = False
        self._apply()
    def apply(self):
        """Apply the in-memory rules, unless application is being deferred."""
        if self.iptables_apply_deferred:
            return
        self._apply()
    def _apply(self):
        """Serialize rule application under a per-namespace external lock."""
        lock_name = 'iptables'
        if self.namespace:
            # One lock per namespace so different namespaces don't contend.
            lock_name += '-' + self.namespace
        try:
            with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
                LOG.debug('Got semaphore / lock "%s"', lock_name)
                return self._apply_synchronized()
        finally:
            LOG.debug('Semaphore / lock released "%s"', lock_name)
    def _apply_synchronized(self):
        """Apply the current in-memory set of iptables rules.
        This will blow away any rules left over from previous runs of the
        same component of Nova, and replace them with our current set of
        rules. This happens atomically, thanks to iptables-restore.
        """
        s = [('iptables', self.ipv4)]
        if self.use_ipv6:
            s += [('ip6tables', self.ipv6)]
        for cmd, tables in s:
            # Snapshot the kernel state (with '-c' packet/byte counters),
            # splice our tables into the dump, then atomically restore it.
            args = ['%s-save' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            all_tables = self.execute(args, run_as_root=True)
            all_lines = all_tables.split('\n')
            # Traverse tables in sorted order for predictable dump output
            for table_name in sorted(tables):
                table = tables[table_name]
                start, end = self._find_table(all_lines, table_name)
                all_lines[start:end] = self._modify_rules(
                    all_lines[start:end], table, table_name)
            args = ['%s-restore' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            try:
                self.execute(args, process_input='\n'.join(all_lines),
                             run_as_root=True)
            except RuntimeError as r_error:
                with excutils.save_and_reraise_exception():
                    try:
                        # iptables-restore reports the failing line; log a
                        # window of context around it.
                        line_no = int(re.search(
                            'iptables-restore: line ([0-9]+?) failed',
                            str(r_error)).group(1))
                        context = IPTABLES_ERROR_LINES_OF_CONTEXT
                        log_start = max(0, line_no - context)
                        log_end = line_no + context
                    except AttributeError:
                        # line error wasn't found, print all lines instead
                        log_start = 0
                        log_end = len(all_lines)
                    log_lines = ('%7d. %s' % (idx, l)
                                 for idx, l in enumerate(
                                     all_lines[log_start:log_end],
                                     log_start + 1)
                                 )
                    LOG.error(_LE("IPTablesManager.apply failed to apply the "
                                  "following set of iptables rules:\n%s"),
                              '\n'.join(log_lines))
        LOG.debug("IPTablesManager.apply completed with success")
def _find_table(self, lines, table_name):
if len(lines) < 3:
# length only <2 when fake iptables
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug('Unable to find table %s', table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
# Chains are stored as sets to avoid duplicates.
# Sort the output chains here to make their order predictable.
unwrapped_chains = sorted(table.unwrapped_chains)
chains = sorted(table.chains)
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
# the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
    """Return (binary, table) pairs for every table containing the chain."""
    name = get_chain_name(chain, wrap)

    def _matching_tables(tables, binary):
        # Keep only the tables whose chain set actually includes ``name``.
        return [(binary, key) for key, table in tables.items()
                if name in table._select_chain_set(wrap)]

    matches = _matching_tables(self.ipv4, 'iptables')
    if self.use_ipv6:
        matches += _matching_tables(self.ipv6, 'ip6tables')
    return matches
def get_traffic_counters(self, chain, wrap=True, zero=False):
    """Return the sum of the traffic counters of all rules of a chain.

    :param chain: chain name to sum counters for.
    :param wrap: whether the chain name is wrapped with this manager's
        prefix before lookup.
    :param zero: if True, also zero the counters after reading
        (iptables ``-Z``).
    :returns: dict with accumulated 'pkts' and 'bytes', or ``None`` if the
        chain does not exist in any table.
    """
    cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
    if not cmd_tables:
        # logger.warn() is deprecated; use warning() instead.
        LOG.warning(_LW('Attempted to get traffic counters of chain %s which '
                        'does not exist'), chain)
        return
    name = get_chain_name(chain, wrap)
    acc = {'pkts': 0, 'bytes': 0}
    for cmd, table in cmd_tables:
        # -n -v -x: numeric output with exact (non-abbreviated) counters.
        args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
        if zero:
            args.append('-Z')
        if self.namespace:
            args = ['ip', 'netns', 'exec', self.namespace] + args
        current_table = self.execute(args, run_as_root=True)
        current_lines = current_table.split('\n')
        # Skip the two header lines; stop at the first empty or
        # non-numeric row (end of the rule listing).
        for line in current_lines[2:]:
            if not line:
                break
            data = line.split()
            if (len(data) < 2 or
                    not data[0].isdigit() or
                    not data[1].isdigit()):
                break
            acc['pkts'] += int(data[0])
            acc['bytes'] += int(data[1])
    return acc
| |
# encoding=utf-8
from __future__ import unicode_literals, division
import os
import re
from django.core.files.base import File, ContentFile
from django.core.files.storage import Storage # , default_storage
from django.utils.functional import LazyObject, empty
from sorl.thumbnail import default
from sorl.thumbnail.conf import settings
from sorl.thumbnail.compat import (json, urlopen, urlparse, urlsplit,
quote, quote_plus,
URLError, force_unicode, encode)
from sorl.thumbnail.default import storage as default_storage
from sorl.thumbnail.helpers import ThumbnailError, tokey, get_module_class, deserialize
from sorl.thumbnail.parsers import parse_geometry
# Matches absolute http/https/ftp URLs; such names get the UrlStorage backend.
url_pat = re.compile(r'^(https?|ftp):\/\/')
def serialize_image_file(image_file):
    """Return a JSON string describing *image_file* (name, storage, size)."""
    size = image_file.size
    if size is None:
        raise ThumbnailError('Trying to serialize an ``ImageFile`` with a '
                             '``None`` size.')
    return json.dumps(dict(
        name=image_file.name,
        storage=image_file.serialize_storage(),
        size=size,
    ))
def deserialize_image_file(s):
    """Rebuild an ``ImageFile`` from its serialized form, with lazy storage."""
    data = deserialize(s)

    class LazyStorage(LazyObject):
        # Instantiate the storage class only on first access.
        def _setup(self):
            self._wrapped = get_module_class(data['storage'])()

    restored = ImageFile(data['name'], LazyStorage())
    restored.set_size(data['size'])
    return restored
class BaseImageFile(object):
    """Shared geometry helpers for image-file implementations.

    Subclasses must provide ``size`` as ``[width, height]`` and implement
    ``exists`` and ``url``.
    """

    size = []

    def exists(self):
        raise NotImplementedError()

    @property
    def width(self):
        return self.size[0]

    x = width  # short alias

    @property
    def height(self):
        return self.size[1]

    y = height  # short alias

    def is_portrait(self):
        return self.height > self.width

    @property
    def ratio(self):
        return float(self.width) / float(self.height)

    @property
    def url(self):
        raise NotImplementedError()

    src = url  # template-friendly alias
class ImageFile(BaseImageFile):
    """A named image together with the storage backend that holds it."""

    # Cached [width, height]; populated lazily by ``set_size``.
    _size = None

    def __init__(self, file_, storage=None):
        if not file_:
            raise ThumbnailError('File is empty.')
        # figure out name
        if hasattr(file_, 'name'):
            self.name = file_.name
        else:
            self.name = force_unicode(file_)
        # figure out storage
        if storage is not None:
            self.storage = storage
        elif hasattr(file_, 'storage'):
            self.storage = file_.storage
        elif url_pat.match(self.name):
            # http/https/ftp names are fetched remotely.
            self.storage = UrlStorage()
        else:
            self.storage = default_storage
        if hasattr(self.storage, 'location'):
            # Make ``name`` relative to the storage root by stripping the
            # backend's location prefix, if present.
            location = self.storage.location
            if not self.storage.location.endswith("/"):
                location += "/"
            if self.name.startswith(location):
                self.name = self.name[len(location):]

    def __unicode__(self):
        return self.name

    def exists(self):
        """Return True if the file exists in its storage."""
        return self.storage.exists(self.name)

    def set_size(self, size=None):
        """Cache the image dimensions, computing them if not supplied."""
        # set the size if given
        if size is not None:
            pass
        # Don't try to set the size the expensive way if it already has a
        # value.
        elif self._size is not None:
            return
        elif hasattr(self.storage, 'image_size'):
            # Storage backends can implement ``image_size`` method that
            # optimizes this.
            size = self.storage.image_size(self.name)
        else:
            # This is the worst case scenario: load the image bytes through
            # the configured engine just to read its dimensions.
            image = default.engine.get_image(self)
            size = default.engine.get_image_size(image)
        self._size = list(size)

    @property
    def size(self):
        # [width, height], or None if ``set_size`` was never called.
        return self._size

    @property
    def url(self):
        return self.storage.url(self.name)

    def read(self):
        return self.storage.open(self.name).read()

    def write(self, content):
        """Save ``content`` under this name; invalidates the cached size."""
        if not isinstance(content, File):
            content = ContentFile(content)
        self._size = None
        # Storage may rename on collision; adopt the name it actually used.
        self.name = self.storage.save(self.name, content)
        return self.name

    def delete(self):
        return self.storage.delete(self.name)

    def serialize_storage(self):
        """Return the dotted import path of the concrete storage class."""
        if isinstance(self.storage, LazyObject):
            # if storage is wrapped in a lazy object we need to get the real
            # thing.
            if self.storage._wrapped is empty:
                self.storage._setup()
            cls = self.storage._wrapped.__class__
        else:
            cls = self.storage.__class__
        return '%s.%s' % (cls.__module__, cls.__name__)

    @property
    def key(self):
        # Cache key derived from the name plus the storage class path.
        return tokey(self.name, self.serialize_storage())

    def serialize(self):
        return serialize_image_file(self)
class DummyImageFile(BaseImageFile):
    """A placeholder image whose size is derived from a geometry string."""

    def __init__(self, geometry_string):
        # Resolve the requested geometry against the configured dummy ratio.
        self.size = parse_geometry(
            geometry_string,
            settings.THUMBNAIL_DUMMY_RATIO,
        )

    def exists(self):
        # A dummy image always "exists".
        return True

    @property
    def url(self):
        # Fill the configured placeholder URL template with our dimensions.
        return settings.THUMBNAIL_DUMMY_SOURCE % {
            'width': self.x,
            'height': self.y,
        }
class UrlStorage(Storage):
    """Read-only storage backend for files addressed by http/https/ftp URL."""

    def normalize_url(self, url, charset='utf-8'):
        """Return *url* percent-encoded so urllib can open it."""
        url = encode(url, charset, 'ignore')
        scheme, netloc, path, qs, anchor = urlsplit(url)
        # Encode to utf8 to prevent urllib KeyError
        path = encode(path, charset, 'ignore')
        path = quote(path, '/%')
        qs = quote_plus(qs, ':&%=')
        return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))

    def open(self, name, mode='rb'):
        # ``mode`` is accepted for Storage API compatibility but unused.
        return urlopen(self.normalize_url(name))

    def exists(self, name):
        # Existence is probed by actually fetching the URL.
        try:
            self.open(name)
        except URLError:
            return False
        return True

    def url(self, name):
        # The name *is* the URL.
        return name

    def delete(self, name):
        # Remote files cannot be deleted; silently ignore.
        pass
def delete_all_thumbnails():
    """Remove every generated thumbnail below THUMBNAIL_PREFIX.

    Files are deleted through the configured storage backend; emptied
    directories are removed from the local filesystem when the backend
    exposes a filesystem path (best effort otherwise).
    """
    storage = default.storage
    path = os.path.join(storage.location, settings.THUMBNAIL_PREFIX)

    def walk(path):
        # Depth-first: empty each subtree before removing its directory.
        dirs, files = storage.listdir(path)
        for f in files:
            storage.delete(os.path.join(path, f))
        for d in dirs:
            directory = os.path.join(path, d)
            walk(directory)
            try:
                full_path = storage.path(directory)
            except Exception:
                # Remote storages may not support local paths; skip rmdir.
                continue
            os.rmdir(full_path)
    walk(path)
| |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# TODO(jiayq): as more and more tests are moving to hypothesis test, we
# can gradually remove this test script. DO NOT ADD MORE TESTS TO THIS
# FILE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import (
brew,
core,
device_checker,
gradient_checker,
model_helper,
test_util,
workspace,
)
from caffe2.python.gradient_checker import NetGradientChecker
from caffe2.python.net_builder import ops, NetBuilder
from caffe2.proto import caffe2_pb2
import unittest
# Build the device and gradient checkers once at import time.  When CUDA
# devices are available both GPU and CPU variants are checked; otherwise
# only the CPU path is exercised.
if workspace.has_gpu_support and workspace.NumCudaDevices() > 0:
    gpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_option.device_type = caffe2_pb2.CUDA
    cpu_device_option = caffe2_pb2.DeviceOption()  # defaults describe CPU
    gpu_device_checker = device_checker.DeviceChecker(
        0.01, [gpu_device_option]
    )
    # NOTE: rebinds the module name ``device_checker`` to a checker instance.
    device_checker = device_checker.DeviceChecker(
        0.01, [gpu_device_option, cpu_device_option]
    )
    gpu_gradient_checkers = [
        gradient_checker.GradientChecker(
            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
        ),
    ]
    gradient_checkers = [
        gradient_checker.GradientChecker(
            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
        ),
        gradient_checker.GradientChecker(
            0.01, 0.05, cpu_device_option, "cpu_checker_ws"
        ),
    ]
else:
    cpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_option = None
    gpu_device_checker = device_checker.DeviceChecker(
        0.01, []
    )
    device_checker = device_checker.DeviceChecker(0.01, [cpu_device_option])
    gradient_checkers = [
        gradient_checker.GradientChecker(
            0.01, 0.05, cpu_device_option, "cpu_checker_ws"
        )
    ]
    gpu_gradient_checkers = []
class TestLRN(test_util.TestCase):
    """Device and gradient checks for the LRN operator (NHWC)."""

    def setUp(self):
        # (spatial size, channel depth) pairs to exercise.
        self.test_configs = [(6, 10), (3, 13), ]

    def testLRN(self):
        for spatial, depth in self.test_configs:
            op = core.CreateOperator(
                "LRN",
                ["X"],
                ["Y", "Y_scale"],
                size=11,
                alpha=0.001,
                beta=0.5,
                bias=2.0,
                order="NHWC",
            )
            X = np.random.rand(2, spatial, spatial, depth).astype(np.float32)
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestFlatten(test_util.TestCase):
    """Device and gradient checks for the Flatten operator."""

    def testFlatten(self):
        op = core.CreateOperator("Flatten", ["X"], ["Y"])
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
        for checker in gradient_checkers:
            ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
            self.assertTrue(ok)
class TestConcat(test_util.TestCase):
    """Device and gradient checks for Concat in NHWC and NCHW layouts."""

    def setUp(self):
        self.test_configs = [
            # input_size, depth1, depth2, depth3, depth4
            (3, 2, 3, 4, 5),
            (4, 5, 4, 3, 2),
        ]

    def _check(self, op, inputs):
        # The device check is (redundantly) repeated once per input index,
        # mirroring the per-input gradient check of the original test.
        for idx in range(4):
            self.assertTrue(device_checker.CheckSimple(op, inputs, [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, inputs, idx, [0])
                self.assertTrue(ok)

    def testConcatNHWC(self):
        for size, d1, d2, d3, d4 in self.test_configs:
            op = core.CreateOperator(
                "Concat", ["X1", "X2", "X3", "X4"], ["Y", "Y_dims"],
                order="NHWC")
            inputs = [
                np.random.rand(2, size, size, d).astype(np.float32)
                for d in (d1, d2, d3, d4)
            ]
            self._check(op, inputs)

    def testConcatNCHW(self):
        for size, d1, d2, d3, d4 in self.test_configs:
            op = core.CreateOperator(
                "Concat", ["X1", "X2", "X3", "X4"], ["Y", "Y_dims"],
                order="NCHW")
            inputs = [
                np.random.rand(2, d, size, size).astype(np.float32)
                for d in (d1, d2, d3, d4)
            ]
            self._check(op, inputs)
class TestRelu(test_util.TestCase):
    """Device and gradient checks for Relu on a range of shapes."""

    def setUp(self):
        self.test_configs = [
            # input size
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 3, 3, 1),
            (2, 3, 3, 1),
            (1, 5, 5, 3),
            (2, 5, 5, 3),
        ]

    def testRelu(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Relu", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32)
            # go away from the origin point to avoid kink problems
            X += 0.01 * np.sign(X)
            X[X == 0] = 0.01
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestTanh(test_util.TestCase):
    """Device and gradient checks for the Tanh operator."""

    def setUp(self):
        self.test_configs = [
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 2, 3, 4),
        ]

    def testTanh(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Tanh", ["X"], ["Y"])
            # Center inputs around zero.
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestAbs(test_util.TestCase):
    """Device and gradient checks for the Abs operator."""

    def setUp(self):
        self.test_configs = [
            (1, 1),
            (2, 3),
            (2, 3, 4),
            (2, 3, 4, 5),
        ]

    def testAbs(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Abs", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32)
            # go away from the origin point to avoid kink problems
            X += 0.01 * np.sign(X)
            X[X == 0] = 0.01
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestExp(test_util.TestCase):
    """Device and gradient checks for the Exp operator."""

    def setUp(self):
        self.test_configs = [
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 2, 3, 4),
        ]

    def testExp(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Exp", ["X"], ["Y"])
            # Center inputs around zero.
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestCos(test_util.TestCase):
    """Device and gradient checks for the Cos operator."""

    def setUp(self):
        self.test_configs = [
            (1, 1),
            (2, 3),
            (2, 3, 4),
            (2, 3, 4, 5),
        ]

    def testCos(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Cos", ["X"], ["Y"])
            # Center inputs around zero.
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestSin(test_util.TestCase):
    """Device and gradient checks for the Sin operator."""

    def setUp(self):
        self.test_configs = [
            (1, 1),
            (2, 3),
            (2, 3, 4),
            (2, 3, 4, 5),
        ]

    def testSin(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Sin", ["X"], ["Y"])
            # Center inputs around zero.
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestSigmoid(test_util.TestCase):
    """Device and gradient checks for the Sigmoid operator."""

    def setUp(self):
        self.test_configs = [
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 2, 3, 4),
        ]

    def testSigmoid(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Sigmoid", ["X"], ["Y"])
            # Center inputs around zero.
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestSum(test_util.TestCase):
    """Device and gradient checks for Sum, in-place and out-of-place."""

    def setUp(self):
        self.test_configs = [
            # ((0, 1), False),
            ((1, 2, 3, 4), True),
            ((1, 2, 3, 4), False)]

    def testSum(self):
        for shape, in_place in self.test_configs:
            # In-place variant writes the result back into X1.
            output = "X1" if in_place else "Y"
            op = core.CreateOperator("Sum", ["X1", "X2"], [output])
            X1 = np.random.rand(*shape).astype(np.float32) - 0.5
            X2 = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X1, X2], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X1, X2], 0, [0])
                self.assertTrue(ok)
class TestMakeTwoClass(test_util.TestCase):
    """Device and gradient checks for the MakeTwoClass operator."""

    def setUp(self):
        self.test_configs = [
            # input size
            # (0, 1),
            (1,),
            (7,),
            (1, 3),
            (2, 5),
        ]

    def testMakeTwoClass(self):
        for shape in self.test_configs:
            op = core.CreateOperator("MakeTwoClass", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32)
            # step a little to avoid gradient problems
            X[X < 0.01] += 0.01
            X[X > 0.99] -= 0.01
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestNetGradientChecker(test_util.TestCase):
    """Tests NetGradientChecker: whole-net gradients and net equivalence."""

    def test_net_gradient_checker(self):
        """Check gradients of a small FC net feeding two losses."""
        model = model_helper.ModelHelper(name="test")
        const = model.net.AddExternalInputs("const1", "const2")
        fc = brew.fc(model, dim_in=3, dim_out=4, blob_in="X", blob_out="Y", axis=0)
        dist = [model.net.SquaredL2Distance([fc, c]) for c in const]
        losses = [model.net.AveragedLoss(d) for d in dist]  # using two losses here
        workspace.RunNetOnce(model.param_init_net)
        NetGradientChecker.Check(
            model.net,
            outputs_with_grad=losses,
            input_values={"X": np.array([1, 2, 3], dtype="float32"),
                          const[0]: np.array([1, 1, 1, 1], dtype="float32"),
                          const[1]: np.array([2, 2, 2, 2], dtype="float32")},
            input_to_check="X",
        )

    def test_net_comparison(self):
        """Verify two algebraically equivalent nets match in value and grad."""
        # (a + b) * (c + d) == a * c + a * d + b * c + b * d
        net1 = core.Net("net1")
        a, b, c, d = net1.AddExternalInputs("a", "b", "c", "d")
        a_b = net1.Sum([a, b], "a+b")
        c_d = net1.Sum([c, d], "c+d")
        x = net1.Mul([a_b, c_d], "x")
        net2 = core.Net("net2")
        ac = net2.Mul([a, c], "ac")
        ad = net2.Mul([a, d], "ad")
        bc = net2.Mul([b, c], "bc")
        bd = net2.Mul([b, d], "bd")
        y = net2.Sum([ac, ad, bc, bd], "y")
        input_values = {blob: np.array([i], dtype=np.float32)
                        for i, blob in enumerate([a, b, c, d])}
        NetGradientChecker.CompareNets(
            [net1, net2], [[x], [y]], [0],
            inputs_with_grads=[a, b, c, d],
            input_values=input_values,
        )
class TestIf(test_util.TestCase):
    """Trains through If/Else control ops and checks the learned parameters."""

    def testIf(self):
        # Ground-truth parameters for the two branches.
        W_a_values = [2.0, 1.5]
        B_a_values = [0.5]
        W_b_values = [7.0, 3.5]
        B_b_values = [1.5]
        with NetBuilder(_use_control_ops=True) as init_nb:
            # Trainable parameters start random/zero; W_gt_*/B_gt_* hold the
            # fixed targets used to generate training data.
            W_a = ops.UniformFill([], "W_a", shape=[1, 2], min=-1., max=1.)
            B_a = ops.ConstantFill([], "B_a", shape=[1], value=0.0)
            W_b = ops.UniformFill([], "W_b", shape=[1, 2], min=-1., max=1.)
            B_b = ops.ConstantFill([], "B_b", shape=[1], value=0.0)
            W_gt_a = ops.GivenTensorFill(
                [], "W_gt_a", shape=[1, 2], values=W_a_values)
            B_gt_a = ops.GivenTensorFill([], "B_gt_a", shape=[1], values=B_a_values)
            W_gt_b = ops.GivenTensorFill(
                [], "W_gt_b", shape=[1, 2], values=W_b_values)
            B_gt_b = ops.GivenTensorFill([], "B_gt_b", shape=[1], values=B_b_values)
            params = [W_gt_a, B_gt_a, W_a, B_a, W_gt_b, B_gt_b, W_b, B_b]
        with NetBuilder(_use_control_ops=True, initial_scope=params) as train_nb:
            Y_pred = ops.ConstantFill([], "Y_pred", shape=[1], value=0.0)
            Y_noise = ops.ConstantFill([], "Y_noise", shape=[1], value=0.0)
            # ``switch`` is re-sampled every iteration (run_once=0) and
            # selects which branch generates data/predictions this step.
            switch = ops.UniformFill(
                [], "switch", shape=[1], min=-1., max=1., run_once=0)
            zero = ops.ConstantFill([], "zero", shape=[1], value=0.0)
            X = ops.GaussianFill(
                [], "X", shape=[4096, 2], mean=0.0, std=1.0, run_once=0)
            noise = ops.GaussianFill(
                [], "noise", shape=[4096, 1], mean=0.0, std=1.0, run_once=0)
            with ops.IfNet(ops.LT([switch, zero])):
                Y_gt = ops.FC([X, W_gt_a, B_gt_a], "Y_gt")
                ops.Add([Y_gt, noise], Y_noise)
                ops.FC([X, W_a, B_a], Y_pred)
            with ops.Else():
                Y_gt = ops.FC([X, W_gt_b, B_gt_b], "Y_gt")
                ops.Add([Y_gt, noise], Y_noise)
                ops.FC([X, W_b, B_b], Y_pred)
            dist = ops.SquaredL2Distance([Y_noise, Y_pred], "dist")
            loss = dist.AveragedLoss([], ["loss"])
        assert len(init_nb.get()) == 1, "Expected a single init net produced"
        assert len(train_nb.get()) == 1, "Expected a single train net produced"
        train_net = train_nb.get()[0]
        gradient_map = train_net.AddGradientOperators([loss])
        init_net = init_nb.get()[0]
        # SGD plumbing: iteration counter, step-decayed (negative) learning
        # rate, and in-place weighted-sum updates per trainable parameter.
        ITER = init_net.ConstantFill(
            [], "ITER", shape=[1], value=0, dtype=core.DataType.INT32)
        train_net.Iter(ITER, ITER)
        LR = train_net.LearningRate(ITER, "LR", base_lr=-0.1,
                                    policy="step", stepsize=20, gamma=0.9)
        ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
        train_net.WeightedSum([W_a, ONE, gradient_map[W_a], LR], W_a)
        train_net.WeightedSum([B_a, ONE, gradient_map[B_a], LR], B_a)
        train_net.WeightedSum([W_b, ONE, gradient_map[W_b], LR], W_b)
        train_net.WeightedSum([B_b, ONE, gradient_map[B_b], LR], B_b)
        workspace.RunNetOnce(init_net)
        workspace.CreateNet(train_net)
        for _epoch in range(1000):
            workspace.RunNet(train_net.Proto().name)
        # Each branch's parameters should have converged to its ground truth.
        values_map = {
            "W_a": W_a_values,
            "B_a": B_a_values,
            "W_b": W_b_values,
            "B_b": B_b_values,
        }
        train_eps = 0.01
        for blob_name, values in values_map.items():
            trained_values = workspace.FetchBlob(blob_name)
            if trained_values.ndim == 2:
                # Weights come back as a (1, n) matrix; flatten the row.
                self.assertEqual(trained_values.shape[0], 1)
                trained_values = trained_values[0][:]
            else:
                self.assertEqual(trained_values.ndim, 1)
            self.assertEqual(trained_values.size, len(values))
            for idx in range(len(trained_values)):
                self.assertTrue(abs(trained_values[idx] - values[idx]) < train_eps)
class TestWhile(test_util.TestCase):
    """Tests gradients through a WhileNet containing a nested If/Else."""

    def testWhile(self):
        with NetBuilder(_use_control_ops=True) as nb:
            # Loop counter plus integer and float working values.
            ops.Copy(ops.Const(0), "i")
            ops.Copy(ops.Const(1), "one")
            ops.Copy(ops.Const(2), "two")
            ops.Copy(ops.Const(2.0), "x")
            ops.Copy(ops.Const(3.0), "y")
            ops.Copy(ops.Const(2.0), "z")
            # raises x to the power of 4 and y to the power of 2
            # and z to the power of 3
            with ops.WhileNet():
                with ops.Condition():
                    # Increment i, loop while i <= 2 (two iterations).
                    ops.Add(["i", "one"], "i")
                    ops.LE(["i", "two"])
                ops.Pow("x", "x", exponent=2.0)
                with ops.IfNet(ops.LT(["i", "two"])):
                    # First iteration only (i == 1).
                    ops.Pow("y", "y", exponent=2.0)
                with ops.Else():
                    # Second iteration (i == 2).
                    ops.Pow("z", "z", exponent=3.0)
            ops.Add(["x", "y"], "x_plus_y")
            ops.Add(["x_plus_y", "s"], "s") if False else ops.Add(["x_plus_y", "z"], "s")
        assert len(nb.get()) == 1, "Expected a single net produced"
        net = nb.get()[0]
        net.AddGradientOperators(["s"])
        workspace.RunNetOnce(net)
        # (x^4)' = 4x^3
        self.assertAlmostEqual(workspace.FetchBlob("x_grad"), 32)
        self.assertAlmostEqual(workspace.FetchBlob("x"), 16)
        # (y^2)' = 2y
        self.assertAlmostEqual(workspace.FetchBlob("y_grad"), 6)
        self.assertAlmostEqual(workspace.FetchBlob("y"), 9)
        # (z^3)' = 3z^2
        self.assertAlmostEqual(workspace.FetchBlob("z_grad"), 12)
        self.assertAlmostEqual(workspace.FetchBlob("z"), 8)
if __name__ == '__main__':
    # Caffe2 requires a one-time global init before any workspace use.
    workspace.GlobalInit(["python"])
    unittest.main()
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from decimal import Decimal
from io import BytesIO
from test_framework.messages import COIN, CTransaction
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
bytes_to_hex_str,
hex_str_to_bytes,
sync_mempools,
)
def tx_from_hex(hexstring):
    """Parse a serialized transaction hex string into a CTransaction."""
    stream = BytesIO(hex_str_to_bytes(hexstring))
    parsed = CTransaction()
    parsed.deserialize(stream)
    return parsed
class ListTransactionsTest(BitcoinTestFramework):
    """Exercises listtransactions: categories, confirmations, watch-only
    labels and BIP125 replaceability reporting."""

    def set_test_params(self):
        self.num_nodes = 2
        self.enable_mocktime()

    def run_test(self):
        """Cover send/receive categories, sendmany, and watch-only entries."""
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid},
                            {"category": "send", "amount": Decimal("-0.1"), "confirmations": 0})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid": txid},
                            {"category": "receive", "amount": Decimal("0.1"), "confirmations": 0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid},
                            {"category": "send", "amount": Decimal("-0.1"), "confirmations": 1})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid": txid},
                            {"category": "receive", "amount": Decimal("0.1"), "confirmations": 1})
        # send-to-self shows up as both a send and a receive entry:
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid, "category": "send"},
                            {"amount": Decimal("-0.2")})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid, "category": "receive"},
                            {"amount": Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        # NOTE(review): each getnewaddress() call returns a distinct address,
        # so the four dict keys do not collide at runtime.
        send_to = {self.nodes[0].getnewaddress(): 0.11,
                   self.nodes[1].getnewaddress(): 0.22,
                   self.nodes[0].getnewaddress(): 0.33,
                   self.nodes[1].getnewaddress(): 0.44}
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-0.11")},
                            {"txid": txid})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category": "receive", "amount": Decimal("0.11")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-0.22")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "receive", "amount": Decimal("0.22")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-0.33")},
                            {"txid": txid})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category": "receive", "amount": Decimal("0.33")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-0.44")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "receive", "amount": Decimal("0.44")},
                            {"txid": txid})
        # Watch-only: import a multisig address and check it only appears
        # when include_watchonly is set.
        pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
        multisig = self.nodes[1].createmultisig(1, [pubkey])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert not [tx for tx in self.nodes[0].listtransactions(dummy="*", count=100, skip=0, include_watchonly=False) if "label" in tx and tx["label"] == "watchonly"]
        txs = [tx for tx in self.nodes[0].listtransactions(dummy="*", count=100, skip=0, include_watchonly=True) if "label" in tx and tx['label'] == 'watchonly']
        assert_array_result(txs, {"category": "receive", "amount": Decimal("0.1")}, {"txid": txid})
        self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        """Check bip125-replaceable reporting for a chain of transactions."""
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False

        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None

        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
        assert_equal(utxo_to_use["safe"], True)
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        # NOTE(review): the call below duplicates the one above — looks
        # redundant; confirm against upstream before removing.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        assert_equal(utxo_to_use["safe"], False)
        # Create tx2 using createrawtransaction
        inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
        # Tx3 will opt-in to RBF
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = tx_from_hex(tx3)
        # nSequence = 0 (< 0xfffffffe) signals BIP125 opt-in.
        tx3_modified.vin[0].nSequence = 0
        tx3 = bytes_to_hex_str(tx3_modified.serialize())
        tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
        assert(is_opt_in(self.nodes[0], txid_3))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
        assert(not is_opt_in(self.nodes[1], txid_4))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN)  # bump the fee
        tx3_b = bytes_to_hex_str(tx3_b.serialize())
        tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
    # Standard test-framework entry point.
    ListTransactionsTest().main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
# Protocol version bytes reported via the version-information property.
# NOTE(review): presumably major=1, minor=12 plus two format/reserved
# bytes — confirm against the ECHONET Lite specification.
PROTOCOL_VERSION = [ 1, 12, 0x01, 0x00]
# Appendix release identifier ('H') used in version payloads.
APPENDIX_RELEASE = [0x00, 0x00, ord('H'), 0x00]
# ECHONET Lite header byte 1 (fixed) and byte 2 (frame format selector:
# 0x81 = format 1 structured EDATA, 0x82 = format 2 arbitrary payload).
EHD1 = 0x10
EHD2_FMT1 = 0x81
EHD2_FMT2 = 0x82
def _code_to_desc(code_dict):
desc_dict = {}
for key in code_dict:
desc_dict[code_dict[key]] = key
return desc_dict
# ECHONET Lite service (ESV) codes keyed by mnemonic.  The *_SNA entries
# are the "service not available" error responses to the matching requests.
ESV_CODE = {
    'SETI_SNA': 0x50,
    'SETC_SNA': 0x51,
    'GET_SNA': 0x52,
    'INF_SNA': 0x53,
    'SETGET_SNA': 0x5e,
    'SETI': 0x60,
    'SETC': 0x61,
    'GET': 0x62,
    'INF_REQ': 0x63,
    'SETGET': 0x6e,
    'SET_RES': 0x71,
    'GET_RES': 0x72,
    'INF': 0x73,
    'INFC': 0x74,
    'INFC_RES': 0x7a,
    'SETGET_RES': 0x7e,
}
# Reverse map (code -> mnemonic) used for pretty-printing messages.
ESV_DESC = _code_to_desc(ESV_CODE)
# NOTE(review): 'INF' is listed both as a request and a response below;
# presumably intentional (INF can be unsolicited or requested) — confirm.
ESV_REQUEST_CODES = (
    ESV_CODE['SETI'],
    ESV_CODE['SETC'],
    ESV_CODE['GET'],
    ESV_CODE['INF_REQ'],
    ESV_CODE['SETGET'],
    ESV_CODE['INF'],
    )
ESV_RESPONSE_CODES = (
    ESV_CODE['SET_RES'],
    ESV_CODE['GET_RES'],
    ESV_CODE['INF'],
    ESV_CODE['INFC_RES'],
    ESV_CODE['SETGET_RES'],
    )
ESV_ERROR_CODES = (
    ESV_CODE['SETI_SNA'],
    ESV_CODE['SETC_SNA'],
    ESV_CODE['GET_SNA'],
    ESV_CODE['INF_SNA'],
    ESV_CODE['SETGET_SNA'],
    )
# Class-group codes: the first byte of an EOJ object identifier.
CLSGRP_CODE = {
    'SENSOR': 0x00,
    'AIR_CONDITIONER': 0x01,
    'HOUSING_FACILITIES': 0x02,
    'COOKING_HOUSEHOLD': 0x03,
    'HEALTH': 0x04,
    'MANAGEMENT_OPERATION': 0x05,
    'PROFILE': 0x0e,
}
CLSGRP_DESC = _code_to_desc(CLSGRP_CODE)
# Class codes (second EOJ byte), one table per class group.  Empty tables
# are groups this module does not (yet) name any classes for.
CLS_SE_CODE = {
    'TEMPERATURE': 0x11,
}
CLS_AC_CODE = {}
CLS_HF_CODE = {
    'LV_ELECTRIC_ENERGY_METER': 0x88,
}
CLS_CH_CODE = {}
CLS_HE_CODE = {}
CLS_MO_CODE = {
    'CONTROLLER': 0xff,
}
CLS_PR_CODE = {
    'PROFILE': 0xf0,
}
# Group mnemonic -> class-code table (forward lookup).
CLS_CODE = {
    'SENSOR': CLS_SE_CODE,
    'AIR_CONDITIONER': CLS_AC_CODE,
    'HOUSING_FACILITIES': CLS_HF_CODE,
    'COOKING_HOUSEHOLD': CLS_CH_CODE,
    'HEALTH': CLS_HE_CODE,
    'MANAGEMENT_OPERATION': CLS_MO_CODE,
    'PROFILE': CLS_PR_CODE,
}
# Group code -> (class code -> mnemonic), used by EOJ.__str__.
CLS_DESC = {
    CLSGRP_CODE['SENSOR']: _code_to_desc(CLS_SE_CODE),
    CLSGRP_CODE['AIR_CONDITIONER']: _code_to_desc(CLS_AC_CODE),
    CLSGRP_CODE['HOUSING_FACILITIES']: _code_to_desc(CLS_HF_CODE),
    CLSGRP_CODE['COOKING_HOUSEHOLD']: _code_to_desc(CLS_CH_CODE),
    CLSGRP_CODE['HEALTH']: _code_to_desc(CLS_HE_CODE),
    CLSGRP_CODE['MANAGEMENT_OPERATION']: _code_to_desc(CLS_MO_CODE),
    CLSGRP_CODE['PROFILE']: _code_to_desc(CLS_PR_CODE),
}
# Node-profile instance codes, and the wildcard instance id (third EOJ byte).
INSTANCE_PR_NORMAL = 0x01
INSTANCE_PR_SENDONLY = 0x02
INSTANCE_ALL = 0x00
# EPC (property) codes common to all node classes.
EPC_OPERATING_STATUS = 0x80
EDT_OPERATING_STATUS_BOOTING = 0x30
EDT_OPERATING_STATUS_NOT_BOOTING = 0x31
EPC_INSTALLATION_LOCATION = 0x81
EDT_INSTALLATION_LOCATION_NOT_SPECIFIED = 0x00
EPC_VERSION_INFORMATION = 0x82
EPC_IDENTIFICATION_NUMBER = 0x83
EPC_FAULT_STATUS = 0x88
EDT_FAULT_OCCURRED = 0x41
EDT_FAULT_NOT_OCCURRED = 0x42
EPC_MANUFACTURE_CODE = 0x8a
EPC_STATUS_CHANGE_PROPERTY_MAP = 0x9d
EPC_SET_PROPERTY_MAP = 0x9e
EPC_GET_PROPERTY_MAP = 0x9f
# EPC codes for the Node Profile class.
EPC_NUM_SELF_NODE_INSTANCES = 0xd3
EPC_NUM_SELF_NODE_CLASSES = 0xd4
EPC_INSTANCE_LIST_NOTIFICATION = 0xd5
EPC_SELF_NODE_INSTANCE_LIST_S = 0xd6
EPC_SELF_NODE_CLASS_LIST_S = 0xd7
# EPC code for the temperature sensor class.
EPC_TEMPERATURE = 0xe0
# EPC codes for the low-voltage smart electric energy meter class.
EPC_ELECTRIC_UNIT = 0xe1
EPC_HISTORICAL_CUMULATIVE_NORMAL = 0xe2
EPC_INSTANTENEOUS_ELECTRIC = 0xe7
class EOJ(object):
    """ECHONET Lite object identifier: class group, class, and instance.

    Stored internally as one 3-byte integer:
    (clsgrp << 16) | (cls << 8) | instance_id.
    """
    def __init__(self, clsgrp=0, cls=0, instance_id=0, eoj=None):
        """Build from components, or wrap a raw 3-byte value when `eoj` is given."""
        if eoj is None:
            self._eoj = ((clsgrp << 16)
                    | (cls << 8)
                    | instance_id)
        else:
            self._eoj = eoj
    @property
    def eoj(self):
        # Raw 3-byte integer form.
        return self._eoj
    @property
    def clsgrp(self):
        # Class-group code (top byte).
        return (self._eoj >> 16) & 0xff
    @property
    def cls(self):
        # Class code (middle byte).
        return (self._eoj >> 8) & 0xff
    @property
    def instance_id(self):
        # Instance id (bottom byte).
        return self._eoj & 0xff
    def __eq__(self, eoj):
        # Compares against anything convertible to int (ints, other EOJs).
        return self._eoj == int(eoj)
    def __int__(self):
        return self._eoj
    def __str__(self):
        # Prefer symbolic names when the codes are known, hex digits otherwise.
        if self.clsgrp in CLSGRP_DESC:
            s = '{0}'.format(CLSGRP_DESC[self.clsgrp])
        else:
            s = '{0:02x}'.format(self.clsgrp)
        if self.clsgrp in CLS_DESC and self.cls in CLS_DESC[self.clsgrp]:
            s += '.{0}'.format(CLS_DESC[self.clsgrp][self.cls])
        else:
            s += '.{0:02x}'.format(self.cls)
        s += '.{0:02x}'.format(self.instance_id)
        return s
    def is_clsgrp(self, clsgrp):
        return self.clsgrp == clsgrp
    def is_cls(self, cls):
        return self.cls == cls
    def is_all_instance(self):
        # BUG FIX: original referenced undefined INS_ALL (NameError when
        # called); the module constant is INSTANCE_ALL (0x00).
        return self.instance_id == INSTANCE_ALL
class Message(object):
    """One ECHONET Lite message: TID, source/destination EOJ, service code
    (ESV), property counter (OPC) and the list of attached properties."""
    def __init__(self, tid=None, seoj=None, deoj=None,
                 esv=None, opc=None, properties=None):
        self.tid = tid
        self.seoj = seoj
        self.deoj = deoj
        self.esv = esv
        self.opc = opc
        # Only reuse the caller's list when one was actually supplied;
        # otherwise start with a fresh, per-instance list.
        self.properties = [] if properties is None else properties
    def __str__(self):
        parts = []
        if self.tid is None:
            parts.append('(None)')
        else:
            parts.append('TID={0:#06x}'.format(self.tid))
        parts.append('SEOJ={0}({1:#08x})'.format(str(self.seoj), int(self.seoj)))
        parts.append('DEOJ={0}({1:#08x})'.format(str(self.deoj), int(self.deoj)))
        parts.append('ESV={0}({1:#04x})'.format(self._get_esv_desc(self.esv),
                                                self.esv))
        parts.append('OPC={0:#04x}'.format(self.opc))
        parts.extend(str(prop) for prop in self.properties)
        return ', '.join(parts)
    def _get_esv_desc(self, esv):
        # Fall back to a hex rendering for unknown service codes.
        return ESV_DESC.get(esv, '{0:#04x}'.format(esv))
class Property(object):
    """A single ECHONET Lite property: EPC code, PDC byte count, EDT data.

    PDC is derived from the length of `edt` and all three fields are
    exposed read-only.
    """
    def __init__(self, epc=None, edt=None):
        self._epc = epc
        self._edt = edt
        # PDC is the number of EDT data bytes; zero when no data attached.
        self._pdc = 0 if edt is None else len(edt)
    @property
    def epc(self):
        """Property code."""
        return self._epc
    @property
    def pdc(self):
        """Property data counter (byte length of EDT)."""
        return self._pdc
    @property
    def edt(self):
        """Property data as a sequence of byte values, or None."""
        return self._edt
    def __str__(self):
        text = 'EPC={0:#04x}, PDC={1:#04x}'.format(self._epc, self._pdc)
        if self._edt is not None:
            text += ', EDT=['
            for byte in self._edt:
                text += '{0:#04x}, '.format(byte)
            text += ']'
        return text
COMMON_HDR_LEN = 12 # EHD1, EHD2, TID, SEOJ, DEOJ, ESV, and OPC
def decode(data):
    """Decode a raw ECHONET Lite frame into a Message.

    Returns None when the payload is shorter than the common header, or
    when the advertised property count (OPC) does not match the number of
    properties actually present in the payload.
    """
    # decode Echonet Lite header and SEOJ, DEOJ.
    if len(data) < COMMON_HDR_LEN:
        # not an Echonet Lite frame
        print('not an Echonet Lite frame, missing common headers.')
        return None
    # BUG FIX: the original unpacked both header bytes into the same name
    # ("ehd1, ehd1"), silently discarding EHD1.  Neither byte is validated
    # here; that matches the original behavior.
    (ehd1, ehd2, tid,
     seoj0, seoj1, seoj2,
     deoj0, deoj1, deoj2,
     esv, opc) = struct.unpack('!2BH8B',
                               data[0:COMMON_HDR_LEN])
    seoj = EOJ(seoj0, seoj1, seoj2)
    deoj = EOJ(deoj0, deoj1, deoj2)
    # decode EPCs
    def decode_epc():
        # One property is EPC (1 byte), PDC (1 byte), then PDC bytes of EDT.
        (epc, pdc) = struct.unpack('!2B', data[ptr:ptr+2])
        edt = None
        if pdc != 0:
            edt = struct.unpack('!{0}B'.format(pdc), data[ptr+2:ptr+2+pdc])
        pl = Property(epc, edt)
        return (pl, 2 + pdc)
    msg = Message()
    msg.tid = tid
    msg.seoj = seoj
    msg.deoj = deoj
    msg.esv = esv
    msg.opc = opc
    ptr = COMMON_HDR_LEN
    nproperties = opc
    # Walk the remaining payload property by property.
    while len(data[ptr:]) > 1:
        (pl, pl_len) = decode_epc()
        msg.properties.append(pl)
        ptr += pl_len
        nproperties -= 1
    if nproperties != 0:
        print('OPC count ({0}) and # of properties ({1}) doesn\'t match.'.format(
            opc, opc - nproperties))
        return None
    return msg
def encode(message):
    """Serialize a Message into an ECHONET Lite format-1 frame (bytearray).

    When message.opc is unset it is derived from the number of attached
    properties.  message.tid is masked to 16 bits to fit the TID field.
    """
    data = bytearray()
    if message.opc is None:  # FIX: identity check instead of `== None`
        message.opc = len(message.properties)
    # Common header: EHD1, EHD2 (format 1), TID, SEOJ (3 bytes), DEOJ (3 bytes).
    data += struct.pack('!2BH6B',
                        EHD1,
                        EHD2_FMT1,
                        message.tid & 0xffff,
                        message.seoj.clsgrp,
                        message.seoj.cls,
                        message.seoj.instance_id,
                        message.deoj.clsgrp,
                        message.deoj.cls,
                        message.deoj.instance_id)
    data += struct.pack('!B', message.esv)
    data += struct.pack('!B', message.opc)
    # Each property: EPC, PDC, then PDC bytes of EDT.
    for p in message.properties:
        data += struct.pack('!BB', p.epc, p.pdc)
        if p.pdc > 0:
            data += struct.pack('!{0}B'.format(p.pdc), *p.edt)
    return data
if __name__ == '__main__':
    # Round-trip smoke test: build a SETI request carrying two properties,
    # encode it to bytes, then decode it back and print both.
    m = Message()
    m.tid = 2
    m.seoj = EOJ(10, 10, 10)
    m.deoj = EOJ(20, 20, 20)
    m.esv = 0x60
    m.opc = 2
    # BUG FIX: Property exposes epc/pdc/edt as read-only @property
    # attributes, so the original per-field assignments raised
    # AttributeError.  Use the constructor instead, which also derives
    # PDC from the EDT length automatically.
    req1 = Property(epc=0x80, edt=[0x01, 0x02])
    m.properties.append(req1)
    req2 = Property(epc=0x81, edt=[0x03, 0x04, 0x05])
    m.properties.append(req2)
    print(m)
    em = encode(m)
    m2 = decode(em)
    print(m2)
| |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composable timestep processing, for DQN Atari preprocessing.
Aims:
* Be self-contained.
* Easy to have the preprocessing on the agent side or on the environment side.
* Easy to swap out and modify parts of the processing.
Conventions:
* The term "processor" is used to refer to any callable that could also have
a `reset()` function to clear any internal state. E.g. a plain function. Or an
instance of a class with `__call__` method, with or without a `reset()`
method.
* `None` means no output when subsampling inputs.
"""
import collections
from typing import Any, Callable, List, Iterable, Optional, Sequence, Text, Tuple
import dm_env
from dm_env import specs
import numpy as np
from PIL import Image
Processor = Callable  # Actually a callable that may also have a reset() method.
Nest = Any  # Recursive types are not yet supported by pytype.
NamedTuple = Any  # Stands for "any NamedTuple subclass"; pytype cannot express this.
StepType = dm_env.StepType  # Alias for brevity throughout this module.
def reset(processor: 'Processor[[Any], Any]') -> None:
  """Invokes `reset()` on `processor` when such an attribute is defined."""
  reset_method = getattr(processor, 'reset', None)
  if reset_method is not None:
    reset_method()
identity = lambda v: v
def trailing_zero_pad(
    length: int) -> 'Processor[[List[np.ndarray]], List[np.ndarray]]':
  """Adds trailing zero padding to array lists to ensure a minimum length."""
  def trailing_zero_pad_fn(arrays):
    num_missing = length - len(arrays)
    if num_missing > 0:
      # Pad with zero arrays shaped like the first entry.
      return arrays + [np.zeros_like(arrays[0])] * num_missing
    return arrays
  return trailing_zero_pad_fn
def none_to_zero_pad(values: 'List[Optional[NamedTuple]]') -> 'List[NamedTuple]':
  """Replaces `None`s in a list of named tuples with zeros of same structure."""
  example = next((v for v in values if v is not None), None)
  if example is None:
    raise ValueError('Must have at least one value which is not None.')
  if all(v is not None for v in values):
    # Nothing to replace; hand back the caller's list unchanged.
    return values
  zero = type(example)(*(np.zeros_like(field) for field in example))
  return [zero if v is None else v for v in values]
def named_tuple_sequence_stack(values: 'Sequence[NamedTuple]') -> 'NamedTuple':
  """Converts a sequence of named tuples into a named tuple of tuples.

  E.g. [T(1, 2), T(3, 4), T(5, 6)] becomes T((1, 3, 5), (2, 4, 6)).
  """
  per_field = zip(*values)
  return type(values[0])(*per_field)
class Deque:
  """Double ended queue with a maximum length and initial values."""
  def __init__(self, max_length: int, initial_values=None):
    # The deque starts empty; initial values are only applied by reset().
    self._deque = collections.deque(maxlen=max_length)
    self._initial_values = initial_values or []
  def reset(self) -> None:
    """Restores the deque to its configured initial contents."""
    self._deque.clear()
    self._deque.extend(self._initial_values)
  def __call__(self, value: Any) -> collections.deque:
    dq = self._deque
    dq.append(value)
    return dq
class FixedPaddedBuffer:
  """Fixed size `None`-padded buffer which is cleared after it is filled.

  With `length = 3`, `initial_index = 2` and inputs `[0, 1, 2, 3, 4, 5, 6]`
  the returned buffers are `~~0`, `1~~`, `12~`, `123`, `4~~`, `45~`, `456`
  (`~` is `None`).  Used to concatenate timesteps for action repeats: the
  first buffer is prefix-padded so a FIRST timestep can be emitted at once,
  and a fresh buffer is started after each full one.
  """
  def __init__(self, length: int, initial_index: int):
    self._length = length
    self._initial_index = initial_index % length
    self.reset()
  def reset(self) -> None:
    """Starts a fresh padded buffer at the configured initial position."""
    self._index = self._initial_index
    self._buffer = [None] * self._length
  def __call__(self, value: Any) -> Sequence[Any]:
    if self._index >= self._length:
      assert self._index == self._length
      # Previous call filled the buffer; begin a new one from position 0.
      self._index = 0
      self._buffer = [None] * self._length
    self._buffer[self._index] = value
    self._index += 1
    return self._buffer
class ConditionallySubsample:
  """Conditionally passes through input, returning `None` otherwise."""
  def __init__(self, condition: 'Processor[[Any], bool]'):
    self._condition = condition
  def reset(self) -> None:
    """Resets the wrapped condition's state, if it has any."""
    reset(self._condition)
  def __call__(self, value: Any) -> Optional[Any]:
    if self._condition(value):
      return value
    return None
class TimestepBufferCondition:
  """Returns `True` when an iterable of timesteps should be passed on.
  Specifically returns `True`:
  * If timesteps contain a `FIRST`.
  * If timesteps contain a `LAST`.
  * If number of steps passed since `FIRST` timestep modulo `period` is `0`.
  Returns `False` otherwise. Used for action repeats in Atari preprocessing.
  """
  def __init__(self, period: int):
    self._period = period
    # None until the episode's FIRST timestep has been seen.
    self._steps_since_first_timestep = None
    # Set once a LAST is seen; calling again before reset() is an error.
    self._should_reset = False
  def reset(self):
    """Clears episode state so the next call may accept a FIRST timestep."""
    self._should_reset = False
    self._steps_since_first_timestep = None
  def __call__(self, timesteps: Iterable[dm_env.TimeStep]) -> bool:
    """Returns whether the given (possibly None-padded) buffer should be emitted."""
    if self._should_reset:
      raise RuntimeError('Should have reset.')
    # Find the main step type, FIRST and LAST take precedence over MID.
    main_step_type = StepType.MID
    precedent_step_types = (StepType.FIRST, StepType.LAST)
    for timestep in timesteps:
      if timestep is None:
        # None entries are buffer padding, not real timesteps.
        continue
      if timestep.step_type in precedent_step_types:
        if main_step_type in precedent_step_types:
          raise RuntimeError('Expected at most one FIRST or LAST.')
        main_step_type = timestep.step_type
    # Must have FIRST timestep after a reset.
    if self._steps_since_first_timestep is None:
      if main_step_type != StepType.FIRST:
        raise RuntimeError('After reset first timestep should be FIRST.')
    # pytype: disable=unsupported-operands
    if main_step_type == StepType.FIRST:
      self._steps_since_first_timestep = 0
      return True
    elif main_step_type == StepType.LAST:
      self._steps_since_first_timestep = None
      self._should_reset = True
      return True
    elif (self._steps_since_first_timestep + 1) % self._period == 0:
      self._steps_since_first_timestep += 1
      return True
    else:
      self._steps_since_first_timestep += 1
      return False
    # pytype: enable=unsupported-operands
class ApplyToNamedTupleField:
  """Runs processors on a particular field of a named tuple."""
  def __init__(self, field: Text, *processors: 'Processor[[Any], Any]'):
    self._field = field
    self._processors = processors
  def reset(self) -> None:
    """Resets every wrapped processor."""
    for stage in self._processors:
      reset(stage)
  def __call__(self, value: 'NamedTuple') -> 'NamedTuple':
    field_value = getattr(value, self._field)
    for stage in self._processors:
      field_value = stage(field_value)
    return value._replace(**{self._field: field_value})
class Maybe:
  """Wraps another processor so that `None` is returned when `None` is input."""
  def __init__(self, processor: 'Processor[[Any], Any]'):
    self._processor = processor
  def reset(self) -> None:
    """Resets the wrapped processor."""
    reset(self._processor)
  def __call__(self, value: Optional[Any]) -> Optional[Any]:
    # `None` marks a subsampled-away input; propagate it untouched.
    return None if value is None else self._processor(value)
class Sequential:
  """Chains together multiple processors."""
  def __init__(self, *processors: 'Processor[[Any], Any]'):
    self._processors = processors
  def reset(self) -> None:
    """Resets every processor in the chain."""
    for stage in self._processors:
      reset(stage)
  def __call__(self, value: Any) -> Any:
    result = value
    for stage in self._processors:
      result = stage(result)
    return result
class ZeroDiscountOnLifeLoss:
  """Sets discount to zero on timestep if number of lives has decreased.
  This processor assumes observations to be tuples whose second entry is a
  scalar indicating the remaining number of lives.
  """
  def __init__(self):
    # Lives count seen on the previous call; None before the first call.
    self._num_lives_on_prev_step = None
  def reset(self) -> None:
    """Forgets the previous lives count (call at episode boundaries)."""
    self._num_lives_on_prev_step = None
  def __call__(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
    # We have a life loss when the timestep is a regular transition and lives
    # have decreased since the previous timestep.
    # NOTE(review): on a FIRST timestep mid() is False, so comparing against a
    # None previous count is short-circuited away — assumes episodes always
    # start with FIRST; confirm with callers.
    num_lives = timestep.observation[1]
    life_lost = timestep.mid() and (num_lives < self._num_lives_on_prev_step)
    self._num_lives_on_prev_step = num_lives
    return timestep._replace(discount=0.) if life_lost else timestep
def reduce_step_type(step_types: Sequence[StepType],
                     debug: bool = False) -> StepType:
  """Outputs a representative step type from an array of step types."""
  # Zero padding will appear to be FIRST. Padding should only be seen before the
  # FIRST (e.g. 000F) or after LAST (e.g. ML00).
  if debug:
    np_step_types = np.array(step_types)
  output_step_type = StepType.MID
  for i, step_type in enumerate(step_types):
    # NOTE(review): this treats value 0 as padding-or-FIRST; presumably
    # StepType.FIRST's integer value is 0, so a 000F buffer is all zeros
    # and the debug all-zero check below holds — confirm against dm_env.
    if step_type == 0:  # step_type not actually FIRST, but we do expect 000F.
      if debug and not (np_step_types == 0).all():
        raise ValueError('Expected zero padding followed by FIRST.')
      output_step_type = StepType.FIRST
      break
    elif step_type == StepType.LAST:
      output_step_type = StepType.LAST
      # Everything after a LAST must be padding (buffers never span episodes).
      if debug and not (np_step_types[i + 1:] == 0).all():
        raise ValueError('Expected LAST to be followed by zero padding.')
      break
    else:
      if step_type != StepType.MID:
        raise ValueError('Expected MID if not FIRST or LAST.')
  return output_step_type
def aggregate_rewards(rewards: Sequence[Optional[float]],
                      debug: bool = False) -> Optional[float]:
  """Sums up rewards, assumes discount is 1."""
  if None not in rewards:
    # Faster than np.sum for a list of floats.
    return sum(rewards)
  if debug:
    np_rewards = np.array(rewards)
    if not (np_rewards[-1] is None and (np_rewards[:-1] == 0).all()):
      # Should only ever have [0, 0, 0, None] due to zero padding.
      raise ValueError('Should only have a None reward for FIRST.')
  # A None reward marks the FIRST timestep of an episode; propagate it.
  return None
def aggregate_discounts(discounts: Sequence[Optional[float]],
                        debug: bool = False) -> Optional[float]:
  """Aggregates array of discounts into a scalar, expects `0`, `1` or `None`."""
  if debug:
    np_discounts = np.array(discounts)
    if not np.isin(np_discounts, [0., 1., None]).all():
      raise ValueError('All discounts should be 0 or 1, got: %s.' %
                       np_discounts)
  if None not in discounts:
    # Faster than np.prod for a list of floats.
    product = 1
    for discount in discounts:
      product *= discount
    return product
  if debug:
    if not (np_discounts[-1] is None and (np_discounts[:-1] == 0).all()):
      # Should have [0, 0, 0, None] due to zero padding.
      raise ValueError('Should only have a None discount for FIRST.')
  # A None discount marks the FIRST timestep of an episode; propagate it.
  return None
def rgb2y(array: np.ndarray) -> np.ndarray:
  """Converts RGB image array into grayscale."""
  if array.ndim != 3:
    raise ValueError('Input array should be 3D, got %s.' % array.ndim)
  # Luma weights; the blue weight is the remainder so the three sum to 1.
  weights = [0.299, 0.587, 1 - (0.299 + 0.587)]
  grayscale = np.tensordot(array, weights, (-1, 0))
  return grayscale.astype(np.uint8)
def resize(shape: Tuple[int, ...]) -> 'Processor[[np.ndarray], np.ndarray]':
  """Resizes array to the given shape."""
  if len(shape) != 2:
    raise ValueError('Resize shape has to be 2D, given: %s.' % str(shape))
  # PIL expects (width, height) whereas `shape` is (height, width).
  width_height = (shape[1], shape[0])
  def resize_fn(array):
    resized = Image.fromarray(array).resize(width_height, Image.BILINEAR)
    return np.array(resized, dtype=np.uint8)
  return resize_fn
def select_rgb_observation(timestep: 'dm_env.TimeStep') -> 'dm_env.TimeStep':
  """Replaces an observation tuple by its first entry (the RGB observation)."""
  rgb_observation = timestep.observation[0]
  return timestep._replace(observation=rgb_observation)
def apply_additional_discount(
    additional_discount: float) -> 'Processor[[float], float]':
  """Returns a function that scales its non-`None` input by a constant."""
  def apply_fn(discount):
    if discount is None:
      return None
    return additional_discount * discount
  return apply_fn
def clip_reward(bound: float) -> 'Processor[[Optional[float]], Optional[float]]':
  """Returns a function that clips non-`None` inputs to (`-bound`, `bound`)."""
  def clip_reward_fn(reward):
    if reward is None:
      return None
    return max(min(reward, bound), -bound)
  return clip_reward_fn
def show(prefix: Text) -> 'Processor[[Any], Any]':
  """Prints value and passes through, for debugging."""
  def show_fn(value):
    print('%s: %s' % (prefix, value))
    return value
  return show_fn
def atari(
    additional_discount: float = 0.99,
    max_abs_reward: Optional[float] = 1.0,
    resize_shape: Optional[Tuple[int, int]] = (84, 84),
    num_action_repeats: int = 4,
    num_pooled_frames: int = 2,
    zero_discount_on_life_loss: bool = True,
    num_stacked_frames: int = 4,
    grayscaling: bool = True,
) -> Processor[[dm_env.TimeStep], Optional[dm_env.TimeStep]]:
  """Standard DQN preprocessing on Atari.

  Args:
    additional_discount: factor multiplied into each aggregated discount.
    max_abs_reward: symmetric reward clipping bound; no clipping when falsy.
    resize_shape: (height, width) to resize frames to; no resize when falsy.
    num_action_repeats: raw timesteps aggregated into one emitted timestep.
    num_pooled_frames: trailing frames max-pooled together per emitted frame.
    zero_discount_on_life_loss: if True, zero the discount when lives drop.
    num_stacked_frames: processed frames stacked along the last axis.
    grayscaling: if True convert frames to grayscale, otherwise keep RGB.

  Returns:
    A processor mapping a raw timestep to a processed timestep, or `None`
    on the intermediate steps of an action repeat.
  """
  # This processor does the following to a sequence of timesteps.
  #
  # 1. Zeroes discount on loss of life.
  # 2. Repeats actions (previous action should be repeated if None is returned).
  # 3. Max pools action repeated observations.
  # 4. Grayscales observations.
  # 5. Resizes observations.
  # 6. Stacks observations.
  # 7. Clips rewards.
  # 8. Applies an additional discount.
  #
  # For more detail see the annotations in the processors below.
  # The FixedPaddedBuffer, ConditionallySubsample, none_to_zero_pad, stack and
  # max_pool on the observation collectively does this (step types: F = FIRST,
  # M = MID, L = LAST, ~ is None):
  #
  # Type:    F   |   M M M M   |   M M L  |  F  |
  # Frames:  A   |   B C D E   |   F G H  |  I  |
  # Output: max[0A]| ~ ~ ~ max[DE]| ~ ~ max[H0]|max[0I]|
  return Sequential(
      # When the number of lives decreases, set discount to 0.
      ZeroDiscountOnLifeLoss() if zero_discount_on_life_loss else identity,
      # Select the RGB observation as the main observation, dropping lives.
      select_rgb_observation,
      # obs: 1, 2, 3, 4, 5, 6, 7, 8, 9, ...
      # Write timesteps into a fixed-sized buffer padded with None.
      FixedPaddedBuffer(length=num_action_repeats, initial_index=-1),
      # obs: ~~~1, 2~~~, 23~~, 234~, 2345, 6~~~, 67~~, 678~, 6789, ...
      # Periodically return the deque of timesteps, when the current timestep is
      # FIRST, after that every 4 steps, and when the current timestep is LAST.
      ConditionallySubsample(TimestepBufferCondition(num_action_repeats)),
      # obs: ~~~1, ~, ~, ~, 2345, ~, ~, ~, 6789, ...
      # If None pass through, otherwise apply the processor.
      Maybe(
          Sequential(
              # Replace Nones with zero padding in each buffer.
              none_to_zero_pad,
              # obs: 0001, ~, ~, ~, 2345, ~, ~, ~, 6789, ...
              # Convert sequence of nests into a nest of sequences.
              named_tuple_sequence_stack,
              # Choose representative step type from an array of step types.
              ApplyToNamedTupleField('step_type', reduce_step_type),
              # Rewards: sum then clip.
              ApplyToNamedTupleField(
                  'reward',
                  aggregate_rewards,
                  clip_reward(max_abs_reward) if max_abs_reward else identity,
              ),
              # Discounts: take product and scale by an additional discount.
              ApplyToNamedTupleField(
                  'discount',
                  aggregate_discounts,
                  apply_additional_discount(additional_discount),
              ),
              # Observations: max pool, grayscale, resize, and stack.
              ApplyToNamedTupleField(
                  'observation',
                  lambda obs: np.stack(obs[-num_pooled_frames:], axis=0),
                  lambda obs: np.max(obs, axis=0),
                  # obs: max[01], ~, ~, ~, max[45], ~, ~, ~, max[89], ...
                  # obs: A, ~, ~, ~, B, ~, ~, ~, C, ...
                  rgb2y if grayscaling else identity,
                  resize(resize_shape) if resize_shape else identity,
                  Deque(max_length=num_stacked_frames),
                  # obs: A, ~, ~, ~, AB, ~, ~, ~, ABC, ~, ~, ~, ABCD, ~, ~, ~,
                  # BCDE, ~, ~, ~, CDEF, ...
                  list,
                  trailing_zero_pad(length=num_stacked_frames),
                  # obs: A000, ~, ~, ~, AB00, ~, ~, ~, ABC0, ~, ~, ~, ABCD,
                  # ~, ~, ~, BCDE, ...
                  lambda obs: np.stack(obs, axis=-1),
              ),
          )),
  )
class AtariEnvironmentWrapper(dm_env.Environment):
  """Python environment wrapper that provides DQN Atari preprocessing.
  This is a thin wrapper around the Atari processor.
  Expects underlying Atari environment to have interleaved pixels (HWC) and
  zero-indexed actions.
  """
  def __init__(
      self,
      environment: dm_env.Environment,
      additional_discount: float = 0.99,
      max_abs_reward: Optional[float] = 1.0,
      resize_shape: Optional[Tuple[int, int]] = (84, 84),
      num_action_repeats: int = 4,
      num_pooled_frames: int = 2,
      zero_discount_on_life_loss: bool = True,
      num_stacked_frames: int = 4,
      grayscaling: bool = True,
  ):
    # Observation spec is expected to be a (rgb, lives) pair.
    rgb_spec, unused_lives_spec = environment.observation_spec()
    if rgb_spec.shape[2] != 3:
      raise ValueError(
          'This wrapper assumes interleaved pixel observations with shape '
          '(height, width, channels).')
    if int(environment.action_spec().minimum) != 0:
      raise ValueError('This wrapper assumes zero-indexed actions.')
    self._environment = environment
    # All preprocessing is delegated to the composable atari() processor.
    self._processor = atari(
        additional_discount=additional_discount,
        max_abs_reward=max_abs_reward,
        resize_shape=resize_shape,
        num_action_repeats=num_action_repeats,
        num_pooled_frames=num_pooled_frames,
        zero_discount_on_life_loss=zero_discount_on_life_loss,
        num_stacked_frames=num_stacked_frames,
        grayscaling=grayscaling,
    )
    # Pre-compute the processed observation spec shape/name.
    if grayscaling:
      self._observation_shape = resize_shape + (num_stacked_frames,)
      self._observation_spec_name = 'grayscale'
    else:
      self._observation_shape = resize_shape + (3, num_stacked_frames)
      self._observation_spec_name = 'RGB'
    # A reset is required before the first step.
    self._reset_next_step = True
  def reset(self) -> dm_env.TimeStep:
    """Resets environment and provides the first processed timestep."""
    reset(self._processor)
    timestep = self._environment.reset()
    processed_timestep = self._processor(timestep)
    # The processor always emits on a FIRST timestep (no action to repeat).
    assert processed_timestep is not None
    self._reset_next_step = False
    return processed_timestep
  def step(self, action: int) -> dm_env.TimeStep:
    """Steps up to `num_action_repeat` times, returns a processed timestep."""
    # This implements the action repeat by repeatedly passing in the last action
    # until an actual timestep is returned by the processor.
    if self._reset_next_step:
      return self.reset()  # Ignore action.
    processed_timestep = None
    while processed_timestep is None:
      timestep = self._environment.step(action)
      processed_timestep = self._processor(timestep)
      if timestep.last():
        self._reset_next_step = True
        # On LAST the processor emits the partially filled buffer.
        assert processed_timestep is not None
    return processed_timestep
  def action_spec(self) -> specs.DiscreteArray:
    """Returns the underlying environment's action spec unchanged."""
    return self._environment.action_spec()
  def observation_spec(self) -> specs.Array:
    """Returns the spec of the processed (resized/stacked) observation."""
    return specs.Array(
        shape=self._observation_shape,
        dtype=np.uint8,
        name=self._observation_spec_name)
class AtariSimpleActionEnvironmentWrapper(dm_env.Environment):
  """Python environment wrapper for Atari so it takes integer actions.
  Use this when processing is done on the agent side.
  """
  def __init__(self, environment: dm_env.Environment):
    self._environment = environment
    # The underlying environment's action spec is a sequence; use its first
    # entry and require zero-indexed actions.
    if int(environment.action_spec()[0].minimum) != 0:
      raise ValueError(
          'This wrapper assumes zero-indexed actions. Use the Atari setting '
          'zero_indexed_actions=\"true\" to get actions in this format.')
  def reset(self) -> dm_env.TimeStep:
    """Passes through to the wrapped environment."""
    return self._environment.reset()
  def step(self, action: int) -> dm_env.TimeStep:
    """Wraps the integer action into the array-in-list format expected below."""
    return self._environment.step([np.array(action).reshape((1,))])
  def action_spec(self) -> specs.DiscreteArray:
    """Converts the wrapped environment's first action spec to a DiscreteArray."""
    action_spec = self._environment.action_spec()[0]
    return specs.DiscreteArray(
        num_values=action_spec.maximum.item() + 1,
        dtype=action_spec.dtype,
        name='action_spec')
  def observation_spec(self) -> specs.Array:
    """Passes through to the wrapped environment."""
    return self._environment.observation_spec()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class FirewallRulesOperations(object):
"""FirewallRulesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Constant value: "2014-04-01".
"""
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Constant service API version, sent as the 'api-version' query parameter.
        self.api_version = "2014-04-01"
        self.config = config
    def create_or_update(
            self, resource_group_name, server_name, firewall_rule_name, start_ip_address, end_ip_address, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a firewall rule.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param firewall_rule_name: The name of the firewall rule.
        :type firewall_rule_name: str
        :param start_ip_address: The start IP address of the firewall rule.
         Must be IPv4 format. Use value '0.0.0.0' to represent all
         Azure-internal IP addresses.
        :type start_ip_address: str
        :param end_ip_address: The end IP address of the firewall rule. Must
         be IPv4 format. Must be greater than or equal to startIpAddress. Use
         value '0.0.0.0' to represent all Azure-internal IP addresses.
        :type end_ip_address: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`FirewallRule <azure.mgmt.sql.models.FirewallRule>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # The PUT body only carries the IP range; the rule name lives in the URL.
        parameters = models.FirewallRule(start_ip_address=start_ip_address, end_ip_address=end_ip_address)
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/firewallRules/{firewallRuleName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 provides a per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'FirewallRule')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        # 200 = existing rule updated, 201 = new rule created.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FirewallRule', response)
        if response.status_code == 201:
            deserialized = self._deserialize('FirewallRule', response)
        if raw:
            # Caller asked for the transport response alongside the model.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def delete(
            self, resource_group_name, server_name, firewall_rule_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a firewall rule.

        Issues ``DELETE .../firewallRules/{firewallRuleName}`` against the
        Azure Resource Manager endpoint; 200 and 204 are both success
        (204 when the rule did not exist).

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param firewall_rule_name: The name of the firewall rule.
        :type firewall_rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/firewallRules/{firewallRuleName}'
        # The first (string) argument to _serialize.url is only a label used
        # in serialization error messages; it must stay byte-identical.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id echoed back by ARM for request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        # Any status other than 200/204 is surfaced as a CloudError carrying
        # the service's request id for support correlation.
        if response.status_code not in [200, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def get(
            self, resource_group_name, server_name, firewall_rule_name, custom_headers=None, raw=False, **operation_config):
        """Gets a firewall rule.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param firewall_rule_name: The name of the firewall rule.
        :type firewall_rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`FirewallRule <azure.mgmt.sql.models.FirewallRule>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/firewallRules/{firewallRuleName}'
        # First (string) argument to _serialize.url is only an error-message
        # label; keep byte-identical.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id echoed back by ARM for request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('FirewallRule', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def list_by_server(
            self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
        """Returns a list of firewall rules.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`FirewallRulePaged
         <azure.mgmt.sql.models.FirewallRulePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Page fetcher invoked lazily by FirewallRulePaged: builds the
            # first-page URL itself, then follows server-supplied next links.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/firewallRules'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link already embeds every query parameter.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.FirewallRulePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            # NOTE(review): header_dict is never populated here, so raw
            # callers receive an empty header dict (generated-code pattern).
            header_dict = {}
            client_raw_response = models.FirewallRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
| |
# (C) Fractal Industries, Inc. 2016
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from datetime import datetime
import glob
from itertools import groupby
import os
import re
import sys
import time
import traceback
# project
from checks import LaconicFilter
import modules
from util import windows_friendly_colon_split
from utils.tailfile import TailFile
def partition(s, sep):
    """Split *s* at the first occurrence of *sep*.

    Returns a 3-tuple ``(head, sep, tail)``. Unlike ``str.partition``, the
    middle element is always *sep*, even when *sep* is absent — in that case
    head is the whole string and tail is empty. Callers here discard the
    middle element, so the difference is harmless.
    """
    idx = s.find(sep)
    if idx < 0:
        return (s, sep, '')
    return (s[:idx], sep, s[idx + len(sep):])
def point_sorter(p):
    """Sort/group key for a ``(metric, timestamp, value, attributes)`` point.

    Orders by timestamp, then metric name, then the host_name and
    device_name pulled from the attributes dict (None when absent).
    """
    metric, timestamp, _value, attrs = p
    return (timestamp, metric, attrs.get('host_name'), attrs.get('device_name'))
class EventDefaults(object):
    # Fallback values applied to parsed dogstream events that do not set an
    # explicit event_type / aggregation key (see Dogstream._line_parser).
    EVENT_TYPE = 'dogstream_event'
    EVENT_OBJECT = 'dogstream_event:default'
class Dogstreams(object):
    """Collection of Dogstream log tailers built from agent configuration."""

    @classmethod
    def init(cls, logger, config):
        """Build a Dogstreams instance from the agent's 'dogstreams' value."""
        raw_value = config.get('dogstreams', None)
        streams = cls._instantiate_dogstreams(logger, config, raw_value) if raw_value else []
        logger.info("Dogstream parsers: %s" % repr(streams))
        return cls(logger, streams)

    def __init__(self, logger, dogstreams):
        self.logger = logger
        self.dogstreams = dogstreams

    @classmethod
    def _instantiate_dogstreams(cls, logger, config, dogstreams_config):
        """
        Expecting dogstreams config value to look like:
            <dogstream value>, <dog stream value>, ...
        Where <dogstream value> looks like:
            <log path>
        or
            <log path>:<module>:<parser function>
        """
        streams = []
        for raw_item in dogstreams_config.split(','):
            try:
                item = raw_item.strip()
                parts = windows_friendly_colon_split(item)
                # Exactly two parts is ambiguous (a module with no parser
                # function), so it is rejected.
                if len(parts) == 2:
                    logger.warn("Invalid dogstream: %s" % ':'.join(parts))
                    continue
                paths = cls._get_dogstream_log_paths(parts[0]) if len(parts) else []
                spec = ':'.join(parts[1:3]) if len(parts) >= 3 else None
                extra_args = parts[3:] if len(parts) >= 3 else None
                for log_path in paths:
                    streams.append(Dogstream.init(
                        logger,
                        log_path=log_path,
                        parser_spec=spec,
                        parser_args=extra_args,
                        config=config))
            except Exception:
                logger.exception("Cannot build dogstream")
        return streams

    @classmethod
    def _get_dogstream_log_paths(cls, path):
        """
        Paths may include wildcard *'s and ?'s.
        """
        # NOTE(review): globbing is only triggered by '*'; a path containing
        # only '?' wildcards is returned verbatim — confirm that is intended.
        if '*' not in path:
            return [path]
        return glob.glob(path)

    def check(self, agentConfig, move_end=True):
        """Run every dogstream check and merge their outputs key by key."""
        if not self.dogstreams:
            return {}
        merged = {}
        for stream in self.dogstreams:
            try:
                result = stream.check(agentConfig, move_end)
                # result may contain {"dogstream": [new]}; values for a key
                # already present in the merged output are concatenated.
                assert type(result) == type(merged), "dogstream.check must return a dictionary"
                for key in result:
                    if key in merged:
                        merged[key].extend(result[key])
                    else:
                        merged[key] = result[key]
            except Exception:
                self.logger.exception("Error in parsing %s" % (stream.log_path))
        return merged
class Dogstream(object):
    # Tails a single log file and turns its lines into metric points and
    # events via a pluggable parser (function-based or class-based).
    # NOTE: this module is Python 2 (long literals, old except syntax,
    # generator .next()); keep that dialect when editing.

    @classmethod
    def init(cls, logger, log_path, parser_spec=None, parser_args=None, config=None):
        # Resolve the parser named by parser_spec ("module:function" or a
        # class); fall back to the default space-separated line parser when
        # the spec is absent or fails to load.
        class_based = False
        parse_func = None
        parse_args = tuple(parser_args or ())

        if parser_spec:
            try:
                parse_func = modules.load(parser_spec)
                if isinstance(parse_func, type):
                    logger.info('Instantiating class-based dogstream')
                    # Class-based parsers get their arguments at construction
                    # time, so no per-line args are retained.
                    parse_func = parse_func(
                        user_args=parse_args or (),
                        logger=logger,
                        log_path=log_path,
                        config=config,
                    )
                    parse_args = ()
                    class_based = True
                else:
                    logger.info('Instantiating function-based dogstream')
            except Exception:
                logger.exception(traceback.format_exc())
                logger.error('Could not load Dogstream line parser "%s" PYTHONPATH=%s' % (
                    parser_spec,
                    os.environ.get('PYTHONPATH', ''))
                )
            logger.info("dogstream: parsing %s with %s (requested %s)" % (log_path, parse_func, parser_spec))
        else:
            logger.info("dogstream: parsing %s with default parser" % log_path)

        return cls(logger, log_path, parse_func, parse_args, class_based=class_based)

    def __init__(self, logger, log_path, parse_func=None, parse_args=(), class_based=False):
        self.logger = logger
        self.class_based = class_based

        # Apply LaconicFilter to avoid log flooding
        self.logger.addFilter(LaconicFilter("dogstream"))

        self.log_path = log_path
        self.parse_func = parse_func or self._default_line_parser
        self.parse_args = parse_args

        self._gen = None  # lazily-created TailFile generator
        self._values = None  # metric points collected during the current check
        self._freq = 15  # Will get updated on each check()
        self._error_count = 0L
        self._line_count = 0L
        # Scratch dict passed to 4-arg parser functions so they can share
        # context (e.g. a running counter) between lines.
        self.parser_state = {}

    def check(self, agentConfig, move_end=True):
        # Tail the file to EOF, returning {"dogstream": [points]} plus
        # optionally {"dogstreamEvents": [events]}; {} when no path is set.
        if self.log_path:
            self._freq = int(agentConfig.get('check_freq', 15))
            self._values = []
            self._events = []

            # Build our tail -f
            if self._gen is None:
                self._gen = TailFile(self.logger, self.log_path, self._line_parser).tail(line_by_line=False, move_end=move_end)

            # read until the end of file
            try:
                self._gen.next()
                self.logger.debug("Done dogstream check for file {0}".format(self.log_path))
                self.logger.debug("Found {0} metric points".format(len(self._values)))
            except StopIteration, e:
                self.logger.exception(e)
                self.logger.warn("Can't tail %s file" % self.log_path)

            check_output = self._aggregate(self._values)
            if self._events:
                check_output.update({"dogstreamEvents": self._events})
                self.logger.debug("Found {0} events".format(len(self._events)))
            return check_output
        else:
            return {}

    def _line_parser(self, line):
        # TailFile callback: parse one line into events and/or metric
        # points, validating and time-bucketing as it goes.
        try:
            # alq - Allow parser state to be kept between invocations
            # This means a new argument can be passed the custom parsing function
            # to store context that can be shared between parsing of lines.
            # One example is a running counter, which is incremented each time
            # a line is processed.
            parsed = None
            if self.class_based:
                parsed = self.parse_func.parse_line(line)
            else:
                try:
                    parsed = self.parse_func(self.logger, line, self.parser_state, *self.parse_args)
                except TypeError:
                    # Arity of parse_func is 3 (old-style), not 4
                    parsed = self.parse_func(self.logger, line)

            self._line_count += 1

            if parsed is None:
                return

            # A parser may return one datum or a list of them.
            if isinstance(parsed, (tuple, dict)):
                parsed = [parsed]

            for datum in parsed:
                # Check if it's an event
                if isinstance(datum, dict):
                    # An event requires at least a title or a body
                    if 'msg_title' not in datum and 'msg_text' not in datum:
                        continue

                    # Populate the default fields
                    if 'event_type' not in datum:
                        datum['event_type'] = EventDefaults.EVENT_TYPE
                    if 'timestamp' not in datum:
                        datum['timestamp'] = time.time()
                    # Make sure event_object and aggregation_key (synonyms) are set
                    # FIXME when the backend treats those as true synonyms, we can
                    # deprecate event_object.
                    if 'event_object' in datum or 'aggregation_key' in datum:
                        datum['aggregation_key'] = datum.get('event_object', datum.get('aggregation_key'))
                    else:
                        datum['aggregation_key'] = EventDefaults.EVENT_OBJECT
                    datum['event_object'] = datum['aggregation_key']

                    self._events.append(datum)
                    continue

                # Otherwise, assume it's a metric
                try:
                    metric, ts, value, attrs = datum
                except Exception:
                    continue

                # Validation
                invalid_reasons = []
                try:
                    # Bucket points into self._freq-second buckets (15s by default)
                    ts = (int(float(ts)) / self._freq) * self._freq
                    date = datetime.fromtimestamp(ts)
                    # Sanity check: reject clearly-bogus epoch values.
                    assert date.year > 1990
                except Exception:
                    invalid_reasons.append('invalid timestamp')

                try:
                    value = float(value)
                except Exception:
                    invalid_reasons.append('invalid metric value')

                if invalid_reasons:
                    self.logger.debug('Invalid parsed values %s (%s): "%s"',
                                      repr(datum), ', '.join(invalid_reasons), line)
                else:
                    self._values.append((metric, ts, value, attrs))
        except Exception:
            self.logger.debug("Error while parsing line %s" % line, exc_info=True)
            self._error_count += 1
            self.logger.error("Parser error: %s out of %s" % (self._error_count, self._line_count))

    def _default_line_parser(self, logger, line):
        # Parses "metric timestamp value key=val key=val ..." lines; trailing
        # attributes stop at the first token without '='.
        sep = ' '
        metric, _, line = partition(line.strip(), sep)
        timestamp, _, line = partition(line.strip(), sep)
        value, _, line = partition(line.strip(), sep)

        attributes = {}
        try:
            while line:
                keyval, _, line = partition(line.strip(), sep)
                key, val = keyval.split('=', 1)
                attributes[key] = val
        except Exception:
            logger.debug(traceback.format_exc())

        return metric, timestamp, value, attributes

    def _aggregate(self, values):
        """ Aggregate values down to one point per (timestamp, metric,
            host_name, device_name) bucket and store as:
            {
                "dogstream": [(metric, timestamp, value, {key: val})]
            }
            When a bucket holds several values: 'counter' metrics are summed,
            'gauge' metrics and untyped metrics keep the last value seen.
        """
        output = []

        values.sort(key=point_sorter)

        for (timestamp, metric, host_name, device_name), val_attrs in groupby(values, key=point_sorter):
            attributes = {}
            vals = []
            for _metric, _timestamp, v, a in val_attrs:
                try:
                    v = float(v)
                    vals.append(v)
                    attributes.update(a)
                except Exception:
                    self.logger.debug("Could not convert %s into a float", v)
            if len(vals) == 1:
                val = vals[0]
            elif len(vals) > 1:
                # Default aggregation: keep the most recent value.
                val = vals[-1]
            else:  # len(vals) == 0
                continue
            metric_type = str(attributes.get('metric_type', '')).lower()
            if metric_type == 'gauge':
                val = float(val)
            elif metric_type == 'counter':
                val = sum(vals)
            output.append((metric, timestamp, val, attributes))

        if output:
            return {"dogstream": output}
        else:
            return {}
# Allow a smooth uninstall of previous version
class RollupLP:
    # Intentionally empty: kept only so unpickling/imports from an older
    # agent version do not break during upgrade.
    pass
class CMForwarder(object):
    # Tails the conmon forwarder log and extracts the transaction queue's
    # size (KB) and count from its periodic "Queue size" status lines.

    QUEUE_SIZE = "queue_size"
    QUEUE_COUNT = "queue_count"

    # Matches e.g. "[...] Queue size: at <ts>, <n> transaction(s), <kb> KB"
    # groups: (timestamp, transaction count, size in KB)
    RE_QUEUE_STAT = re.compile(r"\[.*\] Queue size: at (.*), (\d+) transaction\(s\), (\d+) KB")

    def __init__(self, logger, config):
        self.log_path = config.get('ddforwarder_log', '/var/log/ddforwarder.log')
        self.logger = logger
        self._gen = None  # lazily-created TailFile generator

    def _init_metrics(self):
        # metric name -> list of (timestamp, value) points
        self.metrics = {}

    def _add_metric(self, name, value, ts):
        if name in self.metrics:
            self.metrics[name].append((ts, value))
        else:
            self.metrics[name] = [(ts, value)]

    def _parse_line(self, line):
        # TailFile callback: record queue stats from a matching status line;
        # non-matching lines are silently ignored.
        # NOTE(review): count/size are stored as the matched strings, not
        # numbers — presumably coerced downstream; confirm.
        try:
            m = self.RE_QUEUE_STAT.match(line)
            if m is not None:
                ts, count, size = m.groups()
                self._add_metric(self.QUEUE_SIZE, size, round(float(ts)))
                self._add_metric(self.QUEUE_COUNT, count, round(float(ts)))
        except Exception, e:
            self.logger.exception(e)

    def check(self, agentConfig, move_end=True):
        # Tail the forwarder log to EOF and return {'ddforwarder': metrics};
        # {} when the log file does not exist.
        if self.log_path and os.path.isfile(self.log_path):

            # reset metric points
            self._init_metrics()

            # Build our tail -f
            if self._gen is None:
                self._gen = TailFile(self.logger, self.log_path, self._parse_line).tail(line_by_line=False,
                                                                                       move_end=move_end)

            # read until the end of file
            try:
                self._gen.next()
                self.logger.debug("Done ddforwarder check for file %s" % self.log_path)
            except StopIteration, e:
                self.logger.exception(e)
                self.logger.warn("Can't tail %s file" % self.log_path)

            return {'ddforwarder': self.metrics}
        else:
            self.logger.debug("Can't tail conmon forwarder log file: %s" % self.log_path)
            return {}
def testCMForwarder():
    """Manual smoke test: tail the forwarder log given as argv[1] and print
    the extracted metrics every 5 seconds. Runs until interrupted.
    """
    import logging
    logger = logging.getLogger("cmagent.checks.conmon")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())
    config = {'api_key': 'my_apikey', 'ddforwarder_log': sys.argv[1]}
    cm = CMForwarder(logger, config)
    m = cm.check(config, move_end=False)
    while True:
        # print(m) is identical to `print m` for a single expression under
        # Python 2, and keeps this module parseable under Python 3.
        print(m)
        time.sleep(5)
        m = cm.check(config)
# Run as a script for the manual CMForwarder smoke test above; expects the
# ddforwarder log path as the first CLI argument.
if __name__ == '__main__':
    testCMForwarder()
| |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import os
import time
import mock
import unittest
from swift.common import swob, utils
from swift.common.middleware import versioned_writes, copy
from swift.common.swob import Request
from test.unit.common.middleware.helpers import FakeSwift
class FakeCache(object):
    """Minimal stand-in for a memcache client used by the middleware tests.

    Every lookup returns the dict supplied at construction; a 'status' of
    200 is injected into that dict (in place) when the caller did not
    provide one.
    """

    def __init__(self, val):
        val.setdefault('status', 200)
        self.val = val

    def get(self, *args):
        # Arguments are accepted for interface compatibility and ignored.
        return self.val
def local_tz(func):
    """Decorator that runs *func* under the US Eastern timezone.

    Uses the Eastern Time Zone definition from the time module's docs and
    restores the previous TZ (empty string when it was unset) afterwards.
    The timezone affects things like time.time() and time.mktime().
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        saved_tz = os.environ.get('TZ', '')
        try:
            os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
            time.tzset()
            return func(*args, **kwargs)
        finally:
            # Restore inside finally so a failing test cannot leak the
            # temporary timezone into later tests.
            os.environ['TZ'] = saved_tz
            time.tzset()
    return wrapper
class VersionedWritesBaseTestCase(unittest.TestCase):
    """Shared fixture: a FakeSwift backend wrapped in the versioned_writes
    middleware, plus helpers to drive WSGI requests through either app."""

    def setUp(self):
        self.app = FakeSwift()
        self.vw = versioned_writes.filter_factory(
            {'allow_versioned_writes': 'true'})(self.app)

    def tearDown(self):
        # Every response body iterator must have been consumed and closed.
        self.assertEqual(self.app.unclosed_requests, {})

    def call_app(self, req, app=None):
        """Run *req* through *app* (default: the bare FakeSwift app).

        Returns ``(status, headers, body)``. Authorization callbacks are
        recorded in ``self.authorized``.
        """
        target = self.app if app is None else app
        self.authorized = []

        def authorize(request):
            self.authorized.append(request)

        # Install our recording authorizer unless the request brought its own.
        req.environ.setdefault('swift.authorize', authorize)
        req.headers.setdefault("User-Agent", "Marula Kruger")

        captured = {'status': None, 'headers': None}

        def start_response(resp_status, resp_headers, exc_info=None):
            captured['status'] = resp_status
            captured['headers'] = resp_headers

        body_iter = target(req.environ, start_response)
        with utils.closing_if_possible(body_iter):
            body = b''.join(body_iter)
        return captured['status'], captured['headers'], body

    def call_vw(self, req):
        """Shortcut: drive *req* through the versioned_writes middleware."""
        return self.call_app(req, app=self.vw)

    def assertRequestEqual(self, req, other):
        # Requests are considered equal here when method and path match.
        self.assertEqual(req.method, other.method)
        self.assertEqual(req.path, other.path)
class VersionedWritesTestCase(VersionedWritesBaseTestCase):
def test_put_container(self):
self.app.register('PUT', '/v1/a/c', swob.HTTPOk, {}, 'passed')
req = Request.blank('/v1/a/c',
headers={'X-Versions-Location': 'ver_cont'},
environ={'REQUEST_METHOD': 'PUT'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
# check for sysmeta header
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c', path)
self.assertIn('x-container-sysmeta-versions-location', req_headers)
self.assertEqual(req.headers['x-container-sysmeta-versions-location'],
'ver_cont')
self.assertIn('x-container-sysmeta-versions-mode', req_headers)
self.assertEqual(req.headers['x-container-sysmeta-versions-mode'],
'stack')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
    def test_put_container_history_header(self):
        """PUT with X-History-Location becomes versioning sysmeta with
        mode 'history' on the backend container request."""
        self.app.register('PUT', '/v1/a/c', swob.HTTPOk, {}, 'passed')
        req = Request.blank('/v1/a/c',
                            headers={'X-History-Location': 'ver_cont'},
                            environ={'REQUEST_METHOD': 'PUT'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')

        # check for sysmeta header
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('PUT', method)
        self.assertEqual('/v1/a/c', path)
        self.assertIn('x-container-sysmeta-versions-location', req_headers)
        self.assertEqual('ver_cont',
                         req_headers['x-container-sysmeta-versions-location'])
        self.assertIn('x-container-sysmeta-versions-mode', req_headers)
        self.assertEqual('history',
                         req_headers['x-container-sysmeta-versions-mode'])
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
def test_put_container_both_headers(self):
req = Request.blank('/v1/a/c',
headers={'X-Versions-Location': 'ver_cont',
'X-History-Location': 'ver_cont'},
environ={'REQUEST_METHOD': 'PUT'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '400 Bad Request')
self.assertFalse(self.app.calls)
    def test_container_allow_versioned_writes_false(self):
        """With allow_versioned_writes disabled, setting a versioning header
        on PUT/POST fails 412 while plain GET/HEAD pass through."""
        self.vw.conf = {'allow_versioned_writes': 'false'}

        # PUT/POST container must fail as 412 when allow_versioned_writes
        # set to false
        for method in ('PUT', 'POST'):
            for header in ('X-Versions-Location', 'X-History-Location'):
                req = Request.blank('/v1/a/c',
                                    headers={header: 'ver_cont'},
                                    environ={'REQUEST_METHOD': method})
                status, headers, body = self.call_vw(req)
                self.assertEqual(status, "412 Precondition Failed",
                                 'Got %s instead of 412 when %sing '
                                 'with %s header' % (status, method, header))

        # GET performs as normal
        self.app.register('GET', '/v1/a/c', swob.HTTPOk, {}, 'passed')

        for method in ('GET', 'HEAD'):
            req = Request.blank('/v1/a/c',
                                headers={'X-Versions-Location': 'ver_cont'},
                                environ={'REQUEST_METHOD': method})
            status, headers, body = self.call_vw(req)
            self.assertEqual(status, '200 OK')
    def _test_removal(self, headers):
        """Helper: POST *headers* to the container and assert that all
        versioning sysmeta (and the client-visible header) were blanked on
        the backend request."""
        self.app.register('POST', '/v1/a/c', swob.HTTPNoContent, {}, 'passed')
        req = Request.blank('/v1/a/c',
                            headers=headers,
                            environ={'REQUEST_METHOD': 'POST'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '204 No Content')

        # check for sysmeta header
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('POST', method)
        self.assertEqual('/v1/a/c', path)
        for header in ['x-container-sysmeta-versions-location',
                       'x-container-sysmeta-versions-mode',
                       'x-versions-location']:
            self.assertIn(header, req_headers)
            self.assertEqual('', req_headers[header])
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
def test_remove_headers(self):
self._test_removal({'X-Remove-Versions-Location': 'x'})
self._test_removal({'X-Remove-History-Location': 'x'})
def test_empty_versions_location(self):
self._test_removal({'X-Versions-Location': ''})
self._test_removal({'X-History-Location': ''})
    def test_remove_add_versions_precedence(self):
        """When a remove header and a location header are both sent, the add
        wins: sysmeta is set and the remove header is not forwarded."""
        self.app.register(
            'POST', '/v1/a/c', swob.HTTPOk,
            {'x-container-sysmeta-versions-location': 'ver_cont'},
            'passed')
        req = Request.blank('/v1/a/c',
                            headers={'X-Remove-Versions-Location': 'x',
                                     'X-Versions-Location': 'ver_cont'},
                            environ={'REQUEST_METHOD': 'POST'})

        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertIn(('X-Versions-Location', 'ver_cont'), headers)

        # check for sysmeta header
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('POST', method)
        self.assertEqual('/v1/a/c', path)
        self.assertIn('x-container-sysmeta-versions-location', req_headers)
        self.assertNotIn('x-remove-versions-location', req_headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def _test_blank_add_versions_precedence(self, blank_header, add_header):
        """Helper: when one location header is blank and the other names a
        container, the non-blank one wins and sets the matching mode."""
        self.app.register(
            'POST', '/v1/a/c', swob.HTTPOk,
            {'x-container-sysmeta-versions-location': 'ver_cont'},
            'passed')
        req = Request.blank('/v1/a/c',
                            headers={blank_header: '',
                                     add_header: 'ver_cont'},
                            environ={'REQUEST_METHOD': 'POST'})

        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')

        # check for sysmeta header
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[-1]
        self.assertEqual('POST', method)
        self.assertEqual('/v1/a/c', path)
        self.assertIn('x-container-sysmeta-versions-location', req_headers)
        self.assertEqual('ver_cont',
                         req_headers['x-container-sysmeta-versions-location'])
        self.assertIn('x-container-sysmeta-versions-mode', req_headers)
        # Mode follows whichever header carried the non-blank value.
        self.assertEqual('history' if add_header == 'X-History-Location'
                         else 'stack',
                         req_headers['x-container-sysmeta-versions-mode'])
        self.assertNotIn('x-remove-versions-location', req_headers)
        self.assertIn('x-versions-location', req_headers)
        self.assertEqual('', req_headers['x-versions-location'])
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
def test_blank_add_versions_precedence(self):
self._test_blank_add_versions_precedence(
'X-Versions-Location', 'X-History-Location')
self._test_blank_add_versions_precedence(
'X-History-Location', 'X-Versions-Location')
    def test_get_container(self):
        """Container GET translates versioning sysmeta in 'stack' mode back
        into a client-visible X-Versions-Location response header."""
        self.app.register(
            'GET', '/v1/a/c', swob.HTTPOk,
            {'x-container-sysmeta-versions-location': 'ver_cont',
             'x-container-sysmeta-versions-mode': 'stack'}, None)
        req = Request.blank(
            '/v1/a/c',
            environ={'REQUEST_METHOD': 'GET'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertIn(('X-Versions-Location', 'ver_cont'), headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_head_container(self):
        """Container HEAD translates versioning sysmeta in 'history' mode
        back into a client-visible X-History-Location response header."""
        self.app.register(
            'HEAD', '/v1/a/c', swob.HTTPOk,
            {'x-container-sysmeta-versions-location': 'other_ver_cont',
             'x-container-sysmeta-versions-mode': 'history'}, None)
        req = Request.blank(
            '/v1/a/c',
            environ={'REQUEST_METHOD': 'HEAD'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertIn(('X-History-Location', 'other_ver_cont'), headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_get_head(self):
        """Object GET and HEAD pass straight through, each authorized once."""
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'GET'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

        self.app.register('HEAD', '/v1/a/c/o', swob.HTTPOk, {}, None)
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'HEAD'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_put_object_no_versioning(self):
        """Object PUT into a container with no versioning configured is a
        plain pass-through (no extra backend calls)."""
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')

        cache = FakeCache({})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_put_object_post_as_copy(self):
        # PUTs due to a post-as-copy should NOT cause a versioning op
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')

        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100',
                     'swift.post_as_copy': True})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '201 Created')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        # Exactly one backend call: no version-copy GET/PUT was attempted.
        self.assertEqual(1, self.app.call_count)
    def test_put_first_object_success(self):
        """First PUT of an object (the pre-copy GET 404s) succeeds without
        performing any version copy."""
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/c/o', swob.HTTPNotFound, {}, None)

        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100',
                     'swift.trans_id': 'fake_trans_id'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 2)
        # Versioned writes middleware now calls auth on the incoming request
        # before we try the GET and then at the proxy, so there are 2
        # authorized for the same request.
        self.assertRequestEqual(req, self.authorized[0])
        self.assertRequestEqual(req, self.authorized[1])
        self.assertEqual(2, self.app.call_count)
        self.assertEqual(['VW', None], self.app.swift_sources)
        self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
    def test_put_object_no_versioning_with_container_config_true(self):
        # set False to versions_write and expect no GET occurred
        self.vw.conf = {'allow_versioned_writes': 'false'}
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
        cache = FakeCache({'versions': 'ver_cont'})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '201 Created')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        # No GET means no version copy was even attempted.
        called_method = [method for (method, path, hdrs) in self.app._calls]
        self.assertNotIn('GET', called_method)
    def test_put_request_is_dlo_manifest_with_container_config_true(self):
        """PUT of a DLO manifest archives the previous version first and
        still forwards X-Object-Manifest on the final object PUT."""
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/c/o', swob.HTTPOk,
            {'last-modified': 'Thu, 1 Jan 1970 00:01:00 GMT'}, 'old version')
        self.app.register(
            'PUT', '/v1/a/ver_cont/001o/0000000060.00000', swob.HTTPCreated,
            {}, '')
        cache = FakeCache({'versions': 'ver_cont'})
        req = Request.blank(
            '/v1/a/c/o',
            headers={'X-Object-Manifest': 'req/manifest'},
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '201 Created')
        self.assertEqual(len(self.authorized), 2)
        self.assertRequestEqual(req, self.authorized[0])
        self.assertRequestEqual(req, self.authorized[1])
        self.assertEqual(3, self.app.call_count)
        # Old version is archived under <name-len><name>/<last-modified ts>
        # before the new object PUT goes through.
        self.assertEqual([
            ('GET', '/v1/a/c/o'),
            ('PUT', '/v1/a/ver_cont/001o/0000000060.00000'),
            ('PUT', '/v1/a/c/o'),
        ], self.app.calls)
        self.assertIn('x-object-manifest',
                      self.app.calls_with_headers[2].headers)
    def test_put_version_is_dlo_manifest_with_container_config_true(self):
        """PUT over an existing DLO manifest archives the manifest pointer.

        The archived copy (second backend call) must carry the
        X-Object-Manifest header read back from the old object.
        """
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                          {'X-Object-Manifest': 'resp/manifest',
                           'last-modified': 'Thu, 1 Jan 1970 01:00:00 GMT'},
                          'passed')
        self.app.register(
            'PUT', '/v1/a/ver_cont/001o/0000003600.00000', swob.HTTPCreated,
            {}, '')
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
        cache = FakeCache({'versions': 'ver_cont'})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '201 Created')
        # The middleware now auths the request before the initial GET, the
        # same GET that gets the X-Object-Manifest back. So a second auth is
        # now done.
        self.assertEqual(len(self.authorized), 2)
        self.assertRequestEqual(req, self.authorized[0])
        self.assertRequestEqual(req, self.authorized[1])
        self.assertEqual(3, self.app.call_count)
        # Archive timestamp (3600s) derives from the old last-modified hour.
        self.assertEqual([
            ('GET', '/v1/a/c/o'),
            ('PUT', '/v1/a/ver_cont/001o/0000003600.00000'),
            ('PUT', '/v1/a/c/o'),
        ], self.app.calls)
        self.assertIn('x-object-manifest',
                      self.app.calls_with_headers[1].headers)
def test_delete_object_no_versioning_with_container_config_true(self):
# set False to versions_write obviously and expect no GET versioning
# container and GET/PUT called (just delete object as normal)
self.vw.conf = {'allow_versioned_writes': 'false'}
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, 'passed')
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '204 No Content')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
called_method = \
[method for (method, path, rheaders) in self.app._calls]
self.assertNotIn('PUT', called_method)
self.assertNotIn('GET', called_method)
self.assertEqual(1, self.app.call_count)
def test_new_version_success(self):
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000001.00000', swob.HTTPCreated,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100',
'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '201 Created')
# authorized twice now because versioned_writes now makes a check on
# PUT
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(['VW', 'VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
def test_new_version_get_errors(self):
# GET on source fails, expect client error response,
# no PUT should happen
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPBadRequest, {}, None)
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(1, self.app.call_count)
# GET on source fails, expect server error response
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPBadGateway, {}, None)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(2, self.app.call_count)
def test_new_version_put_errors(self):
# PUT of version fails, expect client error response
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000001.00000',
swob.HTTPUnauthorized, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(2, self.app.call_count)
# PUT of version fails, expect server error response
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000001.00000', swob.HTTPBadGateway,
{}, None)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(4, self.app.call_count)
    # NOTE(review): @local_tz presumably pins the timezone so the archived
    # timestamp is deterministic — defined elsewhere in this module; confirm.
    @local_tz
    def test_new_version_sysmeta_precedence(self):
        """Sysmeta versions-location wins over the legacy cache value.

        The cache carries both an old-style 'versions' entry and a sysmeta
        location; the archive PUT must target the sysmeta container
        ('ver_cont', not 'old_ver_cont').
        """
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/c/o', swob.HTTPOk,
            {'last-modified': 'Thu, 1 Jan 1970 00:00:00 GMT'}, 'passed')
        self.app.register(
            'PUT', '/v1/a/ver_cont/001o/0000000000.00000', swob.HTTPOk,
            {}, None)
        # fill cache with two different values for versions location
        # new middleware should use sysmeta first
        cache = FakeCache({'versions': 'old_ver_cont',
                           'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        # authorized twice now because versioned_writes now makes a check on
        # PUT
        self.assertEqual(len(self.authorized), 2)
        self.assertRequestEqual(req, self.authorized[0])
        # check that sysmeta header was used
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[1]
        self.assertEqual('PUT', method)
        self.assertEqual('/v1/a/ver_cont/001o/0000000000.00000', path)
def test_delete_no_versions_container_success(self):
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET',
'/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
swob.HTTPNotFound, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(2, self.app.call_count)
self.assertEqual(['VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', prefix_listing_prefix + 'marker=&reverse=on'),
('DELETE', '/v1/a/c/o'),
])
def test_delete_first_object_success(self):
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET',
'/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
swob.HTTPOk, {}, '[]')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', prefix_listing_prefix + 'marker=&reverse=on'),
('DELETE', '/v1/a/c/o'),
])
    def test_delete_latest_version_no_marker_success(self):
        """DELETE restores the newest archived version over the object.

        The listing (newest-first via reverse=on) yields 001o/2; that copy
        is fetched, PUT back as the current object, then removed from the
        versions container.  X-If-Delete-At must be stripped from the
        version DELETE.
        """
        self.app.register(
            'GET',
            '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}, '
            '{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/2', swob.HTTPCreated,
            {'content-length': '3'}, None)
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk,
            {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            headers={'X-If-Delete-At': 1},
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        self.assertEqual(4, self.app.call_count)
        self.assertEqual(['VW', 'VW', 'VW', 'VW'], self.app.swift_sources)
        self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
        # check that X-If-Delete-At was removed from DELETE request
        req_headers = self.app.headers[-1]
        self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers])
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', '/v1/a/ver_cont/001o/2'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/2'),
        ])
    def test_delete_latest_version_restores_marker_success(self):
        """DELETE when the newest archived entry is a delete marker.

        With a marker on top of the stack but the current object present
        (HEAD 200), the DELETE passes straight through — and X-If-Delete-At
        is preserved on it, since the original object is being deleted.
        """
        self.app.register(
            'GET',
            '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "application/x-deleted;swift_versions_deleted=1"'
            '}, {"hash": "y", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"'
            '}]')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {})
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            headers={'X-If-Delete-At': 1},
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '204 No Content')
        self.assertEqual(len(self.authorized), 2)
        self.assertRequestEqual(req, self.authorized[0])
        self.assertRequestEqual(req, self.authorized[1])
        calls = self.app.calls_with_headers
        self.assertEqual(['GET', 'HEAD', 'DELETE'],
                         [c.method for c in calls])
        # The HEAD on the current object is made with X-Newest: True.
        self.assertIn('X-Newest', calls[1].headers)
        self.assertEqual('True', calls[1].headers['X-Newest'])
        method, path, req_headers = calls.pop()
        self.assertTrue(path.startswith('/v1/a/c/o'))
        # Since we're deleting the original, this *should* still be present:
        self.assertEqual('1', req_headers.get('X-If-Delete-At'))
    def test_delete_latest_version_is_marker_success(self):
        """Pop a delete marker and restore the data version beneath it."""
        # Test popping a delete marker off the stack. So, there's data in the
        # versions container, topped by a delete marker, and there's nothing
        # in the base versioned container.
        self.app.register(
            'GET',
            '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "application/x-deleted;swift_versions_deleted=1"'
            '},{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"'
            '}]')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, 'passed')
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            headers={'X-If-Delete-At': 1},
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        # The restored version (001o/1) is deleted before the marker
        # (001o/2).
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('HEAD', '/v1/a/c/o'),
            ('GET', '/v1/a/ver_cont/001o/1'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/1'),
            ('DELETE', '/v1/a/ver_cont/001o/2'),
        ])
        # HEAD of the object and GET of the version both use X-Newest: True.
        self.assertIn('X-Newest', self.app.headers[1])
        self.assertEqual('True', self.app.headers[1]['X-Newest'])
        self.assertIn('X-Newest', self.app.headers[2])
        self.assertEqual('True', self.app.headers[2]['X-Newest'])
        # check that X-If-Delete-At was removed from DELETE request
        for req_headers in self.app.headers[-2:]:
            self.assertNotIn('x-if-delete-at',
                             [h.lower() for h in req_headers])
    def test_delete_latest_version_doubled_up_markers_success(self):
        """Two stacked delete markers: only the topmost one is removed.

        With no current object (HEAD 404) and markers at both 001o/3 and
        001o/2, the middleware deletes just 001o/3 and performs no restore;
        X-If-Delete-At is stripped from that DELETE.
        """
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/'
            '&marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/3", '
            '"content_type": "application/x-deleted;swift_versions_deleted=1"'
            '}, {"hash": "y", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "application/x-deleted;swift_versions_deleted=1"'
            '}, {"hash": "y", '
            '"last_modified": "2014-11-20T14:23:02.206740", '
            '"bytes": 30, '
            '"name": "001o/1", '
            '"content_type": "text/plain"'
            '}]')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, 'passed')
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/3', swob.HTTPOk, {}, 'passed')
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            headers={'X-If-Delete-At': 1},
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        # check that X-If-Delete-At was removed from DELETE request
        calls = self.app.calls_with_headers
        self.assertEqual(['GET', 'HEAD', 'DELETE'],
                         [c.method for c in calls])
        method, path, req_headers = calls.pop()
        self.assertTrue(path.startswith('/v1/a/ver_cont/001o/3'))
        self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers])
    @mock.patch('swift.common.middleware.versioned_writes.time.time',
                return_value=1234)
    def test_history_delete_marker_no_object_success(self, mock_time):
        """History mode: DELETE of a missing object still writes a marker.

        Even though the source GET 404s, a zero-byte delete marker (typed
        application/x-deleted;swift_versions_deleted=1) is PUT at the mocked
        time (1234), and the backend DELETE's 404 is passed through.
        """
        self.app.register(
            'GET', '/v1/a/c/o', swob.HTTPNotFound,
            {}, 'passed')
        self.app.register(
            'PUT', '/v1/a/ver_cont/001o/0000001234.00000', swob.HTTPCreated,
            {}, 'passed')
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPNotFound, {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont',
                                       'versions-mode': 'history'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '404 Not Found')
        self.assertEqual(len(self.authorized), 2)
        # The marker PUT is what got authorized second.
        req.environ['REQUEST_METHOD'] = 'PUT'
        self.assertRequestEqual(req, self.authorized[0])
        calls = self.app.calls_with_headers
        self.assertEqual(['GET', 'PUT', 'DELETE'], [c.method for c in calls])
        self.assertEqual('application/x-deleted;swift_versions_deleted=1',
                         calls[1].headers.get('Content-Type'))
    @mock.patch('swift.common.middleware.versioned_writes.time.time',
                return_value=123456789.54321)
    def test_history_delete_marker_over_object_success(self, mock_time):
        """History mode: DELETE archives the object, then writes a marker.

        The existing object is copied into the versions container at its
        last-modified timestamp (1416421142), a delete marker is PUT at the
        mocked current time (0123456789.54321), and the DELETE proceeds.
        """
        self.app.register(
            'GET', '/v1/a/c/o', swob.HTTPOk,
            {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed')
        self.app.register(
            'PUT', '/v1/a/ver_cont/001o/1416421142.00000', swob.HTTPCreated,
            {}, 'passed')
        self.app.register(
            'PUT', '/v1/a/ver_cont/001o/0123456789.54321', swob.HTTPCreated,
            {}, 'passed')
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont',
                                       'versions-mode': 'history'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '204 No Content')
        self.assertEqual('', body)
        self.assertEqual(len(self.authorized), 2)
        # The archive/marker PUTs are what got authorized second.
        req.environ['REQUEST_METHOD'] = 'PUT'
        self.assertRequestEqual(req, self.authorized[0])
        calls = self.app.calls_with_headers
        self.assertEqual(['GET', 'PUT', 'PUT', 'DELETE'],
                         [c.method for c in calls])
        self.assertEqual('/v1/a/ver_cont/001o/1416421142.00000',
                         calls[1].path)
        self.assertEqual('application/x-deleted;swift_versions_deleted=1',
                         calls[2].headers.get('Content-Type'))
    def test_delete_single_version_success(self):
        """A single-entry first page is still handled as a reversed listing."""
        # check that if the first listing page has just a single item then
        # it is not erroneously inferred to be a non-reversed listing
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET',
            '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
            {'content-length': '3'}, None)
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
            {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        # Restore the lone version, then remove it from the container.
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', '/v1/a/ver_cont/001o/1'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/1'),
        ])
    def test_DELETE_on_expired_versioned_object(self):
        """Expired latest version (404 on GET) falls back to the next one.

        001o/2 is listed but its GET 404s (expired); the middleware then
        restores 001o/1 instead and deletes that entry from the container.
        """
        self.app.register(
            'GET',
            '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}, '
            '{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}]')
        # expired object
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound,
            {}, None)
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/1', swob.HTTPCreated,
            {'content-length': '3'}, None)
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
            {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        self.assertEqual(5, self.app.call_count)
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', '/v1/a/ver_cont/001o/2'),
            ('GET', '/v1/a/ver_cont/001o/1'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/1'),
        ])
    def test_denied_DELETE_of_versioned_object(self):
        """Denied authorization stops processing right after the listing."""
        authorize_call = []
        self.app.register(
            'GET',
            '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}, '
            '{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}]')
        def fake_authorize(req):
            # the container GET is pre-auth'd so here we deny the object DELETE
            authorize_call.append(req)
            return swob.HTTPForbidden()
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'swift.authorize': fake_authorize,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '403 Forbidden')
        self.assertEqual(len(authorize_call), 1)
        self.assertRequestEqual(req, authorize_call[0])
        # Only the pre-auth'd listing GET reached the backend.
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
        ])
    def test_denied_PUT_of_versioned_object(self):
        """Denied authorization on a versioned PUT: no backend calls at all."""
        authorize_call = []
        self.app.register(
            'GET', '/v1/a/c/o', swob.HTTPOk,
            {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
        def fake_authorize(req):
            # we should deny the object PUT
            authorize_call.append(req)
            return swob.HTTPForbidden()
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'swift.authorize': fake_authorize,
                     'CONTENT_LENGTH': '0'})
        # Save off a copy, as the middleware may modify the original
        expected_req = Request(req.environ.copy())
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '403 Forbidden')
        self.assertEqual(len(authorize_call), 1)
        self.assertRequestEqual(expected_req, authorize_call[0])
        self.assertEqual(self.app.calls, [])
class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase):
    """Tests against old-style containers whose listings may not honor
    reverse=on, forcing the middleware to page through listings manually.
    """
    def test_delete_latest_version_success(self):
        """DELETE pages past the first listing, restores 001o/2, deletes it."""
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}, '
            '{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/'
            '&marker=001o/2',
            swob.HTTPNotFound, {}, None)
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/2', swob.HTTPCreated,
            {'content-length': '3'}, None)
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk,
            {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            headers={'X-If-Delete-At': 1},
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        self.assertEqual(5, self.app.call_count)
        self.assertEqual(['VW', 'VW', 'VW', 'VW', 'VW'],
                         self.app.swift_sources)
        self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
        # check that X-If-Delete-At was removed from DELETE request
        req_headers = self.app.headers[-1]
        self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers])
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', prefix_listing_prefix + 'marker=001o/2'),
            ('GET', '/v1/a/ver_cont/001o/2'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/2'),
        ])
    def test_DELETE_on_expired_versioned_object(self):
        """Expired latest version (404 on GET) falls back to the next one."""
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}, '
            '{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/'
            '&marker=001o/2',
            swob.HTTPNotFound, {}, None)
        # expired object
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound,
            {}, None)
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/1', swob.HTTPCreated,
            {'content-length': '3'}, None)
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
            {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        self.assertEqual(6, self.app.call_count)
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', prefix_listing_prefix + 'marker=001o/2'),
            ('GET', '/v1/a/ver_cont/001o/2'),
            ('GET', '/v1/a/ver_cont/001o/1'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/1'),
        ])
    def test_denied_DELETE_of_versioned_object(self):
        """Denied authorization stops processing after the listing pages."""
        authorize_call = []
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=&reverse=on',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}, '
            '{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/'
            '&marker=001o/2',
            swob.HTTPNotFound, {}, None)
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPForbidden,
            {}, None)
        def fake_authorize(req):
            authorize_call.append(req)
            return swob.HTTPForbidden()
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'swift.authorize': fake_authorize,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '403 Forbidden')
        self.assertEqual(len(authorize_call), 1)
        self.assertRequestEqual(req, authorize_call[0])
        # Only the two listing GETs reached the backend.
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', prefix_listing_prefix + 'marker=001o/2'),
        ])
    def test_partially_upgraded_cluster(self):
        """Mixed container servers: reversed page, then forward paging."""
        old_versions = [
            {'hash': 'etag%d' % x,
             'last_modified': "2014-11-21T14:14:%02d.409100" % x,
             'bytes': 3,
             'name': '001o/%d' % x,
             'content_type': 'text/plain'}
            for x in range(5)]
        # first container server can reverse
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=&reverse=on',
            swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[2:]))))
        # but all objects are already gone
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound,
            {}, None)
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound,
            {}, None)
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound,
            {}, None)
        # second container server can't reverse
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=001o/2&reverse=on',
            swob.HTTPOk, {}, json.dumps(old_versions[3:]))
        # subsequent requests shouldn't reverse
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=&end_marker=001o/2',
            swob.HTTPOk, {}, json.dumps(old_versions[:1]))
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=001o/0&end_marker=001o/2',
            swob.HTTPOk, {}, json.dumps(old_versions[1:2]))
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=001o/1&end_marker=001o/2',
            swob.HTTPOk, {}, '[]')
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
            {'content-length': '3'}, None)
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPNoContent,
            {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '204 No Content')
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', '/v1/a/ver_cont/001o/4'),
            ('GET', '/v1/a/ver_cont/001o/3'),
            ('GET', '/v1/a/ver_cont/001o/2'),
            ('GET', prefix_listing_prefix + 'marker=001o/2&reverse=on'),
            ('GET', prefix_listing_prefix + 'marker=&end_marker=001o/2'),
            ('GET', prefix_listing_prefix + 'marker=001o/0&end_marker=001o/2'),
            ('GET', prefix_listing_prefix + 'marker=001o/1&end_marker=001o/2'),
            ('GET', '/v1/a/ver_cont/001o/1'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/1'),
        ])
    def test_partially_upgraded_cluster_single_result_on_second_page(self):
        """As above, but the non-reversing page returns a single item."""
        old_versions = [
            {'hash': 'etag%d' % x,
             'last_modified': "2014-11-21T14:14:%02d.409100" % x,
             'bytes': 3,
             'name': '001o/%d' % x,
             'content_type': 'text/plain'}
            for x in range(5)]
        # first container server can reverse
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=&reverse=on',
            swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[-2:]))))
        # but both objects are already gone
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound,
            {}, None)
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound,
            {}, None)
        # second container server can't reverse
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=001o/3&reverse=on',
            swob.HTTPOk, {}, json.dumps(old_versions[4:]))
        # subsequent requests shouldn't reverse
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=&end_marker=001o/3',
            swob.HTTPOk, {}, json.dumps(old_versions[:2]))
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=001o/1&end_marker=001o/3',
            swob.HTTPOk, {}, json.dumps(old_versions[2:3]))
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&'
            'marker=001o/2&end_marker=001o/3',
            swob.HTTPOk, {}, '[]')
        self.app.register(
            'GET', '/v1/a/ver_cont/001o/2', swob.HTTPOk,
            {'content-length': '3'}, None)
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPNoContent,
            {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '204 No Content')
        prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&'
        self.assertEqual(self.app.calls, [
            ('GET', prefix_listing_prefix + 'marker=&reverse=on'),
            ('GET', '/v1/a/ver_cont/001o/4'),
            ('GET', '/v1/a/ver_cont/001o/3'),
            ('GET', prefix_listing_prefix + 'marker=001o/3&reverse=on'),
            ('GET', prefix_listing_prefix + 'marker=&end_marker=001o/3'),
            ('GET', prefix_listing_prefix + 'marker=001o/1&end_marker=001o/3'),
            ('GET', prefix_listing_prefix + 'marker=001o/2&end_marker=001o/3'),
            ('GET', '/v1/a/ver_cont/001o/2'),
            ('PUT', '/v1/a/c/o'),
            ('DELETE', '/v1/a/ver_cont/001o/2'),
        ])
class VersionedWritesCopyingTestCase(VersionedWritesBaseTestCase):
# verify interaction of copy and versioned_writes middlewares
    def setUp(self):
        """Build a copy -> versioned_writes -> FakeSwift pipeline."""
        self.app = FakeSwift()
        conf = {'allow_versioned_writes': 'true'}
        self.vw = versioned_writes.filter_factory(conf)(self.app)
        # copy middleware wraps versioned_writes, which wraps the fake proxy.
        self.filter = copy.filter_factory({})(self.vw)
    def call_filter(self, req, **kwargs):
        """Drive *req* through the head of the pipeline (copy middleware)."""
        return self.call_app(req, app=self.filter, **kwargs)
def test_copy_first_version(self):
# no existing object to move to the versions container
self.app.register(
'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPNotFound, {}, None)
self.app.register(
'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
'CONTENT_LENGTH': '100'},
headers={'Destination': 'tgt_cont/tgt_obj'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 3)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path)
# At the moment we are calling authorize on the incoming request in
# the middleware before we do the PUT (and the source GET) and again
# on the incoming request when it gets to the proxy. So the 2nd and
# 3rd auths look the same.
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual('PUT', self.authorized[2].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[2].path)
# note the GET on tgt_cont/tgt_obj is pre-authed
self.assertEqual(3, self.app.call_count, self.app.calls)
def test_copy_new_version(self):
# existing object should be moved to versions container
self.app.register(
'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/007tgt_obj/0000000001.00000', swob.HTTPOk,
{}, None)
self.app.register(
'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
'CONTENT_LENGTH': '100'},
headers={'Destination': 'tgt_cont/tgt_obj'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 3)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual(4, self.app.call_count)
def test_copy_new_version_different_account(self):
self.app.register(
'GET', '/v1/src_a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/tgt_a/ver_cont/007tgt_obj/0000000001.00000',
swob.HTTPOk, {}, None)
self.app.register(
'PUT', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPCreated, {},
'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/src_a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
'CONTENT_LENGTH': '100'},
headers={'Destination': 'tgt_cont/tgt_obj',
'Destination-Account': 'tgt_a'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 3)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/src_a/src_cont/src_obj', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/tgt_a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual(4, self.app.call_count)
def test_copy_object_no_versioning_with_container_config_true(self):
# set False to versions_write obviously and expect no extra
# COPY called (just copy object as normal)
self.vw.conf = {'allow_versioned_writes': 'false'}
self.app.register(
'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache},
headers={'Destination': '/tgt_cont/tgt_obj'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual(2, self.app.call_count)
class TestSwiftInfo(unittest.TestCase):
    """Check what versioned_writes registers in the /info endpoint."""

    def setUp(self):
        # Reset the process-global registries so tests don't leak
        # registrations into one another.
        utils._swift_info = {}
        utils._swift_admin_info = {}

    def test_registered_defaults(self):
        """By default the middleware must stay out of swift_info."""
        versioned_writes.filter_factory({})('have to pass in an app')
        info = utils.get_swift_info()
        self.assertNotIn('versioned_writes', info)

    def test_registered_explicitly_set(self):
        """Enabling the middleware advertises it with its allowed flags."""
        conf = {'allow_versioned_writes': 'true'}
        versioned_writes.filter_factory(conf)('have to pass in an app')
        info = utils.get_swift_info()
        self.assertIn('versioned_writes', info)
        flags = info['versioned_writes'].get('allowed_flags')
        self.assertEqual(
            flags, ('x-versions-location', 'x-history-location'))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
from ludwig.constants import *
from ludwig.decoders.sequence_decoders import DECODER_REGISTRY
from ludwig.encoders.sequence_encoders import ENCODER_REGISTRY as SEQUENCE_ENCODER_REGISTRY
from ludwig.encoders.text_encoders import *
from ludwig.features.base_feature import InputFeature
from ludwig.features.base_feature import OutputFeature
from ludwig.modules.loss_modules import SampledSoftmaxCrossEntropyLoss
from ludwig.modules.loss_modules import SequenceLoss
from ludwig.modules.metric_modules import EditDistanceMetric, \
SequenceAccuracyMetric
from ludwig.modules.metric_modules import PerplexityMetric
from ludwig.modules.metric_modules import SequenceLastAccuracyMetric
from ludwig.modules.metric_modules import SequenceLossMetric
from ludwig.modules.metric_modules import TokenAccuracyMetric
from ludwig.utils.math_utils import softmax
from ludwig.utils.metrics_utils import ConfusionMatrix
from ludwig.utils.misc_utils import set_default_value
from ludwig.utils.strings_utils import PADDING_SYMBOL
from ludwig.utils.strings_utils import UNKNOWN_SYMBOL
from ludwig.utils.strings_utils import build_sequence_matrix
from ludwig.utils.strings_utils import create_vocabulary
logger = logging.getLogger(__name__)
class SequenceFeatureMixin(object):
    """Shared preprocessing behaviour for sequence input/output features."""

    type = SEQUENCE

    # Defaults applied when the user config omits preprocessing options.
    preprocessing_defaults = {
        'sequence_length_limit': 256,
        'most_common': 20000,
        'padding_symbol': PADDING_SYMBOL,
        'unknown_symbol': UNKNOWN_SYMBOL,
        'padding': 'right',
        'tokenizer': 'space',
        'lowercase': False,
        'vocab_file': None,
        'missing_value_strategy': FILL_WITH_CONST,
        'fill_value': UNKNOWN_SYMBOL
    }

    @staticmethod
    def cast_column(feature, dataset_df, backend):
        # No cast needed at this stage; get_feature_meta() converts the
        # column to str itself.
        return dataset_df

    @staticmethod
    def get_feature_meta(column, preprocessing_parameters, backend):
        """Build vocabulary metadata (id maps, frequencies, max length)
        from a raw string column."""
        column = column.astype(str)
        idx2str, str2idx, str2freq, max_length, _, _, _ = create_vocabulary(
            column, preprocessing_parameters['tokenizer'],
            lowercase=preprocessing_parameters['lowercase'],
            num_most_frequent=preprocessing_parameters['most_common'],
            vocab_file=preprocessing_parameters['vocab_file'],
            unknown_symbol=preprocessing_parameters['unknown_symbol'],
            padding_symbol=preprocessing_parameters['padding_symbol'],
            processor=backend.df_engine
        )
        # Cap the observed max length by the configured limit.
        max_length = min(
            preprocessing_parameters['sequence_length_limit'],
            max_length
        )
        return {
            'idx2str': idx2str,
            'str2idx': str2idx,
            'str2freq': str2freq,
            'vocab_size': len(idx2str),
            'max_sequence_length': max_length
        }

    @staticmethod
    def feature_data(column, metadata, preprocessing_parameters, backend):
        """Convert a column of raw sequences into an integer id matrix."""
        sequence_data = build_sequence_matrix(
            sequences=column,
            inverse_vocabulary=metadata['str2idx'],
            tokenizer_type=preprocessing_parameters['tokenizer'],
            length_limit=metadata['max_sequence_length'],
            padding_symbol=preprocessing_parameters['padding_symbol'],
            padding=preprocessing_parameters['padding'],
            unknown_symbol=preprocessing_parameters['unknown_symbol'],
            lowercase=preprocessing_parameters['lowercase'],
            tokenizer_vocab_file=preprocessing_parameters[
                'vocab_file'
            ],
            processor=backend.df_engine
        )
        return sequence_data

    @staticmethod
    def add_feature_data(
            feature,
            input_df,
            proc_df,
            metadata,
            preprocessing_parameters,
            backend
    ):
        """Populate *proc_df* with the processed matrix for *feature*."""
        sequence_data = SequenceInputFeature.feature_data(
            input_df[feature[COLUMN]].astype(str),
            metadata[feature[NAME]], preprocessing_parameters,
            backend
        )
        proc_df[feature[PROC_COLUMN]] = sequence_data
        return proc_df
class SequenceInputFeature(SequenceFeatureMixin, InputFeature):
    """Input feature that encodes integer token-id sequences."""

    encoder = 'embed'
    max_sequence_length = None

    def __init__(self, feature, encoder_obj=None):
        super().__init__(feature)
        self.overwrite_defaults(feature)
        # Reuse a provided encoder (e.g. shared/tied weights) or build one.
        if encoder_obj:
            self.encoder_obj = encoder_obj
        else:
            self.encoder_obj = self.initialize_encoder(feature)

    def call(self, inputs, training=None, mask=None):
        """Encode a [batch, sequence] integer tensor.

        Adds the per-row non-padding token count under LENGTHS in the
        encoder output dict.
        """
        assert isinstance(inputs, tf.Tensor)
        assert inputs.dtype == tf.int8 or inputs.dtype == tf.int16 or \
            inputs.dtype == tf.int32 or inputs.dtype == tf.int64
        assert len(inputs.shape) == 2

        inputs_exp = tf.cast(inputs, dtype=tf.int32)
        # Token id 0 is treated as padding: mask it out and derive lengths.
        inputs_mask = tf.not_equal(inputs, 0)
        lengths = tf.reduce_sum(tf.cast(inputs_mask, dtype=tf.int32), axis=1)
        encoder_output = self.encoder_obj(
            inputs_exp, training=training, mask=inputs_mask
        )
        encoder_output[LENGTHS] = lengths
        return encoder_output

    @classmethod
    def get_input_dtype(cls):
        return tf.int32

    def get_input_shape(self):
        # Variable-length sequence dimension.
        return None,

    @staticmethod
    def update_config_with_metadata(
            input_feature,
            feature_metadata,
            *args,
            **kwargs
    ):
        # Propagate vocabulary and max length from preprocessing metadata.
        input_feature['vocab'] = feature_metadata['idx2str']
        input_feature['max_sequence_length'] = feature_metadata[
            'max_sequence_length']

    @staticmethod
    def populate_defaults(input_feature):
        set_default_value(input_feature, TIED, None)
        set_default_value(input_feature, 'encoder', 'parallel_cnn')

    encoder_registry = SEQUENCE_ENCODER_REGISTRY
class SequenceOutputFeature(SequenceFeatureMixin, OutputFeature):
    """Output feature that decodes and predicts sequences of token ids."""

    decoder = 'generator'
    loss = {TYPE: SOFTMAX_CROSS_ENTROPY}
    # Class-level placeholders; instances shadow this in _setup_metrics().
    metric_functions = {LOSS: None, TOKEN_ACCURACY: None,
                        SEQUENCE_ACCURACY: None, LAST_ACCURACY: None,
                        PERPLEXITY: None, EDIT_DISTANCE: None}
    default_validation_metric = LOSS
    max_sequence_length = 0
    num_classes = 0

    def __init__(self, feature):
        super().__init__(feature)
        self.overwrite_defaults(feature)
        self.decoder_obj = self.initialize_decoder(feature)
        self._setup_loss()
        self._setup_metrics()

    def _setup_loss(self):
        """Instantiate train/eval loss objects from the loss config."""
        if self.loss[TYPE] == 'softmax_cross_entropy':
            self.train_loss_function = SequenceLoss()
        elif self.loss[TYPE] == 'sampled_softmax_cross_entropy':
            self.train_loss_function = SampledSoftmaxCrossEntropyLoss(
                decoder_obj=self.decoder_obj,
                num_classes=self.num_classes,
                feature_loss=self.loss,
                name='train_loss'
            )
        else:
            raise ValueError(
                "Loss type {} is not supported. Valid values are "
                "'softmax_cross_entropy' or "
                "'sampled_softmax_cross_entropy'".format(self.loss[TYPE])
            )
        self.eval_loss_function = SequenceLossMetric()

    def _setup_metrics(self):
        self.metric_functions = {}  # needed to shadow class variable
        self.metric_functions[LOSS] = self.eval_loss_function
        self.metric_functions[TOKEN_ACCURACY] = TokenAccuracyMetric()
        self.metric_functions[SEQUENCE_ACCURACY] = SequenceAccuracyMetric()
        self.metric_functions[LAST_ACCURACY] = SequenceLastAccuracyMetric()
        self.metric_functions[PERPLEXITY] = PerplexityMetric()
        self.metric_functions[EDIT_DISTANCE] = EditDistanceMetric()

    # overrides super class OutputFeature.update_metrics() method
    def update_metrics(self, targets, predictions):
        """Feed each metric the slice of *predictions* it consumes."""
        for metric, metric_fn in self.metric_functions.items():
            if metric == LOSS or metric == PERPLEXITY:
                metric_fn.update_state(targets, predictions)
            elif metric == LAST_ACCURACY:
                metric_fn.update_state(targets, predictions[LAST_PREDICTIONS])
            else:
                metric_fn.update_state(targets, predictions[PREDICTIONS])

    def logits(
            self,
            inputs,
            target=None,
            training=None
    ):
        # With a target during training the decoder computes logits from
        # the target sequence (teacher forcing); otherwise pass through.
        if training and target is not None:
            return self.decoder_obj._logits_training(
                inputs,
                target=tf.cast(target, dtype=tf.int32),
                training=training
            )
        else:
            return inputs

    def predictions(self, inputs, training=None):
        # Generator Decoder
        return self.decoder_obj._predictions_eval(inputs, training=training)

    @classmethod
    def get_output_dtype(cls):
        return tf.int32

    def get_output_shape(self):
        return self.max_sequence_length,

    @staticmethod
    def update_config_with_metadata(
            output_feature,
            feature_metadata,
            *args,
            **kwargs
    ):
        """Fill the feature config from preprocessing metadata and validate
        loss-related settings (class weights, class similarities)."""
        output_feature['num_classes'] = feature_metadata['vocab_size']
        output_feature['max_sequence_length'] = (
            feature_metadata['max_sequence_length']
        )
        if isinstance(output_feature[LOSS]['class_weights'], (list, tuple)):
            # A per-class weight list must cover every class, including the
            # special <UNK>/<PAD> entries.
            if (len(output_feature[LOSS]['class_weights']) !=
                    output_feature['num_classes']):
                raise ValueError(
                    'The length of class_weights ({}) is not compatible with '
                    'the number of classes ({}) for feature {}. '
                    'Check the metadata JSON file to see the classes '
                    'and their order and consider there needs to be a weight '
                    'for the <UNK> and <PAD> class too.'.format(
                        len(output_feature[LOSS]['class_weights']),
                        output_feature['num_classes'],
                        output_feature[COLUMN]
                    )
                )

        if output_feature[LOSS]['class_similarities_temperature'] > 0:
            if 'class_similarities' in output_feature[LOSS]:
                similarities = output_feature[LOSS]['class_similarities']
                temperature = output_feature[LOSS][
                    'class_similarities_temperature']

                # Validate that the similarity matrix is rectangular: every
                # row must have the same length as the first row.
                curr_row = 0
                first_row_length = 0
                is_first_row = True
                for row in similarities:
                    if is_first_row:
                        first_row_length = len(row)
                        is_first_row = False
                        curr_row += 1
                    else:
                        curr_row_length = len(row)
                        if curr_row_length != first_row_length:
                            raise ValueError(
                                'The length of row {} of the class_similarities '
                                'of {} is {}, different from the length of '
                                'the first row {}. All rows must have '
                                'the same length.'.format(
                                    curr_row,
                                    output_feature[COLUMN],
                                    curr_row_length,
                                    first_row_length
                                )
                            )
                        else:
                            curr_row += 1
                all_rows_length = first_row_length

                # The matrix must be square ...
                if all_rows_length != len(similarities):
                    raise ValueError(
                        'The class_similarities matrix of {} has '
                        '{} rows and {} columns, '
                        'their number must be identical.'.format(
                            output_feature[COLUMN],
                            len(similarities),
                            all_rows_length
                        )
                    )

                # ... and sized to the number of classes.
                # NOTE(review): "classe" typo in the message below is in the
                # original runtime string; left unchanged here.
                if all_rows_length != output_feature['num_classes']:
                    raise ValueError(
                        'The size of the class_similarities matrix of {} is '
                        '{}, different from the number of classe ({}). '
                        'Check the metadata JSON file to see the classes '
                        'and their order and '
                        'consider <UNK> and <PAD> class too.'.format(
                            output_feature[COLUMN],
                            all_rows_length,
                            output_feature['num_classes']
                        )
                    )

                # Soften each row of the matrix with a temperature softmax.
                similarities = np.array(similarities, dtype=np.float32)
                for i in range(len(similarities)):
                    similarities[i, :] = softmax(
                        similarities[i, :],
                        temperature=temperature
                    )

                output_feature[LOSS]['class_similarities'] = similarities
            else:
                raise ValueError(
                    'class_similarities_temperature > 0, '
                    'but no class_similarities are provided '
                    'for feature {}'.format(output_feature[COLUMN])
                )

        if output_feature[LOSS][TYPE] == 'sampled_softmax_cross_entropy':
            # The sampled-softmax sampler needs per-class frequencies.
            output_feature[LOSS]['class_counts'] = [
                feature_metadata['str2freq'][cls]
                for cls in feature_metadata['idx2str']
            ]

    @staticmethod
    def calculate_overall_stats(
            predictions,
            targets,
            train_set_metadata
    ):
        """Build confusion-matrix stats comparing the last non-padding
        target token of each sequence against LAST_PREDICTIONS."""
        overall_stats = {}
        sequences = targets
        # Index of the last non-zero (non-padding) token in each row.
        last_elem_sequence = sequences[np.arange(sequences.shape[0]),
                                       (sequences != 0).cumsum(1).argmax(1)]
        confusion_matrix = ConfusionMatrix(
            last_elem_sequence,
            predictions[LAST_PREDICTIONS],
            labels=train_set_metadata['idx2str']
        )
        overall_stats['confusion_matrix'] = confusion_matrix.cm.tolist()
        overall_stats['overall_stats'] = confusion_matrix.stats()
        overall_stats['per_class_stats'] = confusion_matrix.per_class_stats()
        return overall_stats

    def postprocess_predictions(
            self,
            result,
            metadata,
            output_directory,
            skip_save_unprocessed_output=False,
    ):
        """Map raw prediction tensors back to token strings and optionally
        save the raw numpy arrays under *output_directory*.

        Consumes (deletes) the keys it processes from *result*.
        """
        postprocessed = {}
        name = self.feature_name

        npy_filename = os.path.join(output_directory, '{}_{}.npy')

        if PREDICTIONS in result and len(result[PREDICTIONS]) > 0:
            preds = result[PREDICTIONS].numpy()
            lengths = result[LENGTHS].numpy()
            if 'idx2str' in metadata:
                # Translate ids to strings, truncating each row at its true
                # length; out-of-vocabulary ids map to UNKNOWN_SYMBOL.
                postprocessed[PREDICTIONS] = [
                    [metadata['idx2str'][token]
                     if token < len(metadata['idx2str']) else UNKNOWN_SYMBOL
                     for token in [pred[i] for i in range(length)]]
                    for pred, length in
                    [(preds[j], lengths[j]) for j in range(len(preds))]
                ]
            else:
                postprocessed[PREDICTIONS] = preds

            if not skip_save_unprocessed_output:
                np.save(npy_filename.format(name, PREDICTIONS), preds)

            del result[PREDICTIONS]

        if LAST_PREDICTIONS in result and len(result[LAST_PREDICTIONS]) > 0:
            last_preds = result[LAST_PREDICTIONS].numpy()
            if 'idx2str' in metadata:
                postprocessed[LAST_PREDICTIONS] = [
                    metadata['idx2str'][last_pred]
                    if last_pred < len(metadata['idx2str']) else UNKNOWN_SYMBOL
                    for last_pred in last_preds
                ]
            else:
                postprocessed[LAST_PREDICTIONS] = last_preds

            if not skip_save_unprocessed_output:
                np.save(npy_filename.format(name, LAST_PREDICTIONS),
                        last_preds)

            del result[LAST_PREDICTIONS]

        if PROBABILITIES in result and len(result[PROBABILITIES]) > 0:
            probs = result[PROBABILITIES].numpy()
            if probs is not None:
                # probs should be shape [b, s, nc]
                if len(probs.shape) == 3:
                    # get probability of token in that sequence position
                    seq_probs = np.amax(probs, axis=-1)
                    # sum log probability for tokens up to sequence length
                    # create mask only tokens for sequence length
                    mask = np.arange(seq_probs.shape[-1]) \
                        < np.array(result[LENGTHS]).reshape(-1, 1)
                    log_prob = np.sum(np.log(seq_probs) * mask, axis=-1)

                    # commenting probabilities out because usually it is huge:
                    # dataset x length x classes
                    # todo: add a mechanism for letting the user decide to save it
                    postprocessed[PROBABILITIES] = seq_probs
                    postprocessed[PROBABILITY] = log_prob
                else:
                    raise ValueError(
                        'Sequence probability array should be 3-dimensional '
                        'shape, instead shape is {:d}-dimensional'
                        .format(len(probs.shape))
                    )

                if not skip_save_unprocessed_output:
                    np.save(npy_filename.format(name, PROBABILITIES), seq_probs)
                    np.save(npy_filename.format(name, PROBABILITY), log_prob)

            del result[PROBABILITIES]

        if LENGTHS in result:
            del result[LENGTHS]

        return postprocessed

    @staticmethod
    def populate_defaults(output_feature):
        """Fill in defaults for loss and decoder settings."""
        set_default_value(
            output_feature,
            LOSS,
            {
                TYPE: 'softmax_cross_entropy',
                'sampler': None,
                'negative_samples': 0,
                'distortion': 1,
                'labels_smoothing': 0,
                'class_weights': 1,
                'robust_lambda': 0,
                'confidence_penalty': 0,
                'class_similarities_temperature': 0,
                'weight': 1
            }
        )
        set_default_value(output_feature[LOSS], TYPE,
                          'softmax_cross_entropy')
        set_default_value(output_feature[LOSS], 'labels_smoothing', 0)
        set_default_value(output_feature[LOSS], 'class_weights', 1)
        set_default_value(output_feature[LOSS], 'robust_lambda', 0)
        set_default_value(output_feature[LOSS], 'confidence_penalty', 0)
        set_default_value(output_feature[LOSS],
                          'class_similarities_temperature', 0)
        set_default_value(output_feature[LOSS], 'weight', 1)

        # Sampler-related defaults differ by loss type.
        if output_feature[LOSS][TYPE] == 'sampled_softmax_cross_entropy':
            set_default_value(output_feature[LOSS], 'sampler', 'log_uniform')
            set_default_value(output_feature[LOSS], 'negative_samples', 25)
            set_default_value(output_feature[LOSS], 'distortion', 0.75)
        else:
            set_default_value(output_feature[LOSS], 'sampler', None)
            set_default_value(output_feature[LOSS], 'negative_samples', 0)
            set_default_value(output_feature[LOSS], 'distortion', 1)

        set_default_value(output_feature[LOSS], 'unique', False)
        set_default_value(output_feature, 'decoder', 'generator')

        # Tagger decoders keep the full sequence (no input reduction).
        if output_feature['decoder'] == 'tagger':
            set_default_value(output_feature, 'reduce_input', None)

        set_default_value(output_feature, 'dependencies', [])
        set_default_value(output_feature, 'reduce_input', SUM)
        set_default_value(output_feature, 'reduce_dependencies', SUM)

    decoder_registry = DECODER_REGISTRY
| |
"""
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
# Surface numpy floating-point issues as warnings during the tests.
np.seterr(all='warn')

# Activation functions exercised by the parametrized loops below.
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]

# Small 200-sample slices of the digits dataset keep the tests fast;
# features are scaled to [0, 1].
digits_dataset_multi = load_digits(n_class=3)

X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]

digits_dataset_binary = load_digits(n_class=2)

X_digits_binary = MinMaxScaler().fit_transform(
    digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]

classification_datasets = [(X_digits_multi, y_digits_multi),
                           (X_digits_binary, y_digits_binary)]

# Standardized Boston housing subset for the regression tests.
boston = load_boston()

Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
def test_alpha():
    # Test that larger alpha yields weights closer to zero
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]

    def weight_l1(clf):
        # L1 norm of each weight matrix, as a 2-vector.
        return np.array([np.sum(np.abs(clf.coefs_[0])),
                         np.sum(np.abs(clf.coefs_[1]))])

    norms = []
    for alpha in np.arange(2):
        clf = MLPClassifier(hidden_layer_sizes=10, alpha=alpha,
                            random_state=1)
        with ignore_warnings(category=ConvergenceWarning):
            clf.fit(X, y)
        norms.append(weight_l1(clf))

    # Each stronger regularizer must shrink both layers' weights.
    for smaller_alpha, larger_alpha in zip(norms, norms[1:]):
        assert (smaller_alpha > larger_alpha).all()
def test_fit():
    # Test that the algorithm solution is equal to a worked out example.
    # A single SGD step on one sample is compared against the hand-derived
    # weight updates in the long comment below.
    X = np.array([[0.6, 0.8, 0.7]])
    y = np.array([0])
    mlp = MLPClassifier(algorithm='sgd', learning_rate_init=0.1, alpha=0.1,
                        activation='logistic', random_state=1, max_iter=1,
                        hidden_layer_sizes=2, momentum=0)
    # set weights
    mlp.coefs_ = [0] * 2
    mlp.intercepts_ = [0] * 2
    mlp.classes_ = [0, 1]
    mlp.n_outputs_ = 1
    mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
    mlp.coefs_[1] = np.array([[0.1], [0.2]])
    mlp.intercepts_[0] = np.array([0.1, 0.1])
    mlp.intercepts_[1] = np.array([1.0])
    mlp._coef_grads = [] * 2
    mlp._intercept_grads = [] * 2
    mlp.label_binarizer_.y_type_ = 'binary'

    # Initialize parameters
    mlp.n_iter_ = 0
    mlp.learning_rate_ = 0.1

    # Compute the number of layers
    mlp.n_layers_ = 3

    # Pre-allocate gradient matrices
    mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
    mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)

    mlp.out_activation_ = 'logistic'
    mlp.t_ = 0
    mlp.best_loss_ = np.inf
    mlp.loss_curve_ = []
    mlp._no_improvement_count = 0
    mlp._intercept_velocity = [np.zeros_like(intercepts) for
                               intercepts in
                               mlp.intercepts_]
    mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
                          mlp.coefs_]

    mlp.partial_fit(X, y, classes=[0, 1])
    # Manually worked out example
    # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
    #       =  0.679178699175393
    # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
    #         = 0.574442516811659
    # o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
    #       = 0.7654329236196236
    # d21 = -(0 - 0.765) = 0.765
    # d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
    # d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
    # W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
    # W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
    # W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
    # W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
    # W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
    # W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
    # W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
    # b1grad1 = d11 = 0.01667
    # b1grad2 = d12 = 0.0374
    # b2grad = d21 = 0.765
    # W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
    #          [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
    #          [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
    #          0.096008], [0.4939998, -0.002244]]
    # W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
    #        [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
    # b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
    #      = [0.098333, 0.09626]
    # b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
    assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
                                                 [0.2956664, 0.096008],
                                                 [0.4939998, -0.002244]]),
                        decimal=3)
    assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
                        decimal=3)
    assert_almost_equal(mlp.intercepts_[0],
                        np.array([0.098333, 0.09626]), decimal=3)
    assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
    # Testing output
    #  h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
    #               0.7 * 0.4939998 + 0.098333) = 0.677
    #  h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
    #            0.7 * -0.002244 + 0.09626) = 0.572
    #  o1 = h * W2 + b21 = 0.677 * 0.04706 +
    #             0.572 * 0.154089 + 0.9235 = 1.043
    # prob = sigmoid(o1) = 0.739
    assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
    # Test gradient.
    # This makes sure that the activation functions and their derivatives
    # are correct. The numerical and analytical computation of the gradient
    # should be close.
    for n_labels in [2, 3]:
        n_samples = 5
        n_features = 10
        X = np.random.random((n_samples, n_features))
        y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
        Y = LabelBinarizer().fit_transform(y)

        for activation in ACTIVATION_TYPES:
            mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
                                algorithm='l-bfgs', alpha=1e-5,
                                learning_rate_init=0.2, max_iter=1,
                                random_state=1)
            mlp.fit(X, y)

            # Flatten all weights and biases into one parameter vector.
            theta = np.hstack([l.ravel() for l in mlp.coefs_ +
                               mlp.intercepts_])

            layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
                           [mlp.n_outputs_])

            # Pre-allocate the work buffers _loss_grad_lbfgs fills in.
            activations = []
            deltas = []
            coef_grads = []
            intercept_grads = []

            activations.append(X)
            for i in range(mlp.n_layers_ - 1):
                activations.append(np.empty((X.shape[0],
                                             layer_units[i + 1])))
                deltas.append(np.empty((X.shape[0],
                                        layer_units[i + 1])))

                fan_in = layer_units[i]
                fan_out = layer_units[i + 1]
                coef_grads.append(np.empty((fan_in, fan_out)))
                intercept_grads.append(np.empty(fan_out))

            # analytically compute the gradients
            def loss_grad_fun(t):
                return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
                                            coef_grads, intercept_grads)

            [value, grad] = loss_grad_fun(theta)
            numgrad = np.zeros(np.size(theta))
            n = np.size(theta, 0)
            E = np.eye(n)
            epsilon = 1e-5
            # numerically compute the gradients via central differences
            for i in range(n):
                dtheta = E[:, i] * epsilon
                numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
                               loss_grad_fun(theta - dtheta)[0]) /
                              (epsilon * 2.0))
            assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
    # Test lbfgs on classification.
    # It should achieve a score higher than 0.95 for the binary and
    # multi-class versions of the digits dataset.
    for X, y in classification_datasets:
        X_train, y_train = X[:150], y[:150]
        X_test = X[150:]
        expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)

        for activation in ACTIVATION_TYPES:
            clf = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50,
                                max_iter=150, shuffle=True, random_state=1,
                                activation=activation)
            clf.fit(X_train, y_train)
            predicted = clf.predict(X_test)
            assert_greater(clf.score(X_train, y_train), 0.95)
            # Predictions keep the label dtype and one entry per sample.
            assert_equal((predicted.shape[0], predicted.dtype.kind),
                         expected_shape_dtype)
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
    for activation in ACTIVATION_TYPES:
        reg = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        reg.fit(Xboston, yboston)
        # The linear (identity) bottleneck scores lower than the
        # non-linear activations.
        minimum_score = 0.84 if activation == 'identity' else 0.95
        assert_greater(reg.score(Xboston, yboston), minimum_score)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
    X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
    y = [1, 1, 1, 0]
    for schedule in ["invscaling", "constant"]:
        clf = MLPClassifier(algorithm='sgd', hidden_layer_sizes=4,
                            learning_rate=schedule, max_iter=1,
                            power_t=0.25, warm_start=True)
        with ignore_warnings(category=ConvergenceWarning):
            clf.fit(X, y)
            eta_first = clf._optimizer.learning_rate
            clf.fit(X, y)
            eta_second = clf._optimizer.learning_rate
        if schedule == 'constant':
            # Constant schedule: eta must not change across warm restarts.
            assert_equal(eta_first, eta_second)
        elif schedule == 'invscaling':
            # Inverse scaling keeps decaying from the accumulated t_.
            assert_equal(clf.learning_rate_init / pow(8 + 1, clf.power_t),
                         eta_second)
def test_multilabel_classification():
    # Test that multi-label classification works as expected.
    # test fit method
    X, y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50, alpha=1e-5,
                        max_iter=150, random_state=0, activation='logistic',
                        learning_rate_init=0.2)
    mlp.fit(X, y)
    # The tiny dataset should be fit perfectly.
    assert_equal(mlp.score(X, y), 1)

    # test partial fit method
    mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=50, max_iter=150,
                        random_state=0, activation='logistic', alpha=1e-5,
                        learning_rate_init=0.2)
    for i in range(100):
        mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
    assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
    # Test that multi-output regression works as expected
    X, y = make_regression(n_samples=200, n_targets=5)
    reg = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
                       max_iter=200, random_state=1)
    # fit() returns the estimator, so chain straight into scoring.
    assert_greater(reg.fit(X, y).score(X, y), 0.9)
def test_partial_fit_classes_error():
    # Tests that passing different classes to partial_fit raises an error
    X, y = [[3, 2]], [0]
    clf = MLPClassifier(algorithm='sgd')
    clf.partial_fit(X, y, classes=[0, 1])
    # A second call with an incompatible class set must fail.
    assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
    # Test partial_fit on classification.
    # `partial_fit` should yield the same results as 'fit' for binary and
    # multi-class classification.
    for X, y in classification_datasets:
        # (removed the no-op `X = X` / `y = y` self-assignments)
        mlp = MLPClassifier(algorithm='sgd', max_iter=100, random_state=1,
                            tol=0, alpha=1e-5, learning_rate_init=0.2)

        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        pred1 = mlp.predict(X)

        # Re-train with 100 partial_fit epochs and the same hyperparameters.
        mlp = MLPClassifier(algorithm='sgd', random_state=1, alpha=1e-5,
                            learning_rate_init=0.2)
        for i in range(100):
            mlp.partial_fit(X, y, classes=np.unique(y))
        pred2 = mlp.predict(X)

        # Both training paths must agree exactly and fit well.
        assert_array_equal(pred1, pred2)
        assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_regression():
    # Test partial_fit on regression.
    # `partial_fit` should yield the same results as 'fit' for regression.
    X = Xboston
    y = yboston

    for momentum in [0, .9]:
        # batch_size equal to the sample count makes one fit() epoch
        # comparable to one partial_fit() call.
        mlp = MLPRegressor(algorithm='sgd', max_iter=100, activation='relu',
                           random_state=1, learning_rate_init=0.01,
                           batch_size=X.shape[0], momentum=momentum)
        with warnings.catch_warnings(record=True):
            # catch convergence warning
            mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPRegressor(algorithm='sgd', activation='relu',
                           learning_rate_init=0.01, random_state=1,
                           batch_size=X.shape[0], momentum=momentum)
        for i in range(100):
            mlp.partial_fit(X, y)

        pred2 = mlp.predict(X)
        assert_almost_equal(pred1, pred2, decimal=2)
        score = mlp.score(X, y)
        assert_greater(score, 0.75)
def test_partial_fit_errors():
    # Error handling for partial_fit.
    X, y = [[3, 2], [1, 6]], [1, 0]
    # A `classes` set that does not cover the labels must fail.
    assert_raises(ValueError,
                  MLPClassifier(algorithm='sgd').partial_fit,
                  X, y,
                  classes=[2])
    # The l-bfgs algorithm does not expose partial_fit at all.
    assert_false(hasattr(MLPClassifier(algorithm='l-bfgs'), 'partial_fit'))
def test_params_errors():
    # Every invalid hyper-parameter setting must raise ValueError at fit
    # time. Table-driven: one kwargs dict per bad setting.
    X, y = [[3, 2], [1, 6]], [1, 0]
    bad_settings = [
        dict(hidden_layer_sizes=-1),
        dict(max_iter=-1),
        dict(shuffle='true'),
        dict(alpha=-1),
        dict(learning_rate_init=-1),
        dict(momentum=2),
        dict(momentum=-0.5),
        dict(nesterovs_momentum='invalid'),
        dict(early_stopping='invalid'),
        dict(validation_fraction=1),
        dict(validation_fraction=-0.5),
        dict(beta_1=1),
        dict(beta_1=-0.5),
        dict(beta_2=1),
        dict(beta_2=-0.5),
        dict(epsilon=-0.5),
        dict(algorithm='hadoken'),
        dict(learning_rate='converge'),
        dict(activation='cloak'),
    ]
    for kwargs in bad_settings:
        assert_raises(ValueError, MLPClassifier(**kwargs).fit, X, y)
def test_predict_proba_binary():
    # Binary case: probabilities and log-probabilities must be mutually
    # consistent and yield a perfect ROC AUC on the training data.
    X = X_digits_binary[:50]
    y = y_digits_binary[:50]
    clf = MLPClassifier(hidden_layer_sizes=5)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    proba = clf.predict_proba(X)
    log_proba = clf.predict_log_proba(X)
    assert_equal(proba.shape, (y.shape[0], 2))
    assert_array_equal(proba.argmax(axis=1), log_proba.argmax(axis=1))
    assert_array_equal(log_proba, np.log(proba))
    assert_equal(roc_auc_score(y, proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
    # Multi-class case: predict_proba's shape must be (n_samples,
    # n_classes) and predict_log_proba must be its elementwise log.
    X = X_digits_multi[:10]
    y = y_digits_multi[:10]
    clf = MLPClassifier(hidden_layer_sizes=5)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    proba = clf.predict_proba(X)
    log_proba = clf.predict_log_proba(X)
    expected_shape = (y.shape[0], np.unique(y).size)
    assert_equal(proba.shape, expected_shape)
    assert_array_equal(proba.argmax(axis=1), log_proba.argmax(axis=1))
    assert_array_equal(log_proba, np.log(proba))
def test_predict_proba_multilabel():
    # Multilabel output should NOT go through a softmax, so per-sample
    # probabilities need not sum to one.
    X, Y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    n_samples, n_classes = Y.shape
    clf = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=30,
                        random_state=0)
    clf.fit(X, Y)
    proba = clf.predict_proba(X)
    assert_equal(proba.shape, (n_samples, n_classes))
    assert_array_equal(proba > 0.5, Y)
    log_proba = clf.predict_log_proba(X)
    # Squared deviation of the row sums from 1 must be clearly non-zero,
    # proving no softmax normalization happened.
    row_sum_err = proba.sum(1) - 1
    assert_greater(row_sum_err.dot(row_sum_err), 1e-10)
    assert_array_equal(proba.argmax(axis=1), log_proba.argmax(axis=1))
    assert_array_equal(log_proba, np.log(proba))
def test_sparse_matrices():
    # Dense and CSR inputs must produce the same predictions, both when
    # used for training and when used only at predict time.
    X = X_digits_binary[:50]
    y = y_digits_binary[:50]
    X_sp = csr_matrix(X)
    clf = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=15,
                        random_state=1)
    clf.fit(X, y)
    dense_pred = clf.predict(X)
    clf.fit(X_sp, y)
    sparse_pred = clf.predict(X_sp)
    assert_almost_equal(dense_pred, sparse_pred)
    # The sparse-trained model must also agree across input formats.
    assert_array_equal(clf.predict(X), clf.predict(X_sp))
def test_tolerance():
    # A large tolerance should make SGD stop well before max_iter.
    X, y = [[3, 2], [1, 6]], [1, 0]
    clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd')
    clf.fit(X, y)
    assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
    # Verbose mode must report per-iteration progress on stdout.
    X, y = [[3, 2], [1, 6]], [1, 0]
    clf = MLPClassifier(algorithm='sgd', max_iter=2, verbose=10,
                        hidden_layer_sizes=2)
    saved_stdout = sys.stdout
    sys.stdout = captured = StringIO()
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    clf.partial_fit(X, y)
    sys.stdout = saved_stdout
    assert 'Iteration' in captured.getvalue()
def test_early_stopping():
    # Early stopping on a validation split should halt before max_iter;
    # the recorded best score must be the maximum seen and must dominate
    # the last two validation scores (within tol).
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]
    tol = 0.2
    clf = MLPClassifier(tol=tol, max_iter=3000, algorithm='sgd',
                        early_stopping=True)
    clf.fit(X, y)
    assert_greater(clf.max_iter, clf.n_iter_)
    scores = clf.validation_scores_
    best = clf.best_validation_score_
    assert_equal(max(scores), best)
    assert_greater(best + tol, scores[-2])
    assert_greater(best + tol, scores[-1])
def test_adaptive_learning_rate():
    # With the 'adaptive' schedule the effective learning rate must have
    # decayed below 1e-6 by the time training stops early.
    X, y = [[3, 2], [1, 6]], [1, 0]
    clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd',
                        learning_rate='adaptive')
    clf.fit(X, y)
    assert_greater(clf.max_iter, clf.n_iter_)
    assert_greater(1e-6, clf._optimizer.learning_rate)
| |
# $Id$
#
# Copyright (C) 2003-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the ScreenComposite functionality
"""
import io
import os
import unittest
from rdkit import RDConfig
from rdkit.ML import ScreenComposite
from rdkit.six.moves import cPickle as pickle
class TestCase(unittest.TestCase):
    """Regression tests for ScreenComposite.ScreenFromDetails.

    Each test loads a pre-built composite model pickled under
    ML/test_data, screens it against a table in the test database, and
    checks the exact counts, average confidences, and confusion matrix.

    Refactored: the 12 identical copies of the pickle-loading boilerplate
    are now in _loadComposite (which also drops the redundant
    pklTF.close() that sat inside the `with` block), and the screening
    call is in _screen.
    """

    def setUp(self):
        self.baseDir = os.path.join(RDConfig.RDCodeDir, 'ML', 'test_data')
        self.dbName = RDConfig.RDTestDatabase
        self.details = ScreenComposite.SetDefaults()
        self.details.dbName = self.dbName
        self.details.dbUser = RDConfig.defaultDBUser
        self.details.dbPassword = RDConfig.defaultDBPassword

    def _loadComposite(self, fileName, tgt):
        """Load a pickled composite from test_data and check its size.

        The pickles were written with Unix line endings; normalize CRLF
        so the load also works on checkouts with newline conversion.
        """
        with open(os.path.join(self.baseDir, fileName), 'r') as pklTF:
            buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
        with io.BytesIO(buf) as pklF:
            compos = pickle.load(pklF)
        self.assertEqual(len(compos), tgt,
                         'bad composite loaded: %d != %d' % (len(compos), tgt))
        return compos

    def _screen(self, compos):
        # Run the screen with the current details and return the full
        # (nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl) tuple.
        return ScreenComposite.ScreenFromDetails(compos, self.details)

    def test1_basics(self):
        # """ basics """
        self.details.tableName = 'ferro_quant'
        compos = self._loadComposite('ferromag_quant_10.pkl', 5)
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 93)
        self.assertEqual(misCount, 2)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .9871, 4)
        self.assertAlmostEqual(avgBad, .8000, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 54)
        self.assertEqual(tbl[1, 1], 39)
        self.assertEqual(tbl[0, 1], 2)
        self.assertEqual(tbl[1, 0], 0)

    def test2_include_holdout(self):
        # """ include holdout data only """
        self.details.tableName = 'ferro_quant'
        self.details.doHoldout = 1
        self.details.doTraining = 0
        compos = self._loadComposite('ferromag_quant_10.pkl', 5)
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 28)
        self.assertEqual(misCount, 1)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .9964, 4)
        self.assertAlmostEqual(avgBad, 1.000, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 16)
        self.assertEqual(tbl[1, 1], 12)
        self.assertEqual(tbl[0, 1], 1)
        self.assertEqual(tbl[1, 0], 0)

    def test3_include_training(self):
        # """ include training data only """
        self.details.tableName = 'ferro_quant'
        self.details.doHoldout = 0
        self.details.doTraining = 1
        compos = self._loadComposite('ferromag_quant_10.pkl', 5)
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 65)
        self.assertEqual(misCount, 1)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .98307, 4)
        self.assertAlmostEqual(avgBad, 0.600, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 38, tbl)
        self.assertEqual(tbl[1, 1], 27)
        self.assertEqual(tbl[0, 1], 1)
        self.assertEqual(tbl[1, 0], 0)

    def test4_thresholding(self):
        # """ include thresholding """
        self.details.tableName = 'ferro_quant'
        self.details.threshold = 0.80
        self.details.doHoldout = 0
        self.details.doTraining = 0
        compos = self._loadComposite('ferromag_quant_10.pkl', 5)
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 91)
        self.assertEqual(misCount, 1)
        self.assertEqual(nSkipped, 3)
        self.assertAlmostEqual(avgGood, 0.9956, 4)
        self.assertAlmostEqual(avgBad, 1.000, 4)
        self.assertAlmostEqual(avgSkip, 0.6000, 4)
        self.assertEqual(tbl[0, 0], 54)
        self.assertEqual(tbl[1, 1], 37)
        self.assertEqual(tbl[0, 1], 1)
        self.assertEqual(tbl[1, 0], 0)

    def test5_basics(self):
        # """ basics """
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_auto_10_3.pkl', 10)
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 95)
        self.assertEqual(misCount, 8)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .9684, 4)
        self.assertAlmostEqual(avgBad, .8375, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 50)
        self.assertEqual(tbl[1, 1], 45)
        self.assertEqual(tbl[0, 1], 5)
        self.assertEqual(tbl[1, 0], 3)

    def test6_multiple_models(self):
        # """ multiple models """
        # When a list of composites is screened, per-composite results
        # come back as sequences; only the first composite is scored here.
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_auto_10_3.pkl', 10)
        composites = [compos, compos]
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(composites)
        self.assertEqual(nGood[0], 95)
        self.assertEqual(misCount[0], 8)
        self.assertEqual(nSkipped[0], 0)
        self.assertAlmostEqual(avgGood[0], .9684, 4)
        self.assertAlmostEqual(avgBad[0], .8375, 4)
        self.assertAlmostEqual(avgSkip[0], 0.0, 4)
        self.assertEqual(nGood[1], 0)
        self.assertEqual(misCount[1], 0)
        self.assertEqual(nSkipped[1], 0)
        self.assertEqual(avgGood[1], 0)
        self.assertEqual(avgBad[1], 0)
        self.assertEqual(avgSkip[1], 0)
        self.assertEqual(tbl[0, 0], 50)
        self.assertEqual(tbl[1, 1], 45)
        self.assertEqual(tbl[0, 1], 5)
        self.assertEqual(tbl[1, 0], 3)

    def test7_shuffle(self):
        # """ shuffle """
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_shuffle_10_3.pkl', 10)
        self.details.shuffleActivities = 1
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 50)
        self.assertEqual(misCount, 53)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .7380, 4)
        self.assertAlmostEqual(avgBad, .7660, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 30)
        self.assertEqual(tbl[1, 1], 20)
        self.assertEqual(tbl[0, 1], 25)
        self.assertEqual(tbl[1, 0], 28)

    def test8_shuffle_segmentation(self):
        # """ shuffle with segmentation """
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_shuffle_10_3.pkl', 10)
        self.details.shuffleActivities = 1
        self.details.doHoldout = 1
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 19)
        self.assertEqual(misCount, 12)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .7737, 4)
        self.assertAlmostEqual(avgBad, .7500, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 12)
        self.assertEqual(tbl[1, 1], 7)
        self.assertEqual(tbl[0, 1], 6)
        self.assertEqual(tbl[1, 0], 6)

    def test9_shuffle_segmentation2(self):
        # """ shuffle with segmentation2 """
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_shuffle_10_3.pkl', 10)
        self.details.shuffleActivities = 1
        self.details.doTraining = 1
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 31)
        self.assertEqual(misCount, 41)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .7161, 4)
        self.assertAlmostEqual(avgBad, .7707, 4)
        self.assertAlmostEqual(avgSkip, 0.0, 4)
        self.assertEqual(tbl[0, 0], 18)
        self.assertEqual(tbl[1, 1], 13)
        self.assertEqual(tbl[0, 1], 19)
        self.assertEqual(tbl[1, 0], 22)

    def test10_filtering(self):
        # """ filtering """
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_filt_10_3.pkl', 10)
        self.details.filterVal = 1
        self.details.filterFrac = .33
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 90)
        self.assertEqual(misCount, 13)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .9578, 4)
        self.assertAlmostEqual(avgBad, .8538, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 54)
        self.assertEqual(tbl[1, 1], 36)
        self.assertEqual(tbl[0, 1], 1)
        self.assertEqual(tbl[1, 0], 12)

    def test11_filtering_segmentation(self):
        # """ filtering with segmentation """
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_filt_10_3.pkl', 10)
        self.details.doHoldout = 1
        self.details.filterVal = 1
        self.details.filterFrac = .33
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 37)
        self.assertEqual(misCount, 6)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, .95946, 4)
        self.assertAlmostEqual(avgBad, .85, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 14)
        self.assertEqual(tbl[1, 1], 23)
        self.assertEqual(tbl[0, 1], 1)
        self.assertEqual(tbl[1, 0], 5)

    def test12_naiveBayes_composite(self):
        # """ test the naive bayes composite"""
        self.details.tableName = 'ferro_noquant'
        compos = self._loadComposite('ferromag_NaiveBayes.pkl', 10)
        self.details.doHoldout = 1
        nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, tbl = self._screen(compos)
        self.assertEqual(nGood, 25)
        self.assertEqual(misCount, 6)
        self.assertEqual(nSkipped, 0)
        self.assertAlmostEqual(avgGood, 0.9800, 4)
        self.assertAlmostEqual(avgBad, 0.86667, 4)
        self.assertAlmostEqual(avgSkip, 0, 4)
        self.assertEqual(tbl[0, 0], 9)
        self.assertEqual(tbl[0, 1], 6)
        self.assertEqual(tbl[1, 0], 0)
        self.assertEqual(tbl[1, 1], 16)
# Allow running this test module directly from the command line.
if __name__ == '__main__':  # pragma: nocover
    unittest.main()
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the 'slug' column to StoryTemplate."""
        # Adding field 'StoryTemplate.slug'
        # Default is '' and keep_default=False, so existing rows get an
        # empty slug and the model-level default is not persisted in SQL.
        db.add_column('storybase_story_storytemplate', 'slug',
                      self.gf('django.db.models.fields.SlugField')(default='', max_length=50, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the 'slug' column from StoryTemplate."""
        # Deleting field 'StoryTemplate.slug'
        db.delete_column('storybase_story_storytemplate', 'slug')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'links_to_file': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_geo.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_geo.location': {
'Meta': {'object_name': 'Location'},
'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['auth.User']"}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'storybase_geo.place': {
'Meta': {'object_name': 'Place'},
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'place_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_geo.placerelation': {
'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
},
'storybase_help.help': {
'Meta': {'object_name': 'Help'},
'help_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'storybase_story.container': {
'Meta': {'object_name': 'Container'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'storybase_story.section': {
'Meta': {'object_name': 'Section'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sections'", 'blank': 'True', 'through': "orm['storybase_story.SectionAsset']", 'to': "orm['storybase_asset.Asset']"}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_story.Section']", 'null': 'True', 'through': "orm['storybase_story.SectionRelation']", 'blank': 'True'}),
'help': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_help.Help']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.SectionLayout']", 'null': 'True'}),
'root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'section_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['storybase_story.Story']"}),
'template_section': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template_for'", 'null': 'True', 'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionasset': {
'Meta': {'object_name': 'SectionAsset'},
'asset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_asset.Asset']"}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Container']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionlayout': {
'Meta': {'object_name': 'SectionLayout'},
'containers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'layouts'", 'blank': 'True', 'to': "orm['storybase_story.Container']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_story.sectionlayouttranslation': {
'Meta': {'object_name': 'SectionLayoutTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.SectionLayout']"}),
'name': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.sectionrelation': {
'Meta': {'object_name': 'SectionRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_parent'", 'to': "orm['storybase_story.Section']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_child'", 'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectiontranslation': {
'Meta': {'unique_together': "(('section', 'language'),)", 'object_name': 'SectionTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Location']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'places': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Place']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_to'", 'blank': 'True', 'through': "orm['storybase_story.StoryRelation']", 'to': "orm['storybase_story.Story']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'template_story': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template_for'", 'null': 'True', 'to': "orm['storybase_story.Story']"}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_taxonomy.Category']"})
},
'storybase_story.storyrelation': {
'Meta': {'object_name': 'StoryRelation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'relation_type': ('django.db.models.fields.CharField', [], {'default': "'connected'", 'max_length': '25'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'target'", 'to': "orm['storybase_story.Story']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source'", 'to': "orm['storybase_story.Story']"})
},
'storybase_story.storytemplate': {
'Meta': {'object_name': 'StoryTemplate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']", 'null': 'True', 'blank': 'True'}),
'template_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'time_needed': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'storybase_story.storytemplatetranslation': {
'Meta': {'object_name': 'StoryTemplateTranslation'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'story_template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.StoryTemplate']"}),
'tag_line': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.storytranslation': {
'Meta': {'unique_together': "(('story', 'language'),)", 'object_name': 'StoryTranslation'},
'call_to_action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'connected_prompt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_taxonomy.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_taxonomy.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_taxonomy.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'tag_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_taxonomy.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'storybase_taxonomy_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['storybase_taxonomy.Tag']"})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['storybase_story']
| |
# Copyright 2015-2017 ProfitBricks GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
from helpers import configuration
from helpers.resources import resource, check_detached_cdrom_gone
from profitbricks.client import Datacenter, Server, Volume, NIC, FirewallRule
from profitbricks.client import ProfitBricksService
from profitbricks.errors import PBError, PBNotFoundError
from six import assertRegex
class TestServer(unittest.TestCase):
    """Live integration tests for server operations.

    ``setUpClass`` provisions one datacenter containing two volumes, a
    server (with volume1 attached at creation time), a NIC and an
    attached CDROM image. ``tearDownClass`` deletes the datacenter,
    which removes every contained resource.
    """

    @classmethod
    def setUpClass(cls):
        cls.resource = resource()
        cls.client = ProfitBricksService(
            username=configuration.USERNAME,
            password=configuration.PASSWORD,
            headers=configuration.HEADERS)

        # Create test datacenter.
        cls.datacenter = cls.client.create_datacenter(
            datacenter=Datacenter(**cls.resource['datacenter']))
        cls.client.wait_for_completion(cls.datacenter)

        # Create test volume1 (attached to the server at creation time).
        cls.volume1 = cls.client.create_volume(
            datacenter_id=cls.datacenter['id'],
            volume=Volume(**cls.resource['volume']))
        cls.client.wait_for_completion(cls.volume1)

        # Create test volume2 (used by the attach-volume test).
        cls.volume2 = cls.client.create_volume(
            datacenter_id=cls.datacenter['id'],
            volume=Volume(**cls.resource['volume']))
        cls.client.wait_for_completion(cls.volume2)

        # Create test server.
        server = Server(**cls.resource['server'])
        server.attach_volumes = [cls.volume1['id']]
        cls.server = cls.client.create_server(
            datacenter_id=cls.datacenter['id'],
            server=server)
        cls.client.wait_for_completion(cls.server)

        # Create test NIC.
        cls.nic = cls.client.create_nic(
            datacenter_id=cls.datacenter['id'],
            server_id=cls.server['id'],
            nic=NIC(**cls.resource['nic']))
        cls.client.wait_for_completion(cls.nic)

        # Find an Ubuntu image for testing (the last match wins, matching
        # the original behavior of this loop).
        for item in cls.client.list_images()['items']:
            if (configuration.IMAGE_NAME in item['properties']['name'] and
                    item['properties']['location'] == configuration.LOCATION):
                cls.image = item

        # Find two distinct public LINUX CDROM images in the configured
        # location. NOTE: the previous implementation tracked the first
        # match by its list index, which broke when that index was 0 (the
        # second match then overwrote test_image1 and test_image2 was
        # never assigned). A boolean flag avoids that edge case.
        images = cls.client.list_images(depth=5)
        found_first = False
        for image in images['items']:
            if (image['metadata']['state'] == "AVAILABLE"
                    and image['properties']['public'] is True
                    and image['properties']['imageType'] == "CDROM"
                    and image['properties']['location'] == configuration.LOCATION
                    and image['properties']['licenceType'] == "LINUX"):
                if not found_first:
                    cls.test_image1 = image
                    found_first = True
                else:
                    cls.test_image2 = image
                    break

        # Create test cdrom (attach the first image to the test server).
        cls.cdrom = cls.client.attach_cdrom(
            datacenter_id=cls.datacenter['id'],
            server_id=cls.server['id'],
            cdrom_id=cls.test_image1['id'])
        cls.client.wait_for_completion(cls.cdrom)

    @classmethod
    def tearDownClass(cls):
        # Deleting the datacenter cascades to all resources created above.
        cls.client.delete_datacenter(datacenter_id=cls.datacenter['id'])

    def test_list_servers(self):
        """Listing servers returns at least the server created in setup."""
        servers = self.client.list_servers(datacenter_id=self.datacenter['id'])
        self.assertGreater(len(servers), 0)
        self.assertEqual(servers['items'][0]['type'], 'server')
        # The previous assertion passed `self` as the value under test,
        # which made it vacuously true; assert the real condition.
        self.assertTrue(len(servers['items']) > 0)
        assertRegex(self, servers['items'][0]['id'], self.resource['uuid_match'])

    def test_get_server(self):
        """Fetching the setup server returns the properties it was created with."""
        server = self.client.get_server(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id']
        )
        self.assertEqual(server['type'], 'server')
        self.assertEqual(server['id'], self.server['id'])
        self.assertEqual(server['properties']['name'], self.resource['server']['name'])
        self.assertEqual(server['properties']['cores'], self.resource['server']['cores'])
        self.assertEqual(server['properties']['ram'], self.resource['server']['ram'])
        self.assertEqual(server['properties']['availabilityZone'],
                         self.resource['server']['availability_zone'])
        self.assertEqual(server['properties']['cpuFamily'], self.resource['server']['cpu_family'])
        # assertRegex(self, server['properties']['bootVolume']['id'], self.resource['uuid_match'])

    def test_get_failure(self):
        """Requesting a nonexistent server raises PBNotFoundError."""
        # The previous try/except version passed silently when no
        # exception was raised; assertRaises makes the expectation explicit.
        with self.assertRaises(PBNotFoundError) as ctx:
            self.client.get_server(
                datacenter_id=self.datacenter['id'],
                server_id='00000000-0000-0000-0000-000000000000')
        self.assertIn(self.resource['not_found_error'],
                      ctx.exception.content[0]['message'])

    def test_delete_server(self):
        """A freshly created server can be deleted."""
        server = self.client.create_server(
            datacenter_id=self.datacenter['id'],
            server=Server(**self.resource['server'])
        )
        self.client.wait_for_completion(server)
        response = self.client.delete_server(
            datacenter_id=self.datacenter['id'],
            server_id=server['id']
        )
        self.assertTrue(response)
        assertRegex(self, response['requestId'], self.resource['uuid_match'])

    def test_update_server(self):
        """Renaming the server changes its name and nothing else."""
        server = self.client.update_server(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            name=self.resource['server']['name'] + ' RENAME')
        self.client.wait_for_completion(server)
        server = self.client.get_server(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id']
        )
        self.assertEqual(server['id'], self.server['id'])
        self.assertEqual(server['properties']['name'], self.resource['server']['name'] + ' RENAME')
        self.assertEqual(server['properties']['cores'], self.resource['server']['cores'])
        self.assertEqual(server['properties']['ram'], self.resource['server']['ram'])

    def test_create_server(self):
        """The server created during setup has the requested properties."""
        # Use server created during server test setup
        assertRegex(self, self.server['id'], self.resource['uuid_match'])
        self.assertEqual(self.server['type'], 'server')
        self.assertEqual(self.server['properties']['name'], self.resource['server']['name'])
        self.assertEqual(self.server['properties']['cores'], self.resource['server']['cores'])
        self.assertEqual(self.server['properties']['ram'], self.resource['server']['ram'])
        self.assertEqual(self.server['properties']['availabilityZone'],
                         self.resource['server']['availability_zone'])
        self.assertEqual(self.server['properties']['cpuFamily'],
                         self.resource['server']['cpu_family'])
        # assertRegex(self, server['properties']['bootVolume']['id'], self.resource['uuid_match'])
        # self.assertIsNone(self.server['properties']['availabilityZone'])
        self.assertIsNone(self.server['properties']['vmState'])

    def test_create_failure(self):
        """Creating a server without the required 'cores' attribute fails."""
        server = Server(
            name=self.resource['server']['name'],
            ram=self.resource['server']['ram']
        )
        # The previous try/except version passed silently when no
        # exception was raised; assertRaises makes the expectation explicit.
        with self.assertRaises(PBError) as ctx:
            self.client.create_server(datacenter_id=self.datacenter['id'], server=server)
        self.assertIn(self.resource['missing_attribute_error'] % 'cores',
                      ctx.exception.content[0]['message'])

    def test_create_composite(self):
        """A composite server is created with its NIC and volume in one call."""
        fwrule = FirewallRule(**self.resource['fwrule'])
        nic = NIC(firewall_rules=[fwrule], **self.resource['nic'])
        volume = Volume(image=self.image['id'],
                        image_password='secretpassword123',
                        ssh_keys=['ssh-rsa AAAAB3NzaC1'],
                        **self.resource['volume'])
        volume.availability_zone = 'ZONE_3'
        server = Server(
            nics=[nic],
            create_volumes=[volume],
            **self.resource['server'])
        composite_server = self.client.create_server(
            datacenter_id=self.datacenter['id'],
            server=server)
        self.client.wait_for_completion(composite_server, timeout=600)
        composite_server = self.client.get_server(
            datacenter_id=self.datacenter['id'],
            server_id=composite_server['id'])
        assertRegex(self, composite_server['id'], self.resource['uuid_match'])
        self.assertEqual(composite_server['properties']['name'], self.resource['server']['name'])
        self.assertEqual(composite_server['properties']['cores'], self.resource['server']['cores'])
        self.assertEqual(composite_server['properties']['ram'], self.resource['server']['ram'])
        self.assertEqual(composite_server['properties']['availabilityZone'], 'ZONE_1')
        self.assertIn(composite_server['properties']['vmState'], self.resource['vm_states'])
        self.assertGreater(len(composite_server['entities']['volumes']['items']), 0)
        self.assertGreater(len(composite_server['entities']['nics']['items']), 0)

    def test_start_server(self):
        """Starting the server succeeds."""
        server = self.client.start_server(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'])
        self.assertTrue(server)

    def test_stop_server(self):
        """Stopping the server succeeds."""
        server = self.client.stop_server(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'])
        self.assertTrue(server)

    def test_reboot_server(self):
        """Rebooting the server succeeds."""
        server = self.client.reboot_server(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'])
        self.assertTrue(server)

    def test_get_attached_volumes(self):
        """The volume attached at creation time is listed with expected properties."""
        volumes = self.client.get_attached_volumes(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'])
        self.assertGreater(len(volumes['items']), 0)
        self.assertEqual(volumes['items'][0]['type'], 'volume')
        self.assertEqual(volumes['items'][0]['id'], self.volume1['id'])
        self.assertEqual(volumes['items'][0]['properties']['name'],
                         self.resource['volume']['name'])
        self.assertEqual(volumes['items'][0]['properties']['size'],
                         self.resource['volume']['size'])
        self.assertEqual(volumes['items'][0]['properties']['bus'],
                         self.resource['volume']['bus'])
        self.assertEqual(volumes['items'][0]['properties']['type'],
                         self.resource['volume']['disk_type'])
        self.assertEqual(volumes['items'][0]['properties']['licenceType'], 'UNKNOWN')
        self.assertIsNone(volumes['items'][0]['properties']['image'])
        self.assertIsNone(volumes['items'][0]['properties']['imagePassword'])
        self.assertFalse(volumes['items'][0]['properties']['cpuHotPlug'])
        self.assertFalse(volumes['items'][0]['properties']['cpuHotUnplug'])
        self.assertFalse(volumes['items'][0]['properties']['ramHotPlug'])
        self.assertFalse(volumes['items'][0]['properties']['ramHotUnplug'])
        self.assertFalse(volumes['items'][0]['properties']['nicHotPlug'])
        self.assertFalse(volumes['items'][0]['properties']['nicHotUnplug'])
        self.assertFalse(volumes['items'][0]['properties']['discVirtioHotPlug'])
        self.assertFalse(volumes['items'][0]['properties']['discVirtioHotUnplug'])
        self.assertFalse(volumes['items'][0]['properties']['discScsiHotPlug'])
        self.assertFalse(volumes['items'][0]['properties']['discScsiHotUnplug'])

    def test_get_attached_volume(self):
        """A single attached volume can be fetched by id."""
        volume = self.client.get_attached_volume(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            volume_id=self.volume1['id'])
        self.assertEqual(volume['id'], self.volume1['id'])
        self.assertEqual(volume['properties']['name'], self.resource['volume']['name'])
        self.assertEqual(volume['properties']['size'], self.resource['volume']['size'])
        self.assertEqual(volume['properties']['bus'], self.resource['volume']['bus'])
        self.assertEqual(volume['properties']['type'], self.resource['volume']['disk_type'])
        self.assertEqual(volume['properties']['licenceType'],
                         self.resource['volume']['licence_type'])
        self.assertIsNone(volume['properties']['image'])
        self.assertIsNone(volume['properties']['imagePassword'])
        self.assertFalse(volume['properties']['cpuHotPlug'])
        self.assertFalse(volume['properties']['cpuHotUnplug'])
        self.assertFalse(volume['properties']['ramHotPlug'])
        self.assertFalse(volume['properties']['ramHotUnplug'])
        self.assertFalse(volume['properties']['nicHotPlug'])
        self.assertFalse(volume['properties']['nicHotUnplug'])
        self.assertFalse(volume['properties']['discVirtioHotPlug'])
        self.assertFalse(volume['properties']['discVirtioHotUnplug'])
        self.assertFalse(volume['properties']['discScsiHotPlug'])
        self.assertFalse(volume['properties']['discScsiHotUnplug'])

    def test_attach_volume(self):
        """volume2 can be attached to the server and detached again."""
        volume = self.client.attach_volume(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            volume_id=self.volume2['id'])
        self.client.wait_for_completion(volume)
        self.assertEqual(volume['id'], self.volume2['id'])
        self.assertEqual(volume['properties']['name'], self.resource['volume']['name'])
        self.assertEqual(volume['properties']['size'], self.resource['volume']['size'])
        self.assertEqual(volume['properties']['type'], self.resource['volume']['disk_type'])
        self.assertEqual(volume['properties']['licenceType'],
                         self.resource['volume']['licence_type'])
        self.assertIsNone(volume['properties']['bus'])
        self.assertIsNone(volume['properties']['image'])
        self.assertIsNone(volume['properties']['imagePassword'])
        self.assertFalse(volume['properties']['cpuHotPlug'])
        self.assertFalse(volume['properties']['cpuHotUnplug'])
        self.assertFalse(volume['properties']['ramHotPlug'])
        self.assertFalse(volume['properties']['ramHotUnplug'])
        self.assertFalse(volume['properties']['nicHotPlug'])
        self.assertFalse(volume['properties']['nicHotUnplug'])
        self.assertFalse(volume['properties']['discVirtioHotPlug'])
        self.assertFalse(volume['properties']['discVirtioHotUnplug'])
        self.assertFalse(volume['properties']['discScsiHotPlug'])
        self.assertFalse(volume['properties']['discScsiHotUnplug'])
        # Clean up so other tests see the server in its original state.
        self.client.detach_volume(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            volume_id=self.volume2['id'])

    def test_detach_volume(self):
        """volume1 can be detached from the server."""
        volume = self.client.detach_volume(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            volume_id=self.volume1['id'])
        self.assertTrue(volume)

    def test_list_cdroms(self):
        """The CDROM attached in setup is listed."""
        cdroms = self.client.get_attached_cdroms(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'])
        self.assertGreater(len(cdroms['items']), 0)

    def test_attach_cdrom(self):
        """A second CDROM image can be attached to the server."""
        attached_cdrom = self.client.attach_cdrom(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            cdrom_id=self.test_image2['id'])
        self.client.wait_for_completion(attached_cdrom, timeout=600)
        self.assertEqual(attached_cdrom['id'], self.test_image2['id'])
        self.assertEqual(attached_cdrom['properties']['name'],
                         self.test_image2['properties']['name'])

    def test_get_cdrom(self):
        """An attached CDROM can be fetched by id."""
        attached_cdrom = self.client.attach_cdrom(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            cdrom_id=self.test_image1['id'])
        self.client.wait_for_completion(attached_cdrom, timeout=600)
        cdrom = self.client.get_attached_cdrom(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            cdrom_id=attached_cdrom['id'])
        self.assertEqual(cdrom['id'], attached_cdrom['id'])
        self.assertEqual(cdrom['properties']['name'], attached_cdrom['properties']['name'])

    def test_detach_cdrom(self):
        """Detaching the setup CDROM succeeds and the CDROM is gone afterwards."""
        detached_cd = self.client.detach_cdrom(
            datacenter_id=self.datacenter['id'],
            server_id=self.server['id'],
            cdrom_id=self.cdrom['id'])
        # Give the API time to process the detach before verifying.
        time.sleep(15)
        self.assertTrue(detached_cd)
        # check_detached_cdrom_gone is expected to raise PBNotFoundError
        # once the CDROM is no longer attached.
        try:
            check_detached_cdrom_gone(self)
        except PBNotFoundError as e:
            self.assertIn(self.resource['not_found_error'], e.content[0]['message'])
# Allow running this test module directly (e.g. `python test_server.py`).
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for monorail issue management."""
import datetime
import unittest
import mock
import six
from clusterfuzz._internal.tests.test_libs import helpers
from libs.issue_management import monorail
from libs.issue_management.monorail import issue_tracker_manager
from libs.issue_management.monorail.comment import Comment as MonorailComment
from libs.issue_management.monorail.issue import Issue as MonorailIssue
class IssueTrackerManager(issue_tracker_manager.IssueTrackerManager):
  """In-memory stand-in for the real issue tracker manager.

  Serves issues from a preloaded dict and records the most recently
  saved issue instead of making any network calls.
  """

  def __init__(self, project_name, mock_issues):
    # pylint: disable=super-init-not-called
    self.mock_issues = mock_issues
    self.project_name = project_name
    self.last_issue = None

  def get_issue(self, issue_id):
    """Return the preloaded issue for issue_id, or None when missing."""
    found = self.mock_issues.get(issue_id)
    if not found:
      return None
    found.itm = self
    return found

  def save(self, issue, *args, **kwargs):  # pylint: disable=unused-argument,arguments-differ
    """Record the issue as the last saved one (no remote write)."""
    self.last_issue = issue
class MonorailTests(unittest.TestCase):
"""Tests for the monorail issue tracker."""
def setUp(self):
helpers.patch(self, [
'libs.issue_management.monorail.issue_tracker_manager.'
'IssueTrackerManager.get_issues',
])
mock_issue = MonorailIssue()
mock_issue.id = 1337
mock_issue.summary = 'summary'
mock_issue.body = 'body'
mock_issue.owner = 'owner'
mock_issue.reporter = 'reporter'
mock_issue.status = 'New'
mock_issue.add_label('label1')
mock_issue.add_label('label2')
mock_issue.add_component('A>B')
mock_issue.add_component('C>D')
mock_issue.add_cc('cc@cc.com')
mock_comment0 = MonorailComment()
mock_comment0.author = 'author'
mock_comment0.cc = ['-removed@cc.com', 'cc@cc.com']
mock_comment0.labels = ['-label0', 'label1']
mock_comment0.components = ['-E>F', 'A>B']
mock_comment0.comment = 'comment'
mock_comment0.summary = 'summary'
mock_comment0.status = 'status'
mock_comment0.owner = 'owner'
mock_comment1 = MonorailComment()
mock_comment1.author = 'author'
mock_comment1.comment = 'comment'
mock_issue.comments = [
mock_comment0,
mock_comment1,
]
mock_issue_merged = MonorailIssue()
mock_issue_merged.id = 1338
mock_issue_merged.merged_into = 1337
mock_issue_merged.merged_into_project = 'project'
mock_issue_merged.closed = datetime.datetime(2019, 1, 1)
mock_issue_merged_another_project = MonorailIssue()
mock_issue_merged_another_project.id = 1339
mock_issue_merged_another_project.merged_into = 1
mock_issue_merged_another_project.merged_into_project = 'different-project'
mock_issues = {
1337: mock_issue,
1338: mock_issue_merged,
1339: mock_issue_merged_another_project,
}
self.itm = IssueTrackerManager('project', mock_issues)
self.issue_tracker = monorail.IssueTracker(self.itm)
def test_get_issue(self):
"""Test get_issue."""
self.assertIsNone(self.issue_tracker.get_issue(1))
issue = self.issue_tracker.get_issue(1337)
self.assertEqual(1337, issue.id)
self.assertEqual('summary', issue.title)
self.assertEqual('body', issue.body)
self.assertEqual('owner', issue.assignee)
self.assertEqual('reporter', issue.reporter)
self.assertEqual('New', issue.status)
self.assertIsNone(issue.merged_into)
six.assertCountEqual(self, [
'label1',
'label2',
], issue.labels)
six.assertCountEqual(self, [
'A>B',
'C>D',
], issue.components)
six.assertCountEqual(self, [
'cc@cc.com',
], issue.ccs)
issue = self.issue_tracker.get_issue(1338)
self.assertEqual(1338, issue.id)
self.assertEqual(datetime.datetime(2019, 1, 1), issue.closed_time)
def test_new_issue(self):
"""Test new_issue."""
issue = self.issue_tracker.new_issue()
issue.assignee = 'owner'
issue.title = 'summary'
issue.body = 'body'
issue.assignee = 'owner'
issue.reporter = 'reporter'
issue.status = 'New'
issue.labels.add('label1')
issue.labels.add('label2')
issue.components.add('A>B')
issue.components.add('C>D')
issue.ccs.add('cc@cc.com')
issue.save(new_comment='comment')
monorail_issue = self.itm.last_issue
self.assertEqual('summary', monorail_issue.summary)
self.assertEqual('body', monorail_issue.body)
self.assertEqual('owner', monorail_issue.owner)
self.assertEqual('reporter', monorail_issue.reporter)
self.assertEqual('New', monorail_issue.status)
self.assertEqual('comment', monorail_issue.comment)
six.assertCountEqual(self, [
'label1',
'label2',
], monorail_issue.labels)
six.assertCountEqual(self, [
'A>B',
'C>D',
], monorail_issue.components)
six.assertCountEqual(self, [
'cc@cc.com',
], monorail_issue.cc)
def test_actions(self):
"""Test actions."""
issue = self.issue_tracker.get_issue(1337)
actions = list(issue.actions)
self.assertEqual(2, len(actions))
self.assertEqual('summary', actions[0].title)
self.assertEqual('comment', actions[0].comment)
self.assertEqual('owner', actions[0].assignee)
self.assertEqual('status', actions[0].status)
six.assertCountEqual(self, ['cc@cc.com'], actions[0].ccs.added)
six.assertCountEqual(self, ['removed@cc.com'], actions[0].ccs.removed)
six.assertCountEqual(self, ['label1'], actions[0].labels.added)
six.assertCountEqual(self, ['label0'], actions[0].labels.removed)
six.assertCountEqual(self, ['A>B'], actions[0].components.added)
six.assertCountEqual(self, ['E>F'], actions[0].components.removed)
self.assertIsNone(actions[1].title)
self.assertEqual('comment', actions[1].comment)
self.assertIsNone(actions[1].assignee)
self.assertIsNone(actions[1].status)
six.assertCountEqual(self, [], actions[1].ccs.added)
six.assertCountEqual(self, [], actions[1].ccs.removed)
six.assertCountEqual(self, [], actions[1].labels.added)
six.assertCountEqual(self, [], actions[1].labels.removed)
six.assertCountEqual(self, [], actions[1].components.added)
six.assertCountEqual(self, [], actions[1].components.removed)
def test_modify_labels(self):
"""Test modifying labels."""
issue = self.issue_tracker.get_issue(1337)
issue.labels.add('Label3')
issue.labels.remove('laBel1')
six.assertCountEqual(self, ['label2', 'Label3'], issue.labels)
issue.save()
six.assertCountEqual(self, ['label2', 'Label3', '-laBel1'],
self.itm.last_issue.labels)
def test_modify_components(self):
"""Test modifying labels."""
issue = self.issue_tracker.get_issue(1337)
issue.components.add('Y>Z')
issue.components.remove('a>B')
six.assertCountEqual(self, ['C>D', 'Y>Z'], issue.components)
issue.save()
six.assertCountEqual(self, ['-a>B', 'C>D', 'Y>Z'],
self.itm.last_issue.components)
def test_get_original_issue(self):
"""Test get_original_issue."""
issue = self.issue_tracker.get_original_issue(1338)
self.assertEqual(1337, issue.id)
def test_find_issues(self):
"""Test find_issues."""
issue0 = MonorailIssue()
issue0.id = 1
issue1 = MonorailIssue()
issue1.id = 2
self.mock.get_issues.return_value = [
issue0,
issue1,
]
issues = self.issue_tracker.find_issues(
keywords=['one', 'two'], only_open=True)
six.assertCountEqual(self, [1, 2], [issue.id for issue in issues])
self.mock.get_issues.assert_has_calls([
mock.call(mock.ANY, '"one" "two"', can='open'),
])
issues = self.issue_tracker.find_issues(
keywords=['one', 'two'], only_open=False)
six.assertCountEqual(self, [1, 2], [issue.id for issue in issues])
self.mock.get_issues.assert_has_calls([
mock.call(mock.ANY, '"one" "two"', can='all'),
])
def test_find_issues_url(self):
"""Test find_issues_url."""
url = self.issue_tracker.find_issues_url(
keywords=['one', 'two'], only_open=False)
self.assertEqual(
'https://bugs.chromium.org/p/project/issues/list'
'?can_id=1&q=%22one%22+%22two%22', url)
url = self.issue_tracker.find_issues_url(
keywords=['one', 'two'], only_open=True)
self.assertEqual(
'https://bugs.chromium.org/p/project/issues/list'
'?can_id=2&q=%22one%22+%22two%22', url)
def test_issue_url(self):
"""Test issue_url."""
issue_url = self.issue_tracker.issue_url(1337)
self.assertEqual(
'https://bugs.chromium.org/p/project/issues/detail?id=1337', issue_url)
  def test_merged_into_different_project(self):
    """Test merged_into for a different issue tracker project."""
    # Presumably 1339 is merged into an issue belonging to another project
    # in the fake; such merges are reported as None -- verify fixture.
    issue = self.issue_tracker.get_issue(1339)
    self.assertIsNone(issue.merged_into)
| |
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
import os.path
import time
import flask
import gevent
from digits import device_query
from digits.task import Task
from digits.utils import subclass, override
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 2

# Used to store network outputs; `kind` labels the output (e.g. 'Accuracy')
# and `data` is the list of per-epoch values.
NetworkOutput = namedtuple('NetworkOutput', ['kind', 'data'])
@subclass
class TrainTask(Task):
    """
    Defines required methods for child classes
    """

    def __init__(self, job, dataset, train_epochs, snapshot_interval, learning_rate, lr_policy, **kwargs):
        """
        Arguments:
        job -- model job
        dataset -- a DatasetJob containing the dataset for this model
        train_epochs -- how many epochs of training data to train on
        snapshot_interval -- how many epochs between taking a snapshot
        learning_rate -- the base learning rate
        lr_policy -- a hash of options to be used for the learning rate policy

        Keyword arguments:
        gpu_count -- how many GPUs to use for training (integer)
        selected_gpus -- a list of GPU indexes to be used for training
        batch_size -- if set, override any network specific batch_size with this value
        batch_accumulation -- accumulate gradients over multiple batches
        val_interval -- how many epochs between validating the model with an epoch of validation data
        pretrained_model -- filename for a model to use for fine-tuning
        crop_size -- crop each image down to a square of this size
        use_mean -- subtract the dataset's mean file or mean pixel
        random_seed -- optional random seed
        """
        # Pop the train-specific options so the remaining kwargs can be
        # passed through to Task.__init__ unchanged.
        self.gpu_count = kwargs.pop('gpu_count', None)
        self.selected_gpus = kwargs.pop('selected_gpus', None)
        self.batch_size = kwargs.pop('batch_size', None)
        self.batch_accumulation = kwargs.pop('batch_accumulation', None)
        self.val_interval = kwargs.pop('val_interval', None)
        self.pretrained_model = kwargs.pop('pretrained_model', None)
        self.crop_size = kwargs.pop('crop_size', None)
        self.use_mean = kwargs.pop('use_mean', None)
        self.random_seed = kwargs.pop('random_seed', None)
        self.solver_type = kwargs.pop('solver_type', None)
        self.shuffle = kwargs.pop('shuffle', None)
        self.network = kwargs.pop('network', None)
        self.framework_id = kwargs.pop('framework_id', None)

        super(TrainTask, self).__init__(job_dir = job.dir(), **kwargs)
        # Version stamp checked by __setstate__ when unpickling old jobs.
        self.pickver_task_train = PICKLE_VERSION

        self.job = job
        self.dataset = dataset
        self.train_epochs = train_epochs
        self.snapshot_interval = snapshot_interval
        self.learning_rate = learning_rate
        self.lr_policy = lr_policy

        self.current_epoch = 0
        self.snapshots = []
        # data gets stored as dicts of lists (for graphing)
        self.train_outputs = OrderedDict()
        self.val_outputs = OrderedDict()

    def __getstate__(self):
        # Strip attributes that are unpicklable or recomputable on load:
        # the dataset is re-attached by the owning job, snapshots are
        # re-detected from disk, labels are re-read from the labels file,
        # and the gevent thread cannot be serialized.
        state = super(TrainTask, self).__getstate__()
        if 'dataset' in state:
            del state['dataset']
        if 'snapshots' in state:
            del state['snapshots']
        if '_labels' in state:
            del state['_labels']
        if '_gpu_socketio_thread' in state:
            del state['_gpu_socketio_thread']
        return state

    def __setstate__(self, state):
        # Migrate pickles older than version 2: the flat *_updates lists of
        # (epoch, value) pairs are converted into NetworkOutput columns.
        if state['pickver_task_train'] < 2:
            state['train_outputs'] = OrderedDict()
            state['val_outputs'] = OrderedDict()

            tl = state.pop('train_loss_updates', None)
            vl = state.pop('val_loss_updates', None)
            va = state.pop('val_accuracy_updates', None)
            lr = state.pop('lr_updates', None)
            if tl:
                state['train_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in tl])
                state['train_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in tl])
                state['train_outputs']['learning_rate'] = NetworkOutput('LearningRate', [x[1] for x in lr])
            if vl:
                state['val_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in vl])
                if va:
                    # Old accuracy values were stored as percentages.
                    state['val_outputs']['accuracy'] = NetworkOutput('Accuracy', [x[1]/100 for x in va])
                state['val_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in vl])

        # use_mean migrated from a boolean to the strings 'pixel'/'none'.
        if state['use_mean'] == True:
            state['use_mean'] = 'pixel'
        elif state['use_mean'] == False:
            state['use_mean'] = 'none'

        state['pickver_task_train'] = PICKLE_VERSION
        super(TrainTask, self).__setstate__(state)

        self.snapshots = []
        self.dataset = None

    @override
    def offer_resources(self, resources):
        # Claim one unit per required GPU; return None to tell the scheduler
        # the request cannot be satisfied right now (it will retry later).
        if 'gpus' not in resources:
            return None
        if not resources['gpus']:
            return {}  # don't use a GPU at all
        if self.gpu_count is not None:
            # Take the first gpu_count GPUs that have free capacity.
            identifiers = []
            for resource in resources['gpus']:
                if resource.remaining() >= 1:
                    identifiers.append(resource.identifier)
                    if len(identifiers) == self.gpu_count:
                        break

            if len(identifiers) == self.gpu_count:
                return {'gpus': [(i, 1) for i in identifiers]}
            else:
                return None
        elif self.selected_gpus is not None:
            # Every explicitly selected GPU must be available at once.
            all_available = True
            for i in self.selected_gpus:
                available = False
                for gpu in resources['gpus']:
                    if i == gpu.identifier:
                        if gpu.remaining() >= 1:
                            available = True
                        break
                if not available:
                    all_available = False
                    break

            if all_available:
                return {'gpus': [(i, 1) for i in self.selected_gpus]}
            else:
                return None
        return None

    @override
    def before_run(self):
        if 'gpus' in self.current_resources:
            # start a thread which sends SocketIO updates about GPU utilization
            self._gpu_socketio_thread = gevent.spawn(
                self.gpu_socketio_updater,
                [identifier for (identifier, value)
                 in self.current_resources['gpus']]
                )

    def gpu_socketio_updater(self, gpus):
        """
        This thread sends SocketIO messages about GPU utilization
        to connected clients

        Arguments:
        gpus -- a list of identifiers for the GPUs currently being used
        """
        # Imported here to avoid a circular import at module load time.
        from digits.webapp import app, socketio

        devices = []
        for index in gpus:
            device = device_query.get_device(index)
            if device:
                devices.append((index, device))
        if not devices:
            raise RuntimeError('Failed to load gpu information for "%s"' % gpus)

        # this thread continues until killed in after_run()
        while True:
            data = []
            for index, device in devices:
                update = {'name': device.name, 'index': index}
                nvml_info = device_query.get_nvml_info(index)
                if nvml_info is not None:
                    update.update(nvml_info)
                data.append(update)

            with app.app_context():
                html = flask.render_template('models/gpu_utilization.html',
                                             data = data)
                socketio.emit('task update',
                              {
                                  'task': self.html_id(),
                                  'update': 'gpu_utilization',
                                  'html': html,
                              },
                              namespace='/jobs',
                              room=self.job_id,
                              )
            gevent.sleep(1)

    def send_progress_update(self, epoch):
        """
        Sends socketio message about the current progress
        """
        if self.current_epoch == epoch:
            return

        self.current_epoch = epoch
        # NOTE(review): under Python 2 this is integer division if both
        # operands are ints; epoch appears to be supplied as a float by
        # subclasses -- confirm.
        self.progress = epoch/self.train_epochs
        self.emit_progress_update()

    def save_train_output(self, *args):
        """
        Save output to self.train_outputs
        """
        from digits.webapp import socketio

        if not self.save_output(self.train_outputs, *args):
            return
        # Rate-limit graph updates to at most one every five seconds.
        # NOTE(review): last_train_update is presumably initialized by Task
        # or a subclass -- confirm it exists before first use.
        if self.last_train_update and (time.time() - self.last_train_update) < 5:
            return
        self.last_train_update = time.time()

        self.logger.debug('Training %s%% complete.' % round(100 * self.current_epoch/self.train_epochs,2))

        # loss graph data
        data = self.combined_graph_data()
        if data:
            socketio.emit('task update',
                          {
                              'task': self.html_id(),
                              'update': 'combined_graph',
                              'data': data,
                          },
                          namespace='/jobs',
                          room=self.job_id,
                          )

            if data['columns']:
                # isolate the Loss column data for the sparkline
                graph_data = data['columns'][0][1:]
                socketio.emit('task update',
                              {
                                  'task': self.html_id(),
                                  'job_id': self.job_id,
                                  'update': 'combined_graph',
                                  'data': graph_data,
                              },
                              namespace='/jobs',
                              room='job_management',
                              )

        # lr graph data
        data = self.lr_graph_data()
        if data:
            socketio.emit('task update',
                          {
                              'task': self.html_id(),
                              'update': 'lr_graph',
                              'data': data,
                          },
                          namespace='/jobs',
                          room=self.job_id,
                          )

    def save_val_output(self, *args):
        """
        Save output to self.val_outputs
        """
        from digits.webapp import socketio

        if not self.save_output(self.val_outputs, *args):
            return

        # loss graph data
        data = self.combined_graph_data()
        if data:
            socketio.emit('task update',
                          {
                              'task': self.html_id(),
                              'update': 'combined_graph',
                              'data': data,
                          },
                          namespace='/jobs',
                          room=self.job_id,
                          )

    def save_output(self, d, name, kind, value):
        """
        Save output to self.train_outputs or self.val_outputs
        Returns true if all outputs for this epoch have been added

        Arguments:
        d -- the dictionary where the output should be stored
        name -- name of the output (e.g. "accuracy")
        kind -- the type of outputs (e.g. "Accuracy")
        value -- value for this output (e.g. 0.95)
        """
        # don't let them be unicode
        name = str(name)
        kind = str(kind)

        # update d['epoch']
        if 'epoch' not in d:
            d['epoch'] = NetworkOutput('Epoch', [self.current_epoch])
        elif d['epoch'].data[-1] != self.current_epoch:
            d['epoch'].data.append(self.current_epoch)

        if name not in d:
            d[name] = NetworkOutput(kind, [])

        epoch_len = len(d['epoch'].data)
        name_len = len(d[name].data)

        # save to back of d[name]
        if name_len > epoch_len:
            raise Exception('Received a new output without being told the new epoch')
        elif name_len == epoch_len:
            # already exists -- collect multiple values for the same epoch
            # into a list.
            if isinstance(d[name].data[-1], list):
                d[name].data[-1].append(value)
            else:
                d[name].data[-1] = [d[name].data[-1], value]
        elif name_len == epoch_len - 1:
            # expected case
            d[name].data.append(value)
        else:
            # we might have missed one -- pad skipped epochs with None
            for _ in xrange(epoch_len - name_len - 1):
                d[name].data.append(None)
            d[name].data.append(value)

        # Report whether every tracked output has a value for this epoch.
        for key in d:
            if key not in ['epoch', 'learning_rate']:
                if len(d[key].data) != epoch_len:
                    return False
        return True

    @override
    def after_run(self):
        if hasattr(self, '_gpu_socketio_thread'):
            # Stop the GPU utilization updater started in before_run().
            self._gpu_socketio_thread.kill()

    def detect_snapshots(self):
        """
        Populate self.snapshots with snapshots that exist on disk
        Returns True if at least one usable snapshot is found
        """
        # Base implementation finds nothing; frameworks override this.
        return False

    def snapshot_list(self):
        """
        Returns an array of arrays for creating an HTML select field
        """
        # Most recent snapshot first; s[1] is the epoch number.
        return [[s[1], 'Epoch #%s' % s[1]] for s in reversed(self.snapshots)]

    def est_next_snapshot(self):
        """
        Returns the estimated time in seconds until the next snapshot is taken
        """
        return None

    def can_view_weights(self):
        """
        Returns True if this Task can visualize the weights of each layer for a given model
        """
        raise NotImplementedError()

    def view_weights(self, model_epoch=None, layers=None):
        """
        View the weights for a specific model and layer[s]
        """
        return None

    def can_view_activations(self):
        """
        Returns True if this Task can visualize the activations of a model after inference
        """
        raise NotImplementedError()

    def infer_one(self, data, model_epoch=None, layers=None):
        """
        Run inference on one input
        """
        return None

    def can_infer_many(self):
        """
        Returns True if this Task can run inference on many inputs
        """
        raise NotImplementedError()

    def infer_many(self, data, model_epoch=None):
        """
        Run inference on many inputs
        """
        return None

    def get_labels(self):
        """
        Read labels from labels_file and return them in a list
        """
        # The labels might be set already
        if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
            return self._labels

        assert hasattr(self.dataset, 'labels_file'), 'labels_file not set'
        assert self.dataset.labels_file, 'labels_file not set'
        assert os.path.exists(self.dataset.path(self.dataset.labels_file)), 'labels_file does not exist'

        labels = []
        with open(self.dataset.path(self.dataset.labels_file)) as infile:
            for line in infile:
                label = line.strip()
                if label:
                    labels.append(label)

        assert len(labels) > 0, 'no labels in labels_file'

        # Cache for subsequent calls.
        self._labels = labels
        return self._labels

    def lr_graph_data(self):
        """
        Returns learning rate data formatted for a C3.js graph
        """
        if not self.train_outputs or 'epoch' not in self.train_outputs or 'learning_rate' not in self.train_outputs:
            return None

        # return 100-200 values or fewer
        stride = max(len(self.train_outputs['epoch'].data)/100,1)
        e = ['epoch'] + self.train_outputs['epoch'].data[::stride]
        lr = ['lr'] + self.train_outputs['learning_rate'].data[::stride]

        return {
            'columns': [e, lr],
            'xs': {
                'lr': 'epoch'
                },
            'names': {
                'lr': 'Learning Rate'
                },
            }

    def combined_graph_data(self, cull=True):
        """
        Returns all train/val outputs in data for one C3.js graph

        Keyword arguments:
        cull -- if True, cut down the number of data points returned to a reasonable size
        """
        data = {
            'columns': [],
            'xs': {},
            'axes': {},
            'names': {},
            }

        added_train_data = False
        added_val_data = False

        if self.train_outputs and 'epoch' in self.train_outputs:
            if cull:
                # max 200 data points
                stride = max(len(self.train_outputs['epoch'].data)/100,1)
            else:
                # return all data
                stride = 1
            for name, output in self.train_outputs.iteritems():
                if name not in ['epoch', 'learning_rate']:
                    col_id = '%s-train' % name
                    data['xs'][col_id] = 'train_epochs'
                    data['names'][col_id] = '%s (train)' % name
                    # Accuracy-like outputs are scaled to percent and plotted
                    # on the secondary axis.
                    if 'accuracy' in output.kind.lower() or 'accuracy' in name.lower():
                        data['columns'].append([col_id] + [
                            (100*x if x is not None else 'none')
                            for x in output.data[::stride]])
                        data['axes'][col_id] = 'y2'
                    else:
                        data['columns'].append([col_id] + [
                            (x if x is not None else 'none')
                            for x in output.data[::stride]])
                    added_train_data = True
            if added_train_data:
                data['columns'].append(['train_epochs'] + self.train_outputs['epoch'].data[::stride])

        if self.val_outputs and 'epoch' in self.val_outputs:
            if cull:
                # max 200 data points
                stride = max(len(self.val_outputs['epoch'].data)/100,1)
            else:
                # return all data
                stride = 1
            for name, output in self.val_outputs.iteritems():
                if name not in ['epoch']:
                    col_id = '%s-val' % name
                    data['xs'][col_id] = 'val_epochs'
                    data['names'][col_id] = '%s (val)' % name
                    if 'accuracy' in output.kind.lower() or 'accuracy' in name.lower():
                        data['columns'].append([col_id] + [
                            (100*x if x is not None else 'none')
                            for x in output.data[::stride]])
                        data['axes'][col_id] = 'y2'
                    else:
                        data['columns'].append([col_id] + [
                            (x if x is not None else 'none')
                            for x in output.data[::stride]])
                    added_val_data = True
            if added_val_data:
                data['columns'].append(['val_epochs'] + self.val_outputs['epoch'].data[::stride])

        if added_train_data:
            return data
        else:
            # return None if only validation data exists
            # helps with ordering of columns in graph
            return None

    # return id of framework used for training
    def get_framework_id(self):
        """
        Returns a string
        """
        return self.framework_id

    def get_model_files(self):
        """
        return path to model file
        """
        raise NotImplementedError()

    def get_network_desc(self):
        """
        return text description of model
        """
        raise NotImplementedError()
| |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os.path
from commoncode.testcase import FileBasedTesting
from cluecode_assert_utils import check_detection
class TestAuthors(FileBasedTesting):
    """Fixture-driven tests for author detection: each test runs detection on
    a sample file under data/authors and compares against the expected list.
    """
    # Root directory of the on-disk test fixtures.
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def test_author_addr_c(self):
        test_file = self.get_test_loc('authors/author_addr_c-addr_c.c')
        expected = [
            u'John Doe',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_avinash(self):
        # The same author appears twice in the sample, so it is expected twice.
        test_file = self.get_test_loc('authors/author_avinash-BitVector_py.py')
        expected = [
            u'Avinash Kak (kak@purdue.edu)',
            u'Avinash Kak (kak@purdue.edu)',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_avinash_kak(self):
        test_file = self.get_test_loc('authors/author_avinash_kak-BitVector_py.py')
        expected = [
            u'Avinash Kak (kak@purdue.edu)',
            u'Avinash Kak (kak@purdue.edu)',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_complex_author(self):
        test_file = self.get_test_loc('authors/author_complex_author-strtol_c.c')
        expected = [
            'developed by the University of California, Berkeley and its contributors.',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_correct(self):
        test_file = self.get_test_loc('authors/author_correct-detail_9_html.html')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_do_not_detect_authorize_as_author(self):
        # "authorize" must not be treated as an author marker.
        test_file = self.get_test_loc('authors/author_do_not_detect_authorize_as_author.csv')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_expat(self):
        test_file = self.get_test_loc('authors/author_expat-expat_h.h')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_gary(self):
        test_file = self.get_test_loc('authors/author_gary-ProjectInfo_java.java')
        expected = [
            "Gary O'Neall",
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_gthomas_c(self):
        test_file = self.get_test_loc('authors/author_gthomas_c-c.c')
        expected = [
            u'Author(s) gthomas, sorin@netappi.com',
            u'Contributors gthomas, sorin@netappi.com, andrew.lunn@ascom.ch',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_in_java(self):
        test_file = self.get_test_loc('authors/author_in_java-MergeSort_java.java')
        expected = [
            u'Scott Violet',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_in_java_tag(self):
        test_file = self.get_test_loc('authors/author_in_java_tag-java.java')
        expected = [
            u'Apple Banana Car',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_in_postcript(self):
        test_file = self.get_test_loc('authors/author_in_postcript-9__ps.ps')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_in_visio_doc(self):
        test_file = self.get_test_loc('authors/author_in_visio_doc-Glitch_ERD_vsd.vsd')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_nathan(self):
        test_file = self.get_test_loc('authors/author_nathan-KEYS')
        expected = [
            'Nathan Mittler <nathan.mittler@gmail.com>',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_no_author(self):
        test_file = self.get_test_loc('authors/author_no_author-c.c')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_none(self):
        test_file = self.get_test_loc('authors/author_none-wrong')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_none_c(self):
        test_file = self.get_test_loc('authors/author_none_c-c.c')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_none_fp(self):
        test_file = self.get_test_loc('authors/author_none_fp-false_positives_c.c')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_none_js(self):
        test_file = self.get_test_loc('authors/author_none_js-editor_beta_de_js.js')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_none_license(self):
        test_file = self.get_test_loc('authors/author_none_license-LICENSE')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_none_sample_java(self):
        test_file = self.get_test_loc('authors/author_none_sample_java-java.java')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_russ_c(self):
        test_file = self.get_test_loc('authors/author_russ_c-c.c')
        # these are detected as copyrights, not authors
        # u'Russ Dill <Russ.Dill@asu.edu>',
        # u'Vladimir Oleynik <dzo@simtreas.ru>',
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_sample(self):
        test_file = self.get_test_loc('authors/author_sample-c.c')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_samplepy(self):
        test_file = self.get_test_loc('authors/author_samplepy-py.py')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_snippet(self):
        test_file = self.get_test_loc('authors/author_snippet')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_stacktrace_cpp(self):
        test_file = self.get_test_loc('authors/author_stacktrace_cpp-stacktrace_cpp.cpp')
        expected = [
            u'by faith@dict.org',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_treetablemodeladapter_java(self):
        test_file = self.get_test_loc('authors/author_treetablemodeladapter_java-TreeTableModelAdapter_java.java')
        expected = [
            u'Philip Milne author Scott Violet',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_uc(self):
        test_file = self.get_test_loc('authors/author_uc-LICENSE')
        expected = [
            'developed by the University of California, Berkeley and its contributors.',
            'developed by UC Berkeley and its contributors.',
            'developed by the University of California, Berkeley and its contributors.',
        ]
        check_detection(expected, test_file, what='authors')

    def test_author_var_route_c(self):
        test_file = self.get_test_loc('authors/author_var_route_c-var_route_c.c')
        # these are detected as copyrights, not authors
        # u'Erik Schoenfelder (schoenfr@ibr.cs.tu-bs.de)',
        # u'Simon Leinen (simon@switch.ch)',
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_vs(self):
        test_file = self.get_test_loc('authors/author_vs-visual_studio.txt')
        expected = []
        check_detection(expected, test_file, what='authors')

    def test_author_young_c(self):
        test_file = self.get_test_loc('authors/author_young_c-c.c')
        expected = [
            u'written by Eric Young (eay@mincom.oz.au).',
            u'Tim Hudson (tjh@mincom.oz.au).',
            u'written by Eric Young (eay@mincom.oz.au)',
            u'written by Tim Hudson (tjh@mincom.oz.au)',
        ]
        check_detection(expected, test_file, what='authors')
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for generating the feature_statistics proto from generic data.
The proto is used as input for the Overview visualization.
"""
import numpy as np
import pandas as pd
import sys
import warnings
class BaseGenericFeatureStatisticsGenerator(object):
  """Base class for generator of stats proto from generic data."""

  def __init__(self, fs_proto, datasets_proto, histogram_proto):
    # These are the proto *classes* (injected so the base class stays
    # independent of a specific generated proto module).
    self.fs_proto = fs_proto
    self.datasets_proto = datasets_proto
    self.histogram_proto = histogram_proto

  def ProtoFromDataFrames(self, dataframes,
                          histogram_categorical_levels_count=None):
    """Creates a feature statistics proto from a set of pandas dataframes.

    Args:
      dataframes: A list of dicts describing tables for each dataset for the
          proto. Each entry contains a 'table' field of the dataframe of the
          data and a 'name' field to identify the dataset in the proto.
      histogram_categorical_levels_count: int, controls the maximum number of
          levels to display in histograms for categorical features.
          Useful to prevent codes/IDs features from bloating the stats object.
          Defaults to None.

    Returns:
      The feature statistics proto for the provided tables.
    """
    warnings.warn(
        'Code in this directory is deprecated. Use the facets-overview pip package instead.',
        DeprecationWarning
    )
    datasets = []
    for dataframe in dataframes:
      table = dataframe['table']
      table_entries = {}
      for col in table:
        table_entries[col] = self.NdarrayToEntry(table[col])
      datasets.append({
          'entries': table_entries,
          'size': len(table),
          'name': dataframe['name']
      })
    return self.GetDatasetsProto(
        datasets,
        histogram_categorical_levels_count=histogram_categorical_levels_count)

  def DtypeToType(self, dtype):
    """Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum."""
    if dtype.char in np.typecodes['AllFloat']:
      return self.fs_proto.FLOAT
    # FIX: `np.bool` was a deprecated alias for the builtin `bool` and was
    # removed in NumPy 1.24; compare against `np.bool_` instead.
    elif (dtype.char in np.typecodes['AllInteger'] or dtype == np.bool_ or
          np.issubdtype(dtype, np.datetime64) or
          np.issubdtype(dtype, np.timedelta64)):
      return self.fs_proto.INT
    else:
      return self.fs_proto.STRING

  def DtypeToNumberConverter(self, dtype):
    """Converts a Numpy dtype to a converter method if applicable.

    The converter method takes in a numpy array of objects of the provided
    dtype and returns a numpy array of the numbers backing that object for
    statistical analysis. Returns None if no converter is necessary.

    Args:
      dtype: The numpy dtype to make a converter for.

    Returns:
      The converter method or None.
    """
    if np.issubdtype(dtype, np.datetime64):

      def DatetimesToNumbers(dt_list):
        return np.array([pd.Timestamp(dt).value for dt in dt_list])

      return DatetimesToNumbers
    elif np.issubdtype(dtype, np.timedelta64):

      def TimedeltasToNumbers(td_list):
        return np.array([pd.Timedelta(td).value for td in td_list])

      return TimedeltasToNumbers
    else:
      return None

  def NdarrayToEntry(self, x):
    """Converts an ndarray to the Entry format.

    Returns a dict with 'vals' (cleaned values), 'counts' (per-row counts of
    non-missing values), 'missing' (number of removed None/NaN values) and
    'type' (the proto type enum for the array's dtype).
    """
    row_counts = []
    for row in x:
      try:
        rc = np.count_nonzero(~np.isnan(row))
        if rc != 0:
          row_counts.append(rc)
      except TypeError:
        # Non-numeric rows: fall back to the row's size, or 1 for scalars.
        try:
          row_counts.append(row.size)
        except AttributeError:
          row_counts.append(1)

    data_type = self.DtypeToType(x.dtype)
    converter = self.DtypeToNumberConverter(x.dtype)
    flattened = x.ravel()
    orig_size = len(flattened)

    # Remove all None and nan values and count how many were removed.
    flattened = flattened[flattened != np.array(None)]
    if converter:
      flattened = converter(flattened)
    if data_type == self.fs_proto.STRING:
      flattened_temp = []
      # FIX: the loop variable previously shadowed the `x` argument.
      for item in flattened:
        try:
          if str(item) != 'nan':
            flattened_temp.append(item)
        except UnicodeEncodeError:
          if item.encode('utf-8') != 'nan':
            flattened_temp.append(item)
      flattened = flattened_temp
    else:
      flattened = flattened[~np.isnan(flattened)].tolist()
    missing = orig_size - len(flattened)
    return {
        'vals': flattened,
        'counts': row_counts,
        'missing': missing,
        'type': data_type
    }

  def GetDatasetsProto(self, datasets, features=None,
                       histogram_categorical_levels_count=None):
    """Generates the feature stats proto from dictionaries of feature values.

    Args:
      datasets: An array of dictionaries, one per dataset, each one containing:
          - 'entries': The dictionary of features in the dataset from the
            parsed examples.
          - 'size': The number of examples parsed for the dataset.
          - 'name': The name of the dataset.
      features: A list of strings that is a whitelist of feature names to
          create feature statistics for. If set to None then all features in
          the dataset are analyzed. Defaults to None.
      histogram_categorical_levels_count: int, controls the maximum number of
          levels to display in histograms for categorical features.
          Useful to prevent codes/IDs features from bloating the stats object.
          Defaults to None.

    Returns:
      The feature statistics proto for the provided datasets.
    """
    features_seen = set()
    whitelist_features = set(features) if features else None
    all_datasets = self.datasets_proto()

    # TODO(jwexler): Add ability to generate weighted feature stats
    # if there is a specified weight feature in the dataset.

    # Initialize each dataset
    for dataset in datasets:
      all_datasets.datasets.add(
          name=dataset['name'], num_examples=dataset['size'])
    # This outer loop ensures that for each feature seen in any of the provided
    # datasets, we check the feature once against all datasets.
    for outer_dataset in datasets:
      for key, value in outer_dataset['entries'].items():
        # If we have a feature whitelist and this feature is not in the
        # whitelist then do not process it.
        # If we have processed this feature already, no need to do it again.
        if ((whitelist_features and key not in whitelist_features) or
            key in features_seen):
          continue
        features_seen.add(key)
        # Default to type int if no type is found, so that the fact that all
        # values are missing from this feature can be displayed.
        feature_type = value['type'] if 'type' in value else self.fs_proto.INT
        # Process the found feature for each dataset.
        for j, dataset in enumerate(datasets):
          feat = all_datasets.datasets[j].features.add(
              type=feature_type, name=key.encode('utf-8'))
          value = dataset['entries'].get(key)
          has_data = value is not None and (value['vals'].size != 0
                                            if isinstance(
                                                value['vals'], np.ndarray) else
                                            value['vals'])
          commonstats = None
          # For numeric features, calculate numeric statistics.
          if feat.type in (self.fs_proto.INT, self.fs_proto.FLOAT):
            featstats = feat.num_stats
            commonstats = featstats.common_stats
            if has_data:
              nums = value['vals']
              # FIX: np.asscalar was removed in NumPy 1.23; use ndarray.item.
              featstats.std_dev = np.std(nums).item()
              featstats.mean = np.mean(nums).item()
              featstats.min = np.min(nums).item()
              featstats.max = np.max(nums).item()
              featstats.median = np.median(nums).item()
              featstats.num_zeros = len(nums) - np.count_nonzero(nums)

              nums = np.array(nums)
              num_nan = len(nums[np.isnan(nums)])
              num_posinf = len(nums[np.isposinf(nums)])
              num_neginf = len(nums[np.isneginf(nums)])

              # Remove all non-finite (including NaN) values from the numeric
              # values in order to calculate histogram buckets/counts. The
              # inf values will be added back to the first and last buckets.
              nums = nums[np.isfinite(nums)]
              counts, buckets = np.histogram(nums)
              hist = featstats.histograms.add()
              hist.type = self.histogram_proto.STANDARD
              hist.num_nan = num_nan
              for bucket_count in range(len(counts)):
                bucket = hist.buckets.add(
                    low_value=buckets[bucket_count],
                    high_value=buckets[bucket_count + 1],
                    sample_count=counts[bucket_count].item())
                # Add any negative or positive infinities to the first and last
                # buckets in the histogram.
                if bucket_count == 0 and num_neginf > 0:
                  bucket.low_value = float('-inf')
                  bucket.sample_count += num_neginf
                elif bucket_count == len(counts) - 1 and num_posinf > 0:
                  bucket.high_value = float('inf')
                  bucket.sample_count += num_posinf
              if not hist.buckets:
                # All values were +/-inf: emit degenerate buckets so the
                # counts are not lost.
                if num_neginf:
                  hist.buckets.add(
                      low_value=float('-inf'),
                      high_value=float('-inf'),
                      sample_count=num_neginf)
                if num_posinf:
                  hist.buckets.add(
                      low_value=float('inf'),
                      high_value=float('inf'),
                      sample_count=num_posinf)
              self._PopulateQuantilesHistogram(featstats.histograms.add(),
                                               nums.tolist())
          elif feat.type == self.fs_proto.STRING:
            featstats = feat.string_stats
            commonstats = featstats.common_stats
            if has_data:
              strs = []
              for item in value['vals']:
                strs.append(item if hasattr(item, '__len__') else
                            item.encode('utf-8') if hasattr(item, 'encode') else str(
                                item))

              featstats.avg_length = np.mean(np.vectorize(len)(strs))
              vals, counts = np.unique(strs, return_counts=True)
              featstats.unique = len(vals)
              sorted_vals = sorted(zip(counts, vals), reverse=True)
              sorted_vals = sorted_vals[:histogram_categorical_levels_count]
              for val_index, val in enumerate(sorted_vals):
                try:
                  if (sys.version_info.major < 3 or
                      isinstance(val[1], (bytes, bytearray))):
                    printable_val = val[1].decode('UTF-8', 'strict')
                  else:
                    printable_val = val[1]
                except (UnicodeDecodeError, UnicodeEncodeError):
                  printable_val = '__BYTES_VALUE__'
                bucket = featstats.rank_histogram.buckets.add(
                    low_rank=val_index,
                    high_rank=val_index,
                    # FIX: np.asscalar removed in NumPy 1.23; use .item().
                    sample_count=val[0].item(),
                    label=printable_val)
                if val_index < 2:
                  featstats.top_values.add(
                      value=bucket.label, frequency=bucket.sample_count)
          # Add the common stats regardless of the feature type.
          if has_data:
            commonstats.num_missing = value['missing']
            commonstats.num_non_missing = (all_datasets.datasets[j].num_examples
                                           - featstats.common_stats.num_missing)
            commonstats.min_num_values = int(np.min(value['counts']).astype(int))
            commonstats.max_num_values = int(np.max(value['counts']).astype(int))
            commonstats.avg_num_values = np.mean(value['counts'])
            if 'feat_lens' in value and value['feat_lens']:
              self._PopulateQuantilesHistogram(
                  commonstats.feature_list_length_histogram, value['feat_lens'])
            self._PopulateQuantilesHistogram(commonstats.num_values_histogram,
                                             value['counts'])
          else:
            commonstats.num_non_missing = 0
            commonstats.num_missing = all_datasets.datasets[j].num_examples

    return all_datasets

  def _PopulateQuantilesHistogram(self, hist, nums):
    """Fills in the histogram with quantile information from the provided array.

    Args:
      hist: A Histogram proto message to fill in.
      nums: A list of numbers to create a quantiles histogram from.
    """
    if not nums:
      return
    num_quantile_buckets = 10
    quantiles_to_get = [
        x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
    ]
    quantiles = np.percentile(nums, quantiles_to_get)
    hist.type = self.histogram_proto.QUANTILES
    quantiles_sample_count = float(len(nums)) / num_quantile_buckets
    for low, high in zip(quantiles, quantiles[1:]):
      hist.buckets.add(
          low_value=low, high_value=high, sample_count=quantiles_sample_count)
| |
# -*- coding: utf-8 -*-
from flaskApp import celery
from flask import jsonify
import dcs.load
import dcs.view
import dcs.analyze
import dcs.clean
import os
import requests
import pandas as pd
import numpy as np
import json
import datetime
import traceback
@celery.task()
def userUploadedCSVToDataFrame(uploadID, initialSkip, sampleSize, seed, headerIncluded):
    """Task invoked synchronously by :ref:`POST /upload <flask-upload>` request in Flask application

    Parses the temporary CSV upload with :func:`dcs.load.CSVtoDataFrame`, caches the
    result via :func:`flaskApp.tasks.saveToCache` and deletes the temporary file.

    Returns:
        str: new Willow sessionID, or ``None`` on failure
    """
    csvPath = 'flaskApp/temp/' + uploadID + '.csv'
    if not (uploadID and os.path.isfile(csvPath)):
        return None
    frame = dcs.load.CSVtoDataFrame(csvPath, initialSkip=initialSkip, sampleSize=sampleSize, seed=seed, headerIncluded=headerIncluded)
    os.remove(csvPath)
    if frame is not None and saveToCache(frame, uploadID):
        return uploadID
    return None
@celery.task()
def userUploadedJSONToDataFrame(uploadID, initialSkip, sampleSize, seed):
    """Task invoked synchronously by :ref:`POST /upload <flask-upload>` request in Flask application

    Parses the temporary JSON upload with :func:`dcs.load.JSONtoDataFrame`, caches the
    result via :func:`flaskApp.tasks.saveToCache` and deletes the temporary file.
    ``initialSkip`` is accepted for signature parity with the other upload tasks but
    is not forwarded to the JSON loader.

    Returns:
        str: new Willow sessionID, or ``None`` on failure
    """
    jsonPath = 'flaskApp/temp/' + uploadID + '.json'
    if not (uploadID and os.path.isfile(jsonPath)):
        return None
    frame = dcs.load.JSONtoDataFrame(jsonPath, sampleSize=sampleSize, seed=seed)
    os.remove(jsonPath)
    if frame is not None and saveToCache(frame, uploadID):
        return uploadID
    return None
@celery.task()
def userUploadedXLSXToDataFrame(uploadID, initialSkip, sampleSize, seed, headerIncluded):
    """Task invoked synchronously by :ref:`POST /upload <flask-upload>` request in Flask application

    Parses the temporary XLSX upload with :func:`dcs.load.XLSXtoDataFrame`, caches the
    result via :func:`flaskApp.tasks.saveToCache` and deletes the temporary file.

    Returns:
        str: new Willow sessionID, or ``None`` on failure
    """
    xlsxPath = 'flaskApp/temp/' + uploadID + '.xlsx'
    if not (uploadID and os.path.isfile(xlsxPath)):
        return None
    frame = dcs.load.XLSXtoDataFrame(xlsxPath, initialSkip=initialSkip, sampleSize=sampleSize, seed=seed, headerIncluded=headerIncluded)
    os.remove(xlsxPath)
    if frame is not None and saveToCache(frame, uploadID):
        return uploadID
    return None
@celery.task()
def userUploadedXLSToDataFrame(uploadID, initialSkip, sampleSize, seed, headerIncluded):
    """Task invoked synchronously by :ref:`POST /upload <flask-upload>` request in Flask application

    Parses the temporary XLS upload (via the same :func:`dcs.load.XLSXtoDataFrame`
    loader used for XLSX files), caches the result via
    :func:`flaskApp.tasks.saveToCache` and deletes the temporary file.

    Returns:
        str: new Willow sessionID, or ``None`` on failure
    """
    xlsPath = 'flaskApp/temp/' + uploadID + '.xls'
    if not (uploadID and os.path.isfile(xlsPath)):
        return None
    frame = dcs.load.XLSXtoDataFrame(xlsPath, initialSkip=initialSkip, sampleSize=sampleSize, seed=seed, headerIncluded=headerIncluded)
    os.remove(xlsPath)
    if frame is not None and saveToCache(frame, uploadID):
        return uploadID
    return None
def undoAvailable(sessionID):
    """Supporting function that detects whether an undo operation is available.

    The HDF file format supports storing multiple datasets with different labels in
    the same file; undo restores the dataset stored under the 'undo' label. This
    function reports whether a dataset is present under that label for the HDF file
    belonging to *sessionID*.

    Args:
        sessionID (str): Willow sessionID

    Returns:
        bool
    """
    backup = loadDataFrameFromCache(sessionID, "undo")
    return type(backup) is pd.DataFrame
@celery.task()
def loadDataFrameFromCache(sessionID, key="original"):
    """Supporting function that loads a dataset from the HDF file store.
    The HDF file format supports storing multiple datasets with different labels in the same file.
    The HDF file associated with a Willow sessionID *always* stores the current version of the dataset,
    under the 'original' label.
    Uses :func:`pandas.read_hdf`.
    Args:
        sessionID (str): Willow sessionID
        key (str, optional): retrieve a dataset under a different label in the HDF file
    Returns:
        pandas.dataFrame: pandas.dataFrame on success, ``None`` on failure"""
    # NOTE(review): `basestring` is Python-2-only; replace with `str` when porting to Python 3.
    if isinstance(sessionID, basestring) and len(sessionID) == 30:
        try:
            data = pd.read_hdf("flaskApp/cache/" + sessionID + ".h5", key)
            if type(data) is pd.DataFrame:
                return data
        except Exception:
            # Missing cache file or absent key is a normal cache miss -> None.
            # (Was a bare `except:`, which also trapped KeyboardInterrupt/SystemExit.)
            pass
    return None
@celery.task()
def DataFrameToCSV(sessionID):
    """Task invoked synchronously by :ref:`GET /downloadCSV <flask-download-CSV>` request in Flask application
    Uses :meth:`pandas.DataFrame.to_csv`.
    Returns:
        str: CSV text, or ``None`` if no DataFrame is cached for *sessionID*"""
    df = loadDataFrameFromCache(sessionID)
    if type(df) is pd.DataFrame:
        # `force_ascii` is a `to_json` option, not a `to_csv` one; passing it
        # raises TypeError on modern pandas and broke every CSV download.
        return df.to_csv(None, index=False)
    else:
        return None
@celery.task()
def DFtoJSON(sessionID):
    """Task invoked synchronously by :ref:`GET /downloadJSON <flask-download-JSON>` request in Flask application
    Uses :meth:`pandas.DataFrame.to_json`.
    Returns:
        str: JSON text (records orientation, ISO-formatted dates), or ``None``
        if no DataFrame is cached for *sessionID*"""
    df = loadDataFrameFromCache(sessionID)
    if type(df) is pd.DataFrame:
        return df.to_json(orient="records", date_format="iso", force_ascii=True)
    else:
        return None
def uniquefyDataFrameColumnNames(df):
    """Supporting function that ensures that all column names in a :class:`pandas.DataFrame` object are unique.

    The HDF fixed file format used by Willow does not support duplicate column names,
    so this function renames duplicates in place. Columns are scanned right-to-left,
    the right-most occurrence keeps its original name and earlier duplicates get a
    ``.<counter>`` suffix, e.g. (..., Date, Date, ...) becomes (..., Date.1, Date, ...).

    Args:
        df (pandas.DataFrame): frame whose columns are renamed in place.
    """
    frequencies = {}
    newNames = []
    for name in reversed(df.columns):
        if frequencies.get(name, 0) > 0:
            newName = "%s.%d" % (name, frequencies[name])
            incrementer = 0
            # Bump the suffix until the candidate clashes with neither an existing
            # column nor a name already generated in this pass (previously only
            # df.columns was checked, so two generated names could still collide).
            while newName in df.columns or newName in newNames:
                incrementer += 1
                newName = "%s.%d" % (name, frequencies[name] + incrementer)
            newNames.append(newName)
        else:
            newNames.append(name)
        frequencies[name] = frequencies.get(name, 0) + 1
    # Materialize the reversed iterator: pandas expects a concrete list-like here.
    df.columns = list(reversed(newNames))
@celery.task()
def saveToCache(df, sessionID):
    """Supporting function that saves a :class:`pandas.DataFrame` object to the HDF file store.
    This function must be called after every Celery operation that modifies the dataset, as the
    Willow backend depends on the invariant that the HDF file corresponding to a Willow sessionID
    always holds the latest version of the dataset.
    Uses :meth:`pandas.DataFrame.to_hdf`.
    Returns:
        bool: ``True`` on success, ``False`` on failure"""
    # NOTE(review): `basestring` is Python-2-only; replace with `str` when porting to Python 3.
    if isinstance(sessionID, basestring) and len(sessionID) == 30:
        try:
            uniquefyDataFrameColumnNames(df) # hdf fixed format does not support duplicate column names
            path = "flaskApp/cache/" + sessionID + ".h5"
            # The current cached frame becomes the single undo level.
            oldDF = loadDataFrameFromCache(sessionID)
            if type(oldDF) is pd.DataFrame:
                # save one undo
                # mode="w" truncates the file, discarding the previous 'undo' dataset,
                # then the new frame is appended under 'original' below.
                oldDF.to_hdf(path, "undo", mode="w", format="fixed")
            df.to_hdf(path, "original", mode="a", format="fixed")
            return True
        except Exception as e:
            print("failed to save hdf ", e)
    return False
@celery.task()
def undo(sessionID, requestID):
    """Task invoked asynchronously by :ref:`'undo' WebSocket request <socket-undo>` in Flask application
    Uses :func:`loadDataFrameFromCache`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "undo"}
    backup = loadDataFrameFromCache(sessionID, "undo")
    if type(backup) is pd.DataFrame:
        # Re-saving the backup makes it current (and the replaced frame the new undo).
        saveToCache(backup, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    else:
        toReturn['error'] = "IllegalOperation"
        toReturn['errorDescription'] = "The undo operation is currently not available on this dataframe. "
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def renameColumn(sessionID, requestID, column, newName):
    """Task invoked asynchronously by :ref:`'renameColumn' WebSocket request <socket-rename-column>` in Flask application
    Uses :func:`dcs.load.renameColumn`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "renameColumn"}
    df = loadDataFrameFromCache(sessionID)
    try:
        # df may be None on a cache miss; any resulting exception is reported below.
        dcs.load.renameColumn(df, column, newName)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def newCellValue(sessionID, requestID, columnIndex, rowIndex, newValue):
    """Task invoked asynchronously by :ref:`'newCellValue' WebSocket request <socket-rename-column>` in Flask application
    Uses :func:`dcs.load.newCellValue`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    # NOTE(review): the :ref: label above looks copy-pasted from renameColumn -- confirm target.
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "newCellValue"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.load.newCellValue(df, columnIndex, rowIndex, newValue)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def changeColumnDataType(sessionID, requestID, column, newDataType, dateFormat=None):
    """Task invoked asynchronously by :ref:`'changeColumnDataType' WebSocket request <socket-change-column-data-type>` in Flask application
    Uses :func:`dcs.load.changeColumnDataType`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "changeColumnDataType"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.load.changeColumnDataType(df, column, newDataType, dateFormat=dateFormat)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def deleteRows(sessionID, requestID, rowIndices):
    """Task invoked asynchronously by :ref:`'deleteRows' WebSocket request <socket-delete-rows>` in Flask application
    Uses :func:`dcs.load.removeRows`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "deleteRows"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.load.removeRows(df, rowIndices)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def deleteColumns(sessionID, requestID, columnIndices):
    """Task invoked asynchronously by :ref:`'deleteColumns' WebSocket request <socket-delete-columns>` in Flask application
    Uses :func:`dcs.load.removeColumns`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "deleteColumns"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.load.removeColumns(df, columnIndices)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def emptyStringToNan(sessionID, requestID, columnIndex):
    """Task invoked asynchronously by :ref:`'emptyStringToNan' WebSocket request <socket-empty-string-to-nan>` in Flask application
    Uses :func:`dcs.load.emptyStringToNan`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "emptyStringToNan"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.load.emptyStringToNan(df, columnIndex)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def fillDown(sessionID, requestID, columnFrom, columnTo, method):
    """Task invoked asynchronously by :ref:`'fillDown' WebSocket request <socket-fill-down>` in Flask application
    Uses :func:`dcs.clean.fillDown`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "fillDown"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.fillDown(df, columnFrom, columnTo, method)
        saveToCache(df, sessionID)
        # Unlike the other tasks, 'changed' reports the affected column index range.
        toReturn['changed'] = list(range(columnFrom, columnTo + 1))
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def interpolate(sessionID, requestID, columnIndex, method, order):
    """Task invoked asynchronously by :ref:`'interpolate' WebSocket request <socket-interpolate>` in Flask application
    Uses :func:`dcs.clean.fillByInterpolation`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "interpolate"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.fillByInterpolation(df, columnIndex, method, order)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except MemoryError:
        # Interpolation can exhaust memory on large frames; report a short,
        # user-facing error label instead of the raw exception text.
        toReturn['error'] = "Memory Error"
        toReturn['errorDescription'] = traceback.format_exc()
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def fillWithCustomValue(sessionID, requestID, columnIndex, newValue):
    """Task invoked asynchronously by :ref:`'fillWithCustomValue' WebSocket request <socket-fill-with-custom-value>` in Flask application
    Uses :func:`dcs.clean.fillWithCustomValue`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "fillWithCustomValue"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.fillWithCustomValue(df, columnIndex, newValue)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def fillWithAverage(sessionID, requestID, columnIndex, metric):
    """Task invoked asynchronously by :ref:`'fillWithAverage' WebSocket request <socket-fill-with-average>` in Flask application
    Uses :func:`dcs.clean.fillWithAverage`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "fillWithAverage"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.fillWithAverage(df, columnIndex, metric)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def normalize(sessionID, requestID, columnIndex, rangeFrom, rangeTo):
    """Task invoked asynchronously by :ref:`'normalize' WebSocket request <socket-normalize>` in Flask application
    Uses :func:`dcs.clean.normalize`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "normalize"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.normalize(df, columnIndex, rangeFrom, rangeTo)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def standardize(sessionID, requestID, columnIndex):
    """Task invoked asynchronously by :ref:`'standardize' WebSocket request <socket-standardize>` in Flask application
    Uses :func:`dcs.clean.standardize`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "standardize"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.standardize(df, columnIndex)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
        # NOTE(review): debug leftover; only runs when the POST returns within the timeout.
        print("standardize done")
    except requests.exceptions.RequestException:
        pass
@celery.task()
def deleteRowsWithNA(sessionID, requestID, columnIndex):
    """Task invoked asynchronously by :ref:`'deleteRowsWithNA' WebSocket request <socket-delete-rows-with-na>` in Flask application
    Uses :func:`dcs.clean.deleteRowsWithNA`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "deleteRowsWithNA"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.deleteRowsWithNA(df, columnIndex)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def findReplace(sessionID, requestID, columnIndex, toReplace, replaceWith, matchRegex):
    """Task invoked asynchronously by :ref:`'findReplace' WebSocket request <socket-find-replace>` in Flask application
    Uses :func:`dcs.clean.findReplace`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "findReplace"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.findReplace(df, columnIndex, toReplace, replaceWith, matchRegex)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def generateDummies(sessionID, requestID, columnIndex, inplace):
    """Task invoked asynchronously by :ref:`'generateDummies' WebSocket request <socket-generate-dummies>` in Flask application
    Uses :func:`dcs.clean.generateDummies`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "generateDummies"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.generateDummies(df, columnIndex, inplace)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def insertDuplicateColumn(sessionID, requestID, columnIndex):
    """Task invoked asynchronously by :ref:`'insertDuplicateColumn' WebSocket request <socket-insert-duplicate-column>` in Flask application
    Uses :func:`dcs.clean.insertDuplicateColumn`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "insertDuplicateColumn"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.insertDuplicateColumn(df, columnIndex)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def splitColumn(sessionID, requestID, columnIndex, delimiter, regex):
    """Task invoked asynchronously by :ref:`'splitColumn' WebSocket request <socket-split-column>` in Flask application
    Uses :func:`dcs.clean.splitColumn`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "splitColumn"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.splitColumn(df, columnIndex, delimiter, regex)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def combineColumns(sessionID, requestID, columnsToCombine, seperator, newName, insertIndex):
    """Task invoked asynchronously by :ref:`'combineColumns' WebSocket request <socket-combine-columns>` in Flask application
    Uses :func:`dcs.clean.combineColumns`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "combineColumns"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.combineColumns(df, columnsToCombine, seperator, newName, insertIndex)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def discretize(sessionID, requestID, columnIndex, cutMode, numberOfBins):
    """Task invoked asynchronously by :ref:`'discretize' WebSocket request <socket-discretize>` in Flask application
    Uses :func:`dcs.clean.discretize`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "discretize"}
    df = loadDataFrameFromCache(sessionID)
    try:
        dcs.clean.discretize(df, columnIndex, cutMode, numberOfBins)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def executeCommand(sessionID, requestID, command):
    """Task invoked asynchronously by :ref:`'executeCommand' WebSocket request <socket-execute-command>` in Flask application
    Uses :func:`dcs.clean.executeCommand`.
    .. danger::
        Using this function carries direct risk, as any arbitrary command can be executed
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "executeCommand"}
    df = loadDataFrameFromCache(sessionID)
    try:
        # SECURITY: `command` comes from the client and is executed verbatim by the
        # helper -- see the danger note above; do not expose this to untrusted users.
        dcs.clean.executeCommand(df, command)
        saveToCache(df, sessionID)
        toReturn['changed'] = True
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def metadata(request):
    """Task invoked asynchronously by :ref:`'metadata' WebSocket request <socket-metadata>` in Flask application
    Uses :func:`dcs.load.dataFrameToJSON`, :func:`dcs.load.rowsWithInvalidValuesInColumns`,
    :func`dcs.load.outliersTrimmedMeanSd`, :func:`dcs.load.duplicateRowsInColumns` and
    :func:`dcs.view.filterWithSearchQuery`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': request["requestID"], 'sessionID': request["sessionID"], 'operation': "metadata"}
    df = loadDataFrameFromCache(request["sessionID"])
    if df is not None:
        if "filterColumnIndices" in request and type(request["filterColumnIndices"]) is list and "filterType" in request:
            # filtered metadata
            if request["filterType"] == "invalid":
                df = dcs.load.rowsWithInvalidValuesInColumns(df, request["filterColumnIndices"])
            elif request["filterType"] == "outliers":
                df = dcs.load.outliersTrimmedMeanSd(df, request["filterColumnIndices"], request.get("outliersStdDev", 2), request.get("outliersTrimPortion", 0))
            elif request["filterType"] == "duplicates":
                df = dcs.load.duplicateRowsInColumns(df, request["filterColumnIndices"])
        if "searchColumnIndices" in request and type(request["searchColumnIndices"]) is list and "searchQuery" in request:
            df = dcs.view.filterWithSearchQuery(df, request["searchColumnIndices"], request["searchQuery"], request["searchIsRegex"] if "searchIsRegex" in request else False)
        toReturn['success'] = True
        toReturn['undoAvailable'] = undoAvailable(request["sessionID"])
        toReturn['dataSize'] = { 'rows': df.shape[0], 'columns': df.shape[1] }
        toReturn['columns'] = []
        toReturn['columnInfo'] = {}
        for index, column in enumerate(df.columns):
            toReturn['columns'].append(column)
            information = {}
            information['index'] = index
            if np.issubdtype(df[column].dtype, np.integer):
                information['dataType'] = 'int'
            # np.float / np.object aliases were removed in NumPy 1.24; use the
            # abstract np.floating type and the builtin `object` instead.
            elif np.issubdtype(df[column].dtype, np.floating):
                information['dataType'] = 'float'
            elif np.issubdtype(df[column].dtype, np.datetime64):
                information['dataType'] = 'datetime'
            elif df[column].dtype == object:
                information['dataType'] = 'string'
            else:
                information['dataType'] = str(df[column].dtype)
            # Cast numpy int64 to a plain int so the payload is JSON-serializable.
            information['invalidValues'] = int(df[column].isnull().sum())
            toReturn['columnInfo'][column] = information
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def data(request):
    """Task invoked asynchronously by :ref:`'data' WebSocket request <socket-data>` in Flask application
    Uses :func:`dcs.load.dataFrameToJSON`, :func:`dcs.load.rowsWithInvalidValuesInColumns`,
    :func`dcs.load.outliersTrimmedMeanSd`, :func:`dcs.load.duplicateRowsInColumns` and
    :func:`dcs.view.filterWithSearchQuery`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': request["requestID"], 'sessionID': request["sessionID"], 'operation': "data"}
    df = loadDataFrameFromCache(request["sessionID"])
    if df is not None:
        try:
            if "rowIndexFrom" in request and "rowIndexTo" in request and "columnIndexFrom" in request and "columnIndexTo" in request:
                if "filterColumnIndices" in request and type(request["filterColumnIndices"]) is list:
                    if request["filterType"] == "invalid":
                        df = dcs.load.rowsWithInvalidValuesInColumns(df, request["filterColumnIndices"])
                    elif request["filterType"] == "outliers":
                        df = dcs.load.outliersTrimmedMeanSd(df, request["filterColumnIndices"], request.get("outliersStdDev", 2), request.get("outliersTrimPortion", 0))
                    elif request["filterType"] == "duplicates":
                        df = dcs.load.duplicateRowsInColumns(df, request["filterColumnIndices"])
                if "searchColumnIndices" in request and type(request["searchColumnIndices"]) is list and "searchQuery" in request:
                    df = dcs.view.filterWithSearchQuery(df, request["searchColumnIndices"], request["searchQuery"], request.get("searchIsRegex", False))
                if "sortColumnIndex" in request and type(request["sortColumnIndex"]) is int and request["sortColumnIndex"] >= 0 and request["sortColumnIndex"] < len(df.columns):
                    df.sort_values(df.columns[request["sortColumnIndex"]], ascending=request.get("sortAscending", True), inplace=True)
                data = dcs.load.dataFrameToJSON(df, request["rowIndexFrom"], request["rowIndexTo"], request["columnIndexFrom"], request["columnIndexTo"])
                if data is not None:
                    toReturn['success'] = True
                    toReturn['data'] = data
        except Exception:
            # Malformed requests fall through to the failure payload below.
            # (Was a bare `except:`, which also trapped KeyboardInterrupt/SystemExit.)
            pass
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def analyze(sessionID, requestID, column):
    """Task invoked asynchronously by :ref:`'analyze' WebSocket request <socket-analyze>` in Flask application
    Uses :func:`dcs.analyze.analysisForColumn`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': requestID, 'sessionID': sessionID, 'operation': "analyze"}
    df = loadDataFrameFromCache(sessionID)
    print('requesting analysis for %s' % column)
    try:
        toReturn['data'] = dcs.analyze.analysisForColumn(df, column)
        print('got analysis for %s' % column)
        toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
        print(str(e))
        print(traceback.format_exc())
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
@celery.task()
def visualize(request):
    """Task invoked asynchronously by :ref:`'visualize' WebSocket request <socket-visualize>` in Flask application
    Uses :func:`dcs.view.histogram`, :func:`dcs.view.scatter`, :func:`dcs.view.line`,
    :func:`dcs.view.date` and :func:`dcs.view.frequency`.
    POSTs result dictionary in JSON format to :ref:`/celeryTaskCompleted <flask-celery-task-completed>`
    endpoint in Flask application. """
    toReturn = {'success' : False, 'requestID': request["requestID"], 'sessionID': request["sessionID"], 'operation': "visualize"}
    df = loadDataFrameFromCache(request["sessionID"])
    try:
        # Dispatch on chart type; a request missing the required keys for its
        # type falls through with success=False.
        if request["type"] == "histogram" and "columnIndices" in request:
            toReturn.update(dcs.view.histogram(df, request["columnIndices"], request))
            toReturn['success'] = True
        elif request["type"] == "scatter" and "xColumnIndex" in request and "yColumnIndices" in request:
            toReturn.update(dcs.view.scatter(df, request["xColumnIndex"], request["yColumnIndices"], request))
            toReturn['success'] = True
        elif request["type"] == "line" and "xColumnIndex" in request and "yColumnIndices" in request:
            toReturn.update(dcs.view.line(df, request["xColumnIndex"], request["yColumnIndices"], request))
            toReturn['success'] = True
        elif request["type"] == "date" and "xColumnIndex" in request and "yColumnIndices" in request:
            toReturn.update(dcs.view.date(df, request["xColumnIndex"], request["yColumnIndices"], request))
            toReturn['success'] = True
        elif request["type"] == "frequency" and "columnIndex" in request:
            toReturn.update(dcs.view.frequency(df, request["columnIndex"], request))
            toReturn['success'] = True
    except Exception as e:
        toReturn['error'] = str(e)
        toReturn['errorDescription'] = traceback.format_exc()
    try:
        # Fire-and-forget result POST (tiny timeout). Catch only requests errors:
        # the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        requests.post("http://localhost:5000/celeryTaskCompleted/", json=toReturn, timeout=0.001)
    except requests.exceptions.RequestException:
        pass
| |
# -*- coding: utf-8 -*-
"""
sync_openstreetmap.py
superlachaise_api
Created by Maxime Le Moine on 26/05/2015.
Copyright (c) 2015 Maxime Le Moine.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http:www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json, math, os, requests, sys, traceback
from decimal import Decimal
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone, translation
from django.utils.translation import ugettext as _
from superlachaise_api.models import *
def print_unicode(str):
print str.encode('utf-8')
def decimal_handler(obj):
    """JSON serialisation helper: render Decimal values as strings and
    pass every other object through unchanged."""
    if isinstance(obj, Decimal):
        return str(obj)
    return obj
def none_to_blank(s):
    """Coerce None to an empty unicode string; anything else to its unicode form."""
    return u'' if s is None else unicode(s)
class Command(BaseCommand):
    """Synchronise OpenStreetMap elements into the local database.

    Downloads nodes/ways/relations matching the configured tags inside the
    configured bounding box from the Overpass API, resolves their wikipedia
    links to wikidata codes via the Wikidata API, then creates, updates and
    deletes OpenStreetMapElement rows to match the upstream data.
    (Python 2 codebase: keeps `iteritems`, `unicode` etc.)
    """

    def request_wikidata_with_wikipedia_links(self, language_code, wikipedia_links):
        """Map wikipedia titles (for `language_code`) to wikidata entities via
        the `wbgetentities` API, following 'continue' pagination."""
        result = {}
        last_continue = {
            'continue': '',
        }
        languages = Language.objects.all().values_list('code', flat=True)
        titles = '|'.join(wikipedia_links).encode('utf8')
        sites = language_code + 'wiki'

        while True:
            # Request properties
            params = {
                'languages': languages,
                'action': 'wbgetentities',
                'props': 'sitelinks',
                'format': 'json',
                'sites': sites,
                'titles': titles,
            }
            params.update(last_continue)

            if settings.MEDIAWIKI_USER_AGENT:
                headers = {"User-Agent": settings.MEDIAWIKI_USER_AGENT}
            else:
                # Fixed: raising a plain string is a TypeError; raise a real exception.
                raise Exception('no USER_AGENT defined in settings.py')

            json_result = requests.get('https://www.wikidata.org/w/api.php', params=params, headers=headers).json()

            if 'entities' in json_result:
                result.update(json_result['entities'])

            if 'continue' not in json_result:
                break

            last_continue = json_result['continue']

        return result

    def get_wikipedia(self, entity, language_code):
        """Return the wikipedia title of `entity` for `language_code`, or
        None when the sitelink is absent."""
        try:
            wikipedia = entity['sitelinks'][language_code + 'wiki']
            return wikipedia['title']
        except KeyError:
            # Narrowed from a bare `except:`: only the dict lookups can fail here.
            return None

    def request_overpass(self, bounding_box):
        """Query the Overpass API for all synced-tag elements within the
        bounding box and return the decoded JSON result."""
        query_string_list = ['[out:json];\n', '(\n']
        for synced_tag in self.synced_tags:
            query_string_list.append(""
                "\tnode[{tag}]({bounding_box});\n"
                "\tway[{tag}]({bounding_box});\n"
                "\trelation[{tag}]({bounding_box});\n".format(tag=synced_tag, bounding_box=bounding_box)
            )
        query_string_list.append(');\n(._;>;);out center;')
        query_string = "".join(query_string_list)

        # Kill any other query
        requests.get('http://overpass-api.de/api/kill_my_queries')

        result = requests.get('http://overpass-api.de/api/interpreter', data=query_string).json()

        return result

    def get_wiki_values(self, overpass_element, field_name):
        """Extract and normalise wiki tag values (`field_name` is 'wikipedia'
        or 'wikidata') from an element's tags.

        Handles namespaced keys like 'wikipedia:fr' and multi-valued entries
        like 'fr:foo;bar', returning a ';'-joined list of normalised values.
        """
        result = []
        if 'tags' in overpass_element:
            for key, value in {key: value for (key, value) in overpass_element['tags'].iteritems() if field_name in key.split(':')}.iteritems():
                wiki_value = []
                for key_part in key.split(':'):
                    if not key_part == field_name:
                        wiki_value.append(key_part)
                value_field = value
                if len(value_field.split(':')) == 2:
                    # fr:foo;bar
                    wiki_value.append(value.split(':')[0])
                    value_field = value.split(':')[1]
                for sub_value in value_field.split(';'):
                    sub_list = list(wiki_value)
                    sub_list.extend(sub_value.split(':'))
                    result.append(':'.join(sub_list))
        return ';'.join(result)

    def get_nature(self, overpass_element):
        """Return the element's 'historic' tag value (may be None)."""
        return overpass_element['tags'].get("historic")

    def get_values_from_element(self, overpass_element, center):
        """Build the model-field dict for an element, combining its wikipedia
        and wikidata tags into a single sorted 'wikidata' value."""
        tags = overpass_element['tags']
        result = {
            'name': none_to_blank(tags.get("name", None)),
            'sorting_name': none_to_blank(tags.get("sorting_name", None)),
            'latitude': Decimal(center['lat']).quantize(Decimal('.0000001')),
            'longitude': Decimal(center['lon']).quantize(Decimal('.0000001')),
            'wikimedia_commons': none_to_blank(tags.get("wikimedia_commons", None)),
        }

        element_wikipedia = none_to_blank(self.get_wiki_values(overpass_element, 'wikipedia'))
        element_wikidata = none_to_blank(self.get_wiki_values(overpass_element, 'wikidata'))
        result['nature'] = none_to_blank(self.get_nature(overpass_element))

        # Get combined wikidata field
        wikidata_combined = []
        if element_wikipedia:
            for wikipedia in element_wikipedia.split(';'):
                if ':' in wikipedia:
                    language_code = wikipedia.split(':')[-2]
                    link = wikipedia.split(':')[-1]
                    if language_code in self.wikidata_codes and link in self.wikidata_codes[language_code]:
                        wikidata_code = self.wikidata_codes[language_code][link]
                        wikidata_link = wikipedia.split(language_code + u':' + link)[0] + wikidata_code
                        if not wikidata_link in wikidata_combined:
                            wikidata_combined.append(wikidata_link)
                    else:
                        self.errors.append(_('Error: The wikipedia page {language_code}:{link} does not exist').format(language_code=language_code, link=link))
        if element_wikidata:
            for wikidata_link in element_wikidata.split(';'):
                if not wikidata_link in wikidata_combined:
                    wikidata_combined.append(wikidata_link)
        wikidata_combined.sort()
        result['wikidata'] = ';'.join(wikidata_combined)

        if not result['sorting_name']:
            # Fall back to the display name for sorting when none is given.
            result['sorting_name'] = result['name']

        return result

    def handle_element(self, overpass_element, center):
        """Create or update the OpenStreetMapElement row for one element,
        tracking created/modified counters."""
        # Get values
        values_dict = self.get_values_from_element(overpass_element, center)

        # Get or create object in database
        target_object_id_dict = {"type": overpass_element['type'], "openstreetmap_id": overpass_element['id']}
        openStreetMap_element, created = OpenStreetMapElement.objects.get_or_create(**target_object_id_dict)
        self.fetched_objects_pks.append(openStreetMap_element.pk)
        modified = False

        if created:
            self.created_objects = self.created_objects + 1
        else:
            # Search for modifications
            for field, value in values_dict.iteritems():
                if value != getattr(openStreetMap_element, field):
                    modified = True
                    self.modified_objects = self.modified_objects + 1
                    break

        if created or modified:
            for field, value in values_dict.iteritems():
                setattr(openStreetMap_element, field, value)
            openStreetMap_element.save()

    def element_accepted(self, element):
        """Return True when the element carries one of the synced tags and
        is not explicitly excluded by (type, id)."""
        result = False

        # Check if tag is explicitly excluded
        for excluded_id in self.exclude_ids:
            if excluded_id['type'] == element['type'] and excluded_id['id'] == element['id']:
                return False

        # Check if tag is to be synced
        for synced_tag in self.synced_tags:
            tag_splitted = synced_tag.split("=")
            if 'tags' in element:
                if element['tags'].get(tag_splitted[0]) == tag_splitted[1]:
                    result = True
                    break

        return result

    def sync_openstreetmap(self):
        """Full sync pass: download elements, resolve wikidata codes, upsert
        matching rows, then delete rows no longer present upstream."""
        # Download data from OSM
        print_unicode(_('Requesting Overpass API...'))
        result = self.request_overpass(self.bounding_box)

        wikipedia_to_fetch = {}
        self.wikidata_codes = {}
        for element in result['elements']:
            wikipedias = self.get_wiki_values(element, 'wikipedia')
            for wikipedia in wikipedias.split(';'):
                if ':' in wikipedia:
                    language_code = wikipedia.split(':')[-2]
                    link = wikipedia.split(':')[-1]
                    if not language_code in wikipedia_to_fetch:
                        wikipedia_to_fetch[language_code] = []
                    if not link in wikipedia_to_fetch[language_code]:
                        wikipedia_to_fetch[language_code].append(link)

        print_unicode(_('Requesting Wikidata...'))
        total = 0
        for language, wikipedia_links in wikipedia_to_fetch.iteritems():
            total += len(wikipedia_links)
        count = 0
        max_count_per_request = 25
        for language_code, wikipedia_links in wikipedia_to_fetch.iteritems():
            self.wikidata_codes[language_code] = {}
            wikipedia_links = list(set(wikipedia_links))
            # Query in chunks to respect API request-size limits.
            for chunk in [wikipedia_links[i:i+max_count_per_request] for i in range(0, len(wikipedia_links), max_count_per_request)]:
                print_unicode(str(count) + u'/' + str(total))
                count += len(chunk)
                entities = self.request_wikidata_with_wikipedia_links(language_code, chunk)
                for wikidata_code, entity in entities.iteritems():
                    wikipedia = self.get_wikipedia(entity, language_code)
                    self.wikidata_codes[language_code][wikipedia] = wikidata_code
        print_unicode(str(count) + u'/' + str(total))

        # Handle downloaded elements
        self.fetched_objects_pks = []
        for element in result['elements']:
            if self.element_accepted(element):
                if 'center' in element:
                    # Ways/relations carry their centroid in 'center'.
                    self.handle_element(element, element['center'])
                else:
                    # Nodes carry lat/lon directly on the element.
                    self.handle_element(element, element)

        # Look for deleted elements
        for openStreetMap_element in OpenStreetMapElement.objects.exclude(pk__in=self.fetched_objects_pks):
            self.deleted_objects = self.deleted_objects + 1
            openStreetMap_element.delete()

    def handle(self, *args, **options):
        """Command entry point: load settings, run the sync, and record the
        outcome (counters and/or traceback) on the Synchronization row."""
        try:
            self.synchronization = Synchronization.objects.get(name=os.path.basename(__file__).split('.')[0].split('sync_')[-1])
        except:
            raise CommandError(sys.exc_info()[1])
        error = None

        try:
            translation.activate(settings.LANGUAGE_CODE)
            self.bounding_box = Setting.objects.get(key=u'openstreetmap:bounding_box').value
            self.exclude_ids = json.loads(Setting.objects.get(key=u'openstreetmap:exclude_ids').value)
            self.synced_tags = json.loads(Setting.objects.get(key=u'openstreetmap:synced_tags').value)
            self.created_objects = 0
            self.modified_objects = 0
            self.deleted_objects = 0
            self.errors = []

            print_unicode(_('== Start %s ==') % self.synchronization.name)
            self.sync_openstreetmap()
            print_unicode(_('== End %s ==') % self.synchronization.name)

            self.synchronization.created_objects = self.created_objects
            self.synchronization.modified_objects = self.modified_objects
            self.synchronization.deleted_objects = self.deleted_objects
            self.synchronization.errors = ', '.join(self.errors)

            translation.deactivate()
        except:
            # Top-level boundary: record the traceback, then re-raise below.
            print_unicode(traceback.format_exc())
            error = sys.exc_info()[1]
            self.synchronization.errors = traceback.format_exc()

        # Always stamp and persist the synchronization record.
        self.synchronization.last_executed = timezone.now()
        self.synchronization.save()

        if error:
            raise CommandError(error)
| |
from gosubl import about
from gosubl import ev
from gosubl import gs
from gosubl import gsq
from gosubl import sh
import atexit
import base64
import hashlib
import json
import os
import re
import sublime
import subprocess
import threading
import time
import uuid
# Identifier used to scope logs/notifications for this module.
DOMAIN = 'MarGo'
# Prefix under which pending IPC requests are stored via gs.set_attr.
REQUEST_PREFIX = '%s.rqst.' % DOMAIN
# gs attr name holding the running MarGo subprocess handle.
PROC_ATTR_NAME = 'mg9.proc'
# Tag sent with every request; a mismatching response tag means
# GoSublime and MarGo are out of sync (see _recv).
TAG = about.VERSION
# May be overridden by gs_init() from the init payload.
INSTALL_VERSION = about.VERSION
INSTALL_EXE = about.MARGO_EXE
def gs_init(m={}):
    """Plugin entry point: registers server shutdown at exit and kicks off
    an asynchronous MarGo install/upgrade.

    `m` may override 'version' and 'margo_exe'. NOTE(review): the mutable
    default dict is only safe because `m` is never mutated here.
    """
    global INSTALL_VERSION
    global INSTALL_EXE

    atexit.register(killSrv)

    version = m.get('version')
    if version:
        INSTALL_VERSION = version

    margo_exe = m.get('margo_exe')
    if margo_exe:
        INSTALL_EXE = margo_exe

    # (sic) 'vesion' is a historic typo kept for consistency with install().
    aso_install_vesion = gs.aso().get('install_version', '')
    f = lambda: install(aso_install_vesion, False)
    gsq.do('GoSublime', f, msg='Installing MarGo', set_status=False)
class Request(object):
    """A single outstanding request to the MarGo backend.

    Records the callback, creation time and method name, and assigns a
    unique auto-generated token when the caller does not supply one.
    """

    def __init__(self, f, method='', token=''):
        self.f = f
        self.tm = time.time()
        self.method = method
        self.token = token if token else ('mg9.autoken.%s' % uuid.uuid4())

    def header(self):
        """Return the JSON-serialisable IPC header for this request."""
        return {'method': self.method, 'token': self.token}
def _inst_state():
    # Current install state: '' (not started), 'busy', or 'done'.
    return gs.attr(_inst_name(), '')

def _inst_name():
    # Attr key namespacing the install state by the version being installed.
    return 'mg9.install.%s' % INSTALL_VERSION

def _margo_src():
    # Path to the bundled margo9 source tree.
    return gs.dist_path('margo9')

def _margo_bin(exe=''):
    # Path to the installed MarGo binary (or to `exe` when given).
    return gs.home_path('bin', exe or INSTALL_EXE)
def sanity_check_sl(sl):
    """Format (key, value) diagnostic pairs as aligned `| key: value` lines,
    abbreviating the user's home directory to `~` and indenting multi-line
    values to line up under their key."""
    # Width of the longest key, used for alignment.
    n = 0
    for p in sl:
        n = max(n, len(p[0]))

    t = '%d' % n
    t = '| %'+t+'s: %s'
    indent = '| %s> ' % (' ' * n)

    a = '~%s' % os.sep
    b = os.path.expanduser(a)

    return [t % (k, gs.ustr(v).replace(b, a).replace('\n', '\n%s' % indent)) for k,v in sl]
def sanity_check(env={}, error_log=False):
    """Collect (label, value) diagnostic pairs describing the install state,
    editor, Go toolchain and environment; optionally append the contents of
    the error log.

    NOTE(review): the mutable default `env={}` is safe because `env` is
    reassigned, never mutated.
    """
    if not env:
        env = sh.env()

    ns = '(not set)'

    sl = [
        ('install state', _inst_state()),
        ('sublime.version', sublime.version()),
        ('sublime.channel', sublime.channel()),
        ('about.ann', gs.attr('about.ann', '')),
        ('about.version', gs.attr('about.version', '')),
        ('version', about.VERSION),
        ('platform', about.PLATFORM),
        ('~bin', '%s' % gs.home_dir_path('bin')),
        ('margo.exe', '%s (%s)' % _tp(_margo_bin())),
        ('go.exe', '%s (%s)' % _tp(sh.which('go') or 'go')),
        ('go.version', sh.GO_VERSION),
        ('GOROOT', '%s' % env.get('GOROOT', ns)),
        ('GOPATH', '%s' % env.get('GOPATH', ns)),
        ('GOBIN', '%s (should usually be `%s`)' % (env.get('GOBIN', ns), ns)),
        ('set.shell', str(gs.lst(gs.setting('shell')))),
        ('env.shell', env.get('SHELL', '')),
        ('shell.cmd', str(sh.cmd('${CMD}'))),
    ]

    if error_log:
        # Best-effort: a missing/unreadable log just omits the entry.
        try:
            with open(gs.home_path('log.txt'), 'r') as f:
                s = f.read().strip()
                sl.append(('error log', s))
        except Exception:
            pass

    return sl
def _sb(s):
    # Abbreviate paths under the GoSublime bin dir to `~bin...` for display.
    bdir = gs.home_dir_path('bin')
    if s.startswith(bdir):
        s = '~bin%s' % (s[len(bdir):])
    return s

def _tp(s):
    # (abbreviated-path, existence-status) pair for diagnostics output.
    return (_sb(s), ('ok' if os.path.exists(s) else 'missing'))

def _bins_exist():
    # True when the MarGo binary is already installed.
    return os.path.exists(_margo_bin())

def maybe_install():
    # Install only if no install has been started and no binary exists yet.
    if _inst_state() == '' and not _bins_exist():
        install('', True)
def install(aso_install_vesion, force_install):
    """Build/install the MarGo backend binary if needed.

    `aso_install_vesion` is the version last recorded as installed (from the
    aso settings store); `force_install` forces a rebuild even when binaries
    exist. Guarded by the per-version install-state attr so it runs at most
    once; always finishes by killing any stale server and cleaning up old
    binaries.
    """
    global INSTALL_EXE

    if _inst_state() != "":
        gs.notify(DOMAIN, 'Installation aborted. Install command already called for GoSublime %s.' % INSTALL_VERSION)
        return

    # Pin the binary name to the detected Go version.
    INSTALL_EXE = INSTALL_EXE.replace('_%s.exe' % about.DEFAULT_GO_VERSION, '_%s.exe' % sh.GO_VERSION)
    about.MARGO_EXE = INSTALL_EXE

    is_update = about.VERSION != INSTALL_VERSION

    gs.set_attr(_inst_name(), 'busy')

    init_start = time.time()

    # Skip the build when nothing changed and a binary is already present.
    if not is_update and not force_install and _bins_exist() and aso_install_vesion == INSTALL_VERSION:
        m_out = 'no'
    else:
        gs.notify('GoSublime', 'Installing MarGo')
        start = time.time()

        cmd = sh.Command(['go', 'build', '-v', '-x', '-o', INSTALL_EXE, 'gosubli.me/margo'])
        cmd.wd = gs.home_dir_path('bin')
        cmd.env = {
            'CGO_ENABLED': '0',
            'GOBIN': '',
            'GOPATH': gs.dist_path(),
        }

        ev.debug('%s.build' % DOMAIN, {
            'cmd': cmd.cmd_lst,
            'cwd': cmd.wd,
        })

        cr = cmd.run()
        m_out = 'cmd: `%s`\nstdout: `%s`\nstderr: `%s`\nexception: `%s`' % (
            cr.cmd_lst,
            cr.out.strip(),
            cr.err.strip(),
            cr.exc,
        )

        if cr.ok and _bins_exist():
            # Record the successful install on the main thread.
            def f():
                gs.aso().set('install_version', INSTALL_VERSION)
                gs.save_aso()

            sublime.set_timeout(f, 0)
        else:
            err_prefix = 'MarGo build failed'
            gs.error(DOMAIN, '%s\n%s' % (err_prefix, m_out))

            sl = [
                ('GoSublime error', '\n'.join((
                    err_prefix,
                    'This is possibly a bug or miss-configuration of your environment.',
                    'For more help, please file an issue with the following build output',
                    'at: https://github.com/DisposaBoy/GoSublime/issues/new',
                    'or alternatively, you may send an email to: gosublime@dby.me',
                    '\n',
                    m_out,
                )))
            ]
            sl.extend(sanity_check({}, False))
            gs.show_output('GoSublime', '\n'.join(sanity_check_sl(sl)))

    gs.set_attr(_inst_name(), 'done')

    if is_update:
        gs.show_output('GoSublime-source', '\n'.join([
            'GoSublime source has been updated.',
            'New version: `%s`, current version: `%s`' % (INSTALL_VERSION, about.VERSION),
            'Please restart Sublime Text to complete the update.',
        ]))
    else:
        e = sh.env()
        a = [
            'GoSublime init %s (%0.3fs)' % (INSTALL_VERSION, time.time() - init_start),
        ]

        sl = [('install margo', m_out)]
        sl.extend(sanity_check(e))
        a.extend(sanity_check_sl(sl))
        gs.println(*a)

        missing = [k for k in ('GOROOT', 'GOPATH') if not e.get(k)]
        if missing:
            missing_message = '\n'.join([
                'Missing required environment variables: %s' % ' '.join(missing),
                'See the `Quirks` section of USAGE.md for info',
            ])

            cb = lambda ok: gs.show_output(DOMAIN, missing_message, merge_domain=True, print_output=False)
            gs.error(DOMAIN, missing_message)
            gs.focus(gs.dist_path('USAGE.md'), focus_pat='^Quirks', cb=cb)

    # Force a restart of any previously-running server.
    killSrv()

    start = time.time()
    # acall('ping', {}, lambda res, err: gs.println('MarGo Ready %0.3fs' % (time.time() - start)))

    # Best-effort cleanup of binaries from older versions.
    report_x = lambda: gs.println("GoSublime: Exception while cleaning up old binaries", gs.traceback())
    try:
        bin_dirs = [
            gs.home_path('bin'),
        ]

        l = []
        for d in bin_dirs:
            try:
                for fn in os.listdir(d):
                    if fn != INSTALL_EXE and about.MARGO_EXE_PAT.match(fn):
                        l.append(os.path.join(d, fn))
            except Exception:
                pass

        for fn in l:
            try:
                gs.println("GoSublime: removing old binary: `%s'" % fn)
                os.remove(fn)
            except Exception:
                report_x()
    except Exception:
        report_x()
def calltip(fn, src, pos, quiet, f):
    """Asynchronously fetch calltips at byte offset `pos`.

    `f` receives (candidates_list, err); a status message is shown unless
    `quiet` is true.
    """
    tid = ''
    if not quiet:
        tid = gs.begin(DOMAIN, 'Fetching calltips')

    def cb(res, err):
        if tid:
            gs.end(tid)

        res = gs.dval(res.get('Candidates'), [])
        f(res, err)

    return acall('gocode_calltip', _complete_opts(fn, src, pos, True), cb)
def complete(fn, src, pos):
    """Blocking gocode completion at offset `pos`; returns (candidates, err)."""
    # Either setting name enables builtin completions (old and new names).
    builtins = (gs.setting('autocomplete_builtins') is True or gs.setting('complete_builtins') is True)
    res, err = bcall('gocode_complete', _complete_opts(fn, src, pos, builtins))
    res = gs.dval(res.get('Candidates'), [])
    return res, err
def _complete_opts(fn, src, pos, builtins):
    """Build the common options dict shared by gocode-based requests."""
    nv = sh.env()
    return {
        'Dir': gs.basedir_or_cwd(fn),
        'Builtins': builtins,
        'Fn': fn or '',
        'Src': src or '',
        'Pos': pos or 0,
        'Home': sh.vdir(),
        'Autoinst': gs.setting('autoinst'),
        'InstallSuffix': gs.setting('installsuffix', ''),
        'Env': {
            'GOROOT': nv.get('GOROOT', ''),
            'GOPATH': nv.get('GOPATH', ''),
        },
    }
def fmt(fn, src):
    """Format `src`: via the user's `fmt_cmd` shell command when configured,
    otherwise via MarGo's built-in `fmt`; returns (formatted_src, err)."""
    st = gs.settings_dict()
    x = st.get('fmt_cmd')
    if x:
        # External formatter: feed src on stdin, read formatted output.
        res, err = bcall('sh', {
            'Env': sh.env(),
            'Cmd': {
                'Name': x[0],
                'Args': x[1:],
                'Input': src or '',
            },
        })
        return res.get('out', ''), (err or res.get('err', ''))

    res, err = bcall('fmt', {
        'Fn': fn or '',
        'Src': src or '',
        'TabIndent': st.get('fmt_tab_indent'),
        'TabWidth': st.get('fmt_tab_width'),
    })
    return res.get('src', ''), err
def import_paths(fn, src, f):
    """Asynchronously fetch importable package paths; `f` gets (res, err)."""
    tid = gs.begin(DOMAIN, 'Fetching import paths')
    def cb(res, err):
        gs.end(tid)
        f(res, err)

    acall('import_paths', {
        'fn': fn or '',
        'src': src or '',
        'env': sh.env(),
        'InstallSuffix': gs.setting('installsuffix', ''),
    }, cb)

def pkg_name(fn, src):
    """Blocking query for the package name of `fn`/`src`; returns (name, err)."""
    res, err = bcall('pkg', {
        'fn': fn or '',
        'src': src or '',
    })
    return res.get('name'), err

def pkg_dirs(f):
    """Asynchronously fetch package directories; `f` gets (res, err)."""
    tid = gs.begin(DOMAIN, 'Fetching pkg dirs')
    def cb(res, err):
        gs.end(tid)
        f(res, err)

    acall('pkg_dirs', {
        'env': sh.env(),
    }, cb)
def a_pkgpaths(exclude, f):
    """Asynchronously fetch package paths, omitting those in `exclude`;
    `f` gets (res, err)."""
    tid = gs.begin(DOMAIN, '')
    def cb(res, err):
        gs.end(tid)
        f(res, err)

    m = sh.env()
    acall('pkgpaths', {
        # Only the env vars the backend needs, not the whole environment.
        'env': {
            'GOPATH': m.get('GOPATH'),
            'GOROOT': m.get('GOROOT'),
            '_pathsep': m.get('_pathsep'),
        },
        'exclude': exclude,
    }, cb)

def declarations(fn, src, pkg_dir, f):
    """Asynchronously fetch declarations in `fn`/`src` (or all of `pkg_dir`);
    `f` gets (res, err)."""
    tid = gs.begin(DOMAIN, 'Fetching declarations')
    def cb(res, err):
        gs.end(tid)
        f(res, err)

    return acall('declarations', {
        'fn': fn or '',
        'src': src,
        'env': sh.env(),
        'pkgDir': pkg_dir,
    }, cb)
def imports(fn, src, toggle):
    """Blocking request to add/remove the `toggle` imports in `src`;
    returns (res, err)."""
    return bcall('imports', {
        'autoinst': gs.setting('autoinst'),
        'env': sh.env(),
        'fn': fn or '',
        'src': src or '',
        'toggle': toggle or [],
        'tabIndent': gs.setting('fmt_tab_indent'),
        'tabWidth': gs.setting('fmt_tab_width'),
    })

def doc(fn, src, offset, f, mode='doc'):
    """Asynchronously fetch documentation ('doc') or usage ('usage') info
    at `offset`; any other mode falls back to 'doc'. `f` gets (res, err)."""
    tid = gs.begin(DOMAIN, 'Fetching doc info')
    def cb(res, err):
        gs.end(tid)
        f(res, err)

    # default to doc
    if mode not in ['usage', 'doc']:
        mode = 'doc'

    acall(mode, {
        'fn': fn or '',
        'src': src or '',
        'offset': offset or 0,
        'env': sh.env(),
        'tabIndent': gs.setting('fmt_tab_indent'),
        'tabWidth': gs.setting('fmt_tab_width'),
    }, cb)

def share(src, f):
    """Share `src` on play.golang.org after user confirmation; `f` gets
    (res, err) or a 'Share cancelled' error."""
    warning = 'Are you sure you want to share this file. It will be public on play.golang.org'
    if sublime.ok_cancel_dialog(warning):
        acall('share', {'Src': src or ''}, f)
    else:
        f({}, 'Share cancelled')
def acall(method, arg, cb):
    """Queue an asynchronous MarGo request; `cb` later receives (res, err)."""
    gs.mg9_send_q.put((method, arg, cb))
def bcall(method, arg):
    """Blocking IPC call: queue `method`/`arg` and wait up to the configured
    `ipc_timeout` (default 1s) for the response.

    Returns (result_dict, error_string); aborts immediately when the
    backend install has not completed.
    """
    if _inst_state() != "done":
        return {}, 'Blocking call(%s) aborted: Install is not done' % method

    q = gs.queue.Queue()
    acall(method, arg, lambda r, e: q.put((r, e)))
    try:
        res, err = q.get(True, gs.setting('ipc_timeout', 1))
        return res, err
    except gs.queue.Empty:
        # Narrowed from a bare `except:`: only a timed-out get is expected here.
        return {}, 'Blocking Call(%s): Timeout' % method
def expand_jdata(v):
    """Recursively walk decoded JSON data, decoding any 'base64:'-prefixed
    string into its raw value; returns the expanded structure.

    Dicts are modified in place; lists are rebuilt.
    """
    if gs.is_a(v, {}):
        for k in v:
            v[k] = expand_jdata(v[k])
    elif gs.is_a(v, []):
        v = [expand_jdata(e) for e in v]
    else:
        if gs.PY3K and isinstance(v, bytes):
            v = gs.ustr(v)

        if gs.is_a_string(v) and v.startswith('base64:'):
            # Undecodable payloads degrade to '' with a logged traceback.
            try:
                v = gs.ustr(base64.b64decode(v[7:]))
            except Exception:
                v = ''
                gs.error_traceback(DOMAIN)
    return v
def _recv():
    """Worker loop: decode response lines from gs.mg9_recv_q, match them to
    pending requests by token and invoke their callbacks.

    A callback returning True re-registers the request (for streaming
    replies). Inner exceptions are logged and the loop continues; an
    exception on the queue itself ends the loop.
    """
    while True:
        try:
            ln = gs.mg9_recv_q.get()
            try:
                ln = ln.strip()
                if ln:
                    r, _ = gs.json_decode(ln, {})
                    token = r.get('token', '')
                    tag = r.get('tag', '')
                    k = REQUEST_PREFIX+token
                    req = gs.attr(k, {})
                    gs.del_attr(k)
                    if req and req.f:
                        # Warn (but still deliver) when the server build
                        # doesn't match this plugin version.
                        if tag != TAG:
                            gs.notice(DOMAIN, "\n".join([
                                "GoSublime/MarGo appears to be out-of-sync.",
                                "Maybe restart Sublime Text.",
                                "Received tag `%s', expected tag `%s'. " % (tag, TAG),
                            ]))

                        err = r.get('error', '')

                        ev.debug(DOMAIN, "margo response: %s" % {
                            'method': req.method,
                            'tag': tag,
                            'token': token,
                            'dur': '%0.3fs' % (time.time() - req.tm),
                            'err': err,
                            'size': '%0.1fK' % (len(ln)/1024.0),
                        })

                        dat = expand_jdata(r.get('data', {}))
                        try:
                            # Callbacks returning True stay registered.
                            keep = req.f(dat, err) is True
                            if keep:
                                req.tm = time.time()
                                gs.set_attr(k, req)
                        except Exception:
                            gs.error_traceback(DOMAIN)
                    else:
                        ev.debug(DOMAIN, 'Ignoring margo: token: %s' % token)
            except Exception:
                gs.println(gs.traceback())
        except Exception:
            gs.println(gs.traceback())
            break
def _send():
    """Worker loop: pull (method, arg, cb) requests off gs.mg9_send_q,
    (re)starting the MarGo server process when needed, and write one
    JSON "header body" line per request to the server's stdin.

    Inner exceptions kill the server and continue; an exception on the
    queue itself ends the loop.
    """
    while True:
        try:
            try:
                method, arg, cb = gs.mg9_send_q.get()

                proc = gs.attr(PROC_ATTR_NAME)
                if not proc or proc.poll() is not None:
                    # Server missing or dead: (re)install if needed, then restart it.
                    killSrv()

                    if _inst_state() != "busy":
                        maybe_install()

                    while _inst_state() == "busy":
                        time.sleep(0.100)

                    mg_bin = _margo_bin()
                    cmd = [
                        mg_bin,
                        '-oom', gs.setting('margo_oom', 0),
                        '-poll', 30,
                        '-tag', TAG,
                    ]
                    c = sh.Command(cmd)
                    c.stderr = gs.LOGFILE
                    c.env = {
                        'GOGC': 10,
                        'XDG_CONFIG_HOME': gs.home_path(),
                    }

                    pr = c.proc()
                    if pr.ok:
                        proc = pr.p
                        err = ''
                    else:
                        proc = None
                        err = 'Exception: %s' % pr.exc

                    if err or not proc or proc.poll() is not None:
                        killSrv()
                        _call(cb, {}, 'Abort. Cannot start MarGo: %s' % err)
                        continue

                    gs.set_attr(PROC_ATTR_NAME, proc)
                    gsq.launch(DOMAIN, lambda: _read_stdout(proc))

                req = Request(f=cb, method=method)
                gs.set_attr(REQUEST_PREFIX+req.token, req)

                header, err = gs.json_encode(req.header())
                if err:
                    _cb_err(cb, 'Failed to construct ipc header: %s' % err)
                    continue

                body, err = gs.json_encode(arg)
                if err:
                    _cb_err(cb, 'Failed to construct ipc body: %s' % err)
                    continue

                ev.debug(DOMAIN, 'margo request: %s ' % header)

                ln = '%s %s\n' % (header, body)

                try:
                    if gs.PY3K:
                        proc.stdin.write(bytes(ln, 'UTF-8'))
                    else:
                        proc.stdin.write(ln)
                except Exception as ex:
                    # BUG FIX: previously formatted the stale `err` value from
                    # json_encode here; report the actual write exception.
                    _cb_err(cb, 'Cannot talk to MarGo: %s' % ex)
                    killSrv()
                    gs.println(gs.traceback())
            except Exception:
                killSrv()
                gs.println(gs.traceback())
        except Exception:
            gs.println(gs.traceback())
            break
def _call(cb, res, err):
    # Invoke a user callback, logging (not propagating) its exceptions.
    try:
        cb(res, err)
    except Exception:
        gs.error_traceback(DOMAIN)

def _cb_err(cb, err):
    # Log the error, then deliver it to the callback with an empty result.
    gs.error(DOMAIN, err)
    _call(cb, {}, err)

def _read_stdout(proc):
    # Pump MarGo's stdout lines onto the receive queue until EOF.
    try:
        while True:
            ln = proc.stdout.readline()
            if not ln:
                break

            gs.mg9_recv_q.put(gs.ustr(ln))
    except Exception:
        gs.println(gs.traceback())

        # On failure, tear the pipe down and reap the process.
        proc.stdout.close()
        proc.wait()
        proc = None

def killSrv():
    # Drop the stored MarGo process and close its pipes (best-effort);
    # the server exits when its stdin reaches EOF.
    p = gs.del_attr(PROC_ATTR_NAME)
    if p:
        try:
            p.stdout.close()
        except Exception:
            pass

        try:
            p.stdin.close()
        except Exception:
            pass

def on(token, cb):
    # Register a persistent handler for server-initiated messages with `token`.
    req = Request(f=cb, token=token)
    gs.set_attr(REQUEST_PREFIX+req.token, req)

def _dump(res, err):
    # Debug helper: pretty-print a (res, err) response pair.
    gs.println(json.dumps({
        'res': res,
        'err': err,
    }, sort_keys=True, indent=2))
# Start the IPC worker threads exactly once per plugin lifetime.
if not gs.checked(DOMAIN, 'launch ipc threads'):
    gsq.launch(DOMAIN, _send)
    gsq.launch(DOMAIN, _recv)

def on_mg_msg(res, err):
    """Handler for unsolicited 'margo.message' notifications; returning True
    keeps the handler registered for subsequent messages."""
    msg = res.get('message', '')
    if msg:
        print('GoSublime: MarGo: %s' % msg)
        gs.notify('MarGo', msg)
    return True

on('margo.message', on_mg_msg)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a SavedModel from a Trackable Python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import gc
import os
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.core.protobuf import saved_object_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import save_context
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import signature_serialization
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import tf_export
# Dtypes that cannot be copied into the exported graph as constants.
_UNCOPIABLE_DTYPES = frozenset((dtypes.resource, dtypes.variant))

# A container for an EagerTensor constant which has been copied to the exported
# Graph.
_CapturedConstant = collections.namedtuple("_CapturedConstant",
                                           ["eager_tensor", "graph_tensor"])
class _AugmentedGraphView(graph_view.ObjectGraphView):
  """An extendable graph which also tracks functions attached to objects.

  Extensions through `add_object` appear in the object graph and any checkpoints
  generated from it, even if they are not dependencies of the node they were
  attached to in the saving program. For example a `.signatures` attribute is
  added to exported SavedModel root objects without modifying the root object
  itself.

  Also tracks functions attached to objects in the graph, through the caching
  `list_functions` method. Enumerating functions only through this method
  ensures that we get a consistent view of functions, even if object attributes
  create new functions every time they are accessed.
  """

  def __init__(self, root):
    # Saveables are cached only when building a graph outside of a function
    # (i.e. not eager and not inside tf.function tracing).
    if (not context.executing_eagerly() and not ops.inside_function()):
      saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()
    else:
      saveables_cache = None
    super(_AugmentedGraphView, self).__init__(root, saveables_cache)
    # Object -> (name -> dep)
    self._extra_dependencies = object_identity.ObjectIdentityDictionary()
    # Cache of per-object function dicts so repeated lookups see one view.
    self._functions = object_identity.ObjectIdentityDictionary()
    # Cache shared between objects in the same object graph. This is passed to
    # each trackable object's `_list_extra_dependencies_for_serialization` and
    # `_list_functions_for_serialization` function.
    self._serialization_cache = object_identity.ObjectIdentityDictionary()

  def add_object(self, parent_node, name_in_parent, subgraph_root):
    """Attach an object to `parent_node`, overriding any existing dependency."""
    self._extra_dependencies.setdefault(parent_node,
                                        {})[name_in_parent] = subgraph_root

  def list_dependencies(self, obj):
    """Overrides a parent method to include `add_object` objects.

    Yields TrackableReference pairs; extra dependencies override same-named
    regular ones, except reserved-name conflicts raise ValueError.
    """
    extra_dependencies = self.list_extra_dependencies(obj)
    extra_dependencies.update(self._extra_dependencies.get(obj, {}))

    used_names = set()
    for name, dep in super(_AugmentedGraphView, self).list_dependencies(obj):
      used_names.add(name)
      if name in extra_dependencies:
        # Extra dependencies (except for `.signatures`, which is always added
        # when saving) should not have naming conflicts with dependencies
        # defined by the user.
        if name != signature_serialization.SIGNATURE_ATTRIBUTE_NAME:
          raise ValueError(
              "Error when exporting object {} of with identifier={}. The object"
              " has an attribute named {}, which is reserved. List of all "
              "reserved attributes: {}".format(
                  obj,
                  obj._object_identifier,  # pylint: disable=protected-access
                  name,
                  extra_dependencies.keys()))
        yield base.TrackableReference(name, extra_dependencies[name])
      else:
        yield base.TrackableReference(name, dep)
    # Emit any extra dependencies whose names were not already used.
    for name, dep in extra_dependencies.items():
      if name in used_names:
        continue
      yield base.TrackableReference(name, dep)

  def list_extra_dependencies(self, obj):
    # Delegates to the object's serialization hook, sharing the graph-wide
    # serialization cache.
    return obj._list_extra_dependencies_for_serialization(  # pylint: disable=protected-access
        self._serialization_cache)

  def list_functions(self, obj, extra_functions=None):
    """Return (and cache) the functions attached to `obj`, optionally merged
    with `extra_functions` (which are not added to the cache)."""
    obj_functions = self._functions.get(obj, None)
    if obj_functions is None:
      obj_functions = obj._list_functions_for_serialization(  # pylint: disable=protected-access
          self._serialization_cache)
      self._functions[obj] = obj_functions
    if extra_functions:
      obj_functions = obj_functions.copy()
      obj_functions.update(extra_functions)
    return obj_functions
class _SaveableView(object):
  """Provides a frozen view over a trackable root.
  This class helps to create a single stable view over an object to save. The
  saving code should access properties and functions via this class and not via
  the original object as there are cases where an object construct their
  trackable attributes and functions dynamically per call and will yield
  different objects if invoked more than once.
  Changes to the graph, for example adding objects, must happen in
  `checkpoint_view` (an `_AugmentedGraphView`) before the `_SaveableView` is
  constructed. Changes after the `_SaveableView` has been constructed will be
  ignored.
  """
  def __init__(self, checkpoint_view, options, wrapped_functions=None):
    """Initializes a SaveableView.
    Args:
      checkpoint_view: A GraphView object.
      options: A SaveOptions instance.
      wrapped_functions: Dictionary that maps concrete functions to functions
        that do not capture cached variable values.
    """
    self.options = options
    self.checkpoint_view = checkpoint_view
    # Freeze the object graph now: the list of trackable objects, their ids,
    # and any optimizer slot variables are captured at construction time.
    trackable_objects, node_ids, slot_variables = (
        self.checkpoint_view.objects_ids_and_slot_variables())
    self.nodes = trackable_objects
    self.node_ids = node_ids
    # Maps captured eager tensors -> node id of the owning object; filled in
    # later by `map_resources`.
    self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary()
    self.slot_variables = slot_variables
    self.concrete_functions = []
    # Trace save/restore functions for nodes that provide SaveableObjects so
    # those functions can also be added to the object graph below.
    self.saveable_objects_for_node, all_saveable_functions = (
        self._add_saveable_objects())
    saveable_object_functions = {
        "__SAVEABLE_FUNCTION_{}".format(n): fn
        for n, fn in enumerate(all_saveable_functions)}
    # Maps functions -> wrapped functions that capture variables
    self.wrapped_functions = wrapped_functions or {}
    # Maps names of concrete functions in the object to names of wrapped
    # functions. When writing the SavedFunction protos, the names of the
    # wrapped functions should be used in place of the original functions.
    self.function_name_map = {
        compat.as_text(original.name): compat.as_text(wrapped.name)
        for original, wrapped in self.wrapped_functions.items()}
    # Also add `Function`s as nodes.
    nodes_without_functions = list(self.nodes)
    seen_function_names = set()
    for node in nodes_without_functions:
      for function in checkpoint_view.list_functions(
          node, saveable_object_functions).values():
        if function not in self.node_ids:
          self.node_ids[function] = len(self.nodes)
          self.nodes.append(function)
        if isinstance(function, def_function.Function):
          # Force listing the concrete functions for the side effects:
          # - populate the cache for functions that have an input_signature
          # and have not been called.
          # - force side effects of creation of concrete functions, e.g. create
          # variables on first run.
          concrete_functions = (
              function._list_all_concrete_functions_for_serialization())  # pylint: disable=protected-access
        else:
          concrete_functions = [function]
        # Deduplicate concrete functions by name across all nodes.
        for concrete_function in concrete_functions:
          if concrete_function.name not in seen_function_names:
            seen_function_names.add(concrete_function.name)
            self.concrete_functions.append(concrete_function)
  def _add_saveable_objects(self):
    """Retrieves SaveablesObjects and traces their save/restore functions."""
    # Maps node -> local name -> (save function, restore function)
    saveable_objects_map = object_identity.ObjectIdentityDictionary()
    all_saveable_functions = []
    for node in self.nodes:
      if resource_variable_ops.is_resource_variable(node):
        # Resource (and TPU/Mirrored) variables are automatically revived with
        # their saveables defined, so there is no need to trace the save
        # and restore functions.
        continue
      saveable_map = saveable_object_util.trace_save_restore_functions(node)
      if saveable_map:
        saveable_objects_map[node] = saveable_map
        for save_fn, restore_fn in saveable_map.values():
          all_saveable_functions.append(save_fn)
          all_saveable_functions.append(restore_fn)
    return saveable_objects_map, all_saveable_functions
  @property
  def root(self):
    # By construction the root trackable is always node 0.
    return self.nodes[0]
  def fill_object_graph_proto(self, proto):
    """Populate the nodes, children and slot_variables of a SavedObjectGraph."""
    for node_id, node in enumerate(self.nodes):
      assert self.node_ids[node] == node_id
      object_proto = proto.nodes.add()
      object_proto.slot_variables.extend(self.slot_variables.get(node, ()))
      if isinstance(
          node,
          (def_function.Function, defun.ConcreteFunction, _CapturedConstant)):
        # Function and constant nodes have no children or saveable objects.
        continue
      for child in self.checkpoint_view.list_dependencies(node):
        child_proto = object_proto.children.add()
        child_proto.node_id = self.node_ids[child.ref]
        child_proto.local_name = child.name
      for local_name, ref_function in (
          self.checkpoint_view.list_functions(node).items()):
        child_proto = object_proto.children.add()
        child_proto.node_id = self.node_ids[ref_function]
        child_proto.local_name = local_name
      if node not in self.saveable_objects_for_node:
        continue
      # Record the node ids of the traced save/restore functions for each
      # SaveableObject factory on this node.
      for local_name, (save_fn, restore_fn) in (
          self.saveable_objects_for_node[node].items()):
        saveable_object_proto = object_proto.saveable_objects[local_name]
        saveable_object_proto.save_function = self.node_ids[save_fn]
        saveable_object_proto.restore_function = self.node_ids[restore_fn]
  def map_resources(self):
    """Makes new resource handle ops corresponding to existing resource tensors.
    Creates resource handle ops in the current default graph, whereas
    `accessible_objects` will be from an eager context. Resource mapping adds
    resource handle ops to the main GraphDef of a SavedModel, which allows the
    C++ loader API to interact with resources.
    Returns:
      A tuple of (object_map, resource_map, asset_info):
        object_map: A dictionary mapping from object in `accessible_objects` to
          replacement objects created to hold the new resource tensors.
        resource_map: A dictionary mapping from resource tensors extracted from
          `accessible_objects` to newly created resource tensors.
        asset_info: An _AssetInfo tuple describing external assets referenced
          from accessible_objects.
    """
    # Only makes sense when adding to the export Graph
    assert not context.executing_eagerly()
    # TODO(allenl): Handle MirroredVariables and other types of variables which
    # may need special casing.
    object_map = object_identity.ObjectIdentityDictionary()
    resource_map = {}
    asset_info = _AssetInfo(
        asset_defs=[],
        asset_initializers_by_resource={},
        asset_filename_map={},
        asset_index={})
    for node_id, obj in enumerate(self.nodes):
      if isinstance(obj, tracking.Asset):
        _process_asset(obj, asset_info, resource_map)
        self.captured_tensor_node_ids[obj.asset_path] = node_id
      elif isinstance(obj, base.Trackable):
        # Each trackable may contribute replacement objects and graph-side
        # resource tensors for its eager resources.
        node_object_map, node_resource_map = obj._map_resources(self.options)  # pylint: disable=protected-access
        for capturable in node_resource_map.keys():
          self.captured_tensor_node_ids[capturable] = node_id
        object_map.update(node_object_map)
        resource_map.update(node_resource_map)
    # Note: some concrete functions can have been realized when tracing other
    # functions, and might closure-capture tensors from their parent functions.
    # This is normal, but it means those concrete functions can't be serialized
    # as their own independent endpoints, so we filter them out here.
    bad_functions = []
    for concrete_function in self.concrete_functions:
      if not concrete_function.graph.saveable:
        raise ValueError(
            ("Unable to save function {name} for the following reason(s):\n" +
             "\n".join(concrete_function.graph.saving_errors)).format(
                 name=concrete_function.name))
      for capture in concrete_function.captured_inputs:
        if (tensor_util.is_tensor(capture) and
            capture.dtype not in _UNCOPIABLE_DTYPES and
            capture not in self.captured_tensor_node_ids):
          if hasattr(capture, "_cached_variable"):
            # A capture backed by a cached variable is handled by wrapping the
            # function so it no longer closes over the cached value.
            if concrete_function not in self.wrapped_functions:
              wrapped = self.wrapped_functions[concrete_function] = (
                  function_serialization.wrap_cached_variables(
                      concrete_function))
              self.function_name_map[compat.as_text(concrete_function.name)] = (
                  compat.as_text(wrapped.name))
            continue
          # Otherwise the capture must be a constant we can copy into the
          # exported graph; if not, the function cannot be an endpoint.
          capture_constant_value = tensor_util.constant_value(capture)
          if capture_constant_value is None:
            bad_functions.append(concrete_function)
            continue
          copied_tensor = constant_op.constant(capture_constant_value)
          node_id = len(self.nodes)
          node = _CapturedConstant(
              eager_tensor=capture, graph_tensor=copied_tensor)
          self.nodes.append(node)
          self.node_ids[capture] = node_id
          self.node_ids[node] = node_id
          self.captured_tensor_node_ids[capture] = node_id
          resource_map[capture] = copied_tensor
    # Swap in wrapped functions and drop functions with unserializable
    # captures from the endpoint list.
    self.concrete_functions = [
        self.wrapped_functions.get(x, x) for x in self.concrete_functions
        if x not in bad_functions
    ]
    return object_map, resource_map, asset_info
def _tensor_dict_to_tensorinfo(tensor_dict):
return {
key: utils_impl.build_tensor_info_internal(value)
for key, value in tensor_dict.items()
}
def _map_captures_to_created_tensors(original_captures, resource_map):
"""Maps eager tensors captured by a function to Graph resources for export.
Args:
original_captures: A dictionary mapping from tensors captured by the
function to interior placeholders for those tensors (inside the function
body).
resource_map: A dictionary mapping from resource tensors owned by the eager
context to resource tensors in the exported graph.
Returns:
A list of stand-in tensors which belong to the exported graph, corresponding
to the function's captures.
Raises:
AssertionError: If the function references a resource which is not part of
`resource_map`.
"""
export_captures = []
for exterior, interior in original_captures:
mapped_resource = resource_map.get(exterior, None)
if mapped_resource is None:
trackable_referrers = []
# Try to figure out where the resource came from by iterating over objects
# which reference it. This is slow and doesn't help us figure out how to
# match it to other objects when loading the SavedModel as a checkpoint,
# so we can't continue saving. But we can at least tell the user what
# needs attaching.
for primary_referrer in gc.get_referrers(exterior):
if isinstance(primary_referrer, base.Trackable):
trackable_referrers.append(primary_referrer)
for secondary_referrer in gc.get_referrers(primary_referrer):
if isinstance(secondary_referrer, base.Trackable):
trackable_referrers.append(secondary_referrer)
raise AssertionError(
("Tried to export a function which references untracked resource {}."
"TensorFlow objects (e.g. tf.Variable) captured by functions must "
"be tracked by assigning them to an attribute of a tracked object "
"or assigned to an attribute of the main object directly.\n\n"
"Trackable Python objects referring to this tensor "
"(from gc.get_referrers, limited to two hops):\n{}"
).format(interior,
"\n".join([repr(obj) for obj in trackable_referrers])))
export_captures.append(mapped_resource)
return export_captures
def _map_function_arguments_to_created_inputs(function_arguments, signature_key,
function_name):
"""Creates exterior placeholders in the exported graph for function arguments.
Functions have two types of inputs: tensors captured from the outside (eager)
context, and arguments to the function which we expect to receive from the
user at each call. `_map_captures_to_created_tensors` replaces
captured tensors with stand-ins (typically these are resource dtype tensors
associated with variables). `_map_function_inputs_to_created_inputs` runs over
every argument, creating a new placeholder for each which will belong to the
exported graph rather than the function body.
Args:
function_arguments: A list of argument placeholders in the function body.
signature_key: The name of the signature being exported, for error messages.
function_name: The name of the function, for error messages.
Returns:
A tuple of (mapped_inputs, exterior_placeholders)
mapped_inputs: A list with entries corresponding to `function_arguments`
containing all of the inputs of the function gathered from the exported
graph (both captured resources and arguments).
exterior_argument_placeholders: A dictionary mapping from argument names
to placeholders in the exported graph, containing the explicit arguments
to the function which a user is expected to provide.
Raises:
ValueError: If argument names are not unique.
"""
# `exterior_argument_placeholders` holds placeholders which are outside the
# function body, directly contained in a MetaGraph of the SavedModel. The
# function body itself contains nearly identical placeholders used when
# running the function, but these exterior placeholders allow Session-based
# APIs to call the function using feeds and fetches which name Tensors in the
# MetaGraph.
exterior_argument_placeholders = {}
mapped_inputs = []
for placeholder in function_arguments:
# `export_captures` contains an exhaustive set of captures, so if we don't
# find the input there then we now know we have an argument.
user_input_name = compat.as_str_any(
placeholder.op.get_attr("_user_specified_name"))
# If the internal placeholders for a function have names which were
# uniquified by TensorFlow, then a single user-specified argument name
# must refer to multiple Tensors. The resulting signatures would be
# confusing to call. Instead, we throw an exception telling the user to
# specify explicit names.
if user_input_name != placeholder.op.name:
# This should be unreachable, since concrete functions may not be
# generated with non-unique argument names.
raise ValueError(
("Got non-flat/non-unique argument names for SavedModel "
"signature '{}': more than one argument to '{}' was named '{}'. "
"Signatures have one Tensor per named input, so to have "
"predictable names Python functions used to generate these "
"signatures should avoid *args and Tensors in nested "
"structures unless unique names are specified for each. Use "
"tf.TensorSpec(..., name=...) to provide a name for a Tensor "
"input.").format(signature_key, compat.as_str_any(function_name),
user_input_name))
arg_placeholder = array_ops.placeholder(
shape=placeholder.shape,
dtype=placeholder.dtype,
name="{}_{}".format(signature_key, user_input_name))
exterior_argument_placeholders[user_input_name] = arg_placeholder
mapped_inputs.append(arg_placeholder)
return mapped_inputs, exterior_argument_placeholders
def _call_function_with_mapped_captures(function, args, resource_map):
  """Calls `function` in the exported graph, using mapped resource captures."""
  # Swap the eager captures for their stand-ins in the exported graph.
  stand_in_captures = _map_captures_to_created_tensors(function.graph.captures,
                                                       resource_map)
  # `_call_flat` lets us feed the remapped captures directly; a regular call
  # could not, since these tensors were not part of the original definition.
  return function._call_flat(args, stand_in_captures)  # pylint: disable=protected-access
def _generate_signatures(signature_functions, resource_map):
"""Validates and calls `signature_functions` in the default graph.
Args:
signature_functions: A dictionary mapping string keys to concrete TensorFlow
functions (e.g. from `signature_serialization.canonicalize_signatures`)
which will be used to generate SignatureDefs.
resource_map: A dictionary mapping from resource tensors in the eager
context to resource tensors in the Graph being exported. This dictionary
is used to re-bind resources captured by functions to tensors which will
exist in the SavedModel.
Returns:
Each function in the `signature_functions` dictionary is called with
placeholder Tensors, generating a function call operation and output
Tensors. The placeholder Tensors, the function call operation, and the
output Tensors from the function call are part of the default Graph.
This function then returns a dictionary with the same structure as
`signature_functions`, with the concrete functions replaced by SignatureDefs
implicitly containing information about how to call each function from a
TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference
the generated placeholders and Tensor outputs by name.
The caller is expected to include the default Graph set while calling this
function as a MetaGraph in a SavedModel, including the returned
SignatureDefs as part of that MetaGraph.
"""
signatures = {}
for signature_key, function in sorted(signature_functions.items()):
if function.graph.captures:
argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
else:
argument_inputs = function.graph.inputs
mapped_inputs, exterior_argument_placeholders = (
_map_function_arguments_to_created_inputs(argument_inputs,
signature_key, function.name))
outputs = _call_function_with_mapped_captures(
function, mapped_inputs, resource_map)
signatures[signature_key] = signature_def_utils.build_signature_def(
_tensor_dict_to_tensorinfo(exterior_argument_placeholders),
_tensor_dict_to_tensorinfo(outputs),
method_name=signature_constants.PREDICT_METHOD_NAME)
return signatures
def _trace_resource_initializers(accessible_objects):
"""Create concrete functions from `CapturableResource` objects."""
resource_initializers = []
def _wrap_initializer(obj):
obj._initialize() # pylint: disable=protected-access
return constant_op.constant(1.) # Dummy control output
def _wrap_obj_initializer(obj):
return lambda: _wrap_initializer(obj)
for obj in accessible_objects:
if isinstance(obj, tracking.CapturableResource):
resource_initializers.append(
def_function.function(
_wrap_obj_initializer(obj),
# All inputs are captures.
input_signature=[]).get_concrete_function())
return resource_initializers
# Bookkeeping accumulated while exporting assets: AssetFileDef protos,
# initializer ops, filename remapping, and per-asset indices. Built empty in
# `_SaveableView.map_resources` and filled by `_process_asset`.
_AssetInfo = collections.namedtuple(
    "_AssetInfo",
    [
        # List of AssetFileDef protocol buffers
        "asset_defs",
        # Map from asset variable resource Tensors to their init ops
        "asset_initializers_by_resource",
        # Map from base asset filenames to full paths
        "asset_filename_map",
        # Map from Asset to index of corresponding AssetFileDef
        "asset_index"
    ])
def _process_asset(trackable_asset, asset_info, resource_map):
  """Add `trackable_asset` to `asset_info` and `resource_map`."""
  path_tensor = trackable_asset.asset_path
  original_path = tensor_util.constant_value(path_tensor)
  try:
    # constant_value typically yields a numpy bytes scalar; decode it.
    original_path = str(original_path.astype(str))
  except AttributeError:
    # Already a string rather than a numpy array
    pass
  # Pick a unique in-SavedModel filename for this asset.
  asset_filename = builder_impl.get_asset_filename_to_add(
      asset_filepath=original_path,
      asset_filename_map=asset_info.asset_filename_map)
  asset_info.asset_filename_map[asset_filename] = original_path
  # TODO(andresp): Instead of mapping 1-1 between trackable asset
  # and asset in the graph def consider deduping the assets that
  # point to the same file.
  asset_path_initializer = array_ops.placeholder(
      shape=path_tensor.shape,
      dtype=dtypes.string,
      name="asset_path_initializer")
  asset_variable = resource_variable_ops.ResourceVariable(
      asset_path_initializer)
  asset_def = meta_graph_pb2.AssetFileDef()
  asset_def.filename = asset_filename
  asset_def.tensor_info.name = asset_path_initializer.name
  asset_info.asset_defs.append(asset_def)
  asset_info.asset_initializers_by_resource[path_tensor] = (
      asset_variable.initializer)
  # The index must be recorded after the append above.
  asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1
  resource_map[path_tensor] = asset_variable
def _fill_meta_graph_def(meta_graph_def, saveable_view, signature_functions,
                         namespace_whitelist):
  """Generates a MetaGraph which calls `signature_functions`.
  Args:
    meta_graph_def: The MetaGraphDef proto to fill.
    saveable_view: The _SaveableView being exported.
    signature_functions: A dictionary mapping signature keys to concrete
      functions containing signatures to add to the MetaGraph.
    namespace_whitelist: List of strings containing whitelisted op namespaces.
  Returns:
    A tuple of (_AssetInfo, Graph) containing the captured assets and
    exported Graph generated from tracing the saveable_view.
  """
  # List objects from the eager context to make sure Optimizers give us the
  # right Graph-dependent variables.
  accessible_objects = saveable_view.nodes
  resource_initializer_functions = _trace_resource_initializers(
      accessible_objects)
  exported_graph = ops.Graph()
  resource_initializer_ops = []
  with exported_graph.as_default():
    # Remap eager resources (variables, assets) to graph-side stand-ins.
    object_map, resource_map, asset_info = saveable_view.map_resources()
    for resource_initializer_function in resource_initializer_functions:
      asset_dependencies = []
      # Ensure asset-path variables are initialized before any resource
      # initializer that captures them runs.
      for capture in resource_initializer_function.graph.external_captures:
        asset_initializer = asset_info.asset_initializers_by_resource.get(
            capture, None)
        if asset_initializer is not None:
          asset_dependencies.append(asset_initializer)
      with ops.control_dependencies(asset_dependencies):
        resource_initializer_ops.append(
            _call_function_with_mapped_captures(resource_initializer_function,
                                                [], resource_map))
    resource_initializer_ops.extend(
        asset_info.asset_initializers_by_resource.values())
    with ops.control_dependencies(resource_initializer_ops):
      init_op = control_flow_ops.no_op()
    # Add the same op to the main_op collection and to the init_op
    # signature. The collection is for compatibility with older loader APIs;
    # only one will be executed.
    meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(
        init_op.name)
    meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
        signature_def_utils.op_signature_def(init_op,
                                             constants.INIT_OP_SIGNATURE_KEY))
  # Saving an object-based checkpoint again gathers variables. We need to do the
  # gathering from the eager context so Optimizers save the right set of
  # variables, but want any operations associated with the save/restore to be in
  # the exported graph (thus the `to_graph` argument).
  saver = functional_saver.MultiDeviceSaver(
      saveable_view.checkpoint_view.frozen_saveable_objects(
          object_map=object_map, to_graph=exported_graph,
          call_with_mapped_captures=functools.partial(
              _call_function_with_mapped_captures, resource_map=resource_map)))
  with exported_graph.as_default():
    # Build SignatureDefs and make sure every traced concrete function is
    # present in the exported GraphDef.
    signatures = _generate_signatures(signature_functions, resource_map)
    for concrete_function in saveable_view.concrete_functions:
      concrete_function.add_to_graph()
    saver_def = saver.to_proto()
    meta_graph_def.saver_def.CopyFrom(saver_def)
  graph_def = exported_graph.as_graph_def(add_shapes=True)
  # Fail early if the graph uses namespaced ops outside the whitelist.
  _verify_ops(graph_def, namespace_whitelist)
  meta_graph_def.graph_def.CopyFrom(graph_def)
  meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
  meta_graph_def.meta_info_def.tensorflow_version = versions.__version__
  meta_graph_def.meta_info_def.tensorflow_git_version = (
      versions.__git_version__)
  # We currently always strip default attributes.
  meta_graph_def.meta_info_def.stripped_default_attrs = True
  meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
      meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def))
  meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
  for signature_key, signature in signatures.items():
    meta_graph_def.signature_def[signature_key].CopyFrom(signature)
  meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
  return asset_info, exported_graph
def _verify_ops(graph_def, namespace_whitelist):
  """Verifies that all namespaced ops in the graph are whitelisted."""
  invalid_ops = []
  invalid_namespaces = set()
  for op_name in meta_graph.ops_used_by_graph_def(graph_def):
    # Namespaced (custom) ops are spelled "Namespace>OpName"; ops without a
    # ">" are core TensorFlow ops and are always allowed.
    if ">" not in op_name:
      continue
    namespace = op_name.split(">")[0]
    if namespace not in namespace_whitelist:
      invalid_ops.append(op_name)
      invalid_namespaces.add(namespace)
  if invalid_ops:
    raise ValueError(
        "Attempted to save ops from non-whitelisted namespaces to SavedModel: "
        "{}.\nPlease verify that these ops should be saved, since they must be "
        "available when loading the SavedModel. If loading from Python, you "
        "must import the library defining these ops. From C++, link the custom "
        "ops to the serving binary. Once you've confirmed this, please add the "
        "following namespaces to the `namespace_whitelist` argument in "
        "tf.saved_model.SaveOptions: {}.".format(invalid_ops,
                                                 invalid_namespaces))
def _serialize_object_graph(saveable_view, asset_file_def_index):
  """Save a SavedObjectGraph proto for `root`."""
  # The SavedObjectGraph mirrors the TrackableObjectGraph proto stored in a
  # checkpoint; it is embedded in the SavedModel.
  object_graph_proto = saved_object_graph_pb2.SavedObjectGraph()
  saveable_view.fill_object_graph_proto(object_graph_proto)
  coder = nested_structure_coder.StructureCoder()
  name_map = saveable_view.function_name_map
  for concrete_function in saveable_view.concrete_functions:
    serialized = function_serialization.serialize_concrete_function(
        concrete_function, saveable_view.captured_tensor_node_ids, coder)
    if serialized is None:
      continue
    # Functions that were wrapped are written under the wrapper's name.
    original_name = compat.as_text(concrete_function.name)
    proto_name = name_map.get(original_name, original_name)
    object_graph_proto.concrete_functions[proto_name].CopyFrom(serialized)
  for obj, obj_proto in zip(saveable_view.nodes, object_graph_proto.nodes):
    _write_object_proto(obj, obj_proto, asset_file_def_index, name_map)
  return object_graph_proto
def _write_object_proto(obj, proto, asset_file_def_index, function_name_map):
  """Saves an object into SavedObject proto.

  Args:
    obj: The trackable object to serialize.
    proto: The SavedObject proto to fill in.
    asset_file_def_index: Maps Asset objects to the index of their
      AssetFileDef in the MetaGraph.
    function_name_map: Maps original concrete-function names to the names of
      wrapped functions that should be serialized in their place.
  """
  if isinstance(obj, tracking.Asset):
    proto.asset.SetInParent()
    proto.asset.asset_file_def_index = asset_file_def_index[obj]
  elif resource_variable_ops.is_resource_variable(obj):
    proto.variable.SetInParent()
    if not obj.name.endswith(":0"):
      # Fix: interpolate the variable's name into the message (previously the
      # "%s" placeholder was never formatted).
      raise ValueError("Cowardly refusing to save variable %s because of"
                       " unexpected suffix which won't be restored." % obj.name)
    proto.variable.name = meta_graph._op_name(obj.name)  # pylint: disable=protected-access
    proto.variable.trainable = obj.trainable
    proto.variable.dtype = obj.dtype.as_datatype_enum
    proto.variable.synchronization = obj.synchronization.value
    proto.variable.aggregation = obj.aggregation.value
    proto.variable.shape.CopyFrom(obj.shape.as_proto())
    options = save_context.get_save_options()
    if options.experimental_variable_policy._save_variable_devices(  # pylint: disable=protected-access
    ):
      if hasattr(obj, "device"):
        proto.variable.device = obj.device
  elif isinstance(obj, def_function.Function):
    proto.function.CopyFrom(function_serialization.serialize_function(
        obj, function_name_map))
  elif isinstance(obj, defun.ConcreteFunction):
    proto.bare_concrete_function.CopyFrom(
        function_serialization.serialize_bare_concrete_function(
            obj, function_name_map))
  elif isinstance(obj, _CapturedConstant):
    proto.constant.operation = obj.graph_tensor.op.name
  elif isinstance(obj, tracking.CapturableResource):
    proto.resource.device = obj._resource_device  # pylint: disable=protected-access
  else:
    registered_type_proto = revived_types.serialize(obj)
    if registered_type_proto is None:
      # Fallback for types with no matching registration
      # pylint:disable=protected-access
      registered_type_proto = saved_object_graph_pb2.SavedUserObject(
          identifier=obj._object_identifier,
          version=versions_pb2.VersionDef(
              producer=1, min_consumer=1, bad_consumers=[]),
          metadata=obj._tracking_metadata)
      # pylint:enable=protected-access
    proto.user_object.CopyFrom(registered_type_proto)
  # Give the object a chance to modify the SavedObject proto.
  # This is currently used by MirroredVariables to optionally write their
  # component variables to the proto.
  #
  # This is not yet an official Trackable method, the only current use case
  # being MirroredVariables. See the method implementation there for more
  # documentation.
  if hasattr(obj, "_write_object_proto"):
    # Fix: `options` was previously only bound inside the resource-variable
    # branch, so this line raised UnboundLocalError for any non-variable
    # object implementing `_write_object_proto`. Fetch the active save
    # options here so the hook works for all object types.
    obj._write_object_proto(proto, save_context.get_save_options())  # pylint: disable=protected-access
def _export_debug_info(exported_graph):
  """Exports debug information from a graph.

  Args:
    exported_graph: A Graph that has been created by tracing a saveable view.

  Returns:
    Corresponding GraphDebugInfo with traces for ops in all functions of the
    exported_graph.
  """
  exported_operations = []
  # pylint: disable=protected-access
  for fn_name in exported_graph._functions:
    fn = exported_graph._get_function(fn_name)
    # Only eagerly-defined functions carry a graph we can walk for op traces.
    if isinstance(fn, defun._EagerDefinedFunction):
      exported_operations.extend(
          (fn_name, fn_op) for fn_op in fn.graph.get_operations())
  # pylint: enable=protected-access
  return error_interpolation.create_graph_debug_info_def(exported_operations)
@tf_export(
"saved_model.save",
v1=["saved_model.save", "saved_model.experimental.save"])
def save(obj, export_dir, signatures=None, options=None):
# pylint: disable=line-too-long
"""Exports the Trackable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
Example usage:
```python
class Adder(tf.Module):
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def add(self, x):
return x + x + 1.
to_export = Adder()
tf.saved_model.save(to_export, '/tmp/adder')
```
The resulting SavedModel is then servable with an input named "x", its value
having any shape and dtype float32.
The optional `signatures` argument controls which methods in `obj` will be
available to programs which consume `SavedModel`s, for example, serving
APIs. Python functions may be decorated with
`@tf.function(input_signature=...)` and passed as signatures directly, or
lazily with a call to `get_concrete_function` on the method decorated with
`@tf.function`.
If the `signatures` argument is omitted, `obj` will be searched for
`@tf.function`-decorated methods. If exactly one `@tf.function` is found, that
method will be used as the default signature for the SavedModel. This behavior
is expected to change in the future, when a corresponding
`tf.saved_model.load` symbol is added. At that point signatures will be
completely optional, and any `@tf.function` attached to `obj` or its
dependencies will be exported for use with `load`.
When invoking a signature in an exported SavedModel, `Tensor` arguments are
identified by name. These names will come from the Python function's argument
names by default. They may be overridden by specifying a `name=...` argument
in the corresponding `tf.TensorSpec` object. Explicit naming is required if
multiple `Tensor`s are passed through a single argument to the Python
function.
The outputs of functions used as `signatures` must either be flat lists, in
which case outputs will be numbered, or a dictionary mapping string keys to
`Tensor`, in which case the keys will be used to name outputs.
Signatures are available in objects returned by `tf.saved_model.load` as a
`.signatures` attribute. This is a reserved attribute: `tf.saved_model.save`
on an object with a custom `.signatures` attribute will raise an exception.
Since `tf.keras.Model` objects are also Trackable, this function can be
used to export Keras models. For example, exporting with a signature
specified:
```python
class Model(tf.keras.Model):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def serve(self, serialized):
...
m = Model()
tf.saved_model.save(m, '/tmp/saved_model/')
```
Exporting from a function without a fixed signature:
```python
class Model(tf.keras.Model):
@tf.function
def call(self, x):
...
m = Model()
tf.saved_model.save(
m, '/tmp/saved_model/',
signatures=m.call.get_concrete_function(
tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="inp")))
```
`tf.keras.Model` instances constructed from inputs and outputs already have a
signature and so do not require a `@tf.function` decorator or a `signatures`
argument. If neither are specified, the model's forward pass is exported.
```python
x = input_layer.Input((4,), name="x")
y = core.Dense(5, name="out")(x)
model = training.Model(x, y)
tf.saved_model.save(model, '/tmp/saved_model/')
# The exported SavedModel takes "x" with shape [None, 4] and returns "out"
# with shape [None, 5]
```
Variables must be tracked by assigning them to an attribute of a tracked
object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers
from `tf.keras.layers`, optimizers from `tf.train`) track their variables
automatically. This is the same tracking scheme that `tf.train.Checkpoint`
uses, and an exported `Checkpoint` object may be restored as a training
checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's
"variables/" subdirectory. Currently, variables are the only stateful objects
supported by `tf.saved_model.save`, but others (e.g. tables) will be supported
in the future.
`tf.function` does not hard-code device annotations from outside the function
body, instead of using the calling context's device. This means for example
that exporting a model that runs on a GPU and serving it on a CPU will
generally work, with some exceptions. `tf.device` annotations inside the body
of the function will be hard-coded in the exported model; this type of
annotation is discouraged. Device-specific operations, e.g. with "cuDNN" in
the name or with device-specific layouts, may cause issues. Currently a
`DistributionStrategy` is another exception: active distribution strategies
will cause device placements to be hard-coded in a function. Exporting a
single-device computation and importing under a `DistributionStrategy` is
not currently supported, but may be in the future.
SavedModels exported with `tf.saved_model.save` [strip default-valued
attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes)
automatically, which removes one source of incompatibilities when the consumer
of a SavedModel is running an older TensorFlow version than the
producer. There are however other sources of incompatibilities which are not
handled automatically, such as when the exported model contains operations
which the consumer does not have definitions for.
A single tf.function can generate many ConcreteFunctions. If a downstream tool
wants to refer to all concrete functions generated by a single tf.function you
can use the `function_aliases` argument to store a map from the alias name to
all concrete function names.
E.g.
```python
class MyModel:
@tf.function
def func():
...
@tf.function
def serve():
...
func()
model = MyModel()
signatures = {
'serving_default': model.serve.get_concrete_function(),
}
options = tf.saved_model.SaveOptions(function_aliases={
'my_func': func,
})
tf.saved_model.save(model, export_dir, signatures, options)
```
Args:
obj: A trackable object to export.
export_dir: A directory in which to write the SavedModel.
signatures: Optional, one of three types:
* a `tf.function` with an input signature specified, which will use the
default serving signature key,
* the result of `f.get_concrete_function` on a `@tf.function`-decorated
function `f`, in which case `f` will be used to generate a signature for
the SavedModel under the default serving signature key,
* a dictionary, which maps signature keys to either `tf.function`
instances with input signatures or concrete functions. Keys of such a
dictionary may be arbitrary strings, but will typically be from the
`tf.saved_model.signature_constants` module.
options: Optional, `tf.saved_model.SaveOptions` object that specifies
options for saving.
Raises:
ValueError: If `obj` is not trackable.
@compatibility(eager)
Not well supported when graph building. From TensorFlow 1.x,
`tf.compat.v1.enable_eager_execution()` should run first. Calling
tf.saved_model.save in a loop when graph building from TensorFlow 1.x will
add new save operations to the default graph each iteration.
May not be called from within a function body.
@end_compatibility
"""
options = options or save_options.SaveOptions()
# TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x
# compatible (no sessions) and share it with this export API rather than
# making a SavedModel proto and writing it directly.
saved_model = saved_model_pb2.SavedModel()
meta_graph_def = saved_model.meta_graphs.add()
_, exported_graph, object_saver, asset_info = _build_meta_graph(
obj, export_dir, signatures, options, meta_graph_def)
saved_model.saved_model_schema_version = constants.SAVED_MODEL_SCHEMA_VERSION
# Write the checkpoint, copy assets into the assets directory, and write out
# the SavedModel proto itself.
utils_impl.get_or_create_variables_dir(export_dir)
ckpt_options = checkpoint_options.CheckpointOptions(
experimental_io_device=options.experimental_io_device)
object_saver.save(
utils_impl.get_variables_path(export_dir), options=ckpt_options)
builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,
export_dir)
# Note that this needs to be the last file operation when saving the
# SavedModel. Users rely on checking saved_model_dir/saved_model.pb as an
# indication that the SavedModel is completely written.
if context.executing_eagerly():
try:
context.async_wait() # Ensure save operations have completed.
except errors.NotFoundError as err:
raise FileNotFoundError(
str(err) + "\n If trying to save on a different device from the "
"computational device, consider using setting the "
"`experimental_io_device` option on tf.saved_model.SaveOptions "
"to the io_device such as '/job:localhost'."
)
path = os.path.join(
compat.as_str(export_dir),
compat.as_str(constants.SAVED_MODEL_FILENAME_PB))
file_io.atomic_write_string_to_file(
path, saved_model.SerializeToString(deterministic=True))
# Clean reference cycles so repeated export()s don't make work for the garbage
# collector. Before this point, we need to keep references to captured
# constants in the saved graph.
ops.dismantle_graph(exported_graph)
def export_meta_graph(obj, filename, signatures=None, options=None):
  """Exports the MetaGraph proto of `obj` to a file.

  Runs the same graph-building procedure that `saved_model.save` uses, but
  writes only the serialized `MetaGraphDef` — checkpoint information is
  skipped — which is useful when all one wants is the graph defining the
  model.

  Args:
    obj: A trackable object to build the MetaGraph from.
    filename: The file into which to write the MetaGraph.
    signatures: Optional, either a `tf.function` with an input signature
      specified or the result of `f.get_concrete_function` on a
      `@tf.function`-decorated function `f`, in which case `f` will be used to
      generate a signature for the SavedModel under the default serving
      signature key. `signatures` may also be a dictionary, in which case it
      maps from signature keys to either `tf.function` instances with input
      signatures or concrete functions. The keys of such a dictionary may be
      arbitrary strings, but will typically be from the
      `tf.saved_model.signature_constants` module.
    options: Optional, `tf.saved_model.SaveOptions` object that specifies
      options for saving.
  """
  save_opts = options or save_options.SaveOptions()
  target_dir = os.path.dirname(filename)
  meta_graph, built_graph, _, _ = _build_meta_graph(
      obj, target_dir, signatures, save_opts)
  file_io.atomic_write_string_to_file(
      filename, meta_graph.SerializeToString(deterministic=True))
  # Break reference cycles (e.g. to captured constants in the saved graph) so
  # that repeated export()s don't accumulate work for the garbage collector.
  ops.dismantle_graph(built_graph)
def _build_meta_graph_impl(obj,
                           export_dir,
                           signatures,
                           options,
                           meta_graph_def=None):
  """Creates a MetaGraph containing the resources and functions of an object.

  Args:
    obj: A trackable object to export; must be an instance of
      `base.Trackable`.
    export_dir: Directory the SavedModel is being written to (used here only
      for the optional debug-info side file).
    signatures: Signatures to attach; if None, one is discovered from `obj`.
    options: A `save_options.SaveOptions` instance.
    meta_graph_def: Optional `MetaGraphDef` proto to populate in place; a new
      one is created when omitted.

  Returns:
    A tuple of (meta_graph_def, exported_graph, object_saver, asset_info).

  Raises:
    AssertionError: If called while tracing a `tf.function`.
    ValueError: If `obj` is not trackable.
  """
  # Saving must happen eagerly; tracing contexts cannot run the export.
  if ops.inside_function():
    raise AssertionError(
        "tf.saved_model.save is not supported inside a traced "
        "@tf.function. Move the call to the outer eagerly-executed "
        "context.")
  # pylint: enable=line-too-long
  if not isinstance(obj, base.Trackable):
    raise ValueError(
        "Expected a Trackable object for export, got {}.".format(obj))
  meta_graph_def = meta_graph_def or meta_graph_pb2.MetaGraphDef()

  checkpoint_graph_view = _AugmentedGraphView(obj)
  if signatures is None:
    # No explicit signatures: discover an exportable function on the object.
    signatures = signature_serialization.find_function_to_export(
        checkpoint_graph_view)

  signatures, wrapped_functions = (
      signature_serialization.canonicalize_signatures(signatures))
  signature_serialization.validate_saveable_view(checkpoint_graph_view)
  signature_map = signature_serialization.create_signature_map(signatures)
  # Attach the signature map to the object graph so it is saved/restored.
  checkpoint_graph_view.add_object(
      parent_node=checkpoint_graph_view.root,
      name_in_parent=signature_serialization.SIGNATURE_ATTRIBUTE_NAME,
      subgraph_root=signature_map)

  # Use _SaveableView to provide a frozen listing of properties and functions.
  # Note we run this twice since, while constructing the view the first time
  # there can be side effects of creating variables.
  _ = _SaveableView(checkpoint_graph_view, options)
  saveable_view = _SaveableView(checkpoint_graph_view, options,
                                wrapped_functions)
  object_saver = util.TrackableSaver(checkpoint_graph_view)
  asset_info, exported_graph = _fill_meta_graph_def(meta_graph_def,
                                                    saveable_view, signatures,
                                                    options.namespace_whitelist)
  if options.function_aliases:
    # Record alias -> concrete-function-name mappings so downstream tools can
    # refer to every ConcreteFunction generated by an aliased tf.function.
    function_aliases = meta_graph_def.meta_info_def.function_aliases
    for alias, func in options.function_aliases.items():
      for fdef in func._stateful_fn._function_cache.all_values():  # pylint: disable=protected-access
        function_aliases[fdef.name] = alias
      for fdef in func._stateless_fn._function_cache.all_values():  # pylint: disable=protected-access
        function_aliases[fdef.name] = alias

  object_graph_proto = _serialize_object_graph(saveable_view,
                                               asset_info.asset_index)
  meta_graph_def.object_graph_def.CopyFrom(object_graph_proto)

  # Save debug info, if requested.
  if options.save_debug_info:
    graph_debug_info = _export_debug_info(exported_graph)
    file_io.atomic_write_string_to_file(
        os.path.join(
            utils_impl.get_or_create_debug_dir(export_dir),
            constants.DEBUG_INFO_FILENAME_PB),
        graph_debug_info.SerializeToString(deterministic=True))

  return meta_graph_def, exported_graph, object_saver, asset_info
def _build_meta_graph(obj,
                      export_dir,
                      signatures,
                      options,
                      meta_graph_def=None):
  """Creates a MetaGraph under a SaveContext.

  Thin wrapper around `_build_meta_graph_impl` that makes the active
  `SaveOptions` observable (via the save context) to any save-aware code
  that runs while the MetaGraph is built.
  """
  with save_context.save_context(options):
    return _build_meta_graph_impl(
        obj, export_dir, signatures, options, meta_graph_def)
| |
# :[diStorm64}: Python binding
# Copyright (c) 2009, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Credits banner for tools that embed this binding: names both the author of
# the diStorm disassembler and the author of this Python wrapper.
info = (
    ":[diStorm64}: by Gil Dabah, http://ragestorm.net/distorm/\n"
    "Python binding by Mario Vilas, http://breakingcode.wordpress.com/\n"
)

# Version-control keyword tag identifying the revision of this module.
__revision__ = "$Id: __init__.py 376 2009-08-24 16:42:29Z QvasiModo $"

# Public API of the module.
__all__ = [
    'Decode',
    'DecodeGenerator',
    'Decode16Bits',
    'Decode32Bits',
    'Decode64Bits',
]
from ctypes import *
from exceptions import *
from os.path import split, join
#==============================================================================
# Load the diStorm library

# This binding uses the 64-bit-offset build of the diStorm API, so code
# addresses are passed as unsigned 64-bit integers.
SUPPORT_64BIT_OFFSET = True
_OffsetType = c_ulonglong

try:
    # The DLL is expected to live in the same directory as this module.
    _distorm_path = split(__file__)[0]
    _distorm_file = join(_distorm_path, 'distorm64.dll')
    _distorm = cdll.LoadLibrary(_distorm_file)
except OSError:
    raise ImportError, "Error loading diStorm: dynamic link library not found"

try:
    # distorm_decode64 is the entry point for the 64-bit-offset API.
    distorm_decode = _distorm.distorm_decode64
except AttributeError:
    raise ImportError, "Error loading diStorm: exported function not found"

#==============================================================================
# diStorm C interface

MAX_TEXT_SIZE = 60          # fixed size of each text field in _WString
MAX_INSTRUCTIONS = 1000     # size of the output array per decode call

# Return codes of distorm_decode().
DECRES_NONE = 0
DECRES_SUCCESS = 1
DECRES_MEMORYERR = 2        # output array too small for all instructions
DECRES_INPUTERR = 3         # bad arguments

# C-level typedefs used in the function prototype below.
_DecodeType = c_uint
_DecodeResult = c_uint
class _WString (Structure):
    """Mirror of the C ``_WString`` struct: a fixed-size counted string."""
    _fields_ = [
        ('length', c_uint),             # unused
        ('p', c_char * MAX_TEXT_SIZE),  # NUL-terminated text
    ]
class _DecodedInst (Structure):
    """Mirror of the C ``_DecodedInst`` struct filled in by distorm_decode()."""
    _fields_ = [
        ('mnemonic', _WString),         # instruction mnemonic
        ('operands', _WString),         # operands text
        ('instructionHex', _WString),   # hex dump of the raw bytes
        ('size', c_uint),               # instruction size in bytes
        ('offset', _OffsetType),        # memory address of the instruction
    ]
# Declare the ctypes prototype for distorm_decode64() so calls are
# marshalled correctly.
distorm_decode.restype = _DecodeResult
distorm_decode.argtypes = [
    _OffsetType,                # codeOffset
    c_void_p,                   # code
    c_int,                      # codeLen
    _DecodeType,                # dt
    POINTER(_DecodedInst),      # result
    c_uint,                     # maxInstructions
    POINTER(c_uint)             # usedInstructionsCount
]

#==============================================================================
# diStorm Python interface

Decode16Bits = 0        # 80286 decoding
Decode32Bits = 1        # IA-32 decoding
Decode64Bits = 2        # AMD64 decoding

# Width of the offset type in bits (sizeof() is in bytes, 8 bits per byte).
OffsetTypeSize = sizeof(_OffsetType) * 8
def DecodeGenerator(codeOffset, code, dt = Decode32Bits):
    """
    Disassemble a buffer of code, lazily yielding one instruction at a time.

    @type  codeOffset: long
    @param codeOffset: Memory address where the code is located.
        This is B{not} an offset into the code!
        It's the actual memory address where it was read from.

    @type  code: str
    @param code: Code to disassemble.

    @type  dt: int
    @param dt: Disassembly type. Can be one of the following:

         * L{Decode16Bits}: 80286 decoding
         * L{Decode32Bits}: IA-32 decoding
         * L{Decode64Bits}: AMD64 decoding

    @rtype:  generator of tuple( long, int, str, str )
    @return: Generator of tuples. Each tuple represents an assembly instruction
        and contains:
         - Memory address of instruction.
         - Size of instruction in bytes.
         - Disassembly line of instruction.
         - Hexadecimal dump of instruction.

    @raise ValueError: Invalid arguments.
    @raise MemoryError: Not enough memory to disassemble.
    """

    # Sanitize the code parameter.
    code = str(code)

    # Stop the iteration if there's no code to disassemble.
    if code == '':
        return

    # Sanitize the codeOffset parameter (None/0 both mean address zero).
    if not codeOffset:
        codeOffset = 0

    # Check the validity of the decode type.
    if dt not in (Decode16Bits, Decode32Bits, Decode64Bits):
        raise ValueError, "Invalid decode type value: %r" % (dt,)

    # Prepare input buffer.
    codeLen         = len(code)                     # total bytes to disassemble
    code            = create_string_buffer(code)    # allocate code buffer
    p_code          = addressof(code)               # pointer to code buffer

    # Prepare output buffer.
    l_result        = MAX_INSTRUCTIONS              # length of output array
    result          = (_DecodedInst * l_result)()   # allocate output array
    p_result        = pointer(result)               # pointer to output array
    p_result        = cast(p_result, POINTER(_DecodedInst))

    # Prepare used instructions counter (filled in by the C library).
    usedInstructionsCount = c_uint(0)
    p_usedInstructionsCount = byref(usedInstructionsCount)

    # Loop while we have code left to disassemble.
    while codeLen > 0:

        # Call the decode function.
        # NOTE(review): min(codeLen, l_result) compares a byte count against
        # an instruction count as the codeLen argument — looks intentional as
        # a cap but worth confirming against the diStorm API.
        status = distorm_decode(codeOffset, p_code, min(codeLen, l_result), dt,
                     p_result, l_result, p_usedInstructionsCount)
        if status == DECRES_INPUTERR:
            raise ValueError, "Invalid arguments passed to distorm_decode()"
        if status == DECRES_MEMORYERR:
            raise MemoryError, "Not enough memory to disassemble"
        used = usedInstructionsCount.value
        if not used:
            break
##            raise AssertionError, "Internal error while disassembling"

        # Yield each decoded instruction but the last one.
        for index in xrange(used - 1):
            di   = result[index]
            asm  = '%s %s' % (di.mnemonic.p, di.operands.p)
            pydi = ( di.offset, di.size, asm, di.instructionHex.p )
            yield pydi

        # Continue decoding from the last instruction found.
        # This prevents truncating the last instruction.
        # If there are no more instructions to decode, yield
        # the last one and stop the iteration.
        di    = result[used - 1]
        delta = di.offset - codeOffset
        if delta <= 0:
            asm  = '%s %s' % (di.mnemonic.p, di.operands.p)
            pydi = ( di.offset, di.size, asm, di.instructionHex.p )
            yield pydi
            break
        # Advance the address, the raw-code pointer and the remaining length
        # by the number of bytes successfully consumed this round.
        codeOffset = codeOffset + delta
        p_code     = p_code + delta
        codeLen    = codeLen - delta

        # Reset the used instructions counter.
        usedInstructionsCount.value = 0
def Decode(offset, code, type = Decode32Bits):
    """
    Disassemble a buffer of code and return all instructions at once.

    Convenience wrapper that exhausts L{DecodeGenerator} into a list.

    @type  offset: long
    @param offset: Memory address where the code is located.
        This is B{not} an offset into the code!
        It's the actual memory address where it was read from.

    @type  code: str
    @param code: Code to disassemble.

    @type  type: int
    @param type: Disassembly type. Can be one of the following:

         * L{Decode16Bits}: 80286 decoding
         * L{Decode32Bits}: IA-32 decoding
         * L{Decode64Bits}: AMD64 decoding

    @rtype:  list of tuple( long, int, str, str )
    @return: List of tuples. Each tuple represents an assembly instruction
        and contains:
         - Memory address of instruction.
         - Size of instruction in bytes.
         - Disassembly line of instruction.
         - Hexadecimal dump of instruction.

    @raise ValueError: Invalid arguments.
    """
    # ("type" shadows the builtin, but the name is part of the public API.)
    return [ instruction
             for instruction in DecodeGenerator(offset, code, type) ]
| |
"""
Deuce Valere - Tests - Client - Valere - Validate Storage
"""
import functools
import json
import mock
from deuceclient.tests import *
import httpretty
from deucevalere.tests import *
from deucevalere.tests.client_base import TestValereClientBase
from deucevalere.tests.client_base import calculate_ref_modified
@httpretty.activate
class TestValereClientValidateStorage(TestValereClientBase):
    """Tests for ValereClient.validate_storage().

    Every test mocks the same four Deuce endpoints (metadata listing/HEAD/
    DELETE and storage listing) via httpretty; the shared registration code
    lives in _register_endpoints() below.
    """

    def setUp(self):
        super().setUp()
        self.project_id = create_project_name()
        self.vault_id = create_vault_name()
        self.generate_blocks(count=20)

    def tearDown(self):
        super().tearDown()

    def _register_endpoints(self, empty_metadata=False, empty_storage=False):
        """Register the httpretty endpoints shared by every test.

        :param empty_metadata: when True the metadata block listing returns
                               an empty JSON list instead of the generated
                               blocks
        :param empty_storage: when True the storage block listing returns an
                              empty JSON list instead of the generated blocks
        """
        def metadata_listing_callback(request, uri, headers):
            if empty_metadata:
                return (200, headers, json.dumps([]))
            return self.metadata_block_listing_success(request,
                                                       uri,
                                                       headers)

        def metadata_head_callback(request, uri, headers):
            return self.metadata_block_head_success(request,
                                                    uri,
                                                    headers)

        def metadata_delete_callback(request, uri, headers):
            return (204, headers, '')

        def storage_listing_callback(request, uri, headers):
            if empty_storage:
                return (200, headers, json.dumps([]))
            return self.storage_block_listing_success(request,
                                                      uri,
                                                      headers)

        url = get_blocks_url(self.apihost, self.vault.vault_id)
        httpretty.register_uri(httpretty.GET,
                               url,
                               body=metadata_listing_callback)

        httpretty.register_uri(httpretty.HEAD,
                               self.get_metadata_block_pattern_matcher(),
                               body=metadata_head_callback)

        httpretty.register_uri(httpretty.DELETE,
                               self.get_metadata_block_pattern_matcher(),
                               body=metadata_delete_callback)

        surl = get_storage_blocks_url(self.apihost, self.vault.vault_id)
        httpretty.register_uri(httpretty.GET,
                               surl,
                               body=storage_listing_callback)

    def test_validate_storage(self):
        """Basic Validate Storage Test

        Note: "orphaned" data is only what was deleted
              this is just due to how the test is structured.
        """
        self.secondary_setup(manager_start=None,
                             manager_end=None)
        self._register_endpoints()

        self.guarantee_expired(expired_count=10,
                               expired_age=datetime.timedelta(minutes=1))

        self.client.validate_metadata()
        self.client.cleanup_expired_blocks()
        self.client.build_cross_references()

        self.assertIsNone(self.manager.storage.orphaned)

        self.client.validate_storage()

        self.assertIsInstance(self.manager.storage.orphaned, list)
        self.assertEqual(len(self.manager.metadata.deleted),
                         len(self.manager.storage.orphaned))

    def test_validate_storage_existing_storage_list(self):
        """Test with an existing list of storage blocks

        Note: "orphaned" data is only what was deleted
              this is just due to how the test is structured
        """
        self.secondary_setup(manager_start=None,
                             manager_end=None)
        self._register_endpoints()

        self.guarantee_expired(expired_count=10,
                               expired_age=datetime.timedelta(minutes=1))

        self.client.validate_metadata()
        self.client.cleanup_expired_blocks()
        self.client.build_cross_references()

        # Pre-populate the storage list before validation.
        self.client.get_storage_list()

        self.assertIsNotNone(self.manager.storage.current)
        self.assertIsNone(self.manager.storage.orphaned)

        self.client.validate_storage()

        self.assertIsInstance(self.manager.storage.orphaned, list)
        self.assertEqual(len(self.manager.metadata.deleted),
                         len(self.manager.storage.orphaned))

    def test_validate_storage_no_orphans_no_storage_data(self):
        """Test with no storage blocks available
        """
        self.secondary_setup(manager_start=None,
                             manager_end=None)
        self._register_endpoints(empty_storage=True)

        self.guarantee_expired(expired_count=10,
                               expired_age=datetime.timedelta(minutes=1))

        self.client.validate_metadata()
        self.client.cleanup_expired_blocks()
        self.client.build_cross_references()

        self.client.get_storage_list()

        # This is the point of this test:
        self.assertIsNotNone(self.manager.storage.current)
        self.assertIsNone(self.manager.storage.orphaned)
        self.manager.storage.orphaned = []
        self.assertIsNotNone(self.manager.storage.orphaned)

        self.client.validate_storage()

        self.assertIsInstance(self.manager.storage.orphaned, list)
        self.assertEqual(0,
                         len(self.manager.storage.orphaned))

    def test_validate_storage_no_cross_references(self):
        """Test with no metadata blocks available

        Note: This test essentially makes all blocks in
              storage be detected as orphaned blocks
        """
        self.secondary_setup(manager_start=None,
                             manager_end=None)
        self._register_endpoints(empty_metadata=True)

        self.guarantee_expired(expired_count=10,
                               expired_age=datetime.timedelta(minutes=1))

        # Note: this will have zero cross references because there are no
        #       blocks
        self.client.get_block_list()
        self.client.build_cross_references()

        self.client.get_storage_list()

        self.assertIsNone(self.manager.storage.orphaned)

        self.client.validate_storage()

        self.assertIsInstance(self.manager.storage.orphaned, list)
        self.assertEqual(len(self.meta_data),
                         len(self.manager.storage.orphaned))

    def test_validate_storage_no_overlap(self):
        """Test where storage holds blocks with no metadata counterpart

        Note: This test essentially makes all blocks in
              storage be detected as orphaned blocks
        """
        self.secondary_setup(manager_start=None,
                             manager_end=None)
        self._register_endpoints(empty_metadata=True)

        self.guarantee_expired(expired_count=10,
                               expired_age=datetime.timedelta(minutes=1))

        fully_orphaned_blocks = self.generate_orphaned_blocks_no_metadata(
            count=10)

        # Note: this will have zero cross references because there are no
        #       blocks
        self.client.get_block_list()
        self.client.build_cross_references()

        self.client.get_storage_list()

        # Give each fully-orphaned block its real size so validation can
        # account for it.
        for storage_id in fully_orphaned_blocks:
            self.vault.storageblocks[storage_id].set_block_size(
                len(self.storage_data[storage_id]))

        self.assertIsNone(self.manager.storage.orphaned)

        self.client.validate_storage()

        self.assertIsInstance(self.manager.storage.orphaned, list)
        self.assertEqual(len(self.storage_data),
                         len(self.manager.storage.orphaned))
| |
"""
Darwin Platform Module
"""
# Copyright (C) 2007 Invisigoth - See LICENSE file for details
import os
import struct
import ctypes
import signal
import ctypes.util as c_util
import envi.memory as e_mem
import vtrace
import vtrace.archs.i386 as v_i386
import vtrace.archs.amd64 as v_amd64
import vtrace.platforms.base as v_base
import vtrace.platforms.posix as v_posix
# Convenience alias: "address of" reads better at call sites than "pointer".
addrof = ctypes.pointer

# The OSX ptrace defines...
PT_TRACE_ME     = 0     # child declares it's being traced
PT_READ_I       = 1     # read word in child's I space
PT_READ_D       = 2     # read word in child's D space
PT_READ_U       = 3     # read word in child's user structure
PT_WRITE_I      = 4     # write word in child's I space
PT_WRITE_D      = 5     # write word in child's D space
PT_WRITE_U      = 6     # write word in child's user structure
PT_CONTINUE     = 7     # continue the child
PT_KILL         = 8     # kill the child process
PT_STEP         = 9     # single step the child
PT_ATTACH       = 10    # trace some running process
PT_DETACH       = 11    # stop tracing a process
PT_SIGEXC       = 12    # signals as exceptions for current_proc
PT_THUPDATE     = 13    # signal for thread#
PT_ATTACHEXC    = 14    # attach to running process with signal exception
PT_FORCEQUOTA   = 30    # Enforce quota for root
PT_DENY_ATTACH  = 31
# Top-level identifiers (sysctl CTL_* namespace)
CTL_UNSPEC  = 0     # unused
CTL_KERN    = 1     # "high kernel": proc, limits
CTL_VM      = 2     # virtual memory
CTL_VFS     = 3     # file system, mount type is next
CTL_NET     = 4     # network, see socket.h
CTL_DEBUG   = 5     # debugging parameters
CTL_HW      = 6     # generic cpu/io
CTL_MACHDEP = 7     # machine dependent
CTL_USER    = 8     # user-level
CTL_MAXID   = 9     # number of valid top-level ids
# sysctl identifiers under CTL_KERN (KERN_* namespace).
KERN_OSTYPE             = 1     # string: system version
KERN_OSRELEASE          = 2     # string: system release
KERN_OSREV              = 3     # int: system revision
KERN_VERSION            = 4     # string: compile time info
KERN_MAXVNODES          = 5     # int: max vnodes
KERN_MAXPROC            = 6     # int: max processes
KERN_MAXFILES           = 7     # int: max open files
KERN_ARGMAX             = 8     # int: max arguments to exec
KERN_SECURELVL          = 9     # int: system security level
KERN_HOSTNAME           = 10    # string: hostname
KERN_HOSTID             = 11    # int: host identifier
KERN_CLOCKRATE          = 12    # struct: struct clockrate
KERN_VNODE              = 13    # struct: vnode structures
KERN_PROC               = 14    # struct: process entries
KERN_FILE               = 15    # struct: file entries
KERN_PROF               = 16    # node: kernel profiling info
KERN_POSIX1             = 17    # int: POSIX.1 version
KERN_NGROUPS            = 18    # int: # of supplemental group ids
KERN_JOB_CONTROL        = 19    # int: is job control available
KERN_SAVED_IDS          = 20    # int: saved set-user/group-ID
KERN_BOOTTIME           = 21    # struct: time kernel was booted
KERN_NISDOMAINNAME      = 22    # string: YP domain name
KERN_DOMAINNAME         = KERN_NISDOMAINNAME
KERN_MAXPARTITIONS      = 23    # int: number of partitions/disk
KERN_KDEBUG             = 24    # int: kernel trace points
KERN_UPDATEINTERVAL     = 25    # int: update process sleep time
KERN_OSRELDATE          = 26    # int: OS release date
KERN_NTP_PLL            = 27    # node: NTP PLL control
KERN_BOOTFILE           = 28    # string: name of booted kernel
KERN_MAXFILESPERPROC    = 29    # int: max open files per proc
KERN_MAXPROCPERUID      = 30    # int: max processes per uid
KERN_DUMPDEV            = 31    # dev_t: device to dump on
KERN_IPC                = 32    # node: anything related to IPC
KERN_DUMMY              = 33    # unused
KERN_PS_STRINGS         = 34    # int: address of PS_STRINGS
KERN_USRSTACK32         = 35    # int: address of USRSTACK
KERN_LOGSIGEXIT         = 36    # int: do we log sigexit procs?
KERN_SYMFILE            = 37    # string: kernel symbol filename
KERN_PROCARGS           = 38
#/* 39 was KERN_PCSAMPLES... now deprecated
KERN_NETBOOT            = 40    # int: are we netbooted? 1=yes,0=no
KERN_PANICINFO          = 41    # node: panic UI information
KERN_SYSV               = 42    # node: System V IPC information
KERN_AFFINITY           = 43    # xxx
KERN_TRANSLATE          = 44    # xxx
KERN_CLASSIC            = KERN_TRANSLATE    # XXX backwards compat
KERN_EXEC               = 45    # xxx
KERN_CLASSICHANDLER     = KERN_EXEC # XXX backwards compatibility
KERN_AIOMAX             = 46    # int: max aio requests
KERN_AIOPROCMAX         = 47    # int: max aio requests per process
KERN_AIOTHREADS         = 48    # int: max aio worker threads
KERN_PROCARGS2          = 49
KERN_COREFILE           = 50    # string: corefile format string
KERN_COREDUMP           = 51    # int: whether to coredump at all
KERN_SUGID_COREDUMP     = 52    # int: whether to dump SUGID cores
KERN_PROCDELAYTERM      = 53    # int: set/reset current proc for delayed termination during shutdown
KERN_SHREG_PRIVATIZABLE = 54    # int: can shared regions be privatized ?
KERN_PROC_LOW_PRI_IO    = 55    # int: set/reset current proc for low priority I/O
KERN_LOW_PRI_WINDOW     = 56    # int: set/reset throttle window - milliseconds
KERN_LOW_PRI_DELAY      = 57    # int: set/reset throttle delay - milliseconds
KERN_POSIX              = 58    # node: posix tunables
KERN_USRSTACK64         = 59    # LP64 user stack query
KERN_NX_PROTECTION      = 60    # int: whether no-execute protection is enabled
KERN_TFP                = 61    # Task for pid settings
KERN_PROCNAME           = 62    # setup process program name(2*MAXCOMLEN)
KERN_THALTSTACK         = 63    # for compat with older x86 and does nothing
KERN_SPECULATIVE_READS  = 64    # int: whether speculative reads are disabled
KERN_OSVERSION          = 65    # for build number i.e. 9A127
KERN_SAFEBOOT           = 66    # are we booted safe?
KERN_LCTX               = 67    # node: login context
KERN_RAGEVNODE          = 68
KERN_TTY                = 69    # node: tty settings
KERN_CHECKOPENEVT       = 70    # spi: check the VOPENEVT flag on vnodes at open time
KERN_MAXID              = 71    # number of valid kern ids

# # KERN_RAGEVNODE types
KERN_RAGE_PROC      = 1
KERN_RAGE_THREAD    = 2
KERN_UNRAGE_PROC    = 3
KERN_UNRAGE_THREAD  = 4

# # KERN_OPENEVT types
KERN_OPENEVT_PROC   = 1
KERN_UNOPENEVT_PROC = 2

# # KERN_TFP types
KERN_TFP_POLICY = 1

# # KERN_TFP_POLICY values . All policies allow task port for self
KERN_TFP_POLICY_DENY    = 0     # Deny Mode: None allowed except privileged
KERN_TFP_POLICY_DEFAULT = 2     # Default Mode: related ones allowed and upcall authentication

# # KERN_KDEBUG types
KERN_KDEFLAGS       = 1
KERN_KDDFLAGS       = 2
KERN_KDENABLE       = 3
KERN_KDSETBUF       = 4
KERN_KDGETBUF       = 5
KERN_KDSETUP        = 6
KERN_KDREMOVE       = 7
KERN_KDSETREG       = 8
KERN_KDGETREG       = 9
KERN_KDREADTR       = 10
KERN_KDPIDTR        = 11
KERN_KDTHRMAP       = 12
# # Don't use 13 as it is overloaded with KERN_VNODE
KERN_KDPIDEX        = 14
KERN_KDSETRTCDEC    = 15
KERN_KDGETENTROPY   = 16

# # KERN_PANICINFO types
KERN_PANICINFO_MAXSIZE  = 1     # quad: panic UI image size limit
KERN_PANICINFO_IMAGE    = 2     # panic UI in 8-bit kraw format

# * KERN_PROC subtypes
KERN_PROC_ALL       = 0     # everything
KERN_PROC_PID       = 1     # by process id
KERN_PROC_PGRP      = 2     # by process group id
KERN_PROC_SESSION   = 3     # by session of pid
KERN_PROC_TTY       = 4     # by controlling tty
KERN_PROC_UID       = 5     # by effective uid
KERN_PROC_RUID      = 6     # by real uid
KERN_PROC_LCID      = 7     # by login context id
# Stupid backwards perms defs...
# (Mach VM protection bits; note the ordering differs from the usual
# rwx bit layout used elsewhere in vtrace.)
VM_PROT_READ    = 1
VM_PROT_WRITE   = 2
VM_PROT_EXECUTE = 4

# Thread status types... (flavors for thread_get_state/thread_set_state)
x86_THREAD_STATE32      = 1
x86_FLOAT_STATE32       = 2
x86_EXCEPTION_STATE32   = 3
x86_THREAD_STATE64      = 4
x86_FLOAT_STATE64       = 5
x86_EXCEPTION_STATE64   = 6
x86_THREAD_STATE        = 7
x86_FLOAT_STATE         = 8
x86_EXCEPTION_STATE     = 9
x86_DEBUG_STATE32       = 10
x86_DEBUG_STATE64       = 11
x86_DEBUG_STATE         = 12
THREAD_STATE_NONE       = 13
class X86_STATE_HDR(ctypes.Structure):
    """Header preceding each x86 thread-state flavor (x86_state_hdr_t)."""
    _fields_ = [
        ('flavor', ctypes.c_uint32),    # which *_STATE* flavor follows
        ('count', ctypes.c_uint32),     # size of the state in 32-bit words
    ]
class STRUCT_X86_THREAD_STATE32(ctypes.Structure):
    """General-purpose register state for a 32-bit x86 thread."""
    _fields_ = [
        #('tsh',    X86_STATE_HDR),
        ('eax',     ctypes.c_uint32),
        ('ebx',     ctypes.c_uint32),
        ('ecx',     ctypes.c_uint32),
        ('edx',     ctypes.c_uint32),
        ('edi',     ctypes.c_uint32),
        ('esi',     ctypes.c_uint32),
        ('ebp',     ctypes.c_uint32),
        ('esp',     ctypes.c_uint32),
        ('ss',      ctypes.c_uint32),
        ('eflags',  ctypes.c_uint32),
        ('eip',     ctypes.c_uint32),
        ('cs',      ctypes.c_uint32),
        ('ds',      ctypes.c_uint32),
        ('es',      ctypes.c_uint32),
        ('fs',      ctypes.c_uint32),
        ('gs',      ctypes.c_uint32),
    ]
class STRUCT_X86_EXCEPTION_STATE32(ctypes.Structure):
    """Exception state for a 32-bit x86 thread (trap number and fault addr)."""
    _fields_ = [
        ('trapno',      ctypes.c_uint32),
        ('err',         ctypes.c_uint32),
        ('faultvaddr',  ctypes.c_uint32),
    ]
class STRUCT_X86_DEBUG_STATE32(ctypes.Structure):
    """Debug-register state for a 32-bit x86 thread: fields debug0..debug7."""
    # Bug fix: the original left the format string unapplied
    # (('debug%d', ...)), yielding eight identically-named fields; applying
    # '% i' produces the intended debug0..debug7 names.
    _fields_ = [ ('debug%d' % i, ctypes.c_uint32) for i in range(8) ]
class STRUCT_X86_THREAD_STATE64(ctypes.Structure):
    """
    64-bit x86 general purpose register file (x86_THREAD_STATE64 flavor).

    The X86_STATE_HDR ('tsh') is deliberately left out; the flavor and
    count are passed separately to thread_get_state()/thread_set_state().
    All members are 64-bit words (segment selectors included, per the
    kernel's layout).
    """
    _regnames = ('rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp', 'rsp',
                 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15',
                 'rip', 'rflags', 'cs', 'fs', 'gs')
    _fields_ = [(reg, ctypes.c_uint64) for reg in _regnames]
class STRUCT_X86_EXCEPTION_STATE64(ctypes.Structure):
    """
    64-bit x86 exception state: 32-bit trap number and error code
    followed by a 64-bit faulting virtual address.
    """
    _fields_ = ([(fname, ctypes.c_uint32) for fname in ('trapno', 'err')]
                + [('faultvaddr', ctypes.c_uint64)])
class STRUCT_X86_DEBUG_STATE64(ctypes.Structure):
    """
    64-bit x86 debug registers DR0-DR7 (x86_DEBUG_STATE64 flavor).

    BUG FIX: as with STRUCT_X86_DEBUG_STATE32, the original never applied
    the % operator, leaving every field literally named 'debug%d'.  Name
    them debug0..debug7 so register-context import/export can find them.
    """
    _fields_ = [('debug%d' % i, ctypes.c_uint64) for i in range(8)]
###########################################################################
#
# mach port enumerations
#
MACH_PORT_NULL = 0
# MACH_PORT_RIGHT_* definitions are used as arguments (e.g. to
# mach_port_allocate) and as inputs to MACH_PORT_TYPE() below.
MACH_PORT_RIGHT_SEND = 0
MACH_PORT_RIGHT_RECEIVE = 1
MACH_PORT_RIGHT_SEND_ONCE = 2
MACH_PORT_RIGHT_PORT_SET = 3
MACH_PORT_RIGHT_DEAD_NAME = 4
MACH_PORT_RIGHT_LABELH = 5
MACH_PORT_RIGHT_NUMBER = 6
def MACH_PORT_TYPE(right):
    """Translate a MACH_PORT_RIGHT_* value into its MACH_PORT_TYPE_* bitmask."""
    # Type masks live 16 bits above the right's ordinal, one bit per right.
    shift = right + 16
    return 1 << shift
# Port type bitmasks derived from the rights above.
MACH_PORT_TYPE_SEND = MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND)
MACH_PORT_TYPE_RECEIVE = MACH_PORT_TYPE(MACH_PORT_RIGHT_RECEIVE)
MACH_PORT_TYPE_SEND_ONCE = MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND_ONCE)
MACH_PORT_TYPE_PORT_SET = MACH_PORT_TYPE(MACH_PORT_RIGHT_PORT_SET)
MACH_PORT_TYPE_DEAD_NAME = MACH_PORT_TYPE(MACH_PORT_RIGHT_DEAD_NAME)
MACH_PORT_TYPE_LABELH = MACH_PORT_TYPE(MACH_PORT_RIGHT_LABELH)
###########################################################################
#
# mach message types and structures
#
MACH_MSG_TIMEOUT_NONE = 0
MACH_MSG_OPTION_NONE = 0
# mach_msg() option flags (may be OR'd together).
MACH_SEND_MSG = 0x00000001
MACH_RCV_MSG = 0x00000002
MACH_RCV_LARGE = 0x00000004
MACH_SEND_TIMEOUT = 0x00000010
MACH_SEND_INTERRUPT = 0x00000040 # libmach implements
MACH_SEND_CANCEL = 0x00000080
MACH_SEND_ALWAYS = 0x00010000 # internal use only
MACH_SEND_TRAILER = 0x00020000
MACH_RCV_TIMEOUT = 0x00000100
MACH_RCV_NOTIFY = 0x00000200
MACH_RCV_INTERRUPT = 0x00000400 # libmach implements
MACH_RCV_OVERWRITE = 0x00001000
# Return codes from mach_msg...
MACH_RCV_TIMED_OUT = 0x10004003
# Port dispositions carried in msgh_bits for ports moved/copied in messages.
MACH_MSG_TYPE_MOVE_RECEIVE = 16 # Must hold receive rights
MACH_MSG_TYPE_MOVE_SEND = 17 # Must hold send rights
MACH_MSG_TYPE_MOVE_SEND_ONCE = 18 # Must hold sendonce rights
MACH_MSG_TYPE_COPY_SEND = 19 # Must hold send rights
MACH_MSG_TYPE_MAKE_SEND = 20 # Must hold receive rights
MACH_MSG_TYPE_MAKE_SEND_ONCE = 21 # Must hold receive rights
MACH_MSG_TYPE_COPY_RECEIVE = 22 # Must hold receive rights
# ctypes aliases mirroring the mach C typedefs used in prototypes below.
size_t = ctypes.c_ulong
mach_port_t = ctypes.c_uint32
mach_port_name_t = ctypes.c_uint32
mach_port_right_t = ctypes.c_uint32
mach_msg_size_t = ctypes.c_uint32
mach_msg_bits_t = ctypes.c_uint32
mach_msg_id_t = ctypes.c_uint32
ipc_space_t = ctypes.c_uint32
kern_return_t = ctypes.c_uint32
class mach_msg_header_t(ctypes.Structure):
    """Fixed header beginning every mach message; layout must match the kernel ABI."""
    _fields_ = [
        ('msgh_bits', mach_msg_bits_t),           # disposition / complex-bit flags
        ('msgh_size', mach_msg_size_t),           # total message size in bytes
        ('msgh_remote_port', mach_port_t),        # destination port when sending
        ('msgh_local_port', mach_port_t),
        ('msgh_reserved', mach_msg_size_t),
        ('msgh_id', mach_msg_id_t),
    ]
class mach_msg_body_t(ctypes.Structure):
    """Body header of a 'complex' mach message: count of descriptors that follow."""
    _fields_ = [
        ('msgh_descriptor_count', ctypes.c_uint32),
    ]
class mach_msg_port_descriptor_t(ctypes.Structure):
    """Descriptor for a port right carried inside a complex mach message."""
    _fields_ = [
        ('name', mach_port_t),        # the port name/right being transferred
        ('pad1', mach_msg_size_t),
        ('pad2', ctypes.c_uint32),
    ]
class NDR_record_t(ctypes.Structure):
    """Network Data Representation record that prefixes MIG message payloads."""
    _fields_ = [(fname, ctypes.c_uint8) for fname in (
        'mig_vers', 'if_vers', 'reserved', 'mig_encoding',
        'int_rep', 'char_rep', 'float_rep', 'reserved2',
    )]
# MIG typedefs used by the exception message structures below.
exception_type_t = ctypes.c_uint32
mach_msg_type_number_t = ctypes.c_uint32
exception_data_t = ctypes.POINTER(ctypes.c_uint32)
# the message type we receive from the kernel for exceptions
class exc_msg(ctypes.Structure):
    """Inbound mach exception message (catch_exception_raise request)."""
    _fields_ = [
        ('Head', mach_msg_header_t),
        #('data', ctypes.c_uint8 * 1024),
        ('body', mach_msg_body_t),
        ('thread', mach_msg_port_descriptor_t),   # port naming the faulting thread
        ('task', mach_msg_port_descriptor_t),     # port naming the faulting task
        ('NDR', NDR_record_t),
        ('exception', exception_type_t),          # one of the EXC_* values
        ('codeCnt', mach_msg_type_number_t),      # number of valid entries in codes
        # Oversized fixed buffer instead of a variable-length exception_data_t
        # so the whole message can be received in one mach_msg() call.
        ('codes', ctypes.c_uint32 * 128),
        ##('codes', exception_data_t),
        ##('pad', ctypes.c_uint8 * 512)
    ]
# The response message we send back
class exc_rep_msg(ctypes.Structure):
    """Outbound exception reply; body is filled in by machhelper's exc server glue."""
    _fields_ = [
        ('Head', mach_msg_header_t),
        ('data', ctypes.c_uint8 * 1024),  # opaque reply payload (NDR + RetCode live here)
        #('NDR', NDR_record_t),
        #('RetCode', ctypes.c_uint32)
    ]
##########################################################################
# mach generic exception codes
#
EXC_BAD_ACCESS = 1
EXC_BAD_INSTRUCTION = 2
EXC_ARITHMETIC = 3
EXC_EMULATION = 4
EXC_SOFTWARE = 5
EXC_BREAKPOINT = 6
EXC_SYSCALL = 7
EXC_MACH_SYSCALL = 8
EXC_RPC_ALERT = 9
EXC_CRASH = 10
# EXC_SOFTWARE will have code[0] == EXC_SOFT_SIGNAL for posix sigs
EXC_SOFT_BREAK = 0x00001 # LLDB: (exc_type == EXC_BREAKPOINT || ((exc_type == EXC_SOFTWARE) && exc_data[0] == 1))
EXC_SOFT_SIGNAL = 0x10003 # Unix signal exceptions
# Exception masks handed to task_set_exception_ports() (bit N == EXC_ type N).
EXC_MASK_MACHINE = 0
EXC_MASK_BAD_ACCESS = 1 << EXC_BAD_ACCESS
EXC_MASK_BAD_INSTRUCTION = 1 << EXC_BAD_INSTRUCTION
EXC_MASK_ARITHMETIC = 1 << EXC_ARITHMETIC
EXC_MASK_EMULATION = 1 << EXC_EMULATION
EXC_MASK_SOFTWARE = 1 << EXC_SOFTWARE
EXC_MASK_BREAKPOINT = 1 << EXC_BREAKPOINT
EXC_MASK_SYSCALL = 1 << EXC_SYSCALL
EXC_MASK_MACH_SYSCALL = 1 << EXC_MACH_SYSCALL
EXC_MASK_RPC_ALERT = 1 << EXC_RPC_ALERT
EXC_MASK_CRASH = 1 << EXC_CRASH
EXC_MASK_ALL = (EXC_MASK_BAD_ACCESS |
                EXC_MASK_BAD_INSTRUCTION |
                EXC_MASK_ARITHMETIC |
                EXC_MASK_EMULATION |
                EXC_MASK_SOFTWARE |
                EXC_MASK_BREAKPOINT |
                EXC_MASK_SYSCALL |
                EXC_MASK_MACH_SYSCALL |
                EXC_MASK_RPC_ALERT |
                EXC_MASK_CRASH |
                EXC_MASK_MACHINE)
# Exception "behavior" selectors for task_set_exception_ports().
EXCEPTION_DEFAULT = 1 # Send a catch_exception_raise message including the identity.
EXCEPTION_STATE = 2 # Send a catch_exception_raise_state message including the thread state.
EXCEPTION_STATE_IDENTITY = 3 # Send a catch_exception_raise_state_identity message including the thread identity and state.
MACH_EXCEPTION_CODES = 0x80000000 # Send 64-bit code and subcode in the exception header
boolean_t = ctypes.c_uint32
pid_t = ctypes.c_uint32
#u_int = ctypes.c_uint32
#pvoid = ctypes.c_void_p
#fixpt_t = ctypes.c_uint32
#u_quad_t = ctypes.c_ulonglong
#sigset_t = ctypes.c_uint32
thread_t = ctypes.c_uint32
####################################################################
#
# mach VM related stuff....
#
vm_prot_t = ctypes.c_uint32
vm_inherit_t = ctypes.c_uint32
vm_behavior_t = ctypes.c_uint32
memory_object_offset_t = ctypes.c_ulonglong
VM_REGION_BASIC_INFO_64 = 9
class vm_region_basic_info_64(ctypes.Structure):
    # Mirrors the kernel's vm_region_basic_info_64 struct filled in by
    # mach_vm_region(VM_REGION_BASIC_INFO_64).
    # NOTE(review): ctypes' default alignment pads the 64-bit 'offset'
    # member to an 8-byte boundary; the kernel header declares this struct
    # packed(4) -- confirm ctypes.sizeof() here matches what the kernel
    # writes (the debug print of the size in this file suggests this was
    # being checked).
    _fields_ = [
        ('protection', vm_prot_t),        # current VM_PROT_* bits
        ('max_protection', vm_prot_t),    # maximum permitted protection
        ('inheritance', vm_inherit_t),
        ('shared', boolean_t),
        ('reserved', boolean_t),
        ('offset', memory_object_offset_t),
        ('behavior', vm_behavior_t),
        ('user_wired_count', ctypes.c_ushort),
    ]
# Debug aid: show the computed struct size (Python 2 print statement).
print 'vm_region_basic_info_64',ctypes.sizeof(vm_region_basic_info_64)
# mach_vm_region() takes the info buffer size in natural_t (32-bit word) units.
VM_REGION_BASIC_INFO_COUNT_64 = ctypes.sizeof(vm_region_basic_info_64) / 4
#mach_helper = ctypes.CDLL('./darwin_mach.dylib')
#
# These are used by the machhelper library code
#
class DarwinDebugCtx(ctypes.Structure):
    # Context structure shared with the machhelper library code.
    # NOTE(review): field meanings inferred from names/usage elsewhere in
    # this module -- dbgtask is presumably the debugger's own task port and
    # task the target's; confirm against machhelper.dylib's source.
    _fields_ = [
        ('dbgtask', mach_port_t),
        ('task', mach_port_t),
        ('portset', mach_port_name_t),
        ('excport', mach_port_name_t),
        ('msgin', ctypes.c_void_p),
        ('msgout', ctypes.c_void_p),
    ]
class ProcessListEntry(ctypes.Structure):
    """One row of the array returned by machhelper's platformPs(): pid and name."""
    _fields_ = [
        ('pid', ctypes.c_uint),
        ('name', ctypes.c_char * 17),  # fixed buffer: 16 chars + NUL
    ]
darwindir = os.path.dirname(__file__)
####################################################################
class DarwinMixin(v_posix.PosixMixin, v_posix.PtraceMixin):
    """
    vtrace platform mixin for Darwin / Mac OS X.

    Debug events are driven by mach exception messages: a receive port
    (self.excport) is installed as the target task's exception port, and
    platformWait() blocks on it via mach_msg().  ptrace() is still used
    where Darwin requires it (exec/attach, single-step, continue for
    posix-signal exceptions).
    """

    def __init__(self):
        # Initialize both POSIX layers first; they provide the signal and
        # ptrace based exec/attach plumbing this mixin builds on.
        v_posix.PosixMixin.__init__(self)
        v_posix.PtraceMixin.__init__(self)
        self.libc = ctypes.CDLL(c_util.find_library('c'))
        # Our own task port; needed for port allocation and task_for_pid().
        self.myport = self.libc.mach_task_self()
        # Declare prototypes so ctypes marshals pointer/size args correctly.
        self.libc.mach_port_allocate.argtypes = [ipc_space_t, mach_port_right_t, ctypes.POINTER(mach_port_name_t)]
        self.libc.mach_port_allocate.restype = kern_return_t
        self.libc.mach_vm_read.argtypes = [mach_port_t, size_t, size_t, ctypes.POINTER(ctypes.c_void_p), ctypes.POINTER(ctypes.c_uint32)]
        self.libc.mach_vm_read.restype = kern_return_t
        self.libc.ptrace.restype = ctypes.c_int
        self.libc.ptrace.argtypes = [ctypes.c_int, ctypes.c_uint32, ctypes.c_size_t, ctypes.c_int]
        # Companion dylib shipped next to this module (process listing and
        # the MIG exception-server glue used by buildExcResp()).
        machhelp_path = os.path.join(darwindir, 'machhelper.dylib')
        self.machhelper = ctypes.CDLL(machhelp_path)
        self.machhelper.platformPs.restype = ctypes.POINTER(ProcessListEntry)
        self.useptrace = False
        # Exception port, wrapped in a port set we can mach_msg() on.
        self.portset = self.newMachPort(MACH_PORT_RIGHT_PORT_SET)
        self.excport = self.newMachRWPort()
        self.addPortToSet(self.excport)

    def platformPs(self):
        """Return a list of (pid, name) tuples via the machhelper library."""
        ret = []
        y = self.machhelper.platformPs()
        i = 0
        # The helper terminates the returned array with pid == 0xffffffff.
        while y[i].pid != 0xffffffff:
            ret.append((y[i].pid, y[i].name))
            i += 1
        # FIXME free!  (the helper-allocated array is leaked here)
        ret.reverse()
        return ret

    def platformParseBinary(self, filename, baseaddr, normname):
        # No Mach-O parsing implemented yet; library metadata is unavailable.
        pass

    def platformGetFds(self):
        # Not implemented on Darwin; returns an empty descriptor list.
        print "FIXME platformGetFds() no workie on darwin yet..."
        return []

    def platformExec(self, cmdline):
        """Exec a new target via ptrace, then wire up its mach exception port."""
        pid = v_posix.PtraceMixin.platformExec(self, cmdline)
        self.task = self.taskForPid(pid)
        self.setExceptionPort()
        return pid

    def _getThreadPorts(self):
        """Return the list of thread ports for the target task (task_threads)."""
        count = ctypes.c_uint32()
        tlist = ctypes.POINTER(thread_t)()
        assert( self.libc.task_threads(self.task, addrof(tlist), addrof(count)) == 0 )
        ret = [ tlist[i] for i in range(count.value)]
        # NOTE(review): vm_deallocate normally takes (task, address, size);
        # the size argument appears to be missing here -- confirm.
        self.libc.vm_deallocate(self.task, tlist)
        return ret

    def platformSuspendThread(self, tid):
        # tid is a mach thread port.
        self.libc.thread_suspend(tid)

    def platformResumeThread(self, tid):
        self.libc.thread_resume(tid)

    def platformGetThreads(self):
        """Map thread id -> thread "handle"; on Darwin both are the thread port."""
        ret = {}
        for tid in self._getThreadPorts():
            ret[tid] = tid
        return ret

    def platformGetMaps(self):
        """Walk the target's VM regions with mach_vm_region and build vtrace maps."""
        maps = []
        address = ctypes.c_ulong(0)
        mapsize = ctypes.c_ulong(0)
        name = ctypes.c_uint32(0)
        count = ctypes.c_uint32(VM_REGION_BASIC_INFO_COUNT_64)
        info = vm_region_basic_info_64()
        while True:
            r = self.libc.mach_vm_region(self.task, addrof(address),
                                         addrof(mapsize), VM_REGION_BASIC_INFO_64,
                                         addrof(info), addrof(count),
                                         addrof(name))
            # If we get told "invalid address", we have crossed into kernel land...
            if r == 1:
                break
            if r != 0:
                self.libc.mach_error("mach_vm_region", r)
                raise Exception('vm_region Failed for 0x%.8x: 0x%.8x' % (address.value,r))
            # Translate VM_PROT_* bits into vtrace's e_mem permission flags.
            perms = 0
            p = info.protection
            if p & VM_PROT_READ:
                perms |= e_mem.MM_READ
            if p & VM_PROT_WRITE:
                perms |= e_mem.MM_WRITE
            if p & VM_PROT_EXECUTE:
                perms |= e_mem.MM_EXEC
            if info.shared:
                perms |= e_mem.MM_SHARED
            # If we got any perms, report the map
            if perms:
                maps.append((address.value, mapsize.value, perms, ''))
            address.value += mapsize.value
        return maps

    def handlePosixSignal(self, sig):
        """Dispatch a posix signal extracted from an EXC_SOFTWARE/EXC_BREAKPOINT event."""
        if sig == signal.SIGTRAP:
            # FIXME I think we can catch these!
            # Traps on posix systems are a little complicated
            if self.stepping:
                self.stepping = False
                self.fireNotifiers(vtrace.NOTIFY_STEP)
            # FIXME and these too...
            elif self.checkBreakpoints():
                # It was either a known BP or a sendBreak()
                return
            elif self.execing:
                self.execing = False
                self.handleAttach()
            else:
                self._fireSignal(sig)
        elif sig == signal.SIGSTOP:
            # We get a regular POSIX stop signal on attach
            #self.attaching = False
            self.handleAttach()
        else:
            self._fireSignal(sig)

    def platformProcessEvent(self, event):
        """
        Handle a mach exception message (as unpacked by platformWait():
        a (threadid, exception_type, codes) tuple).
        """
        #if self.attaching:
            #self.useptrace = True
            #return self.handlePosixSignal(event)
        #self.useptrace = False
        # Some event states that need to be reset
        self.softexc = False
        threadid, excode, codes = event
        # Set the thread that signaled.
        self.setMeta('ThreadId', threadid)
        self.setMeta('StoppedThreadId', threadid)
        self.setMeta('MachException', event)
        if excode == EXC_SOFTWARE:
            # Posix signals arrive as EXC_SOFTWARE with codes[0] == EXC_SOFT_SIGNAL.
            self.softexc = True
            assert( len(codes) == 2 )
            assert( codes[0] == EXC_SOFT_SIGNAL )
            sig = codes[1]
            self.handlePosixSignal(sig)
        elif excode == EXC_BAD_ACCESS:
            print 'exc_bad_access',repr([hex(x) for x in codes ])
            signo = signal.SIGSEGV
            #if codes[0] == KERN_INVALID_ADDRESS:
                #signo = signal.SIGBUS
            self._fireSignal(signo)
        elif excode == EXC_BAD_INSTRUCTION:
            print 'exc_bad_instruction',repr([hex(x) for x in codes ])
            self._fireSignal(signal.SIGILL)
        elif excode == EXC_CRASH:
            print 'exc_crash'
            print 'Crash:',repr([hex(x) for x in codes])
            self._fireExit(0xffffffff)
        elif excode == EXC_BREAKPOINT:
            print 'exc_breakpoint',codes
            self.handlePosixSignal(signal.SIGTRAP)
        else:
            print 'Unprocessed Exception Type: %d' % excode
            self.fireNotifiers(vtrace.NOTIFY_SIGNAL)
        return

    def platformAttach(self, pid):
        """Attach to a running pid: exception port first, then PT_ATTACHEXC."""
        #print 'CLASSIC',self.machhelper.is_pid_classic(pid)
        #self.attaching = True
        self.task = self.taskForPid(pid)
        self.setExceptionPort()
        assert( self.libc.ptrace(PT_ATTACHEXC, pid, 0, 0) == 0 )

    def taskForPid(self, pid):
        """Return the mach task port for pid (requires debugger privileges)."""
        task = ctypes.c_uint32()
        ret = self.libc.task_for_pid(self.myport, pid, addrof(task))
        if ret != 0:
            raise Exception('task_for_pid failed: 0x%.8x\n' % ret)
        return task.value

    def newMachPort(self, right):
        """Allocate a new mach port with the given MACH_PORT_RIGHT_* right."""
        port = mach_port_name_t()
        assert( self.libc.mach_port_allocate(self.myport, right, addrof(port)) == 0)
        return port.value

    def newMachRWPort(self):
        """Allocate a receive port and insert a send right so it can be replied to."""
        port = self.newMachPort(MACH_PORT_RIGHT_RECEIVE)
        assert( self.libc.mach_port_insert_right(self.myport, port, port, MACH_MSG_TYPE_MAKE_SEND) == 0 )
        return port

    def addPortToSet(self, port):
        assert( self.libc.mach_port_move_member(self.myport, port, self.portset) == 0 )

    def setExceptionPort(self):
        # Set the target task's exception port to our excport
        #r = self.libc.task_set_exception_ports(self.task, EXC_MASK_SOFTWARE, self.excport,
        r = self.libc.task_set_exception_ports( self.task,
                                                EXC_MASK_ALL,
                                                self.excport,
                                                EXCEPTION_DEFAULT,
                                                THREAD_STATE_NONE)
        if r != 0:
            raise Exception('task_set_exception_ports failed: 0x%.8x' % r)

    def _getNextExc(self, timeout=None):
        """Receive one exception message from excport; None on timeout."""
        exc = exc_msg()
        flags = MACH_RCV_MSG | MACH_RCV_LARGE
        if timeout != None:
            flags |= MACH_RCV_TIMEOUT
        r = self.libc.mach_msg(addrof(exc),
                               flags,
                               0, # Send size...
                               ctypes.sizeof(exc), # Recv msg size
                               self.excport,
                               timeout,
                               MACH_PORT_NULL)
        if r == MACH_RCV_TIMED_OUT:
            return None
        if r != 0:
            raise Exception('mach_msg (RECV) failed: 0x%.8x' % r)
        return exc

    def platformWait(self):
        """Block for the next exception message and return (threadport, exc, codes)."""
        # Wait for a mach message on the exception port
        #exc = None
        #while exc == None:
            #exc = self._getNextExc()
        #self.setMeta('ThreadId', exc.thread.name)
        #self.setMeta('StoppedThreadId', exc.thread.name)
        #e2 = self._getNextExc(timeout=0)
        #if e2 != None:
            #print "ALSO GOT",e2
        # Sometimes there are still posix signals anyway...
        #while os.waitpid(-1, os.WNOHANG) != (0,0):
            #pass
        #if self.attaching:
            #pid,status = os.waitpid(self.pid, 0)
            #return os.WSTOPSIG(status)
        #pid,status = os.waitpid(self.pid, 0)
        exc = self._getNextExc()
        # Suspend the task so reading etc is safe...
        self.libc.task_suspend(self.task)
        # NOTE We must extract *all* needed info from the event here!
        codes = [exc.codes[i] for i in range(exc.codeCnt)]
        ret = (exc.thread.name, exc.exception, codes)
        # Keep the raw message so stepi/continue can send the reply later.
        self.setMeta('MachExcMsg', exc)
        #self.sendExcResp(exc)
        return ret

    def sendExcResp(self, exc, maskexc=False):
        """Send the reply for a pending exception message (built by buildExcResp)."""
        res = self.buildExcResp(exc, maskexc=maskexc)
        x = self.libc.mach_msg( addrof(res),
                                MACH_SEND_MSG,
                                res.Head.msgh_size,
                                0,
                                res.Head.msgh_remote_port,
                                MACH_MSG_TIMEOUT_NONE,
                                MACH_PORT_NULL)
        if x != 0:
            raise Exception('mach_msg MACH_SEND_MSG failed: 0x%.8x' % (x,))

    def buildExcResp(self, exc, maskexc=False):
        """Have machhelper's MIG glue construct the exception reply message."""
        res = exc_rep_msg()
        self.machhelper.vtrace_exc_server(ctypes.pointer(exc.Head), ctypes.pointer(res.Head), maskexc)
        return res

    def platformStepi(self):
        """Single step: answer any pending exception, resume the task, PT_STEP."""
        self.stepping = True
        exc = self.getMeta('MachExcMsg')
        if exc != None:
            self.setMeta('MachExcMsg', None)
            # Mask (swallow) the exception unless a signal should be delivered.
            maskexc = ( self.getCurrentSignal() == None )
            self.sendExcResp(exc, maskexc=maskexc)
        assert( self.libc.task_resume(self.task) == 0 )
        assert( self.libc.ptrace(PT_STEP, self.pid, 1, 0) == 0 )

    def platformContinue(self):
        """Continue execution, delivering the current signal if there is one."""
        sig = self.getCurrentSignal()
        if sig == None:
            sig = 0
        #threadid = self.getMeta('StoppedThreadId', 0)
        #if self.softexc:
            #assert( self.macptrace(PT_THUPDATE, self.pid, threadid, sig) == 0 )
        # If we have a mach message to respond to, lets do that....
        exc = self.getMeta('MachExcMsg')
        if exc != None:
            self.setMeta('MachExcMsg', None)
            maskexc = ( self.getCurrentSignal() == None )
            self.sendExcResp(exc, maskexc=maskexc)
        #if self.useptrace:
            #assert( self.libc.ptrace(PT_THUPDATE, self.pid, threadid, sig) == 0 )
            #assert( self.libc.ptrace(PT_CONTINUE, self.pid, 1, sig) == 0 )
            #return
        # Posix-signal exceptions still need the ptrace continue path.
        if self.softexc:
            assert( self.macptrace(PT_CONTINUE, self.pid, 1, sig) == 0 )
        assert( self.libc.task_resume(self.task) == 0 )

    def macptrace(self, request, pid, addr, data, zok=True):
        """ptrace wrapper that perror()s on failure (when zok is True)."""
        ret = self.libc.ptrace(request, pid, addr, data)
        if ret != 0 and zok:
            self.libc.perror('macptrace: ')
        return ret

    def platformDetach(self):
        """Resume the task, PT_DETACH, and drop the mach ports we hold."""
        assert( self.libc.task_resume(self.task) == 0 )
        assert( self.macptrace(PT_DETACH, self.pid, 0, 0) == 0 )
        print 'DETACH'
        #for threadport in self._getThreadPorts():
            #print 'threadport', self.libc.mach_port_deallocate(self.myport, threadport)
        print 'askport',self.libc.mach_port_deallocate(self.myport, self.task)
        print 'excport',self.libc.mach_port_deallocate(self.myport, self.excport)
        print 'portset',self.libc.mach_port_deallocate(self.myport, self.portset)

    def platformReadMemory(self, address, size):
        """Read target memory via mach_vm_read (kernel allocates the buffer in us)."""
        pval = ctypes.c_void_p(0)
        sval = ctypes.c_uint32(0)
        assert( self.libc.mach_vm_read(self.task, address, size, addrof(pval), addrof(sval)) == 0 )
        buf = ctypes.string_at(pval.value, sval.value)
        # Free the buffer mach_vm_read allocated in *our* task.
        assert( self.libc.vm_deallocate(self.myport, pval, sval) == 0 )
        return buf

    def platformWriteMemory(self, address, data):
        """Write target memory via vm_write."""
        print 'WRITE'*100
        assert( self.libc.vm_write(self.task, address, data, len(data)) == 0 )

    # FIXME use vm_allocate for allocate memory
    # FIXME use vm_protect
class Darwini386Trace(
        vtrace.Trace,
        DarwinMixin,
        v_i386.i386Mixin,
        v_base.TracerBase):
    """32-bit x86 Darwin tracer: DarwinMixin plumbing plus i386 register access."""

    def __init__(self):
        vtrace.Trace.__init__(self)
        v_base.TracerBase.__init__(self)
        v_i386.i386Mixin.__init__(self)
        DarwinMixin.__init__(self)

    def getThreadException(self, tid):
        # Each arch trace must implement this...
        state = STRUCT_X86_EXCEPTION_STATE32()
        # Mach state counts are expressed in natural_t (32-bit word) units.
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_EXCEPTION_STATE32, addrof(state), addrof(scount));
        if ret != 0:
            raise Exception('thread_get_state failed: 0x%.8x' % ret)
        return state.trapno, state.err, state.faultvaddr

    def platformGetRegCtx(self, tid):
        """Fetch general purpose and debug registers into a vtrace register context."""
        ctx = self.archGetRegCtx()
        # NOTE: the tid *is* the port...
        state = STRUCT_X86_THREAD_STATE32()
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_THREAD_STATE32, addrof(state), addrof(scount));
        if ret != 0:
            raise Exception('thread_get_state (THREAD_STATE32) failed: 0x%.8x' % ret)
        ctx._rctx_Import(state)
        state = STRUCT_X86_DEBUG_STATE32()
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_DEBUG_STATE32, addrof(state), addrof(scount));
        if ret != 0:
            raise Exception('thread_get_state (DEBUG_STATE32) failed: 0x%.8x' % ret)
        ctx._rctx_Import(state)
        return ctx

    def platformSetRegCtx(self, tid, ctx):
        """Write a vtrace register context back to the thread (GP + debug regs)."""
        state = STRUCT_X86_THREAD_STATE32()
        # Sync up a struct first...
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_THREAD_STATE32, addrof(state), addrof(scount));
        if ret != 0:
            raise Exception('thread_get_state (THREAD_STATE32) failed: 0x%.8x' % ret)
        # Export our shit into it...
        ctx._rctx_Export(state)
        scount = ctypes.sizeof(state) / 4
        r = self.libc.thread_set_state(tid, x86_THREAD_STATE32, addrof(state), scount)
        if r != 0:
            raise Exception('thread_set_state (THREAD_STATE32) failed: 0x%.8x' % r)
        state = STRUCT_X86_DEBUG_STATE32()
        ctx._rctx_Export(state)
        scount = ctypes.sizeof(state) / 4
        r = self.libc.thread_set_state(tid, x86_DEBUG_STATE32, addrof(state), scount)
        if r != 0:
            raise Exception('thread_set_state (DEBUG_STATE32) failed: 0x%.8x' % r)
class DarwinAmd64Trace(
        vtrace.Trace,
        DarwinMixin,
        v_amd64.Amd64Mixin,
        v_base.TracerBase):
    """
    64-bit (amd64) Darwin tracer: DarwinMixin plumbing plus amd64 register access.

    BUG FIX: mach thread_get_state()/thread_set_state() counts are expressed
    in natural_t (32-bit word) units -- x86_THREAD_STATE64_COUNT is
    sizeof(state)/4, not sizeof(state)/8.  The original divided by 8 in
    getThreadException and platformSetRegCtx (while platformGetRegCtx
    already divided by 4), handing the kernel half-sized state buffers.
    All counts now consistently use sizeof/4, matching Darwini386Trace.
    """

    def __init__(self):
        vtrace.Trace.__init__(self)
        v_base.TracerBase.__init__(self)
        v_amd64.Amd64Mixin.__init__(self)
        DarwinMixin.__init__(self)

    def getThreadException(self, tid):
        # Each arch trace must implement this...
        state = STRUCT_X86_EXCEPTION_STATE64()
        # Count is in natural_t (32-bit word) units: x86_EXCEPTION_STATE64_COUNT.
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_EXCEPTION_STATE64, addrof(state), addrof(scount))
        if ret != 0:
            raise Exception('thread_get_state failed: 0x%.8x' % ret)
        return state.trapno, state.err, state.faultvaddr

    def platformGetRegCtx(self, tid):
        """Fetch general purpose and debug registers into a vtrace register context."""
        ctx = self.archGetRegCtx()
        # NOTE: the tid *is* the port...
        state = STRUCT_X86_THREAD_STATE64()
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_THREAD_STATE64, addrof(state), addrof(scount))
        if ret != 0:
            self.libc.mach_error("thread_get_state x86_THREAD_STATE64 failed:", ret)
            raise Exception('thread_get_state (THREAD_STATE64) failed: 0x%.8x' % ret)
        ctx._rctx_Import(state)
        state = STRUCT_X86_DEBUG_STATE64()
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_DEBUG_STATE64, addrof(state), addrof(scount))
        if ret != 0:
            self.libc.mach_error("thread_get_state x86_DEBUG_STATE64 failed:", ret)
            raise Exception('thread_get_state (DEBUG_STATE64) failed: 0x%.8x' % ret)
        ctx._rctx_Import(state)
        return ctx

    def platformSetRegCtx(self, tid, ctx):
        """Write a vtrace register context back to the thread (GP + debug regs)."""
        state = STRUCT_X86_THREAD_STATE64()
        # Sync up a struct first so unexported fields keep their current values.
        scount = ctypes.c_uint32(ctypes.sizeof(state) / 4)
        ret = self.libc.thread_get_state(tid, x86_THREAD_STATE64, addrof(state), addrof(scount))
        if ret != 0:
            raise Exception('thread_get_state (THREAD_STATE64) failed: 0x%.8x' % ret)
        # Export our register context values into the struct...
        ctx._rctx_Export(state)
        scount = ctypes.sizeof(state) / 4
        r = self.libc.thread_set_state(tid, x86_THREAD_STATE64, addrof(state), scount)
        if r != 0:
            raise Exception('thread_set_state (THREAD_STATE64) failed: 0x%.8x' % r)
        state = STRUCT_X86_DEBUG_STATE64()
        ctx._rctx_Export(state)
        scount = ctypes.sizeof(state) / 4
        r = self.libc.thread_set_state(tid, x86_DEBUG_STATE64, addrof(state), scount)
        if r != 0:
            raise Exception('thread_set_state (DEBUG_STATE64) failed: 0x%.8x' % r)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
  """Reverses ndarray `x` along `axis` (negative axes allowed).

  Args:
    x: input ndarray.
    axis: axis to reverse; may be negative.

  Returns:
    An ndarray with the same shape as `x`, reversed along `axis`.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis
  # BUG FIX: multidimensional indices must be a tuple -- indexing with a
  # *list* of slices was deprecated and is an error in modern NumPy.
  ix = tuple(
      slice(None, None, -1) if i == axis else slice(None) for i in range(length)
  )
  return x[ix]
def handle_options(func, x, axis, exclusive, reverse):
  """Adds tf options to numpy scan ops.

  Args:
    func: np.cumsum or np.cumprod.
    x: input ndarray.
    axis: scan axis; may be negative.
    exclusive: if True, element i of the result excludes x[i] (the scan is
      shifted and seeded with the operation's identity element).
    reverse: if True, scan from the last element toward the first.

  Returns:
    An ndarray with the same shape as `x`.

  Raises:
    ValueError: if `func` is not np.cumsum or np.cumprod.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis

  if reverse:
    x = numpy_reverse(x, axis)

  if exclusive:
    # BUG FIX: multidimensional indices must be tuples -- indexing with a
    # *list* of slices was deprecated and is an error in modern NumPy.
    ix_head = tuple(
        slice(0, 1) if i == axis else slice(None) for i in range(length))
    ix_init = tuple(
        slice(0, -1) if i == axis else slice(None) for i in range(length))
    if func == np.cumsum:
      init = np.zeros_like(x[ix_head])  # identity element for addition
    elif func == np.cumprod:
      init = np.ones_like(x[ix_head])  # identity element for multiplication
    else:
      raise ValueError("Unknown scan function.")
    x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
  else:
    x = func(x, axis=axis)

  if reverse:
    x = numpy_reverse(x, axis)
  return x
class CumsumTest(xla_test.XLATestCase):
  """Tests XLA math_ops.cumsum against the NumPy reference (handle_options)."""

  # Dtypes exercised by the value-comparison tests below.
  valid_dtypes = [np.float32, np.int32, np.int64]

  def axis_dtypes(self):
    """Integer dtypes usable for the `axis` argument on this backend."""
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    """Compares XLA cumsum to the NumPy reference for one option combination."""
    np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      # Feed via a placeholder so the op is compiled for a runtime input.
      p = array_ops.placeholder(x.dtype)
      tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
          feed_dict={p: x})

    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    # Exercise all four exclusive/reverse combinations.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    # Zero-element input must not crash and must round-trip.
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # The axis argument may be an int32 or int64 scalar tensor.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          math_ops.cumsum(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  def testMixedPrecision(self):
    # bfloat16 input with exclusive scan; expected values are exact.
    with self.session(), self.test_scope():
      y = math_ops.cumsum(
          constant_op.constant([1., 2., 3., 4.], dtypes.bfloat16),
          -1,
          exclusive=True).eval()
      self.assertAllEqual(y, [0., 1., 3., 6.])

  @test_util.disable_mlir_bridge("Error handling")
  def testInvalidAxis(self):
    # Out-of-range and non-scalar axes must raise InvalidArgumentError.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
  """Tests XLA math_ops.cumprod against the NumPy reference (handle_options)."""

  # Dtypes exercised by the value-comparison tests below.
  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    """Integer dtypes usable for the `axis` argument on this backend."""
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    """Compares XLA cumprod to the NumPy reference for one option combination."""
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      # Feed via a placeholder so the op is compiled for a runtime input.
      p = array_ops.placeholder(x.dtype)
      prod = math_ops.cumprod(p, axis, exclusive, reverse)
      tf_out = prod.eval(feed_dict={p: x})

    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    # Exercise all four exclusive/reverse combinations.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    # Zero-element input must not crash and must round-trip.
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # The axis argument may be an int32 or int64 scalar tensor.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          # BUG FIX: the op must be built on the placeholder `p`; the
          # original passed the NumPy array `x`, so `feed_dict={p: x}` fed a
          # placeholder that was not part of the computation and the
          # placeholder path was never exercised (CumsumTest uses `p`).
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    # Values start at 1 to keep products well away from overflow.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  @test_util.disable_mlir_bridge("Error handling")
  def testInvalidAxis(self):
    # Out-of-range and non-scalar axes must raise InvalidArgumentError.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumprod(input_tensor, [0]).eval()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
""" ietf_diffserv_policy
This module contains a collection of YANG definitions for
configuring diffserv specification implementations.
Copyright (c) 2014 IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD License
set forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC XXXX; see
the RFC itself for full legal notices.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class ActionTypeIdentity(object):
    """
    Base identity from which all diffserv action-type identities derive.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        pass

    @staticmethod
    def _meta_info():
        # Metadata is loaded lazily to avoid importing the generated
        # meta tables unless they are actually needed.
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        table = meta._meta_table['ActionTypeIdentity']
        return table['meta_info']
class Policies(object):
"""
list of policy templates
.. attribute:: policy_entry
policy template
**type**\: list of :py:class:`PolicyEntry <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry>`
"""
_prefix = 'policy'
_revision = '2015-04-07'
def __init__(self):
self.policy_entry = YList()
self.policy_entry.parent = self
self.policy_entry.name = 'policy_entry'
class PolicyEntry(object):
"""
policy template
.. attribute:: policy_name <key>
Diffserv policy name
**type**\: str
.. attribute:: classifier_entry
Classifier entry configuration in a policy
**type**\: list of :py:class:`ClassifierEntry <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry>`
.. attribute:: policy_descr
Diffserv policy description
**type**\: str
"""
_prefix = 'policy'
_revision = '2015-04-07'
def __init__(self):
self.parent = None
self.policy_name = None
self.classifier_entry = YList()
self.classifier_entry.parent = self
self.classifier_entry.name = 'classifier_entry'
self.policy_descr = None
class ClassifierEntry(object):
"""
Classifier entry configuration in a policy
.. attribute:: classifier_entry_name <key>
Diffserv classifier entry name
**type**\: str
.. attribute:: classifier_action_entry_cfg
Configuration of classifier & associated actions
**type**\: list of :py:class:`ClassifierActionEntryCfg <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.ClassifierActionEntryCfg>`
.. attribute:: classifier_entry_filter_oper
Filters are applicable as any or all filters
**type**\: :py:class:`ClassifierEntryFilterOperationTypeIdentity <ydk.models.ietf.ietf_diffserv_classifier.ClassifierEntryFilterOperationTypeIdentity>`
**default value**\: match-any-filter
.. attribute:: classifier_entry_inline
Indication of inline classifier entry
**type**\: bool
**default value**\: false
.. attribute:: filter_entry
Filters configured inline in a policy
**type**\: list of :py:class:`FilterEntry <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.FilterEntry>`
"""
_prefix = 'policy'
_revision = '2015-04-07'
def __init__(self):
    self.parent = None                        # set when attached to PolicyEntry
    self.classifier_entry_name = None         # list key
    self.classifier_action_entry_cfg = YList()
    self.classifier_action_entry_cfg.parent = self
    self.classifier_action_entry_cfg.name = 'classifier_action_entry_cfg'
    # Identity leaf; the YANG model default is match-any-filter.
    self.classifier_entry_filter_oper = None
    # Bool leaf; the YANG model default is false.
    self.classifier_entry_inline = None
    self.filter_entry = YList()               # inline filters (only used when inline)
    self.filter_entry.parent = self
    self.filter_entry.name = 'filter_entry'
class FilterEntry(object):
"""
Filters configured inline in a policy
.. attribute:: filter_type <key>
This leaf defines type of the filter
**type**\: :py:class:`FilterTypeIdentity <ydk.models.ietf.ietf_diffserv_classifier.FilterTypeIdentity>`
.. attribute:: filter_logical_not <key>
This is logical\-not operator for a filter. When true, it indicates filter looks for absence of a pattern defined by the filter
**type**\: bool
.. attribute:: destination_ip_address_cfg
list of destination ip address
**type**\: list of :py:class:`DestinationIpAddressCfg <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.FilterEntry.DestinationIpAddressCfg>`
.. attribute:: destination_port_cfg
list of ranges of destination port
**type**\: list of :py:class:`DestinationPortCfg <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.FilterEntry.DestinationPortCfg>`
.. attribute:: dscp_cfg
list of dscp ranges
**type**\: list of :py:class:`DscpCfg <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.FilterEntry.DscpCfg>`
.. attribute:: protocol_cfg
list of ranges of protocol values
**type**\: list of :py:class:`ProtocolCfg <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.FilterEntry.ProtocolCfg>`
.. attribute:: source_ip_address_cfg
list of source ip address
**type**\: list of :py:class:`SourceIpAddressCfg <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.FilterEntry.SourceIpAddressCfg>`
.. attribute:: source_port_cfg
list of ranges of source port
**type**\: list of :py:class:`SourcePortCfg <ydk.models.ietf.ietf_diffserv_policy.Policies.PolicyEntry.ClassifierEntry.FilterEntry.SourcePortCfg>`
"""
_prefix = 'policy'
_revision = '2015-04-07'
def __init__(self):
    self.parent = None             # set when attached to ClassifierEntry
    self.filter_type = None        # list key: filter-type identity
    self.filter_logical_not = None # list key: True inverts the filter match
    # Six child filter lists below all share the same YList wiring pattern:
    # create the list, point it back at self, and record its leaf name.
    self.destination_ip_address_cfg = YList()
    self.destination_ip_address_cfg.parent = self
    self.destination_ip_address_cfg.name = 'destination_ip_address_cfg'
    self.destination_port_cfg = YList()
    self.destination_port_cfg.parent = self
    self.destination_port_cfg.name = 'destination_port_cfg'
    self.dscp_cfg = YList()
    self.dscp_cfg.parent = self
    self.dscp_cfg.name = 'dscp_cfg'
    self.protocol_cfg = YList()
    self.protocol_cfg.parent = self
    self.protocol_cfg.name = 'protocol_cfg'
    self.source_ip_address_cfg = YList()
    self.source_ip_address_cfg.parent = self
    self.source_ip_address_cfg.name = 'source_ip_address_cfg'
    self.source_port_cfg = YList()
    self.source_port_cfg.parent = self
    self.source_port_cfg.name = 'source_port_cfg'
class DscpCfg(object):
    """One inclusive DSCP range (0..63) used by an inline policy filter.

    Both ``dscp_min`` and ``dscp_max`` are list keys and must be set
    before a configuration path can be derived.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.dscp_min = None
        self.dscp_max = None

    @property
    def _common_path(self):
        # An attached parent plus both keys are required to build the XPath.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.dscp_min is None:
            raise YPYModelError('Key property dscp_min is None')
        if self.dscp_max is None:
            raise YPYModelError('Key property dscp_max is None')
        return ('%s/ietf-diffserv-policy:dscp-cfg'
                '[ietf-diffserv-policy:dscp-min = %s]'
                '[ietf-diffserv-policy:dscp-max = %s]'
                % (self.parent._common_path, self.dscp_min, self.dscp_max))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Data is present as soon as either key carries a value.
        return self.dscp_min is not None or self.dscp_max is not None

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.FilterEntry.DscpCfg']['meta_info']
class SourceIpAddressCfg(object):
    """An inline filter element matching one source IP prefix.

    ``source_ip_addr`` (the list key) holds an IPv4 or IPv6 prefix in
    CIDR notation, as constrained by the YANG model's patterns.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.source_ip_addr = None

    @property
    def _common_path(self):
        # An attached parent and the key are both needed for the XPath.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.source_ip_addr is None:
            raise YPYModelError('Key property source_ip_addr is None')
        return ('%s/ietf-diffserv-policy:source-ip-address-cfg'
                '[ietf-diffserv-policy:source-ip-addr = %s]'
                % (self.parent._common_path, self.source_ip_addr))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        return self.source_ip_addr is not None

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.FilterEntry.SourceIpAddressCfg']['meta_info']
class DestinationIpAddressCfg(object):
    """An inline filter element matching one destination IP prefix.

    ``destination_ip_addr`` (the list key) holds an IPv4 or IPv6 prefix
    in CIDR notation, as constrained by the YANG model's patterns.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.destination_ip_addr = None

    @property
    def _common_path(self):
        # An attached parent and the key are both needed for the XPath.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.destination_ip_addr is None:
            raise YPYModelError('Key property destination_ip_addr is None')
        return ('%s/ietf-diffserv-policy:destination-ip-address-cfg'
                '[ietf-diffserv-policy:destination-ip-addr = %s]'
                % (self.parent._common_path, self.destination_ip_addr))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        return self.destination_ip_addr is not None

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.FilterEntry.DestinationIpAddressCfg']['meta_info']
class SourcePortCfg(object):
    """One inclusive source-port range (0..65535) of an inline filter.

    ``source_port_min`` and ``source_port_max`` are both list keys.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.source_port_min = None
        self.source_port_max = None

    @property
    def _common_path(self):
        # Parent linkage and both keys are mandatory to build the XPath.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.source_port_min is None:
            raise YPYModelError('Key property source_port_min is None')
        if self.source_port_max is None:
            raise YPYModelError('Key property source_port_max is None')
        return ('%s/ietf-diffserv-policy:source-port-cfg'
                '[ietf-diffserv-policy:source-port-min = %s]'
                '[ietf-diffserv-policy:source-port-max = %s]'
                % (self.parent._common_path,
                   self.source_port_min, self.source_port_max))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Data is present when either key has been assigned.
        return (self.source_port_min is not None
                or self.source_port_max is not None)

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.FilterEntry.SourcePortCfg']['meta_info']
class DestinationPortCfg(object):
    """One inclusive destination-port range (0..65535) of an inline filter.

    ``destination_port_min`` and ``destination_port_max`` are both list keys.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.destination_port_min = None
        self.destination_port_max = None

    @property
    def _common_path(self):
        # Parent linkage and both keys are mandatory to build the XPath.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.destination_port_min is None:
            raise YPYModelError('Key property destination_port_min is None')
        if self.destination_port_max is None:
            raise YPYModelError('Key property destination_port_max is None')
        return ('%s/ietf-diffserv-policy:destination-port-cfg'
                '[ietf-diffserv-policy:destination-port-min = %s]'
                '[ietf-diffserv-policy:destination-port-max = %s]'
                % (self.parent._common_path,
                   self.destination_port_min, self.destination_port_max))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Data is present when either key has been assigned.
        return (self.destination_port_min is not None
                or self.destination_port_max is not None)

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.FilterEntry.DestinationPortCfg']['meta_info']
class ProtocolCfg(object):
    """One inclusive IP-protocol-number range (0..255) of an inline filter.

    ``protocol_min`` and ``protocol_max`` are both list keys.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.protocol_min = None
        self.protocol_max = None

    @property
    def _common_path(self):
        # Parent linkage and both keys are mandatory to build the XPath.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.protocol_min is None:
            raise YPYModelError('Key property protocol_min is None')
        if self.protocol_max is None:
            raise YPYModelError('Key property protocol_max is None')
        return ('%s/ietf-diffserv-policy:protocol-cfg'
                '[ietf-diffserv-policy:protocol-min = %s]'
                '[ietf-diffserv-policy:protocol-max = %s]'
                % (self.parent._common_path,
                   self.protocol_min, self.protocol_max))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Data is present when either key has been assigned.
        return (self.protocol_min is not None
                or self.protocol_max is not None)

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.FilterEntry.ProtocolCfg']['meta_info']
@property
def _common_path(self):
    # XPath of this filter-entry; requires an attached parent plus both
    # list keys (filter-type, filter-logical-not).
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.filter_type is None:
        raise YPYModelError('Key property filter_type is None')
    if self.filter_logical_not is None:
        raise YPYModelError('Key property filter_logical_not is None')
    return self.parent._common_path +'/ietf-diffserv-policy:filter-entry[ietf-diffserv-policy:filter-type = ' + str(self.filter_type) + '][ietf-diffserv-policy:filter-logical-not = ' + str(self.filter_logical_not) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when either key or any element of a child filter list has data.
    if self.filter_type is not None:
        return True
    if self.filter_logical_not is not None:
        return True
    if self.destination_ip_address_cfg is not None:
        for child_ref in self.destination_ip_address_cfg:
            if child_ref._has_data():
                return True
    if self.destination_port_cfg is not None:
        for child_ref in self.destination_port_cfg:
            if child_ref._has_data():
                return True
    if self.dscp_cfg is not None:
        for child_ref in self.dscp_cfg:
            if child_ref._has_data():
                return True
    if self.protocol_cfg is not None:
        for child_ref in self.protocol_cfg:
            if child_ref._has_data():
                return True
    if self.source_ip_address_cfg is not None:
        for child_ref in self.source_ip_address_cfg:
            if child_ref._has_data():
                return True
    if self.source_port_cfg is not None:
        for child_ref in self.source_port_cfg:
            if child_ref._has_data():
                return True
    return False

@staticmethod
def _meta_info():
    # Metadata is looked up lazily to avoid importing the (large) meta
    # tables until actually needed.
    from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
    return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.FilterEntry']['meta_info']
class ClassifierActionEntryCfg(object):
    """Associates one action with a classifier inside a policy.

    ``action_type`` (the list key) is an action-type identity.
    """

    _prefix = 'policy'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.action_type = None

    @property
    def _common_path(self):
        # An attached parent and the key are both needed for the XPath.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.action_type is None:
            raise YPYModelError('Key property action_type is None')
        return ('%s/ietf-diffserv-policy:classifier-action-entry-cfg'
                '[ietf-diffserv-policy:action-type = %s]'
                % (self.parent._common_path, self.action_type))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        return self.action_type is not None

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
        return meta._meta_table['Policies.PolicyEntry.ClassifierEntry.ClassifierActionEntryCfg']['meta_info']
@property
def _common_path(self):
    # XPath of this classifier-entry; needs a parent and the name key.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.classifier_entry_name is None:
        raise YPYModelError('Key property classifier_entry_name is None')
    return self.parent._common_path +'/ietf-diffserv-policy:classifier-entry[ietf-diffserv-policy:classifier-entry-name = ' + str(self.classifier_entry_name) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when the key, either scalar leaf, or any child list element
    # carries data.
    if self.classifier_entry_name is not None:
        return True
    if self.classifier_action_entry_cfg is not None:
        for child_ref in self.classifier_action_entry_cfg:
            if child_ref._has_data():
                return True
    if self.classifier_entry_filter_oper is not None:
        return True
    if self.classifier_entry_inline is not None:
        return True
    if self.filter_entry is not None:
        for child_ref in self.filter_entry:
            if child_ref._has_data():
                return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
    return meta._meta_table['Policies.PolicyEntry.ClassifierEntry']['meta_info']
@property
def _common_path(self):
    # Top-level list entry: the path is absolute, so no parent is needed,
    # only the policy-name key.
    if self.policy_name is None:
        raise YPYModelError('Key property policy_name is None')
    return '/ietf-diffserv-policy:policies/ietf-diffserv-policy:policy-entry[ietf-diffserv-policy:policy-name = ' + str(self.policy_name) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # True when the key, the description, or any child classifier has data.
    if self.policy_name is not None:
        return True
    if self.classifier_entry is not None:
        for child_ref in self.classifier_entry:
            if child_ref._has_data():
                return True
    if self.policy_descr is not None:
        return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
    return meta._meta_table['Policies.PolicyEntry']['meta_info']
@property
def _common_path(self):
    # Root container: fixed absolute XPath.
    return '/ietf-diffserv-policy:policies'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # The container has data iff any child policy-entry does.
    if self.policy_entry is not None:
        for child_ref in self.policy_entry:
            if child_ref._has_data():
                return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.ietf._meta import _ietf_diffserv_policy as meta
    return meta._meta_table['Policies']['meta_info']
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, vArmour Networks Inc.
#
import eventlet
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import l3_agent
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import legacy
from neutron.common import topics
from neutron.openstack.common import log as logging
from neutron.openstack.common import service
from neutron import service as neutron_service
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
from neutron.services.firewall.agents.varmour import varmour_api
from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
LOG = logging.getLogger(__name__)
class vArmourL3NATAgent(l3_agent.L3NATAgent,
firewall_l3_agent.FWaaSL3AgentRpcCallback):
def __init__(self, host, conf=None):
    LOG.debug(_('vArmourL3NATAgent: __init__'))
    # Create the REST client before the base __init__ runs, so it is
    # available to any overridden hooks (e.g. process_router) the base
    # constructor may invoke. NOTE(review): based on call order only --
    # confirm the base constructor actually triggers processing.
    self.rest = varmour_api.vArmourRestAPI()
    super(vArmourL3NATAgent, self).__init__(host, conf)
def _destroy_router_namespaces(self, only_router_id=None):
    # This agent programs routers on the vArmour appliance instead of
    # local network namespaces, so namespace management is a no-op.
    return

def _destroy_router_namespace(self, namespace):
    # No-op: see _destroy_router_namespaces.
    return

def _create_router_namespace(self, ri):
    # No-op: see _destroy_router_namespaces.
    return
def _router_added(self, router_id, router):
    """Track a newly scheduled router and process its initial state."""
    LOG.debug(_("_router_added: %s"), router_id)
    ri = l3_agent.RouterInfo(router_id, self.root_helper,
                             self.conf.use_namespaces, router)
    self.router_info[router_id] = ri
    super(vArmourL3NATAgent, self).process_router_add(ri)
def _router_removed(self, router_id):
    """Tear down all local and appliance state for a deleted router."""
    LOG.debug(_("_router_removed: %s"), router_id)
    ri = self.router_info[router_id]
    if ri:
        # Blank out gateway/ports/floating IPs and reprocess once, so the
        # generic plumbing is removed before deleting vArmour objects.
        ri.router['gw_port'] = None
        ri.router[l3_constants.INTERFACE_KEY] = []
        ri.router[l3_constants.FLOATINGIP_KEY] = []
        self.process_router(ri)
        # Delete this router's SNAT and DNAT rule groups on the appliance.
        name = va_utils.get_snat_rule_name(ri)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name)
        name = va_utils.get_dnat_rule_name(ri)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name)
        # Remove both zones entirely (True => delete the zone object too).
        name = va_utils.get_trusted_zone_name(ri)
        self._va_unset_zone_interfaces(name, True)
        name = va_utils.get_untrusted_zone_name(ri)
        self._va_unset_zone_interfaces(name, True)
        del self.router_info[router_id]
def _spawn_metadata_proxy(self, router_info):
    # Metadata proxy processes are not managed by this agent; no-op.
    return

def _destroy_metadata_proxy(self, router_info):
    # No-op counterpart of _spawn_metadata_proxy.
    return
def _set_subnet_info(self, port):
    """Derive and store the CIDR for a router port.

    Sets ``port['ip_cidr']`` to ``"<first fixed IP>/<subnet prefixlen>"``
    so later zone/NAT configuration can read it.

    :param port: port dict with 'fixed_ips', 'subnet' and 'id' entries.
    :raises Exception: if the port has no fixed IPs.
    """
    ips = port['fixed_ips']
    if not ips:
        raise Exception(_("Router port %s has no IP address") % port['id'])
    # BUG FIX: a stray bare ``return`` used to sit here (after the raise),
    # leaving the code below unreachable so port['ip_cidr'] was never
    # populated for consumers such as _va_config_trusted_zone.
    if len(ips) > 1:
        LOG.warn(_("Ignoring multiple IPs on router port %s"), port['id'])
    prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
    port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
def _va_unset_zone_interfaces(self, zone_name, remove_zone=False):
    # return True if zone exists; otherwise, return False
    LOG.debug(_("_va_unset_zone_interfaces: %s"), zone_name)
    resp = self.rest.rest_api('GET', va_utils.REST_URL_CONF_ZONE)
    if resp and resp['status'] == 200:
        zlist = resp['body']['response']
        for zn in zlist:
            if zn == zone_name:
                commit = False
                # Detach every interface currently bound to the zone.
                if 'interface' in zlist[zn]:
                    for intf in zlist[zn]['interface']:
                        self.rest.rest_api('DELETE',
                                           va_utils.REST_URL_CONF +
                                           va_utils.REST_ZONE_NAME % zn +
                                           va_utils.REST_INTF_NAME % intf)
                    commit = True
                # Optionally delete the (now empty) zone object itself.
                if remove_zone:
                    self.rest.rest_api('DELETE',
                                       va_utils.REST_URL_CONF +
                                       va_utils.REST_ZONE_NAME % zn)
                    commit = True
                # Only push a commit when something actually changed.
                if commit:
                    self.rest.commit()
                return True
    return False
def _va_pif_2_lif(self, pif):
    """Map a physical interface name to its logical sub-interface (unit 0)."""
    return '%s.0' % pif
def _va_set_interface_ip(self, pif, cidr):
    """Assign *cidr* to the logical sub-interface of physical port *pif*."""
    LOG.debug(_("_va_set_interface_ip: %(pif)s %(cidr)s"),
              {'pif': pif, 'cidr': cidr})
    lif = self._va_pif_2_lif(pif)
    # REST object path is <interface>/<logical-interface>.
    obj = va_utils.REST_INTF_NAME % pif + va_utils.REST_LOGIC_NAME % lif
    body = {
        'name': lif,
        'family': 'ipv4',
        'address': cidr
    }
    self.rest.rest_api('PUT', va_utils.REST_URL_CONF + obj, body)
def _va_get_port_name(self, port_list, name):
    """Return the appliance port name whose 'VM name' equals *name*.

    Returns None when *name* is empty/None or when no entry matches.
    """
    if not name:
        return None
    return next((entry['name'] for entry in port_list
                 if entry['VM name'] == name), None)
def _va_config_trusted_zone(self, ri, plist):
    """(Re)build the router's trusted L3 zone from its internal ports."""
    zone = va_utils.get_trusted_zone_name(ri)
    LOG.debug(_("_va_config_trusted_zone: %s"), zone)
    body = {
        'name': zone,
        'type': 'L3',
        'interface': []
    }
    # Detach any previously bound interfaces first.
    if not self._va_unset_zone_interfaces(zone):
        # if zone doesn't exist, create it
        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
        self.rest.commit()
    # add new internal ports to trusted zone
    for p in ri.internal_ports:
        if p['admin_state_up']:
            dev = self.get_internal_device_name(p['id'])
            pif = self._va_get_port_name(plist, dev)
            if pif:
                lif = self._va_pif_2_lif(pif)
                if lif not in body['interface']:
                    body['interface'].append(lif)
                self._va_set_interface_ip(pif, p['ip_cidr'])
    # Push the interface membership only when there is something to bind.
    if body['interface']:
        self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body)
        self.rest.commit()
def _va_config_untrusted_zone(self, ri, plist):
    """(Re)build the router's untrusted L3 zone from its gateway port."""
    zone = va_utils.get_untrusted_zone_name(ri)
    LOG.debug(_("_va_config_untrusted_zone: %s"), zone)
    body = {
        'name': zone,
        'type': 'L3',
        'interface': []
    }
    if not self._va_unset_zone_interfaces(zone):
        # if zone doesn't exist, create it
        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
        self.rest.commit()
    # add new gateway ports to untrusted zone
    if ri.ex_gw_port:
        LOG.debug(_("_va_config_untrusted_zone: gw=%r"), ri.ex_gw_port)
        dev = self.get_external_device_name(ri.ex_gw_port['id'])
        pif = self._va_get_port_name(plist, dev)
        if pif:
            lif = self._va_pif_2_lif(pif)
            # Address the logical interface, then bind it to the zone.
            self._va_set_interface_ip(pif, ri.ex_gw_port['ip_cidr'])
            body['interface'].append(lif)
            self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body)
            self.rest.commit()
def _va_config_router_snat_rules(self, ri, plist):
    """Rewrite interface-based SNAT rules for the router's internal nets."""
    LOG.debug(_('_va_config_router_snat_rules: %s'), ri.router['id'])
    prefix = va_utils.get_snat_rule_name(ri)
    # Drop all existing rules carrying this router's prefix, then re-add.
    self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix)
    if not ri.enable_snat:
        return
    for idx, p in enumerate(ri.internal_ports):
        if p['admin_state_up']:
            dev = self.get_internal_device_name(p['id'])
            pif = self._va_get_port_name(plist, dev)
            if pif:
                net = netaddr.IPNetwork(p['ip_cidr'])
                body = {
                    'name': '%s_%d' % (prefix, idx),
                    'ingress-context-type': 'interface',
                    'ingress-index': self._va_pif_2_lif(pif),
                    # Translate hosts from .2 through broadcast-1.
                    # NOTE(review): presumably skips the network address
                    # and the router gateway IP -- confirm.
                    'source-address': [
                        [str(netaddr.IPAddress(net.first + 2)),
                         str(netaddr.IPAddress(net.last - 1))]
                    ],
                    'flag': 'interface translate-source'
                }
                self.rest.rest_api('POST',
                                   va_utils.REST_URL_CONF_NAT_RULE,
                                   body)
    if ri.internal_ports:
        self.rest.commit()
def _va_config_floating_ips(self, ri):
    """Rewrite static DNAT rules, one per floating IP on the router."""
    LOG.debug(_('_va_config_floating_ips: %s'), ri.router['id'])
    prefix = va_utils.get_dnat_rule_name(ri)
    # Remove this router's existing DNAT rules before re-adding.
    self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix)
    # add new dnat rules
    for idx, fip in enumerate(ri.floating_ips):
        body = {
            'name': '%s_%d' % (prefix, idx),
            'ingress-context-type': 'zone',
            'ingress-index': va_utils.get_untrusted_zone_name(ri),
            # Single-address "ranges": floating IP maps 1:1 to fixed IP.
            'destination-address': [[fip['floating_ip_address'],
                                     fip['floating_ip_address']]],
            'static': [fip['fixed_ip_address'], fip['fixed_ip_address']],
            'flag': 'translate-destination'
        }
        self.rest.rest_api('POST', va_utils.REST_URL_CONF_NAT_RULE, body)
    if ri.floating_ips:
        self.rest.commit()
def process_router(self, ri):
    """Run base-class processing, then sync zone/NAT config to vArmour."""
    LOG.debug(_("process_router: %s"), ri.router['id'])
    super(vArmourL3NATAgent, self).process_router(ri)
    self.rest.auth()
    # read internal port name and configuration port name map
    resp = self.rest.rest_api('GET', va_utils.REST_URL_INTF_MAP)
    if resp and resp['status'] == 200:
        try:
            plist = resp['body']['response']
        except ValueError:
            # NOTE(review): only ValueError is handled here; a missing
            # 'body'/'response' key would raise KeyError uncaught --
            # confirm this is the intended failure mode.
            LOG.warn(_("Unable to parse interface mapping."))
            return
    else:
        LOG.warn(_("Unable to read interface mapping."))
        return
    # Gateway port needs ip_cidr populated before zone/NAT config runs.
    if ri.ex_gw_port:
        self._set_subnet_info(ri.ex_gw_port)
    self._va_config_trusted_zone(ri, plist)
    self._va_config_untrusted_zone(ri, plist)
    self._va_config_router_snat_rules(ri, plist)
    self._va_config_floating_ips(ri)
def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs,
                              interface_name, action):
    # SNAT is programmed on the appliance by _va_config_router_snat_rules,
    # not via local iptables; no-op override of the base agent.
    return

def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address):
    # No-op: gratuitous ARP is not sent by this agent.
    return
def external_gateway_added(self, ri, ex_gw_port,
                           interface_name, internal_cidrs):
    """Plug the external gateway device if absent, then (re)address it."""
    LOG.debug(_("external_gateway_added: %s"), ri.router['id'])
    if not ip_lib.device_exists(interface_name,
                                root_helper=self.root_helper,
                                namespace=ri.ns_name()):
        self.driver.plug(ex_gw_port['network_id'],
                         ex_gw_port['id'], interface_name,
                         ex_gw_port['mac_address'],
                         bridge=self.conf.external_network_bridge,
                         namespace=ri.ns_name(),
                         prefix=l3_agent.EXTERNAL_DEV_PREFIX)
    # init_l3 runs unconditionally so the address is refreshed even when
    # the device already existed.
    self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
                        namespace=ri.ns_name())
def _update_routing_table(self, ri, operation, route):
    # Route programming is not done locally by this agent; no-op.
    return
class vArmourL3NATAgentWithStateReport(vArmourL3NATAgent,
                                       l3_agent.L3NATAgentWithStateReport):
    # Combines the vArmour agent with periodic agent-state reporting; the
    # base-class order lets vArmour overrides take precedence in the MRO.
    pass
def main():
    """Entry point: configure and launch the vArmour L3 agent service."""
    # Green-thread the stdlib before any sockets/locks are created.
    eventlet.monkey_patch()
    conf = cfg.CONF
    # Register every option group this agent consumes.
    conf.register_opts(vArmourL3NATAgent.OPTS)
    config.register_agent_state_opts_helper(conf)
    config.register_root_helper(conf)
    conf.register_opts(interface.OPTS)
    conf.register_opts(external_process.OPTS)
    conf(project='neutron')
    config.setup_logging(conf)
    legacy.modernize_quantum_config(conf)
    # Run the state-reporting variant as a standard neutron service on
    # the L3 agent topic.
    server = neutron_service.Service.create(
        binary='neutron-l3-agent',
        topic=topics.L3_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.services.firewall.agents.varmour.varmour_router.'
                'vArmourL3NATAgentWithStateReport')
    service.launch(server).wait()
| |
import os
import json
import pytest
from datetime import datetime, timedelta
from unittest.mock import patch, MagicMock
from awsume.awsumepy.lib import cache, constants
# NOTE: @patch decorators are applied bottom-up, so mock parameters arrive
# innermost-first (exists, makedirs, chmod).
@patch('os.chmod')
@patch('os.makedirs')
@patch('os.path.exists')
def test_ensure_cache_dir(exists: MagicMock, makedirs: MagicMock, chmod: MagicMock):
    # A missing cache directory must be created.
    exists.return_value = False
    cache.ensure_cache_dir()
    makedirs.assert_called()

@patch('os.chmod')
@patch('os.makedirs')
@patch('os.path.exists')
def test_ensure_cache_dir_already_exists(exists: MagicMock, makedirs: MagicMock, chmod: MagicMock):
    # An existing cache directory must be left alone.
    exists.return_value = True
    cache.ensure_cache_dir()
    makedirs.assert_not_called()
# The ``open`` parameter deliberately shadows the builtin: it receives the
# mock injected by @patch('builtins.open').
@patch('builtins.open')
@patch('json.load')
@patch('os.path.isfile')
@patch.object(cache, 'ensure_cache_dir')
def test_read_aws_cache(ensure_cache_dir: MagicMock, is_file: MagicMock, json_load: MagicMock, open: MagicMock):
    # A cached session's Expiration string must be parsed into datetime.
    is_file.return_value = True
    json_load.return_value = {
        'AccessKeyId': 'AKIA...',
        'SecretAccessKey': 'SECRET',
        'SessionToken': 'LONGSECRET',
        'Expiration': '2065-10-24 12:24:36',
    }
    result = cache.read_aws_cache('cache-file')
    ensure_cache_dir.assert_called()
    open.assert_called()
    json_load.assert_called()
    assert type(result.get('Expiration')) is datetime

@patch('builtins.open')
@patch('json.load')
@patch('os.path.isfile')
@patch.object(cache, 'ensure_cache_dir')
def test_read_aws_cache_no_expiration(ensure_cache_dir: MagicMock, is_file: MagicMock, json_load: MagicMock, open: MagicMock):
    # Without an Expiration key the session is returned unchanged.
    is_file.return_value = True
    json_load.return_value = {
        'AccessKeyId': 'AKIA...',
        'SecretAccessKey': 'SECRET',
        'SessionToken': 'LONGSECRET',
    }
    result = cache.read_aws_cache('cache-file')
    ensure_cache_dir.assert_called()
    open.assert_called()
    json_load.assert_called()
    assert result == json_load.return_value

@patch('builtins.open')
@patch('json.load')
@patch('os.path.isfile')
@patch.object(cache, 'ensure_cache_dir')
def test_read_aws_cache_no_file(ensure_cache_dir: MagicMock, is_file: MagicMock, json_load: MagicMock, open: MagicMock):
    # A missing cache file yields an empty dict, not an error.
    is_file.return_value = False
    result = cache.read_aws_cache('cache-file')
    ensure_cache_dir.assert_called()
    assert result == {}

@patch('builtins.open')
@patch('json.load')
@patch('os.path.isfile')
@patch.object(cache, 'ensure_cache_dir')
def test_read_aws_cache_catch_exception(ensure_cache_dir: MagicMock, is_file: MagicMock, json_load: MagicMock, open: MagicMock):
    # Parse failures must be swallowed; the test passes if no exception
    # propagates (hence no assert).
    is_file.return_value = True
    json_load.side_effect = Exception('Some exception')
    cache.read_aws_cache('cache-file')
@patch('builtins.open')
@patch('json.dump')
@patch('os.chmod')
@patch('os.path.isfile')
@patch.object(cache, 'ensure_cache_dir')
def test_write_aws_cache(ensure_cache_dir: MagicMock, is_file: MagicMock, chmod: MagicMock, json_dump: MagicMock, open: MagicMock):
    # Writing must serialize Expiration to a string and chmod the file 0600.
    is_file.return_value = True
    session = {
        'AccessKeyId': 'AKIA...',
        'SecretAccessKey': 'SECRET',
        'SessionToken': 'LONGSECRET',
        'Expiration': datetime.now(),
    }
    cache.write_aws_cache('cache-file', session)
    ensure_cache_dir.assert_called()
    open.assert_called()
    chmod.assert_called_with(str(constants.AWSUME_CACHE_DIR) + '/cache-file', 0o600)
    json_dump.assert_called()
    # First positional arg of json.dump is the session that was written.
    written_session = json_dump.call_args[0][0]
    assert type(written_session.get('Expiration')) is str

@patch('builtins.open')
@patch('json.dump')
@patch('os.chmod')
@patch('os.path.isfile')
@patch.object(cache, 'ensure_cache_dir')
def test_write_aws_cache_catch_exception(ensure_cache_dir: MagicMock, is_file: MagicMock, chmod: MagicMock, json_dump: MagicMock, open: MagicMock):
    # Dump failures are swallowed; permissions are still restricted first.
    is_file.return_value = True
    json_dump.side_effect = Exception('Some Exception')
    session = {
        'AccessKeyId': 'AKIA...',
        'SecretAccessKey': 'SECRET',
        'SessionToken': 'LONGSECRET',
        'Expiration': datetime.now(),
    }
    cache.write_aws_cache('cache-file', session)
    chmod.assert_called_with(str(constants.AWSUME_CACHE_DIR) + '/cache-file', 0o600)
def test_valid_cache_session():
result = cache.valid_cache_session({
'AccessKeyId': 'AKIA...',
'SecretAccessKey': 'SECRET',
'SessionToken': 'LONGSECRET',
'Expiration': datetime.now() + timedelta(hours=1),
})
assert result is True
def test_valid_cache_session_expired():
result = cache.valid_cache_session({
'AccessKeyId': 'AKIA...',
'SecretAccessKey': 'SECRET',
'SessionToken': 'LONGSECRET',
'Expiration': datetime.now() - timedelta(hours=1),
})
assert result is False
def test_valid_cache_session_no_access_key_id():
result = cache.valid_cache_session({
'SecretAccessKey': 'SECRET',
'SessionToken': 'LONGSECRET',
'Expiration': datetime.now() + timedelta(hours=1),
})
assert result is False
def test_valid_cache_session_no_secret_access_key():
result = cache.valid_cache_session({
'AccessKeyId': 'AKIA...',
'SessionToken': 'LONGSECRET',
'Expiration': datetime.now() + timedelta(hours=1),
})
assert result is False
def test_valid_cache_session_no_session_token():
result = cache.valid_cache_session({
'AccessKeyId': 'AKIA...',
'SecretAccessKey': 'SECRET',
'Expiration': datetime.now() + timedelta(hours=1),
})
assert result is False
def test_valid_cache_session_no_expiration():
result = cache.valid_cache_session({
'AccessKeyId': 'AKIA...',
'SecretAccessKey': 'SECRET',
'SessionToken': 'LONGSECRET',
})
assert result is True
def test_valid_cache_session_datetime_str():
    """Expiration given as a far-future string is accepted as valid."""
    session = {
        'AccessKeyId': 'AKIA...',
        'SecretAccessKey': 'SECRET',
        'SessionToken': 'LONGSECRET',
        'Expiration': '2065-10-24 12:24:36',
    }
    assert cache.valid_cache_session(session) is True
| |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPCAPI
"""
from oslo_config import cfg
import six
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
# Global oslo.config handle; pull in the cells 'topic' option so the test
# case can override it via self.flags(..., group='cells').
CONF = cfg.CONF
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
class CellsAPITestCase(test.NoDBTestCase):
    """Test case for cells.api interfaces."""
    def setUp(self):
        super(CellsAPITestCase, self).setUp()
        self.fake_topic = 'fake_topic'
        self.fake_context = 'fake_context'
        # Point cells RPC at a fake topic so no real messaging is involved.
        self.flags(topic=self.fake_topic, enable=True, group='cells')
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def _stub_rpc_method(self, rpc_method, result):
        """Stub 'call' or 'cast' on the RPC client.

        Captures the context, method name, kwargs and the version passed to
        prepare() into the returned dict, and makes the stubbed RPC method
        return ``result``.
        """
        call_info = {}
        orig_prepare = self.cells_rpcapi.client.prepare
        def fake_rpc_prepare(**kwargs):
            # Record the requested RPC version, then hand back the same
            # client so the chained call hits the stubbed method below.
            if 'version' in kwargs:
                call_info['version'] = kwargs.pop('version')
            return self.cells_rpcapi.client
        def fake_csv(version):
            # NOTE(review): 'version' is passed positionally to the real
            # prepare(); confirm it binds to the intended keyword there.
            return orig_prepare(version).can_send_version()
        def fake_rpc_method(ctxt, method, **kwargs):
            call_info['context'] = ctxt
            call_info['method'] = method
            call_info['args'] = kwargs
            return result
        self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare)
        self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv)
        self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method)
        return call_info
    def _check_result(self, call_info, method, args, version=None):
        """Assert the stub captured the expected method, args and version."""
        self.assertEqual(self.fake_topic,
                         self.cells_rpcapi.client.target.topic)
        self.assertEqual(self.fake_context, call_info['context'])
        self.assertEqual(method, call_info['method'])
        self.assertEqual(args, call_info['args'])
        if version is not None:
            self.assertIn('version', call_info)
            self.assertIsInstance(call_info['version'], six.string_types,
                                  msg="Message version %s is not a string" %
                                  call_info['version'])
            self.assertEqual(version, call_info['version'])
        else:
            self.assertNotIn('version', call_info)
    def test_cast_compute_api_method(self):
        fake_cell_name = 'fake_cell_name'
        fake_method = 'fake_method'
        fake_method_args = (1, 2)
        fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
        expected_method_info = {'method': fake_method,
                                'method_args': fake_method_args,
                                'method_kwargs': fake_method_kwargs}
        expected_args = {'method_info': expected_method_info,
                         'cell_name': fake_cell_name,
                         'call': False}
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.cast_compute_api_method(self.fake_context,
                fake_cell_name, fake_method,
                *fake_method_args, **fake_method_kwargs)
        self._check_result(call_info, 'run_compute_api_method',
                expected_args)
    def test_call_compute_api_method(self):
        fake_cell_name = 'fake_cell_name'
        fake_method = 'fake_method'
        fake_method_args = (1, 2)
        fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
        fake_response = 'fake_response'
        expected_method_info = {'method': fake_method,
                                'method_args': fake_method_args,
                                'method_kwargs': fake_method_kwargs}
        expected_args = {'method_info': expected_method_info,
                         'cell_name': fake_cell_name,
                         'call': True}
        call_info = self._stub_rpc_method('call', fake_response)
        result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
                fake_cell_name, fake_method,
                *fake_method_args, **fake_method_kwargs)
        self._check_result(call_info, 'run_compute_api_method',
                expected_args)
        self.assertEqual(fake_response, result)
    def test_build_instances(self):
        call_info = self._stub_rpc_method('cast', None)
        instances = [objects.Instance(id=1),
                     objects.Instance(id=2)]
        self.cells_rpcapi.build_instances(
                self.fake_context, instances=instances,
                image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
        expected_args = {'build_inst_kwargs': {'instances': instances,
                                               'image': {'fake': 'image'},
                                               'arg1': 1,
                                               'arg2': 2,
                                               'arg3': 3}}
        self._check_result(call_info, 'build_instances',
                expected_args, version='1.34')
    def test_get_capacities(self):
        capacity_info = {"capacity": "info"}
        call_info = self._stub_rpc_method('call',
                result=capacity_info)
        result = self.cells_rpcapi.get_capacities(self.fake_context,
                                                  cell_name="name")
        self._check_result(call_info, 'get_capacities',
                           {'cell_name': 'name'}, version='1.9')
        self.assertEqual(capacity_info, result)
    def test_instance_update_at_top(self):
        fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid')
        fake_sys_metadata = {'key1': 'value1',
                             'key2': 'value2'}
        fake_attrs = {'id': 2,
                      'cell_name': 'fake',
                      'metadata': {'fake': 'fake'},
                      'info_cache': fake_info_cache,
                      'system_metadata': fake_sys_metadata}
        fake_instance = objects.Instance(**fake_attrs)
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.instance_update_at_top(
                self.fake_context, fake_instance)
        expected_args = {'instance': fake_instance}
        self._check_result(call_info, 'instance_update_at_top',
                expected_args, version='1.35')
    def test_instance_destroy_at_top(self):
        fake_instance = objects.Instance(uuid='fake-uuid')
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.instance_destroy_at_top(
                self.fake_context, fake_instance)
        expected_args = {'instance': fake_instance}
        self._check_result(call_info, 'instance_destroy_at_top',
                expected_args, version='1.35')
    def test_instance_delete_everywhere(self):
        instance = fake_instance.fake_instance_obj(self.fake_context)
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.instance_delete_everywhere(
                self.fake_context, instance,
                'fake-type')
        expected_args = {'instance': instance,
                         'delete_type': 'fake-type'}
        self._check_result(call_info, 'instance_delete_everywhere',
                expected_args, version='1.27')
    def test_instance_fault_create_at_top(self):
        fake_instance_fault = {'id': 2,
                               'other': 'meow'}
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.instance_fault_create_at_top(
                self.fake_context, fake_instance_fault)
        expected_args = {'instance_fault': fake_instance_fault}
        self._check_result(call_info, 'instance_fault_create_at_top',
                expected_args)
    def test_bw_usage_update_at_top(self):
        update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
                       'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
                       'fake_ctr_out')
        update_kwargs = {'last_refreshed': 'fake_refreshed'}
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.bw_usage_update_at_top(
                self.fake_context, *update_args, **update_kwargs)
        bw_update_info = {'uuid': 'fake_uuid',
                          'mac': 'fake_mac',
                          'start_period': 'fake_start_period',
                          'bw_in': 'fake_bw_in',
                          'bw_out': 'fake_bw_out',
                          'last_ctr_in': 'fake_ctr_in',
                          'last_ctr_out': 'fake_ctr_out',
                          'last_refreshed': 'fake_refreshed'}
        expected_args = {'bw_update_info': bw_update_info}
        self._check_result(call_info, 'bw_usage_update_at_top',
                expected_args)
    def test_get_cell_info_for_neighbors(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.get_cell_info_for_neighbors(
                self.fake_context)
        self._check_result(call_info, 'get_cell_info_for_neighbors', {},
                           version='1.1')
        self.assertEqual('fake_response', result)
    def test_sync_instances(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.sync_instances(self.fake_context,
                project_id='fake_project', updated_since='fake_time',
                deleted=True)
        expected_args = {'project_id': 'fake_project',
                         'updated_since': 'fake_time',
                         'deleted': True}
        self._check_result(call_info, 'sync_instances', expected_args,
                           version='1.1')
    def test_service_get_all(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        fake_filters = {'key1': 'val1', 'key2': 'val2'}
        result = self.cells_rpcapi.service_get_all(self.fake_context,
                                                   filters=fake_filters)
        expected_args = {'filters': fake_filters}
        self._check_result(call_info, 'service_get_all', expected_args,
                           version='1.2')
        self.assertEqual('fake_response', result)
    def test_service_get_by_compute_host(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.service_get_by_compute_host(
                self.fake_context, host_name='fake-host-name')
        expected_args = {'host_name': 'fake-host-name'}
        self._check_result(call_info, 'service_get_by_compute_host',
                           expected_args,
                           version='1.2')
        self.assertEqual('fake_response', result)
    def test_get_host_uptime(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.get_host_uptime(
                self.fake_context, host_name='fake-host-name')
        expected_args = {'host_name': 'fake-host-name'}
        self._check_result(call_info, 'get_host_uptime',
                           expected_args,
                           version='1.17')
        self.assertEqual('fake_response', result)
    def test_service_update(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.service_update(
                self.fake_context, host_name='fake-host-name',
                binary='nova-api', params_to_update={'disabled': True})
        expected_args = {
            'host_name': 'fake-host-name',
            'binary': 'nova-api',
            'params_to_update': {'disabled': True}}
        self._check_result(call_info, 'service_update',
                           expected_args,
                           version='1.7')
        self.assertEqual('fake_response', result)
    def test_service_delete(self):
        call_info = self._stub_rpc_method('call', None)
        cell_service_id = 'cell@id'
        result = self.cells_rpcapi.service_delete(
            self.fake_context, cell_service_id=cell_service_id)
        expected_args = {'cell_service_id': cell_service_id}
        self._check_result(call_info, 'service_delete',
                           expected_args, version='1.26')
        self.assertIsNone(result)
    def test_proxy_rpc_to_manager(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.proxy_rpc_to_manager(
                self.fake_context, rpc_message='fake-msg',
                topic='fake-topic', call=True, timeout=-1)
        expected_args = {'rpc_message': 'fake-msg',
                         'topic': 'fake-topic',
                         'call': True,
                         'timeout': -1}
        self._check_result(call_info, 'proxy_rpc_to_manager',
                           expected_args,
                           version='1.2')
        self.assertEqual('fake_response', result)
    def test_task_log_get_all(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.task_log_get_all(self.fake_context,
                task_name='fake_name',
                period_beginning='fake_begin',
                period_ending='fake_end',
                host='fake_host',
                state='fake_state')
        expected_args = {'task_name': 'fake_name',
                         'period_beginning': 'fake_begin',
                         'period_ending': 'fake_end',
                         'host': 'fake_host',
                         'state': 'fake_state'}
        self._check_result(call_info, 'task_log_get_all', expected_args,
                           version='1.3')
        self.assertEqual('fake_response', result)
    def test_compute_node_get_all(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
                hypervisor_match='fake-match')
        expected_args = {'hypervisor_match': 'fake-match'}
        self._check_result(call_info, 'compute_node_get_all', expected_args,
                           version='1.4')
        self.assertEqual('fake_response', result)
    def test_compute_node_stats(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.compute_node_stats(self.fake_context)
        expected_args = {}
        self._check_result(call_info, 'compute_node_stats',
                           expected_args, version='1.4')
        self.assertEqual('fake_response', result)
    def test_compute_node_get(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.compute_node_get(self.fake_context,
                'fake_compute_id')
        expected_args = {'compute_id': 'fake_compute_id'}
        self._check_result(call_info, 'compute_node_get',
                           expected_args, version='1.4')
        self.assertEqual('fake_response', result)
    def test_actions_get(self):
        fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.actions_get(self.fake_context,
                                               fake_instance)
        expected_args = {'cell_name': 'region!child',
                         'instance_uuid': fake_instance['uuid']}
        self._check_result(call_info, 'actions_get', expected_args,
                           version='1.5')
        self.assertEqual('fake_response', result)
    def test_actions_get_no_cell(self):
        # An instance without a cell_name cannot be routed to a cell.
        fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
        self.assertRaises(exception.InstanceUnknownCell,
                          self.cells_rpcapi.actions_get, self.fake_context,
                          fake_instance)
    def test_action_get_by_request_id(self):
        fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
                                                            fake_instance,
                                                            'req-fake')
        expected_args = {'cell_name': 'region!child',
                         'instance_uuid': fake_instance['uuid'],
                         'request_id': 'req-fake'}
        self._check_result(call_info, 'action_get_by_request_id',
                           expected_args, version='1.5')
        self.assertEqual('fake_response', result)
    def test_action_get_by_request_id_no_cell(self):
        fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
        self.assertRaises(exception.InstanceUnknownCell,
                          self.cells_rpcapi.action_get_by_request_id,
                          self.fake_context, fake_instance, 'req-fake')
    def test_action_events_get(self):
        fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.action_events_get(self.fake_context,
                                                     fake_instance,
                                                     'fake-action')
        expected_args = {'cell_name': 'region!child',
                         'action_id': 'fake-action'}
        self._check_result(call_info, 'action_events_get', expected_args,
                           version='1.5')
        self.assertEqual('fake_response', result)
    def test_action_events_get_no_cell(self):
        fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
        self.assertRaises(exception.InstanceUnknownCell,
                          self.cells_rpcapi.action_events_get,
                          self.fake_context, fake_instance, 'fake-action')
    def test_consoleauth_delete_tokens(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
                                                    'fake-uuid')
        expected_args = {'instance_uuid': 'fake-uuid'}
        self._check_result(call_info, 'consoleauth_delete_tokens',
                           expected_args, version='1.6')
    def test_validate_console_port(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.validate_console_port(self.fake_context,
                'fake-uuid', 'fake-port', 'fake-type')
        expected_args = {'instance_uuid': 'fake-uuid',
                         'console_port': 'fake-port',
                         'console_type': 'fake-type'}
        self._check_result(call_info, 'validate_console_port',
                           expected_args, version='1.6')
        self.assertEqual('fake_response', result)
    def test_bdm_update_or_create_at_top(self):
        fake_bdm = {'id': 2, 'other': 'meow'}
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.bdm_update_or_create_at_top(
                self.fake_context, fake_bdm, create='fake-create')
        expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
        self._check_result(call_info, 'bdm_update_or_create_at_top',
                           expected_args, version='1.28')
    def test_bdm_destroy_at_top(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
                                             'fake-uuid',
                                             device_name='fake-device',
                                             volume_id='fake-vol')
        expected_args = {'instance_uuid': 'fake-uuid',
                         'device_name': 'fake-device',
                         'volume_id': 'fake-vol'}
        self._check_result(call_info, 'bdm_destroy_at_top',
                           expected_args, version='1.10')
    def test_get_migrations(self):
        call_info = self._stub_rpc_method('call', None)
        filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
        self.cells_rpcapi.get_migrations(self.fake_context, filters)
        expected_args = {'filters': filters}
        self._check_result(call_info, 'get_migrations', expected_args,
                           version="1.11")
    def test_instance_update_from_api(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.instance_update_from_api(
                self.fake_context, 'fake-instance',
                expected_vm_state='exp_vm',
                expected_task_state='exp_task',
                admin_state_reset='admin_reset')
        expected_args = {'instance': 'fake-instance',
                         'expected_vm_state': 'exp_vm',
                         'expected_task_state': 'exp_task',
                         'admin_state_reset': 'admin_reset'}
        self._check_result(call_info, 'instance_update_from_api',
                           expected_args, version='1.16')
    def test_start_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.start_instance(
                self.fake_context, 'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'start_instance',
                           expected_args, version='1.12')
    def test_stop_instance_cast(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.stop_instance(
                self.fake_context, 'fake-instance', do_cast=True,
                clean_shutdown=True)
        expected_args = {'instance': 'fake-instance',
                         'do_cast': True,
                         'clean_shutdown': True}
        self._check_result(call_info, 'stop_instance',
                           expected_args, version='1.31')
    def test_stop_instance_call(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.stop_instance(
                self.fake_context, 'fake-instance', do_cast=False,
                clean_shutdown=True)
        expected_args = {'instance': 'fake-instance',
                         'do_cast': False,
                         'clean_shutdown': True}
        self._check_result(call_info, 'stop_instance',
                           expected_args, version='1.31')
        self.assertEqual('fake_response', result)
    def test_cell_create(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
        expected_args = {'values': 'values'}
        self._check_result(call_info, 'cell_create',
                           expected_args, version='1.13')
        self.assertEqual('fake_response', result)
    def test_cell_update(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.cell_update(self.fake_context,
                                               'cell_name', 'values')
        expected_args = {'cell_name': 'cell_name',
                         'values': 'values'}
        self._check_result(call_info, 'cell_update',
                           expected_args, version='1.13')
        self.assertEqual('fake_response', result)
    def test_cell_delete(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.cell_delete(self.fake_context,
                                               'cell_name')
        expected_args = {'cell_name': 'cell_name'}
        self._check_result(call_info, 'cell_delete',
                           expected_args, version='1.13')
        self.assertEqual('fake_response', result)
    def test_cell_get(self):
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.cell_get(self.fake_context,
                                            'cell_name')
        expected_args = {'cell_name': 'cell_name'}
        self._check_result(call_info, 'cell_get',
                           expected_args, version='1.13')
        self.assertEqual('fake_response', result)
    def test_reboot_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.reboot_instance(
                self.fake_context, 'fake-instance',
                block_device_info='ignored', reboot_type='HARD')
        # block_device_info is not forwarded over RPC (absent from
        # expected_args below).
        expected_args = {'instance': 'fake-instance',
                         'reboot_type': 'HARD'}
        self._check_result(call_info, 'reboot_instance',
                           expected_args, version='1.14')
    def test_pause_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.pause_instance(
                self.fake_context, 'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'pause_instance',
                           expected_args, version='1.19')
    def test_unpause_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.unpause_instance(
                self.fake_context, 'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'unpause_instance',
                           expected_args, version='1.19')
    def test_suspend_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.suspend_instance(
                self.fake_context, 'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'suspend_instance',
                           expected_args, version='1.15')
    def test_resume_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.resume_instance(
                self.fake_context, 'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'resume_instance',
                           expected_args, version='1.15')
    def test_terminate_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.terminate_instance(self.fake_context,
                                             'fake-instance', [],
                                             delete_type='delete')
        expected_args = {'instance': 'fake-instance',
                         'delete_type': 'delete'}
        self._check_result(call_info, 'terminate_instance',
                           expected_args, version='1.36')
    def test_soft_delete_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.soft_delete_instance(self.fake_context,
                                               'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'soft_delete_instance',
                           expected_args, version='1.18')
    def test_resize_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.resize_instance(self.fake_context,
                                          'fake-instance',
                                          dict(cow='moo'),
                                          'fake-hint',
                                          'fake-flavor',
                                          'fake-reservations',
                                          clean_shutdown=True)
        # Only a subset of the positional args is forwarded over RPC.
        expected_args = {'instance': 'fake-instance',
                         'flavor': 'fake-flavor',
                         'extra_instance_updates': dict(cow='moo'),
                         'clean_shutdown': True}
        self._check_result(call_info, 'resize_instance',
                           expected_args, version='1.33')
    def test_live_migrate_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.live_migrate_instance(self.fake_context,
                                                'fake-instance',
                                                'fake-host',
                                                'fake-block',
                                                'fake-commit')
        expected_args = {'instance': 'fake-instance',
                         'block_migration': 'fake-block',
                         'disk_over_commit': 'fake-commit',
                         'host_name': 'fake-host'}
        self._check_result(call_info, 'live_migrate_instance',
                           expected_args, version='1.20')
    def test_revert_resize(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.revert_resize(self.fake_context,
                                        'fake-instance',
                                        'fake-migration',
                                        'fake-dest',
                                        'resvs')
        # Only the instance itself is forwarded over RPC.
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'revert_resize',
                           expected_args, version='1.21')
    def test_confirm_resize(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.confirm_resize(self.fake_context,
                                         'fake-instance',
                                         'fake-migration',
                                         'fake-source',
                                         'resvs')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'confirm_resize',
                           expected_args, version='1.21')
    def test_reset_network(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.reset_network(self.fake_context,
                                        'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'reset_network',
                           expected_args, version='1.22')
    def test_inject_network_info(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.inject_network_info(self.fake_context,
                                              'fake-instance')
        expected_args = {'instance': 'fake-instance'}
        self._check_result(call_info, 'inject_network_info',
                           expected_args, version='1.23')
    def test_snapshot_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.snapshot_instance(self.fake_context,
                                            'fake-instance',
                                            'image-id')
        expected_args = {'instance': 'fake-instance',
                         'image_id': 'image-id'}
        self._check_result(call_info, 'snapshot_instance',
                           expected_args, version='1.24')
    def test_backup_instance(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.backup_instance(self.fake_context,
                                          'fake-instance',
                                          'image-id',
                                          'backup-type',
                                          'rotation')
        expected_args = {'instance': 'fake-instance',
                         'image_id': 'image-id',
                         'backup_type': 'backup-type',
                         'rotation': 'rotation'}
        self._check_result(call_info, 'backup_instance',
                           expected_args, version='1.24')
    def test_set_admin_password(self):
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.set_admin_password(self.fake_context,
                'fake-instance', 'fake-password')
        expected_args = {'instance': 'fake-instance',
                         'new_pass': 'fake-password'}
        self._check_result(call_info, 'set_admin_password',
                           expected_args, version='1.29')
| |
from functools import partial
from io import BytesIO
from os import path
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.db.models import signals
from django.forms import Textarea, ValidationError
from django.template.defaultfilters import filesizeformat
from django.utils.encoding import smart_str
from django.utils.safestring import SafeData, mark_safe
from django.utils.translation import gettext_lazy as _
from machina.conf import settings as machina_settings
_rendered_field_name = lambda name: '_{}_rendered'.format(name)
def _get_markup_widget():
    """Return the form widget class configured via MACHINA_MARKUP_WIDGET.

    Falls back to a plain ``Textarea`` when the setting is ``None``; raises
    ``ImproperlyConfigured`` when the dotted path cannot be imported.
    """
    dotted_path = machina_settings.MARKUP_WIDGET
    # Explicit check instead of the previous assert/AssertionError dance:
    # asserts are stripped under ``python -O``, which would have turned the
    # fallback into an uncaught AttributeError on ``None.rsplit``.
    if dotted_path is None:
        return Textarea
    try:
        module, widget = dotted_path.rsplit('.', 1)
        module, widget = smart_str(module), smart_str(widget)
        widget = getattr(__import__(module, {}, {}, [widget]), widget)
        return widget
    except ImportError as e:
        raise ImproperlyConfigured(
            _('Could not import MACHINA_MARKUP_WIDGET {}: {}').format(
                machina_settings.MARKUP_WIDGET, e
            )
        )
# Resolve the configured widget class once at import time; used as the
# default widget for MarkupTextField form fields.
MarkupTextFieldWidget = _get_markup_widget()
def _get_render_function(dotted_path, kwargs):
    """Import the callable at ``dotted_path`` and pre-bind ``kwargs`` to it."""
    module_path, func_name = dotted_path.rsplit('.', 1)
    module_path = smart_str(module_path)
    func_name = smart_str(func_name)
    module = __import__(module_path, {}, {}, [func_name])
    render = getattr(module, func_name)
    return partial(render, **kwargs)
# Build the markup render function once at import time.
# MACHINA_MARKUP_LANGUAGE is expected to be a (dotted_path, kwargs) pair;
# when it is falsy, rendering is the identity function.
try:
    markup_lang = machina_settings.MARKUP_LANGUAGE
    render_func = (
        _get_render_function(markup_lang[0], markup_lang[1]) if markup_lang
        else lambda text: text
    )
except ImportError as e:
    raise ImproperlyConfigured(
        _('Could not import MACHINA_MARKUP_LANGUAGE {}: {}').format(
            machina_settings.MARKUP_LANGUAGE, e,
        )
    )
except AttributeError:
    # Raised when the MARKUP_LANGUAGE setting is missing entirely.
    raise ImproperlyConfigured(_('MACHINA_MARKUP_LANGUAGE setting is required'))
class MarkupText(SafeData):
    """Proxy object exposing both the raw and the rendered markup of a field.

    Instances delegate everything to the model instance they wrap, so writing
    to ``raw`` updates the underlying field.
    """
    def __init__(self, instance, field_name, rendered_field_name):
        # Keep a reference to the model instance plus both field names so
        # that assignment can be delegated back to the instance.
        self.instance = instance
        self.field_name = field_name
        self.rendered_field_name = rendered_field_name

    @property
    def raw(self):
        """Raw (unrendered) markup content. Writable."""
        return self.instance.__dict__[self.field_name]

    @raw.setter
    def raw(self, val):
        setattr(self.instance, self.field_name, val)

    @property
    def rendered(self):
        """Rendered content, marked safe. Read-only."""
        return mark_safe(getattr(self.instance, self.rendered_field_name))

    def __str__(self):
        # Allows display via templates to work without the safe filter.
        return self.raw

    def __len__(self):
        # Length of the raw content, so boolean tests behave as expected.
        return len(self.raw)
class MarkupTextDescriptor:
    """
    Acts as the Django's default attribute descriptor class, enabled via the SubfieldBase metaclass.
    The main difference is that it does not call to_python() on the MarkupTextField class. Instead,
    it stores the two different values of a markup content (the raw and the rendered data)
    separately. These values can be separately updated when something is assigned. When the field is
    accessed, a MarkupText instance will be returned ; this one is built with the current data.
    """
    def __init__(self, field):
        self.field = field
        self.rendered_field_name = _rendered_field_name(self.field.name)
    def __get__(self, instance, owner):
        # Class-level access (instance is None) yields None rather than the
        # descriptor itself.
        if instance is None:
            return None
        raw = instance.__dict__[self.field.name]
        # A null raw value means no markup content at all: return None
        # instead of wrapping it.
        if raw is None:
            return None
        return MarkupText(instance, self.field.name, self.rendered_field_name)
    def __set__(self, instance, value):
        if isinstance(value, MarkupText):
            # Assigning a MarkupText copies both the raw and the rendered
            # values onto this instance.
            instance.__dict__[self.field.name] = value.raw
            setattr(instance, self.rendered_field_name, value.rendered)
        else:
            # Set only the raw field
            instance.__dict__[self.field.name] = value
class MarkupTextField(models.TextField):
    """
    A MarkupTextField contributes two columns to the model instead of the standard single column.
    The initial column store any content written by using a given markup language and the other one
    keeps the rendered content returned by a specific render function.
    """
    def __init__(self, *args, **kwargs):
        # For Django 1.7 migration serializer compatibility: the frozen version of a
        # MarkupTextField can't try to add a '*_rendered' field, because the '*_rendered' field
        # itself is frozen / serialized as well.
        self.add_rendered_field = not kwargs.pop('no_rendered_field', False)
        super().__init__(*args, **kwargs)
    def deconstruct(self):  # pragma: no cover
        """
        As outlined in the Django 1.7 documentation, this method tells Django how to take an
        instance of a new field in order to reduce it to a serialized form. This can be used to
        configure what arguments need to be passed to the __init__() method of the field in order to
        re-create it. We use it in order to pass the 'no_rendered_field' to the __init__() method.
        This will allow the _rendered field to not be added to the model class twice.
        """
        name, import_path, args, kwargs = super().deconstruct()
        kwargs['no_rendered_field'] = True
        return name, import_path, args, kwargs
    def contribute_to_class(self, cls, name):
        """Attach the field plus its hidden '*_rendered' companion to the model."""
        if self.add_rendered_field and not cls._meta.abstract:
            rendered_field = models.TextField(editable=False, blank=True, null=True)
            cls.add_to_class(_rendered_field_name(name), rendered_field)
            # The data will be rendered before each save
            signals.pre_save.connect(self.render_data, sender=cls)
        # Add the default text field
        super().contribute_to_class(cls, name)
        # Associates the name of this field to a special descriptor that will return
        # an appropriate Markup object each time the field is accessed
        setattr(cls, name, MarkupTextDescriptor(self))
    def value_to_string(self, obj):
        """Serialize the field as its raw markup content."""
        value = self.value_from_object(obj)
        # NOTE(review): when the field is null, the descriptor returns None and
        # this raises AttributeError on '.raw' — confirm null fields are never
        # serialized through this path.
        return value.raw
    def get_db_prep_value(self, value, connection=None, prepared=False):
        # Store the raw markup; plain strings (or None) pass through untouched.
        try:
            return value.raw
        except AttributeError:
            return value
    def render_data(self, signal, sender, instance=None, **kwargs):
        """pre_save handler: refresh the '*_rendered' column from the raw value."""
        value = getattr(instance, self.attname)
        rendered = None
        if hasattr(value, 'raw'):
            rendered = render_func(value.raw)
        setattr(instance, _rendered_field_name(self.attname), rendered)
    def formfield(self, **kwargs):
        """Use the configured markup widget for the generated form field."""
        widget = _get_markup_widget()
        defaults = {'widget': widget(**machina_settings.MARKUP_WIDGET_KWARGS)}
        defaults.update(kwargs)
        field = super().formfield(**defaults)
        return field
class ExtendedImageField(models.ImageField):
    """
    An ExtendedImageField is an ImageField whose image can be resized before being saved.
    This field also add the capability of checking the image size, width and height a user may send.
    """
    def __init__(self, *args, **kwargs):
        # Target box: when both are set, uploaded images are resized to fit.
        self.width = kwargs.pop('width', None)
        self.height = kwargs.pop('height', None)
        # Both min_width and max_width must be provided in order to be used
        self.min_width = kwargs.pop('min_width', None)
        self.max_width = kwargs.pop('max_width', None)
        # Both min_height and max_height must be provided in order to be used
        self.min_height = kwargs.pop('min_height', None)
        self.max_height = kwargs.pop('max_height', None)
        # Maximum upload size in bytes; 0 disables the size check.
        self.max_upload_size = kwargs.pop('max_upload_size', 0)
        super().__init__(*args, **kwargs)
    def clean(self, *args, **kwargs):
        """Validate the uploaded file's size and dimensions against the bounds."""
        from django.core.files.images import get_image_dimensions
        data = super().clean(*args, **kwargs)
        image = data.file
        # Controls the file size
        if self.max_upload_size and hasattr(image, 'size'):
            if image.size > self.max_upload_size:
                raise ValidationError(
                    _('Files of size greater than {} are not allowed. Your file is {}').format(
                        filesizeformat(self.max_upload_size),
                        filesizeformat(image.size)
                    )
                )
        # Controls the image size
        image_width, image_height = get_image_dimensions(data)
        if self.min_width and self.max_width \
                and not self.min_width <= image_width <= self.max_width:
            raise ValidationError(
                _('Images of width lesser than {}px or greater than {}px or are not allowed. '
                  'The width of your image is {}px').format(
                    self.min_width, self.max_width, image_width
                )
            )
        if self.min_height and self.max_height \
                and not self.min_height <= image_height <= self.max_height:
            raise ValidationError(
                _('Images of height lesser than {}px or greater than {}px or are not allowed. '
                  'The height of your image is {}px').format(
                    self.min_height, self.max_height, image_height
                )
            )
        return data
    def save_form_data(self, instance, data):
        """Resize the uploaded image (when a target box is configured) before saving."""
        if data and self.width and self.height:
            content = self.resize_image(data.read(), (self.width, self.height))
            # Handle the filename because the image will be converted to PNG
            filename = path.splitext(path.split(data.name)[-1])[0]
            filename = '{}.png'.format(filename)
            # Regenerate a File object
            data = SimpleUploadedFile(filename, content)
        super().save_form_data(instance, data)
    def resize_image(self, data, size):
        """ Resizes the given image to fit inside a box of the given size. """
        from machina.core.compat import PILImage as Image
        image = Image.open(BytesIO(data))
        # Resize! Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
        # very same filter and is available in both old and new releases.
        image.thumbnail(size, Image.LANCZOS)
        string = BytesIO()
        image.save(string, format='PNG')
        return string.getvalue()
| |
import pytest
from plenum.common.signer_did import DidSigner
from plenum.common.signer_simple import SimpleSigner
from sovrin_client.client.wallet.wallet import Wallet
from sovrin_client.test.cli.helper import prompt_is, addNym, ensureConnectedToTestEnv, createUuidIdentifier, \
createHalfKeyIdentifierAndAbbrevVerkey
from sovrin_common.roles import Roles
from plenum.common.constants import TARGET_NYM
from sovrin_node.test.did.conftest import wallet, abbrevVerkey
# Deterministic 32-byte seed from which the trust-anchor signers below are built.
TRUST_ANCHOR_SEED = b'TRUST0NO0ONE00000000000000000000'
# Expected CLI output fragments; {dest}/{verkey} are substituted via `mapper`.
NYM_ADDED = 'Nym {dest} added'
CURRENT_VERKEY_FOR_NYM = 'Current verkey for NYM {dest} is {verkey}'
# Substring of the rejection message for unauthorised NYM updates.
NOT_OWNER = 'is neither Trustee nor owner of'
# Pass the scope as a keyword: positional use of pytest.fixture's ``scope``
# argument is deprecated and rejected by modern pytest.
@pytest.fixture(scope="module")
def trust_anchor_did_signer():
    """Module-scoped DID signer derived from the fixed trust-anchor seed."""
    return DidSigner(seed=TRUST_ANCHOR_SEED)
# Scope passed as a keyword (positional ``scope`` is deprecated in pytest).
@pytest.fixture(scope="module")
def trust_anchor_cid_signer():
    """Module-scoped CID (simple) signer derived from the trust-anchor seed."""
    return SimpleSigner(seed=TRUST_ANCHOR_SEED)
# Scope passed as a keyword (positional ``scope`` is deprecated in pytest).
@pytest.fixture(scope="module")
def trustAnchorWallet(trustAnchorSigner):
    """Module-scoped wallet owning the trust-anchor identifier."""
    w = Wallet(trustAnchorSigner.identifier)
    w.addIdentifier(signer=trustAnchorSigner)
    return w
def testPoolNodesStarted(poolNodesStarted):
    # Smoke test: requesting the fixture is enough to assert the pool started.
    pass
@pytest.fixture(scope="module")
def aliceCli(be, do, poolNodesStarted, aliceCLI, connectedToTest, wallet):
    """CLI fixture for Alice: set the prompt, attach her wallet, connect to test env."""
    be(aliceCLI)
    do('prompt Alice', expect=prompt_is('Alice'))
    addAndActivateCLIWallet(aliceCLI, wallet)
    do('connect test', within=3, expect=connectedToTest)
    return aliceCLI
@pytest.fixture(scope="module")
def trustAnchorCli(be, do, poolNodesStarted, earlCLI, connectedToTest,
                   trustAnchorWallet):
    """CLI fixture for the trust anchor (Earl): prompt, wallet, connection."""
    be(earlCLI)
    do('prompt Earl', expect=prompt_is('Earl'))
    addAndActivateCLIWallet(earlCLI, trustAnchorWallet)
    do('connect test', within=3, expect=connectedToTest)
    return earlCLI
def getNym(be, do, userCli, idr, expectedMsgs):
    """Switch to *userCli* and issue GET_NYM for *idr*, expecting *expectedMsgs*."""
    be(userCli)
    command = 'send GET_NYM dest={}'.format(idr)
    do(command, within=3, expect=expectedMsgs)
def getNymNotFoundExpectedMsgs(idr):
    """Expected CLI output when a NYM lookup finds nothing for *idr*."""
    return ['NYM {} not found'.format(idr)]
def testGetDIDWithoutAddingIt(be, do, philCli, trust_anchor_did_signer):
    # A DID that was never written to the ledger must not be found.
    ensureConnectedToTestEnv(be, do, philCli)
    getNym(be, do, philCli, trust_anchor_did_signer.identifier,
           getNymNotFoundExpectedMsgs(trust_anchor_did_signer.identifier))
def testGetCIDWithoutAddingIt(be, do, philCli, trust_anchor_cid_signer):
    # A CID that was never written to the ledger must not be found.
    ensureConnectedToTestEnv(be, do, philCli)
    getNym(be, do, philCli, trust_anchor_cid_signer.identifier,
           getNymNotFoundExpectedMsgs(trust_anchor_cid_signer.identifier))
def addAndActivateCLIWallet(cli, wallet):
    """Register *wallet* with *cli* under its name and make it the active wallet."""
    cli.activeWallet = wallet
    cli.wallets[wallet.name] = wallet
@pytest.fixture(scope="module")
def didAdded(be, do, philCli, trust_anchor_did_signer):
    """Write the trust-anchor DID (no verkey yet) to the ledger via Phil's CLI."""
    ensureConnectedToTestEnv(be, do, philCli)
    addNym(be, do, philCli,
           trust_anchor_did_signer.identifier,
           role=Roles.TRUST_ANCHOR.name
           )
    return philCli
def testAddDID(didAdded):
    # The work (and its expectations) happen in the didAdded fixture.
    pass
@pytest.fixture(scope="module")
def cidAdded(be, do, philCli, trust_anchor_cid_signer):
    """Write the trust-anchor CID (no verkey yet) to the ledger via Phil's CLI."""
    addNym(be, do, philCli, trust_anchor_cid_signer.identifier,
           role=Roles.TRUST_ANCHOR.name)
    return philCli
def testAddCID(cidAdded):
    # The work (and its expectations) happen in the cidAdded fixture.
    pass
def getNoVerkeyEverAssignedMsgs(idr):
    """Expected CLI output for a DID that never had a verkey set."""
    return ['No verkey ever assigned to the DID {}'.format(idr)]
def testGetDIDWithoutVerkey(be, do, philCli, didAdded,
                            trust_anchor_did_signer):
    # A DID added without a verkey reports that none was ever assigned.
    getNym(be, do, philCli, trust_anchor_did_signer.identifier,
           getNoVerkeyEverAssignedMsgs(trust_anchor_did_signer.identifier))
def getVerkeyIsSameAsIdentifierMsgs(idr):
    """Expected CLI output when the verkey equals the DID itself (CID case)."""
    return ['Current verkey is same as DID {}'.format(idr)]
def testGetCIDWithoutVerkey(be, do, philCli, cidAdded,
                            trust_anchor_cid_signer):
    # For a CID written without an explicit verkey, the verkey is the DID itself.
    getNym(be, do, philCli, trust_anchor_cid_signer.identifier,
           getVerkeyIsSameAsIdentifierMsgs(trust_anchor_cid_signer.identifier))
@pytest.fixture(scope="module")
def verkeyAddedToDID(be, do, philCli, didAdded, trust_anchor_did_signer):
    """Attach the signer's verkey to the already-written DID."""
    addNym(be, do, philCli, trust_anchor_did_signer.identifier,
           trust_anchor_did_signer.verkey)
def testAddVerkeyToExistingDID(verkeyAddedToDID):
    # Covered entirely by the verkeyAddedToDID fixture.
    pass
@pytest.fixture(scope="module")
def verkeyAddedToCID(be, do, philCli, cidAdded, trust_anchor_cid_signer):
    """Attach the signer's verkey to the already-written CID; return the signer."""
    # newSigner = SimpleSigner(identifier=trust_anchor_cid_signer.identifier)
    # new_verkey = newSigner.verkey
    addNym(be, do, philCli, trust_anchor_cid_signer.identifier,
           verkey=trust_anchor_cid_signer.verkey)
    return trust_anchor_cid_signer
def testAddVerkeyToExistingCID(verkeyAddedToCID):
    # Covered entirely by the verkeyAddedToCID fixture.
    pass
def getCurrentVerkeyIsgMsgs(idr, verkey):
    """Expected CLI output reporting *verkey* as the current verkey of *idr*."""
    return ['Current verkey for NYM {} is {}'.format(idr, verkey)]
def testGetDIDWithVerKey(be, do, philCli, verkeyAddedToDID,
                         trust_anchor_did_signer):
    # After attaching a verkey, GET_NYM must report it for the DID.
    getNym(be, do, philCli, trust_anchor_did_signer.identifier,
           getCurrentVerkeyIsgMsgs(trust_anchor_did_signer.identifier,
                                   trust_anchor_did_signer.verkey))
def testGetCIDWithVerKey(be, do, philCli, verkeyAddedToCID,
                         trust_anchor_cid_signer):
    # After attaching a verkey, GET_NYM must report it for the CID.
    getNym(be, do, philCli, trust_anchor_cid_signer.identifier,
           getCurrentVerkeyIsgMsgs(trust_anchor_cid_signer.identifier,
                                   trust_anchor_cid_signer.verkey))
def getNoActiveVerkeyFoundMsgs(idr):
    """Expected CLI output after a verkey has been blanked out for *idr*."""
    return ['No active verkey found for the identifier {}'.format(idr)]
def addAttribToNym(be, do, userCli, idr, raw):
    """Send an ATTRIB txn for *idr* with payload *raw* via *userCli*."""
    be(userCli)
    expected = ['Attribute added for nym {}'.format(idr)]
    do('send ATTRIB dest={} raw={}'.format(idr, raw), within=5, expect=expected)
@pytest.mark.skip("INDY- This should not have worked")
def testSendAttribForDID(be, do, verkeyAddedToDID,
                         trust_anchor_did_signer, aliceCli):
    # Alice (not the owner) writes an ATTRIB for the trust anchor's DID;
    # skipped because this succeeding was itself a bug.
    raw = '{"name": "Alice"}'
    addAttribToNym(be, do, aliceCli, trust_anchor_did_signer.identifier, raw)
@pytest.mark.skip("INDY- This should not have worked")
def testSendAttribForCID(be, do, verkeyAddedToCID,
                         trust_anchor_cid_signer, trustAnchorCli):
    # Same as the DID variant, against the CID; skipped for the same reason.
    raw = '{"name": "Earl"}'
    addAttribToNym(be, do, trustAnchorCli,
                   trust_anchor_cid_signer.identifier, raw)
@pytest.fixture(scope="module")
def verkeyRemovedFromExistingDID(
        be, do, verkeyAddedToDID, abbrevIdr, aliceCli):
    """Blank out the verkey on the DID and verify none remains active."""
    be(aliceCli)
    addNym(be, do, aliceCli, abbrevIdr, '')
    getNym(be, do, aliceCli, abbrevIdr, getNoActiveVerkeyFoundMsgs(abbrevIdr))
@pytest.mark.skip(reason="verkey removal is not supported")
def testRemoveVerkeyFromDID(verkeyRemovedFromExistingDID):
    # Covered entirely by the fixture; skipped while removal is unsupported.
    pass
@pytest.fixture(scope="module")
def verkeyRemovedFromExistingCID(
        be,
        do,
        verkeyAddedToCID,
        trustAnchorSigner,
        trustAnchorCli,
        trustAnchorWallet):
    """Blank out the verkey on the CID and verify none remains active."""
    be(trustAnchorCli)
    addNym(be, do, trustAnchorCli, trustAnchorSigner.identifier, '')
    getNym(be, do, trustAnchorCli, trustAnchorSigner.identifier,
           getNoActiveVerkeyFoundMsgs(trustAnchorSigner.identifier))
@pytest.mark.skip(
    reason="SOV-568. Obsolete assumption, if an identity has set "
           "its verkey to blank, no-one including "
           "itself can change it")
def testNewverkeyAddedToDID(be, do, philCli, abbrevIdr,
                            verkeyRemovedFromExistingDID):
    # Attempt to set a brand-new verkey on a DID whose verkey was blanked.
    newSigner = DidSigner()
    addNym(be, do, philCli, abbrevIdr, newSigner.verkey)
    getNym(be, do, philCli, abbrevIdr,
           getCurrentVerkeyIsgMsgs(abbrevIdr, newSigner.verkey))
@pytest.mark.skip(
    reason="SOV-568. Obsolete assumption, if an identity has set "
           "its verkey to blank, no-one including "
           "itself can change it")
def testNewverkeyAddedToCID(be, do, philCli, trustAnchorSigner,
                            verkeyRemovedFromExistingCID):
    # Attempt to set a brand-new verkey on a CID whose verkey was blanked.
    newSigner = DidSigner()
    addNym(be, do, philCli, trustAnchorSigner.identifier, newSigner.verkey)
    getNym(
        be,
        do,
        philCli,
        trustAnchorSigner.identifier,
        getCurrentVerkeyIsgMsgs(
            trustAnchorSigner.identifier,
            newSigner.verkey))
def testNewKeyChangesWalletsDefaultId(be, do, poolNodesStarted, poolTxnData,
                                      susanCLI, connectedToTest):
    """Switching keys mid-session changes which key signs subsequent txns."""
    mywallet = Wallet('my wallet')
    keyseed = 'a' * 32
    idr, _ = mywallet.addIdentifier(seed=keyseed.encode("utf-8"))
    be(susanCLI)
    do('connect test', within=3, expect=connectedToTest)
    do('new key with seed {}'.format(keyseed))
    # First attempt signs with the unprivileged fresh key (no expectation set).
    do('send NYM dest={}'.format(idr))
    # Re-key to Steward1, which is authorised to write the NYM.
    do('new key with seed {}'.format(poolTxnData['seeds']['Steward1']))
    do('send NYM dest={}'.format(idr), within=3,
       expect=["Nym {} added".format(idr)])
def test_send_same_nyms_only_first_gets_written(
        be, do, poolNodesStarted, newStewardCli):
    """Two writes of the same NYM with different verkeys: only the first wins."""
    be(newStewardCli)
    halfKeyIdentifier, abbrevVerkey = createHalfKeyIdentifierAndAbbrevVerkey()
    _, anotherAbbrevVerkey = createHalfKeyIdentifierAndAbbrevVerkey()
    # request 1
    # NOTE(review): TARGET_NYM is substituted for the {dest} placeholder, so
    # this builds "send NYM dest=<idr> ..." only if TARGET_NYM == 'dest' — confirm.
    newStewardCli.enterCmd(
        "send NYM {dest}={nym} verkey={verkey}". format(
            dest=TARGET_NYM,
            nym=halfKeyIdentifier,
            verkey=abbrevVerkey))
    parameters = {
        'dest': halfKeyIdentifier,
        'verkey': anotherAbbrevVerkey
    }
    # "enterCmd" does not immediately send to server, second request with same NYM
    # and different verkey should not get written to ledger.
    # request 2
    do('send NYM dest={dest} verkey={verkey}',
       mapper=parameters, expect=NYM_ADDED, within=10)
    parameters = {
        'dest': halfKeyIdentifier,
        'verkey': abbrevVerkey
    }
    # check that second request didn't write to ledger and first verkey is
    # written
    do('send GET_NYM dest={dest}',
       mapper=parameters, expect=CURRENT_VERKEY_FOR_NYM, within=2)
def test_send_different_nyms_succeeds_when_batched(
        be, do, poolNodesStarted, newStewardCli):
    """Two *different* NYMs submitted back to back are both written."""
    be(newStewardCli)
    idr_1, verkey_1 = createHalfKeyIdentifierAndAbbrevVerkey()
    idr_2, verkey_2 = createHalfKeyIdentifierAndAbbrevVerkey()
    # request 1: queued via enterCmd (not flushed to the pool yet)
    newStewardCli.enterCmd(
        "send NYM dest={dest} verkey={verkey}".format(
            dest=idr_1, verkey=verkey_1))
    # request 2: two different nyms, batched, both should be written.
    # (A dead `parameters` dict for request 1 that was immediately
    # overwritten has been removed.)
    parameters = {
        'dest': idr_2,
        'verkey': verkey_2
    }
    do('send NYM dest={dest} verkey={verkey}',
       mapper=parameters, expect=NYM_ADDED, within=10)
    # Verify both NYMs landed with their own verkeys.
    parameters = {
        'dest': idr_1,
        'verkey': verkey_1
    }
    do('send GET_NYM dest={dest}',
       mapper=parameters, expect=CURRENT_VERKEY_FOR_NYM, within=2)
    parameters = {
        'dest': idr_2,
        'verkey': verkey_2
    }
    do('send GET_NYM dest={dest}',
       mapper=parameters, expect=CURRENT_VERKEY_FOR_NYM, within=2)
| |
"""prettyunit - prettysite views.py
Andrew Scott 10/21/2016"""
#pylint: disable=line-too-long, invalid-name, bare-except, broad-except
import json
import datetime
import logging
from flask import render_template, request
from prettysite import app, config
from prettysite.models import Suite, TestCase, Test, Project, PrettySiteSettings, APIToken, Internal
from prettysite.JunitParse import JunitParse
from prettysite.APIValidation import APIHandler
from prettysite.APIKey import APIKey
# ---------------------------------------Logging---------------------------------------------
# ``import logging`` does NOT implicitly load the ``logging.handlers``
# submodule; import it explicitly before using RotatingFileHandler.
import logging.handlers

LOG_FILENAME = config.LOG_PATH
app.logger.setLevel(logging.INFO)
# Rotate the log file at ~100 MB, keeping up to 20 old files.
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME,
    maxBytes=1024 * 1024 * 100,
    backupCount=20
)
app.logger.addHandler(handler)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
@app.before_request
def pre_request_logging():
    # Log every incoming HTML request; verbose (headers included) in DEBUG mode.
    # NOTE(review): indexing request.headers['Accept'] raises if the client
    # sends no Accept header — confirm whether that can happen here.
    if 'text/html' in request.headers['Accept']:
        if app.config['DEBUG']:
            app.logger.debug(' - '.join([
                datetime.datetime.today().ctime(),
                request.remote_addr,
                request.method,
                request.url,
                ', '.join([': '.join(x) for x in request.headers])]))
        else:
            app.logger.info(' - '.join([
                datetime.datetime.today().ctime(),
                request.remote_addr,
                request.method,
                request.url]))
@app.after_request
def log_the_status_code(response):
    """After-request hook: log any 4xx/5xx response status at ERROR level."""
    status_text = response.status
    if response.status_code >= 400:
        app.logger.error(status_text)
    return response
# ----------------------------------------------------------------------------------------
@app.route('/', methods=['GET'])
def index():
    '''
    This should return the base page, typically where the user would pick which project they'd
    like to work in.
    :return: 200 if successful
             500 if an error occurs
    '''
    try:
        projects = Project.listprojects()
        site_settings = PrettySiteSettings.listsettings()
        name = PrettySiteSettings.getsettingvalue("Name")
        return render_template('project.html', name=name, projects=projects, settings=site_settings)
    # Broad catch: any failure is logged and rendered as the generic 500 page.
    except Exception, err:
        app.logger.error(' Error in base page call: {}'.format(err))
        return render_template('500.html'), 500
@app.route('/<int:projectid>', methods=['GET'])
def project_overview(projectid):
    '''
    This route should return the base page of a project, displaying aggregated results of all test suites.
    :param projectid: int - This should be the ID of an existing project.
    :return: 200 returns html page template and a variety of params to the template.
             500 if an error occurs
    '''
    try:
        if Project.does_exist(projectid):
            # Timeline/settings/description are assembled by the shared helper.
            dates, timeline, site_settings, project_desc = details_from_project_id(projectid)
            suitelist = [item for item in Suite.get_suites_by_project(projectid).items()]
            name = PrettySiteSettings.getsettingvalue("Name")
            return render_template('index.html', timeline=timeline, name=name, timeline_dates=dates, suitelist=suitelist, settings=site_settings, project_desc=project_desc)
        else:
            app.logger.error('Project ID not found: {}'.format(projectid))
            return render_template('404.html'), 404
    except Exception, err:
        app.logger.error('Error in project call: {}'.format(err))
        return render_template('500.html'), 500
@app.route('/<int:projectid>/<int:suiteid>', methods=['GET'])
def suite_overview(suiteid, projectid):
    '''
    This call returns the aggregated results for a test suite within a project
    :param suiteid: int - existing suite id
    :param projectid: int - existing project id, the suiteid above should belong to this project.
    :return: 200 returns html page template and a variety of params to the template.
             500 if an error occurs
    '''
    try:
        if Project.does_exist(projectid):
            # raise_error carries a user-visible warning into the template.
            raise_error = None
            dates, timeline, site_settings, project_desc = details_from_project_id(projectid)
            suiteDetails = []
            # NOTE(review): suite details are fetched before the does_exist
            # check below — confirm get_suite_details tolerates unknown ids.
            details = Suite.get_suite_details(suiteid)
            suiteDetails.append(["Pass Rate", str(details[0])])
            suiteDetails.append(["Last Run", str(details[1])])
            suiteDetails.append(["Project Name", str(details[2])])
            suiteDetails.append(["Server", str(details[3])])
            suiteDetails.append(["Platform", str(details[4])])
            if Suite.does_exist(suiteid):
                suiteResults = Suite.results(suiteid)
                caseList = [[case.id, case.TestCaseName, case.DateRun]
                            for case in TestCase.get_testcase_by_suiteid(suiteid)]
                caseResults = [[case.PassCount, case.FailCount, case.ErrorCount, case.SkipCount]
                               for case in TestCase.get_testcase_by_suiteid(suiteid)]
                # testResults[i] collects [name, message, result] per test of case i.
                testResults = []
                if len(caseList) > 0:
                    for i, case in enumerate(caseList):
                        testResults.append([])
                        # if length of list if greater than 0
                        list_of_tests = Test.get_test_by_testcaseid(case[0])
                        if len(list_of_tests) > 0:
                            for test in list_of_tests:
                                testResults[i].append([test.TestName, test.Message, test.Result])
                        else:
                            raise_error = 'No Tests to display.'
                    # ?case=<id> selects which test case the template expands;
                    # translate the case id into its index within caseList.
                    caseToDisplay = (0 if request.args.get('case') is None else int(request.args.get('case')))
                    if caseToDisplay != 0:
                        for i, c in enumerate(caseList):
                            if c[0] == int(caseToDisplay):
                                caseToDisplay = i
                else:
                    caseToDisplay = 0
                    raise_error = "No Test Cases to display."
                name = PrettySiteSettings.getsettingvalue("Name")
                return render_template('suite.html', timeline=timeline, name=name, timeline_dates=dates,
                                       suite_results=suiteResults, testcaseslist=caseList,
                                       suiteid=suiteid, caseresults=caseResults,
                                       testresults=testResults, casetodisplay=caseToDisplay, suitedetails=suiteDetails,
                                       settings=site_settings, project_desc=project_desc, raise_error=raise_error)
            else:
                app.logger.error('Suite ID not found: {}'.format(suiteid))
                return render_template('404.html'), 404
        else:
            app.logger.error('Project ID not found: {}'.format(projectid))
            return render_template('404.html'), 404
    except Exception, err:
        app.logger.error('Error in suite call: {}'.format(err))
        return render_template('500.html'), 500
@app.errorhandler(404)
def page_not_found(e):
    # Render the shared 404 template for any unmatched route.
    return render_template('404.html'), 404
@app.errorhandler(500)
def server_error(e):
    # Render the shared 500 template for unhandled server errors.
    return render_template('500.html'), 500
# -------------------------------- API ----------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# -------------------------------- Version ------------------------------------------------
@app.route('/version', methods=['GET', 'HEAD'])
def version():
    '''
    Returns the current version of prettysite
    :return: 200 response if Version is set
             500 if there is an issue
    '''
    try:
        # Version string comes straight from the deployed config module.
        return (config.VERSION, 200)
    except Exception, err:
        app.logger.error('Error in version call: {}'.format(err))
        return ('', 500)
# -------------------------------- Settings ------------------------------------------------
@app.route('/settings', methods=['GET', 'HEAD'])
def settings():
    '''
    This call can be used to return a json object containing the prettyunit settings.
    :return: 200 if successful
             500 if there was an error
    '''
    try:
        site_settings = PrettySiteSettings.listsettings()
        # NOTE(review): this assumes a fixed ordering of listsettings():
        # [0]=version, [1]=name, [2]=api_tokens_enabled, then Key1/Key2 in
        # either order at [3]/[4] — confirm against the model.
        data = {'version' : site_settings[0][1], 'name' : site_settings[1][1], 'api_tokens_enabled' : site_settings[2][1]}
        if data['api_tokens_enabled'] == 'True' and len(site_settings) >= 4:
            if site_settings[3][0] == 'Key2':
                data['Key2'] = site_settings[3][1]
            if site_settings[3][0] == 'Key1':
                data['Key1'] = site_settings[3][1]
            if site_settings[4][0] == 'Key2':
                data['Key2'] = site_settings[4][1]
            if site_settings[4][0] == 'Key1':
                data['Key1'] = site_settings[4][1]
        return (str(json.dumps(data)), 200)
    except Exception, err:
        app.logger.error('Error in settings call: {}'.format(err))
        return ('', 500)
@app.route('/settings', methods=['POST'])
def update_settings():
'''
This call can be used to update the prettyunit settings. The second value in the tuple should always be "False" in order for this call to work.
{
"Name":[{string}, "False"],
"API Tokens Enabled":[["True" or "False"], "False"]
}
:return: 200 if successful
500 if there was an error
'''
try:
content = request.get_json(silent=True)
newKeys = {}
for key, val in content.items():
if val[1] == "False":
PrettySiteSettings.setsettingvalue(key, val[0])
print type(key)
if key == 'Key1' or key == 'Key2':
newKeys[key] = val[0]
PrettySiteSettings.setsettingvalue(key, val[0])
if len(newKeys) > 0:
keyHandler = APIKey()
APIToken.replaceAPItoken(keyHandler.createMasterKey(newKeys["Key1"], newKeys["Key2"]))
return '', 200
except Exception, err:
app.logger.error('Error in settings call: {}'.format(err))
return '', 500
# -------------------------------- Results ------------------------------------------------
@app.route('/api/results', methods=['POST'])
def add_results():
'''
This call is used to add new test records to pretty unit. Currently it can accept the
following formats [json v 1.0, ]
---------------------------- json v 1.0 --------------------------------------------------
{
"puv": "1.0",
"tests-error": {integer},
"tests-skipped": {integer},
"timestamp": {string},
"system": {string},
"server": {string},
"project": {string},
"test-to-run": {integer},
"tests-failure": {integer},
"tests-run": {integer},
"suite-name": {string},
"test-cases": {
"BaseTest1": [
{
"test-name": {string},
"message": null or {string},
"result": {string},
"time": {float}
}
]
}
}
:return: 200 if call was successfully parsed
400 if request body was determined to be malformed
401 if API keys are enabled and request includes missing or incorrect keys
500 if a server error occurs
'''
try:
keygen = APIKey()
useTokens = keygen.areTokensEnabledAndExist()
if useTokens:
keyHeader = request.headers.get('X-Keys')
keys = json.loads(keyHeader)
token = keygen.createMasterKey(keys["Key1"], keys["Key2"])
if APIToken.validateToken(token):
if request.headers.get('content-type') == 'application/json':
content = request.get_json(silent=True)
return json_parsing_loop(content)
elif request.headers.get('content-type') == 'application/xml':
data = request.get_data()
jp = JunitParse()
content = jp.add_project(jp.junit_parse(data))
return json_parsing_loop(content[0])
else:
if request.headers.get('content-type') != None:
app.logger.error('Invalid format in attempt: {}'.format(request.headers.get('content-type')))
else:
app.logger.error('Content-Type header missing in request')
return ('non-json format not yet supported', 400)
else:
app.logger.error('Invalid token in attempt: {}'.format(keys))
return ('Invalid token', 401)
else:
if request.headers.get('content-type') == 'application/json':
content = request.get_json(silent=True)
print json_parsing_loop(content)
return json_parsing_loop(content)
elif request.headers.get('content-type') == 'application/xml':
data = request.get_data()
jp = JunitParse()
content = jp.add_project(jp.junit_parse(data))
print content
for i in range(0, len(content)):
json_parsing_loop(content[i])
return ('', 200)
else:
if request.headers.get('content-type') != None:
app.logger.error('Invalid format in attempt: {}'.format(request.headers.get('content-type')))
else:
app.logger.error('Content-Type header missing in request')
return ('non-json format not yet supported', 400)
except Exception, err:
app.logger.error('Error in api/results call: {}'.format(err))
return ('', 500)
# -------------------------------- Security ------------------------------------------------
@app.route('/token', methods=['GET'])
def generate_tokens():
    '''
    This call should be used to get a new pair of API keys, which are combined to consititute a security token.
    :return: 200 and a json obj with the new key values
             500 if there was an error
    '''
    try:
        keygen = APIKey()
        # Two independent halves; the caller stores both and sends them in X-Keys.
        k1 = keygen.generateKey()
        k2 = keygen.generateKey()
        data = {"Key1" : k1, "Key2" : k2}
        return (str(json.dumps(data)), 200)
    except Exception, err:
        app.logger.error('Error in token call: {}'.format(err))
        return ('', 500)
@app.route('/usetokens', methods=['GET'])
def usertokens():
    '''
    Can be used to check whether API tokens are enabled and populated
    :return: 200 for tokens ready for use
             404 if tokens are not enabled or populated
             500 if there was an error
    '''
    try:
        keygen = APIKey()
        if keygen.areTokensEnabledAndExist():
            return ('', 200)
        else:
            app.logger.error('Tokens are not populated or are not enabled for this user')
            return ('', 404)
    except Exception, err:
        app.logger.error('Error in token call: {}'.format(err))
        return ('', 500)
# -------------------------------- Project ------------------------------------------------
@app.route('/project/<int:projectid>', methods=['PUT'])
def update_project(projectid):
    '''
    This call can be used to update the details of an existing project.
    {
    'Project': {string},
    'Url': {string},
    'Description': {string},
    'Language': {string}
    }
    :param projectid: int - This should be the ID of an existing project.
    :return: 200 if successful
             404 if the project does not exist
             500 if there was an error
    '''
    if Project.does_exist(projectid):
        try:
            content = request.get_json(silent=True)
            Project.setprojectfields(projectid, content)
            return ('', 200)
        except Exception, err:
            app.logger.error('Error in project details PUT call: {}'.format(err))
            return ('', 500)
    # Unknown project id falls through to a 404.
    app.logger.error('Project ID not found: {}'.format(projectid))
    return ('', 404)
@app.route('/project/<int:projectid>', methods=['GET'])
def get_project(projectid):
    '''
    This call should return a json object with the details for an existing project.
    :param projectid: int - This should be the ID of an existing project.
    :return: 200 if successful
             404 if the project does not exist
             500 if there was an error
    '''
    if Project.does_exist(projectid):
        try:
            # getprojectdetails returns a list of rows; only the first is used.
            project = Project.getprojectdetails(projectid)[0]
            data = {'id' : project[0], 'name' : project[1], 'description' : project[2], 'language' : project[3], 'url' : project[4]}
            return (str(json.dumps(data)), 200)
        except Exception, err:
            app.logger.error('Error in project details GET call: {}'.format(err))
            return ('', 500)
    app.logger.error('Project ID not found: {}'.format(projectid))
    return ('', 404)
@app.route('/project', methods=['GET'])
def list_projects():
    '''
    This call should return a list of all projects as an [id, project_name] list.
    :return: 200 List of project id/name pairs
             500 if error
    '''
    try:
        projects = Project.listprojects()
        return (str(json.dumps(projects)), 200)
    except Exception, err:
        app.logger.error('Error in project call: {}'.format(err))
        return ('', 500)
# -------------------------------- Project ------------------------------------------------
@app.route('/properties', methods=['GET'])
def get_properties():
    '''
    Returns the internal instance properties (id, UUID, creation date, creator,
    version) as a json object.
    :return: 200 if successful
             500 if there was an error
    '''
    try:
        properties = Internal.getInternals(1)
        prop = {"id": properties[0], "UUID": properties[1], "creation_date": properties[2].strftime("%m/%d/%y %H:%M UTC"), "creator": properties[3], "version": properties[4]}
        return (str(json.dumps(prop)), 200)
    except Exception, err:
        app.logger.error('Error in properties call: {}'.format(err))
        return ('', 500)
# -------------------------------- Helpers ------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
def details_from_project_id(projectid):
try:
tl = Suite.timeline(projectid)
site_settings = PrettySiteSettings.listsettings()
project = Project.getprojectdetails(projectid)[0]
project_desc = {'id' : project[0], 'name' : project[1], 'description' : project[2], 'language' : project[3], 'url' : project[4]}
timeline = [[], [], [], []] # skip, error, fail, pass
dates = []
for t in tl:
timeline[0].append(t[3])
timeline[1].append(t[2])
timeline[2].append(t[1])
timeline[3].append(t[0])
dates.append(t[4].strftime("%m/%d/%y %H:%M UTC"))
return (dates, timeline, site_settings, project_desc)
except Exception, err:
app.logger.error('Error in project details helper call: {}'.format(err))
pass
def json_parsing_loop(content):
    # Dispatch a v1.0 payload through the four parsers (project, server,
    # suite, test cases+tests). Returns a (body, status) tuple suitable as a
    # Flask response: 200 on success, 400 for unsupported payload versions.
    APIV = APIHandler()
    if APIV.is_v1(content):
        # Parse Project
        APIV.project_parser_v1(content)
        # Parse Server
        APIV.server_parser_v1(content)
        # Parse Suite
        APIV.suite_parser_v1(content)
        # Parse TestCases and Tests
        APIV.tests_parser_v1(content)
    else:
        return ('unsupported PU json version', 400)
    return ('', 200)
| |
from __future__ import unicode_literals
import json
import six
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from .models import dynamodb_backend2, dynamo_json_dump
GET_SESSION_TOKEN_RESULT = """
<GetSessionTokenResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<GetSessionTokenResult>
<Credentials>
<SessionToken>
AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L
To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z
rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp
Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE
</SessionToken>
<SecretAccessKey>
wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY
</SecretAccessKey>
<Expiration>2011-07-11T19:55:29.611Z</Expiration>
<AccessKeyId>AKIAIOSFODNN7EXAMPLE</AccessKeyId>
</Credentials>
</GetSessionTokenResult>
<ResponseMetadata>
<RequestId>58c5dbae-abef-11e0-8cfe-09039844ac7d</RequestId>
</ResponseMetadata>
</GetSessionTokenResponse>"""
def sts_handler():
    """Return the canned STS GetSessionToken XML response."""
    return GET_SESSION_TOKEN_RESULT
class DynamoHandler(BaseResponse):
def get_endpoint_name(self, headers):
"""Parses request headers and extracts part od the X-Amz-Target
that corresponds to a method of DynamoHandler
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
"""
# Headers are case-insensitive. Probably a better way to do this.
match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
if match:
return match.split(".")[1]
def error(self, type_, status=400):
return status, self.response_headers, dynamo_json_dump({'__type': type_})
    def call_action(self):
        # Route the request: STS GetSessionToken gets the canned XML; any
        # DynamoDB action is dispatched to the method named by X-Amz-Target.
        body = self.body.decode('utf-8')
        if 'GetSessionToken' in body:
            return 200, self.response_headers, sts_handler()
        # Replace the raw body with the parsed JSON for the handler methods.
        self.body = json.loads(body or '{}')
        endpoint = self.get_endpoint_name(self.headers)
        if endpoint:
            # e.g. "ListTables" -> the list_tables method on this class.
            endpoint = camelcase_to_underscores(endpoint)
            response = getattr(self, endpoint)()
            if isinstance(response, six.string_types):
                return 200, self.response_headers, response
            else:
                # Handlers may return (status, headers, body) tuples.
                status_code, new_headers, response_content = response
                self.response_headers.update(new_headers)
                return status_code, self.response_headers, response_content
        else:
            return 404, self.response_headers, ""
def list_tables(self):
body = self.body
limit = body.get('Limit')
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
start = list(dynamodb_backend2.tables.keys()).index(last) + 1
else:
start = 0
all_tables = list(dynamodb_backend2.tables.keys())
if limit:
tables = all_tables[start:start + limit]
else:
tables = all_tables[start:]
response = {"TableNames": tables}
if limit and len(all_tables) > start + limit:
response["LastEvaluatedTableName"] = tables[-1]
return dynamo_json_dump(response)
def create_table(self):
body = self.body
#get the table name
table_name = body['TableName']
#get the throughput
throughput = body["ProvisionedThroughput"]
#getting the schema
key_schema = body['KeySchema']
#getting attribute definition
attr = body["AttributeDefinitions"]
#getting the indexes
table = dynamodb_backend2.create_table(table_name,
schema = key_schema,
throughput = throughput,
attr = attr)
return dynamo_json_dump(table.describe)
def delete_table(self):
name = self.body['TableName']
table = dynamodb_backend2.delete_table(name)
if table is not None:
return dynamo_json_dump(table.describe)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
def update_table(self):
name = self.body['TableName']
throughput = self.body["ProvisionedThroughput"]
table = dynamodb_backend2.update_table_throughput(name, throughput)
return dynamo_json_dump(table.describe)
def describe_table(self):
name = self.body['TableName']
try:
table = dynamodb_backend2.tables[name]
except KeyError:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
return dynamo_json_dump(table.describe)
def put_item(self):
name = self.body['TableName']
item = self.body['Item']
result = dynamodb_backend2.put_item(name, item)
if result:
item_dict = result.to_json()
item_dict['ConsumedCapacityUnits'] = 1
return dynamo_json_dump(item_dict)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
    def batch_write_item(self):
        # Apply each Put/Delete request per table against the backend.
        table_batches = self.body['RequestItems']
        for table_name, table_requests in table_batches.items():
            for table_request in table_requests:
                # Each entry is a single-key dict: {request_type: request}.
                request_type = list(table_request.keys())[0]
                request = list(table_request.values())[0]
                if request_type == 'PutRequest':
                    item = request['Item']
                    dynamodb_backend2.put_item(table_name, item)
                elif request_type == 'DeleteRequest':
                    keys = request['Key']
                    item = dynamodb_backend2.delete_item(table_name, keys)
        # NOTE(review): the response echoes fixed "Thread"/"Reply" table names
        # regardless of the tables actually written — static sample shape.
        response = {
            "Responses": {
                "Thread": {
                    "ConsumedCapacityUnits": 1.0
                },
                "Reply": {
                    "ConsumedCapacityUnits": 1.0
                }
            },
            "UnprocessedItems": {}
        }
        return dynamo_json_dump(response)
def get_item(self):
name = self.body['TableName']
key = self.body['Key']
try:
item = dynamodb_backend2.get_item(name, key)
except ValueError:
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, status=400)
if item:
item_dict = item.describe_attrs(attributes = None)
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
# Item not found
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, status=404)
    def batch_get_item(self):
        # Fetch the requested keys per table; missing items are simply omitted.
        table_batches = self.body['RequestItems']
        results = {
            "ConsumedCapacity":[],
            "Responses": {
            },
            "UnprocessedKeys": {
            }
        }
        for table_name, table_request in table_batches.items():
            items = []
            keys = table_request['Keys']
            attributes_to_get = table_request.get('AttributesToGet')
            results["Responses"][table_name]=[]
            for key in keys:
                item = dynamodb_backend2.get_item(table_name, key)
                if item:
                    item_describe = item.describe_attrs(attributes_to_get)
                    results["Responses"][table_name].append(item_describe["Item"])
            # Capacity is reported as one unit per requested key.
            results["ConsumedCapacity"].append({
                "CapacityUnits": len(keys),
                "TableName": table_name
            })
        return dynamo_json_dump(results)
def query(self):
    """Run a hash-key query with an optional range-key condition.

    Errors: missing table -> ResourceNotFoundException; a second key
    condition on a table without a range key -> ValidationException.
    """
    name = self.body['TableName']
    keys = self.body['KeyConditions']
    hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(name)
    if hash_key_name is None:
        # Table does not exist.  (Fixed: the original string had a stray
        # leading apostrophe baked into the error identifier.)
        er = "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException"
        return self.error(er)
    hash_key = keys[hash_key_name]['AttributeValueList'][0]
    if len(keys) == 1:
        range_comparison = None
        range_values = []
    else:
        if range_key_name is None:
            # A second condition was supplied but the table has no range key.
            er = "com.amazon.coral.validate#ValidationException"
            return self.error(er)
        range_condition = keys[range_key_name]
        if range_condition:
            range_comparison = range_condition['ComparisonOperator']
            range_values = range_condition['AttributeValueList']
        else:
            range_comparison = None
            range_values = []
    items, last_page = dynamodb_backend2.query(name, hash_key,
                                               range_comparison, range_values)
    if items is None:
        er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
        return self.error(er)
    limit = self.body.get("Limit")
    if limit:
        items = items[:limit]
    # Renamed from `reversed` (shadowed the builtin).  NOTE(review): the
    # semantics look inverted -- results are reversed unless the flag is
    # explicitly False -- but this matches the original behavior.
    scan_index_forward = self.body.get("ScanIndexForward")
    if scan_index_forward != False:  # noqa: E712 -- preserve original truthiness test
        items.reverse()
    result = {
        "Count": len(items),
        "Items": [item.attrs for item in items],
        "ConsumedCapacityUnits": 1,
    }
    # Implement this when we do pagination
    # if not last_page:
    #     result["LastEvaluatedKey"] = {
    #         "HashKeyElement": items[-1].hash_key,
    #         "RangeKeyElement": items[-1].range_key,
    #     }
    return dynamo_json_dump(result)
def scan(self):
    """Scan a table, applying any ScanFilter comparisons."""
    name = self.body['TableName']
    # Keys are attribute names. Values are tuples of (comparison, comparison_value)
    filters = {
        attr: (condition["ComparisonOperator"],
               condition.get("AttributeValueList", []))
        for attr, condition in self.body.get('ScanFilter', {}).items()
    }
    items, scanned_count, last_page = dynamodb_backend2.scan(name, filters)
    if items is None:
        return self.error(
            'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException')
    limit = self.body.get("Limit")
    if limit:
        items = items[:limit]
    result = {
        "Count": len(items),
        "Items": [item.attrs for item in items],
        "ConsumedCapacityUnits": 1,
        "ScannedCount": scanned_count
    }
    # Implement this when we do pagination
    # if not last_page:
    #     result["LastEvaluatedKey"] = {
    #         "HashKeyElement": items[-1].hash_key,
    #         "RangeKeyElement": items[-1].range_key,
    #     }
    return dynamo_json_dump(result)
def delete_item(self):
    """Delete an item; echo the old attributes when ReturnValues=ALL_OLD."""
    name = self.body['TableName']
    keys = self.body['Key']
    return_values = self.body.get('ReturnValues', '')
    deleted = dynamodb_backend2.delete_item(name, keys)
    if not deleted:
        # Nothing matched the key -> conditional check failure, per the API.
        return self.error(
            'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException')
    if return_values == 'ALL_OLD':
        item_dict = deleted.to_json()
    else:
        item_dict = {'Attributes': []}
    item_dict['ConsumedCapacityUnits'] = 0.5
    return dynamo_json_dump(item_dict)
| |
from __future__ import absolute_import, division, print_function
import os
import re
import sys
from os.path import isdir, isfile, join
from conda.compat import iteritems, PY3, text_type
from conda.utils import memoized, md5_file
import conda.config as cc
from conda.resolve import MatchSpec
from conda.cli.common import specs_from_url
from . import exceptions
try:
import yaml
from yaml import Loader, SafeLoader
except ImportError:
sys.exit('Error: could not import yaml (required to read meta.yaml '
'files of conda recipes)')
# Override the default string handling function to always return unicode
# objects (taken from StackOverflow)
def construct_yaml_str(self, node):
    # Return the scalar unchanged (always unicode), overriding PyYAML's
    # default, which byte-encodes ASCII-only strings on Python 2.
    return self.construct_scalar(node)

# Install the override on both loaders so every parsed string is unicode.
Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
from conda_build.config import config
def ns_cfg():
    """Build the namespace dict used to evaluate meta.yaml selectors."""
    # Remember to update the docs of any of this changes
    plat = cc.subdir
    py = config.CONDA_PY
    np = config.CONDA_NPY
    pl = config.CONDA_PERL
    for version in (py, np):
        assert isinstance(version, int), version
    namespace = dict(
        linux=plat.startswith('linux-'),
        linux32=bool(plat == 'linux-32'),
        linux64=bool(plat == 'linux-64'),
        osx=plat.startswith('osx-'),
        unix=plat.startswith(('linux-', 'osx-')),
        win=plat.startswith('win-'),
        win32=bool(plat == 'win-32'),
        win64=bool(plat == 'win-64'),
        pl=pl,
        py=py,
        py3k=bool(30 <= py < 40),
        py2k=bool(20 <= py < 30),
        py26=bool(py == 26),
        py27=bool(py == 27),
        py33=bool(py == 33),
        py34=bool(py == 34),
        np=np,
        os=os,
        environ=os.environ,
    )
    # e.g. armv6, armv7l, ppc64le -- whatever conda.config knows about.
    for machine in cc.non_x86_linux_machines:
        namespace[machine] = bool(plat == 'linux-%s' % machine)
    # Environment variables are visible to selectors too.
    namespace.update(os.environ)
    return namespace
# Matches "<content>  [<selector>]" with an optional "# comment" before the
# bracket: group(1) is the content, group(3) the selector expression.  The
# conditional (?(2).*) allows trailing text only when a comment was present.
sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$')
def select_lines(data, namespace):
    """Filter *data* line by line, keeping a line that ends with a
    ``[selector]`` only when the selector evaluates truthily in *namespace*.

    Comment-only lines are dropped; lines without a selector are kept
    unchanged.  Exits the process on an invalid selector expression.
    """
    lines = []
    for i, line in enumerate(data.splitlines()):
        line = line.rstrip()
        if line.lstrip().startswith('#'):
            # Don't bother with comment only lines
            continue
        m = sel_pat.match(line)
        if m:
            cond = m.group(3)
            try:
                # NOTE: selectors are eval'd -- meta.yaml is trusted input.
                if eval(cond, namespace, {}):
                    lines.append(m.group(1))
            # Narrowed from a bare `except:`; also removed the unreachable
            # `sys.exit(1)` that followed sys.exit(msg).
            except Exception:
                sys.exit('''\
Error: Invalid selector in meta.yaml line %d:
%s
''' % (i + 1, line))
            continue
        lines.append(line)
    return '\n'.join(lines) + '\n'
@memoized
def yamlize(data):
    """Parse *data* as YAML, distinguishing "needs jinja2" parse failures.

    On a parse error: if jinja2 is not importable, the document likely
    contains un-rendered jinja templating, so raise the more specific
    UnableToParseMissingJinja2; otherwise raise UnableToParse.
    """
    try:
        return yaml.load(data)
    except yaml.parser.ParserError as e:
        try:
            import jinja2
            # Reference silences "unused import" -- we only probe availability.
            jinja2
        except ImportError:
            raise exceptions.UnableToParseMissingJinja2(original=e)
        raise exceptions.UnableToParse(original=e)
def parse(data):
    """Apply selectors, parse the YAML, then normalize section/field types."""
    res = yamlize(select_lines(data, ns_cfg()))
    # ensure the result is a dict
    if res is None:
        res = {}
    for field in FIELDS:
        if field in res and not isinstance(res[field], dict):
            raise RuntimeError("The %s field should be a dict, not %s" % (field, res[field].__class__.__name__))

    # ensure those are lists
    list_fields = ('source/patches',
                   'build/entry_points', 'build/script_env',
                   'build/features', 'build/track_features',
                   'requirements/build', 'requirements/run',
                   'requirements/conflicts', 'test/requires',
                   'test/files', 'test/commands', 'test/imports')
    for field in list_fields:
        section, key = field.split('/')
        # A section may be present but explicitly None (empty YAML block).
        if res.get(section) is None:
            res[section] = {}
        if res[section].get(key, None) is None:
            res[section][key] = []

    # ensure those are strings
    string_fields = ('package/version', 'build/string', 'source/svn_rev',
                     'source/git_tag', 'source/git_branch', 'source/md5',
                     'source/git_rev', 'source/path')
    for field in string_fields:
        section, key = field.split('/')
        if res.get(section) is None:
            res[section] = {}
        value = res[section].get(key, '')
        if value is None:
            value = ''
        res[section][key] = text_type(value)
    return sanitize(res)
def sanitize(meta):
    """
    Sanitize the meta-data to remove aliases/handle deprecation
    """
    # Work on a shallow copy so the caller's dict is untouched.
    cleaned = dict(meta)
    for section, sanitizer in (('source', _git_clean),):
        if section in cleaned:
            cleaned[section] = sanitizer(cleaned[section])
    return cleaned
def _git_clean(source_meta):
"""
Reduce the redundancy in git specification by removing git_tag and
git_branch.
If one is specified, copy to git_rev.
If more than one field is used to specified, exit
and complain.
"""
git_rev_tags_old = ('git_branch', 'git_tag')
git_rev = 'git_rev'
git_rev_tags = (git_rev,) + git_rev_tags_old
has_rev_tags = tuple(bool(source_meta[tag]) for
tag in git_rev_tags)
if sum(has_rev_tags) > 1:
msg = "Error: mulitple git_revs:"
msg += ', '.join("{}".format(key) for key, has in
zip(git_rev_tags, has_rev_tags) if has)
sys.exit(msg)
# make a copy of the input so we have no side-effects
ret_meta = dict(source_meta)
# loop over the old versions
for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):
# update if needed
if has:
ret_meta[git_rev_tags[0]] = ret_meta[key]
# and remove
del ret_meta[key]
return ret_meta
# If you update this please update the example in
# conda-docs/docs/source/build.rst
# Known meta.yaml sections mapped to the keys each section may contain;
# MetaData.check_fields() rejects anything not listed here (the free-form
# 'extra' section is exempt).
FIELDS = {
    'package': ['name', 'version'],
    'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',
               'git_url', 'git_tag', 'git_branch', 'git_rev',
               'hg_url', 'hg_tag',
               'svn_url', 'svn_rev', 'svn_ignore_externals',
               'patches'],
    'build': ['number', 'string', 'entry_points', 'osx_is_app',
              'features', 'track_features', 'preserve_egg_dir',
              'no_link', 'binary_relocation', 'script', 'noarch_python',
              'has_prefix_files', 'binary_has_prefix_files', 'script_env',
              'detect_binary_files_with_prefix', 'rpaths',
              'always_include_files', ],
    'requirements': ['build', 'run', 'conflicts'],
    'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',
            'own_environment'],
    'test': ['requires', 'commands', 'files', 'imports'],
    'about': ['home', 'license', 'summary', 'readme'],
}
def check_bad_chrs(s, field):
    """Exit with an error if *s* contains a character disallowed in *field*."""
    bad_chrs = '=!@#$%^&*:;"\'\\|<>?/ '
    if field in ('package/version', 'build/string'):
        # Dashes are additionally forbidden in versions and build strings
        # (they delimit name-version-build in the dist string).
        bad_chrs += '-'
    for forbidden in bad_chrs:
        if forbidden in s:
            sys.exit("Error: bad character '%s' in %s: %s" % (forbidden, field, s))
def get_contents(meta_path):
    '''
    Get the contents of the [meta.yaml|conda.yaml] file.
    If jinja is installed, then the template.render function is called
    before standard conda macro processors
    '''
    try:
        import jinja2
    except ImportError:
        # No jinja2: warn and fall back to the raw file contents.
        print("There was an error importing jinja2.", file=sys.stderr)
        print("Please run `conda install jinja2` to enable jinja template support", file=sys.stderr)
        with open(meta_path) as fd:
            return fd.read()

    from conda_build.jinja_context import context_processor

    path, filename = os.path.split(meta_path)
    # Look up templates both inside conda_build and next to the meta file.
    loaders = [jinja2.PackageLoader('conda_build'),
               jinja2.FileSystemLoader(path)]
    env = jinja2.Environment(loader=jinja2.ChoiceLoader(loaders))
    env.globals.update(ns_cfg())
    env.globals.update(context_processor())
    template = env.get_or_select_template(filename)
    return template.render(environment=env)
class MetaData(object):
    """A conda recipe's metadata, parsed from its meta.yaml/conda.yaml."""

    def __init__(self, path):
        # `path` is the recipe directory, not the meta file itself.
        assert isdir(path)
        self.path = path
        self.meta_path = join(path, 'meta.yaml')
        self.requirements_path = join(path, 'requirements.txt')
        if not isfile(self.meta_path):
            # Fall back to the alternative file name.
            self.meta_path = join(path, 'conda.yaml')
            if not isfile(self.meta_path):
                sys.exit("Error: meta.yaml or conda.yaml not found in %s" % path)
        self.parse_again()

    def parse_again(self):
        """Redo parsing for key-value pairs that are not initialized in the
        first pass.
        """
        if not self.meta_path:
            # fromdict() instances have no backing file; nothing to re-read.
            return
        self.meta = parse(get_contents(self.meta_path))

        # A requirements.txt may supply the run requirements when meta.yaml
        # doesn't specify any.
        if isfile(self.requirements_path) and not self.meta['requirements']['run']:
            self.meta.setdefault('requirements', {})
            run_requirements = specs_from_url(self.requirements_path)
            self.meta['requirements']['run'] = run_requirements

    @classmethod
    def fromdict(cls, metadata):
        """
        Create a MetaData object from metadata dict directly.
        """
        # __new__ bypasses __init__ (which would hit the filesystem).
        m = super(MetaData, cls).__new__(cls)
        m.path = ''
        m.meta_path = ''
        m.meta = sanitize(metadata)
        return m

    def get_section(self, section):
        # Missing sections read as empty dicts.
        return self.meta.get(section, {})

    def get_value(self, field, default=None):
        # `field` is "section/key", e.g. "build/number".
        section, key = field.split('/')
        return self.get_section(section).get(key, default)

    def check_fields(self):
        """Exit if the metadata has sections/keys not listed in FIELDS."""
        for section, submeta in iteritems(self.meta):
            if section == 'extra':
                # 'extra' is a free-form, user-defined section.
                continue
            if section not in FIELDS:
                sys.exit("Error: unknown section: %s" % section)
            for key in submeta:
                if key not in FIELDS[section]:
                    sys.exit("Error: in section %r: unknown key %r" %
                             (section, key))

    def name(self):
        """Return the validated, lowercase package name; exit if invalid."""
        res = self.get_value('package/name')
        if not res:
            sys.exit('Error: package/name missing in: %r' % self.meta_path)
        res = text_type(res)
        if res != res.lower():
            sys.exit('Error: package/name must be lowercase, got: %r' % res)
        check_bad_chrs(res, 'package/name')
        return res

    def version(self):
        """Return the validated package version string; exit if missing."""
        res = self.get_value('package/version')
        if res is None:
            sys.exit("Error: package/version missing in: %r" % self.meta_path)
        check_bad_chrs(res, 'package/version')
        return res

    def build_number(self):
        # Defaults to 0 when build/number is absent.
        return int(self.get_value('build/number', 0))

    def ms_depends(self, typ='run'):
        """Return the 'requirements/<typ>' specs as MatchSpec objects.

        python/numpy/perl/r specs without an explicit version get pinned to
        the versions configured in conda_build.config (skipped for
        noarch_python packages).  Exits on malformed specs.
        """
        res = []
        name_ver_list = [
            ('python', config.CONDA_PY),
            ('numpy', config.CONDA_NPY),
            ('perl', config.CONDA_PERL),
            ('r', config.CONDA_R),
        ]
        for spec in self.get_value('requirements/' + typ, []):
            try:
                ms = MatchSpec(spec)
            except AssertionError:
                raise RuntimeError("Invalid package specification: %r" % spec)
            if ms.name == self.name():
                raise RuntimeError("Error: %s cannot depend on itself" % self.name())
            for name, ver in name_ver_list:
                if ms.name == name:
                    if (ms.strictness != 1 or
                            self.get_value('build/noarch_python')):
                        # Already version-constrained, or noarch: leave as-is.
                        continue
                    str_ver = text_type(ver)
                    if '.' not in str_ver:
                        # e.g. 27 -> "2.7": join the digits with dots.
                        str_ver = '.'.join(str_ver)
                    ms = MatchSpec('%s %s*' % (name, str_ver))
            for c in '=!@#$%^&*:;"\'\\|<>?/':
                if c in ms.name:
                    sys.exit("Error: bad character '%s' in package name "
                             "dependency '%s'" % (c, ms.name))
            parts = spec.split()
            if len(parts) >= 2:
                # A bare operator token means the user wrote "name > 1.0"
                # instead of the conda syntax "name >1.0".
                if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:
                    msg = ("Error: bad character '%s' in package version "
                           "dependency '%s'" % (parts[1], ms.name))
                    if len(parts) >= 3:
                        msg += "\nPerhaps you meant '%s %s%s'" % (ms.name,
                                                                  parts[1], parts[2])
                    sys.exit(msg)
            res.append(ms)
        return res

    def build_id(self):
        """Return the build string: build/string if given, otherwise one
        computed from the pinned runtimes plus the build number
        (e.g. "np19py27_0").
        """
        ret = self.get_value('build/string')
        if ret:
            check_bad_chrs(ret, 'build/string')
            return ret
        res = []
        version_re = re.compile(r'(?:==)?(\d)\.(\d)')
        for name, s in (('numpy', 'np'), ('python', 'py'), ('perl', 'pl'), ('r', 'r')):
            for ms in self.ms_depends():
                if ms.name == name:
                    try:
                        v = ms.spec.split()[1]
                    except IndexError:
                        # No version pinned: use the bare abbreviation.
                        res.append(s)
                        break
                    if any(i in v for i in ',|>!<'):
                        # Complex constraint: contributes nothing to the id.
                        break
                    if name not in ['perl', 'r']:
                        match = version_re.match(v)
                        if match:
                            # "X.Y..." -> e.g. "py27".
                            res.append(s + match.group(1) + match.group(2))
                    else:
                        res.append(s + v.strip('*'))
                    break
        if res:
            res.append('_')
        res.append('%d' % self.build_number())
        return ''.join(res)

    def dist(self):
        # Canonical "<name>-<version>-<build_id>" distribution string.
        return '%s-%s-%s' % (self.name(), self.version(), self.build_id())

    def pkg_fn(self):
        # File name of the built package tarball.
        return "%s.tar.bz2" % self.dist()

    def is_app(self):
        # A recipe is an "app" when app/entry is set.
        return bool(self.get_value('app/entry'))

    def app_meta(self):
        """Return the app-related subset of metadata for info_index()."""
        d = {'type': 'app'}
        if self.get_value('app/icon'):
            # Store the icon under its md5 so distinct icons never collide.
            d['icon'] = '%s.png' % md5_file(join(
                self.path, self.get_value('app/icon')))
        for field, key in [('app/entry', 'app_entry'),
                           ('app/type', 'app_type'),
                           ('app/cli_opts', 'app_cli_opts'),
                           ('app/summary', 'summary'),
                           ('app/own_environment', 'app_own_environment')]:
            value = self.get_value(field)
            if value:
                d[key] = value
        return d

    def info_index(self):
        """Return the dict written to the package's info/index.json."""
        d = dict(
            name=self.name(),
            version=self.version(),
            build=self.build_id(),
            build_number=self.build_number(),
            license=self.get_value('about/license'),
            platform=cc.platform,
            arch=cc.arch_name,
            subdir=cc.subdir,
            depends=sorted(ms.spec for ms in self.ms_depends())
        )
        if self.get_value('build/features'):
            d['features'] = ' '.join(self.get_value('build/features'))
        if self.get_value('build/track_features'):
            d['track_features'] = ' '.join(self.get_value('build/track_features'))
        if self.get_value('build/noarch_python'):
            # noarch packages are platform-independent.
            d['platform'] = d['arch'] = None
            d['subdir'] = 'noarch'
        if self.is_app():
            d.update(self.app_meta())
        return d

    def has_prefix_files(self):
        """Return build/has_prefix_files, validated as a list of /-paths."""
        ret = self.get_value('build/has_prefix_files', [])
        if not isinstance(ret, list):
            raise RuntimeError('build/has_prefix_files should be a list of paths')
        if sys.platform == 'win32':
            if any('\\' in i for i in ret):
                raise RuntimeError("build/has_prefix_files paths must use / as the path delimiter on Windows")
        return ret

    def always_include_files(self):
        # Files to ship even if they would otherwise be filtered out.
        return self.get_value('build/always_include_files', [])

    def binary_has_prefix_files(self):
        """Return build/binary_has_prefix_files, validated as /-paths."""
        ret = self.get_value('build/binary_has_prefix_files', [])
        if not isinstance(ret, list):
            raise RuntimeError('build/binary_has_prefix_files should be a list of paths')
        if sys.platform == 'win32':
            if any('\\' in i for i in ret):
                raise RuntimeError("build/binary_has_prefix_files paths must use / as the path delimiter on Windows")
        return ret

    def __unicode__(self):
        '''
        String representation of the MetaData.
        '''
        return text_type(self.__dict__)

    def __str__(self):
        # On Python 2, __str__ must return bytes; on Python 3, text.
        if PY3:
            return self.__unicode__()
        else:
            return self.__unicode__().encode('utf-8')

    def __repr__(self):
        '''
        String representation of the MetaData.
        '''
        return self.__str__()
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a local recipe and dump its index metadata.
    from pprint import pprint
    from os.path import expanduser

    m = MetaData(expanduser('~/conda-recipes/pycosat'))
    pprint(m.info_index())
| |
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_almost_equal, assert_array_equal,
assert_array_almost_equal)
from scipy.ndimage import convolve1d
from scipy.signal import savgol_coeffs, savgol_filter
from scipy.signal._savitzky_golay import _polyder
def check_polyder(p, m, expected):
    """Assert that the m-th derivative of polynomial(s) p equals expected."""
    assert_array_equal(_polyder(p, m), expected)
def test_polyder():
    """Exercise _polyder on 1-D and 2-D coefficient arrays."""
    # (poly, derivative order, expected) -- transposed below so each
    # *column* is one polynomial, matching _polyder's layout.
    cases = [
        ([5], 0, [5]),
        ([5], 1, [0]),
        ([3, 2, 1], 0, [3, 2, 1]),
        ([3, 2, 1], 1, [6, 2]),
        ([3, 2, 1], 2, [6]),
        ([3, 2, 1], 3, [0]),
        ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
        ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
        ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
        ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
    ]
    for poly, order, expected in cases:
        check_polyder(np.array(poly).T, order, np.array(expected).T)
#--------------------------------------------------------------------
# savgol_coeffs tests
#--------------------------------------------------------------------
def alt_sg_coeffs(window_length, polyorder, pos):
    """Alternative implementation of the SG coefficients.

    Uses numpy.polyfit/numpy.polyval; slower than savgol_coeffs() but the
    results should agree with savgol_coeffs(..., use='dot').
    window_length should be odd.
    """
    if pos is None:
        pos = window_length // 2
    grid = np.arange(window_length)
    impulse = (grid == pos).astype(int)
    # Fit a polynomial to a unit impulse and evaluate it on the grid.
    return np.polyval(np.polyfit(grid, impulse, polyorder), grid)
def test_sg_coeffs_trivial():
    # With polyorder == window_length - 1 the fit interpolates exactly, so
    # the coefficients reduce to a unit impulse at the evaluation position.
    assert_allclose(savgol_coeffs(1, 0), [1])
    assert_allclose(savgol_coeffs(3, 2), [0, 1, 0], atol=1e-10)
    assert_allclose(savgol_coeffs(5, 4), [0, 0, 1, 0, 0], atol=1e-10)
    assert_allclose(savgol_coeffs(5, 4, pos=1), [0, 0, 0, 1, 0], atol=1e-10)
    # use='dot' returns the coefficients in dot-product (unreversed) order.
    assert_allclose(savgol_coeffs(5, 4, pos=1, use='dot'),
                    [0, 1, 0, 0, 0], atol=1e-10)
def compare_coeffs_to_alt(window_length, order):
    """Check savgol_coeffs against alt_sg_coeffs for every pos (and None)."""
    for pos in [None] + list(range(window_length)):
        label = ("window_length = %d, order = %d, pos = %s" %
                 (window_length, order, pos))
        fast = savgol_coeffs(window_length, order, pos=pos, use='dot')
        slow = alt_sg_coeffs(window_length, order, pos=pos)
        assert_allclose(fast, slow, atol=1e-10, err_msg=label)
def test_sg_coeffs_compare():
    """Exhaustively compare savgol_coeffs() with alt_sg_coeffs()."""
    for window_length in (1, 3, 5, 7):
        for order in range(window_length):
            compare_coeffs_to_alt(window_length, order)
def test_sg_coeffs_exact():
    # An order-4 SG filter reproduces a cubic exactly away from the edges,
    # and its deriv=1 / deriv=2 variants reproduce the exact derivatives.
    polyorder = 4
    window_length = 9
    halflen = window_length // 2

    x = np.linspace(0, 21, 43)
    delta = x[1] - x[0]
    y = 0.5 * x ** 3 - x

    exact_by_deriv = {
        0: y,
        1: 1.5 * x ** 2 - 1,
        2: 3.0 * x,
    }
    for deriv in (0, 1, 2):
        h = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
        filtered = convolve1d(y, h)
        exact = exact_by_deriv[deriv]
        # Edge regions are excluded: convolve1d pads there.
        assert_allclose(filtered[halflen:-halflen], exact[halflen:-halflen])
def test_sg_coeffs_deriv():
    # The samples lie on a parabola, so an order >= 2 fit is exact and the
    # dot product of the coefficients with the data recovers the value and
    # derivatives at each position.
    i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
    x = i ** 2 / 4
    exact = {0: x, 1: i / 2, 2: np.full_like(i, 0.5)}
    for pos in range(x.size):
        for deriv in (0, 1, 2):
            coeffs = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot',
                                   deriv=deriv)
            assert_allclose(coeffs.dot(x), exact[deriv][pos], atol=1e-10)
def test_sg_coeffs_deriv_gt_polyorder():
    """
    If deriv > polyorder, the coefficients should be all 0.
    This is a regression test for a bug where, e.g.,
        savgol_coeffs(5, polyorder=1, deriv=2)
    raised an error.
    """
    for window_length, order, deriv in [(5, 1, 2), (7, 4, 6)]:
        coeffs = savgol_coeffs(window_length, polyorder=order, deriv=deriv)
        assert_array_equal(coeffs, np.zeros(window_length))
def test_sg_coeffs_large():
    # Smoothing coefficients must be symmetric and first-derivative
    # coefficients antisymmetric even for large window/order; asymmetry
    # would indicate numeric overflow in the computation.
    smooth = savgol_coeffs(31, 9)
    assert_array_almost_equal(smooth, smooth[::-1])
    first_deriv = savgol_coeffs(31, 9, deriv=1)
    assert_array_almost_equal(first_deriv, -first_deriv[::-1])
#--------------------------------------------------------------------
# savgol_filter tests
#--------------------------------------------------------------------
def test_sg_filter_trivial():
    """ Test some trivial edge cases for savgol_filter()."""
    assert_equal(savgol_filter(np.array([1.0]), 1, 0), [1.0])

    # Single sample, window 3, polyorder 1, mode='constant': the line is fit
    # through (-1, 0), (0, 3), (1, 0); its value at 0 is the mean, 1.0.
    assert_almost_equal(savgol_filter(np.array([3.0]), 3, 1, mode='constant'),
                        [1.0], decimal=15)

    # mode='nearest' pads with the sample itself, so the value is unchanged.
    assert_almost_equal(savgol_filter(np.array([3.0]), 3, 1, mode='nearest'),
                        [3.0], decimal=15)

    # Constant input with wrap-around padding stays constant.
    assert_almost_equal(savgol_filter(np.array([1.0] * 3), 3, 1, mode='wrap'),
                        [1.0, 1.0, 1.0], decimal=15)
def test_sg_filter_basic():
    # Some basic test cases for savgol_filter(), one per padding mode.
    x = np.array([1.0, 2.0, 1.0])
    expectations = [
        ('constant', [1.0, 4.0 / 3, 1.0]),
        ('mirror', [5.0 / 3, 4.0 / 3, 5.0 / 3]),
        ('wrap', [4.0 / 3, 4.0 / 3, 4.0 / 3]),
    ]
    for mode, expected in expectations:
        assert_allclose(savgol_filter(x, 3, 1, mode=mode), expected)
def test_sg_filter_2d():
    # Filtering along the last axis of a 2-D array, and along axis 0 of its
    # transpose, must give a transposed pair of results.
    x = np.array([[1.0, 2.0, 1.0],
                  [2.0, 4.0, 2.0]])
    expected = np.array([[1.0, 4.0 / 3, 1.0],
                         [2.0, 8.0 / 3, 2.0]])
    assert_allclose(savgol_filter(x, 3, 1, mode='constant'), expected)
    assert_allclose(savgol_filter(x.T, 3, 1, mode='constant', axis=0),
                    expected.T)
def test_sg_filter_interp_edges():
    # With mode='interp' and polynomial inputs of degree <= polyorder, the
    # filter should reproduce the exact data and exact derivatives over the
    # entire array, including the edge regions.
    window_length = 7
    t = np.linspace(-5, 5, 21)
    delta = t[1] - t[0]

    # Polynomial test data, their first and second derivatives.
    x = np.array([t,
                  3 * t ** 2,
                  t ** 3 - t])
    dx = np.array([np.ones_like(t),
                   6 * t,
                   3 * t ** 2 - 1.0])
    d2x = np.array([np.zeros_like(t),
                    np.full_like(t, 6),
                    6 * t])

    def check(data, d_data, d2_data, axis):
        assert_allclose(
            savgol_filter(data, window_length, 3, axis=axis, mode='interp'),
            data, atol=1e-12)
        assert_allclose(
            savgol_filter(data, window_length, 3, axis=axis, mode='interp',
                          deriv=1, delta=delta),
            d_data, atol=1e-12)
        assert_allclose(
            savgol_filter(data, window_length, 3, axis=axis, mode='interp',
                          deriv=2, delta=delta),
            d2_data, atol=1e-12)

    check(x, dx, d2x, axis=-1)
    # Transpose everything, and test again with axis=0.
    check(x.T, dx.T, d2x.T, axis=0)
def test_sg_filter_interp_edges_3d():
    # Test mode='interp' with a 3-D array, filtering along each axis in turn.
    t = np.linspace(-5, 5, 21)
    delta = t[1] - t[0]
    x1 = np.array([t, -t])
    x2 = np.array([t ** 2, 3 * t ** 2 + 5])
    x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
    dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
    dx2 = np.array([2 * t, 6 * t])
    dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])

    def check(z, dz, axis):
        assert_allclose(
            savgol_filter(z, 7, 3, axis=axis, mode='interp', delta=delta),
            z, atol=1e-10)
        assert_allclose(
            savgol_filter(z, 7, 3, axis=axis, mode='interp',
                          deriv=1, delta=delta),
            dz, atol=1e-10)

    # z has shape (3, 2, 21); filter along the last axis.
    check(np.array([x1, x2, x3]), np.array([dx1, dx2, dx3]), axis=-1)

    # z has shape (3, 21, 2); filter along axis 1.
    check(np.array([x1.T, x2.T, x3.T]),
          np.array([dx1.T, dx2.T, dx3.T]), axis=1)

    # z has shape (21, 3, 2); filter along axis 0.
    z = np.array([x1.T, x2.T, x3.T]).swapaxes(0, 1).copy()
    dz = np.array([dx1.T, dx2.T, dx3.T]).swapaxes(0, 1).copy()
    check(z, dz, axis=0)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Copyright 2017 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_ike_crypto_profile
short_description: Configures IKE Crypto profile on the firewall with subset of settings
description:
- Use the IKE Crypto Profiles page to specify protocols and algorithms for identification, authentication, and
- encryption (IKEv1 or IKEv2, Phase 1).
author: "Ivan Bojer (@ivanbojer)"
version_added: "2.8"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- panos.transitional_provider
- panos.state
- panos.full_template_support
options:
name:
description:
- Name for the profile.
required: true
dh_group:
description:
- Specify the priority for Diffie-Hellman (DH) groups.
default: group2
choices: ['group1', 'group2', 'group5', 'group14', 'group19', 'group20']
aliases: dhgroup
authentication:
description:
- Authentication hashes used for IKE phase 1 proposal.
choices: ['md5', 'sha1', 'sha256', 'sha384', 'sha512']
default: sha1
encryption:
description:
- Encryption algorithms used for IKE phase 1 proposal.
choices: ['des', '3des', 'aes-128-cbc', 'aes-192-cbc', 'aes-256-cbc']
default: ['aes-256-cbc', '3des']
lifetime_seconds:
description:
- IKE phase 1 key lifetime in seconds.
aliases: lifetime_sec
lifetime_minutes:
description:
- IKE phase 1 key lifetime in minutes.
lifetime_hours:
description:
- IKE phase 1 key lifetime in hours. If no key lifetime is
specified, default to 8 hours.
lifetime_days:
description:
- IKE phase 1 key lifetime in days.
commit:
description:
- Commit configuration if changed.
default: true
'''
EXAMPLES = '''
- name: Add IKE crypto config to the firewall
panos_ike_crypto_profile:
provider: '{{ provider }}'
state: 'present'
name: 'vpn-0cc61dd8c06f95cfd-0'
dh_group: ['group2']
authentication: ['sha1']
encryption: ['aes-128-cbc']
lifetime_seconds: '28800'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.network import IkeCryptoProfile
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
    """Ensure the named IKE crypto profile matches the given settings."""
    # Build the argument spec on top of the shared PAN-OS helper, which
    # contributes the provider/template/state options.
    helper = get_connection(
        template=True,
        template_stack=True,
        with_classic_provider_spec=True,
        with_state=True,
        argument_spec=dict(
            name=dict(required=True),
            dh_group=dict(
                type='list',
                default=['group2'],
                choices=[
                    'group1', 'group2', 'group5', 'group14', 'group19', 'group20'
                ],
                aliases=['dhgroup']
            ),
            authentication=dict(
                type='list',
                choices=[
                    'md5', 'sha1', 'sha256', 'sha384', 'sha512'
                ],
                default=['sha1']
            ),
            encryption=dict(
                type='list',
                choices=[
                    'des', '3des', 'aes-128-cbc', 'aes-192-cbc', 'aes-256-cbc'
                ],
                default=['aes-256-cbc', '3des']
            ),
            lifetime_seconds=dict(type='int', aliases=['lifetime_sec']),
            lifetime_minutes=dict(type='int'),
            lifetime_hours=dict(type='int'),
            lifetime_days=dict(type='int'),
            commit=dict(type='bool', default=True)
        )
    )

    # Only one lifetime unit may be supplied -- the device stores a single
    # value, so the units are mutually exclusive.
    module = AnsibleModule(
        argument_spec=helper.argument_spec,
        supports_check_mode=True,
        required_one_of=helper.required_one_of,
        mutually_exclusive=[
            [
                'lifetime_seconds',
                'lifetime_minutes',
                'lifetime_hours',
                'lifetime_days'
            ]
        ]
    )

    # Verify libs are present, get parent object.
    parent = helper.get_pandevice_parent(module)

    # Object params.
    spec = {
        'name': module.params['name'],
        'dh_group': module.params['dh_group'],
        'authentication': module.params['authentication'],
        'encryption': module.params['encryption'],
        'lifetime_seconds': module.params['lifetime_seconds'],
        'lifetime_minutes': module.params['lifetime_minutes'],
        'lifetime_hours': module.params['lifetime_hours'],
        'lifetime_days': module.params['lifetime_days']
    }

    # Other info.
    commit = module.params['commit']

    # Reflect GUI behavior. Default is 8 hour key lifetime if nothing else is
    # specified.
    if not any([
        spec['lifetime_seconds'], spec['lifetime_minutes'], spec['lifetime_hours'], spec['lifetime_days']
    ]):
        spec['lifetime_hours'] = 8

    # Retrieve current info.
    try:
        listing = IkeCryptoProfile.refreshall(parent, add=False)
    except PanDeviceError as e:
        module.fail_json(msg='Failed refresh: {0}'.format(e))

    obj = IkeCryptoProfile(**spec)
    parent.add(obj)

    # Apply the state.
    changed = helper.apply_state(obj, listing, module)

    # Commit.
    if commit and changed:
        helper.commit(module)

    # Done.
    module.exit_json(changed=changed)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| |
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from tinyblog.models import EmailSubscriber
from .utils import EmailSubscriberFactory, is_before_django_1_5
class TestSubscribeViews(TestCase):
def test_subscribe_get(self):
    """GET on the subscribe view renders and links back to itself."""
    subscribe_url = reverse('tinyblog_subscribe')
    response = self.client.get(subscribe_url)
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, subscribe_url)
def test_subscribe_bad_post(self):
    """An invalid email re-renders the form; no subscriber is created."""
    self.assertEqual(EmailSubscriber.objects.all().count(), 0)
    response = self.client.post(reverse('tinyblog_subscribe'),
                                {'email': 'toexample.com'})
    self.assertEqual(response.status_code, 200)
def test_subscribe_post(self):
    """A valid subscription creates an unconfirmed subscriber, redirects to
    the thanks page, and emails confirmation/unsubscribe links."""
    self.assertEqual(EmailSubscriber.objects.all().count(),
                     0)
    url = reverse('tinyblog_subscribe')
    response = self.client.post(url,
                                {'email': 'to@example.com'},
                                follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(EmailSubscriber.objects.all().count(),
                     1)
    # pk=1 assumes a fresh test database per test (Django TestCase).
    subscriber = EmailSubscriber.objects.get(pk=1)
    self.assertFalse(subscriber.confirmed)
    self.assertFalse(subscriber.unsubscribed)
    # NOTE(review): `unicode` is Python 2 only -- this suite predates py3.
    self.assertEqual(unicode(subscriber), 'to@example.com')
    self.assertEqual(response.context['subscriber'], subscriber)
    # follow=True means the final page after the redirect is inspected.
    self.assertEqual(response.request['PATH_INFO'],
                     reverse('tinyblog_subscribe_thanks'))
    self.assertContains(response, "An email is on its way")
    self.assertContains(response, subscriber.email)

    # Exactly one confirmation email with the expected envelope and links.
    self.assertEqual(len(mail.outbox), 1)
    themail = mail.outbox[0]
    self.assertEqual(themail.subject,
                     'Thanks for subscribing to example.com')
    self.assertEqual(themail.to,
                     ['to@example.com', ])
    self.assertEqual(themail.from_email,
                     'from@example.com')
    # index() raises if absent; > 0 also ensures the token isn't first.
    self.assertTrue(themail.body.index(str(subscriber.uuid_second)) > 0)
    self.assertTrue(themail.body.index(subscriber.confirm_url()) > 0)
    self.assertTrue(themail.body.index(subscriber.unsubscribe_url()) > 0)
def test_invite_get(self):
    """GET on the invite view renders and links back to itself."""
    invite_url = reverse('tinyblog_invite')
    response = self.client.get(invite_url)
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, invite_url)
def test_invite_bad_post(self):
self.assertEqual(EmailSubscriber.objects.all().count(),
0)
url = reverse('tinyblog_invite')
response = self.client.post(url,
{'email': 'toexample.com'})
self.assertEqual(response.status_code, 200)
def test_invite_post(self):
self.assertEqual(EmailSubscriber.objects.all().count(),
0)
url = reverse('tinyblog_invite')
response = self.client.post(url,
{'email': 'to@example.com'},
follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(EmailSubscriber.objects.all().count(),
1)
subscriber = EmailSubscriber.objects.get(pk=1)
self.assertFalse(subscriber.confirmed)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(unicode(subscriber), 'to@example.com')
self.assertEqual(response.context['subscriber'], subscriber)
self.assertEqual(response.request['PATH_INFO'],
reverse('tinyblog_subscribe_thanks'))
self.assertContains(response, "An email is on its way")
self.assertContains(response, subscriber.email)
self.assertEqual(len(mail.outbox), 1)
themail = mail.outbox[0]
self.assertEqual(
themail.subject,
'Would you like to receive regular updates from example.com?'
)
self.assertEqual(themail.to,
['to@example.com', ])
self.assertEqual(themail.from_email,
'from@example.com')
self.assertTrue(themail.body.index(str(subscriber.uuid_second)) > 0)
self.assertTrue(themail.body.index(subscriber.confirm_url()) > 0)
def test_subscribe_confirm(self):
subscriber = EmailSubscriberFactory.create()
self.assertFalse(subscriber.confirmed)
response = self.client.get(subscriber.confirm_url())
self.assertEqual(response.status_code, 200)
subscriber = EmailSubscriber.objects.get(pk=subscriber.pk)
self.assertTrue(subscriber.confirmed)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(unicode(subscriber), 'to@example.com')
self.assertEqual(response.context['subscriber'], subscriber)
def test_unsubscribe_get_form(self):
subscriber = EmailSubscriberFactory.create(confirmed=True)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
1)
response = self.client.get(subscriber.unsubscribe_url())
self.assertEqual(response.status_code, 200)
self.assertTrue('form' in response.context_data)
subscriber = EmailSubscriber.objects.get(pk=subscriber.pk)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
1)
def test_unsubscribe_submit_form(self):
subscriber = EmailSubscriberFactory.create(confirmed=True)
EmailSubscriberFactory.create(confirmed=True)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
2)
response = self.client.post(
subscriber.unsubscribe_url(),
{'email': subscriber.email}
)
self.assertEqual(response.status_code, 302)
subscriber = EmailSubscriber.objects.get(pk=subscriber.pk)
self.assertTrue(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
0)
def test_unsubscribe_submit_form_non_existent_email(self):
subscriber = EmailSubscriberFactory.create(confirmed=True)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
1)
response = self.client.post(
subscriber.unsubscribe_url(),
{'email': 'notthere@example.com'}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context_data['form'].errors['email'],
[u'notthere@example.com is not currently subscribed.']
)
subscriber = EmailSubscriber.objects.get(pk=subscriber.pk)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
1)
def test_unsubscribe_submit_form_bad_email_address(self):
subscriber = EmailSubscriberFactory.create(confirmed=True)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
1)
response = self.client.post(
subscriber.unsubscribe_url(),
{'email': 'notanemail'}
)
self.assertEqual(response.status_code, 200)
if is_before_django_1_5():
email_string = 'e-mail'
else:
email_string = 'email'
self.assertEqual(
response.context_data['form'].errors['email'],
[u'Enter a valid %s address.' % email_string,
u'notanemail is not currently subscribed.']
)
subscriber = EmailSubscriber.objects.get(pk=subscriber.pk)
self.assertFalse(subscriber.unsubscribed)
self.assertEqual(EmailSubscriber.current_objects.count(),
1)
def test_direct_access_to_thanks_view(self):
url = reverse('tinyblog_subscribe_thanks')
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
| |
#!/usr/bin/env python
# encoding: utf-8
"""
cmsketch.py
An implementation of count-min sketching from the paper due to Cormode and
Muthukrishnan 2005
"""
import sys
import random
import numpy as np
import heapq
import json
import time
import operator
import collections
# Largest prime below 2**63; modulus for the pairwise-independent hash family.
BIG_PRIME = 9223372036854775783

def random_parameter():
    """Draw a uniform hash coefficient from [0, BIG_PRIME - 2]."""
    return random.randint(0, BIG_PRIME - 2)
# Count Min sketch helper functions for Enumerator
def cms_combiner(current_cms_obj, new_value):
    """Reducer step: fold *new_value* into the accumulated sketch.

    A Sketch instance is merged wholesale; any other value is treated as a
    single observation and counted once.  Returns the accumulator.
    """
    if not isinstance(new_value, Sketch):
        current_cms_obj.update(new_value, 1)
    else:
        current_cms_obj.combine(new_value)
    return current_cms_obj
def cms_evaluator(sketch):
    """Flatten a sketch's top-k table into a ranked list of key/value dicts.

    Emits, per ranked item, one {"key": "item N", ...} entry holding the item
    and one {"key": "count N", ...} entry holding its estimated count.
    """
    # Keep reversed(sorted(...)) rather than sorted(..., reverse=True): the
    # two differ in tie ordering, and we preserve the original's behavior.
    ranked = list(
        reversed(
            sorted(sketch.top_k.values(), key=operator.itemgetter(0))
        )
    )
    rows = []
    for rank, (count, name) in enumerate(ranked, start=1):
        rows.append({"key": "item {0:d}".format(rank), "value": name})
        rows.append({"key": "count {0:d}".format(rank), "value": int(count)})
    return rows
class Sketch:
    """Count-min sketch (Cormode & Muthukrishnan 2005) with a top-k heap."""
    def __init__(self, kwargs):
        """
        Setup a new count-min sketch with parameters delta, epsilon and k

        The parameters delta and epsilon control the accuracy of the
        estimates of the sketch

        Cormode and Muthukrishnan prove that for an item i with count a_i, the
        estimate from the sketch a_i_hat will satisfy the relation

        a_hat_i <= a_i + epsilon * ||a||_1

        with probability at least 1 - delta, where a is the the vector of all
        all counts and ||x||_1 is the L1 norm of a vector x

        Parameters
        ----------
        kwargs : dict
            Must carry 'delta', 'epsilon' (both in the open unit interval)
            and 'k' (positive integer: number of top items tracked).

        Examples
        --------
        >>> s = Sketch(dict(delta=10**-7, epsilon=0.005, k=40))

        Raises
        ------
        ValueError
            If delta or epsilon are not in the unit interval, or if k is
            not a positive integer
        """
        delta = kwargs['delta']
        epsilon = kwargs['epsilon']
        k = kwargs['k']
        if delta <= 0 or delta >= 1:
            raise ValueError("delta must be between 0 and 1, exclusive")
        if epsilon <= 0 or epsilon >= 1:
            raise ValueError("epsilon must be between 0 and 1, exclusive")
        if k < 1:
            raise ValueError("k must be a positive integer")
        # Standard CM sizing: width w = ceil(e/epsilon), depth d = ceil(ln(1/delta)).
        self.w = int(np.ceil(np.exp(1) / epsilon))
        self.d = int(np.ceil(np.log(1 / delta)))
        self.k = k
        self.hash_functions = [self.__generate_hash_function() for _ in range(self.d)]
        self.count = np.zeros((self.d, self.w), dtype='int32')
        self.heap, self.top_k = [], {}  # top_k => key, [estimate, key] pairs
    def update(self, key, increment):
        """
        Updates the sketch for the item with name of key by the amount
        specified in increment

        Parameters
        ----------
        key : string
            The item to update the value of in the sketch
        increment : integer
            The amount to update the sketch by for the given key

        Examples
        --------
        >>> s = Sketch(dict(delta=10**-7, epsilon=0.005, k=40))
        >>> s.update('http://www.cnn.com/', 1)
        """
        for row, hash_function in enumerate(self.hash_functions):
            column = hash_function(abs(hash(key)))
            self.count[row, column] += increment
        self.update_heap(key)
    def update_heap(self, key):
        """
        Updates the class's heap that keeps track of the top k items for a
        given key

        For the given key, it checks whether the key is present in the heap,
        updating accordingly if so, and adding it to the heap if it is
        absent

        Parameters
        ----------
        key : string
            The item to check against the heap
        """
        estimate = self.get(key)
        if not self.heap or estimate >= self.heap[0][0]:
            if key in self.top_k:
                # Key already tracked: refresh its estimate in top_k ...
                old_pair = self.top_k.get(key)
                old_pair[0] = estimate
                # ... and in the heap, then restore the heap invariant.
                for item in self.heap:
                    if item[1] == key:
                        item[0] = estimate
                        heapq.heapify(self.heap)
            else:
                if len(self.top_k) < self.k:
                    heapq.heappush(self.heap, [estimate, key])
                    self.top_k[key] = [estimate, key]
                else:
                    # Heap full: push the new pair and evict the smallest.
                    new_pair = [estimate, key]
                    old_pair = heapq.heappushpop(self.heap, new_pair)
                    if new_pair[1] != old_pair[1]:
                        del self.top_k[old_pair[1]]
                        self.top_k[key] = new_pair
    def get(self, key):
        """
        Fetches the sketch estimate for the given key

        Parameters
        ----------
        key : string
            The item to produce an estimate for

        Returns
        -------
        estimate : int
            The best estimate of the count for the given key based on the
            sketch

        Examples
        --------
        >>> s = Sketch(dict(delta=10**-7, epsilon=0.005, k=40))
        >>> s.update('http://www.cnn.com/', 1)
        >>> s.get('http://www.cnn.com/')
        1
        """
        value = sys.maxsize
        # CM estimate is the minimum counter over all rows.
        for row, hash_function in enumerate(self.hash_functions):
            column = hash_function(abs(hash(key)))
            value = min(self.count[row, column], value)
        return value
    def combine(self, new_sketch):
        """
        Combines a new sketch with the current sketch.
        Sketch combination is exact; new top_k list is approximate.
        Must combine counting array and top_k list.
        """
        self.count += new_sketch.count
        counts_dict = collections.defaultdict(int)
        # top_k entries have the form key: [estimate, key], so each value
        # unpacks as (estimate, key).
        # BUGFIX: wrap in list() — on Python 3, dict.values() returns views
        # which do not support concatenation with '+'.
        for v, k in list(self.top_k.values()) + list(new_sketch.top_k.values()):
            counts_dict[k] += v
        sorted_kv_pairs = list(reversed(sorted(counts_dict.items(), key=operator.itemgetter(1))))
        top_kv_pairs = sorted_kv_pairs[0:self.k]
        self.top_k = {}
        for key, value in top_kv_pairs:
            self.top_k[key] = [value, key]
    def __generate_hash_function(self):
        """
        Returns a hash function from a family of pairwise-independent hash
        functions
        """
        a, b = random_parameter(), random_parameter()
        return lambda x: (a * x + b) % BIG_PRIME % self.w
if __name__ == '__main__':
    def print_results(s):
        """Print the sketch's current top-k (count, key) pairs."""
        print('Top tweeters')
        for value in s.top_k.values():
            print('{0} {1}'.format(str(value[0]),str(value[1])))
        print('\n')
    s = Sketch(dict(delta=10**-5,epsilon=0.001,k=20))
    now = time.time()
    # Stream JSON activity records from stdin, counting one hit per actor.
    for line in sys.stdin:
        if time.time() - 5 > now:
            now = time.time()
            #print_results(s)
        try:
            user_name = json.loads(line)['actor']['preferredUsername']
        except ValueError:
            # Skip lines that are not valid JSON.
            continue
        except KeyError:
            # BUGFIX: was `except (KeyError, e):`, which raises NameError at
            # exception time because `e` is undefined (a botched Python 2
            # `except KeyError, e` conversion). Skip records missing the actor.
            continue
        s.update(user_name, 1)
    print_results(s)
| |
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import base64
import hashlib
import json
import os
import requests
import sys
import time
class Error(Exception):
    """Fatal error: conditions that must not be retried by the retry decorator."""
def sleep_time(attempt):
    """Return the back-off delay (seconds) before retry number *attempt*.

    Delays grow from 0s on the first attempt up to a 1200s ceiling.
    Raises for non-positive attempts, which indicate a caller bug.
    """
    if attempt <= 0:
        raise Exception('Unexpected')
    delays = (0, 15, 60, 90, 300)
    if attempt <= len(delays):
        return delays[attempt - 1]
    return 1200
def retry(func_in):
    """Decorator: re-run *func_in* on failure, up to 10 retries.

    ``Error`` is treated as fatal and re-raised immediately; any other
    exception triggers a back-off (see sleep_time) and another attempt.
    """
    def func_out(*args, **kwargs):
        retry_max = 10
        attempt = 0
        while True:
            attempt += 1
            try:
                return func_in(*args, **kwargs)
            except Error as err:
                # Treat Errors as fatal and do not retry.
                # Also explicitly flush message to avoid "no output" issue on some CIs.
                print('Error:\n {}'.format(err))
                sys.stdout.flush()
                raise err
            except Exception as exc:
                if attempt > retry_max:
                    raise exc
                print('Operation failed. Exception:\n {}'.format(exc))
                sec = sleep_time(attempt)
                print('Retry #{} (of {}) after {} seconds'.format(attempt, retry_max, sec))
                sys.stdout.flush()
                time.sleep(sec)
    return func_out
# http://stackoverflow.com/a/16696317/2288008
@retry
def download_file(url, local_file, auth, chunk_size=1024):
    """Stream *url* to *local_file* in chunks, using the given HTTP auth."""
    print('Downloading:\n {}\n -> {}'.format(url, local_file))
    response = requests.get(url, stream=True, auth=auth)
    if not response.ok:
        raise Exception('Downloading failed')
    with open(local_file, 'wb') as out:
        for piece in response.iter_content(chunk_size=chunk_size):
            if piece:
                out.write(piece)
class Github:
    """Minimal GitHub REST API client for uploading Hunter cache files.

    Raw archives become assets of the 'cache' release; meta files are
    committed to the repository via the contents API.  All network methods
    are wrapped by the module-level @retry decorator.
    """
    def __init__(self, username, password, repo_owner, repo):
        """Store repository coordinates, build auth and sanity-check it."""
        self.repo_owner = repo_owner
        self.repo = repo
        self.auth = requests.auth.HTTPBasicAuth(username, password)
        self.simple_request()
        # Resolved lazily by upload_raw_file on first use.
        self.release_id = None
    @retry
    def simple_request(self):
        """Hit the API root to validate credentials and the rate limit."""
        print('Processing simple request')
        r = requests.get('https://api.github.com', auth=self.auth)
        if not r.ok:
            sys.exit('Simple request fails. Check your password.')
        limit = int(r.headers['X-RateLimit-Remaining'])
        print('GitHub Limit: {}'.format(limit))
        if limit == 0:
            raise Exception('GitHub limit is 0')
        print('Simple request pass')
    @retry
    def get_release_by_tag(self, tagname):
        """Return the release id for *tagname*; fatal Error if it is absent."""
        print('Get release-id by tag `{}`'.format(tagname))
        # https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name
        # GET /repos/:owner/:repo/releases/tags/:tag
        url = 'https://api.github.com/repos/{}/{}/releases/tags/{}'.format(
            self.repo_owner,
            self.repo,
            tagname
        )
        r = requests.get(url, auth=self.auth)
        if r.status_code == 404:
            raise Error('Release {} does not exist. Create a GitHub release for with this tag'.format(tagname))
        if not r.ok:
            raise Exception('Get tag id failed. Requested url: {}'.format(url))
        tag_id = r.json()['id']
        print('Tag id is {}'.format(tag_id))
        return tag_id
    @retry
    def find_asset_id_by_name(self, release_id, name):
        """Return the id of asset *name* under *release_id*, or None.

        Walks the paginated asset listing until an empty page is returned.
        """
        # https://developer.github.com/v3/repos/releases/#list-assets-for-a-release
        # GET /repos/:owner/:repo/releases/:id/assets
        page_number = 1
        keep_searching = True
        while keep_searching:
            url = 'https://api.github.com/repos/{}/{}/releases/{}/assets?page={}'.format(
                self.repo_owner,
                self.repo,
                release_id,
                page_number
            )
            print('Requesting URL: {}'.format(url))
            r = requests.get(url, auth=self.auth)
            if not r.ok:
                raise Exception('Getting list of assets failed. Requested url: {}'.format(url))
            # Renamed from `json` to avoid shadowing the json module.
            assets = r.json()
            for x in assets:
                if name == x['name']:
                    return x['id']
            if not assets:
                keep_searching = False
            page_number = page_number + 1
        return None
    @retry
    def delete_asset_by_id(self, asset_id, asset_name):
        """Delete release asset *asset_id* (named *asset_name* for logging)."""
        # https://developer.github.com/v3/repos/releases/#delete-a-release-asset
        # DELETE /repos/:owner/:repo/releases/assets/:id
        url = 'https://api.github.com/repos/{}/{}/releases/assets/{}'.format(
            self.repo_owner,
            self.repo,
            asset_id
        )
        r = requests.delete(url, auth=self.auth)
        if r.status_code == 204:
            print('Asset removed: {}'.format(asset_name))
        else:
            raise Exception('Deletion of asset failed: {}'.format(asset_name))
    def delete_asset_if_exists(self, release_id, asset_name):
        """Delete asset *asset_name* from *release_id* when present."""
        asset_id = self.find_asset_id_by_name(release_id, asset_name)
        if not asset_id:
            print('Asset not exists: {}'.format(asset_name))
            return
        self.delete_asset_by_id(asset_id, asset_name)
    def upload_bzip_once(self, url, local_path):
        """Single attempt to POST *local_path* as an x-bzip2 asset."""
        headers = {'Content-Type': 'application/x-bzip2'}
        # BUGFIX: use a context manager so the file handle is always closed
        # (the original opened the file and never closed it).
        with open(local_path, 'rb') as file_to_upload:
            r = requests.post(url, data=file_to_upload, headers=headers, auth=self.auth)
        if not r.ok:
            raise Exception('Upload of file failed')
    @retry
    def upload_bzip(self, url, local_path, release_id, asset_name):
        """Upload with retry; remove the partial asset on failure first."""
        print('Uploading:\n {}\n -> {}'.format(local_path, url))
        try:
            self.upload_bzip_once(url, local_path)
        except Exception as exc:
            print('Exception catched while uploading, removing asset...')
            self.delete_asset_if_exists(release_id, asset_name)
            raise exc
    def upload_raw_file(self, local_path):
        """Upload *local_path* as a content-addressed asset of the 'cache' release."""
        tagname = 'cache'
        if self.release_id is None:
            self.release_id = self.get_release_by_tag(tagname)
        # https://developer.github.com/v3/repos/releases/#upload-a-release-asset
        # POST https://<upload_url>/repos/:owner/:repo/releases/:id/assets?name=foo.zip
        # BUGFIX: close the file after hashing (was left open).
        with open(local_path, 'rb') as f:
            asset_name = hashlib.sha1(f.read()).hexdigest()
        asset_name = asset_name + '.tar.bz2'
        url = 'https://uploads.github.com/repos/{}/{}/releases/{}/assets?name={}'.format(
            self.repo_owner,
            self.repo,
            self.release_id,
            asset_name
        )
        self.upload_bzip(url, local_path, self.release_id, asset_name)
    @retry
    def create_new_file(self, local_path, github_path):
        """Commit *local_path* to the repo at *github_path*.

        Returns True on success, False when the file already exists
        (non-ok response other than 409).
        """
        # https://developer.github.com/v3/repos/contents/#create-a-file
        # PUT /repos/:owner/:repo/contents/:path
        message = 'Uploading cache info\n\n'
        message += 'Create file: {}\n\n'.format(github_path)
        env_list = []
        job_url = ''
        if os.getenv('TRAVIS') == 'true':
            # * https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables
            message += 'Travis:\n'
            job_url = 'https://travis-ci.org/{}/jobs/{}'.format(
                os.getenv('TRAVIS_REPO_SLUG'),
                os.getenv('TRAVIS_JOB_ID')
            )
            env_list += [
                'TRAVIS_BRANCH',
                'TRAVIS_BUILD_ID',
                'TRAVIS_BUILD_NUMBER',
                'TRAVIS_JOB_ID',
                'TRAVIS_JOB_NUMBER',
                'TRAVIS_OS_NAME',
                'TRAVIS_REPO_SLUG'
            ]
        if os.getenv('APPVEYOR') == 'True':
            # * http://www.appveyor.com/docs/environment-variables
            message += 'AppVeyor:\n'
            job_url = 'https://ci.appveyor.com/project/{}/{}/build/{}/job/{}'.format(
                os.getenv('APPVEYOR_ACCOUNT_NAME'),
                os.getenv('APPVEYOR_PROJECT_SLUG'),
                os.getenv('APPVEYOR_BUILD_VERSION'),
                os.getenv('APPVEYOR_JOB_ID')
            )
            env_list += [
                'APPVEYOR_ACCOUNT_NAME',
                'APPVEYOR_PROJECT_ID',
                'APPVEYOR_PROJECT_NAME',
                'APPVEYOR_PROJECT_SLUG',
                'APPVEYOR_BUILD_ID',
                'APPVEYOR_BUILD_NUMBER',
                'APPVEYOR_BUILD_VERSION',
                'APPVEYOR_JOB_ID',
                'APPVEYOR_JOB_NAME',
                'APPVEYOR_REPO_BRANCH'
            ]
        # Store some info about build
        for env_name in env_list:
            env_value = os.getenv(env_name)
            if env_value:
                message += '  {}: {}\n'.format(env_name, env_value)
        if job_url:
            message += '\n  Job URL: {}\n'.format(job_url)
        url = 'https://api.github.com/repos/{}/{}/contents/{}'.format(
            self.repo_owner,
            self.repo,
            github_path
        )
        # BUGFIX: close the file after reading (was left open).
        with open(local_path, 'rb') as f:
            content = base64.b64encode(f.read()).decode()
        put_data = {
            'message': message,
            'content': content
        }
        r = requests.put(url, data=json.dumps(put_data), auth=self.auth)
        if not r.ok:
            print('Put failed. Status code: {}'.format(r.status_code))
            if r.status_code == 409:
                raise Exception('Unavailable repository')
        return r.ok
class CacheEntry:
    """One finished cache entry, located by its CACHE.DONE marker file.

    Decodes the Hunter cache directory hierarchy by repeatedly splitting the
    CACHE.DONE path.  Assumed layout (deepest first):
    meta/<toolchain>/<package>[/<component>]/<version>/<archive>/<args>/<type>/<internal-deps>/<done-dir>/CACHE.DONE
    NOTE(review): layout inferred from the chain of os.path.split calls below —
    confirm against Hunter's cache documentation.
    """
    def __init__(self, cache_done_path, cache_dir, temp_dir):
        """Record paths and derive all hierarchy ids from *cache_done_path*."""
        self.cache_dir = cache_dir
        self.temp_dir = temp_dir
        self.cache_raw = os.path.join(self.cache_dir, 'raw')
        self.cache_meta = os.path.join(self.cache_dir, 'meta')
        self.cache_done_path = cache_done_path
        if not os.path.exists(cache_done_path):
            raise Exception('File not exists: {}'.format(cache_done_path))
        self.cache_done_dir = os.path.dirname(self.cache_done_path)
        # Marker written when the entry was downloaded from the server.
        self.from_server = os.path.join(self.cache_done_dir, 'from.server')
        # File holding the SHA1 of the raw archive for this entry.
        self.cache_sha1 = os.path.join(self.cache_done_dir, 'cache.sha1')
        # Walk up the directory tree one level per id.
        self.internal_deps_id = os.path.split(self.cache_done_dir)[0]
        self.type_id = os.path.split(self.internal_deps_id)[0]
        self.args_id = os.path.split(self.type_id)[0]
        self.archive_id = os.path.split(self.args_id)[0]
        self.version = os.path.split(self.archive_id)[0]
        self.component = os.path.split(self.version)[0]
        # Component directories are named with a leading '__'; otherwise the
        # level we just reached is already the package directory.
        if os.path.split(self.component)[1].startswith('__'):
            self.package = os.path.split(self.component)[0]
        else:
            self.package = self.component
            self.component = ''
        self.toolchain_id = os.path.split(self.package)[0]
        meta = os.path.split(self.toolchain_id)[0]
        # Sanity check: after all splits we must be back at the meta root.
        # NOTE(review): assert is stripped under `python -O`.
        assert(meta == self.cache_meta)
    def entry_from_server(self):
        """True when this entry was originally downloaded from the server."""
        return os.path.exists(self.from_server)
    def upload_raw(self, github):
        """Upload this entry's raw archive (named by its SHA1) as an asset."""
        sha1 = open(self.cache_sha1, 'r').read()
        raw = os.path.join(self.cache_raw, sha1 + '.tar.bz2')
        github.upload_raw_file(raw)
    def upload_meta(self, github, cache_done):
        """Upload the meta files at every hierarchy level of this entry.

        When *cache_done* is True only the CACHE.DONE/basic-deps.DONE marker
        files are uploaded; otherwise everything but the markers is.
        """
        self.upload_files_from_common_dir(github, self.cache_done_dir, cache_done)
        self.upload_files_from_common_dir(github, self.internal_deps_id, cache_done)
        self.upload_files_from_common_dir(github, self.type_id, cache_done)
        self.upload_files_from_common_dir(github, self.args_id, cache_done)
        self.upload_files_from_common_dir(github, self.archive_id, cache_done)
        self.upload_files_from_common_dir(github, self.version, cache_done, check_is_empty=True)
        if self.component != '':
            self.upload_files_from_common_dir(github, self.component, cache_done, check_is_empty=True)
        self.upload_files_from_common_dir(github, self.package, cache_done, check_is_empty=True)
        self.upload_files_from_common_dir(github, self.toolchain_id, cache_done)
    def upload_files_from_common_dir(self, github, dir_path, cache_done, check_is_empty=False):
        """Upload the plain files of *dir_path*, verifying existing uploads.

        Skips lock/DONE bookkeeping files; for files that already exist on
        GitHub, downloads them back and compares SHA1 hashes to detect drift.
        """
        to_upload = []
        for i in os.listdir(dir_path):
            if i == 'cmake.lock':
                continue
            if i == 'DONE':
                continue
            done_file = (i == 'CACHE.DONE') or (i == 'basic-deps.DONE')
            # Marker files are uploaded only in the cache_done pass; everything
            # else only in the regular pass.
            if done_file and not cache_done:
                continue
            if not done_file and cache_done:
                continue
            i_fullpath = os.path.join(dir_path, i)
            if os.path.isfile(i_fullpath):
                to_upload.append(i_fullpath)
        if not cache_done:
            if check_is_empty and len(to_upload) != 0:
                raise Exception('Expected no files in directory: {}'.format(dir_path))
            if not check_is_empty and len(to_upload) == 0:
                raise Exception('No files found in directory: {}'.format(dir_path))
        for i in to_upload:
            # Path relative to the meta root, converted to GitHub's URL form.
            relative_path = i[len(self.cache_meta)+1:]
            relative_unix_path = relative_path.replace('\\', '/') # convert windows path
            expected_download_url = 'https://raw.githubusercontent.com/{}/{}/master/{}'.format(
                github.repo_owner,
                github.repo,
                relative_unix_path
            )
            github_url = 'https://github.com/{}/{}/blob/master/{}'.format(
                github.repo_owner,
                github.repo,
                relative_unix_path
            )
            print('Uploading file: {}'.format(relative_path))
            ok = github.create_new_file(i, relative_unix_path)
            if not ok:
                # File already on GitHub: fetch it and verify it matches ours.
                print('Already exist')
                temp_file = os.path.join(self.temp_dir, '__TEMP.FILE')
                download_file(expected_download_url, temp_file, github.auth)
                expected_content = open(i, 'rb').read()
                downloaded_content = open(temp_file, 'rb').read()
                expected_hash = hashlib.sha1(expected_content).hexdigest()
                downloaded_hash = hashlib.sha1(downloaded_content).hexdigest()
                os.remove(temp_file)
                if expected_hash != downloaded_hash:
                    print('Hash mismatch:')
                    print(
                        '  expected {} (content: {})'.format(
                            expected_hash, expected_content
                        )
                    )
                    print(
                        '  downloaded {} (content: {})'.format(
                            downloaded_hash, downloaded_content
                        )
                    )
                    print('GitHub link: {}'.format(github_url))
                    raise Exception('Hash mismatch')
class Cache:
    """The set of local cache entries still pending upload."""
    def __init__(self, cache_dir, temp_dir):
        """Scan *cache_dir* for entries, drop server-sourced ones, and make
        sure the temp directory exists for verification downloads."""
        self.entries = self.create_entries(cache_dir, temp_dir)
        self.remove_entries_from_server()
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
    def create_entries(self, cache_dir, temp_dir):
        """Return a CacheEntry for every CACHE.DONE marker under *cache_dir*."""
        print('Searching for CACHE.DONE files in directory:\n {}\n'.format(cache_dir))
        entries = [
            CacheEntry(os.path.join(root, filename), cache_dir, temp_dir)
            for root, dirs, files in os.walk(cache_dir)
            for filename in files
            if filename == 'CACHE.DONE'
        ]
        print('Found {} files:'.format(len(entries)))
        for entry in entries:
            print(' {}'.format(entry.cache_done_path))
        print('')
        return entries
    def remove_entries_from_server(self):
        """Drop entries that were downloaded from the server in the first place."""
        kept = []
        for entry in self.entries:
            if entry.entry_from_server():
                print('Remove entry (from server):\n {}'.format(entry.cache_done_path))
            else:
                kept.append(entry)
        self.entries = kept
    def upload_raw(self, github):
        """Upload every pending entry's raw archive."""
        for entry in self.entries:
            entry.upload_raw(github)
    def upload_meta(self, github, cache_done):
        """Upload every pending entry's meta files."""
        for entry in self.entries:
            entry.upload_meta(github, cache_done)
# Command-line entry point: parse arguments, collect local cache entries and
# upload them to GitHub (raw archives first, then meta, then DONE markers).
parser = argparse.ArgumentParser(
    description='Script for uploading Hunter cache files to GitHub'
)
parser.add_argument(
    '--username',
    required=True,
    help='Username'
)
parser.add_argument(
    '--repo-owner',
    required=True,
    help='Repository owner'
)
parser.add_argument(
    '--repo',
    required=True,
    help='Repository name'
)
parser.add_argument(
    '--cache-dir',
    required=True,
    help='Hunter cache directory, e.g. /home/user/.hunter/_Base/Cache'
)
parser.add_argument(
    '--temp-dir',
    required=True,
    help='Temporary directory where files will be downloaded for verification'
)
parser.add_argument(
    '--skip-raw', action='store_true', help="Skip uploading of raw files"
)
args = parser.parse_args()
cache_dir = os.path.normpath(args.cache_dir)
# Some tests don't produce cache for some toolchains:
# * https://travis-ci.org/ingenue/hunter/jobs/185550289
if not os.path.exists(cache_dir):
    # Missing cache is not an error for such jobs; exit with status 0.
    print("*** WARNING *** Cache directory '{}' not found, skipping...".format(cache_dir))
    sys.exit()
if not os.path.isdir(cache_dir):
    raise Exception('Not a directory: {}'.format(cache_dir))
if os.path.split(cache_dir)[1] != 'Cache':
    raise Exception('Cache directory path should ends with Cache: {}'.format(cache_dir))
cache = Cache(cache_dir, args.temp_dir)
# The GitHub credential comes from the environment, never the command line.
password = os.getenv('GITHUB_USER_PASSWORD')
if password == '' or password is None:
    raise Exception('Expected GITHUB_USER_PASSWORD environment variable')
github = Github(
    username = args.username,
    password = password,
    repo_owner = args.repo_owner,
    repo = args.repo
)
if args.skip_raw:
    print('*** WARNING *** Skip uploading of raw files')
else:
    cache.upload_raw(github)
# Meta files first, DONE markers last, so a partially-uploaded entry is
# never mistaken for a complete one.
cache.upload_meta(github, cache_done=False)
print('Uploading DONE files')
cache.upload_meta(github, cache_done=True) # Should be last
| |
import os
import struct
import portalocker
import pickle
class Storage(object):
    """Append-style file storage with file locking and a superblock header.

    All data writes go to the end of the file as length-prefixed records;
    the first SUPERBLOCK_SIZE bytes are reserved so the root address can be
    rewritten atomically at offset 0.
    """
    SUPERBLOCK_SIZE = 4096
    INTEGER_FORMAT = "!Q"   # big-endian unsigned 64-bit
    INTEGER_LENGTH = 8
    def __init__(self, f):
        """Wrap file object *f*, initially unlocked, with a padded superblock."""
        self._f = f
        self.locked = False
        # Guarantee data records never share the superblock's sector.
        self._ensure_superblock()
    def _ensure_superblock(self):
        """Pad the file so real data always starts after the superblock."""
        self.lock()
        self._seek_end()
        current_size = self._f.tell()
        shortfall = self.SUPERBLOCK_SIZE - current_size
        if shortfall > 0:
            self._f.write(b'\x00' * shortfall)
        self.unlock()
    def lock(self):
        """Acquire an exclusive lock; True if newly taken, False if held."""
        if self.locked:
            return False
        portalocker.lock(self._f, portalocker.LOCK_EX)
        self.locked = True
        return True
    def unlock(self):
        """Flush pending writes and release the lock, if held."""
        if not self.locked:
            return
        self._f.flush()
        portalocker.unlock(self._f)
        self.locked = False
    def _seek_end(self):
        """Position the file pointer at the end of the file."""
        self._f.seek(0, os.SEEK_END)
    def _seek_superblock(self):
        """Position the file pointer at offset 0 (start of the superblock)."""
        self._f.seek(0)
    def _bytes_to_integer(self, integer_bytes):
        """Unpack 8 big-endian bytes into an unsigned integer."""
        (value,) = struct.unpack(self.INTEGER_FORMAT, integer_bytes)
        return value
    def _integer_to_bytes(self, integer):
        """Pack an unsigned integer into 8 big-endian bytes."""
        return struct.pack(self.INTEGER_FORMAT, integer)
    def _read_integer(self):
        """Read one packed integer at the current file position."""
        raw = self._f.read(self.INTEGER_LENGTH)
        return self._bytes_to_integer(raw)
    def _write_integer(self, integer):
        """Write one packed integer at the current position (takes the lock)."""
        self.lock()
        self._f.write(self._integer_to_bytes(integer))
    def write(self, data):
        """Append *data* as a length-prefixed record; return its address."""
        self.lock()
        self._seek_end()
        object_address = self._f.tell()
        self._write_integer(len(data))
        self._f.write(data)
        return object_address
    def read(self, address):
        """Return the record previously written at *address*."""
        self._f.seek(address)
        size = self._read_integer()
        return self._f.read(size)
    def commit_root_address(self, root_address):
        """Durably record *root_address* at offset 0 of the superblock."""
        self.lock()
        self._f.flush()
        self._seek_superblock()
        # The write is atomic because the address sits on a sector boundary.
        self._write_integer(root_address)
        self._f.flush()
        self.unlock()
    def get_root_address(self):
        """Return the root address stored at the start of the superblock."""
        self._seek_superblock()
        return self._read_integer()
    def close(self):
        """Release the lock and close the underlying file."""
        self.unlock()
        self._f.close()
    @property
    def closed(self):
        """True once the underlying file has been closed."""
        return self._f.closed
class DBDB(object):
    """A Database that implements a simple key/value database.

    It lets you associate a key with a value, and store that association
    on disk for later retrieval, backed by Storage and a BinaryTree.
    """
    def __init__(self, f):
        """Initialize the storage file and tree structure for the database."""
        self._storage = Storage(f)
        self._tree = BinaryTree(self._storage)
    def _assert_not_closed(self):
        """Raise ValueError if the underlying storage file is closed."""
        if self._storage.closed:
            raise ValueError('Database closed.')
    def close(self):
        """Close the storage file."""
        self._storage.close()
    def commit(self):
        """Write pending database changes to the file, making them persistent."""
        self._assert_not_closed()
        self._tree.commit()
    def printTree(self):
        """Print the tree (delegates to the underlying BinaryTree)."""
        self._assert_not_closed()
        self._tree.printTree()
    def get(self, key):
        """Retrieve the value associated with *key*; raises if unknown."""
        self._assert_not_closed()
        return self._tree.get(key)
    def get_All_LTE(self, key):
        """Return all stored keys and values with keys <= *key*.

        Returns two lists — the first of keys, the second of corresponding
        values — found recursively by the underlying tree.  (The original
        stacked four bare string literals here; only the first was a
        docstring, the rest were discarded no-op statements.)
        """
        self._assert_not_closed()
        return self._tree.get_All_LTE(key)
    def set(self, key, value):
        """Set the value associated with *key*."""
        self._assert_not_closed()
        return self._tree.set(key, value)
    def delete(self, key):
        """Delete a key, value pair from the database."""
        self._assert_not_closed()
        return self._tree.delete(key)
class ValueRef(object):
    """A reference to a string value stored on disk."""
    def __init__(self, referent=None, address=0):
        """Wrap an in-memory value and/or the disk address it lives at."""
        self._referent = referent   # in-memory value, if loaded
        self._address = address     # on-disk address, 0 when unwritten
    @property
    def address(self):
        """Disk address of the stored value (0 until written)."""
        return self._address
    def prepare_to_store(self, storage):
        """Hook for subclasses; plain values need no preparation."""
        pass
    @staticmethod
    def referent_to_bytes(referent):
        """Serialize the string value as UTF-8 bytes."""
        return referent.encode('utf-8')
    @staticmethod
    def bytes_to_referent(bytes):
        """Deserialize UTF-8 bytes back into a string value."""
        return bytes.decode('utf-8')
    def get(self, storage):
        """Return the value, loading it lazily from disk on first access."""
        if self._referent is None and self._address:
            raw = storage.read(self._address)
            self._referent = self.bytes_to_referent(raw)
        return self._referent
    def store(self, storage):
        """Write the value to disk once, recording its address.

        Called by BinaryNode.store_refs; a ref that already has an address
        is never rewritten.
        """
        if self._referent is not None and not self._address:
            self.prepare_to_store(storage)
            payload = self.referent_to_bytes(self._referent)
            self._address = storage.write(payload)
class BinaryNodeRef(ValueRef):
    """Reference to a Red-Black Binary Tree node on disk.

    Overrides ValueRef's (de)serialization to pickle a node's fields,
    storing child/value links as disk addresses rather than objects.
    """
    # calls the BinaryNode's store_refs
    def prepare_to_store(self, storage):
        """have a node store its refs"""
        if self._referent:
            self._referent.store_refs(storage)
    @staticmethod
    def referent_to_bytes(referent):
        """Use pickle to convert node to bytes"""
        # Children and value are stored by address only; keys/colors inline.
        return pickle.dumps({
            'left': referent.left_ref.address,
            'key': referent.key,
            'value': referent.value_ref.address,
            'right': referent.right_ref.address,
            'color': referent.color,
        })
    @staticmethod
    def bytes_to_referent(string):
        """Unpickle bytes to get a node object"""
        # NOTE(review): pickle.loads trusts the database file completely —
        # never open an untrusted file with this code.
        d = pickle.loads(string)
        return BinaryNode(
            BinaryNodeRef(address=d['left']),
            d['key'],
            ValueRef(address=d['value']),
            BinaryNodeRef(address=d['right']),
            d['color'],
        )
class BinaryNode(object):
    """A node of the Red-Black tree: key, value ref, child refs and color."""
    @classmethod
    def from_node(cls, node, **kwargs):
        """Clone *node*, overriding any fields passed as keyword arguments."""
        override = kwargs.get
        return cls(
            left_ref=override('left_ref', node.left_ref),
            key=override('key', node.key),
            value_ref=override('value_ref', node.value_ref),
            right_ref=override('right_ref', node.right_ref),
            color=override('color', node.color),
        )
    def __init__(self, left_ref, key, value_ref, right_ref, color):
        """Store the node's key, value ref, child refs, and color."""
        self.left_ref = left_ref
        self.key = key
        self.value_ref = value_ref
        self.right_ref = right_ref
        self.color = color
    def is_red(self):
        """Return True when this node is colored red."""
        return self.color == Color.RED
    def is_black(self):
        """Return True when this node is colored black."""
        return self.color == Color.BLACK
    def store_refs(self, storage):
        """Persist this node's value and both subtrees via their refs.

        ValueRef.store on a node ref re-enters prepare_to_store, so this
        recursively writes the whole subtree.
        """
        self.value_ref.store(storage)
        self.left_ref.store(storage)
        self.right_ref.store(storage)
class Color(object):
    """Class which encodes the Red or Black parameter of a tree node"""
    # Numeric tags; these values are persisted inside pickled nodes, so they
    # must not change once a database file exists.
    RED = 0
    BLACK = 1
class BinaryTree(object):
    """Immutable red-black binary tree backed by disk storage.

    Mutations build new nodes along the changed path, leaving the old
    tree intact; `commit` persists the new root address.
    """

    def __init__(self, storage):
        """Bind the tree to its storage and load the current root ref."""
        self._storage = storage
        self._refresh_tree_ref()

    def commit(self):
        """Commit database changes to file, making them persistent."""
        # Triggers BinaryNodeRef.store, which recursively stores every
        # unsaved node reachable from the root.
        self._tree_ref.store(self._storage)
        # Make sure the address of the new root is recorded.
        self._storage.commit_root_address(self._tree_ref.address)

    def _refresh_tree_ref(self):
        """Re-read the root address in case another writer committed."""
        self._tree_ref = BinaryNodeRef(
            address=self._storage.get_root_address())

    def get(self, key):
        """Return the value stored under `key`; raise KeyError if absent."""
        # If the tree is not locked by another writer, refresh the
        # references and pick up a new root if one was committed.
        if not self._storage.locked:
            self._refresh_tree_ref()
        # Standard binary-search descent from the root.
        node = self._follow(self._tree_ref)
        while node is not None:
            if key < node.key:
                node = self._follow(node.left_ref)
            elif key > node.key:
                node = self._follow(node.right_ref)
            else:
                return self._follow(node.value_ref)
        raise KeyError

    def get_All_LTE(self, key):
        """Return ([keys], [values]) for every entry with key <= `key`.

        Delegates the traversal to the recursive helper `follow_LTE`.
        (The original described this in several bare strings; only the
        first of those was an actual docstring.)
        """
        # If the tree is not locked by another writer, refresh the
        # references and pick up a new root if one was committed.
        if not self._storage.locked:
            self._refresh_tree_ref()
        node = self._follow(self._tree_ref)
        LTE_Keys = []
        LTE_Vals = []
        LTE_Keys, LTE_Vals = self.follow_LTE(key, node, LTE_Keys, LTE_Vals)
        return LTE_Keys, LTE_Vals

    def follow_LTE(self, key, node, LTE_Keys, LTE_Vals):
        """Recursively collect keys/values where node.key <= `key`.

        Appends in-place to the passed lists and also returns them.
        """
        if node is None:
            return LTE_Keys, LTE_Vals
        if key >= node.key:
            # This node qualifies, and its right subtree (larger keys)
            # may still hold further matches.
            LTE_Keys.append(node.key)
            LTE_Vals.append(self._follow(node.value_ref))
            self.follow_LTE(key, self._follow(node.right_ref),
                            LTE_Keys, LTE_Vals)
        # Left-subtree keys are always smaller, so always descend left.
        self.follow_LTE(key, self._follow(node.left_ref),
                        LTE_Keys, LTE_Vals)
        return LTE_Keys, LTE_Vals

    def set(self, key, value):
        """Associate `value` with `key`, producing a new tree."""
        # Try to lock the tree. If we succeed, make sure
        # we don't lose updates from any other process.
        if self._storage.lock():
            self._refresh_tree_ref()
        # Get the current top-level node and wrap the value.
        node = self._follow(self._tree_ref)
        value_ref = ValueRef(value)
        # Insert, then blacken the new root before publishing it.
        self._tree_ref = BinaryNodeRef(
            referent=self.blacken(
                self._follow(self._insert(node, key, value_ref))))

    def _insert(self, node, key, value_ref):
        """Insert a new node, creating a new path from the root."""
        # Create a tree if there was none so far.
        # (Fixed typo: "ifnthere".  Also removed the redundant
        # _follow(BinaryNodeRef(referent=...)) round-trips that simply
        # returned the referent again.)
        if node is None:
            new_node = BinaryNode(
                BinaryNodeRef(), key, value_ref, BinaryNodeRef(), Color.RED)
            return BinaryNodeRef(referent=self.balance(new_node))
        elif key < node.key:
            new_node = BinaryNode.from_node(
                node,
                left_ref=self._insert(
                    self._follow(node.left_ref), key, value_ref))
            return BinaryNodeRef(referent=self.balance(new_node))
        elif key > node.key:
            new_node = BinaryNode.from_node(
                node,
                right_ref=self._insert(
                    self._follow(node.right_ref), key, value_ref))
            return BinaryNodeRef(referent=self.balance(new_node))
        else:
            # Same key: replace the value only.
            return BinaryNodeRef(
                referent=BinaryNode.from_node(node, value_ref=value_ref))

    def printTree(self):
        """Print a rough representation of the tree for error checking."""
        print("printing tree")
        node = self._follow(self._tree_ref)
        self.printNode(node)

    def printNode(self, node):
        """Recursively print nodes within the printTree function."""
        print(node.key, node.value_ref._referent, node.color)
        left_node = self._follow(node.left_ref)
        right_node = self._follow(node.right_ref)
        if left_node is not None:
            print("left of ", node.key)
            self.printNode(left_node)
        if right_node is not None:
            print("right of ", node.key)
            self.printNode(right_node)

    @staticmethod
    def blacken(node):
        """Return `node` recolored black (unchanged if already black)."""
        if node.is_red():
            return BinaryNode.from_node(node, color=Color.BLACK)
        return node

    def recolored(self, node):
        """Return a red copy of `node` with both children blackened."""
        # Nodes are immutable, so blacken() can take the children
        # directly; the extra from_node clone the original made around
        # each child was a no-op.
        return BinaryNode.from_node(
            node,
            left_ref=BinaryNodeRef(
                referent=self.blacken(self._follow(node.left_ref))),
            right_ref=BinaryNodeRef(
                referent=self.blacken(self._follow(node.right_ref))),
            color=Color.RED)

    @staticmethod
    def is_empty():
        return False

    def rotate_left(self, node):
        """Perform an Okasaki left rotation around `node`."""
        # Removed unused locals (left_node, right_left_node) the
        # original computed and never read.
        right_node = self._follow(node.right_ref)
        return BinaryNode.from_node(
            right_node,
            left_ref=BinaryNodeRef(
                referent=BinaryNode.from_node(
                    node, right_ref=right_node.left_ref)))

    def rotate_right(self, node):
        """Perform an Okasaki right rotation around `node`."""
        # Removed unused local (left_right_node).
        left_node = self._follow(node.left_ref)
        return BinaryNode.from_node(
            left_node,
            right_ref=BinaryNodeRef(
                referent=BinaryNode.from_node(
                    node, left_ref=left_node.right_ref)))

    def balance(self, node):
        """Rebalance after an insert (Okasaki-style case analysis).

        Children are followed once and reused; the original re-followed
        the same refs repeatedly inside nested ifs.
        """
        if node.is_red():
            return node
        left_node = self._follow(node.left_ref)
        right_node = self._follow(node.right_ref)
        if left_node is not None and left_node.is_red():
            # Red left and red right child: push redness up.
            if right_node is not None and right_node.is_red():
                return self.recolored(node)
            left_left = self._follow(left_node.left_ref)
            if left_left is not None and left_left.is_red():
                # left-left red: single right rotation, then recolor.
                return self.recolored(self.rotate_right(node))
            left_right = self._follow(left_node.right_ref)
            if left_right is not None and left_right.is_red():
                # left-right red: rotate the left child first, re-balance.
                return self.balance(BinaryNode.from_node(
                    node,
                    left_ref=BinaryNodeRef(
                        referent=self.rotate_left(left_node))))
        if right_node is not None and right_node.is_red():
            right_right = self._follow(right_node.right_ref)
            if right_right is not None and right_right.is_red():
                # right-right red: single left rotation, then recolor.
                return self.recolored(self.rotate_left(node))
            right_left = self._follow(right_node.left_ref)
            if right_left is not None and right_left.is_red():
                # right-left red: rotate the right child, then the node.
                return self.recolored(self.rotate_left(BinaryNode.from_node(
                    node,
                    right_ref=BinaryNodeRef(
                        referent=self.rotate_right(right_node)))))
        return node

    def _follow(self, ref):
        """Resolve a reference to its node via BinaryNodeRef.get."""
        return ref.get(self._storage)

    def _find_max(self, node):
        """Return the node holding the maximum key of `node`'s subtree."""
        while True:
            next_node = self._follow(node.right_ref)
            if next_node is None:
                return node
            node = next_node
def connect(dbname):
    """Open the database file (creating it if needed, never
    overwriting it) and return a DBDB instance wrapping it."""
    try:
        # Fast path: the file already exists.
        handle = open(dbname, 'r+b')
    except IOError:
        # Create without truncating, then wrap the raw descriptor.
        descriptor = os.open(dbname, os.O_RDWR | os.O_CREAT)
        handle = os.fdopen(descriptor, 'r+b')
    return DBDB(handle)
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
def bilinear_interp_np(input,
                       out_h,
                       out_w,
                       out_size=None,
                       actual_shape=None,
                       align_corners=True,
                       align_mode=0,
                       data_layout='NCHW'):
    """Reference bilinear interpolation over an [N, C, H, W] batch.

    `out_size` overrides the requested output height/width, and
    `actual_shape` overrides both.  NHWC input is transposed to NCHW
    for the computation and transposed back before returning.
    """
    if data_layout == "NHWC":
        input = np.transpose(input, (0, 3, 1, 2))  # NHWC => NCHW
    if out_size is not None:
        out_h, out_w = out_size[0], out_size[1]
    if actual_shape is not None:
        out_h, out_w = actual_shape[0], actual_shape[1]
    batch_size, channel, in_h, in_w = input.shape

    # Map output coordinates back onto the source grid.
    ratio_h = ratio_w = 0.0
    if out_h > 1:
        ratio_h = ((in_h - 1.0) / (out_h - 1.0) if align_corners
                   else 1.0 * in_h / out_h)
    if out_w > 1:
        ratio_w = ((in_w - 1.0) / (out_w - 1.0) if align_corners
                   else 1.0 * in_w / out_w)

    # Half-pixel source indexing applies only for this flag combination.
    half_pixel = (align_mode == 0 and not align_corners)
    out = np.zeros((batch_size, channel, out_h, out_w))
    for i in range(out_h):
        if half_pixel:
            h = max(0, int(ratio_h * (i + 0.5) - 0.5))
            h1lambda = max(ratio_h * (i + 0.5) - 0.5, 0) - h
        else:
            h = max(0, int(ratio_h * i))
            h1lambda = ratio_h * i - h
        hid = 1 if h < in_h - 1 else 0
        h2lambda = 1.0 - h1lambda
        for j in range(out_w):
            if half_pixel:
                w = max(0, int(ratio_w * (j + 0.5) - 0.5))
                w1lambda = max(ratio_w * (j + 0.5) - 0.5, 0) - w
            else:
                w = max(0, int(ratio_w * j))
                w1lambda = ratio_w * j - w
            wid = 1 if w < in_w - 1 else 0
            w2lambda = 1.0 - w1lambda
            # Weighted sum of the four surrounding source pixels.
            out[:, :, i, j] = \
                h2lambda * (w2lambda * input[:, :, h, w] +
                            w1lambda * input[:, :, h, w + wid]) + \
                h1lambda * (w2lambda * input[:, :, h + hid, w] +
                            w1lambda * input[:, :, h + hid, w + wid])

    if data_layout == "NHWC":
        out = np.transpose(out, (0, 2, 3, 1))  # NCHW => NHWC
    return out.astype(input.dtype)
class TestBilinearInterpOp(OpTest):
    """Checks the bilinear_interp op against the numpy reference."""

    def setUp(self):
        self.out_size = None
        self.actual_shape = None
        self.data_layout = 'NCHW'
        self.init_test_case()
        self.op_type = "bilinear_interp"
        input_np = np.random.random(self.input_shape).astype("float32")

        # Spatial dims live at different indices depending on layout.
        if self.data_layout == "NCHW":
            in_h, in_w = self.input_shape[2], self.input_shape[3]
        else:
            in_h, in_w = self.input_shape[1], self.input_shape[2]

        # A positive scale overrides the explicit output size.
        if self.scale > 0:
            out_h, out_w = int(in_h * self.scale), int(in_w * self.scale)
        else:
            out_h, out_w = self.out_h, self.out_w

        output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
                                       self.actual_shape, self.align_corners,
                                       self.align_mode, self.data_layout)
        self.inputs = {'X': input_np}
        if self.out_size is not None:
            self.inputs['OutSize'] = self.out_size
        # actual_shape takes priority over out_size for the OutSize input.
        if self.actual_shape is not None:
            self.inputs['OutSize'] = self.actual_shape

        self.attrs = {
            'out_h': self.out_h,
            'out_w': self.out_w,
            'scale': self.scale,
            'interp_method': self.interp_method,
            'align_corners': self.align_corners,
            'align_mode': self.align_mode,
            'data_layout': self.data_layout
        }
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', in_place=True)

    def init_test_case(self):
        """Base configuration; subclasses override this hook."""
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 4, 4]
        self.out_h = 2
        self.out_w = 2
        self.scale = 0.
        self.out_size = np.array([3, 3]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase1(TestBilinearInterpOp):
    # Downsample [4, 1, 7, 8] to a single 1x1 output pixel.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [4, 1, 7, 8]
        self.out_h = 1
        self.out_w = 1
        self.scale = 0.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase2(TestBilinearInterpOp):
    # Upsample [3, 3, 9, 6] to 12x12.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [3, 3, 9, 6]
        self.out_h = 12
        self.out_w = 12
        self.scale = 0.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase3(TestBilinearInterpOp):
    # Swap the aspect ratio: 128x64 input to 64x128 output.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [1, 1, 128, 64]
        self.out_h = 64
        self.out_w = 128
        self.scale = 0.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase4(TestBilinearInterpOp):
    # out_size tensor ([2, 2]) overrides the attribute size (1x1).
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [4, 1, 7, 8]
        self.out_h = 1
        self.out_w = 1
        self.scale = 0.
        self.out_size = np.array([2, 2]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase5(TestBilinearInterpOp):
    # out_size tensor ([11, 11]) overrides the attribute size (12x12).
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [3, 3, 9, 6]
        self.out_h = 12
        self.out_w = 12
        self.scale = 0.
        self.out_size = np.array([11, 11]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase6(TestBilinearInterpOp):
    # Odd out_size ([65, 129]) overriding the attribute size.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [1, 1, 128, 64]
        self.out_h = 64
        self.out_w = 128
        self.scale = 0.
        self.out_size = np.array([65, 129]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpSame(TestBilinearInterpOp):
    # Identity case: output size equals input size.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 128, 64]
        self.out_h = 128
        self.out_w = 64
        self.scale = 0.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpActualShape(TestBilinearInterpOp):
    # out_size tensor ([66, 40]) overrides the 64x32 attribute size.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [3, 2, 32, 16]
        self.out_h = 64
        self.out_w = 32
        self.scale = 0.
        self.out_size = np.array([66, 40]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpDataLayout(TestBilinearInterpOp):
    # Exercises the NHWC data layout path.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [2, 4, 4, 3]
        self.out_h = 2
        self.out_w = 2
        self.scale = 0.
        self.out_size = np.array([3, 3]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
        self.data_layout = "NHWC"
class TestBilinearInterpOpUint8(OpTest):
    """Runs bilinear_interp on uint8 input against the numpy reference."""

    def setUp(self):
        self.out_size = None
        self.actual_shape = None
        self.init_test_case()
        self.op_type = "bilinear_interp"
        # Random uint8 image data in [0, 255].
        input_np = np.random.randint(
            low=0, high=256, size=self.input_shape).astype("uint8")
        # A positive scale overrides the explicit output size.
        if self.scale > 0:
            out_h = int(self.input_shape[2] * self.scale)
            out_w = int(self.input_shape[3] * self.scale)
        else:
            out_h = self.out_h
            out_w = self.out_w
        output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
                                       self.actual_shape, self.align_corners,
                                       self.align_mode)
        self.inputs = {'X': input_np}
        if self.out_size is not None:
            self.inputs['OutSize'] = self.out_size
        self.attrs = {
            'out_h': self.out_h,
            'out_w': self.out_w,
            'scale': self.scale,
            'interp_method': self.interp_method,
            'align_corners': self.align_corners,
            'align_mode': self.align_mode
        }
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        # Integer rounding can differ by one unit, hence atol=1.
        # CPU-only: no GPU place is checked here.
        self.check_output_with_place(place=core.CPUPlace(), atol=1)

    def init_test_case(self):
        """Base uint8 configuration; subclasses override this hook."""
        self.interp_method = 'bilinear'
        self.input_shape = [1, 3, 9, 6]
        self.out_h = 10
        self.out_w = 9
        self.scale = 0.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8):
    # Non-uniform downscale of a uint8 batch.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 128, 64]
        self.out_h = 120
        self.out_w = 50
        self.scale = 0.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8):
    # uint8 input with an out_size tensor overriding the attribute size.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [4, 1, 7, 8]
        self.out_h = 5
        self.out_w = 13
        self.scale = 0.
        self.out_size = np.array([6, 15]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpOtherMethod1(TestBilinearInterpOp):
    """align_corners=False with align_mode=1.

    Bug fix: `set_align_mode` was never invoked by anything (setUp only
    calls `init_test_case`), so this class silently re-ran the base
    configuration.  `init_test_case` now applies the override; the old
    method is kept for backward compatibility.
    """

    def set_align_mode(self):
        self.align_corners = False
        self.align_mode = 1

    def init_test_case(self):
        super(TestBilinearInterpOtherMethod1, self).init_test_case()
        self.set_align_mode()
class TestBilinearInterpWithMethod2(TestBilinearInterpOp):
    """align_corners=False with align_mode=0 (half-pixel).

    Bug fix: `set_align_mode` was never invoked by anything (setUp only
    calls `init_test_case`), so this class silently re-ran the base
    configuration.  `init_test_case` now applies the override; the old
    method is kept for backward compatibility.
    """

    def set_align_mode(self):
        self.align_corners = False
        self.align_mode = 0

    def init_test_case(self):
        super(TestBilinearInterpWithMethod2, self).init_test_case()
        self.set_align_mode()
class TestBilinearInterpWithMethod3(TestBilinearInterpOp):
    """align_corners=True with align_mode=0.

    Bug fix: `set_align_mode` was never invoked by anything (setUp only
    calls `init_test_case`), so this class silently re-ran the base
    configuration.  `init_test_case` now applies the override; the old
    method is kept for backward compatibility.
    """

    def set_align_mode(self):
        self.align_corners = True
        self.align_mode = 0

    def init_test_case(self):
        super(TestBilinearInterpWithMethod3, self).init_test_case()
        self.set_align_mode()
class TestBilinearInterpScale1(TestBilinearInterpOp):
    # scale=2 overrides out_h/out_w (output becomes 10x14).
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 2.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpScale2(TestBilinearInterpOp):
    # scale=1 keeps the input size (identity scaling).
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 1.
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpScale3(TestBilinearInterpOp):
    # Fractional upscale: scale=1.5.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 1.5
        self.align_corners = True
        self.align_mode = 1
class TestBilinearInterpZero(TestBilinearInterpOp):
    # Aggressive downscale (scale=0.2) with half-pixel indexing.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 0.2
        self.align_corners = False
        self.align_mode = 0
class TestBilinearInterpOp_attr_tensor(OpTest):
    """Covers passing the output size / scale as tensor inputs."""

    def setUp(self):
        self.out_size = None
        self.actual_shape = None
        # Bug fix: these flag defaults must be set BEFORE
        # init_test_case(); previously they were assigned afterwards,
        # which clobbered the True values set by the *_Case2/_Case3
        # subclasses so the tensor-input code paths were never tested.
        self.shape_by_1Dtensor = False
        self.scale_by_1Dtensor = False
        self.init_test_case()
        self.op_type = "bilinear_interp"
        self.attrs = {
            'interp_method': self.interp_method,
            'align_corners': self.align_corners,
        }

        input_np = np.random.random(self.input_shape).astype("float32")
        self.inputs = {'X': input_np}

        if self.scale_by_1Dtensor:
            self.inputs['Scale'] = np.array([self.scale]).astype("float32")
            # The numpy reference still needs concrete dimensions
            # (previously out_h/out_w were left unbound on this path).
            out_h = int(self.input_shape[2] * self.scale)
            out_w = int(self.input_shape[3] * self.scale)
        elif self.scale > 0:
            out_h = int(self.input_shape[2] * self.scale)
            out_w = int(self.input_shape[3] * self.scale)
            self.attrs['scale'] = self.scale
        else:
            out_h = self.out_h
            out_w = self.out_w

        if self.shape_by_1Dtensor:
            # Whole output size as one 1-D OutSize tensor.
            self.inputs['OutSize'] = self.out_size
        elif self.out_size is not None:
            # Output size as a list of per-dimension scalar tensors.
            size_tensor = []
            for index, ele in enumerate(self.out_size):
                size_tensor.append(("x" + str(index), np.ones(
                    (1)).astype('int32') * ele))
            self.inputs['SizeTensor'] = size_tensor

        self.attrs['out_h'] = self.out_h
        self.attrs['out_w'] = self.out_w
        output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
                                       self.actual_shape, self.align_corners)
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', in_place=True)

    def init_test_case(self):
        """Base configuration; subclasses override this hook."""
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 4, 4]
        self.out_h = 3
        self.out_w = 3
        self.scale = 0.
        self.out_size = [3, 3]
        self.align_corners = True
# out_size is passed as a list of per-dimension scalar tensors (SizeTensor)
class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor):
    # Output size supplied via the SizeTensor list-of-tensors input.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [3, 3, 9, 6]
        self.out_h = 12
        self.out_w = 12
        self.scale = 0.
        self.out_size = [8, 12]
        self.align_corners = True
# out_size is a 1-D tensor (OutSize input)
class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor):
    # Output size supplied as a single 1-D OutSize tensor.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [3, 2, 32, 16]
        self.out_h = 64
        self.out_w = 32
        self.scale = 0.
        self.out_size = np.array([66, 40]).astype("int32")
        self.align_corners = True
        self.shape_by_1Dtensor = True
# scale is a 1-D tensor
class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor):
    # Scale supplied as a 1-D Scale tensor input.
    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.input_shape = [3, 2, 32, 16]
        self.out_h = 64
        self.out_w = 32
        self.scale = 2.0
        self.out_size = None
        self.align_corners = True
        self.scale_by_1Dtensor = True
class TestBilinearInterpOpAPI(OpTest):
    """End-to-end check of fluid.layers.resize_bilinear variants.

    Builds five resize ops — static shape, mixed list/tensor shape,
    shape tensor, actual_shape tensor, and scale tensor — that all
    resolve to a 12x12 output, then verifies each against the numpy
    reference.
    """

    def test_case(self):
        x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")

        dim = fluid.data(name="dim", shape=[1], dtype="int32")
        shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
        actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32")
        scale_tensor = fluid.data(
            name="scale_tensor", shape=[1], dtype="float32")

        out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12])
        out2 = fluid.layers.resize_bilinear(x, out_shape=[12, dim])
        out3 = fluid.layers.resize_bilinear(x, out_shape=shape_tensor)
        # actual_shape overrides the static [4, 4] at runtime.
        out4 = fluid.layers.resize_bilinear(
            x, out_shape=[4, 4], actual_shape=actual_size)
        out5 = fluid.layers.resize_bilinear(x, scale=scale_tensor)

        # All feeds are chosen so every variant yields 12x12 output.
        x_data = np.random.random((2, 3, 6, 6)).astype("float32")
        dim_data = np.array([12]).astype("int32")
        shape_data = np.array([12, 12]).astype("int32")
        actual_size_data = np.array([12, 12]).astype("int32")
        scale_data = np.array([2.0]).astype("float32")

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(fluid.default_main_program(),
                          feed={
                              "x": x_data,
                              "dim": dim_data,
                              "shape_tensor": shape_data,
                              "actual_size": actual_size_data,
                              "scale_tensor": scale_data
                          },
                          fetch_list=[out1, out2, out3, out4, out5],
                          return_numpy=True)

        expect_res = bilinear_interp_np(
            x_data, out_h=12, out_w=12, align_corners=True)
        for res in results:
            self.assertTrue(np.allclose(res, expect_res))
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| |
# -*- coding: utf-8 -*-
""" Handle most tasks related to DynamoDB interaction """
import re
import sys
import time
import datetime
from boto import dynamodb2
from boto.dynamodb2.table import Table
from boto.exception import DynamoDBResponseError, JSONResponseError
from dynamic_dynamodb.log_handler import LOGGER as logger
from dynamic_dynamodb.config_handler import (
get_configured_tables,
get_global_option,
get_gsi_option,
get_table_option)
from dynamic_dynamodb.aws import sns
def get_tables_and_gsis():
    """ Get a set of tables and gsis and their configuration keys

    Every configured table name is treated as a regular expression and
    compared against the live DynamoDB table list.

    :returns: set -- A set of tuples (table_name, table_conf_key)
    """
    matched = set()
    configured_tables = get_configured_tables()
    unmatched_keys = set(configured_tables)

    # Compare every live table against every configured (regexp) key.
    for table_instance in list_tables():
        name = table_instance.table_name
        for key_name in configured_tables:
            try:
                if not re.match(key_name, name):
                    logger.debug(
                        "Table {0} did not match with config key {1}".format(
                            name, key_name))
                    continue
                logger.debug("Table {0} match with config key {1}".format(
                    name, key_name))
                # Notify users about regexps that match multiple tables
                if name in [x[0] for x in matched]:
                    logger.warning(
                        'Table {0} matches more than one regexp in config, '
                        'skipping this match: "{1}"'.format(name, key_name))
                else:
                    matched.add((name, key_name))
                    unmatched_keys.discard(key_name)
            except re.error:
                logger.error('Invalid regular expression: "{0}"'.format(
                    key_name))
                sys.exit(1)

    if unmatched_keys:
        logger.warning(
            'No tables matching the following configured '
            'tables found: {0}'.format(', '.join(unmatched_keys)))

    return sorted(matched)
def get_table(table_name):
    """ Return the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: boto.dynamodb.table.Table
    :raises: DynamoDBResponseError (logged first when the table is missing)
    """
    try:
        return Table(table_name, connection=DYNAMODB_CONNECTION)
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error(
                '{0} - Table {1} not found'.format(table_name, table_name))
        # Every response error is re-raised to the caller.
        raise
def get_gsi_status(table_name, gsi_name):
    """ Return the status of a global secondary index

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :returns: str -- the IndexStatus, or None when no GSI matches
    """
    # The original wrapped this call in a try/except that only
    # re-raised JSONResponseError; the exception propagates either way.
    desc = DYNAMODB_CONNECTION.describe_table(table_name)
    for gsi in desc[u'Table'][u'GlobalSecondaryIndexes']:
        if gsi[u'IndexName'] == gsi_name:
            return gsi[u'IndexStatus']
    # Falls through (returning None) when the GSI is not found.
def get_provisioned_gsi_read_units(table_name, gsi_name):
    """ Returns the number of provisioned read units for a GSI

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :returns: int -- Number of read units
    :raises: KeyError if no GSI with that name exists on the table
    """
    # The no-op "except JSONResponseError: raise" wrapper was removed;
    # the exception propagates to the caller either way.
    desc = DYNAMODB_CONNECTION.describe_table(table_name)
    for gsi in desc[u'Table'][u'GlobalSecondaryIndexes']:
        if gsi[u'IndexName'] == gsi_name:
            read_units = int(
                gsi[u'ProvisionedThroughput'][u'ReadCapacityUnits'])
            break
    else:
        # Previously a missing GSI crashed with UnboundLocalError on
        # read_units; raise a meaningful error instead.
        raise KeyError('GSI {0} not found on table {1}'.format(
            gsi_name, table_name))

    logger.debug(
        '{0} - GSI: {1} - Currently provisioned read units: {2:d}'.format(
            table_name, gsi_name, read_units))
    return read_units
def get_provisioned_gsi_write_units(table_name, gsi_name):
    """ Returns the number of provisioned write units for a GSI

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :returns: int -- Number of write units
    :raises: KeyError if no GSI with that name exists on the table
    """
    # The no-op "except JSONResponseError: raise" wrapper was removed;
    # the exception propagates to the caller either way.
    desc = DYNAMODB_CONNECTION.describe_table(table_name)
    for gsi in desc[u'Table'][u'GlobalSecondaryIndexes']:
        if gsi[u'IndexName'] == gsi_name:
            write_units = int(
                gsi[u'ProvisionedThroughput'][u'WriteCapacityUnits'])
            break
    else:
        # Previously a missing GSI crashed with UnboundLocalError on
        # write_units; raise a meaningful error instead.
        raise KeyError('GSI {0} not found on table {1}'.format(
            gsi_name, table_name))

    logger.debug(
        '{0} - GSI: {1} - Currently provisioned write units: {2:d}'.format(
            table_name, gsi_name, write_units))
    return write_units
def get_provisioned_table_read_units(table_name):
    """ Returns the number of provisioned read units for the table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: int -- Number of read units
    """
    # JSONResponseError propagates directly; the original's
    # try/except that only re-raised it added nothing.
    desc = DYNAMODB_CONNECTION.describe_table(table_name)
    read_units = int(
        desc[u'Table'][u'ProvisionedThroughput'][u'ReadCapacityUnits'])
    logger.debug('{0} - Currently provisioned read units: {1:d}'.format(
        table_name, read_units))
    return read_units
def get_provisioned_table_write_units(table_name):
    """ Returns the number of provisioned write units for the table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: int -- Number of write units
    """
    # JSONResponseError propagates directly; the original's
    # try/except that only re-raised it added nothing.
    desc = DYNAMODB_CONNECTION.describe_table(table_name)
    write_units = int(
        desc[u'Table'][u'ProvisionedThroughput'][u'WriteCapacityUnits'])
    logger.debug('{0} - Currently provisioned write units: {1:d}'.format(
        table_name, write_units))
    return write_units
def get_table_status(table_name):
    """ Return the current status of a DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: str -- the TableStatus field
    """
    # JSONResponseError propagates directly; the original's
    # try/except that only re-raised it added nothing.
    desc = DYNAMODB_CONNECTION.describe_table(table_name)
    return desc[u'Table'][u'TableStatus']
def list_tables():
    """ Return list of DynamoDB tables available from AWS

    Follows LastEvaluatedTableName pagination until all pages are read.
    On known DynamoDB errors the partial (possibly empty) list is
    returned; on a communication error the process exits.

    :returns: list -- List of DynamoDB tables
    """
    tables = []

    try:
        table_list = DYNAMODB_CONNECTION.list_tables()
        while True:
            for table_name in table_list[u'TableNames']:
                tables.append(get_table(table_name))

            # Paginate: the presence of LastEvaluatedTableName means
            # there are more pages to fetch.
            if u'LastEvaluatedTableName' in table_list:
                table_list = DYNAMODB_CONNECTION.list_tables(
                    table_list[u'LastEvaluatedTableName'])
            else:
                break

    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error('No tables found')
        elif dynamodb_error == 'AccessDeniedException':
            logger.debug(
                'Your AWS API keys lack access to listing tables. '
                'That is an issue if you are trying to use regular '
                'expressions in your table configuration.')
        elif dynamodb_error == 'UnrecognizedClientException':
            logger.error(
                'Invalid security token. Are your AWS API keys correct?')
        else:
            logger.error(
                (
                    'Unhandled exception: {0}: {1}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    dynamodb_error,
                    error.body['message']))

    except JSONResponseError as error:
        # Transport-level failure: treat as fatal.
        logger.error('Communication error: {0}'.format(error))
        sys.exit(1)

    return tables
def update_table_provisioning(
        table_name, key_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    Applies down-scaling switches and maintenance-window rules before
    calling table.update, and publishes an SNS notification describing
    the change.  On LimitExceededException it retries itself once with
    retry_with_only_increase=True.

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    # Make sure we aren't scaling down if we turned off downscaling
    if (not get_table_option(key_name, 'enable_reads_down_scaling') or
            not get_table_option(key_name, 'enable_writes_down_scaling')):
        if (not get_table_option(key_name, 'enable_reads_down_scaling') and
                current_reads > reads):
            reads = current_reads
        if (not get_table_option(key_name, 'enable_writes_down_scaling') and
                current_writes > writes):
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    # Check that we are in the right time frame: outside a maintenance
    # window only up-scaling is allowed.
    maintenance_windows = get_table_option(key_name, 'maintenance_windows')
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                '{0} - We are outside a maintenace window. '
                'Will only perform up scaling activites'.format(table_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - No need to scale up reads nor writes'.format(
                        table_name))
                return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    logger.info(
        '{0} - Updating provisioning to {1} reads and {2} writes'.format(
            table_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        table.update(
            throughput={
                'read': reads,
                'write': writes
            })

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        message = []
        if current_reads > reads:
            message.append('{0} - Reads: DOWN from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        elif current_reads < reads:
            message.append('{0} - Reads: UP from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        if current_writes > writes:
            message.append('{0} - Writes: DOWN from {1} to {2}\n'.format(
                table_name, current_writes, writes))
        elif current_writes < writes:
            message.append('{0} - Writes: UP from {1} to {2}\n'.format(
                table_name, current_writes, writes))

        sns.publish_table_notification(
            key_name,
            ''.join(message),
            sns_message_types,
            subject='Updated provisioning for table {0}'.format(table_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        # Expected/benign AWS errors are logged as warnings only.
        know_exceptions = [
            'LimitExceededException',
            'ValidationException',
            'ResourceInUseException']
        if exception in know_exceptions:
            logger.warning('{0} - {1}: {2}'.format(
                table_name, exception, error.body['message']))
        else:
            if 'message' in error.body:
                msg = error.body['message']
            else:
                msg = error
            logger.error(
                (
                    '{0} - Unhandled exception: {1}: {2}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(table_name, exception, msg))

        # One bounded retry: re-run with decreases stripped out.
        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - Will retry to update provisioning '
                'with only increases'.format(table_name))
            update_table_provisioning(
                table_name,
                key_name,
                reads,
                writes,
                retry_with_only_increase=True)
def update_gsi_provisioning(
        table_name, table_key, gsi_name, gsi_key,
        reads, writes, retry_with_only_increase=False):
    """ Update provisioning on a global secondary index

    On a LimitExceededException the function calls itself once more with
    retry_with_only_increase=True, so that pure scale-ups can still be
    applied when a decrease hit the daily decrease limit.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name))
    current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name))
    # Make sure we aren't scaling down if we turned off downscaling
    if (not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling') or
            not get_gsi_option(
                table_key, gsi_key, 'enable_writes_down_scaling')):
        if (not get_gsi_option(
                table_key, gsi_key, 'enable_reads_down_scaling') and
                current_reads > reads):
            reads = current_reads
        if (not get_gsi_option(
                table_key, gsi_key, 'enable_writes_down_scaling') and
                current_writes > writes):
            writes = current_writes
        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return
    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes
        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - GSI: {1} - No need to scale up reads nor writes'.format(
                    table_name, gsi_name))
            return
        logger.info(
            '{0} - GSI: {1} - Retrying to update provisioning, '
            'excluding any decreases. '
            'Setting new reads to {2} and new writes to {3}'.format(
                table_name, gsi_name, reads, writes))
    # Check that we are in the right time frame
    m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows')
    if m_windows:
        if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows):
            # Outside the window only scale-ups are allowed; decreases
            # are deferred until a maintenance window is reached.
            logger.warning(
                '{0} - GSI: {1} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(
                    table_name,
                    gsi_name))
            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes
            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - GSI: {1} - '
                    'No need to scale up reads nor writes'.format(
                        table_name,
                        gsi_name))
                return
        else:
            logger.info(
                '{0} - GSI: {1} - '
                'Current time is within maintenance window'.format(
                    table_name,
                    gsi_name))
    logger.info(
        '{0} - GSI: {1} - '
        'Updating provisioning to {2} reads and {3} writes'.format(
            table_name, gsi_name, reads, writes))
    # Return if dry-run
    if get_global_option('dry_run'):
        return
    try:
        DYNAMODB_CONNECTION.update_table(
            table_name=table_name,
            global_secondary_index_updates=[
                {
                    "Update": {
                        "IndexName": gsi_name,
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": reads,
                            "WriteCapacityUnits": writes
                        }
                    }
                }
            ])
        message = []
        if current_reads > reads:
            message.append(
                '{0} - GSI: {1} - Reads: DOWN from {2} to {3}\n'.format(
                    table_name, gsi_name, current_reads, reads))
        elif current_reads < reads:
            message.append(
                '{0} - GSI: {1} - Reads: UP from {2} to {3}\n'.format(
                    table_name, gsi_name, current_reads, reads))
        if current_writes > writes:
            message.append(
                '{0} - GSI: {1} - Writes: DOWN from {2} to {3}\n'.format(
                    table_name, gsi_name, current_writes, writes))
        elif current_writes < writes:
            message.append(
                '{0} - GSI: {1} - Writes: UP from {2} to {3}\n'.format(
                    table_name, gsi_name, current_writes, writes))
        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')
        sns.publish_gsi_notification(
            table_key,
            gsi_key,
            ''.join(message),
            sns_message_types,
            subject='Updated provisioning for GSI {0}'.format(gsi_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = ['LimitExceededException']
        if exception in known_exceptions:
            logger.warning('{0} - GSI: {1} - {2}: {3}'.format(
                table_name, gsi_name, exception, error.body['message']))
        else:
            # Match update_table_provisioning: 'message' is not guaranteed
            # to be present in the error body.
            if 'message' in error.body:
                msg = error.body['message']
            else:
                msg = error
            logger.error(
                (
                    '{0} - GSI: {1} - Unhandled exception: {2}: {3}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    table_name, gsi_name, exception, msg))
        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - GSI: {1} - Will retry to update provisioning '
                'with only increases'.format(table_name, gsi_name))
            update_gsi_provisioning(
                table_name,
                table_key,
                gsi_name,
                gsi_key,
                reads,
                writes,
                retry_with_only_increase=True)
def table_gsis(table_name):
    """ Returns the GSI descriptions for the given table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: list -- List of GSI description dicts (empty if the table
        has no global secondary indexes)
    :raises: JSONResponseError if the table cannot be described
    """
    # The previous try/except around describe_table only re-raised
    # JSONResponseError unchanged, so it has been removed; the error
    # simply propagates to the caller.
    desc = DYNAMODB_CONNECTION.describe_table(table_name)[u'Table']
    if u'GlobalSecondaryIndexes' in desc:
        return desc[u'GlobalSecondaryIndexes']
    return []
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    :returns: boto dynamodb2 connection for the configured region
    :raises: RuntimeError when no connection could be established after
        exhausting all retries
    """
    connected = False
    region = get_global_option('region')
    while not connected:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            # Explicit credentials from the configuration file take
            # precedence over boto's credential chain.
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = dynamodb2.connect_to_region(region)
        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                # A bare `raise` here (as previously written) would itself
                # fail with "RuntimeError: No active exception to re-raise";
                # raise an explicit error instead.
                raise RuntimeError(
                    'Failed to connect to DynamoDB in {0}'.format(region))
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(region))
    return connection
def __is_gsi_maintenance_window(table_name, gsi_name, maintenance_windows):
    """ Checks that the current time is within the maintenance window

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type maintenance_windows: str
    :param maintenance_windows: Example: '00:00-01:00,10:00-11:00'
    :returns: bool -- True if within maintenance window
    """
    # Parse e.g. '00:00-01:00,10:00-11:00' into (start, end) pairs.
    windows = []
    for spec in maintenance_windows.split(','):
        pieces = spec.split('-', 1)
        if len(pieces) != 2:
            logger.error(
                '{0} - GSI: {1} - '
                'Malformatted maintenance window'.format(table_name, gsi_name))
            return False
        windows.append(pieces)
    # Compare HHMM strings lexicographically (UTC).
    current = datetime.datetime.utcnow().strftime('%H%M')
    for begin, finish in windows:
        if begin.replace(':', '') <= current <= finish.replace(':', ''):
            return True
    return False
def __is_table_maintenance_window(table_name, maintenance_windows):
    """ Checks that the current time is within the maintenance window

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type maintenance_windows: str
    :param maintenance_windows: Example: '00:00-01:00,10:00-11:00'
    :returns: bool -- True if within maintenance window
    """
    # Parse e.g. '00:00-01:00,10:00-11:00' into (start, end) pairs.
    parsed = []
    for spec in maintenance_windows.split(','):
        pieces = spec.split('-', 1)
        if len(pieces) != 2:
            logger.error(
                '{0} - Malformatted maintenance window'.format(table_name))
            return False
        parsed.append(pieces)
    # Compare HHMM strings lexicographically (UTC).
    now = datetime.datetime.utcnow().strftime('%H%M')
    return any(
        begin.replace(':', '') <= now <= finish.replace(':', '')
        for begin, finish in parsed)
DYNAMODB_CONNECTION = __get_connection_dynamodb()
| |
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
import os
import logging
import logging.handlers
from typing import List
# Filesystem layout, resolved relative to this config file.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")
TEMPLATES_DIR = os.path.join(BASE_DIR, "templates")
# Statement for enabling the development environment.
# Shows an interactive debugger for unhandled exceptions.
# Attention: This will be disabled by default.
DEBUG = False
# Show a maintenance page.
MAINTENANCE_MODE = os.getenv("MAINTENANCE_MODE", "false").lower() == "true"
# Enables connecting to the remote database using the cloud sql proxy.
USE_REMOTE_DB_THROUGH_CLOUDSQL_PROXY = (
    os.getenv("USE_REMOTE_DB_THROUGH_CLOUDSQL_PROXY", "false").lower() == "true"
)
# Deployment mode detection: GAE standard sets GAE_ENV/GAE_APPLICATION;
# the application id (minus the "s~" region prefix) selects QA vs PROD.
IS_PROD = IS_QA = IS_LOCAL = False
if os.getenv("GAE_ENV", "").startswith("standard"):
    # Running on GAE. This is either PROD or QA.
    appname = os.environ["GAE_APPLICATION"]
    appname = appname.replace("s~", "")
    if appname == os.environ["QA_PROJECT_ID"]:
        IS_QA = True
    elif appname == os.environ["PROD_PROJECT_ID"]:
        IS_PROD = True
    else:
        raise AssertionError(f"Deployed in unknown environment: {appname}.")
if not IS_PROD and not IS_QA:
    IS_LOCAL = True
# Make sure exactly one mode is active at all times.
assert int(IS_PROD) + int(IS_QA) + int(IS_LOCAL) == 1
# Per-environment database credentials and VCS proxy endpoint.
# NOTE(review): MYSQL_CONNECTION_NAME is intentionally unset for local
# runs; gen_connection_string() below only uses it for PROD/QA.
if IS_PROD:
    DEBUG = False
    MYSQL_USER = os.environ["MYSQL_PROD_USER"]
    MYSQL_PASS = os.environ["MYSQL_PROD_PASS"]
    MYSQL_CONNECTION_NAME = os.environ["MYSQL_PROD_CONNECTION_NAME"]
    GCE_VCS_PROXY_URL = os.environ["VCS_PROXY_PROD_URL"]
elif IS_QA:
    DEBUG = True
    MYSQL_USER = os.environ["MYSQL_QA_USER"]
    MYSQL_PASS = os.environ["MYSQL_QA_PASS"]
    MYSQL_CONNECTION_NAME = os.environ["MYSQL_QA_CONNECTION_NAME"]
    GCE_VCS_PROXY_URL = os.environ["VCS_PROXY_QA_URL"]
elif IS_LOCAL:
    DEBUG = True
    MYSQL_USER = os.getenv("MYSQL_LOCAL_USER", "root")
    MYSQL_PASS = os.getenv("MYSQL_LOCAL_PASS", "pass")
    GCE_VCS_PROXY_URL = os.getenv("VCS_PROXY_LOCAL_URL", "")
else:
    raise AssertionError("Invalid deployment mode detected.")
PROD_LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
def gen_connection_string():
    """Build the SQLAlchemy MySQL URI, leaving a `_DB_NAME_` placeholder
    that callers substitute with the actual database name."""
    # On GAE (PROD/QA) we go through the Cloud SQL unix socket.
    if IS_PROD or IS_QA:
        return (
            f"mysql+mysqldb://{MYSQL_USER}:{MYSQL_PASS}@localhost:3306"
            f"/_DB_NAME_?unix_socket=/cloudsql/{MYSQL_CONNECTION_NAME}"
        )
    # Local development: default to a MySQL server on 127.0.0.1:3306,
    # with optional host/port overrides from the environment.
    user = MYSQL_USER
    password = MYSQL_PASS
    host = "127.0.0.1"
    port = 3306
    if "MYSQL_HOST" in os.environ:
        host = os.environ["MYSQL_HOST"]
    elif "MYSQL_LOCAL_PORT" in os.environ:
        port = int(os.environ["MYSQL_LOCAL_PORT"])
    if USE_REMOTE_DB_THROUGH_CLOUDSQL_PROXY:
        # Credentials and port of the locally running cloud_sql_proxy
        # override the plain local-MySQL settings.
        user = os.environ["CLOUDSQL_NAME"]
        password = os.environ["CLOUDSQL_PASS"]
        port = int(os.getenv("CLOUDSQL_PORT", "3307"))
    return f"mysql+mysqldb://{user}:{password}@{host}:{port}" "/_DB_NAME_"
# SQLAlchemy settings; the application database is named "main".
SQLALCHEMY_DATABASE_URI = gen_connection_string().replace("_DB_NAME_", "main")
SQLALCHEMY_ECHO = False
DATABASE_CONNECT_OPTIONS = {}
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection against *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for signing the data.
CSRF_SESSION_KEY = os.getenv("CSRF_SESSION_KEY", "")
# Secret key for signing cookies
SECRET_KEY = os.getenv("COOKIE_SECRET_KEY", "")
# Pattern used to recognize patch/VCS URLs (GitHub, .git/.patch suffixes,
# Mercurial hosts, gitiles "/+/" paths).
PATCH_REGEX = r".*(github\.com|\.git|\.patch|\/hg\.|\/\+\/)"
# OAuth client credentials; the inner os.getenv falls back to legacy
# variable names.
GOOGLE_CLIENT_ID = os.getenv(
    "GOOGLE_OAUTH_CONSUMER_KEY", os.getenv("OAUTH_CONSUMER_KEY", "")
)
GOOGLE_CLIENT_SECRET = os.getenv(
    "GOOGLE_OAUTH_CONSUMER_SECRET", os.getenv("OAUTH_CONSUMER_SECRET", "")
)
GITHUB_CLIENT_ID = os.getenv("GITHUB_OAUTH_CONSUMER_KEY", "")
GITHUB_CLIENT_SECRET = os.getenv("GITHUB_OAUTH_CONSUMER_SECRET", "")
# Make sure relevant properties are always set for QA and PROD.
if IS_PROD or IS_QA:
    assert len(CSRF_SESSION_KEY) > 0
    assert len(SECRET_KEY) > 0
    assert len(GOOGLE_CLIENT_ID) > 0
    assert len(GOOGLE_CLIENT_SECRET) > 0
# Emails (checked with OAuth) of admins who are allowed to make admin changes.
suggested_admins = os.getenv("APPLICATION_ADMINS", "").replace(" ", "")
APPLICATION_ADMINS: List[str] = []
if suggested_admins != "":
    APPLICATION_ADMINS = suggested_admins.split(",")
# Restrict the login to administrators only.
RESTRICT_LOGIN = os.getenv("RESTRICT_LOGIN", "true").lower() == "true"
# Registration mode can be CLOSED, INVITE_ONLY or OPEN.
REGISTRATION_MODE = os.getenv("REGISTRATION_MODE", "INVITE_ONLY").upper()
if REGISTRATION_MODE not in ["CLOSED", "INVITE_ONLY", "OPEN"]:
    raise AssertionError("Invalid REGISTRATION_MODE passed.")
AUTO_ENABLE_INVITED_USERS = True
DEMO_MODE = os.getenv("DEMO_MODE", "false").lower() == "true"
# Disable link intercepts for the Flask toolbar.
DEBUG_TB_INTERCEPT_REDIRECTS = False
# We use certificate pinning to ensure correct communication between
# components.
APP_CERT_FILE = "cert/cert.pem"
class clsproperty(property): # pylint: disable=invalid-name
    """Property readable on the class itself (used with @classmethod)."""
    def __get__(self, cls, owner):
        # self.fget is a classmethod object; bind it to the owner class
        # and call it, so `Owner.attr` yields the computed value.
        return self.fget.__get__(None, owner)() # pylint: disable=no-member
class __lazy: # pylint: disable=invalid-name
    """Holds settings that must be evaluated at access time, after any
    local_cfg overrides below have been applied."""
    @clsproperty
    @classmethod
    def root_level(cls):
        # Root log level follows the (possibly overridden) DEBUG flag.
        return logging.DEBUG if DEBUG else logging.INFO
# dictConfig-style logging setup: everything goes to stdout plus rotating
# info/error files; werkzeug gets a quieter console-only handler.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "basic",
            "level": logging.NOTSET,
            "stream": "ext://sys.stdout",
        },
        "console_mini": {
            "class": "logging.StreamHandler",
            "formatter": "minimalistic",
            "level": logging.NOTSET,
            "stream": "ext://sys.stdout",
        },
        "info_file": {
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "full",
            "filename": os.path.join(BASE_DIR, "info.log"),
            "maxBytes": 100000,
            "backupCount": 1,
        },
        "error_file": {
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "full",
            "filename": os.path.join(BASE_DIR, "error.log"),
            "maxBytes": 100000,
            "backupCount": 1,
            "level": logging.WARNING,
        },
    },
    "formatters": {
        "minimalistic": {
            "format": "%(message)s",
        },
        "basic": {
            "format": "%(levelname)-4.4s [%(name)s] %(message)s",
        },
        "full": {
            "format": "%(asctime)s - %(levelname)-4.4s [%(name)s,%(filename)s:%(lineno)d] %(message)s",
        },
    },
    "loggers": {
        # Root logger; level is resolved lazily via cfg.__lazy.root_level
        # so local_cfg overrides of DEBUG still take effect.
        "": {
            "level": "ext://cfg.__lazy.root_level",
            "handlers": ["console", "error_file", "info_file"],
        },
        "werkzeug": {
            "handlers": ["console_mini"],
            "propagate": False,
        },
    },
}
# local overrides
try:
    # pylint: disable=wildcard-import
    from local_cfg import *
    # pylint: enable=wildcard-import
except ImportError:
    # No local_cfg.py present: run with the defaults defined above.
    pass
| |
'''
Earth2012: Shape and topography (with respect to mean sea level) of Earth
expanded to degree and order 2160.
shape_air : Earth's shape (with water)
shape_bathy : Earth's shape (without water)
shape_bathy_bed : Earth's shape (without water and ice)
shape_ret : Earth's rock-equivalent topography as shape model
topo_air : Earth's surface (with water)
topo_bathy : Earth's surface (without water)
topo_bathy_bed : Earth's surface (without water and ice)
ret : Earth's rock-equivalent topography
Reference
---------
Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
isostatic evaluation of new-generation GOCE gravity field models, Journal
of Geophysical Research: Solid Earth, B05407, doi:10.1029/2011JB008878.
'''
from pooch import os_cache as _os_cache
from pooch import retrieve as _retrieve
from pooch import HTTPDownloader as _HTTPDownloader
from ....shclasses import SHCoeffs as _SHCoeffs
def shape_air(lmax=2160):
    '''
    Earth's shape (with water): Harmonic shape model of the interface between
    Earth and its atmosphere, providing radii of the terrain and ice.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    fname = _retrieve(
        url="http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.shape_air.SHCto2160.zip",  # noqa: E501
        known_hash="sha256:07b948727c96022f40375d82cb3e505732fb3e1bf72f41b1cc072445dafab059",  # noqa: E501
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('pyshtools'),
    )
    # Fixed: the name previously read 'Earth2020.shape_air', inconsistent
    # with the 'Earth2012.*' naming used by every other model here.
    return _SHCoeffs.from_file(fname, lmax=lmax, name='Earth2012.shape_air',
                               units='m')
def shape_bathy(lmax=2160):
    '''
    Earth's shape (without water): Harmonic shape model of the Earth without
    ocean water masses. This model provides radii of the terrain and ice.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    url = "http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.shape_bathy.SHCto2160.zip"  # noqa: E501
    sha = "sha256:ee03c7addf13f60efd3c928f166f83f5a9f17991c9dd74a88c6c8d9ede8bb15e"  # noqa: E501
    # Download the coefficient archive (or reuse the local pooch cache).
    archive = _retrieve(url=url, known_hash=sha,
                        downloader=_HTTPDownloader(progressbar=True),
                        path=_os_cache('pyshtools'))
    return _SHCoeffs.from_file(archive, lmax=lmax,
                               name='Earth2012.shape_bathy', units='m')
def shape_bathy_bed(lmax=2160):
    '''
    Earth's shape (without water and ice): Harmonic shape model of the Earth
    without icesheets and without ocean water masses. This model provides radii
    of the terrain over land, of the sea bed over the oceans and inland lakes
    and of bedrock heights over Antarctica and Greenland.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    url = "http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.shape_bathy_bed.SHCto2160.zip"  # noqa: E501
    sha = "sha256:66ffd58246566b4f62cc1f71145388057a4c2a1a8142f55e089e26a0b2a22d57"  # noqa: E501
    # Download the coefficient archive (or reuse the local pooch cache).
    archive = _retrieve(url=url, known_hash=sha,
                        downloader=_HTTPDownloader(progressbar=True),
                        path=_os_cache('pyshtools'))
    return _SHCoeffs.from_file(archive, lmax=lmax,
                               name='Earth2012.shape_bathy_bed', units='m')
def shape_ret(lmax=2160):
    '''
    Earth's rock-equivalent topography as shape model: Harmonic shape model of
    Earth's rock-equivalent topography.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    url = "http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.shape_RET2012.SHCto2160.zip"  # noqa: E501
    sha = "sha256:16c769df9d2c790fb62d1a1e73fd6070c5b5b663c2bd7f68fd21ad4aa8c96a2b"  # noqa: E501
    # Download the coefficient archive (or reuse the local pooch cache).
    archive = _retrieve(url=url, known_hash=sha,
                        downloader=_HTTPDownloader(progressbar=True),
                        path=_os_cache('pyshtools'))
    return _SHCoeffs.from_file(archive, lmax=lmax,
                               name='Earth2012.shape_ret', units='m')
def topo_air(lmax=2160):
    '''
    Earth's surface (with water): Harmonic model of the interface between
    Earth and its atmosphere, providing heights above mean sea level of the
    terrain and ice over land and zero heights over the oceans.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    url = "http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.topo_air.SHCto2160.zip"  # noqa: E501
    sha = "sha256:7b68053ba74246f1a755fcce05266a58ab96529b1e48309b98a0e9ba49b4ba3f"  # noqa: E501
    # Download the coefficient archive (or reuse the local pooch cache).
    archive = _retrieve(url=url, known_hash=sha,
                        downloader=_HTTPDownloader(progressbar=True),
                        path=_os_cache('pyshtools'))
    return _SHCoeffs.from_file(archive, lmax=lmax,
                               name='Earth2012.topo_air', units='m')
def topo_bathy(lmax=2160):
    '''
    Earth's surface (without water): Harmonic model of the Earth's topography
    without ocean water masses. This model provides heights of the terrain and
    of ice over land and bathymetric depths over the oceans, Caspian Sea and
    major inland lakes (Superior, Michigan, Huron, Erie, Ontario and Baikal).

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    url = "http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.topo_bathy.SHCto2160.zip"  # noqa: E501
    sha = "sha256:997a16f85dafbfd1a0ee2339f503470acea6c8cb8eecdf5f2e057904a97f9718"  # noqa: E501
    # Download the coefficient archive (or reuse the local pooch cache).
    archive = _retrieve(url=url, known_hash=sha,
                        downloader=_HTTPDownloader(progressbar=True),
                        path=_os_cache('pyshtools'))
    return _SHCoeffs.from_file(archive, lmax=lmax,
                               name='Earth2012.topo_bathy', units='m')
def topo_bathy_bed(lmax=2160):
    '''
    Earth's surface (without water and ice): Harmonic model of the Earth's
    topography without ice sheets and without ocean water masses. This model
    provides heights of the terrain over land, bathymetric depths over the
    oceans, Caspian Sea and major inland lakes, and bedrock heights over
    Antarctica and Greenland.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    url = "http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.topo_bathy_bed.SHCto2160.zip"  # noqa: E501
    sha = "sha256:f8b0535a76de11de767d0ebb6c032f5983ac48ad29d1750b0d22e15a627f88e1"  # noqa: E501
    # Download the coefficient archive (or reuse the local pooch cache).
    archive = _retrieve(url=url, known_hash=sha,
                        downloader=_HTTPDownloader(progressbar=True),
                        path=_os_cache('pyshtools'))
    return _SHCoeffs.from_file(archive, lmax=lmax,
                               name='Earth2012.topo_bathy_bed', units='m')
def ret(lmax=2160):
    '''
    Earth's rock-equivalent topography: Harmonic model of Earth's
    rock-equivalent topography.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Hirt, C., Kuhn, M., Featherstone, W.E., Goettl, F. (2012). Topographic/
    isostatic evaluation of new-generation GOCE gravity field models,
    Journal of Geophysical Research: Solid Earth, B05407,
    doi:10.1029/2011JB008878.
    '''
    url = "http://ddfe.curtin.edu.au/gravitymodels/Earth2012/topo_shape_to2160/Earth2012.RET2012.SHCto2160.zip"  # noqa: E501
    sha = "sha256:36b3204d86fa01fa9e8f693e2df8e91905b67619a0192cc0c269be21a7ba5799"  # noqa: E501
    # Download the coefficient archive (or reuse the local pooch cache).
    archive = _retrieve(url=url, known_hash=sha,
                        downloader=_HTTPDownloader(progressbar=True),
                        path=_os_cache('pyshtools'))
    return _SHCoeffs.from_file(archive, lmax=lmax, name='Earth2012.ret',
                               units='m')
# Public API of this dataset module.
__all__ = ['shape_air', 'shape_bathy', 'shape_bathy_bed', 'shape_ret',
           'topo_air', 'topo_bathy', 'topo_bathy_bed', 'ret']
| |
######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# The pipeline assumes all files for a single dataset are located in
# the current directory, and that this directory contains only files
# relating to this dataset.
logprint("Starting EVLA_pipe_startup.py", logfileout='logs/startup.log')
# NOTE(review): `svnrevision` (like `logprint`) is presumably injected by
# the pipeline driver before this script is execfile'd -- confirm.
logprint("SVN revision " + svnrevision, logfileout='logs/startup.log')
import os
import subprocess as sp
import commands
import numpy as np
import re
import time
from time import gmtime, strftime
import casa
import sys
from math import sin, cos, acos, fabs, pi, e, log10
import scipy as scp
import scipy.optimize as scpo
import pickle
import shutil
import shelve
import copy
import string
def interrupt(message=''):
    """Exit if interrupted

    NOTE(review): despite the name and docstring this currently only logs
    'Keyboard Interrupt'; it does not exit and `message` is unused --
    confirm the intended behavior.
    """
    logprint('Keyboard Interrupt')
def pipeline_save(shelf_filename='pipeline_shelf.restore'):
    '''Save the state of the pipeline to a shelve file.

    Persists every global whose name is listed (one per line) in
    EVLA_pipe_restore.list so the pipeline can be restarted later.
    '''
    if not os.path.exists(shelf_filename):
        pipe_shelf = shelve.open(shelf_filename, 'n')
    else:
        pipe_shelf = shelve.open(shelf_filename)
    # Previously `keys` was only assigned inside the try, so a failure to
    # read the restore list caused a NameError below; now we fall through
    # with an empty list. (`except Exception as e` is valid in both
    # Python 2.6+ and Python 3, unlike the old `except Exception, e`.)
    keys = []
    try:
        keys = [k for k in
                open(pipepath + 'EVLA_pipe_restore.list').read().split('\n')
                if k]
    except Exception as e:
        logprint("Problem with opening keys for pipeline restart: " + str(e))
    for key in keys:
        try:
            pipe_shelf[key] = globals()[key]
            key_status = True
        except Exception:
            # Variable missing from globals() or not picklable; skip it.
            key_status = False
    pipe_shelf.close()
logprint("EVLA prototype pipeline reduction", 'logs/startup.log')
logprint("version " + version + " created on " + date, 'logs/startup.log')
logprint("running from path: " + pipepath, 'logs/startup.log')
# Include functions:
# selectReferenceAntenna
# uniq
execfile(pipepath + 'EVLA_functions.py')
execfile(pipepath + 'lib_EVLApipeutils.py')
# File names
#
# if SDM_name is already defined, then assume it holds the SDM directory
# name, otherwise, read it in from stdin
#
SDM_name_already_defined = 1
try:
    SDM_name
except NameError:
    SDM_name_already_defined = 0
    SDM_name = raw_input("Enter SDM file name: ")
# Trap for '.ms', just in case, also for directory slash if present:
SDM_name = SDM_name.rstrip('/')
if SDM_name.endswith('.ms'):
    SDM_name = SDM_name[:-3]
msname = SDM_name + '.ms'
# this is terribly non-robust. should really trap all the inputs from
# the automatic pipeline (the root directory and the relative paths).
# and also make sure that 'rawdata' only occurs once in the string.
# but for now, take the quick and easy route.
if (SDM_name_already_defined):
    msname = msname.replace('rawdata', 'working')
# Keep prompting until either the SDM or the measurement set exists.
if not os.path.isdir(msname):
    while not os.path.isdir(SDM_name) and not os.path.isdir(msname):
        print SDM_name + " is not a valid SDM directory"
        SDM_name = raw_input(
            "Re-enter a valid SDM directory (without '.ms'): ")
        SDM_name = SDM_name.rstrip('/')
        if SDM_name.endswith('.ms'):
            SDM_name = SDM_name[:-3]
        msname = SDM_name + '.ms'
# Derived measurement-set names for Hanning-smoothed / spectrally
# averaged data.
mshsmooth = SDM_name + '.hsmooth.ms'
if (SDM_name_already_defined):
    mshsmooth = mshsmooth.replace('rawdata', 'working')
ms_spave = SDM_name + '.spave.ms'
if (SDM_name_already_defined):
    ms_spave = ms_spave.replace('rawdata', 'working')
logprint("SDM used is: " + SDM_name, logfileout='logs/startup.log')
# Other inputs:
# Ask if a a real model column should be created, or the virtual model
# should be used
mymodel_already_set = 1
try:
    mymodel
except NameError:
    mymodel_already_set = 0
    mymodel = raw_input("Create the real model column (y/n): ")
if mymodel == "y":
    scratch = True
else:
    scratch = False
myHanning_already_set = 1
try:
    myHanning
except NameError:
    myHanning_already_set = 0
    myHanning = raw_input("Hanning smooth the data (y/n): ")
try:
    test_imaging
    imaging_sources
except NameError:
    test_imaging = \
        True if raw_input("Perform test imaging (y/n): ") == 'y' else False
    if test_imaging:
        sources = raw_input("Field name(s) to image (will image all "
                            "containing that name): ")
        # Remove whitespaces then split by commas
        imaging_sources = sources.replace(" ", "").split(",")
    else:
        imaging_sources = ""
# if myHanning=="y":
#     ms_active=mshsmooth
# else:
#     ms_active=msname
ms_active = msname
# and the auxiliary information
try:
    projectCode
except NameError:
    projectCode = 'Unknown'
try:
    piName
except NameError:
    piName = 'Unknown'
try:
    piGlobalId
except NameError:
    piGlobalId = 'Unknown'
try:
    observeDateString
except NameError:
    observeDateString = 'Unknown'
try:
    pipelineDateString
except NameError:
    pipelineDateString = 'Unknown'
# For now, use same ms name for Hanning smoothed data, for speed.
# However, we only want to smooth the data the first time around, we do
# not want to do more smoothing on restarts, so note that this parameter
# is reset to "n" after doing the smoothing in EVLA_pipe_hanning.py.
logprint("Finished EVLA_pipe_startup.py", logfileout='logs/startup.log')
| |
"""Test Cases Resource functions."""
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List
from google.cloud.dialogflowcx_v3beta1 import services
from google.cloud.dialogflowcx_v3beta1 import types
from google.protobuf import field_mask_pb2
from dfcx_scrapi.core.scrapi_base import ScrapiBase
# logging config
# NOTE(review): basicConfig at import time configures the root logger of
# any embedding application as well -- confirm this is intended.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)-8s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
class TestCases(ScrapiBase):
"""Core Class for CX Test Cases."""
    def __init__(
        self,
        creds_path: str = None,
        creds_dict: Dict = None,
        creds=None,
        scope=False,
        agent_id: str = None,
        test_case_id: str = None,
    ):
        """Initialize the Test Cases client; credential handling is
        delegated to ScrapiBase. Optional default agent_id/test_case_id
        are stored for use when method arguments are omitted."""
        super().__init__(
            creds_path=creds_path,
            creds_dict=creds_dict,
            creds=creds,
            scope=scope,
        )
        if agent_id:
            self.agent_id = agent_id
            self.client_options = self._set_region(self.agent_id)
        if test_case_id:
            self.test_case_id = test_case_id
            # NOTE(review): when both agent_id and test_case_id are given,
            # this overwrites the client_options derived from agent_id --
            # confirm that is intended.
            self.client_options = self._set_region(self.test_case_id)
def list_test_cases(self, agent_id: str = None):
"""List test cases from an agent.
Args:
agent_id: The agent to list all pages for.
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>`
Returns:
response: list of test cases from an agent.
"""
if not agent_id:
agent_id = self.agent_id
request = types.test_case.ListTestCasesRequest()
request.parent = agent_id
client_options = self._set_region(agent_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.list_test_cases(request)
test_cases = []
for page in response.pages:
for test_case in page.test_cases:
test_cases.append(test_case)
return test_cases
def export_test_cases(
self,
gcs_uri: str,
agent_id: str = None,
data_format: str = None,
data_filter: str = None,
):
"""Export test cases from an agent to cloud storage
Args:
gcs_uri:
The GCS URI to export the test cases to. The format of this URI
must be `gs://<bucket-name>/<object-name>`. If unspecified,
the serialized test cases is returned inline.
agent_id: The agent to export test cases from.
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>`
data_format:
The data format of the exported test cases. If not specified,
`BLOB` is assumed.
data_filter:
The filter expression used to filter exported test cases, see
`API Filtering <https://aip.dev/160>`__. The expression is case
insensitive and supports the following syntax:
name = [OR name = ] ...
For example:
- "name = t1 OR name = t2" matches the test case with the
exact resource name "t1" or "t2".
Returns:
response: long running operation for export
"""
if not agent_id:
agent_id = self.agent_id
request = types.test_case.ExportTestCasesRequest()
request.parent = agent_id
request.gcs_uri = gcs_uri
request.data_format = data_format
request.filter = data_filter
client_options = self._set_region(agent_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.export_test_cases(request)
return response
def create_test_case(self, test_case: types.TestCase, agent_id: str = None):
"""Create a new Test Case in the specified CX Agent.
Args:
test_case: The test case to create.
agent_id: The agent to create the test case for. Format:
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>`
Returns:
response: test case which was created
"""
if not agent_id:
agent_id = self.agent_id
request = types.test_case.CreateTestCaseRequest()
request.parent = agent_id
request.test_case = test_case
client_options = self._set_region(agent_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.create_test_case(request)
return response
def get_test_case(self, test_case_id: str):
"""Get test case object from CX Agent.
Args:
test_case_id: The name of the test case. Format:
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/
testCases/<TestCase ID>`
Returns:
response: test case
"""
request = types.test_case.GetTestCaseRequest()
request.name = test_case_id
client_options = self._set_region(test_case_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.get_test_case(request)
return response
def import_test_cases(self, gcs_uri: str, agent_id: str = None):
"""Import test cases from cloud storage.
Args:
gcs_uri: The GCS URI to import test cases from. The format of this
URI must be `gs://<bucket-name>/<object-name>`
agent_id: The agent to import test cases to. Format:
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>`
Returns:
response: long running operation for importing test cases.
"""
if not agent_id:
agent_id = self.agent_id
request = types.test_case.ImportTestCasesRequest()
request.parent = agent_id
request.gcs_uri = gcs_uri
client_options = self._set_region(agent_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.import_test_cases(request)
result = response.result()
return result
def batch_delete_test_cases(
self,
test_case_ids: List[str],
agent_id: str = None):
"""Delete a set of test cases from an agent.
Args:
test_case_id: List of test case names in the following format:
`projects/<Project ID>/locations/ <Location ID>/agents/<AgentID>/
testCases/<TestCase ID>`
agent_id: The agent to delete test cases from. Format:
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>`.
Returns:
response: deleted test cases
"""
if not agent_id:
agent_id = self.agent_id
request = types.test_case.BatchDeleteTestCasesRequest()
request.parent = agent_id
request.names = test_case_ids
client_options = self._set_region(agent_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
client.batch_delete_test_cases(request)
def list_test_case_results(self, test_case_id: str):
"""List the results from a specific Test Case.
Args:
test_case_id: The test case to list results for. Format:
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/
testCases/<TestCase ID>`
NOTE: Specify a ``-`` as a wildcard for TestCase ID to list
results across multiple test cases.
Returns:
response: List of test case results
"""
request = types.test_case.ListTestCaseResultsRequest()
request.parent = test_case_id
client_options = self._set_region(test_case_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.list_test_case_results(request)
test_case_results = []
for page in response.pages:
for result in page.test_case_results:
test_case_results.append(result)
return test_case_results
def batch_run_test_cases(
self,
test_cases: List[str],
agent_id: str = None,
environment: str = None):
"""Run a set of test cases to get their latest results.
Args:
test_cases: List of Test Case IDs in the following format:
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/
testCases/<TestCase ID>`
agent_id: The CX Agent ID to run the Test Cases on.
`projects/<Project ID>/locations/<Location ID>/agents/<AgentID>`
environment: If not set, draft environment is assumed. Format:
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/
environments/<Environment ID>`
Returns:
response: results for the set of run test cases.
"""
if not agent_id:
agent_id = self.agent_id
request = types.test_case.BatchRunTestCasesRequest()
request.parent = agent_id
request.environment = environment
request.test_cases = test_cases
client_options = self._set_region(agent_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.batch_run_test_cases(request)
results = response.result()
return results
def update_test_case(
self,
test_case_id: str = None,
obj: types.TestCase = None,
**kwargs) -> types.TestCase:
"""Update Test Case attributes for a specified Test Case.
Args:
test_case_id: The Test Case ID to update.
obj: The Test Case obj of types.TestCase to use for the update.
Returns:
response: updated Test Case.
"""
if obj:
test_case = obj
test_case.name = test_case_id
else:
if not test_case_id:
test_case_id = self.test_case_id
test_case = self.get_test_case(test_case_id)
for key, value in kwargs.items():
setattr(test_case, key, value)
paths = kwargs.keys()
mask = field_mask_pb2.FieldMask(paths=paths)
request = types.test_case.UpdateTestCaseRequest()
request.test_case = test_case
request.update_mask = mask
client_options = self._set_region(test_case_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.update_test_case(request)
return response
def run_test_case(self, test_case_id: str, environment: str = None):
"""Run test case and get result for a specified test case.
Args:
test_case_id: Test Case ID in the following format:
`projects/<Project ID>/locations/ <Location ID>/agents/<AgentID>/
testCases/<TestCase ID>`
environment: The CX Environment name. If not set, DRAFT environment
is assumed. Format: `projects/<Project ID>/locations/<Location ID>/
agents/<Agent ID>/environments/<Environment ID>`
Returns:
response: test case result.
"""
request = types.test_case.RunTestCaseRequest()
request.name = test_case_id
request.environment = environment
client_options = self._set_region(test_case_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.run_test_case(request)
results = response.result()
return results
def get_test_case_result(self, test_case_result_id: str):
"""Get test case result for a specified run on a specified test case.
Args:
test_case_result_id: The Test Case Result ID to retrieve.
projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/
testCases/<TestCase ID>/results/<TestCaseResult ID>
Returns:
response: test case result.
"""
request = types.test_case.GetTestCaseResultRequest()
request.name = test_case_result_id
client_options = self._set_region(test_case_result_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.get_test_case_result(request)
return response
def calculate_coverage(self, coverage_type: int, agent_id: str = None):
"""Calculate coverage of different resources in the test case set.
Args:
coverage_type: The type of coverage requested.
INTENT = 1
PAGE_TRANSITION = 2
TRANSITION_ROUTE_GROUP = 3
agent: The CX agent to calculate coverage for.
`projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>`
Returns:
response: Coverage of the test cases for the type specified.
"""
if not agent_id:
agent_id = self.agent_id
if coverage_type not in [1, 2, 3]:
raise ValueError(
f"Invalid coverage_type: {coverage_type}. coverage_type must "
"be must be 1, 2 or 3"
)
request = types.test_case.CalculateCoverageRequest()
request.agent = agent_id
request.type_ = coverage_type
client_options = self._set_region(agent_id)
client = services.test_cases.TestCasesClient(
credentials=self.creds, client_options=client_options
)
response = client.calculate_coverage(request)
return response
| |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import jmespath
import json
import six
from botocore.exceptions import ClientError
from botocore.paginate import Paginator
from concurrent.futures import as_completed
from c7n.actions import BaseAction, RemovePolicyBase, ModifyVpcSecurityGroupsAction
from c7n.filters import CrossAccountAccessFilter, ValueFilter
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n import query
from c7n.resources.iam import CheckPermissions
from c7n.tags import universal_augment
from c7n.utils import local_session, type_schema
# AWS error code raised when the caller lacks permission for a Lambda API call.
ErrAccessDenied = "AccessDeniedException"
@resources.register('lambda')
class AWSLambda(query.QueryResourceManager):
    """Resource manager for AWS Lambda functions."""

    class resource_type(query.TypeInfo):
        # Declarative metadata consumed by c7n's query framework.
        service = 'lambda'
        arn_type = 'function'
        arn_separator = ":"
        enum_spec = ('list_functions', 'Functions', None)
        name = id = 'FunctionName'
        date = 'LastModified'
        dimension = 'FunctionName'
        config_type = "AWS::Lambda::Function"
        # Marker object consumed by c7n's universal (resource-group) tagging.
        universal_taggable = object()

    def get_source(self, source_type):
        """Return the describe- or config-backed source implementation."""
        if source_type == 'describe':
            return DescribeLambda(self)
        elif source_type == 'config':
            return ConfigLambda(self)
        raise ValueError("Unsupported source: %s for %s" % (
            source_type, self.resource_type.config_type))

    def get_resources(self, ids, cache=True, augment=False):
        # Default augment to False: fetching by id already yields full
        # records, so the extra augmentation pass is skipped.
        return super(AWSLambda, self).get_resources(ids, cache, augment)
class DescribeLambda(query.DescribeSource):
    """Describe-API source: fetches functions and applies universal tags."""

    def augment(self, resources):
        # Layer the universal (resource-group tagging) augmentation on top
        # of the base describe augmentation.
        base = super(DescribeLambda, self).augment(resources)
        return universal_augment(self.manager, base)

    def get_resources(self, ids):
        """Fetch functions by id, flattening 'Configuration' to top level."""
        client = local_session(self.manager.session_factory).client('lambda')
        found = []
        for function_id in ids:
            try:
                info = self.manager.retry(
                    client.get_function, FunctionName=function_id)
            except client.exceptions.ResourceNotFoundException:
                # Deleted between listing and fetch - skip it.
                continue
            # get_function nests the function record under 'Configuration';
            # merge the remaining top-level keys alongside it.
            flattened = info.pop('Configuration')
            flattened.update(info)
            found.append(flattened)
        return found
class ConfigLambda(query.ConfigSource):
    """AWS Config source: normalizes tag and policy supplementary data."""

    def load_resource(self, item):
        resource = super(ConfigLambda, self).load_resource(item)
        supplementary = item['supplementaryConfiguration']
        # Config stores tags as a plain mapping; convert to the
        # [{'Key': ..., 'Value': ...}] shape used throughout c7n.
        resource['Tags'] = [
            {u'Key': key, u'Value': value}
            for key, value in supplementary.get('Tags', {}).items()]
        resource['c7n:Policy'] = supplementary.get('Policy')
        return resource
@AWSLambda.filter_registry.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
    # JMESPath to the security-group ids of a VPC-attached function.
    RelatedIdsExpression = "VpcConfig.SecurityGroupIds[]"
@AWSLambda.filter_registry.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    # JMESPath to the subnet ids of a VPC-attached function.
    RelatedIdsExpression = "VpcConfig.SubnetIds[]"
@AWSLambda.filter_registry.register('vpc')
class VpcFilter(net_filters.VpcFilter):
    # JMESPath to the VPC id of a VPC-attached function.
    RelatedIdsExpression = "VpcConfig.VpcId"

# Expose the generic network-location filter for lambda as well.
AWSLambda.filter_registry.register('network-location', net_filters.NetworkLocation)
@AWSLambda.filter_registry.register('check-permissions')
class LambdaPermissions(CheckPermissions):
    """check-permissions filter keyed off each function's execution role."""

    def get_iam_arns(self, resources):
        # Each function record names its execution role arn under 'Role'.
        return [function['Role'] for function in resources]
@AWSLambda.filter_registry.register('reserved-concurrency')
class ReservedConcurrency(ValueFilter):
    """Value filter over a function's reserved concurrent executions."""

    # Annotation key for the extra get_function payload on each resource.
    annotation_key = "c7n:FunctionInfo"
    # Default jmespath key pointing into the annotated payload.
    value_key = '"c7n:FunctionInfo".Concurrency.ReservedConcurrentExecutions'
    schema = type_schema('reserved-concurrency', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ('lambda:GetFunction',)

    def validate(self):
        # Force the filter key to the concurrency annotation path.
        self.data['key'] = self.value_key
        return super(ReservedConcurrency, self).validate()

    def process(self, resources, event=None):
        self.data['key'] = self.value_key
        client = local_session(self.manager.session_factory).client('lambda')

        def _augment(r):
            # Fetch and annotate the full function info (incl. concurrency).
            try:
                r[self.annotation_key] = self.manager.retry(
                    client.get_function, FunctionName=r['FunctionArn'])
                r[self.annotation_key].pop('ResponseMetadata')
            except ClientError as e:
                if e.response['Error']['Code'] == ErrAccessDenied:
                    self.log.warning(
                        "Access denied getting lambda:%s",
                        r['FunctionName'])
                # Re-raise so failures surface instead of silently
                # dropping the resource.
                raise
            return r

        with self.executor_factory(max_workers=3) as w:
            resources = list(filter(None, w.map(_augment, resources)))
        return super(ReservedConcurrency, self).process(resources, event)
def get_lambda_policies(client, executor_factory, resources, log):
    """Annotate each function with its resource policy under 'c7n:Policy'.

    Args:
      client: boto3 lambda client (requires lambda:GetPolicy).
      executor_factory: callable(max_workers=N) returning a context-manager
        executor (e.g. ThreadPoolExecutor).
      resources: function resource dicts; entries already carrying
        'c7n:Policy' are passed through without another API call.
      log: logger used for access-denied and failure warnings.

    Returns:
      Iterator over the annotated resources. Functions with no policy
      attached (ResourceNotFoundException) are dropped; access-denied
      functions pass through without the annotation.
    """
    def _augment(r):
        try:
            r['c7n:Policy'] = client.get_policy(
                FunctionName=r['FunctionName'])['Policy']
        except client.exceptions.ResourceNotFoundException:
            # No policy attached - drop the resource from the result set.
            return None
        except ClientError as e:
            if e.response['Error']['Code'] == 'AccessDeniedException':
                log.warning(
                    "Access denied getting policy lambda:%s",
                    r['FunctionName'])
        return r

    results = []
    futures = {}
    with executor_factory(max_workers=3) as w:
        for r in resources:
            if 'c7n:Policy' in r:
                results.append(r)
                continue
            futures[w.submit(_augment, r)] = r

        for f in as_completed(futures):
            # BUG FIX: resolve the submitting resource *before* logging.
            # Previously the warning read `r` left over from the submission
            # loop, attributing the failure to the wrong function.
            r = futures[f]
            if f.exception():
                log.warning("Error getting policy for:%s err:%s",
                            r['FunctionName'], f.exception())
                continue
            results.append(f.result())

    return filter(None, results)
@AWSLambda.filter_registry.register('event-source')
class LambdaEventSource(ValueFilter):
    # This derives event sources from the function's IAM resource policy;
    # the event source mapping api would arguably be a better basis.
    annotation_key = "c7n:EventSources"
    schema = type_schema('event-source', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ('lambda:GetPolicy',)

    def process(self, resources, event=None):
        """Annotate resources with their policies, then value-filter them."""
        client = local_session(self.manager.session_factory).client('lambda')
        self.log.debug("fetching policy for %d lambdas" % len(resources))
        annotated = get_lambda_policies(
            client, self.executor_factory, resources, self.log)
        self.data['key'] = self.annotation_key
        return super(LambdaEventSource, self).process(annotated, event)

    def __call__(self, r):
        if 'c7n:Policy' not in r:
            return False
        statements = json.loads(r['c7n:Policy']).get('Statement', ())
        # Service principals on Allow statements are the event sources.
        sources = {
            s['Principal']['Service']
            for s in statements
            if s['Effect'] == 'Allow' and 'Service' in s['Principal']}
        if sources:
            r[self.annotation_key] = list(sources)
        return self.match(r)
@AWSLambda.filter_registry.register('cross-account')
class LambdaCrossAccountAccessFilter(CrossAccountAccessFilter):
    """Filters lambda functions with cross-account permissions

    The whitelist parameter can be used to prevent certain accounts
    from being included in the results (essentially stating that these
    accounts permissions are allowed to exist)

    This can be useful when combining this filter with the delete action.

    :example:

    .. code-block:: yaml

            policies:
              - name: lambda-cross-account
                resource: lambda
                filters:
                  - type: cross-account
                    whitelist:
                      - 'IAM-Policy-Cross-Account-Access'
    """
    permissions = ('lambda:GetPolicy',)
    # The filter base class reads the policy document from this annotation.
    policy_attribute = 'c7n:Policy'

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('lambda')
        self.log.debug("fetching policy for %d lambdas" % len(resources))
        # Annotate each function with its resource policy before matching.
        resources = get_lambda_policies(
            client, self.executor_factory, resources, self.log)
        return super(LambdaCrossAccountAccessFilter, self).process(
            resources, event)
@AWSLambda.action_registry.register('remove-statements')
class RemovePolicyStatement(RemovePolicyBase):
    """Action to remove policy/permission statements from lambda functions.

    :example:

    .. code-block:: yaml

            policies:
              - name: lambda-remove-cross-accounts
                resource: lambda
                filters:
                  - type: cross-account
                actions:
                  - type: remove-statements
                    statement_ids: matched
    """
    schema = type_schema(
        'remove-statements',
        required=['statement_ids'],
        statement_ids={'oneOf': [
            {'enum': ['matched']},
            {'type': 'array', 'items': {'type': 'string'}}]})

    permissions = ("lambda:GetPolicy", "lambda:RemovePermission")

    def process(self, resources):
        # NOTE(review): process_resource never returns a truthy value, so
        # `results` stays empty and process() always returns [] - confirm
        # whether process_resource should return `found`.
        results = []
        client = local_session(self.manager.session_factory).client('lambda')
        for r in resources:
            try:
                if self.process_resource(client, r):
                    results.append(r)
            except Exception:
                # Keep going on per-function failures; record them in the log.
                self.log.exception(
                    "Error processing lambda %s", r['FunctionArn'])
        return results

    def process_resource(self, client, resource):
        # Fetch and annotate the policy unless a cross-account filter
        # already did so.
        if 'c7n:Policy' not in resource:
            try:
                resource['c7n:Policy'] = client.get_policy(
                    FunctionName=resource['FunctionName']).get('Policy')
            except ClientError as e:
                if e.response['Error']['Code'] != ErrAccessDenied:
                    raise
                resource['c7n:Policy'] = None

        if not resource['c7n:Policy']:
            return

        p = json.loads(resource['c7n:Policy'])

        # Remove either the filter-matched statements or the named ones.
        statements, found = self.process_policy(
            p, resource, CrossAccountAccessFilter.annotation_key)

        if not found:
            return

        for f in found:
            client.remove_permission(
                FunctionName=resource['FunctionName'],
                StatementId=f['Sid'])
@AWSLambda.action_registry.register('set-concurrency')
class SetConcurrency(BaseAction):
    """Set lambda function concurrency to the desired level.

    Can be used to set the reserved function concurrency to an exact value,
    to delete reserved concurrency, or to set the value to an attribute of
    the resource.
    """

    schema = type_schema(
        'set-concurrency',
        required=('value',),
        **{'expr': {'type': 'boolean'},
           'value': {'oneOf': [
               {'type': 'string'},
               {'type': 'integer'},
               {'type': 'null'}]}})

    permissions = ('lambda:DeleteFunctionConcurrency',
                   'lambda:PutFunctionConcurrency')

    def validate(self):
        # When 'expr' is set, 'value' must be a jmespath expression string.
        if self.data.get('expr', False) and not isinstance(self.data['value'], six.text_type):
            raise ValueError("invalid value expression %s" % self.data['value'])
        return self

    def process(self, functions):
        client = local_session(self.manager.session_factory).client('lambda')
        is_expr = self.data.get('expr', False)
        value = self.data['value']
        if is_expr:
            value = jmespath.compile(value)

        none_type = type(None)

        for function in functions:
            fvalue = value
            if is_expr:
                fvalue = value.search(function)
                # jmespath yields numbers as floats; the API wants ints.
                if isinstance(fvalue, float):
                    fvalue = int(fvalue)

            # BUG FIX: validate the resolved per-function value. Previously
            # this tested `value` with inverted logic, which warned on and
            # skipped every statically configured int/None value.
            if not isinstance(fvalue, (int, none_type)):
                self.policy.log.warning(
                    "Function: %s Invalid expression value for concurrency: %s",
                    function['FunctionName'], fvalue)
                continue

            if fvalue is None:
                # A null value removes the reservation entirely.
                client.delete_function_concurrency(
                    FunctionName=function['FunctionName'])
            else:
                client.put_function_concurrency(
                    FunctionName=function['FunctionName'],
                    ReservedConcurrentExecutions=fvalue)
@AWSLambda.action_registry.register('delete')
class Delete(BaseAction):
    """Delete a lambda function (including aliases and older versions).

    :example:

    .. code-block:: yaml

            policies:
              - name: lambda-delete-dotnet-functions
                resource: lambda
                filters:
                  - Runtime: dotnetcore1.0
                actions:
                  - delete
    """
    schema = type_schema('delete')
    permissions = ("lambda:DeleteFunction",)

    def process(self, functions):
        client = local_session(self.manager.session_factory).client('lambda')
        for function in functions:
            try:
                client.delete_function(FunctionName=function['FunctionName'])
            except ClientError as e:
                if e.response['Error']['Code'] != "ResourceNotFoundException":
                    raise
                # Already gone - treat as success.
        self.log.debug("Deleted %d functions", len(functions))
@AWSLambda.action_registry.register('modify-security-groups')
class LambdaModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
    """Swap the security groups on VPC-attached lambda functions."""

    permissions = ("lambda:UpdateFunctionConfiguration",)

    def process(self, functions):
        client = local_session(self.manager.session_factory).client('lambda')
        groups = super(LambdaModifyVpcSecurityGroups, self).get_groups(
            functions)

        for idx, function in enumerate(functions):
            # Only VPC-enabled functions carry security groups.
            if 'VpcConfig' not in function:
                continue
            try:
                client.update_function_configuration(
                    FunctionName=function['FunctionName'],
                    VpcConfig={'SecurityGroupIds': groups[idx]})
            except client.exceptions.ResourceNotFoundException:
                continue
@resources.register('lambda-layer')
class LambdaLayerVersion(query.QueryResourceManager):
    """Note custodian models the lambda layer version.

    Layers end up being a logical asset, the physical asset for use
    and management is the layer version.

    To ease that distinction, we support querying just the latest
    layer version or having a policy against all layer versions.

    By default we query all versions, the following is an example
    to query just the latest.

    .. code-block:: yaml

        policies:
          - name: lambda-layer
            resource: lambda-layer
            query:
              - version: latest
    """

    class resource_type(query.TypeInfo):
        service = 'lambda'
        enum_spec = ('list_layers', 'Layers', None)
        name = id = 'LayerName'
        date = 'CreatedDate'
        arn = "LayerVersionArn"
        arn_type = "layer"

    def augment(self, resources):
        versions = {}
        for r in resources:
            # list_layers returns one record per layer carrying only its
            # latest version; flatten that version to the resource level.
            versions[r['LayerName']] = v = r['LatestMatchingVersion']
            v['LayerName'] = r['LayerName']
        if {'version': 'latest'} in self.data.get('query', []):
            return list(versions.values())
        # Otherwise expand to every version of every layer.
        layer_names = list(versions)
        client = local_session(self.session_factory).client('lambda')
        versions = []
        for layer_name in layer_names:
            pager = get_layer_version_paginator(client)
            for v in pager.paginate(
                LayerName=layer_name).build_full_result().get('LayerVersions'):
                v['LayerName'] = layer_name
                versions.append(v)
        return versions
def get_layer_version_paginator(client):
    """Build a ListLayerVersions paginator by hand.

    botocore ships no paginator for this operation, so one is constructed
    from the operation model, using c7n's retry-aware page iterator.
    """
    pager = Paginator(
        client.list_layer_versions,
        {'input_token': 'NextToken',
         'output_token': 'NextToken',
         'result_key': 'LayerVersions'},
        client.meta.service_model.operation_model('ListLayerVersions'))
    pager.PAGE_ITERATOR_CLS = query.RetryPageIterator
    return pager
@LambdaLayerVersion.filter_registry.register('cross-account')
class LayerCrossAccount(CrossAccountAccessFilter):
    """Flag layer versions whose policy grants other accounts access."""

    permissions = ('lambda:GetLayerVersionPolicy',)

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('lambda')
        for resource in resources:
            # Annotate each version with its policy document string.
            policy = self.manager.retry(
                client.get_layer_version_policy,
                LayerName=resource['LayerName'],
                VersionNumber=resource['Version']).get('Policy')
            resource['c7n:Policy'] = policy
        return super(LayerCrossAccount, self).process(resources)

    def get_resource_policy(self, r):
        # The policy was annotated during process().
        return r['c7n:Policy']
@LambdaLayerVersion.action_registry.register('remove-statements')
class LayerRemovePermissions(RemovePolicyBase):
    """Remove statements from a layer version's resource policy."""

    schema = type_schema(
        'remove-statements',
        required=['statement_ids'],
        statement_ids={'oneOf': [
            {'enum': ['matched']},
            {'type': 'array', 'items': {'type': 'string'}}]})

    permissions = (
        "lambda:GetLayerVersionPolicy",
        "lambda:RemoveLayerVersionPermission")

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('lambda')
        for r in resources:
            self.process_resource(client, r)

    def process_resource(self, client, r):
        if 'c7n:Policy' not in r:
            try:
                # BUG FIX: annotate the policy *document* string, not the
                # whole API response dict - json.loads below requires a
                # string (consistent with LayerCrossAccount and
                # RemovePolicyStatement).
                r['c7n:Policy'] = self.manager.retry(
                    client.get_layer_version_policy,
                    LayerName=r['LayerName'],
                    VersionNumber=r['Version']).get('Policy')
            except client.exceptions.ResourceNotFoundException:
                # BUG FIX: boto3's lambda client exposes
                # ResourceNotFoundException (as used elsewhere in this
                # module); 'ResourceNotFound' raised AttributeError instead
                # of catching the missing-policy case.
                return

        p = json.loads(r['c7n:Policy'])

        statements, found = self.process_policy(
            p, r, CrossAccountAccessFilter.annotation_key)

        if not found:
            return

        for f in found:
            self.manager.retry(
                client.remove_layer_version_permission,
                LayerName=r['LayerName'],
                StatementId=f['Sid'],
                VersionNumber=r['Version'])
@LambdaLayerVersion.action_registry.register('delete')
class DeleteLayerVersion(BaseAction):
    """Delete layer versions; missing versions are ignored."""

    schema = type_schema('delete')
    permissions = ('lambda:DeleteLayerVersion',)

    def process(self, resources):
        client = local_session(
            self.manager.session_factory).client('lambda')

        for r in resources:
            try:
                self.manager.retry(
                    client.delete_layer_version,
                    LayerName=r['LayerName'],
                    VersionNumber=r['Version'])
            except client.exceptions.ResourceNotFoundException:
                # BUG FIX: boto3's lambda client exposes
                # ResourceNotFoundException (as used elsewhere in this
                # module); 'ResourceNotFound' raised AttributeError instead
                # of catching the already-deleted case.
                continue
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cairis.core.ARM import *
from cairis.core.AttackerEnvironmentProperties import AttackerEnvironmentProperties
from cairis.daemon.CairisHTTPError import ARMHTTPError, ObjectNotFoundHTTPError, MalformedJSONHTTPError, MissingParameterHTTPError, \
OverwriteNotAllowedHTTPError
from cairis.core.Attacker import Attacker
from cairis.core.AttackerParameters import AttackerParameters
from cairis.core.ValueType import ValueType
from cairis.core.ValueTypeParameters import ValueTypeParameters
from cairis.data.CairisDAO import CairisDAO
from cairis.tools.JsonConverter import json_serialize, json_deserialize
from cairis.tools.ModelDefinitions import AttackerModel, AttackerEnvironmentPropertiesModel
from cairis.tools.SessionValidator import check_required_keys
__author__ = 'Robin Quetin, Shamal Faily'
class AttackerDAO(CairisDAO):
  def __init__(self, session_id):
    """Bind the DAO to a CAIRIS session, scoped to 'attacker' objects."""
    CairisDAO.__init__(self, session_id, 'attacker')
  def get_objects(self, constraint_id=-1, simplify=True):
    """Return attackers keyed by name; constraint_id of -1 means all.

    Database/model errors close the session and surface as ARMHTTPError.
    """
    try:
      attackers = self.db_proxy.getAttackers(constraint_id)
    except DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)

    if simplify:
      # Strip server-side detail so objects serialize cleanly for the API.
      for key, value in list(attackers.items()):
        attackers[key] = self.simplify(value)

    return attackers
def get_objects_summary(self):
try:
ats = self.db_proxy.getAttackersSummary()
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
return ats
def get_object_by_name(self, name, simplify=True):
attackers = self.get_objects(simplify=simplify)
found_attacker = attackers.get(name, None)
if found_attacker is None:
self.close()
raise ObjectNotFoundHTTPError('The provided attacker name')
return found_attacker
  def add_object(self, attacker):
    """Persist a new attacker; refuse to overwrite an existing name.

    Raises OverwriteNotAllowedHTTPError when the name is taken, and
    ARMHTTPError on database/model failures.
    """
    attacker_params = AttackerParameters(
      name=attacker.theName,
      desc=attacker.theDescription,
      image=attacker.theImage,
      tags=attacker.theTags,
      properties=attacker.theEnvironmentProperties
    )
    try:
      if not self.check_existing_attacker(attacker.theName):
        self.db_proxy.addAttacker(attacker_params)
      else:
        self.close()
        raise OverwriteNotAllowedHTTPError(obj_name=attacker.theName)
    except DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)
  def update_object(self, attacker, name):
    """Update the attacker currently stored under `name`.

    The id is resolved from the existing name, so the attacker may be
    renamed via attacker.theName. Raises ARMHTTPError on failure.
    """
    attacker_params = AttackerParameters(
      name=attacker.theName,
      desc=attacker.theDescription,
      image=attacker.theImage,
      tags=attacker.theTags,
      properties=attacker.theEnvironmentProperties
    )
    try:
      attackerId = self.db_proxy.getDimensionId(name,'attacker')
      attacker_params.setId(attackerId)
      self.db_proxy.updateAttacker(attacker_params)
    except DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)
def delete_object(self, name):
try:
attackerId = self.db_proxy.getDimensionId(name,'attacker')
self.db_proxy.deleteAttacker(attackerId)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def check_existing_attacker(self, name):
try:
self.db_proxy.nameCheck(name, 'attacker')
return False
except DatabaseProxyException as ex:
if str(ex.value).find('already exists') > -1:
return True
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
if str(ex.value).find('already exists') > -1:
return True
self.close()
raise ARMHTTPError(ex)
# region Capabilities
  def get_attacker_capabilities(self, pathValues):
    """Return capability value-types for environment pathValues[0]."""
    try:
      environment_name = pathValues[0]
      attacker_capabilities = self.db_proxy.getValueTypes('capability', environment_name)
      return attacker_capabilities
    except DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)
def get_attacker_capability_by_name(self, name, environment_name=''):
found_capability = None
attacker_capabilities = self.get_attacker_capabilities(environment_name=environment_name)
if attacker_capabilities is None or len(attacker_capabilities) < 1:
self.close()
raise ObjectNotFoundHTTPError('Attacker capabilities')
idx = 0
while found_capability is None and idx < len(attacker_capabilities):
if attacker_capabilities[idx].theName == name:
found_capability = attacker_capabilities[idx]
idx += 1
if found_capability is None:
self.close()
raise ObjectNotFoundHTTPError('The provided attacker capability name')
return found_capability
  def add_attacker_capability(self, attacker_capability, pathValues):
    """Create a capability value-type in environment pathValues[0].

    Raises OverwriteNotAllowedHTTPError if the capability already exists.
    """
    assert isinstance(attacker_capability, ValueType)
    environment_name = pathValues[0]
    type_exists = self.check_existing_attacker_capability(attacker_capability.theName, environment_name=environment_name)
    if type_exists:
      self.close()
      raise OverwriteNotAllowedHTTPError(obj_name='The attacker capability')

    params = ValueTypeParameters(
      vtName=attacker_capability.theName,
      vtDesc=attacker_capability.theDescription,
      vType='capability',
      envName=environment_name,
      vtScore=attacker_capability.theScore,
      vtRat=attacker_capability.theRationale
    )

    try:
      return self.db_proxy.addValueType(params)
    except DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)
  def update_attacker_capability(self, attacker_capability, name, pathValues):
    """Update the capability stored under `name` in environment pathValues[0].

    The id is resolved from the existing name, so the capability itself
    may be renamed via attacker_capability.theName.
    """
    assert isinstance(attacker_capability, ValueType)
    environment_name = pathValues[0]
    found_capability = self.get_attacker_capability_by_name(name, environment_name)

    params = ValueTypeParameters(
      vtName=attacker_capability.theName,
      vtDesc=attacker_capability.theDescription,
      vType='capability',
      envName=environment_name,
      vtScore=attacker_capability.theScore,
      vtRat=attacker_capability.theRationale
    )
    params.setId(found_capability.theId)

    try:
      self.db_proxy.updateValueType(params)
    except DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)
  def delete_attacker_capability(self, name, pathValues):
    """Delete the named capability value-type from environment pathValues[0]."""
    environment_name = pathValues[0]
    found_capability = self.get_attacker_capability_by_name(name, environment_name)

    try:
      # NOTE(review): deleteAssetType for a capability *value type* looks
      # inconsistent with the addValueType/updateValueType calls above -
      # confirm against the db_proxy API.
      self.db_proxy.deleteAssetType(found_capability.theId)
    except DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)
def check_existing_attacker_capability(self, name, environment_name):
    """Return True when a capability named *name* exists in the environment."""
    try:
        self.get_attacker_capability_by_name(name, environment_name)
    except ObjectNotFoundHTTPError:
        # The failed lookup closed the session; restore it before reporting.
        self.db_proxy.reconnect(session_id=self.session_id)
        return False
    return True
# endregion
# region Motivations
def get_attacker_motivations(self, pathValues):
    """Fetch every 'motivation' value type for the environment.

    :param pathValues: request path values; element 0 is the environment name.
    :return: whatever the database proxy returns for the value-type query.
    :raises ARMHTTPError: on any database or ARM failure.
    """
    try:
        return self.db_proxy.getValueTypes('motivation', pathValues[0])
    except (DatabaseProxyException, ARMException) as ex:
        self.close()
        raise ARMHTTPError(ex)
def get_attacker_motivation_by_name(self, name, pathValues):
    """Look up a single attacker motivation value type by name.

    :param name: motivation name to find.
    :param pathValues: request path values; element 0 is the environment
        name.  A bare environment-name string is also accepted, because the
        update/delete/check helpers in this class pass a plain string.
    :return: the matching value type.
    :raises ObjectNotFoundHTTPError: when no motivations exist for the
        environment or the name is not present.
    """
    # Accept both a path-values sequence and a bare environment name.
    if isinstance(pathValues, str):
        environment_name = pathValues
    else:
        environment_name = pathValues[0]
    # BUG FIX: the old code called
    # self.get_attacker_motivations(environment_name=...), but that
    # method's only parameter is named pathValues, so every call raised
    # TypeError.  Pass the environment name wrapped in a list instead.
    attacker_motivations = self.get_attacker_motivations([environment_name])
    if attacker_motivations is None or len(attacker_motivations) < 1:
        self.close()
        raise ObjectNotFoundHTTPError('Attacker motivations')
    # Return the first motivation whose name matches, as before.
    for motivation in attacker_motivations:
        if motivation.theName == name:
            return motivation
    self.close()
    raise ObjectNotFoundHTTPError('The provided attacker motivation name')
def add_attacker_motivation(self, attacker_motivation, pathValues):
    """Create a new attacker motivation value type in an environment.

    :param attacker_motivation: ValueType describing the motivation.
    :param pathValues: request path values; element 0 is the environment name.
    :return: the id returned by the database proxy.
    :raises OverwriteNotAllowedHTTPError: if a motivation with this name exists.
    :raises ARMHTTPError: on any database or ARM failure.
    """
    assert isinstance(attacker_motivation, ValueType)
    env = pathValues[0]
    if self.check_existing_attacker_motivation(attacker_motivation.theName, environment_name=env):
        self.close()
        raise OverwriteNotAllowedHTTPError(obj_name='The attacker motivation')
    new_params = ValueTypeParameters(
        vtName=attacker_motivation.theName,
        vtDesc=attacker_motivation.theDescription,
        vType='motivation',
        envName=env,
        vtScore=attacker_motivation.theScore,
        vtRat=attacker_motivation.theRationale)
    try:
        return self.db_proxy.addValueType(new_params)
    except (DatabaseProxyException, ARMException) as ex:
        self.close()
        raise ARMHTTPError(ex)
def update_attacker_motivation(self, attacker_motivation, name, pathValues):
    """Update the attacker motivation currently stored under *name*.

    :param attacker_motivation: ValueType carrying the new values.
    :param name: current name of the motivation to replace.
    :param pathValues: request path values; element 0 is the environment name.
    :raises ARMHTTPError: on any database or ARM failure.
    """
    assert isinstance(attacker_motivation, ValueType)
    env = pathValues[0]
    existing = self.get_attacker_motivation_by_name(name, env)
    update_params = ValueTypeParameters(
        vtName=attacker_motivation.theName,
        vtDesc=attacker_motivation.theDescription,
        vType='motivation',
        envName=env,
        vtScore=attacker_motivation.theScore,
        vtRat=attacker_motivation.theRationale)
    # Re-use the id of the record being replaced.
    update_params.setId(existing.theId)
    try:
        self.db_proxy.updateValueType(update_params)
    except (DatabaseProxyException, ARMException) as ex:
        self.close()
        raise ARMHTTPError(ex)
def delete_attacker_motivation(self, name, pathValues):
    """Delete the named attacker motivation from an environment.

    :param name: motivation name to delete.
    :param pathValues: request path values; element 0 is the environment
        name.  BUG FIX: this parameter was missing from the signature even
        though the body used ``pathValues``, so every call raised NameError;
        the signature now matches the sibling ``delete_attacker_capability``.
    :raises ARMHTTPError: on any database or ARM failure.
    """
    environment_name = pathValues[0]
    found_motivation = self.get_attacker_motivation_by_name(name, environment_name)
    try:
        # NOTE(review): deletion goes through deleteAssetType although the
        # motivation is a value type -- confirm against the proxy API.
        self.db_proxy.deleteAssetType(found_motivation.theId)
    except DatabaseProxyException as ex:
        self.close()
        raise ARMHTTPError(ex)
    except ARMException as ex:
        self.close()
        raise ARMHTTPError(ex)
def check_existing_attacker_motivation(self, name, environment_name):
    """Return True when a motivation named *name* exists in the environment."""
    try:
        self.get_attacker_motivation_by_name(name, environment_name)
    except ObjectNotFoundHTTPError:
        # The failed lookup closed the session; restore it before reporting.
        self.db_proxy.reconnect(session_id=self.session_id)
        return False
    return True
# endregion
def from_json(self, request):
    """Deserialize an Attacker from the JSON body of *request*.

    :param request: incoming HTTP request whose body holds the object.
    :return: the deserialized Attacker.
    :raises MalformedJSONHTTPError: when the body is missing, not JSON, or
        does not deserialize to an Attacker.
    """
    body = request.get_json(silent=True)
    if body is False or body is None:
        self.close()
        raise MalformedJSONHTTPError(data=request.get_data())
    obj_dict = body['object']
    check_required_keys(obj_dict, AttackerModel.required)
    obj_dict['__python_obj__'] = Attacker.__module__ + '.' + Attacker.__name__
    # Convert the JSON-shaped properties first, then round-trip the rest of
    # the dict through the serializer with the properties emptied out.
    env_props = self.convert_props(fake_props=obj_dict['theEnvironmentProperties'])
    obj_dict['theEnvironmentProperties'] = []
    attacker = json_deserialize(json_serialize(obj_dict))
    attacker.theEnvironmentProperties = env_props
    if isinstance(attacker, Attacker):
        return attacker
    self.close()
    raise MalformedJSONHTTPError(data=request.get_data())
def simplify(self, obj):
    """Strip server-side attributes from an Attacker before serialization.

    :param obj: the Attacker to simplify (mutated in place).
    :return: the same object, with its environment properties converted to
        their JSON-friendly form.
    """
    assert isinstance(obj, Attacker)
    # Drop attributes that should never reach API clients.
    del obj.theEnvironmentDictionary
    del obj.theId
    del obj.isPersona
    obj.theEnvironmentProperties = self.convert_props(
        real_props=obj.theEnvironmentProperties)
    return obj
def convert_props(self, real_props=None, fake_props=None):
    """Convert attacker environment properties to or from JSON form.

    Exactly one of the two arguments should be supplied:

    :param real_props: AttackerEnvironmentProperties instances whose
        ``(name, value)`` capability tuples are reshaped into dicts
        (mutated in place, as before).
    :param fake_props: JSON-shaped property dicts converted into
        AttackerEnvironmentProperties instances.
    :return: the converted property list (empty when the supplied list is
        empty).
    :raises MissingParameterHTTPError: when neither argument is supplied.
    """
    new_props = []
    if real_props is not None:
        for real_prop in real_props:
            assert isinstance(real_prop, AttackerEnvironmentProperties)
            # Reshape (name, value) tuples as dicts; entries that are not
            # 2-tuples are dropped, matching the previous behavior.
            real_prop.theCapabilities = [
                {'name': capability[0], 'value': capability[1]}
                for capability in real_prop.theCapabilities
                if len(capability) == 2
            ]
            new_props.append(real_prop)
    elif fake_props is not None:
        for fake_prop in fake_props:
            check_required_keys(fake_prop, AttackerEnvironmentPropertiesModel.required)
            # (The old code asserted the freshly created list was a list,
            # which was vacuously true; the assert has been removed.)
            cap_list = [(cap['name'], cap['value'])
                        for cap in fake_prop['theCapabilities']]
            new_props.append(AttackerEnvironmentProperties(
                environmentName=fake_prop['theEnvironmentName'],
                roles=fake_prop['theRoles'],
                motives=fake_prop['theMotives'],
                capabilities=cap_list))
    else:
        self.close()
        raise MissingParameterHTTPError(param_names=['real_props', 'fake_props'])
    return new_props
| |
import pytest
import pip.wheel
import pip.pep425tags
from pkg_resources import parse_version, Distribution
from pip.req import InstallRequirement
from pip.index import (
InstallationCandidate, PackageFinder, Link, FormatControl,
fmt_ctl_formats)
from pip.exceptions import (
BestVersionAlreadyInstalled, DistributionNotFound, InstallationError,
)
from pip.utils import Inf
from pip.download import PipSession
from mock import Mock, patch
def test_no_mpkg(data):
    """Finder skips zipfiles with "macosx10" in the name."""
    finder = PackageFinder([data.find_links], [], session=PipSession())
    requirement = InstallRequirement.from_line("pkgwithmpkg")
    result = finder.find_requirement(requirement, False)
    assert result.url.endswith("pkgwithmpkg-1.0.tar.gz"), result
def test_no_partial_name_match(data):
    """Finder requires the full project name to match, not just beginning."""
    finder = PackageFinder([data.find_links], [], session=PipSession())
    requirement = InstallRequirement.from_line("gmpy")
    result = finder.find_requirement(requirement, False)
    assert result.url.endswith("gmpy-1.15.tar.gz"), result
@patch(
    'pip.index.os.path.exists',
    return_value=True  # b/c we only use tilde expanded version if it exists
)
def test_tilde(mock_exists):
    """Finder can accept a path with ~ in it and will normalize it."""
    # The @patch decorator injects the mock as the first positional
    # argument (pytest excludes patched arguments from fixture resolution),
    # so the old parameter name ``data`` was misleading: it received the
    # mock, not the ``data`` fixture.
    finder = PackageFinder(['~/python-pkgs'], [], session=PipSession())
    req = InstallRequirement.from_line("gmpy")
    with pytest.raises(DistributionNotFound):
        finder.find_requirement(req, False)
def test_duplicates_sort_ok(data):
    """Finder successfully finds one of a set of duplicates in different
    locations"""
    sources = [data.find_links, data.find_links2]
    finder = PackageFinder(sources, [], session=PipSession())
    requirement = InstallRequirement.from_line("duplicate")
    result = finder.find_requirement(requirement, False)
    assert result.url.endswith("duplicate-1.0.tar.gz"), result
def test_finder_detects_latest_find_links(data):
    """Test PackageFinder detects latest using find-links"""
    requirement = InstallRequirement.from_line('simple', None)
    finder = PackageFinder([data.find_links], [], session=PipSession())
    result = finder.find_requirement(requirement, False)
    assert result.url.endswith("simple-3.0.tar.gz")
def test_incorrect_case_file_index(data):
    """Test PackageFinder detects latest using wrong case"""
    requirement = InstallRequirement.from_line('dinner', None)
    finder = PackageFinder([], [data.find_links3], session=PipSession())
    result = finder.find_requirement(requirement, False)
    assert result.url.endswith("Dinner-2.0.tar.gz")
@pytest.mark.network
def test_finder_detects_latest_already_satisfied_find_links(data):
    """Test PackageFinder detects latest already satisfied using find-links"""
    requirement = InstallRequirement.from_line('simple', None)
    # the latest simple in local pkgs is 3.0
    installed_version = "3.0"
    requirement.satisfied_by = Mock(
        location="/path",
        parsed_version=parse_version(installed_version),
        version=installed_version)
    finder = PackageFinder([data.find_links], [], session=PipSession())
    with pytest.raises(BestVersionAlreadyInstalled):
        finder.find_requirement(requirement, True)
@pytest.mark.network
def test_finder_detects_latest_already_satisfied_pypi_links():
    """Test PackageFinder detects latest already satisfied using pypi links"""
    requirement = InstallRequirement.from_line('initools', None)
    # the latest initools on pypi is 0.3.1
    installed_version = "0.3.1"
    requirement.satisfied_by = Mock(
        location="/path",
        parsed_version=parse_version(installed_version),
        version=installed_version)
    finder = PackageFinder(
        [],
        ["http://pypi.python.org/simple"],
        session=PipSession())
    with pytest.raises(BestVersionAlreadyInstalled):
        finder.find_requirement(requirement, True)
class TestWheel:
    """Tests covering how PackageFinder treats wheel links."""

    def test_skip_invalid_wheel_link(self, caplog, data):
        """
        Test if PackageFinder skips invalid wheel filenames
        """
        req = InstallRequirement.from_line("invalid")
        # data.find_links contains "invalid.whl", which is an invalid wheel
        finder = PackageFinder(
            [data.find_links],
            [],
            session=PipSession(),
        )
        with pytest.raises(DistributionNotFound):
            finder.find_requirement(req, True)
        # NOTE(review): ``caplog.text`` is a property on modern pytest;
        # calling it works only with the older capture-log plugin API --
        # confirm the pinned pytest version supports ``caplog.text()``.
        assert (
            "invalid.whl; invalid wheel filename"
            in caplog.text()
        )

    def test_not_find_wheel_not_supported(self, data, monkeypatch):
        """
        Test not finding an unsupported wheel.
        """
        # Force the supported-tag list to something no wheel matches.
        monkeypatch.setattr(
            pip.pep425tags,
            "supported_tags",
            [('py1', 'none', 'any')],
        )
        req = InstallRequirement.from_line("simple.dist")
        finder = PackageFinder(
            [data.find_links],
            [],
            session=PipSession(),
        )
        with pytest.raises(DistributionNotFound):
            finder.find_requirement(req, True)

    def test_find_wheel_supported(self, data, monkeypatch):
        """
        Test finding supported wheel.
        """
        # Tag list chosen so that the py2.py3-none-any wheel matches.
        monkeypatch.setattr(
            pip.pep425tags,
            "supported_tags",
            [('py2', 'none', 'any')],
        )
        req = InstallRequirement.from_line("simple.dist")
        finder = PackageFinder(
            [data.find_links],
            [],
            session=PipSession(),
        )
        found = finder.find_requirement(req, True)
        assert (
            found.url.endswith("simple.dist-0.1-py2.py3-none-any.whl")
        ), found

    def test_wheel_over_sdist_priority(self, data):
        """
        Test wheels have priority over sdists.
        `test_link_sorting` also covers this at lower level
        """
        req = InstallRequirement.from_line("priority")
        finder = PackageFinder(
            [data.find_links],
            [],
            session=PipSession(),
        )
        found = finder.find_requirement(req, True)
        assert found.url.endswith("priority-1.0-py2.py3-none-any.whl"), found

    def test_existing_over_wheel_priority(self, data):
        """
        Test existing install has priority over wheels.
        `test_link_sorting` also covers this at a lower level
        """
        req = InstallRequirement.from_line('priority', None)
        latest_version = "1.0"
        # Pretend the requirement is already satisfied at latest_version.
        satisfied_by = Mock(
            location="/path",
            parsed_version=parse_version(latest_version),
            version=latest_version,
        )
        req.satisfied_by = satisfied_by
        finder = PackageFinder(
            [data.find_links],
            [],
            session=PipSession(),
        )
        with pytest.raises(BestVersionAlreadyInstalled):
            finder.find_requirement(req, True)

    # patch() is given an explicit replacement value here, so no mock
    # argument is injected into the test function.
    @patch('pip.pep425tags.supported_tags', [
        ('pyT', 'none', 'TEST'),
        ('pyT', 'TEST', 'any'),
        ('pyT', 'none', 'any'),
    ])
    def test_link_sorting(self):
        """
        Test link sorting
        """
        links = [
            InstallationCandidate("simple", "2.0", Link(Inf)),
            InstallationCandidate("simple", "2.0", Link('simple-2.0.tar.gz')),
            InstallationCandidate(
                "simple",
                "1.0",
                Link('simple-1.0-pyT-none-TEST.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0-pyT-TEST-any.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0-pyT-none-any.whl'),
            ),
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0.tar.gz'),
            ),
        ]
        finder = PackageFinder([], [], session=PipSession())
        results = finder._sort_versions(links)
        # The ordering above is already the expected one; sorting either
        # direction of input must reproduce it.
        results2 = finder._sort_versions(reversed(links))
        assert links == results == results2, results2

    @patch('pip.pep425tags.supported_tags', [])
    def test_link_sorting_raises_when_wheel_unsupported(self):
        # With no supported tags at all, any wheel candidate is an error.
        links = [
            InstallationCandidate(
                "simple",
                '1.0',
                Link('simple-1.0-py2.py3-none-TEST.whl'),
            ),
        ]
        finder = PackageFinder([], [], session=PipSession())
        with pytest.raises(InstallationError):
            finder._sort_versions(links)
def test_finder_priority_file_over_page(data):
    """Test PackageFinder prefers file links over equivalent page links"""
    requirement = InstallRequirement.from_line('gmpy==1.15', None)
    finder = PackageFinder(
        [data.find_links],
        ["http://pypi.python.org/simple"],
        session=PipSession())
    candidates = finder._find_all_versions(requirement.name)
    # One local file candidate must sort ahead of all the https ones.
    assert candidates[0].location.scheme == 'file'
    assert all(candidate.location.scheme == 'https'
               for candidate in candidates[1:]), candidates
    result = finder.find_requirement(requirement, False)
    assert result.url.startswith("file://")
def test_finder_deplink():
    """
    Test PackageFinder with dependency links only
    """
    requirement = InstallRequirement.from_line('gmpy==1.15', None)
    finder = PackageFinder(
        [], [], process_dependency_links=True, session=PipSession())
    finder.add_dependency_links(
        ['https://pypi.python.org/packages/source/g/gmpy/gmpy-1.15.zip'])
    result = finder.find_requirement(requirement, False)
    assert result.url.startswith("https://pypi"), result
@pytest.mark.network
def test_finder_priority_page_over_deplink():
    """
    Test PackageFinder prefers page links over equivalent dependency links
    """
    requirement = InstallRequirement.from_line('pip==1.5.6', None)
    finder = PackageFinder(
        [],
        ["https://pypi.python.org/simple"],
        process_dependency_links=True,
        session=PipSession())
    finder.add_dependency_links([
        'https://warehouse.python.org/packages/source/p/pip/pip-1.5.6.tar.gz'])
    candidates = finder._find_all_versions(requirement.name)
    # The dependency link must sort last.
    assert candidates[-1].location.url.startswith('https://warehouse')
    result = finder.find_requirement(requirement, False)
    assert result.url.startswith("https://pypi"), result
def test_finder_priority_nonegg_over_eggfragments():
    """Test PackageFinder prefers non-egg links over "#egg=" links"""
    req = InstallRequirement.from_line('bar==1.0', None)
    links = ['http://foo/bar.py#egg=bar-1.0', 'http://foo/bar-1.0.tar.gz']

    def check(candidate_links):
        # Regardless of input order, the sdist must sort first and win.
        finder = PackageFinder(candidate_links, [], session=PipSession())
        with patch.object(finder, "_get_pages", lambda x, y: []):
            versions = finder._find_all_versions(req.name)
            assert versions[0].location.url.endswith('tar.gz')
            assert versions[1].location.url.endswith('#egg=bar-1.0')
            found = finder.find_requirement(req, False)
            assert found.url.endswith('tar.gz')

    check(links)
    links.reverse()
    check(links)
def test_finder_only_installs_stable_releases(data):
    """
    Test PackageFinder only accepts stable versioned releases by default.
    """
    req = InstallRequirement.from_line("bar", None)

    # using a local index (that has pre & dev releases)
    finder = PackageFinder([], [data.index_url("pre")], session=PipSession())
    link = finder.find_requirement(req, False)
    assert link.url.endswith("bar-1.0.tar.gz"), link.url

    # using find-links, trying both link orders
    links = ["https://foo/bar-1.0.tar.gz", "https://foo/bar-2.0b1.tar.gz"]
    for ordering in (list(links), list(reversed(links))):
        finder = PackageFinder(ordering, [], session=PipSession())
        with patch.object(finder, "_get_pages", lambda x, y: []):
            link = finder.find_requirement(req, False)
            assert link.url == "https://foo/bar-1.0.tar.gz"
def test_finder_installs_pre_releases(data):
    """
    Test PackageFinder finds pre-releases if asked to.
    """
    req = InstallRequirement.from_line("bar", None)

    # using a local index (that has pre & dev releases)
    finder = PackageFinder(
        [], [data.index_url("pre")],
        allow_all_prereleases=True,
        session=PipSession())
    link = finder.find_requirement(req, False)
    assert link.url.endswith("bar-2.0b1.tar.gz"), link.url

    # using find-links, trying both link orders
    links = ["https://foo/bar-1.0.tar.gz", "https://foo/bar-2.0b1.tar.gz"]
    for ordering in (list(links), list(reversed(links))):
        finder = PackageFinder(
            ordering, [],
            allow_all_prereleases=True,
            session=PipSession())
        with patch.object(finder, "_get_pages", lambda x, y: []):
            link = finder.find_requirement(req, False)
            assert link.url == "https://foo/bar-2.0b1.tar.gz"
def test_finder_installs_dev_releases(data):
    """
    Test PackageFinder finds dev releases if asked to.
    """
    requirement = InstallRequirement.from_line("bar", None)
    # local index with dev releases, pre-releases enabled
    finder = PackageFinder(
        [], [data.index_url("dev")],
        allow_all_prereleases=True,
        session=PipSession())
    result = finder.find_requirement(requirement, False)
    assert result.url.endswith("bar-2.0.dev1.tar.gz"), result.url
def test_finder_installs_pre_releases_with_version_spec():
    """
    Test PackageFinder finds pre-releases when the version specifier
    explicitly allows them (``>=0.0.dev0``), even without
    ``allow_all_prereleases``.
    """
    # (The old docstring was copy-pasted from the stable-releases test and
    # described the opposite of what this test asserts.)
    req = InstallRequirement.from_line("bar>=0.0.dev0", None)
    links = ["https://foo/bar-1.0.tar.gz", "https://foo/bar-2.0b1.tar.gz"]
    # Trying both link orders: the pre-release must win either way.
    for ordering in (list(links), list(reversed(links))):
        finder = PackageFinder(ordering, [], session=PipSession())
        with patch.object(finder, "_get_pages", lambda x, y: []):
            link = finder.find_requirement(req, False)
            assert link.url == "https://foo/bar-2.0b1.tar.gz"
def test_finder_ignores_external_links(data):
    """
    Tests that PackageFinder ignores external links, with or without hashes.
    """
    req = InstallRequirement.from_line("bar", None)
    # local "externals" index, with no external flags at all
    finder = PackageFinder([], [data.index_url("externals")],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-1.0.tar.gz"


def test_finder_finds_external_links_with_hashes_per_project(data):
    """
    Tests that PackageFinder finds external links but only if they have a hash
    using the per project configuration.
    """
    req = InstallRequirement.from_line("bar", None)
    # local "externals" index, externals enabled for "bar" only
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_external=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-2.0.tar.gz"


def test_finder_finds_external_links_with_hashes_all(data):
    """
    Tests that PackageFinder finds external links but only if they have a hash
    using the all externals flag.
    """
    req = InstallRequirement.from_line("bar", None)
    # local "externals" index, externals enabled globally
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_all_external=True,
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-2.0.tar.gz"
def test_finder_finds_external_links_without_hashes_per_project(data):
    """
    Tests that PackageFinder finds external links if they do not have a hash
    """
    req = InstallRequirement.from_line("bar==3.0", None)
    # local "externals" index; unverified links allowed for "bar"
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_external=["bar"],
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-3.0.tar.gz"


def test_finder_finds_external_links_without_hashes_all(data):
    """
    Tests that PackageFinder finds external links if they do not have a hash
    using the all external flag
    """
    req = InstallRequirement.from_line("bar==3.0", None)
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_all_external=True,
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-3.0.tar.gz"


def test_finder_finds_external_links_without_hashes_scraped_per_project(data):
    """
    Tests that PackageFinder finds externally scraped links
    """
    req = InstallRequirement.from_line("bar", None)
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_external=["bar"],
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-4.0.tar.gz"


def test_finder_finds_external_links_without_hashes_scraped_all(data):
    """
    Tests that PackageFinder finds externally scraped links using the all
    external flag.
    """
    req = InstallRequirement.from_line("bar", None)
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_all_external=True,
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-4.0.tar.gz"
def test_finder_finds_external_links_without_hashes_per_project_all_insecure(
        data):
    """
    Tests that PackageFinder finds external links if they do not have a hash
    """
    req = InstallRequirement.from_line("bar==3.0", None)
    # local "externals" index; unverified links allowed for "bar"
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_external=["bar"],
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-3.0.tar.gz"


def test_finder_finds_external_links_without_hashes_all_all_insecure(data):
    """
    Tests that PackageFinder finds external links if they do not have a hash
    using the all external flag
    """
    req = InstallRequirement.from_line("bar==3.0", None)
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_all_external=True,
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-3.0.tar.gz"


def test_finder_finds_external_links_without_hashes_scraped_per_project_all_insecure(data):  # noqa
    """
    Tests that PackageFinder finds externally scraped links
    """
    req = InstallRequirement.from_line("bar", None)
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_external=["bar"],
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-4.0.tar.gz"


def test_finder_finds_external_links_without_hashes_scraped_all_all_insecure(
        data):
    """
    Tests that PackageFinder finds externally scraped links using the all
    external flag.
    """
    req = InstallRequirement.from_line("bar", None)
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_all_external=True,
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-4.0.tar.gz"


def test_finder_finds_external_links_without_hashes_scraped_insecure(data):
    """
    Tests that PackageFinder finds externally scraped links without the
    external flag
    """
    req = InstallRequirement.from_line("bar", None)
    # only the unverified flag; no external flag at all
    finder = PackageFinder([], [data.index_url("externals")],
                           allow_unverified=["bar"],
                           session=PipSession())
    assert finder.find_requirement(req, False).filename == "bar-4.0.tar.gz"
# NOTE(review): pytest's default collection pattern for classes is
# ``Test*``, so this lowercase-named class is likely never collected and
# its tests never run -- confirm against the project's pytest
# configuration (``python_classes``) before renaming.
class test_link_package_versions(object):
    """Tests for PackageFinder._link_package_versions name matching."""

    # patch this for travis which has distribute in its base env for now
    @patch(
        'pip.wheel.pkg_resources.get_distribution',
        lambda x: Distribution(project_name='setuptools', version='0.9')
    )
    def setup(self):
        # Shared fixture data used by the tests below.
        self.version = '1.0'
        self.parsed_version = parse_version(self.version)
        self.search_name = 'pytest'
        self.finder = PackageFinder(
            [],
            [],
            session=PipSession(),
        )

    def test_link_package_versions_match_wheel(self):
        """Test that 'pytest' archives match for 'pytest'"""
        # TODO: Uncomment these, when #1217 is fixed
        # link = Link('http:/yo/pytest-1.0.tar.gz')
        # result = self.finder._link_package_versions(link, self.search_name)
        # assert result == [(self.parsed_version, link, self.version)], result
        link = Link('http:/yo/pytest-1.0-py2.py3-none-any.whl')
        result = self.finder._link_package_versions(link, self.search_name)
        assert result == [(self.parsed_version, link, self.version)], result

    def test_link_package_versions_substring_fails(self):
        """Test that 'pytest<something> archives won't match for 'pytest'"""
        # TODO: Uncomment these, when #1217 is fixed
        # link = Link('http:/yo/pytest-xdist-1.0.tar.gz')
        # result = self.finder._link_package_versions(link, self.search_name)
        # assert result == [], result
        # link = Link('http:/yo/pytest2-1.0.tar.gz')
        # result = self.finder._link_package_versions(link, self.search_name)
        # assert result == [], result
        link = Link('http:/yo/pytest_xdist-1.0-py2.py3-none-any.whl')
        result = self.finder._link_package_versions(link, self.search_name)
        assert result == [], result
def test_get_index_urls_locations():
    """Check that the canonical name is on all indexes"""
    finder = PackageFinder(
        [], ['file://index1/', 'file://index2'], session=PipSession())
    project_name = InstallRequirement.from_line('Complex_Name').name
    locations = finder._get_index_urls_locations(project_name)
    # both indexes are listed, with the canonicalized project name
    assert locations == [
        'file://index1/complex-name/',
        'file://index2/complex-name/',
    ]
def test_find_all_versions_nothing(data):
    """Find nothing without anything"""
    finder = PackageFinder([], [], session=PipSession())
    assert not finder._find_all_versions('pip')


def test_find_all_versions_find_links(data):
    finder = PackageFinder([data.find_links], [], session=PipSession())
    found = finder._find_all_versions('simple')
    assert ['3.0', '2.0', '1.0'] == [str(c.version) for c in found]


def test_find_all_versions_index(data):
    finder = PackageFinder([], [data.index_url('simple')],
                           session=PipSession())
    found = finder._find_all_versions('simple')
    assert ['1.0'] == [str(c.version) for c in found]


def test_find_all_versions_find_links_and_index(data):
    finder = PackageFinder([data.find_links], [data.index_url('simple')],
                           session=PipSession())
    found = finder._find_all_versions('simple')
    # the find-links versions come first, then the page versions
    assert ['3.0', '2.0', '1.0', '1.0'] == [str(c.version) for c in found]
def test_fmt_ctl_matches():
    """fmt_ctl_formats maps (no_binary, only_binary) sets to the allowed
    formats for a project."""
    cases = [
        (FormatControl(set(), set()), frozenset(["source", "binary"])),
        (FormatControl(set(["fred"]), set()), frozenset(["source"])),
        # a per-project no-binary entry beats the :all: only-binary entry
        (FormatControl(set(["fred"]), set([":all:"])), frozenset(["source"])),
        (FormatControl(set(), set(["fred"])), frozenset(["binary"])),
        (FormatControl(set([":all:"]), set(["fred"])), frozenset(["binary"])),
    ]
    for fmt, expected in cases:
        assert fmt_ctl_formats(fmt, "fred") == expected
| |
import functools
import logging
import unittest
from unittest import mock
import pytest
from socketio import base_manager
from socketio import pubsub_manager
class TestPubSubManager(unittest.TestCase):
def setUp(self):
    """Build a PubSubManager with a mocked server and publish hook."""
    counter = 0

    def generate_id():
        # Deterministic, monotonically increasing sid generator.
        nonlocal counter
        counter += 1
        return str(counter)

    mock_server = mock.MagicMock()
    mock_server.eio.generate_id = generate_id
    self.pm = pubsub_manager.PubSubManager()
    # Intercept outgoing queue messages instead of hitting a real backend.
    self.pm._publish = mock.MagicMock()
    self.pm.set_server(mock_server)
    self.pm.host_id = '123456'
    self.pm.initialize()
def test_default_init(self):
    # A read-write manager starts the background listener on initialize().
    assert self.pm.channel == 'socketio'
    self.pm.server.start_background_task.assert_called_once_with(
        self.pm._thread)

def test_custom_init(self):
    custom_pm = pubsub_manager.PubSubManager(channel='foo')
    assert custom_pm.channel == 'foo'
    # Auto-generated host ids are 32 hex digits long.
    assert len(custom_pm.host_id) == 32

def test_write_only_init(self):
    fake_server = mock.MagicMock()
    wo_pm = pubsub_manager.PubSubManager(write_only=True)
    wo_pm.set_server(fake_server)
    wo_pm.initialize()
    assert len(wo_pm.host_id) == 32
    assert wo_pm.channel == 'socketio'
    # Write-only managers never start the background listener.
    assert wo_pm.server.start_background_task.call_count == 0

def test_write_only_default_logger(self):
    wo_pm = pubsub_manager.PubSubManager(write_only=True)
    wo_pm.initialize()
    assert len(wo_pm.host_id) == 32
    assert wo_pm.channel == 'socketio'
    assert wo_pm._get_logger() == logging.getLogger('socketio')

def test_write_only_with_provided_logger(self):
    custom_logger = logging.getLogger('new_logger')
    wo_pm = pubsub_manager.PubSubManager(write_only=True,
                                         logger=custom_logger)
    wo_pm.initialize()
    assert len(wo_pm.host_id) == 32
    assert wo_pm.channel == 'socketio'
    assert wo_pm._get_logger() == custom_logger
@staticmethod
def _expected_emit(**overrides):
    """Build the queue message emit() is expected to publish."""
    message = {'method': 'emit', 'event': 'foo', 'data': 'bar',
               'namespace': '/', 'room': None, 'skip_sid': None,
               'callback': None, 'host_id': '123456'}
    message.update(overrides)
    return message

def test_emit(self):
    self.pm.emit('foo', 'bar')
    self.pm._publish.assert_called_once_with(self._expected_emit())

def test_emit_with_namespace(self):
    self.pm.emit('foo', 'bar', namespace='/baz')
    self.pm._publish.assert_called_once_with(
        self._expected_emit(namespace='/baz'))

def test_emit_with_room(self):
    self.pm.emit('foo', 'bar', room='baz')
    self.pm._publish.assert_called_once_with(
        self._expected_emit(room='baz'))

def test_emit_with_skip_sid(self):
    self.pm.emit('foo', 'bar', skip_sid='baz')
    self.pm._publish.assert_called_once_with(
        self._expected_emit(skip_sid='baz'))
def test_emit_with_callback(self):
    with mock.patch.object(
            self.pm, '_generate_ack_id', return_value='123'):
        self.pm.emit('foo', 'bar', room='baz', callback='cb')
        expected = {
            'method': 'emit', 'event': 'foo', 'data': 'bar',
            'namespace': '/', 'room': 'baz', 'skip_sid': None,
            'callback': ('baz', '/', '123'), 'host_id': '123456',
        }
        self.pm._publish.assert_called_once_with(expected)

def test_emit_with_callback_without_server(self):
    # A manager that was never given a server cannot register callbacks.
    standalone_pm = pubsub_manager.PubSubManager()
    with pytest.raises(RuntimeError):
        standalone_pm.emit('foo', 'bar', callback='cb')

def test_emit_with_callback_missing_room(self):
    with mock.patch.object(
            self.pm, '_generate_ack_id', return_value='123'):
        # A callback needs an explicit single-client room.
        with pytest.raises(ValueError):
            self.pm.emit('foo', 'bar', callback='cb')

def test_emit_with_ignore_queue(self):
    sid = self.pm.connect('123', '/')
    self.pm.emit('foo', 'bar', room=sid, namespace='/', ignore_queue=True)
    # The message must bypass the queue and go straight to the client.
    self.pm._publish.assert_not_called()
    self.pm.server._emit_internal.assert_called_once_with(
        '123', 'foo', 'bar', '/', None)
def test_can_disconnect(self):
    sid = self.pm.connect('123', '/')
    # A locally-connected sid is answered without touching the queue.
    assert self.pm.can_disconnect(sid, '/')
    # For an unknown namespace, a disconnect message is published instead.
    self.pm.can_disconnect(sid, '/foo')
    self.pm._publish.assert_called_once_with(
        {'method': 'disconnect', 'sid': sid, 'namespace': '/foo'})

def test_disconnect(self):
    self.pm.disconnect('foo')
    self.pm._publish.assert_called_once_with(
        {'method': 'disconnect', 'sid': 'foo', 'namespace': '/'})

def test_close_room(self):
    self.pm.close_room('foo')
    self.pm._publish.assert_called_once_with(
        {'method': 'close_room', 'room': 'foo', 'namespace': '/'})

def test_close_room_with_namespace(self):
    self.pm.close_room('foo', '/bar')
    self.pm._publish.assert_called_once_with(
        {'method': 'close_room', 'room': 'foo', 'namespace': '/bar'})
def test_handle_emit(self):
with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
self.pm._handle_emit({'event': 'foo', 'data': 'bar'})
super_emit.assert_called_once_with(
'foo',
'bar',
namespace=None,
room=None,
skip_sid=None,
callback=None,
)
def test_handle_emit_with_namespace(self):
with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
self.pm._handle_emit(
{'event': 'foo', 'data': 'bar', 'namespace': '/baz'}
)
super_emit.assert_called_once_with(
'foo',
'bar',
namespace='/baz',
room=None,
skip_sid=None,
callback=None,
)
def test_handle_emit_with_room(self):
with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
self.pm._handle_emit(
{'event': 'foo', 'data': 'bar', 'room': 'baz'}
)
super_emit.assert_called_once_with(
'foo',
'bar',
namespace=None,
room='baz',
skip_sid=None,
callback=None,
)
    def test_handle_emit_with_skip_sid(self):
        """The skip_sid field of a queue message is passed through to emit."""
        with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
            self.pm._handle_emit(
                {'event': 'foo', 'data': 'bar', 'skip_sid': '123'}
            )
            super_emit.assert_called_once_with(
                'foo',
                'bar',
                namespace=None,
                room=None,
                skip_sid='123',
                callback=None,
            )
    def test_handle_emit_with_callback(self):
        """A queue message carrying a callback tuple is converted to a partial
        that, when invoked, publishes the callback result back to the queue."""
        host_id = self.pm.host_id
        with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
            self.pm._handle_emit(
                {
                    'event': 'foo',
                    'data': 'bar',
                    'namespace': '/baz',
                    'callback': ('sid', '/baz', 123),
                    'host_id': host_id,
                }
            )
            assert super_emit.call_count == 1
            assert super_emit.call_args[0] == ('foo', 'bar')
            assert super_emit.call_args[1]['namespace'] == '/baz'
            assert super_emit.call_args[1]['room'] is None
            assert super_emit.call_args[1]['skip_sid'] is None
            # The raw (sid, namespace, id) tuple must have been wrapped.
            assert isinstance(
                super_emit.call_args[1]['callback'], functools.partial
            )
            # Invoking the wrapper publishes the result for the origin host.
            super_emit.call_args[1]['callback']('one', 2, 'three')
            self.pm._publish.assert_called_once_with(
                {
                    'method': 'callback',
                    'host_id': host_id,
                    'sid': 'sid',
                    'namespace': '/baz',
                    'id': 123,
                    'args': ('one', 2, 'three'),
                }
            )
    def test_handle_callback(self):
        """A callback message addressed to this host triggers the local callback."""
        host_id = self.pm.host_id
        with mock.patch.object(self.pm, 'trigger_callback') as trigger:
            self.pm._handle_callback(
                {
                    'method': 'callback',
                    'host_id': host_id,
                    'sid': 'sid',
                    'namespace': '/',
                    'id': 123,
                    'args': ('one', 2),
                }
            )
            trigger.assert_called_once_with('sid', 123, ('one', 2))
    def test_handle_callback_bad_host_id(self):
        """Callback messages addressed to a different host are ignored."""
        with mock.patch.object(self.pm, 'trigger_callback') as trigger:
            self.pm._handle_callback(
                {
                    'method': 'callback',
                    'host_id': 'bad',
                    'sid': 'sid',
                    'namespace': '/',
                    'id': 123,
                    'args': ('one', 2),
                }
            )
            assert trigger.call_count == 0
def test_handle_callback_missing_args(self):
host_id = self.pm.host_id
with mock.patch.object(self.pm, 'trigger_callback') as trigger:
self.pm._handle_callback(
{
'method': 'callback',
'host_id': host_id,
'sid': 'sid',
'namespace': '/',
'id': 123,
}
)
self.pm._handle_callback(
{
'method': 'callback',
'host_id': host_id,
'sid': 'sid',
'namespace': '/',
}
)
self.pm._handle_callback(
{'method': 'callback', 'host_id': host_id, 'sid': 'sid'}
)
self.pm._handle_callback(
{'method': 'callback', 'host_id': host_id}
)
assert trigger.call_count == 0
    def test_handle_disconnect(self):
        """A disconnect queue message disconnects locally with ignore_queue=True
        so the message is not re-published in a loop."""
        self.pm._handle_disconnect(
            {'method': 'disconnect', 'sid': '123', 'namespace': '/foo'}
        )
        self.pm.server.disconnect.assert_called_once_with(
            sid='123', namespace='/foo', ignore_queue=True
        )
    def test_handle_close_room(self):
        """A close_room queue message is relayed to BaseManager.close_room."""
        with mock.patch.object(
            base_manager.BaseManager, 'close_room'
        ) as super_close_room:
            self.pm._handle_close_room({'method': 'close_room', 'room': 'foo'})
            super_close_room.assert_called_once_with(
                room='foo', namespace=None
            )
    def test_handle_close_room_with_namespace(self):
        """The namespace of a close_room queue message is passed through."""
        with mock.patch.object(
            base_manager.BaseManager, 'close_room'
        ) as super_close_room:
            self.pm._handle_close_room(
                {'method': 'close_room', 'room': 'foo', 'namespace': '/bar'}
            )
            super_close_room.assert_called_once_with(
                room='foo', namespace='/bar'
            )
    def test_background_thread(self):
        """The listener thread dispatches dict, JSON-string and pickled
        messages to the right handler and ignores malformed payloads."""
        self.pm._handle_emit = mock.MagicMock()
        self.pm._handle_callback = mock.MagicMock()
        self.pm._handle_disconnect = mock.MagicMock()
        self.pm._handle_close_room = mock.MagicMock()

        def messages():
            import pickle

            yield {'method': 'emit', 'value': 'foo'}
            yield {'missing': 'method'}
            yield '{"method": "callback", "value": "bar"}'
            yield {'method': 'disconnect', 'sid': '123', 'namespace': '/foo'}
            yield {'method': 'bogus'}
            yield pickle.dumps({'method': 'close_room', 'value': 'baz'})
            yield 'bad json'
            yield b'bad pickled'

        # _listen is exhausted; the thread exits via StopIteration.
        self.pm._listen = mock.MagicMock(side_effect=messages)
        try:
            self.pm._thread()
        except StopIteration:
            pass

        self.pm._handle_emit.assert_called_once_with(
            {'method': 'emit', 'value': 'foo'}
        )
        self.pm._handle_callback.assert_called_once_with(
            {'method': 'callback', 'value': 'bar'}
        )
        self.pm._handle_disconnect.assert_called_once_with(
            {'method': 'disconnect', 'sid': '123', 'namespace': '/foo'}
        )
        self.pm._handle_close_room.assert_called_once_with(
            {'method': 'close_room', 'value': 'baz'}
        )
| |
#!/usr/bin/env python
# Copyright (c) 2016, HLP-R
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of hlpr_simulator nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Author: Vivian Chu, vchu@gatech.edu
"""
vector_control_interface.py
This script takes commands sent through the "real robot" topics and converts them
into standard ROS messages for Gazebo to interpret
Note: it currently depends on MoveIt! to perform arm manipulation using the joystick
"""
import rospy
import tf
import math
from collections import defaultdict
from vector_msgs.msg import LinearActuatorCmd, GripperCmd, JacoCartesianVelocityCmd, GripperStat
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from control_msgs.msg import JointTrajectoryControllerState
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from geometry_msgs.msg import Pose, PoseStamped, Point, Quaternion
from moveit_msgs.srv import GetPositionIK
from moveit_msgs.msg import PositionIKRequest
import dynamixel_msgs.msg
def get_param(name, value=None):
    """Look up a ROS parameter, preferring the private (~) namespace.

    Falls back to the global name, and finally to *value* when neither is set.
    """
    private = "~%s" % name
    for candidate in (private, name):
        if rospy.has_param(candidate):
            return rospy.get_param(candidate)
    return value
class VectorControllerConverter():
    '''
    This node converts Vector Robot specific messages into standard ROS messages
    '''
    def __init__(self):
        """Wire up every publisher/subscriber that bridges the real-robot
        topics to their Gazebo/ros_control equivalents."""
        rospy.loginfo("Setting up subscribers and publishers")
        self.RIGHT_KEY = 'right'
        # Specific for the linear actuator
        self.linact_pub = rospy.Publisher('/linear_actuator_controller/command', JointTrajectory, queue_size=10)
        self.linact_joint_names = get_param('/linear_actuator_controller/joints', '')
        # Setup pan/tilt controller
        self.pan_pub = rospy.Publisher('/pan_sim_controller/command', JointTrajectory, queue_size=10)
        self.tilt_pub = rospy.Publisher('/tilt_sim_controller/command', JointTrajectory, queue_size=10)
        self.pan_names = get_param('/pan_sim_controller/joints', '')
        self.tilt_names = get_param('/tilt_sim_controller/joints', '')
        # Get flag for one vs. two arms
        self.two_arms = get_param('/two_arms', False)
        # Setup pan/tilt sim to real controller topics to simulate the real robot
        # Needed because ROS controller has a different message than Stanley
        self.pan_state_sub = rospy.Subscriber('/pan_sim_controller/state', JointTrajectoryControllerState, self.panStateCallback, queue_size=1)
        self.tilt_state_sub = rospy.Subscriber('/tilt_sim_controller/state', JointTrajectoryControllerState, self.tiltStateCallback, queue_size=1)
        self.pan_state_pub = rospy.Publisher('/pan_controller/state', dynamixel_msgs.msg.JointState, queue_size=10)
        self.tilt_state_pub = rospy.Publisher('/tilt_controller/state', dynamixel_msgs.msg.JointState, queue_size=10)
        # Setup the gripper controller
        self.gripper_pub = rospy.Publisher('/gripper_controller/command', JointTrajectory, queue_size=10)
        self.gripper_name = get_param('/gripper_controller/joints', '')
        # Setup the arm controller
        self.arm_pub = rospy.Publisher('/vector/right_arm/command', JointTrajectory, queue_size=10)
        # Setup subscribers to listen to the commands
        self.linact_command_sub = rospy.Subscriber('/vector/linear_actuator_cmd', LinearActuatorCmd, self.linactCallback, queue_size=10)
        self.pan_sub = rospy.Subscriber('/pan_controller/command', Float64, self.panCallback, queue_size=1)
        self.tilt_sub = rospy.Subscriber('/tilt_controller/command', Float64, self.tiltCallback, queue_size=1)
        self.gripper_sub = rospy.Subscriber('/vector/right_gripper/cmd', GripperCmd, self.gripperCallback, (self.RIGHT_KEY), queue_size=1)
        # Setup some topics that mimic the real robot state topics
        self.lin_state_sub = rospy.Subscriber('/linear_actuator_controller/state', JointTrajectoryControllerState, self.linStateCallback, queue_size=1)
        self.lin_state_pub = rospy.Publisher('/vector/joint_states', JointState, queue_size=1)
        self.gripper_state_sub = rospy.Subscriber('/gripper_controller/state', JointTrajectoryControllerState, self.gripperStateCallback, (self.RIGHT_KEY), queue_size=1)
        self.gripper_joint_state_pub = rospy.Publisher('/vector/right_gripper/joint_states', JointState, queue_size=1)
        self.gripper_stat_pub = rospy.Publisher('/vector/right_gripper/stat', GripperStat, queue_size=1)
        # Initialize necessary components for TF
        self.listener = tf.TransformListener()
        # trans/rot are filled in by updateEEF(); EEFCallback is a no-op until then.
        self.trans = None
        self.rot = None
        # Constants for gripper
        # Fully closed = 0
        # Fully open = 0.085
        # The joint is interval is inverted in that 0 joint position = 0.085 gripper
        # Joint maximum is 0.8
        self.grip_max_width = 0.085
        self.grip_joint_max = 0.8
        self.gripper_cmd = dict()
        self.gripper_cmd[self.RIGHT_KEY] = None
        # Initialize components for moveit IK service
        rospy.logwarn("Waiting for MoveIt! for 10 seconds...")
        try:
            rospy.wait_for_service('compute_ik', timeout=10.0) # Wait for 10 seconds and assumes we don't want IK
            self.compute_ik = rospy.ServiceProxy('compute_ik', GetPositionIK)
        except rospy.ROSException, e:
            rospy.logwarn("MoveIt was not loaded and arm teleop will not be available")
            self.compute_ik = None
        else:
            rospy.logwarn("MoveIt detected")
            # Only subscribe to cartesian velocity commands when IK is available.
            self.eef_sub = rospy.Subscriber('/vector/right_arm/cartesian_vel_cmd', JacoCartesianVelocityCmd, self.EEFCallback, queue_size=1)
        rospy.loginfo("Done Init")
    def jointTrajHelper(self, joint_names, positions):
        """Build a single-point JointTrajectory for *joint_names* reaching
        *positions* (a single scalar target) in one second."""
        # Setup the joint trajectory
        jtm = JointTrajectory()
        jtp = JointTrajectoryPoint()
        jtm.joint_names = joint_names
        jtp.time_from_start = rospy.Duration(1.0)
        jtp.positions = [positions]
        jtm.points = [jtp]
        return jtm
    def linactCallback(self, msg):
        """Forward a LinearActuatorCmd as a trajectory to the sim controller."""
        # Create the trajectory
        jtm = self.jointTrajHelper(self.linact_joint_names, msg.desired_position_m)
        # Publish the command
        self.linact_pub.publish(jtm)
    def panCallback(self, msg):
        """Forward a pan position (Float64) to the simulated pan controller."""
        # Create the trajectory
        jtm = self.jointTrajHelper(self.pan_names, msg.data)
        # Publish the command
        self.pan_pub.publish(jtm)
    def tiltCallback(self, msg):
        """Forward a tilt position (Float64) to the simulated tilt controller."""
        # Create the trajectory
        jtm = self.jointTrajHelper(self.tilt_names, msg.data)
        # Publish the command
        self.tilt_pub.publish(jtm)
    def convertJointTrajectoryControllerState(self, msg, motor_ids=""):
        """Convert a ros_control state message into the dynamixel JointState
        format published by the real robot."""
        # Generate a fake dynamixel msg
        sim_data_msg = dynamixel_msgs.msg.JointState()
        sim_data_msg.header = msg.header
        sim_data_msg.name = msg.joint_names[0]
        sim_data_msg.motor_ids = motor_ids
        sim_data_msg.motor_temps = []
        sim_data_msg.goal_pos = msg.desired.positions[0]
        sim_data_msg.current_pos = msg.actual.positions[0]
        sim_data_msg.error = msg.error.positions[0]
        sim_data_msg.velocity = msg.actual.velocities[0]
        return sim_data_msg
    def convertJointTrajectorySensorMsg(self, msg):
        """Convert a ros_control state message into a sensor_msgs JointState."""
        # Generate fake JointState msg
        sim_data_msg = JointState()
        sim_data_msg.header = msg.header
        sim_data_msg.name = msg.joint_names
        sim_data_msg.position = msg.actual.positions
        sim_data_msg.velocity = msg.actual.velocities
        sim_data_msg.effort = msg.actual.effort
        return sim_data_msg
    def convertJointState2GripperWidth(self, pos):
        """Map a gripper joint angle [0, 0.8] to a width [0.085, 0] (inverted)."""
        grip_cmd_pos = ((self.grip_joint_max - pos)/self.grip_joint_max) * self.grip_max_width
        return grip_cmd_pos
    def convertJointTrajectoryGripperStat(self, msg, side):
        """Build a GripperStat from controller state, preferring the last
        commanded width over the controller's desired position."""
        sim_data_msg = GripperStat()
        sim_data_msg.header = msg.header
        sim_data_msg.position = self.convertJointState2GripperWidth(msg.actual.positions[0])
        desired_pos = self.convertJointState2GripperWidth(msg.desired.positions[0])
        # Replace with commanded value
        if not self.gripper_cmd[side] == None:
            desired_pos = self.gripper_cmd[side]
        sim_data_msg.requested_position = desired_pos
        return sim_data_msg
    def gripperStateCallback(self, msg, side):
        """Republish gripper controller state as joint_states + GripperStat."""
        # Publish the joint state message
        self.gripper_joint_state_pub.publish(self.convertJointTrajectorySensorMsg(msg))
        # Publish the gripper stat message
        self.gripper_stat_pub.publish(self.convertJointTrajectoryGripperStat(msg, side))
    def panStateCallback(self, msg):
        """Republish pan controller state in dynamixel format."""
        self.pan_state_pub.publish(self.convertJointTrajectoryControllerState(msg))
    def tiltStateCallback(self, msg):
        """Republish tilt controller state in dynamixel format."""
        self.tilt_state_pub.publish(self.convertJointTrajectoryControllerState(msg))
    def linStateCallback(self, msg):
        """Republish linear actuator state as /vector/joint_states."""
        self.lin_state_pub.publish(self.convertJointTrajectorySensorMsg(msg))
    def gripperCallback(self, msg, side):
        """Convert a GripperCmd width into the (inverted) joint position and
        send it to the simulated gripper controller."""
        # Store the commanded value
        self.gripper_cmd[side] = msg.position
        # Fully closed = 0
        # Fully open = 0.085
        # The joint is interval is inverted in that 0 joint position = 0.085 gripper
        # Joint maximum is 0.8
        grip_joint_pos = ((self.grip_max_width - msg.position)/self.grip_max_width) * self.grip_joint_max
        # Send the position command for now (does not do force)
        jtm = self.jointTrajHelper(self.gripper_name, grip_joint_pos)
        # Send the command
        self.gripper_pub.publish(jtm)
    def isCommandAllZero(self, msg):
        """True when every linear and angular component of the command is zero."""
        return (msg.x + msg.y + msg.z +
                msg.theta_x + msg.theta_y + msg.theta_z) == 0
    def EEFCallback(self, msg):
        """Integrate a cartesian velocity command into a new end-effector pose,
        solve IK for it, and send the resulting joint trajectory to Gazebo."""
        # Check if we have EEF positions yet
        if self.trans == None or self.rot == None:
            return
        # Check if the command is just zero - if so do nothing
        if self.isCommandAllZero(msg):
            return
        # Determine current position of EEF
        position = self.trans
        # Convert from Quaternion to RPY in radians
        rotation = tf.transformations.euler_from_quaternion(self.rot, 'rxyz')
        rotation = [r * (180/math.pi) for r in rotation] # Convert to degrees
        # Convert msg from kinova convention (xyz) to vector convention
        converted_msg = JacoCartesianVelocityCmd()
        converted_msg.x = msg.z
        converted_msg.y = msg.x
        converted_msg.z = msg.y
        converted_msg.theta_x = msg.theta_z
        converted_msg.theta_y = msg.theta_y
        converted_msg.theta_z = msg.theta_x
        # Propogate the position based on velocity
        # assume a small time dt to compute position and rotation
        pose = defaultdict(dict)
        pose['position']['value'] = dict()
        pose['rotation']['value'] = dict()
        pose['position']['keys'] = ['x','y','z']
        pose['rotation']['keys'] = ['theta_x','theta_y','theta_z']
        pose['rotation']['speed'] = "0.075" # rotation speed (degrees)
        pose['position']['speed'] = "0.15" # position speed (cm)
        # NOTE: eval() builds "converted_msg.<field>*<speed> + position[i]" (or
        # rotation[i]), i.e. new = cmd * step + current, reusing the locals above.
        for val in ['position','rotation']:
            for i in xrange(len(pose[val]['keys'])):
                field = pose[val]['keys'][i]
                pose[val]['value'][field] = eval('converted_msg.'+field+'*'+pose[val]['speed']+' + '+val+'[i]')
        # Pull out values from dictionary
        new_position = pose['position']['value']
        new_rot = pose['rotation']['value']
        for theta in new_rot:
            new_rot[theta] = new_rot[theta] * (math.pi/180.0) # conver to radians
        # Convert into quaternion
        new_rot = tf.transformations.quaternion_from_euler(new_rot['theta_x'], new_rot['theta_y'],new_rot['theta_z'], 'rxyz')
        # Create a Pose and populate
        eef_pose = Pose()
        eef_pose.position = Point()
        eef_pose.position.x = new_position['x']
        eef_pose.position.y = new_position['y']
        eef_pose.position.z = new_position['z']
        eef_pose.orientation = Quaternion()
        eef_pose.orientation.x = new_rot[0]
        eef_pose.orientation.y = new_rot[1]
        eef_pose.orientation.z = new_rot[2]
        eef_pose.orientation.w = new_rot[3]
        # Convert EEF position into joint positions
        joint_positions, joint_names = self.computeIK(eef_pose)
        if joint_positions is not None:
            # Send to trajectory controller
            # For now send directly to gazebo
            jtm = JointTrajectory()
            jtm.joint_names = joint_names
            jtp = JointTrajectoryPoint()
            jtp.positions = joint_positions
            jtp.time_from_start = rospy.Duration(1.0)
            jtm.points = [jtp]
            self.arm_pub.publish(jtm)
    def computeIK(self, pose):
        """Call MoveIt's compute_ik for *pose*; return (positions, names) of
        the arm joints, or (None, None) when no solution / service failure."""
        # Create a pose to compute IK for
        pose_stamped = PoseStamped()
        pose_stamped.header.frame_id = 'base_link' # Hard coded for now
        pose_stamped.header.stamp = rospy.Time.now()
        pose_stamped.pose = pose
        # Create a moveit ik request
        ik_request = PositionIKRequest()
        ik_request.group_name = 'arm' # Hard coded for now
        ik_request.pose_stamped = pose_stamped
        ik_request.timeout.secs = 0.1
        ik_request.avoid_collisions = True
        try:
            request_value = self.compute_ik(ik_request)
            if request_value.error_code.val == -31:
                rospy.logwarn("Teleop Arm: No IK Solution")
            if request_value.error_code.val == 1:
                # Joints 1..6 are the arm; index 0 is skipped (linear actuator).
                joint_positions = request_value.solution.joint_state.position[1:7]
                joint_names = request_value.solution.joint_state.name[1:7]
                return joint_positions,joint_names
            else:
                return None,None
        except rospy.ServiceException, e:
            print "IK service request failed: %s" % e
            return None,None
    def updateEEF(self):
        """Poll TF at 100 Hz, caching the base_link->right_ee_link transform
        in self.trans/self.rot for EEFCallback to use. Blocks until shutdown."""
        rate = rospy.Rate(100.0) # Run at 100hz?
        # Continuously cycles and updates the EEF using tf if available
        while not rospy.is_shutdown():
            try:
                (self.trans,self.rot) = self.listener.lookupTransform('/base_link', 'right_ee_link', rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue
            rate.sleep()
if __name__=='__main__':
    # Start the bridge node and block in the TF polling loop (acts as spin()).
    rospy.init_node('VectorController2Gazebo')
    rospy.loginfo("Starting up Vector Controller Converter Node")
    vec = VectorControllerConverter()
    vec.updateEEF()
    #rospy.spin()
| |
# See readme.md for instructions on running this code.
import re
import os
class VirtualFsHandler(object):
    """Zulip bot handler exposing a per-stream virtual filesystem."""

    def usage(self):
        """Return the top-level help text for the bot."""
        return get_help()

    def handle_message(self, message, client, state_handler):
        """Run one fs command from *message* and reply on the same stream/topic."""
        command = message['content']
        stream = message['display_recipient']
        topic = message['subject']
        sender = message['sender_email']

        # Lazily create per-stream filesystems and per-user working dirs.
        state = state_handler.get_state()
        if state is None:
            state = {}
        if stream not in state:
            state[stream] = fs_new()
        fs = state[stream]
        if sender not in fs['user_paths']:
            fs['user_paths'][sender] = '/'

        fs, msg = fs_command(fs, sender, command)
        msg = '{}:\n'.format(sender) + msg

        state[stream] = fs
        state_handler.set_state(state)

        reply = dict(
            type='stream',
            to=stream,
            subject=topic,
            content=msg,
        )
        client.send_message(reply)
def get_help():
    """Return the bot's usage text (a single literal, kept verbatim)."""
    return '''
The "fs" commands implement a virtual file system for a stream.
The locations of text are persisted for the lifetime of the bot
running, and if you rename a stream, you will lose the info.
Example commands:
```
fs mkdir: create a directory
fs ls: list a directory
fs cd: change directory
fs pwd: show current path
fs write: write text
fs read: read text
fs rm: remove a file
fs rmdir: remove a directory
```
Use commands like `fs help write` for more details on specific
commands.
'''
def test():
    """Self-check: replay sample_conversation() against a fresh filesystem
    and raise AssertionError on the first mismatching response."""
    fs = fs_new()
    user = 'test_user'
    fs['user_paths'][user] = '/'
    assert is_directory(fs, '/')
    for cmd, expected_response in sample_conversation():
        fs, msg = fs_command(fs, user, cmd)
        if msg != expected_response:
            raise AssertionError('''
                cmd: %s
                expected: %s
                but got : %s
                ''' % (cmd, expected_response, msg))
def sample_conversation():
    """Return (command, expected_response) pairs exercising every fs command,
    in an order that builds up and tears down state; used by test()."""
    return [
        ('cd /', 'Current path: /'),
        ('cd /home', 'ERROR: invalid path'),
        ('cd .', 'ERROR: invalid path'),
        ('mkdir home', 'directory created'),
        ('cd home', 'Current path: /home/'),
        ('cd /home/', 'Current path: /home/'),
        ('mkdir stuff/', 'ERROR: stuff/ is not a valid name'),
        ('mkdir stuff', 'directory created'),
        ('write stuff/file1 something', 'file written'),
        ('read stuff/file1', 'something'),
        ('read /home/stuff/file1', 'something'),
        ('read home/stuff/file1', 'ERROR: file does not exist'),
        ('pwd ', '/home/'),
        ('pwd bla', 'ERROR: syntax: pwd'),
        ('ls bla foo', 'ERROR: syntax: ls <optional_path>'),
        ('cd /', 'Current path: /'),
        ('rm home', 'ERROR: /home/ is a directory, file required'),
        ('rmdir home', 'removed'),
        ('ls ', 'WARNING: directory is empty'),
        ('cd home', 'ERROR: invalid path'),
        ('read /home/stuff/file1', 'ERROR: file does not exist'),
        ('cd /', 'Current path: /'),
        ('write /foo contents of /foo', 'file written'),
        ('read /foo', 'contents of /foo'),
        ('write /bar Contents: bar bar', 'file written'),
        ('read /bar', 'Contents: bar bar'),
        ('write /bar invalid', 'ERROR: file already exists'),
        ('rm /bar', 'removed'),
        ('rm /bar', 'ERROR: file does not exist'),
        ('write /bar new bar', 'file written'),
        ('read /bar', 'new bar'),
        ('write /yo/invalid whatever', 'ERROR: /yo is not a directory'),
        ('mkdir /yo', 'directory created'),
        ('read /yo', 'ERROR: /yo/ is a directory, file required'),
        ('ls /yo', 'WARNING: directory is empty'),
        ('read /yo/nada', 'ERROR: file does not exist'),
        ('write /yo whatever', 'ERROR: file already exists'),
        ('write /yo/apple red', 'file written'),
        ('read /yo/apple', 'red'),
        ('mkdir /yo/apple', 'ERROR: file already exists'),
        ('ls /invalid', 'ERROR: file does not exist'),
        ('ls /foo', 'ERROR: /foo is not a directory'),
        ('ls /', '* /*bar*\n* /*foo*\n* /yo/'),
        ('invalid command', 'ERROR: unrecognized command'),
        ('write', 'ERROR: syntax: write <path> <some_text>'),
        ('help', get_help()),
        ('help ls', 'syntax: ls <optional_path>'),
        ('help invalid_command', get_help()),
    ]
# Capture-group patterns for command arguments; raw strings so the backslash
# escapes (\S) are not interpreted by Python (non-raw '\S' is a deprecated
# invalid escape sequence and a SyntaxError in future Python versions).
REGEXES = dict(
    command=r'(cd|ls|mkdir|read|rmdir|rm|write|pwd)',
    path=r'(\S+)',
    optional_path=r'(\S*)',
    some_text=r'(.+)',
)
def get_commands():
    """Map each command name to (handler, list of REGEXES pattern names);
    the pattern names double as the <arg> names shown in syntax help."""
    return {
        'help': (fs_help, ['command']),
        'ls': (fs_ls, ['optional_path']),
        'mkdir': (fs_mkdir, ['path']),
        'read': (fs_read, ['path']),
        'rm': (fs_rm, ['path']),
        'rmdir': (fs_rmdir, ['path']),
        'write': (fs_write, ['path', 'some_text']),
        'cd': (fs_cd, ['path']),
        'pwd': (fs_pwd, []),
    }
def fs_command(fs, user, cmd):
    """Parse and dispatch one command string; return (fs, response_message)."""
    cmd = cmd.strip()
    if cmd == 'help':
        return fs, get_help()

    cmd_name = cmd.split()[0]
    cmd_args = cmd[len(cmd_name):].strip()
    commands = get_commands()
    if cmd_name not in commands:
        return fs, 'ERROR: unrecognized command'

    handler, arg_names = commands[cmd_name]
    pattern = ' '.join(REGEXES[name] for name in arg_names) + '$'
    match = re.match(pattern, cmd_args)
    if match:
        return handler(fs, user, *match.groups())
    if cmd_name == 'help':
        # 'help <unknown>' falls through here: show the general help.
        return fs, get_help()
    return fs, 'ERROR: ' + syntax_help(cmd_name)
def syntax_help(cmd_name):
    """Return a one-line usage string for *cmd_name*, e.g. 'syntax: write <path> <some_text>'."""
    _, arg_names = get_commands()[cmd_name]
    parts = [cmd_name] + ['<' + name + '>' for name in arg_names]
    return 'syntax: {}'.format(' '.join(parts))
def fs_new():
    """Return a fresh filesystem: an empty root directory and no user paths."""
    return {
        '/': directory([]),
        'user_paths': dict(),
    }
def fs_help(fs, user, cmd_name):
    """Return fs unchanged plus the usage line for *cmd_name*."""
    return fs, syntax_help(cmd_name)
def fs_mkdir(fs, user, fn):
    """Create a directory at *fn*; return (new_fs, message)."""
    path, msg = make_path(fs, user, fn)
    if msg:
        return fs, msg
    if path in fs:
        return fs, 'ERROR: file already exists'
    parent = os.path.dirname(path)
    if not is_directory(fs, parent):
        return fs, 'ERROR: {} is not a directory'.format(parent)
    # Copy-on-write: new mapping with the parent listing extended.
    new_fs = fs.copy()
    new_fs[parent] = directory({path}.union(fs[parent]['fns']))
    new_fs[path] = directory([])
    return new_fs, 'directory created'
def fs_ls(fs, user, fn):
    """List the directory *fn* ('' or '.' meaning the user's current path)."""
    if fn in ('', '.'):
        path = fs['user_paths'][user]
    else:
        path, msg = make_path(fs, user, fn)
        if msg:
            return fs, msg
    if path not in fs:
        return fs, 'ERROR: file does not exist'
    if not is_directory(fs, path):
        return fs, 'ERROR: {} is not a directory'.format(path)
    children = fs[path]['fns']
    if not children:
        return fs, 'WARNING: directory is empty'
    lines = ['* ' + nice_path(fs, child) for child in sorted(children)]
    return fs, '\n'.join(lines)
def fs_pwd(fs, user):
    """Return *user*'s current working directory, display-formatted."""
    path = fs['user_paths'][user]
    msg = nice_path(fs, path)
    return fs, msg
def fs_rm(fs, user, fn):
    """Remove the text file at *fn*; return (new_fs, message).

    Fix: the local that held the parent path was named ``directory``, which
    shadowed the module-level ``directory()`` node factory; renamed to
    ``parent``.
    """
    path, msg = make_path(fs, user, fn)
    if msg:
        return fs, msg
    if path not in fs:
        return fs, 'ERROR: file does not exist'
    if fs[path]['kind'] == 'dir':
        return fs, 'ERROR: {} is a directory, file required'.format(nice_path(fs, path))
    new_fs = fs.copy()
    new_fs.pop(path)
    parent = get_directory(path)
    # NOTE: fs.copy() is shallow, so this mutates the parent node shared with
    # the old fs — same behavior as before, kept for compatibility.
    new_fs[parent]['fns'].remove(path)
    return new_fs, 'removed'
def fs_rmdir(fs, user, fn):
    """Remove the directory at *fn* and everything beneath it.

    Fixes:
    * iterate over ``list(new_fs)`` — popping entries while iterating the
      ``.keys()`` view raises RuntimeError on Python 3;
    * renamed the ``directory`` local, which shadowed the module-level
      ``directory()`` node factory.
    """
    path, msg = make_path(fs, user, fn)
    if msg:
        return fs, msg
    if path not in fs:
        return fs, 'ERROR: directory does not exist'
    if fs[path]['kind'] == 'text':
        return fs, 'ERROR: {} is a file, directory required'.format(nice_path(fs, path))
    new_fs = fs.copy()
    new_fs.pop(path)
    parent = get_directory(path)
    new_fs[parent]['fns'].remove(path)
    # Drop all descendants; snapshot the keys so we can pop while looping.
    for sub_path in list(new_fs):
        if sub_path.startswith(path + '/'):
            new_fs.pop(sub_path)
    return new_fs, 'removed'
def fs_write(fs, user, fn, content):
    """Create a new text file at *fn* holding *content*; no overwriting."""
    path, msg = make_path(fs, user, fn)
    if msg:
        return fs, msg
    if path in fs:
        return fs, 'ERROR: file already exists'
    parent = os.path.dirname(path)
    if not is_directory(fs, parent):
        return fs, 'ERROR: {} is not a directory'.format(parent)
    # Copy-on-write: new mapping with the parent listing extended.
    new_fs = fs.copy()
    new_fs[parent] = directory({path}.union(fs[parent]['fns']))
    new_fs[path] = text_file(content)
    return new_fs, 'file written'
def fs_read(fs, user, fn):
    """Return the contents of the text file at *fn*."""
    path, msg = make_path(fs, user, fn)
    if msg:
        return fs, msg
    if path not in fs:
        return fs, 'ERROR: file does not exist'
    entry = fs[path]
    if entry['kind'] == 'dir':
        return fs, 'ERROR: {} is a directory, file required'.format(nice_path(fs, path))
    return fs, entry['content']
def fs_cd(fs, user, fn):
    """Change *user*'s current directory to *fn* (absolute or relative)."""
    # Tolerate a single trailing slash ('/home/' == '/home'), but keep '/'.
    if len(fn) > 1 and fn.endswith('/'):
        fn = fn[:-1]
    if fn.startswith('/'):
        path = fn
    else:
        path = make_path(fs, user, fn)[0]
    if path not in fs:
        return fs, 'ERROR: invalid path'
    if fs[path]['kind'] == 'text':
        return fs, 'ERROR: {} is a file, directory required'.format(nice_path(fs, path))
    fs['user_paths'][user] = path
    return fs, "Current path: {}".format(nice_path(fs, path))
def make_path(fs, user, leaf):
    """Resolve *leaf* against *user*'s current directory.

    Returns a ``(path, error)`` pair; ``error`` is '' on success. Fix: the
    early-return branches used to return lists while the final branch
    returned a tuple — all branches now consistently return tuples (callers
    only unpack or index, so this is backward-compatible).
    """
    if leaf == '/':
        return '/', ''
    if leaf.endswith('/'):
        return '', 'ERROR: {} is not a valid name'.format(leaf)
    if leaf.startswith('/'):
        return leaf, ''
    # Relative name: join onto the user's current directory.
    path = fs['user_paths'][user]
    if not path.endswith('/'):
        path += '/'
    return path + leaf, ''
def nice_path(fs, path):
    """Render *path* for display: files as ``/dir/*name*``, directories with a
    trailing slash, the root unchanged."""
    if path not in fs:
        return 'ERROR: the current directory does not exist'
    slash = path.rfind('/')
    if fs[path]['kind'] == 'text':
        return '{}*{}*'.format(path[:slash + 1], path[slash + 1:])
    if path != '/':
        return '{}/'.format(path)
    return path
def get_directory(path):
    """Return the parent directory of *path* ('/' for top-level entries)."""
    parent = path[:path.rfind('/')]
    return parent or '/'
def directory(fns):
    """Build a directory node holding the set of child paths *fns*."""
    return {'kind': 'dir', 'fns': set(fns)}
def text_file(content):
    """Build a text-file node holding *content*."""
    return {'kind': 'text', 'content': content}
def is_directory(fs, fn):
    """True when *fn* exists in *fs* and is a directory node."""
    entry = fs.get(fn)
    return entry is not None and entry['kind'] == 'dir'
handler_class = VirtualFsHandler  # entry point the bot framework imports
if __name__ == '__main__':
    # We eventually want to test bots with a "real" testing
    # framework.
    test()
| |
"""Helpers for polyphemus.
Utilities API
=============
"""
from __future__ import print_function
import os
import io
import re
import sys
import glob
import tempfile
import functools
import subprocess
from copy import deepcopy
from pprint import pformat
from collections import Mapping, Iterable, Hashable, Sequence, namedtuple, \
MutableMapping
from hashlib import md5
from warnings import warn
# Prefer the C pickle implementation on Python 2; Python 3's pickle already is.
try:
    import cPickle as pickle
except ImportError:
    import pickle
# Python 3 removed basestring; alias it so isinstance checks below work on both.
if sys.version_info[0] >= 3:
    basestring = str
DEFAULT_RC_FILE = "polyphemusrc.py"
"""Default run control file name."""
DEFAULT_PLUGINS = ('polyphemus.base', 'polyphemus.githubhook', 'polyphemus.batlabrun',
                   'polyphemus.batlabstat', 'polyphemus.githubstat',
                   'polyphemus.dashboard')
"""Default list of plugin module names."""
# Python keywords that may not be used as generated attribute/argument names.
FORBIDDEN_NAMES = frozenset(['del', 'global'])
def warn_forbidden_name(forname, inname=None, rename=None):
    """Warn (RuntimeWarning) that forbidden name *forname* was found,
    optionally noting where it appeared and what it was renamed to."""
    parts = ["found forbidden name {0!r}".format(forname)]
    if inname is not None:
        parts.append(" in {0!r}".format(inname))
    if rename is not None:
        parts.append(", renaming to {0!r}".format(rename))
    warn("".join(parts), RuntimeWarning)
def indent(s, n=4, join=True):
    """Indent every line of *s* (a string or a list of lines) by *n* spaces.

    Returns a single joined string when *join* is true, otherwise the list of
    indented lines.  ``None`` entries are skipped.
    """
    pad = " " * n
    lines = s.splitlines() if isinstance(s, basestring) else s
    padded = [pad + line for line in (lines or ()) if line is not None]
    return '\n'.join(padded) if join else padded
class indentstr(str):
    """A special string subclass that can be used to indent the whole string
    inside of format strings by accessing an ``indentN`` attr. For example,
    ``s.indent8`` will return a copy of the string s where every line starts
    with 8 spaces."""
    def __getattr__(self, key):
        # Only invoked for attributes not found normally; 'indent<N>' is
        # synthesized on the fly, everything else defers to str via super().
        if key.startswith('indent'):
            return indent(self, n=int(key[6:]))
        return getattr(super(indentstr, self), key)
def expand_default_args(methods):
    """Expand methods with defaulted arguments into every callable arity.

    *methods* holds ``(mkey, mrtn)`` pairs where ``mkey`` is
    ``(name, arg0, arg1, ...)`` and an argument tuple of length 3 carries a
    default value.  Returns the set of all resulting ``(mkey, mrtn)`` forms.
    """
    expanded = set()
    for mkey, mrtn in methods:
        mname, margs = mkey[0], mkey[1:]
        defaults = [len(arg) == 3 for arg in margs]
        if not any(defaults):
            # No defaults: the signature stands as-is.
            expanded.add((mkey, mrtn))
            continue
        first = defaults.index(True)
        # One form per prefix, from "all defaults omitted" to "all supplied".
        expanded.add(((mname,) + tuple(margs[:first]), mrtn))
        for i in range(first + 1, len(margs) + 1):
            expanded.add(((mname,) + tuple(margs[:i]), mrtn))
    return expanded
def newoverwrite(s, filename, verbose=False):
    """Write *s* to *filename* unless the file already holds exactly *s*.

    Useful for not forcing re-compiles and thus playing nicely with the
    build system: the mtime is untouched when nothing changed.

    Fixes: (1) the unchanged-contents check compared str to bytes, so on
    Python 3 it never matched and the file was always rewritten; it now
    compares the encoded form.  (2) a bare filename (empty dirname) used to
    crash in ``os.makedirs('')``.

    Parameters
    ----------
    s : str
        String contents to write to the file.
    filename : str
        Path to file.
    verbose : bool, optional
        Prints extra message.
    """
    encoded = s.encode()
    if os.path.isfile(filename):
        with io.open(filename, 'rb') as f:
            old = f.read()
        if encoded == old:
            return  # contents identical: leave mtime alone
    else:
        dirname = os.path.dirname(filename)
        # Guard against '' for bare filenames: os.makedirs('') raises.
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
    with io.open(filename, 'wb') as f:
        f.write(encoded)
    if verbose:
        print(" wrote " + filename)
def newcopyover(f1, f2, verbose=False):
    """Copy *f1* over *f2* via newoverwrite(), so *f2* is only rewritten when
    its contents would actually change.

    Parameters
    ----------
    f1 : str
        Path to file to copy from.
    f2 : str
        Path to file to copy over.
    verbose : bool, optional
        Prints extra message.
    """
    if not os.path.isfile(f1):
        return
    with io.open(f1, 'r') as f:
        contents = f.read()
    return newoverwrite(contents, f2, verbose)
def writenewonly(s, filename, verbose=False):
    """Write *s* to *filename* only when the file does not already exist.
    Useful for not touching files.

    Parameters
    ----------
    s : str
        String contents to write to the file.
    filename : str
        Path to file.
    verbose : bool, optional
        Prints extra message.
    """
    if os.path.isfile(filename):
        return
    with open(filename, 'w') as f:
        f.write(str(s))
    if verbose:
        print(" wrote " + filename)
def ensuredirs(f):
    """For a file path, ensure that its directory path exists.

    Fix: a bare filename (empty directory part) is now a no-op; previously
    it crashed in ``os.makedirs('')``.
    """
    d = os.path.split(f)[0]
    if d and not os.path.isdir(d):
        os.makedirs(d)
def touch(filename):
    """Create *filename* if needed and refresh its mtime, like POSIX touch."""
    # Opening in append mode creates the file without clobbering contents.
    with io.open(filename, 'a'):
        os.utime(filename, None)
def exec_file(filename, glb=None, loc=None):
    """Python-2-style execfile(): compile *filename* and exec it in the given
    global/local scopes."""
    with io.open(filename, 'r') as f:
        code = compile(f.read(), filename, "exec")
    exec(code, glb, loc)
#
# Run Control
#
class NotSpecified(object):
    """Singleton sentinel meaning a run-control value has not been given."""

    def __repr__(self):
        return "NotSpecified"

# Replace the class with its single instance; the name is the sentinel itself.
NotSpecified = NotSpecified()
"""A helper class singleton for run control meaning that a 'real' value
has not been given."""
class RunControl(object):
    """A composable configuration class. Unlike argparse.Namespace,
    this keeps the object dictionary (__dict__) separate from the run
    control attributes dictionary (_dict)."""
    def __init__(self, **kwargs):
        """Parameters
        -------------
        kwargs : optional
            Items to place into run control.
        """
        self._dict = {}
        for k, v in kwargs.items():
            setattr(self, k, v)
        # Maps attribute name -> callable(old, new) used by _update() to
        # merge values instead of plain replacement.
        self._updaters = {}
    def __getattr__(self, key):
        # Lookup order: run-control entries, then the instance dict, then
        # the class dict.
        if key in self._dict:
            return self._dict[key]
        elif key in self.__dict__:
            return self.__dict__[key]
        elif key in self.__class__.__dict__:
            return self.__class__.__dict__[key]
        else:
            msg = "RunControl object has no attribute {0!r}.".format(key)
            raise AttributeError(msg)
    def __setattr__(self, key, value):
        # Underscore-prefixed names are real instance attributes; everything
        # else is a run-control entry stored in _dict.
        if key.startswith('_'):
            self.__dict__[key] = value
        else:
            # NotSpecified never clobbers an existing 'real' value.
            if value is NotSpecified and key in self:
                return
            self._dict[key] = value
    def __delattr__(self, key):
        if key in self._dict:
            del self._dict[key]
        elif key in self.__dict__:
            del self.__dict__[key]
        elif key in self.__class__.__dict__:
            del self.__class__.__dict__[key]
        else:
            msg = "RunControl object has no attribute {0!r}.".format(key)
            raise AttributeError(msg)
    def __iter__(self):
        return iter(self._dict)
    def __repr__(self):
        keys = sorted(self._dict.keys())
        s = ", ".join(["{0!s}={1!r}".format(k, self._dict[k]) for k in keys])
        return "{0}({1})".format(self.__class__.__name__, s)
    def _pformat(self):
        # Pretty multi-line rendering of the run-control entries.
        keys = sorted(self._dict.keys())
        f = lambda k: "{0!s}={1}".format(k, pformat(self._dict[k], indent=2))
        s = ",\n ".join(map(f, keys))
        return "{0}({1})".format(self.__class__.__name__, s)
    def __contains__(self, key):
        return key in self._dict or key in self.__dict__ or \
            key in self.__class__.__dict__
    def __eq__(self, other):
        if hasattr(other, '_dict'):
            return self._dict == other._dict
        elif isinstance(other, Mapping):
            return self._dict == other
        else:
            return NotImplemented
    def __ne__(self, other):
        if hasattr(other, '_dict'):
            return self._dict != other._dict
        elif isinstance(other, Mapping):
            return self._dict != other
        else:
            return NotImplemented
    def _update(self, other):
        """Updates the rc with values from another mapping.  If a key is in
        self, other, and self._updaters, then the updater is called with
        (old, new) to compute the merged value.  Updaters should return
        a copy to be safe and not update in-place.
        """
        if hasattr(other, '_dict'):
            other = other._dict
        elif not hasattr(other, 'items'):
            other = dict(other)
        for k, v in other.items():
            if v is NotSpecified:
                # An unspecified value must never clobber an existing one;
                # __setattr__ enforces this again below.
                pass
            elif k in self._updaters and k in self:
                v = self._updaters[k](getattr(self, k), v)
            setattr(self, k, v)
def infer_format(filename, format):
    """Tries to figure out a file format.

    If ``format`` is already a string it is returned unchanged; otherwise
    the format is inferred from the filename suffix ('.pkl.gz' or '.pkl').

    Raises
    ------
    ValueError
        When no format was given and none could be inferred.
    """
    # NOTE(review): ``basestring`` is Python 2 only -- presumably a compat
    # alias is defined elsewhere in this module; confirm for Python 3.
    if isinstance(format, basestring):
        pass
    elif filename.endswith('.pkl.gz'):
        format = 'pkl.gz'
    elif filename.endswith('.pkl'):
        format = 'pkl'
    else:
        raise ValueError("file format could not be determined.")
    return format
def sortedbytype(iterable):
    """Sorts an iterable by types first, then value.

    Items are grouped by type name; groups are emitted in alphabetical
    order of type name, each group internally sorted by value.
    """
    bytype = {}
    for value in iterable:
        bytype.setdefault(type(value).__name__, []).append(value)
    ordered = []
    for typename in sorted(bytype):
        ordered += sorted(bytype[typename])
    return ordered
# Decorative ASCII separator string (a "nyan cat" banner) for output/logs.
nyansep = r'~\_/' * 17 + '~=[,,_,,]:3'
"""WAT?!"""
def flatten(iterable):
    """Generator which returns flattened version of nested sequences.

    Strings are treated as atomic values and yielded whole; any other
    iterable is recursed into; non-iterables are yielded as-is.
    """
    for item in iterable:
        if isinstance(item, basestring) or not isinstance(item, Iterable):
            yield item
        else:
            for sub in flatten(item):
                yield sub
#
# Memoization
#
def ishashable(x):
    """Tests if a value is hashable."""
    if isinstance(x, Hashable):
        # Strings are both Hashable and Iterable; treat them as atomic.
        if isinstance(x, basestring):
            return True
        elif isinstance(x, Iterable):
            # A hashable container (e.g. a tuple) is only truly hashable
            # when every element it contains is hashable too.
            return all(map(ishashable, x))
        else:
            return True
    else:
        return False
def memoize(obj):
    """Generic memoization decorator based off of code from
    http://wiki.python.org/moin/PythonDecoratorLibrary .
    This is not suitable for method caching (see memoize_method).
    """
    # The cache is also exposed on the wrapped callable itself.
    cache = obj.cache = {}

    @functools.wraps(obj)
    def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if not ishashable(key):
            # Unhashable arguments cannot be cached; call straight through.
            return obj(*args, **kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return wrapper
class memoize_method(object):
    """Decorator suitable for memoizing methods, rather than functions
    and classes. This is based off of code that may be found at
    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
    This code was originally released under the MIT license.
    """
    def __init__(self, meth):
        # The undecorated method being wrapped.
        self.meth = meth
    def __get__(self, obj, objtype=None):
        # Accessed on the class: hand back the raw method.
        if obj is None:
            return self.meth
        # Accessed on an instance: bind via partial so that __call__
        # receives the instance as its first positional argument.
        p = functools.partial(self, obj)
        p.__doc__ = self.meth.__doc__
        p.__name__ = self.meth.__name__
        return p
    def __call__(self, *args, **kwargs):
        obj = args[0]
        # The cache lives on the instance itself (created lazily), so each
        # instance memoizes independently.
        cache = obj._cache = getattr(obj, '_cache', {})
        key = (self.meth, args[1:], tuple(sorted(kwargs.items())))
        hashable = ishashable(key)
        if hashable:
            if key not in cache:
                cache[key] = self.meth(*args, **kwargs)
            return cache[key]
        else:
            # Unhashable arguments cannot be cached; call straight through.
            return self.meth(*args, **kwargs)
def check_cmd(args):
    """Runs a command in a subprocess and captures its combined output.

    Parameters
    ----------
    args : sequence of str
        The command and its arguments, passed directly to the subprocess
        (no shell interpretation).

    Returns
    -------
    rtn : int
        The subprocess return code (0 indicates success).
    out : bytes
        Combined stdout/stderr produced by the command.
    """
    # A context manager guarantees the temp file is closed (and deleted)
    # even if subprocess.call raises; an anonymous TemporaryFile suffices
    # since only the handle is used, never the name.
    with tempfile.TemporaryFile() as f:
        rtn = subprocess.call(args, stdout=f, stderr=f)
        f.seek(0)
        out = f.read()
    return rtn, out
#
# Persisted Cache
#
class PersistentCache(MutableMapping):
    """A quick persistent cache backed by a pickle file on disk.

    Every mutation (set/delete) immediately re-pickles the whole cache,
    so writes are durable but O(size of cache).
    """
    def __init__(self, cachefile='cache.pkl'):
        """Parameters
        -------------
        cachefile : str, optional
            Path to description cachefile.
        """
        self.cachefile = cachefile
        # Load any previously persisted state; otherwise start empty.
        if os.path.isfile(cachefile):
            with io.open(cachefile, 'rb') as f:
                self.cache = pickle.load(f)
        else:
            self.cache = {}
    def __len__(self):
        return len(self.cache)
    def __contains__(self, key):
        return key in self.cache
    def __getitem__(self, key):
        return self.cache[key]  # return the results of the finder only
    def __setitem__(self, key, value):
        self.cache[key] = value
        # Persist after every mutation.
        self.dump()
    def __delitem__(self, key):
        del self.cache[key]
        self.dump()
    def __iter__(self):
        for key in self.cache.keys():
            yield key
    def dump(self):
        """Writes the cache out to the filesystem."""
        # Create the parent directory on first write if it does not exist.
        if not os.path.exists(self.cachefile):
            pardir = os.path.split(os.path.abspath(self.cachefile))[0]
            if not os.path.exists(pardir):
                os.makedirs(pardir)
        with io.open(self.cachefile, 'wb') as f:
            pickle.dump(self.cache, f, pickle.HIGHEST_PROTOCOL)
    def __str__(self):
        return pformat(self.cache)
| |
# -*- coding: utf-8 -*-
import numpy as np
class Kernel(object):
    """
    Base class for covariance kernels and their properties; supports
    composing kernels through sums and products.
    """
    def __init__(self, *args):
        """ Store all kernel hyperparameters in the array ``pars``. """
        self.pars = np.array(args)

    def __call__(self, r):
        # Subclasses evaluate the covariance at lag ``r``.
        raise NotImplementedError

    def __add__(self, b):
        return Sum(self, b)

    def __radd__(self, b):
        return self.__add__(b)

    def __mul__(self, b):
        return Product(self, b)

    def __rmul__(self, b):
        return self.__mul__(b)

    def __repr__(self):
        """ Representation of each Kernel instance """
        arglist = ", ".join(str(p) for p in self.pars)
        return "{0}({1})".format(self.__class__.__name__, arglist)
class _operator(Kernel):
    """ To allow operations between two kernels """
    def __init__(self, k1, k2):
        # The two operand kernels being combined.
        self.k1 = k1
        self.k2 = k2
    @property
    def pars(self):
        # Concatenation of both operands' hyperparameter arrays.
        return np.append(self.k1.pars, self.k2.pars)
class Sum(_operator):
    """ To allow the sum of kernels """
    def __repr__(self):
        return "{0} + {1}".format(self.k1, self.k2)
    def __call__(self, r):
        # Kernel addition: evaluate both operands and add the results.
        return self.k1(r) + self.k2(r)
class Product(_operator):
    """ To allow the multiplication of kernels """
    def __repr__(self):
        return "{0} * {1}".format(self.k1, self.k2)
    def __call__(self, r):
        # Kernel multiplication: evaluate both operands, multiply results.
        return self.k1(r) * self.k2(r)
class ExpSquared(Kernel):
    """
    Definition of the exponential squared kernel and its derivatives;
    it is also known as the radial basis function (RBF kernel).

    Important
    The derivative is in respect to log(parameter)

    Parameters
    ES_theta = amplitude of the kernel
    ES_l = characteristic length scale to define how smooth the kernel is
    """
    def __init__(self, ES_theta, ES_l):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(ExpSquared, self).__init__(ES_theta, ES_l)
        self.ES_theta = ES_theta
        self.ES_l = ES_l
    def __call__(self, r):
        # K(r) = theta^2 * exp(-r^2 / (2 l^2))
        f1 = self.ES_theta**2
        f2 = self.ES_l**2
        f3 = (r)**2
        return f1 * np.exp(-0.5* f3/f2)
    def des_dtheta(self, r):
        """ Log-derivative in order to theta """
        # d(theta^2)/dlog(theta) = 2 theta^2, hence the factor of 2.
        f1=self.ES_theta**2
        f2=self.ES_l**2
        f3=(r)**2
        return 2*f1*np.exp(-0.5*f3/f2)
    def des_dl(self, r):
        """ Log-derivative in order to l """
        f1=self.ES_theta**2
        f2=self.ES_l
        f3=(r)**2
        f4=self.ES_l**3
        # Simplifies to theta^2 * (r^2/l^2) * exp(-r^2/(2 l^2)).
        return f1*(f3/f4)*np.exp(-0.5*f3/f2**2) *f2
class ExpSineSquared(Kernel):
    """
    Definition of the exponential sine squared kernel and its derivatives;
    it is also known as the periodic kernel.

    Important
    The derivative is in respect to log(parameter)

    Parameters
    ESS_theta = amplitude of the kernel
    ESS_l = characteristic length scale to define how smooth the kernel is
    ESS_P = periodic repetitions of the kernel
    """
    def __init__(self, ESS_theta, ESS_l, ESS_P):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(ExpSineSquared, self).__init__(ESS_theta, ESS_l, ESS_P)
        self.ESS_theta = ESS_theta
        self.ESS_l = ESS_l
        self.ESS_P = ESS_P
    def __call__(self, r):
        # K(r) = theta^2 * exp(-2 sin^2(pi |r| / P) / l^2)
        f1 = self.ESS_theta**2
        f2 = self.ESS_l**2
        f3 = np.abs(r)
        f4 = self.ESS_P
        return f1*np.exp((-2/f2)*((np.sin(np.pi*f3/f4))**2))
    def dess_dtheta(self,r):
        """ Log-derivative in order to theta """
        f1 = self.ESS_theta**2
        f2 = self.ESS_l**2
        f3 = np.pi/self.ESS_P
        f4 = np.abs(r)
        return 2*f1*np.exp(-(2.0/f2)*np.sin(f3*f4)**2)
    def dess_dl(self,r):
        """ Log-derivative in order to l """
        f1=self.ESS_theta**2
        f2=self.ESS_l**3
        f3=np.pi/self.ESS_P
        f4=np.abs(r)
        f5=self.ESS_l**2
        f6=self.ESS_l
        # Simplifies to (4 theta^2 / l^2) sin^2(pi|r|/P) exp(-2 sin^2 / l^2).
        return (4*f1/f2)*(np.sin(f3*f4)**2)*np.exp((-2./f5)*np.sin(f3*f4)**2) \
               *f6
    def dess_dp(self,r):
        """ Log-derivative in order to P """
        f1=self.ESS_theta**2
        f2=self.ESS_l**2
        f3=np.pi/self.ESS_P
        f5=np.abs(r)
        return f1*(4./f2)*f3*f5*np.cos(f3*f5)*np.sin(f3*f5) \
               *np.exp((-2.0/f2)*np.sin(f3*f5)**2)
class QuasiPeriodic(Kernel):
    """
    Definition of the product between the exponential sine squared kernel
    and the exponential squared kernel, also known as the quasi-periodic
    kernel.  It is defined directly (instead of multiplying two kernels)
    because it is widely used and this is more efficient to run.

    Important
    The derivative is in respect to log(parameter)

    Parameters
    QP_theta = amplitude of the kernel
    QP_l1 and QP_l2 = characteristic length scales to define how
                      smooth the kernel is
    QP_P = periodic repetitions of the kernel
    """
    def __init__(self, QP_theta, QP_l1, QP_l2, QP_P):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(QuasiPeriodic, self).__init__(QP_theta, QP_l1, QP_l2, QP_P)
        self.QP_theta = QP_theta
        self.QP_l1 = QP_l1
        self.QP_l2 = QP_l2
        self.QP_P = QP_P
    def __call__(self, r):
        # K(r) = theta^2 exp(-2 sin^2(pi|r|/P)/l1^2 - r^2/(2 l2^2))
        f1 = self.QP_theta**2
        f2 = self.QP_l1**2
        ff2= self.QP_l2**2
        f3 = np.abs(r)
        f4 = self.QP_P
        return f1*np.exp((-2/f2)*((np.sin(np.pi*f3/f4))**2)-(0.5*f3*f3/ff2))
    def dqp_dtheta(self,r):
        """ Log-derivative in order to theta """
        f1 = self.QP_theta**2
        f2 = self.QP_l1**2
        ff2= self.QP_l2**2
        f3 = np.abs(r)
        f4 = self.QP_P
        return 2*f1*np.exp((-2/f2)*((np.sin(np.pi*f3/f4))**2)-(0.5*f3*f3/ff2))
    def dqp_dl1(self,r):
        """ Log-derivative in order to l1 """
        f1 = self.QP_theta**2
        f2 = self.QP_l1**2
        ff2= self.QP_l2**2
        f3 = np.abs(r)
        f4 = self.QP_P
        return 4*f1*((np.sin(np.pi*f3/f4))**2)/f2 \
               *np.exp((-2/f2)*((np.sin(np.pi*f3/f4))**2)-(0.5*f3*f3/ff2))
    def dqp_dl2(self,r):
        """ Log-derivative in order to l2 """
        f1 = self.QP_theta**2
        f2 = self.QP_l1**2
        ff2= self.QP_l2**2
        f3 = np.abs(r)
        f4 = self.QP_P
        return f1*f3*f3/ff2 \
               *np.exp((-2/f2)*((np.sin(np.pi*f3/f4))**2)-(0.5*f3*f3/ff2))
    def dqp_dp(self,r):
        """ Log-derivative in order to P """
        f1 = self.QP_theta**2
        f2 = self.QP_l1**2
        ff2= self.QP_l2**2
        f3 = np.abs(r)
        f4 = self.QP_P
        return 4*np.pi*f1*np.cos(np.pi*f3/f4)*np.sin(np.pi*f3/f4)/(f2*f4) \
               *np.exp((-2/f2)*((np.sin(np.pi*f3/f4))**2)-(0.5*f3*f3/ff2))
class RatQuadratic(Kernel):
    """
    Definition of the rational quadratic kernel and its derivatives.

    Important
    The derivative is in respect to log(parameter)

    Parameters
    RQ_theta = amplitude of the kernel
    RQ_alpha = weight of large and small scale variations
    RQ_l = characteristic length scale to define how smooth the kernel is
    """
    def __init__(self, RQ_theta, RQ_alpha, RQ_l):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(RatQuadratic, self).__init__(RQ_theta, RQ_alpha, RQ_l)
        self.RQ_theta = RQ_theta
        self.RQ_alpha = RQ_alpha
        self.RQ_l = RQ_l
    def __call__(self, r):
        # K(r) = theta^2 * (1 + r^2 / (2 alpha l^2))^(-alpha)
        f1 = self.RQ_theta**2
        f2 = self.RQ_l**2
        f3 = (r)**2
        f4 = self.RQ_alpha
        return f1*(1+(0.5*f3/(f4*f2)))**(-f4)
    def drq_dtheta(self,r):
        """ Log-derivative in order to theta """
        f1=self.RQ_theta**2
        f2=(r)**2
        f3=self.RQ_alpha
        f4=self.RQ_l**2
        return 2*f1*(1.0 + f2/(2.0*f3*f4))**(-f3)
    def drq_dl(self,r):
        """ Log-derivatives in order to l """
        f1=self.RQ_theta**2
        f2=(r)**2
        f3=self.RQ_alpha
        f4=self.RQ_l**2
        return (f1*f2/f4)*(1.0 + f2/(2.0*f3*f4))**(-1.0-f3)
    def drq_dalpha(self,r):
        """ Log-derivative in order to alpha """
        f1=self.RQ_theta**2
        f2=(r)**2
        f3=self.RQ_alpha
        f4=self.RQ_l**2
        # func0 is the base term; func1 is its alpha-derivative contribution.
        func0=1.0 + f2/(2.0*f3*f4)
        func1=f2/(2.0*f3*f4*func0)
        return f1*(func1-np.log(func0))*func0**(-f3) *f3
class WhiteNoise(Kernel):
    """
    Definition of the white noise kernel and its derivatives.

    Important
    The derivative is in respect to log(parameter)

    Parameters
    WN_theta = amplitude of the kernel
    """
    def __init__(self,WN_theta):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(WhiteNoise,self).__init__(WN_theta)
        self.WN_theta=WN_theta
    def __call__(self, r):
        f1=self.WN_theta**2
        # diag(diag(ones_like(r))) keeps only the diagonal of a square r:
        # white noise contributes variance but no cross-covariance.
        f2=np.diag(np.diag(np.ones_like(r)))
        return f1*f2
    def dwn_dtheta(self,r):
        """ Log-derivative in order to theta """
        f1=self.WN_theta**2
        f2=np.diag(np.diag(np.ones_like(r)))
        return 2*f1*f2
class Exponential(Kernel):
    """
    Exponential kernel and its derivatives; this kernel arises when
    setting v=1/2 in the Matern family of kernels.

    Important
    The derivative is in respect to log(parameter)

    Parameters
    Exp_theta = amplitude of the kernel
    Exp_l = characteristic length scale to define how smooth the kernel is
    """
    def __init__(self,Exp_theta,Exp_l):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(Exponential,self).__init__(Exp_theta,Exp_l)
        self.Exp_theta=Exp_theta
        self.Exp_l=Exp_l

    def __call__(self, r):
        # K(r) = theta^2 * exp(-|r| / l)
        dist = np.abs(r)
        scale = self.Exp_l
        amp2 = self.Exp_theta**2
        return amp2*np.exp(-dist/scale)

    def dexp_dtheta(self,r):
        """ Log-derivative in order to theta """
        dist = np.abs(r)
        scale = self.Exp_l
        amp2 = self.Exp_theta**2
        return 2*amp2*np.exp(-dist/scale)

    def dexp_dl(self,r):
        """ Log-derivative in order to l """
        amp2 = self.Exp_theta**2
        dist = np.abs(r)
        scale = self.Exp_l
        return (amp2*dist/scale)*np.exp(-dist/scale)
class Matern32(Kernel):
    """
    Definition of the Matern 3/2 kernel and its derivatives; this kernel
    arises when setting v=3/2 in the Matern family of kernels.

    Important
    The derivative is in respect to log(parameter)

    Parameters
    M32_theta = amplitude of the kernel
    M32_l = characteristic length scale to define how smooth the kernel is
    """
    def __init__(self,M32_theta,M32_l):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(Matern32,self).__init__(M32_theta,M32_l)
        self.M32_theta=M32_theta
        self.M32_l=M32_l
    def __call__(self, r):
        # K(r) = theta^2 * (1 + sqrt(3)|r|/l) * exp(-sqrt(3)|r|/l)
        f1=np.sqrt(3.0)*np.abs(r)
        f2=self.M32_l
        f3=self.M32_theta**2
        return f3*(1.0 + f1/f2)*np.exp(-f1/f2)
    def dm32_dtheta(self,r):
        """ Log-derivative in order to theta """
        f1=np.sqrt(3.0)*np.abs(r)
        f2=self.M32_l
        f3=self.M32_theta**2
        return 2*f3*(1.0 + f1/f2)*np.exp(-f1/f2)
    def dm32_dl(self,r):
        """ Log-derivative in order to l """
        f1=self.M32_theta**2
        f2=np.sqrt(3.0)*np.abs(r)
        f3=self.M32_l
        f4=self.M32_l**2
        # Algebraically equals theta^2 * (3 r^2/l^2) * exp(-sqrt(3)|r|/l).
        return f3*f1*(f2/f4)*(1+f2/f3)*np.exp(-f2/f3) \
               - f3*f1*(f2/f4)*np.exp(-f2/f3)
class Matern52(Kernel):
    """
    Definition of the Matern 5/2 kernel and its derivatives; this kernel
    arises when setting v=5/2 in the Matern family of kernels.

    Important
    The derivative is in respect to log(parameter)

    Parameters
    M52_theta = amplitude of the kernel
    M52_l = characteristic length scale to define how smooth the kernel is
    """
    def __init__(self,M52_theta,M52_l):
        """
        Because we are "overwriting" the function __init__
        we use this weird super function
        """
        super(Matern52,self).__init__(M52_theta,M52_l)
        self.M52_theta=M52_theta
        self.M52_l=M52_l
    def __call__(self, r):
        # K(r) = theta^2*(1 + sqrt(5)|r|/l + 5 r^2/(3 l^2))*exp(-sqrt(5)|r|/l)
        f1=np.sqrt(5.0)*np.abs(r)
        f2=(np.abs(r))**2
        f3=self.M52_l
        f4=self.M52_l**2
        f5=self.M52_theta**2
        return f5*(1.0 + f1/f3 + (5.0*f2)/(3.0*f4))*np.exp(-f1/f3)
    def dm52_dtheta(self,r):
        """ Log-derivative in order to theta """
        f1=self.M52_theta**2
        f2=self.M52_l
        f3=3*(self.M52_l)**2
        f4=np.sqrt(5)*np.abs(r)
        f5=5*np.abs(r)**2
        return 2*f1*(f5/f3 + f4/f2 +1)*np.exp(-f4/f2)
    def dm52_dl(self,r):
        """ Log-derivative in order to l """
        f1=self.M52_theta**2
        f2=self.M52_l
        f3=self.M52_l**2
        f4=np.abs(r)
        f5=np.abs(r)**2
        # BUG FIX: the previous version multiplied this expression by 2,
        # double-counting the log-derivative.  The correct result is
        #   l*dK/dl = theta^2*(5 l r^2 + 5^(3/2)|r|^3)/(3 l^3)*exp(-sqrt(5)|r|/l)
        # (the factor 2 in dm52_dtheta comes from d(theta^2)/dlog(theta);
        # it has no analogue here, cf. dexp_dl / dm32_dl above).
        return f1*((5*f2*f5 + np.sqrt(5**3)*f5*f4)/(3*f3*f2) \
               *np.exp(-np.sqrt(5)*f4/f2))
##### END
| |
#!/usr/bin/env python
# -*- coding: ascii -*-
u"""
:Copyright:
Copyright 2011 - 2021
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
======================================
pyliblinear - a liblinear python API
======================================
pyliblinear - a liblinear python API.
"""
__author__ = u"Andr\xe9 Malo"
import os as _os
import posixpath as _posixpath
# pylint: disable = no-name-in-module, import-error, raise-missing-from
import setuptools as _setuptools
# pylint: disable = invalid-name
def _doc(filename):
""" Read docs file """
# pylint: disable = unspecified-encoding
args = {} if str is bytes else dict(encoding='utf-8')
try:
with open(_os.path.join('docs', filename), **args) as fp:
return fp.read()
except IOError:
return None
def _lines(multiline):
""" Split multiline string into single line % empty and comments """
return [line for line in (
line.strip() for line in multiline.splitlines(False)
) if line and not line.startswith('#')]
# Static package metadata.  Long-form text fields are loaded from the
# docs/ directory via _doc(); each may be None if the file is missing.
# NOTE(review): desc uses .strip() directly, so docs/SUMMARY must exist
# (otherwise _doc returns None and this raises) -- confirm intent.
package = dict(
    name='pyliblinear',
    top='pyliblinear',
    pathname='pyliblinear',
    provides=_doc('PROVIDES'),
    desc=_doc('SUMMARY').strip(),
    longdesc=_doc('DESCRIPTION'),
    author=__author__,
    email='nd@perlig.de',
    license="Apache License, Version 2.0",
    # keywords=_lines(_doc('KEYWORDS')),
    url='http://opensource.perlig.de/pyliblinear/',
    classifiers=_lines(_doc('CLASSIFIERS') or ''),
    install_requires=[],
)
class BuildFailed(Exception):
    """ The build has failed (raised by build_ext when compiling the C
    extension fails) """
from distutils.command import build_ext as _build_ext # pylint: disable = wrong-import-order
from distutils import errors as _errors # pylint: disable = wrong-import-order
class build_ext(_build_ext.build_ext):  # pylint: disable = no-init
    """ Improved extension building code

    Normalizes all build failures into the single BuildFailed exception
    so callers can treat the C extension as optional.
    """
    def run(self):
        """ Unify exception """
        try:
            _build_ext.build_ext.run(self)
        except _errors.DistutilsPlatformError:
            raise BuildFailed()
    def build_extension(self, ext):
        """
        Build C extension - with extended functionality

        The following features are added here:

        - The macros ``EXT_PACKAGE`` and ``EXT_MODULE`` will be filled (or
          unset) depending on the extensions name, but only if they are not
          already defined.

        - "." is added to the include directories (for cext.h)

        :Parameters:
          `ext` : `Extension`
            The extension to build

        :Return: whatever ``distutils.command.build_ext.build_ext`` returns
        :Rtype: any
        """
        # handle name macros
        macros = dict(ext.define_macros or ())
        tup = ext.name.split('.')
        if len(tup) == 1:
            pkg, mod = None, tup[0]
        else:
            pkg, mod = '.'.join(tup[:-1]), tup[-1]
        if pkg is not None and 'EXT_PACKAGE' not in macros:
            ext.define_macros.append(('EXT_PACKAGE', pkg))
        if 'EXT_MODULE' not in macros:
            ext.define_macros.append(('EXT_MODULE', mod))
        if pkg is None:
            # Top-level module: make sure EXT_PACKAGE is explicitly undefined.
            macros = dict(ext.undef_macros or ())
            if 'EXT_PACKAGE' not in macros:
                ext.undef_macros.append('EXT_PACKAGE')
        try:
            return _build_ext.build_ext.build_extension(self, ext)
        except (_errors.CCompilerError, _errors.DistutilsExecError,
                _errors.DistutilsPlatformError, IOError, ValueError) as e:
            raise BuildFailed(str(e))
class Extension(_setuptools.Extension):
    """ improved functionality """
    def __init__(self, *args, **kwargs):
        """ Initialization

        Accepts the standard setuptools Extension arguments plus a
        required ``version`` keyword, which is exposed to the C code
        through the ``EXT_VERSION`` macro.
        """
        version = kwargs.pop('version')
        self.depends = []
        if 'depends' in kwargs:
            self.depends = kwargs['depends']
        _setuptools.Extension.__init__(self, *args, **kwargs)
        self.define_macros.append(('EXT_VERSION', version))
        # add include path
        included = '.'
        if included not in self.include_dirs:
            self.include_dirs.append(included)
        # add cext.h to the dependencies (normalize paths so a pre-listed
        # "./cext.h" is not duplicated)
        cext_h = _posixpath.normpath(_posixpath.join(included, 'cext.h'))
        for item in self.depends:
            if _posixpath.normpath(item) == cext_h:
                break
        else:
            self.depends.append(cext_h)
def EXTENSIONS(v):
    """ Build the list of C extension modules for package version ``v``.

    Converted from an assigned lambda to a named ``def`` (PEP 8, E731)
    for clearer tracebacks; behavior and call signature are unchanged.
    """
    return [
        Extension('pyliblinear._liblinear', [
            "pyliblinear/bufwriter.c",
            "pyliblinear/compat.c",
            "pyliblinear/iter.c",
            "pyliblinear/main.c",
            "pyliblinear/matrix.c",
            "pyliblinear/model.c",
            "pyliblinear/solver.c",
            "pyliblinear/tokreader.c",
            "pyliblinear/util.c",
            "pyliblinear/vector.c",
            "pyliblinear/liblinear/blas/ddot.c",
            "pyliblinear/liblinear/blas/dscal.c",
            "pyliblinear/liblinear/blas/dnrm2.c",
            "pyliblinear/liblinear/blas/daxpy.c",
            "pyliblinear/liblinear/linear.cpp",
            "pyliblinear/liblinear/newton.cpp",
        ], depends=[
            "pyliblinear/pyliblinear.h",
            "pyliblinear/liblinear/linear.h",
            "pyliblinear/liblinear/newton.h",
            "pyliblinear/liblinear/blas/blasp.h",
            "pyliblinear/liblinear/blas/blas.h",
            "pyliblinear/liblinear/blas/blas-missing-decl.h",
        ], include_dirs=[
            "pyliblinear",
            "pyliblinear/liblinear",
            "pyliblinear/liblinear/blas",
        ], version=v)
    ]
def setup():
    """ Main

    Extracts ``__version__`` from the package's version file, assembles
    the extension modules (with optional debug CFLAGS), and invokes
    setuptools.setup() with the metadata from ``package``.
    """
    # pylint: disable = too-many-branches
    # pylint: disable = unspecified-encoding
    args = {} if str is bytes else dict(encoding='utf-8')
    version_file = '%s/%s' % (package['pathname'],
                              package.get('version_file', '__init__.py'))
    # Scan the version file for a literal "__version__ = ..." assignment.
    with open(version_file, **args) as fp:
        for line in fp:  # pylint: disable = redefined-outer-name
            if line.startswith('__version__'):
                version = line.split('=', 1)[1].strip()
                if version.startswith(("'", '"')):
                    version = version[1:-1].strip()
                break
        else:
            raise RuntimeError("Version not found")
    kwargs = {}
    extensions = EXTENSIONS(version)
    if extensions:
        if 'build_ext' in globals():
            kwargs.setdefault('cmdclass', {})['build_ext'] = build_ext
        kwargs['ext_modules'] = extensions
        # Optional developer override: read extra compile flags from a
        # per-compiler debug file (only when CFLAGS is not already set).
        cflags = None
        if _os.environ.get('CFLAGS') is None:
            from distutils import ccompiler as _ccompiler
            compiler = _ccompiler.get_default_compiler()
            try:
                with open("debug.%s.cflags" % compiler) as fp:
                    cflags = ' '.join([
                        line for line in (line.strip() for line in fp)
                        if line and not line.startswith('#')
                    ]).split() or None
            except IOError:
                pass
        if cflags:
            # Coverage flags imply linking against gcov.
            gcov = 'coverage' in ' '.join(cflags)
            for ext in extensions:
                # pylint: disable = attribute-defined-outside-init
                ext.extra_compile_args = \
                    getattr(ext, 'extra_compile_args', []) + cflags
                if gcov:
                    ext.libraries.append('gcov')
    if package.get('packages', True):
        kwargs['packages'] = [package['top']] + [
            '%s.%s' % (package['top'], item)
            for item in
            _setuptools.find_packages(package['pathname'])
        ]
    if package.get('py_modules'):
        kwargs['py_modules'] = package['py_modules']
    _setuptools.setup(
        name=package['name'],
        author=package['author'],
        author_email=package['email'],
        license=package['license'],
        classifiers=package['classifiers'],
        description=package['desc'],
        long_description=package['longdesc'],
        url=package['url'],
        install_requires=package['install_requires'],
        version=version,
        zip_safe=False,
        **kwargs
    )
if __name__ == '__main__':
setup()
| |
"""Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import Inf, cached_property, normalize_name, splitext
from pip.utils.deprecation import RemovedInPip7Warning, RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip.req.req_requirement import InstallationCandidate
from pip._vendor import html5lib, requests, pkg_resources, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
# Each entry is a (protocol, hostname, port) triple; "*" acts as a wildcard
# when matched in _validate_secure_origin() below.
SECURE_ORIGINS = [
    # protocol, hostname, port
    ("https", "*", "*"),
    ("*", "localhost", "*"),
    ("*", "127.0.0.0/8", "*"),
    ("*", "::1/128", "*"),
    ("file", "*", None),
]
logger = logging.getLogger(__name__)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
    def __init__(self, find_links, index_urls,
                 use_wheel=True, allow_external=(), allow_unverified=(),
                 allow_all_external=False, allow_all_prereleases=False,
                 trusted_hosts=None, process_dependency_links=False,
                 session=None):
        """Parameters
        ----------
        find_links : list of str
            Extra URLs/paths searched for candidate distributions.
        index_urls : list of str
            Base URLs of package indexes (e.g. PyPI simple API).
        use_wheel : bool, optional
            Whether wheel files are considered as candidates.
        allow_external, allow_unverified : iterable of str, optional
            Project names permitted to use externally hosted / unverifiable
            files (names are normalized before storing).
        allow_all_external, allow_all_prereleases : bool, optional
        trusted_hosts : iterable of str, optional
            Hosts exempted from the HTTPS secure-origin warning.
        process_dependency_links : bool, optional
        session : required keyword
            The requests Session used for all HTTP traffic.
        """
        if session is None:
            raise TypeError(
                "PackageFinder() missing 1 required keyword argument: "
                "'session'"
            )
        self.find_links = find_links
        self.index_urls = index_urls
        self.dependency_links = []
        # These are boring links that have already been logged somehow:
        self.logged_links = set()
        self.use_wheel = use_wheel
        # Do we allow (safe and verifiable) externally hosted files?
        self.allow_external = set(normalize_name(n) for n in allow_external)
        # Which names are allowed to install insecure and unverifiable files?
        self.allow_unverified = set(
            normalize_name(n) for n in allow_unverified
        )
        # Anything that is allowed unverified is also allowed external
        self.allow_external |= self.allow_unverified
        # Do we allow all (safe and verifiable) externally hosted files?
        self.allow_all_external = allow_all_external
        # Domains that we won't emit warnings for when not using HTTPS
        self.secure_origins = [
            ("*", host, "*")
            for host in (trusted_hosts if trusted_hosts else [])
        ]
        # Stores if we ignored any external links so that we can instruct
        # end users how to install them if no distributions are available
        self.need_warn_external = False
        # Stores if we ignored any unsafe links so that we can instruct
        # end users how to install them if no distributions are available
        self.need_warn_unverified = False
        # Do we want to allow _all_ pre-releases?
        self.allow_all_prereleases = allow_all_prereleases
        # Do we process dependency links?
        self.process_dependency_links = process_dependency_links
        # The Session we'll use to make requests
        self.session = session
    def add_dependency_links(self, links):
        """Record ``links`` as dependency links, if dependency-link
        processing is enabled; emits a deprecation warning when used."""
        # # FIXME: this shouldn't be global list this, it should only
        # # apply to requirements of the package that specifies the
        # # dependency_links value
        # # FIXME: also, we should track comes_from (i.e., use Link)
        if self.process_dependency_links:
            warnings.warn(
                "Dependency Links processing has been deprecated and will be "
                "removed in a future release.",
                RemovedInPip7Warning,
            )
            self.dependency_links.extend(links)
    def _sort_locations(self, locations):
        """
        Sort locations into "files" (archives) and "urls", and return
        a pair of lists (files,urls)
        """
        files = []
        urls = []

        # puts the url for the given file path into the appropriate list
        # (HTML files are treated as index pages, everything else as an
        # archive file)
        def sort_path(path):
            url = path_to_url(path)
            if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
                urls.append(url)
            else:
                files.append(url)

        for url in locations:
            is_local_path = os.path.exists(url)
            is_file_url = url.startswith('file:')
            is_find_link = url in self.find_links

            if is_local_path or is_file_url:
                if is_local_path:
                    path = url
                else:
                    path = url_to_path(url)
                if is_find_link and os.path.isdir(path):
                    # A find-links directory: classify each entry inside it.
                    path = os.path.realpath(path)
                    for item in os.listdir(path):
                        sort_path(os.path.join(path, item))
                elif is_file_url and os.path.isdir(path):
                    # A file: URL to a directory is treated as an index page.
                    urls.append(url)
                elif os.path.isfile(path):
                    sort_path(path)
            else:
                urls.append(url)

        return files, urls
    def _candidate_sort_key(self, candidate):
        """
        Function used to generate link sort key for link tuples.
        The greater the return value, the more preferred it is.
        If not finding wheels, then sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min()
          3. source archives
        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        # NOTE(review): INSTALLED_VERSION is not among the visible imports;
        # presumably provided elsewhere in this module -- confirm.
        if self.use_wheel:
            support_num = len(supported_tags)
            if candidate.location == INSTALLED_VERSION:
                pri = 1
            elif candidate.location.ext == wheel_ext:
                # can raise InvalidWheelFilename
                wheel = Wheel(candidate.location.filename)
                if not wheel.supported():
                    raise UnsupportedWheel(
                        "%s is not a supported wheel for this platform. It "
                        "can't be sorted." % wheel.filename
                    )
                # More-specific platform tags sort first (smaller index),
                # hence the negation.
                pri = -(wheel.support_index_min())
            else:  # sdist
                pri = -(support_num)
            return (candidate.version, pri)
        else:
            return candidate.version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
existing ordering as secondary. See the docstring for `_link_sort_key`
for details. This function is isolated for easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
    def _validate_secure_origin(self, logger, location):
        """Warn (via ``logger`` and ``warnings``) when ``location`` is not
        served from a secure or explicitly trusted origin.  Does not raise;
        insecure origins are currently only deprecated."""
        # Determine if this url used a secure transport mechanism
        parsed = urllib_parse.urlparse(str(location))
        origin = (parsed.scheme, parsed.hostname, parsed.port)

        # Determine if our origin is a secure origin by looking through our
        # hardcoded list of secure origins, as well as any additional ones
        # configured on this PackageFinder instance.
        for secure_origin in (SECURE_ORIGINS + self.secure_origins):
            # Check to see if the protocol matches
            if origin[0] != secure_origin[0] and secure_origin[0] != "*":
                continue

            try:
                # We need to do this decode dance to ensure that we have a
                # unicode object, even on Python 2.x.
                addr = ipaddress.ip_address(
                    origin[1]
                    if (
                        isinstance(origin[1], six.text_type)
                        or origin[1] is None
                    )
                    else origin[1].decode("utf8")
                )
                network = ipaddress.ip_network(
                    secure_origin[1]
                    if isinstance(secure_origin[1], six.text_type)
                    else secure_origin[1].decode("utf8")
                )
            except ValueError:
                # We don't have both a valid address or a valid network, so
                # we'll check this origin against hostnames.
                if origin[1] != secure_origin[1] and secure_origin[1] != "*":
                    continue
            else:
                # We have a valid address and network, so see if the address
                # is contained within the network.
                if addr not in network:
                    continue

            # Check to see if the port patches
            if (origin[2] != secure_origin[2]
                    and secure_origin[2] != "*"
                    and secure_origin[2] is not None):
                continue

            # If we've gotten here, then this origin matches the current
            # secure origin and we should break out of the loop and continue
            # on.
            break
        else:
            # If the loop successfully completed without a break, that means
            # that the origin we are testing is not a secure origin.
            logger.warning(
                "This repository located at %s is not a trusted host, if "
                "this repository is available via HTTPS it is recommend to "
                "use HTTPS instead, otherwise you may silence this warning "
                "with '--trusted-host %s'.",
                parsed.hostname,
                parsed.hostname,
            )

            warnings.warn(
                "Implicitly allowing locations which are not hosted at a "
                "secure origin is deprecated and will require the use of "
                "--trusted-host in the future.",
                RemovedInPip7Warning,
            )
def find_requirement(self, req, upgrade):
    """Find the best candidate link satisfying *req*.

    Collects candidates from the configured indexes, --find-links,
    local files and dependency links, filters them through the
    requirement's version specifier, and returns the winning Link.

    :param req: the InstallRequirement being searched for.
    :param upgrade: when False, an already-installed satisfying version
        beats any remote candidate and ``None`` is returned.
    :returns: the selected candidate's ``Link``, or ``None`` when the
        installed version already satisfies the requirement.
    :raises DistributionNotFound: when no candidates (or none matching
        the specifier) are found.
    :raises BestVersionAlreadyInstalled: when upgrading and the
        installed version is already the newest available.
    """
    def mkurl_pypi_url(url):
        loc = posixpath.join(url, url_name)
        # For maximum compatibility with easy_install, ensure the path
        # ends in a trailing slash. Although this isn't in the spec
        # (and PyPI can handle it without the slash) some other index
        # implementations might break if they relied on easy_install's
        # behavior.
        if not loc.endswith('/'):
            loc = loc + '/'
        return loc

    url_name = req.url_name

    # Only check main index if index URL is given:
    main_index_url = None
    if self.index_urls:
        # Check that we have the url_name correctly spelled:
        main_index_url = Link(
            mkurl_pypi_url(self.index_urls[0]),
            trusted=True,
        )

        page = self._get_page(main_index_url, req)
        if page is None and PyPI.netloc not in str(main_index_url):
            warnings.warn(
                "Failed to find %r at %s. It is suggested to upgrade "
                "your index to support normalized names as the name in "
                "/simple/{name}." % (req.name, main_index_url),
                RemovedInPip8Warning,
            )

        url_name = self._find_url_name(
            Link(self.index_urls[0], trusted=True),
            url_name, req
        ) or req.url_name

    if url_name is not None:
        locations = [
            mkurl_pypi_url(url)
            for url in self.index_urls] + self.find_links
    else:
        locations = list(self.find_links)

    file_locations, url_locations = self._sort_locations(locations)
    _flocations, _ulocations = self._sort_locations(self.dependency_links)
    file_locations.extend(_flocations)

    # We trust every url that the user has given us whether it was given
    # via --index-url or --find-links
    locations = [Link(url, trusted=True) for url in url_locations]

    # We explicitly do not trust links that came from dependency_links
    locations.extend([Link(url) for url in _ulocations])

    logger.debug('URLs to search for versions for %s:', req)
    for location in locations:
        logger.debug('* %s', location)
        self._validate_secure_origin(logger, location)

    found_versions = []
    found_versions.extend(
        self._package_versions(
            # We trust every directly linked archive in find_links
            [Link(url, '-f', trusted=True) for url in self.find_links],
            req.name.lower()
        )
    )
    page_versions = []
    for page in self._get_pages(locations, req):
        logger.debug('Analyzing links from page %s', page.url)
        with indent_log():
            page_versions.extend(
                self._package_versions(page.links, req.name.lower())
            )
    dependency_versions = list(self._package_versions(
        [Link(url) for url in self.dependency_links], req.name.lower()))
    if dependency_versions:
        logger.debug(
            'dependency_links found: %s',
            ', '.join([
                version.location.url for version in dependency_versions
            ])
        )
    file_versions = list(
        self._package_versions(
            [Link(url) for url in file_locations],
            req.name.lower()
        )
    )
    if (not found_versions
            and not page_versions
            and not dependency_versions
            and not file_versions):
        logger.critical(
            'Could not find any downloads that satisfy the requirement %s',
            req,
        )

        if self.need_warn_external:
            logger.warning(
                "Some externally hosted files were ignored as access to "
                "them may be unreliable (use --allow-external %s to "
                "allow).",
                req.name,
            )

        if self.need_warn_unverified:
            logger.warning(
                "Some insecure and unverifiable files were ignored"
                " (use --allow-unverified %s to allow).",
                req.name,
            )

        raise DistributionNotFound(
            'No distributions at all found for %s' % req
        )

    installed_version = []
    if req.satisfied_by is not None:
        installed_version = [
            InstallationCandidate(
                req.name,
                req.satisfied_by.version,
                INSTALLED_VERSION,
            ),
        ]

    if file_versions:
        file_versions.sort(reverse=True)
        logger.debug(
            'Local files found: %s',
            ', '.join([
                url_to_path(candidate.location.url)
                for candidate in file_versions
            ])
        )

    # This is an intentional priority ordering
    all_versions = (
        file_versions + found_versions + page_versions
        + dependency_versions
    )

    # Filter out anything which doesn't match our specifier
    _versions = set(
        req.specifier.filter(
            [x.version for x in all_versions],
            prereleases=(
                self.allow_all_prereleases
                if self.allow_all_prereleases else None
            ),
        )
    )
    applicable_versions = [
        x for x in all_versions if x.version in _versions
    ]

    # Finally add our existing versions to the front of our versions.
    applicable_versions = installed_version + applicable_versions

    applicable_versions = self._sort_versions(applicable_versions)
    existing_applicable = any(
        i.location is INSTALLED_VERSION
        for i in applicable_versions
    )

    if not upgrade and existing_applicable:
        if applicable_versions[0].location is INSTALLED_VERSION:
            logger.debug(
                'Existing installed version (%s) is most up-to-date and '
                'satisfies requirement',
                req.satisfied_by.version,
            )
        else:
            logger.debug(
                'Existing installed version (%s) satisfies requirement '
                '(most up-to-date version is %s)',
                req.satisfied_by.version,
                # BUG FIX: candidates are InstallationCandidate objects,
                # which every other call site accesses via ``.version``;
                # the old ``applicable_versions[0][2]`` tuple-indexing is
                # a leftover from before the refactor and raised at
                # runtime (and index 2 was the location, not the version).
                applicable_versions[0].version,
            )
        return None

    if not applicable_versions:
        logger.critical(
            'Could not find a version that satisfies the requirement %s '
            '(from versions: %s)',
            req,
            ', '.join(
                sorted(
                    set(str(i.version) for i in all_versions),
                    key=parse_version,
                )
            )
        )

        if self.need_warn_external:
            logger.warning(
                "Some externally hosted files were ignored as access to "
                "them may be unreliable (use --allow-external to allow)."
            )

        if self.need_warn_unverified:
            logger.warning(
                "Some insecure and unverifiable files were ignored"
                " (use --allow-unverified %s to allow).",
                req.name,
            )

        raise DistributionNotFound(
            'No distributions matching the version for %s' % req
        )

    if applicable_versions[0].location is INSTALLED_VERSION:
        # We have an existing version, and its the best version
        logger.debug(
            'Installed version (%s) is most up-to-date (past versions: '
            '%s)',
            req.satisfied_by.version,
            ', '.join(str(i.version) for i in applicable_versions[1:])
            or "none",
        )
        raise BestVersionAlreadyInstalled

    if len(applicable_versions) > 1:
        logger.debug(
            'Using version %s (newest of versions: %s)',
            applicable_versions[0].version,
            ', '.join(str(i.version) for i in applicable_versions)
        )

    selected_version = applicable_versions[0].location

    if (selected_version.verifiable is not None
            and not selected_version.verifiable):
        logger.warning(
            "%s is potentially insecure and unverifiable.", req.name,
        )

    if selected_version._deprecated_regex:
        warnings.warn(
            "%s discovered using a deprecated method of parsing, in the "
            "future it will no longer be discovered." % req.name,
            RemovedInPip7Warning,
        )

    return selected_version
def _find_url_name(self, index_url, url_name, req):
    """Resolve the index's real (canonical) name for a package.

    The supplied name may differ from the index entry only by
    normalization (typically case). Scans the index page for a link
    whose normalized basename matches and returns that basename, or
    ``None`` when nothing matches or the index page cannot be fetched.
    """
    # Vaguely part of the PyPI API... weird but true.
    # FIXME: bad to modify this?
    if not index_url.url.endswith('/'):
        index_url.url += '/'

    page = self._get_page(index_url, req)
    if page is None:
        logger.critical('Cannot fetch index base URL %s', index_url)
        return

    wanted = normalize_name(req.url_name)
    for candidate in page.links:
        base = posixpath.basename(candidate.path.rstrip('/'))
        if normalize_name(base) == wanted:
            logger.debug(
                'Real name of requirement %s is %s', url_name, base,
            )
            return base
    return None
def _get_pages(self, locations, req):
    """
    Yields (page, page_url) from the given locations, skipping
    locations that have errors, and adding download/homepage links
    """
    # Breadth-first crawl: every fetched page may append additional
    # rel="homepage"/"download" links onto the work queue.
    pending = list(locations)
    seen = set()

    while pending:
        location = pending.pop(0)
        if location in seen:
            continue
        seen.add(location)

        page = self._get_page(location, req)
        if page is None:
            continue

        yield page

        # Loop-invariant (pure function of the requirement name).
        normalized = normalize_name(req.name).lower()
        for link in page.rel_links():
            if (normalized not in self.allow_external
                    and not self.allow_all_external):
                self.need_warn_external = True
                logger.debug(
                    "Not searching %s for files because external "
                    "urls are disallowed.",
                    link,
                )
                continue

            if (link.trusted is not None
                    and not link.trusted
                    and normalized not in self.allow_unverified):
                logger.debug(
                    "Not searching %s for urls, it is an "
                    "untrusted link and cannot produce safe or "
                    "verifiable files.",
                    link,
                )
                self.need_warn_unverified = True
                continue

            pending.append(link)
# Pre-compiled patterns used when parsing candidate links.
# "#egg=<name>" fragment of a URL (a copy also lives on Link below).
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
# Loose "<name>-<version>" split of an archive/egg basename.
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)
# Trailing "-pyX[.Y]" marker naming the Python version a file targets.
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
v = self._link_package_versions(link, search_name)
if v is not None:
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
    """
    Return an InstallationCandidate(search_name, version, link) parsed
    from the given link, or None when the link is skipped (wrong
    project, unknown extension, unsupported wheel, external/unverified
    policy, or wrong Python version).

    Meant to be overridden by subclasses, not called by clients.

    (Historical docstring said "an iterable of triples"; the visible
    code returns a single candidate or None.)
    """
    platform = get_platform()

    version = None
    if link.egg_fragment:
        # A "#egg=<name>-<version>" fragment overrides filename parsing.
        egg_info = link.egg_fragment
    else:
        egg_info, ext = link.splitext()
        if not ext:
            # logged_links dedupes the debug message per link.
            if link not in self.logged_links:
                logger.debug('Skipping link %s; not a file', link)
                self.logged_links.add(link)
            return
        if egg_info.endswith('.tar'):
            # Special double-extension case:
            egg_info = egg_info[:-4]
            ext = '.tar' + ext
        if ext not in self._known_extensions():
            if link not in self.logged_links:
                logger.debug(
                    'Skipping link %s; unknown archive format: %s',
                    link,
                    ext,
                )
                self.logged_links.add(link)
            return
        if "macosx10" in link.path and ext == '.zip':
            if link not in self.logged_links:
                logger.debug('Skipping link %s; macosx10 one', link)
                self.logged_links.add(link)
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                logger.debug(
                    'Skipping %s because the wheel filename is invalid',
                    link
                )
                return
            if (pkg_resources.safe_name(wheel.name).lower()
                    != pkg_resources.safe_name(search_name).lower()):
                logger.debug(
                    'Skipping link %s; wrong project name (not %s)',
                    link,
                    search_name,
                )
                return
            if not wheel.supported():
                logger.debug(
                    'Skipping %s because it is not compatible with this '
                    'Python',
                    link,
                )
                return
            # This is a dirty hack to prevent installing Binary Wheels from
            # PyPI unless it is a Windows or Mac Binary Wheel. This is
            # paired with a change to PyPI disabling uploads for the
            # same. Once we have a mechanism for enabling support for
            # binary wheels on linux that deals with the inherent problems
            # of binary distribution this can be removed.
            comes_from = getattr(link, "comes_from", None)
            if (
                    (
                        not platform.startswith('win')
                        and not platform.startswith('macosx')
                        and not platform == 'cli'
                    )
                    and comes_from is not None
                    and urllib_parse.urlparse(
                        comes_from.url
                    ).netloc.endswith(PyPI.netloc)):
                if not wheel.supported(tags=supported_tags_noarch):
                    logger.debug(
                        "Skipping %s because it is a pypi-hosted binary "
                        "Wheel on an unsupported platform",
                        link,
                    )
                    return
            version = wheel.version

    if not version:
        version = self._egg_info_matches(egg_info, search_name, link)
    if version is None:
        logger.debug(
            'Skipping link %s; wrong project name (not %s)',
            link,
            search_name,
        )
        return

    if (link.internal is not None
            and not link.internal
            and not normalize_name(search_name).lower()
            in self.allow_external
            and not self.allow_all_external):
        # We have a link that we are sure is external, so we should skip
        # it unless we are allowing externals
        logger.debug("Skipping %s because it is externally hosted.", link)
        self.need_warn_external = True
        return

    if (link.verifiable is not None
            and not link.verifiable
            and not (normalize_name(search_name).lower()
                     in self.allow_unverified)):
        # We have a link that we are sure we cannot verify its integrity,
        # so we should skip it unless we are allowing unsafe installs
        # for this requirement.
        logger.debug(
            "Skipping %s because it is an insecure and unverifiable file.",
            link,
        )
        self.need_warn_unverified = True
        return

    # Strip a trailing "-pyX.Y" tag and reject versions built for a
    # different interpreter version.
    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            logger.debug(
                'Skipping %s because Python version is incorrect', link
            )
            return
    logger.debug('Found link %s, version: %s', link, version)
    return InstallationCandidate(search_name, version, link)
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
def _get_page(self, link, req):
    """Fetch *link* as an HTMLPage via this finder's session.

    Returns None when the page cannot or should not be fetched (see
    HTMLPage.get_page).
    """
    return HTMLPage.get_page(link, req, session=self.session)
class HTMLPage(object):
    """Represents one page, along with its URL"""

    # FIXME: these regexes are horrible hacks:
    # They scan the *raw bytes* of the page for "home page" / "download
    # url" table headers and the following href; used only as a
    # deprecated fallback by scraped_rel_links().
    _homepage_re = re.compile(b'<th>\\s*home\\s*page', re.I)
    _download_re = re.compile(b'<th>\\s*download\\s+url', re.I)
    _href_re = re.compile(
        b'href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))',
        re.I | re.S
    )

    def __init__(self, content, url, headers=None, trusted=None):
        # Parse the raw response body with html5lib, honouring any
        # charset declared in the Content-Type header.
        # Determine if we have any encoding information in our headers
        encoding = None
        if headers and "Content-Type" in headers:
            content_type, params = cgi.parse_header(headers["Content-Type"])

            if "charset" in params:
                encoding = params['charset']

        self.content = content
        self.parsed = html5lib.parse(
            self.content,
            encoding=encoding,
            namespaceHTMLElements=False,
        )
        self.url = url
        self.headers = headers
        self.trusted = trusted

    def __str__(self):
        return self.url

    @classmethod
    def get_page(cls, link, req, skip_archives=True, session=None):
        """Fetch *link* and return an HTMLPage, or None when the URL is a
        VCS scheme, not HTML, or the request fails (failures are logged,
        not raised)."""
        if session is None:
            raise TypeError(
                "get_page() missing 1 required keyword argument: 'session'"
            )

        url = link.url
        url = url.split('#', 1)[0]

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %s URL %s', scheme, link)
                return None

        try:
            if skip_archives:
                # Cheap pre-filter: HEAD-check likely-archive URLs and
                # skip them unless the server claims they are HTML.
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s',
                                link,
                                content_type,
                            )
                            return

            logger.debug('Getting page %s', url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urllib_parse.urlparse(url)
            if (scheme == 'file'
                    and os.path.isdir(urllib_request.url2pathname(path))):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urllib_parse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s', url)

            resp = session.get(
                url,
                headers={
                    "Accept": "text/html",
                    "Cache-Control": "max-age=600",
                },
            )
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            # something that looks like an archive. However that is not a
            # requirement of an url. Unless we issue a HEAD request on every
            # url we cannot know ahead of time for sure if something is HTML
            # or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s',
                    link,
                    content_type,
                )
                return

            inst = cls(
                resp.content, resp.url, resp.headers,
                trusted=link.trusted,
            )
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req, link, "connection error: %s" % exc, url,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req, link, reason, url,
                level=2,
                meth=logger.info,
            )
        else:
            return inst

    @staticmethod
    def _handle_fail(req, link, reason, url, level=1, meth=None):
        # Log a fetch failure; `meth` selects the log method (debug by
        # default), so callers can escalate severity.
        if meth is None:
            meth = logger.debug

        meth("Could not fetch URL %s: %s", link, reason)
        meth("Will skip URL %s when looking for download links for %s" %
             (link.url, req))

    @staticmethod
    def _get_content_type(url, session):
        """Get the Content-Type of the given url, using a HEAD request"""
        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
        if scheme not in ('http', 'https'):
            # FIXME: some warning or something?
            # assertion error?
            return ''

        resp = session.head(url, allow_redirects=True)
        resp.raise_for_status()

        return resp.headers.get("Content-Type", "")

    @cached_property
    def api_version(self):
        # Integer value of a <meta name="api-version" value="..."> tag,
        # or None when absent/unparseable.
        metas = [
            x for x in self.parsed.findall(".//meta")
            if x.get("name", "").lower() == "api-version"
        ]
        if metas:
            try:
                return int(metas[0].get("value", None))
            except (TypeError, ValueError):
                pass

        return None

    @cached_property
    def base_url(self):
        # The page's <base href="..."> when present, else its own URL.
        bases = [
            x for x in self.parsed.findall(".//base")
            if x.get("href") is not None
        ]
        if bases and bases[0].get("href"):
            return bases[0].get("href")
        else:
            return self.url

    @property
    def links(self):
        """Yields all links in the page"""
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(
                    urllib_parse.urljoin(self.base_url, href)
                )

                # Determine if this link is internal. If that distinction
                # doesn't make sense in this context, then we don't make
                # any distinction.
                internal = None
                if self.api_version and self.api_version >= 2:
                    # Only api_versions >= 2 have a distinction between
                    # external and internal links
                    internal = bool(
                        anchor.get("rel")
                        and "internal" in anchor.get("rel").split()
                    )

                yield Link(url, self, internal=internal)

    def rel_links(self):
        # Union of explicitly-tagged and regex-scraped rel links.
        for url in self.explicit_rel_links():
            yield url
        for url in self.scraped_rel_links():
            yield url

    def explicit_rel_links(self, rels=('homepage', 'download')):
        """Yields all links with the given relations"""
        rels = set(rels)

        for anchor in self.parsed.findall(".//a"):
            if anchor.get("rel") and anchor.get("href"):
                found_rels = set(anchor.get("rel").split())
                # Determine the intersection between what rels were found and
                # what rels were being looked for
                if found_rels & rels:
                    href = anchor.get("href")
                    url = self.clean_link(
                        urllib_parse.urljoin(self.base_url, href)
                    )
                    yield Link(url, self, trusted=False)

    def scraped_rel_links(self):
        # Can we get rid of this horrible horrible method?
        # Regex-scrapes the raw bytes for "home page"/"download url"
        # table rows; links found this way are flagged deprecated.
        for regex in (self._homepage_re, self._download_re):
            match = regex.search(self.content)
            if not match:
                continue
            href_match = self._href_re.search(self.content, pos=match.end())
            if not href_match:
                continue
            url = (
                href_match.group(1)
                or href_match.group(2)
                or href_match.group(3)
            )
            if not url:
                continue
            try:
                url = url.decode("ascii")
            except UnicodeDecodeError:
                continue
            url = self.clean_link(urllib_parse.urljoin(self.base_url, url))
            yield Link(url, self, trusted=False, _deprecated_regex=True)

    # Characters that are safe to leave unquoted in a URL.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        return self._clean_re.sub(
            lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
    """A candidate URL plus its provenance (which page it came from,
    whether it is internal to the index, and whether its source is
    trusted). Ordered and hashed by URL string."""

    def __init__(self, url, comes_from=None, internal=None, trusted=None,
                 _deprecated_regex=False):
        # url can be a UNC windows share
        if url != Inf and url.startswith('\\\\'):
            url = path_to_url(url)

        self.url = url
        self.comes_from = comes_from
        self.internal = internal
        self.trusted = trusted
        # True when the link was found by HTMLPage.scraped_rel_links().
        self._deprecated_regex = _deprecated_regex

    def __str__(self):
        if self.comes_from:
            return '%s (from %s)' % (self.url, self.comes_from)
        else:
            return str(self.url)

    def __repr__(self):
        return '<Link %s>' % self

    # Rich comparisons delegate to the url string so links can be
    # sorted and deduplicated in sets/dicts.
    def __eq__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url == other.url

    def __ne__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url != other.url

    def __lt__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url < other.url

    def __le__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url <= other.url

    def __gt__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url > other.url

    def __ge__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url >= other.url

    def __hash__(self):
        return hash(self.url)

    @property
    def filename(self):
        # Last path segment (or the netloc for a bare host URL),
        # percent-decoded.
        _, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
        name = posixpath.basename(path.rstrip('/')) or netloc
        name = urllib_parse.unquote(name)
        assert name, ('URL %r produced no filename' % self.url)
        return name

    @property
    def scheme(self):
        return urllib_parse.urlsplit(self.url)[0]

    @property
    def netloc(self):
        return urllib_parse.urlsplit(self.url)[1]

    @property
    def path(self):
        return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])

    def splitext(self):
        # (root, extension) of the URL's basename.
        return splitext(posixpath.basename(self.path.rstrip('/')))

    @property
    def ext(self):
        return self.splitext()[1]

    @property
    def url_without_fragment(self):
        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
        return urllib_parse.urlunsplit((scheme, netloc, path, query, None))

    _egg_fragment_re = re.compile(r'#egg=([^&]*)')

    @property
    def egg_fragment(self):
        # The <name> from a trailing "#egg=<name>" fragment, or None.
        match = self._egg_fragment_re.search(self.url)
        if not match:
            return None
        return match.group(1)

    _hash_re = re.compile(
        r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
    )

    @property
    def hash(self):
        # Hex digest embedded in the URL (e.g. "#md5=..."), or None.
        match = self._hash_re.search(self.url)
        if match:
            return match.group(2)
        return None

    @property
    def hash_name(self):
        # Name of the hash algorithm embedded in the URL, or None.
        match = self._hash_re.search(self.url)
        if match:
            return match.group(1)
        return None

    @property
    def show_url(self):
        return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])

    @property
    def verifiable(self):
        """
        Returns True if this link can be verified after download, False if it
        cannot, and None if we cannot determine.
        """
        trusted = self.trusted or getattr(self.comes_from, "trusted", None)
        if trusted is not None and trusted:
            # This link came from a trusted source. It *may* be verifiable but
            # first we need to see if this page is operating under the new
            # API version.
            try:
                api_version = getattr(self.comes_from, "api_version", None)
                api_version = int(api_version)
            except (ValueError, TypeError):
                api_version = None

            if api_version is None or api_version <= 1:
                # This link is either trusted, or it came from a trusted,
                # however it is not operating under the API version 2 so
                # we can't make any claims about if it's safe or not
                # (implicitly returns None here)
                return

            if self.hash:
                # This link came from a trusted source and it has a hash, so we
                # can consider it safe.
                return True
            else:
                # This link came from a trusted source, using the new API
                # version, and it does not have a hash. It is NOT verifiable
                return False
        elif trusted is not None:
            # This link came from an untrusted source and we cannot trust it
            return False
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
# Sentinel compared with ``is`` in find_requirement above.
INSTALLED_VERSION = Link(Inf)
| |
from __future__ import print_function, division
from sympy.core import Basic
from sympy.core.compatibility import iterable, as_int, range
from sympy.utilities.iterables import flatten
from collections import defaultdict
class Prufer(Basic):
    """
    The Prufer correspondence is an algorithm that describes the
    bijection between labeled trees and the Prufer code. A Prufer
    code of a labeled tree is unique up to isomorphism and has
    a length of n - 2.

    Prufer sequences were first used by Heinz Prufer to give a
    proof of Cayley's formula.

    References
    ==========

    .. [1] http://mathworld.wolfram.com/LabeledTree.html

    """
    # Lazily-computed caches, filled on first access of the
    # corresponding property.
    _prufer_repr = None
    _tree_repr = None
    _nodes = None
    _rank = None

    @property
    def prufer_repr(self):
        """Returns Prufer sequence for the Prufer object.

        This sequence is found by removing the highest numbered vertex,
        recording the node it was attached to, and continuuing until only
        two verices remain. The Prufer sequence is the list of recorded nodes.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).prufer_repr
        [3, 3, 3, 4]
        >>> Prufer([1, 0, 0]).prufer_repr
        [1, 0, 0]

        See Also
        ========

        to_prufer

        """
        if self._prufer_repr is None:
            self._prufer_repr = self.to_prufer(self._tree_repr[:], self.nodes)
        return self._prufer_repr

    @property
    def tree_repr(self):
        """Returns the tree representation of the Prufer object.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).tree_repr
        [[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]
        >>> Prufer([1, 0, 0]).tree_repr
        [[1, 2], [0, 1], [0, 3], [0, 4]]

        See Also
        ========

        to_tree

        """
        if self._tree_repr is None:
            self._tree_repr = self.to_tree(self._prufer_repr[:])
        return self._tree_repr

    @property
    def nodes(self):
        """Returns the number of nodes in the tree.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).nodes
        6
        >>> Prufer([1, 0, 0]).nodes
        5

        """
        return self._nodes

    @property
    def rank(self):
        """Returns the rank of the Prufer sequence.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> p = Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]])
        >>> p.rank
        778
        >>> p.next(1).rank
        779
        >>> p.prev().rank
        777

        See Also
        ========

        prufer_rank, next, prev, size

        """
        if self._rank is None:
            self._rank = self.prufer_rank()
        return self._rank

    @property
    def size(self):
        """Return the number of possible trees of this Prufer object.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> Prufer([0]*4).size == Prufer([6]*4).size == 1296
        True

        See Also
        ========

        prufer_rank, rank, next, prev

        """
        # Rank of the last sequence plus one (computed by stepping back
        # from this sequence's rank).
        return self.prev(self.rank).prev().rank + 1

    @staticmethod
    def to_prufer(tree, n):
        """Return the Prufer sequence for a tree given as a list of edges where
        ``n`` is the number of nodes in the tree.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> a = Prufer([[0, 1], [0, 2], [0, 3]])
        >>> a.prufer_repr
        [0, 0]
        >>> Prufer.to_prufer([[0, 1], [0, 2], [0, 3]], 4)
        [0, 0]

        See Also
        ========

        prufer_repr: returns Prufer sequence of a Prufer object.

        """
        d = defaultdict(int)
        L = []
        for edge in tree:
            # Increment the value of the corresponding
            # node in the degree list as we encounter an
            # edge involving it.
            d[edge[0]] += 1
            d[edge[1]] += 1
        for i in range(n - 2):
            # find the smallest leaf
            for x in range(n):
                if d[x] == 1:
                    break
            # find the node it was connected to
            y = None
            for edge in tree:
                if x == edge[0]:
                    y = edge[1]
                elif x == edge[1]:
                    y = edge[0]
                if y is not None:
                    break
            # record and update
            L.append(y)
            for j in (x, y):
                d[j] -= 1
                if not d[j]:
                    d.pop(j)
            tree.remove(edge)
        return L

    @staticmethod
    def to_tree(prufer):
        """Return the tree (as a list of edges) of the given Prufer sequence.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> a = Prufer([0, 2], 4)
        >>> a.tree_repr
        [[0, 1], [0, 2], [2, 3]]
        >>> Prufer.to_tree([0, 2])
        [[0, 1], [0, 2], [2, 3]]

        References
        ==========

        - http://hamberg.no/erlend/posts/2010-11-06-prufer-sequence-compact-tree-representation.html

        See Also
        ========

        tree_repr: returns tree representation of a Prufer object.

        """
        tree = []
        last = []
        n = len(prufer) + 2
        # d[node] = 1 + number of occurrences in the sequence; a node
        # with d == 1 is currently a leaf.
        d = defaultdict(lambda: 1)
        for p in prufer:
            d[p] += 1
        for i in prufer:
            for j in range(n):
            # find the smallest leaf (degree = 1)
                if d[j] == 1:
                    break
            # (i, j) is the new edge that we append to the tree
            # and remove from the degree dictionary
            d[i] -= 1
            d[j] -= 1
            tree.append(sorted([i, j]))
        last = [i for i in range(n) if d[i] == 1] or [0, 1]
        tree.append(last)

        return tree

    @staticmethod
    def edges(*runs):
        """Return a list of edges and the number of nodes from the given runs
        that connect nodes in an integer-labelled tree.

        All node numbers will be shifted so that the minimum node is 0. It is
        not a problem if edges are repeated in the runs; only unique edges are
        returned. There is no assumption made about what the range of the node
        labels should be, but all nodes from the smallest through the largest
        must be present.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> Prufer.edges([1, 2, 3], [2, 4, 5]) # a T
        ([[0, 1], [1, 2], [1, 3], [3, 4]], 5)

        Duplicate edges are removed:

        >>> Prufer.edges([0, 1, 2, 3], [1, 4, 5], [1, 4, 6]) # a K
        ([[0, 1], [1, 2], [1, 4], [2, 3], [4, 5], [4, 6]], 7)

        """
        e = set()
        # NOTE(review): this initial nmin is overwritten below and never
        # used — confirm it can be dropped.
        nmin = runs[0][0]
        for r in runs:
            for i in range(len(r) - 1):
                a, b = r[i: i + 2]
                if b < a:
                    a, b = b, a
                e.add((a, b))
        rv = []
        got = set()
        nmin = nmax = None
        for ei in e:
            for i in ei:
                got.add(i)
            nmin = min(ei[0], nmin) if nmin is not None else ei[0]
            nmax = max(ei[1], nmax) if nmax is not None else ei[1]
            rv.append(list(ei))
        missing = set(range(nmin, nmax + 1)) - got
        if missing:
            # NOTE(review): ``missing`` already holds the actual node
            # labels, so adding nmin again looks like a double shift
            # whenever nmin != 0 and the error message would report the
            # wrong labels — confirm against upstream sympy.
            missing = [i + nmin for i in missing]
            if len(missing) == 1:
                msg = 'Node %s is missing.' % missing.pop()
            else:
                msg = 'Nodes %s are missing.' % list(sorted(missing))
            raise ValueError(msg)
        if nmin != 0:
            # Shift all labels so the smallest node becomes 0.
            for i, ei in enumerate(rv):
                rv[i] = [n - nmin for n in ei]
            nmax -= nmin
        return sorted(rv), nmax + 1

    def prufer_rank(self):
        """Computes the rank of a Prufer sequence.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> a = Prufer([[0, 1], [0, 2], [0, 3]])
        >>> a.prufer_rank()
        0

        See Also
        ========

        rank, next, prev, size

        """
        # The sequence is read as a base-`nodes` number, least
        # significant digit last.
        r = 0
        p = 1
        for i in range(self.nodes - 3, -1, -1):
            r += p*self.prufer_repr[i]
            p *= self.nodes
        return r

    @classmethod
    def unrank(self, rank, n):
        """Finds the unranked Prufer sequence.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> Prufer.unrank(0, 4)
        Prufer([0, 0])

        """
        # Note: although declared as a classmethod, the first parameter
        # is (confusingly) named ``self``; it is the class.
        n, rank = as_int(n), as_int(rank)
        L = defaultdict(int)
        for i in range(n - 3, -1, -1):
            L[i] = rank % n
            rank = (rank - L[i])//n
        return Prufer([L[i] for i in range(len(L))])

    def __new__(cls, *args, **kw_args):
        """The constructor for the Prufer object.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer

        A Prufer object can be constructed from a list of edges:

        >>> a = Prufer([[0, 1], [0, 2], [0, 3]])
        >>> a.prufer_repr
        [0, 0]

        If the number of nodes is given, no checking of the nodes will
        be performed; it will be assumed that nodes 0 through n - 1 are
        present:

        >>> Prufer([[0, 1], [0, 2], [0, 3]], 4)
        Prufer([[0, 1], [0, 2], [0, 3]], 4)

        A Prufer object can be constructed from a Prufer sequence:

        >>> b = Prufer([1, 3])
        >>> b.tree_repr
        [[0, 1], [1, 3], [2, 3]]

        """
        ret_obj = Basic.__new__(cls, *args, **kw_args)
        args = [list(args[0])]
        if args[0] and iterable(args[0][0]):
            if not args[0][0]:
                raise ValueError(
                    'Prufer expects at least one edge in the tree.')
            # NOTE(review): ``args`` was rebound to a single-element list
            # just above, so this branch can never be taken and an
            # explicit node count passed as the second argument appears
            # to be ignored here — verify against upstream.
            if len(args) > 1:
                nnodes = args[1]
            else:
                nodes = set(flatten(args[0]))
                nnodes = max(nodes) + 1
                if nnodes != len(nodes):
                    missing = set(range(nnodes)) - nodes
                    if len(missing) == 1:
                        msg = 'Node %s is missing.' % missing.pop()
                    else:
                        msg = 'Nodes %s are missing.' % list(sorted(missing))
                    raise ValueError(msg)
            ret_obj._tree_repr = [list(i) for i in args[0]]
            ret_obj._nodes = nnodes
        else:
            ret_obj._prufer_repr = args[0]
            ret_obj._nodes = len(ret_obj._prufer_repr) + 2
        return ret_obj

    def next(self, delta=1):
        """Generates the Prufer sequence that is delta beyond the current one.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> a = Prufer([[0, 1], [0, 2], [0, 3]])
        >>> b = a.next(1) # == a.next()
        >>> b.tree_repr
        [[0, 2], [0, 1], [1, 3]]
        >>> b.rank
        1

        See Also
        ========

        prufer_rank, rank, prev, size

        """
        return Prufer.unrank(self.rank + delta, self.nodes)

    def prev(self, delta=1):
        """Generates the Prufer sequence that is -delta before the current one.

        Examples
        ========

        >>> from sympy.combinatorics.prufer import Prufer
        >>> a = Prufer([[0, 1], [1, 2], [2, 3], [1, 4]])
        >>> a.rank
        36
        >>> b = a.prev()
        >>> b
        Prufer([1, 2, 0])
        >>> b.rank
        35

        See Also
        ========

        prufer_rank, rank, next, size

        """
        return Prufer.unrank(self.rank - delta, self.nodes)
| |
from maya import OpenMaya, OpenMayaUI, cmds
from . import utils
# Import PySide (Qt 4, Maya 2016 and earlier) or PySide2 (Qt 5, Maya 2017+),
# aliasing shiboken2 to shiboken so the rest of the module is version agnostic.
qtVersion = cmds.about(qtVersion=True)
if qtVersion.startswith("4") or type(qtVersion) not in [str, unicode]:
    # Qt 4: all widget classes live in PySide.QtGui.
    from PySide.QtGui import *
    from PySide.QtCore import *
    import shiboken
else:
    # Qt 5: widget classes moved to PySide2.QtWidgets.
    from PySide2.QtGui import *
    from PySide2.QtCore import *
    from PySide2.QtWidgets import *
    import shiboken2 as shiboken
# ----------------------------------------------------------------------------
# Window title of the tool and the Maya control name of the main channel box.
TITLE = "Reorder Attr"
CHANNELBOX = "ChannelBoxForm"
# ----------------------------------------------------------------------------
def mayaWindow():
    """
    Get Maya's main window.

    :rtype: QMainWindow
    """
    pointer = OpenMayaUI.MQtUtil.mainWindow()
    return shiboken.wrapInstance(long(pointer), QMainWindow)
# ----------------------------------------------------------------------------
def mayaToQT(name):
    """
    Maya -> QWidget

    Try the control, layout and menu item finders in turn and wrap the
    first pointer found; returns None when no ui object matches.

    :param str name: Maya name of an ui object
    :return: QWidget of parsed Maya name
    :rtype: QWidget
    """
    finders = (
        OpenMayaUI.MQtUtil.findControl,
        OpenMayaUI.MQtUtil.findLayout,
        OpenMayaUI.MQtUtil.findMenuItem,
    )
    for finder in finders:
        ptr = finder(name)
        if ptr is not None:
            return shiboken.wrapInstance(long(ptr), QWidget)
def qtToMaya(widget):
    """
    QWidget -> Maya name

    :param QWidget widget: QWidget of a maya ui object
    :return: Maya name of parsed QWidget
    :rtype: str
    """
    pointer = shiboken.getCppPointer(widget)[0]
    return OpenMayaUI.MQtUtil.fullName(long(pointer))
# ----------------------------------------------------------------------------
def getChannelBox():
    """
    Get ChannelBox, convert the main channel box to QT.

    :return: Maya's main channel box
    :rtype: QWidget
    """
    return mayaToQT(CHANNELBOX)
def getChannelBoxMenu():
    """
    Get ChannelBox Menu, convert the main channel box to QT and return the
    Edit QMenu which is part of the channel box' children. Returns None
    when no menu labeled "Edit" is found.

    :return: Maya's main channel box menu
    :rtype: QMenu
    """
    # scan the channel box' child menus for the one labeled "Edit"
    for menu in getChannelBox().findChildren(QMenu):
        if menu.menuAction().text() == "Edit":
            return menu
# ----------------------------------------------------------------------------
class AttributeItem(QListWidgetItem):
    """
    List widget item representing one user defined attribute on a node.
    The short, nice and long display names are queried once on creation so
    renaming later is a plain dictionary lookup.
    """
    def __init__(self, node, attr):
        QListWidgetItem.__init__(self)

        # variables
        self._node = node
        self._attr = attr

        # cache all three display names up front
        self.modes = {
            "short": cmds.attributeQuery(attr, node=node, shortName=True),
            "nice": cmds.attributeQuery(attr, node=node, niceName=True),
            "long": cmds.attributeQuery(attr, node=node, longName=True),
        }

    # ------------------------------------------------------------------------

    @property
    def node(self):
        """:return: Node the attribute lives on."""
        return self._node

    @property
    def attr(self):
        """:return: Attribute name."""
        return self._attr

    # ------------------------------------------------------------------------

    @property
    def name(self):
        """:return: Full "node.attr" path of the attribute."""
        return "%s.%s" % (self.node, self.attr)

    # ------------------------------------------------------------------------

    def rename(self, mode):
        """
        Update the displayed text using the parsed mode; the three possible
        modes and corresponding names were stored when this item was
        initialized.
        """
        self.setText(self.modes.get(mode))

    # ------------------------------------------------------------------------

    def delete(self):
        """
        Delete the attribute. The lock state is cleared first so the
        attribute can actually be deleted; both calls are wrapped into one
        undo chunk for later undoing.
        """
        with utils.UndoChunkContext():
            cmds.setAttr(self.name, lock=False)
            cmds.deleteAttr(self.name)
# ----------------------------------------------------------------------------
class AttributeDisplayWidget(QWidget):
    """
    Row of radio buttons ("Long", "Short", "Nice") controlling how attribute
    names are displayed. The lower cased name of the selected button is
    emitted through ``signal`` whenever the selection changes.
    """
    signal = Signal(str)

    def __init__(self, parent=None, defaultName="Long"):
        QWidget.__init__(self, parent)

        # create channel name options
        layout = QHBoxLayout(self)

        label = QLabel(self)
        label.setText("Channel Names:")
        layout.addWidget(label)

        self.group = QButtonGroup(self)
        self.group.buttonReleased.connect(self.buttonReleased)

        # create one radio button per naming mode
        for buttonName in ("Long", "Short", "Nice"):
            radio = QRadioButton(self)
            radio.setText(buttonName)
            layout.addWidget(radio)
            self.group.addButton(radio)

            # check the default button
            if buttonName == defaultName:
                radio.setChecked(True)

    # ------------------------------------------------------------------------

    def buttonReleased(self):
        """
        Called when the radio button selection changes; reads the checked
        button's name, lower cases it and emits it through ``signal``.
        """
        checked = self.group.checkedButton()
        self.signal.emit(checked.text().lower())
class DropListWidget(QListWidget):
    """
    List widget that supports internal drag and drop reordering; ``signal``
    is emitted after every completed drop so listeners can react to the new
    order.
    """
    signal = Signal()

    def __init__(self, parent=None):
        QListWidget.__init__(self, parent)

    # ------------------------------------------------------------------------

    @property
    def attributes(self):
        """
        Get all attributes in the widget. The list is returned in reverse
        row order because the deletion process has to run back to front.

        :return: List of attributes ( AttributeItem )
        :rtype: list
        """
        return [self.item(row) for row in reversed(range(self.count()))]

    # ------------------------------------------------------------------------

    def rename(self, mode):
        """
        Rename all attributes by setting the mode; the mode can be any of
        the display modes initialized on the attribute items.

        :param str mode: Attribute name mode: "long", "short" or "nice".
        """
        for item in self.attributes:
            item.rename(mode)

    # ------------------------------------------------------------------------

    def updateUI(self, node, mode):
        """
        Repopulate the widget with the user defined attributes of the parsed
        node; the displayed attribute name depends on the mode.

        :param str node: Node of which to query user defined attributes
        :param str mode: Attribute name mode: "long", "short" or "nice".
        """
        self.clear()
        if not node:
            return

        # add one item per user defined attribute
        for attr in cmds.listAttr(node, ud=True) or []:
            item = AttributeItem(node, attr)
            item.rename(mode)
            self.addItem(item)

    def dropEvent(self, event):
        QListWidget.dropEvent(self, event)
        self.signal.emit()
# ----------------------------------------------------------------------------
class ReorderAttributesWidget(QWidget):
    """
    Main tool window: shows the user defined attributes of the last selected
    node and lets the user reorder them with drag and drop. Reordering is
    implemented by deleting all attributes and undoing the deletions in the
    new order (see :meth:`reorder`).
    """
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        # variables
        self._id = None  # selection-changed callback id, set in addCallback()
        self.node = None
        self.mode = "long"
        # set ui
        self.setParent(parent)
        self.setWindowFlags(Qt.Window)
        self.setWindowTitle(self.title)
        self.setWindowIcon(QIcon(":/attributes.png"))
        self.resize(300, 500)
        # create layout
        layout = QVBoxLayout(self)
        # create attribute display widget
        name = AttributeDisplayWidget(self)
        layout.addWidget(name)
        # create attribute widget (drag and drop re-orderable list)
        self.widget = DropListWidget(self)
        self.widget.setDragDropMode(
            QAbstractItemView.InternalMove
        )
        layout.addWidget(self.widget)
        # connect signals
        name.signal.connect(self.widget.rename)
        self.widget.signal.connect(self.reorder)
        # update
        self.updateUI()
        self.addCallback()
    # ------------------------------------------------------------------------
    @property
    def title(self):
        """
        The title of the window, this title differs based on the selection.
        If no node is active, the default title will be returned. If a node
        is active, the full path will be stripped to a base name and
        appended to the default title.

        :return: Window title
        :rtype: str
        """
        if not self.node:
            return TITLE
        name = self.node.split("|")[-1]
        return "{0} - {1}".format(TITLE, name)
    # ------------------------------------------------------------------------
    def isReferenced(self):
        """
        Check if the selected node is referenced; reordering of attributes
        is not supported on referenced objects.

        :return: Referenced state of self.node
        :rtype: bool
        """
        if self.node and cmds.referenceQuery(self.node, inr=True):
            return True
        return False
    # ------------------------------------------------------------------------
    def updateUI(self, *args):
        """
        Update function that is run every time the selection is changed. The
        latest selected node is queried and the list widget updated with the
        attributes of that node. The UI is disabled if the selected node is
        referenced.
        """
        # get latest selected node
        self.node = utils.getLastSelectedNode()
        # update widget
        self.widget.updateUI(self.node, self.mode)
        # update title
        self.setWindowTitle(self.title)
        # disable ui if referenced
        referenced = self.isReferenced()
        self.widget.setEnabled(not referenced)
    # ------------------------------------------------------------------------
    def reorder(self):
        """
        Reorder all of the attributes based on the new attribute list. All
        attributes in the list are deleted, in such an order that once every
        deletion is undone the attribute order matches the user's input.
        """
        with utils.UndoStateContext():
            # delete back to front, then undo each deletion; Maya re-adds
            # the attributes in reverse, producing the requested order
            for attr in self.widget.attributes:
                attr.delete()
            for _ in range(self.widget.count()):
                cmds.undo()
    # ------------------------------------------------------------------------
    def addCallback(self):
        """
        Register a callback to run the update function every time the
        selection list is modified.
        """
        self._id = OpenMaya.MModelMessage.addCallback(
            OpenMaya.MModelMessage.kActiveListModified,
            self.updateUI
        )
    def removeCallback(self):
        """
        Remove the callback that updates the ui every time the selection
        list is modified.
        """
        OpenMaya.MMessage.removeCallback(self._id)
    # ------------------------------------------------------------------------
    def closeEvent(self, event):
        """
        Subclass the closeEvent function to first remove the callback,
        this callback shouldn't be floating around and should be deleted
        with the widget.
        """
        self.removeCallback()
        QWidget.closeEvent(self, event)
# ----------------------------------------------------------------------------
def show(*args):
    """
    Create the Reorder Attr window parented under Maya's main window and
    display it. Extra arguments from menu/shelf callbacks are ignored.
    """
    window = ReorderAttributesWidget(mayaWindow())
    window.show()
| |
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path as path
import re
import sys
try:
import json
except ImportError:
import simplejson as json
sys.path.append(path.dirname(path.abspath(__file__)))
from devtools_paths import third_party_path
sys.path.append(path.join(third_party_path(), 'inspector_protocol'))
import pdl # pylint: disable=F0401
# Mapping from protocol primitive type names to Closure-compiler type
# annotation strings.
type_traits = {
    "any": "*",
    "string": "string",
    "binary": "string",
    "integer": "number",
    "number": "number",
    "boolean": "boolean",
    "array": "!Array<*>",
    "object": "!Object",
}
# Populated by generate_protocol_externs(): fully qualified protocol type id
# ("Domain.Type") -> generated Closure type name ("Protocol.Domain.Type").
ref_types = {}
def full_qualified_type_id(domain_name, type_id):
    """Qualify *type_id* with *domain_name* unless it already contains a dot."""
    if "." in type_id:
        return type_id
    return "%s.%s" % (domain_name, type_id)
def fix_camel_case(name):
    """
    Convert a dashed protocol identifier to CamelCase; a leading dash is
    turned into a "Negative" prefix and the HTML/XML/WML/API acronyms are
    fully upper-cased.
    """
    prefix = ""
    if name[0] == "-":
        prefix = "Negative"
        name = name[1:]
    camel = re.sub(r'-(\w)', lambda match: match.group(1).upper(), name)
    camel = to_title_case(camel)
    camel = re.sub(r'(?i)HTML|XML|WML|API', lambda match: match.group(0).upper(), camel)
    return prefix + camel
def to_title_case(name):
    """Upper-case only the first character of *name*; empty input passes through."""
    if not name:
        return name
    return name[0].upper() + name[1:]
def generate_enum(name, json):
    """
    Render a Closure ``@enum {string}`` declaration named *name* from the
    "enum" entries of the protocol description *json*.
    """
    members = [" %s: \"%s\"" % (fix_camel_case(member), member)
               for member in json["enum"]]
    body = ",\n".join(members)
    return "\n/** @enum {string} */\n%s = {\n%s\n};\n" % (name, body)
def param_type(domain_name, param):
if "type" in param:
if param["type"] == "array":
items = param["items"]
return "!Array<%s>" % param_type(domain_name, items)
else:
return type_traits[param["type"]]
if "$ref" in param:
type_id = full_qualified_type_id(domain_name, param["$ref"])
if type_id in ref_types:
return ref_types[type_id]
else:
print "Type not found: " + type_id
return "!! Type not found: " + type_id
def param_name(param):
    """Return a JS-safe parameter name ("arguments" is a reserved word)."""
    name = param["name"]
    if name == "arguments":
        return "_arguments"
    return name
def load_schema(file, domains):
    """
    Parse the protocol description at path *file* with pdl and extend
    *domains* with the "domains" entries found in it.

    The file handle is managed with a context manager so it is closed even
    when pdl.loads raises (the original leaked the handle in that case).
    """
    # NOTE: the parameter name `file` shadows the Python 2 builtin but is
    # kept for backward compatibility with existing callers.
    with open(file, "r") as input_file:
        parsed_json = pdl.loads(input_file.read(), file)
    domains.extend(parsed_json["domains"])
def generate_protocol_externs(output_path, file1, file2):
    """
    Generate a Closure externs file at *output_path* from the two protocol
    description files *file1* and *file2*.

    Emits, per domain: the Protocol.<Domain> namespace, an Agent constructor
    with one method + Request/Response typedef pair per command, typedefs and
    enums for declared types, a Dispatcher with one method per event, and
    finally accessor/register methods on Protocol.TargetBase.
    """
    domains = []
    load_schema(file1, domains)
    load_schema(file2, domains)
    output_file = open(output_path, "w")
    # First pass: register every declared type so $ref parameters can be
    # resolved by param_type() regardless of declaration order.
    # NOTE(review): loop variable `type` shadows the builtin; kept as-is.
    for domain in domains:
        domain_name = domain["domain"]
        if "types" in domain:
            for type in domain["types"]:
                type_id = full_qualified_type_id(domain_name, type["id"])
                ref_types[type_id] = "Protocol.%s.%s" % (domain_name, type["id"])
    # Second pass: emit the externs for each domain.
    for domain in domains:
        domain_name = domain["domain"]
        output_file.write("Protocol.%s = {};\n" % domain_name)
        output_file.write("\n\n/**\n * @constructor\n*/\n")
        output_file.write("Protocol.%sAgent = function(){};\n" % domain_name)
        if "commands" in domain:
            for command in domain["commands"]:
                output_file.write("\n/**\n")
                params = []
                in_param_to_type = {}
                out_param_to_type = {}
                has_return_value = "returns" in command
                # Document input parameters; optional ones get an opt_ prefix
                # and a |undefined union in the request typedef.
                if "parameters" in command:
                    for in_param in command["parameters"]:
                        in_param_name = param_name(in_param)
                        if "optional" in in_param:
                            in_param_to_type[in_param_name] = "(%s|undefined)" % param_type(domain_name, in_param)
                            params.append("opt_%s" % in_param_name)
                            output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, in_param), in_param_name))
                        else:
                            in_param_to_type[in_param_name] = param_type(domain_name, in_param)
                            params.append(in_param_name)
                            output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, in_param), in_param_name))
                returns = []
                returns.append("?Protocol.Error")
                if ("error" in command):
                    returns.append("%s=" % param_type(domain_name, command["error"]))
                if (has_return_value):
                    for out_param in command["returns"]:
                        out_param_type = param_type(domain_name, out_param)
                        out_param_to_type[out_param["name"]] = out_param_type
                        if ("optional" in out_param):
                            returns.append("%s=" % out_param_type)
                        else:
                            returns.append("%s" % out_param_type)
                # The promise resolves with the first return value, made
                # nullable; leading !/? modifiers are stripped first.
                if has_return_value and len(command["returns"]) > 0:
                    out_param_type = param_type(domain_name, command["returns"][0])
                    if re.match(r"^[!?]", out_param_type[:1]):
                        out_param_type = out_param_type[1:]
                    out_param_type = "?%s" % out_param_type
                else:
                    out_param_type = "undefined"
                output_file.write(" * @return {!Promise<%s>}\n" % out_param_type)
                output_file.write(" */\n")
                output_file.write(
                    "Protocol.%sAgent.prototype.%s = function(%s) {};\n" % (domain_name, command["name"], ", ".join(params)))
                # Request typedef built from the input parameter record.
                request_object_properties = []
                request_type = "Protocol.%sAgent.%sRequest" % (domain_name, to_title_case(command["name"]))
                for param in in_param_to_type:
                    request_object_properties.append("%s: %s" % (param, in_param_to_type[param]))
                if request_object_properties:
                    output_file.write("/** @typedef {!{%s}} */\n" % (", ".join(request_object_properties)))
                else:
                    output_file.write("/** @typedef {Object|undefined} */\n")
                output_file.write("%s;\n" % request_type)
                # Response typedef built from the return value record.
                response_object_properties = []
                response_type = "Protocol.%sAgent.%sResponse" % (domain_name, to_title_case(command["name"]))
                for param in out_param_to_type:
                    response_object_properties.append("%s: %s" % (param, out_param_to_type[param]))
                if response_object_properties:
                    output_file.write("/** @typedef {!{%s}} */\n" % (", ".join(response_object_properties)))
                else:
                    output_file.write("/** @typedef {Object|undefined} */\n")
                output_file.write("%s;\n" % response_type)
                # invoke_<command> variant that takes a request object.
                output_file.write("/**\n")
                output_file.write(" * @param {!%s} obj\n" % request_type)
                output_file.write(" * @return {!Promise<!%s>}" % response_type)
                output_file.write(" */\n")
                output_file.write("Protocol.%sAgent.prototype.invoke_%s = function(obj) {};\n" % (domain_name, command["name"]))
        # Typedefs/enums for the domain's declared types.
        # NOTE(review): loop variables `type` and `property` shadow builtins.
        if "types" in domain:
            for type in domain["types"]:
                if type["type"] == "object":
                    typedef_args = []
                    if "properties" in type:
                        for property in type["properties"]:
                            suffix = ""
                            if ("optional" in property):
                                suffix = "|undefined"
                            if "enum" in property:
                                enum_name = "Protocol.%s.%s%s" % (domain_name, type["id"], to_title_case(property["name"]))
                                output_file.write(generate_enum(enum_name, property))
                                typedef_args.append("%s:(%s%s)" % (property["name"], enum_name, suffix))
                            else:
                                typedef_args.append("%s:(%s%s)" % (property["name"], param_type(domain_name, property), suffix))
                    if (typedef_args):
                        output_file.write(
                            "\n/** @typedef {!{%s}} */\nProtocol.%s.%s;\n" % (", ".join(typedef_args), domain_name, type["id"]))
                    else:
                        output_file.write("\n/** @typedef {!Object} */\nProtocol.%s.%s;\n" % (domain_name, type["id"]))
                elif type["type"] == "string" and "enum" in type:
                    output_file.write(generate_enum("Protocol.%s.%s" % (domain_name, type["id"]), type))
                elif type["type"] == "array":
                    output_file.write("\n/** @typedef {!Array<!%s>} */\nProtocol.%s.%s;\n" % (param_type(
                        domain_name, type["items"]), domain_name, type["id"]))
                else:
                    output_file.write(
                        "\n/** @typedef {%s} */\nProtocol.%s.%s;\n" % (type_traits[type["type"]], domain_name, type["id"]))
        # Dispatcher declaration; a few domains need @constructor instead of
        # @interface.
        if domain_name in ["Runtime", "Debugger", "HeapProfiler"]:
            output_file.write("/** @constructor */\n")
        else:
            output_file.write("/** @interface */\n")
        output_file.write("Protocol.%sDispatcher = function() {};\n" % domain_name)
        if "events" in domain:
            for event in domain["events"]:
                params = []
                if ("parameters" in event):
                    output_file.write("/**\n")
                    for param in event["parameters"]:
                        if ("optional" in param):
                            params.append("opt_%s" % param["name"])
                            output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, param), param["name"]))
                        else:
                            params.append(param["name"])
                            output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, param), param["name"]))
                    output_file.write(" */\n")
                output_file.write(
                    "Protocol.%sDispatcher.prototype.%s = function(%s) {};\n" % (domain_name, event["name"], ", ".join(params)))
    # Accessors and dispatcher registration on Protocol.TargetBase; the
    # leading run of uppercase letters is lower-cased for the accessor name
    # (e.g. "DOM" -> "domAgent").
    for domain in domains:
        domain_name = domain["domain"]
        uppercase_length = 0
        while uppercase_length < len(domain_name) and domain_name[uppercase_length].isupper():
            uppercase_length += 1
        output_file.write("/** @return {!Protocol.%sAgent}*/\n" % domain_name)
        output_file.write("Protocol.TargetBase.prototype.%s = function(){};\n" %
                          (domain_name[:uppercase_length].lower() + domain_name[uppercase_length:] + "Agent"))
        output_file.write("/**\n * @param {!Protocol.%sDispatcher} dispatcher\n */\n" % domain_name)
        output_file.write("Protocol.TargetBase.prototype.register%sDispatcher = function(dispatcher) {}\n" % domain_name)
    output_file.close()
if __name__ == "__main__":
    # `sys` and `os` are already imported at module scope; the redundant
    # local re-imports were removed.
    program_name = os.path.basename(__file__)
    if len(sys.argv) < 5 or sys.argv[1] != "-o":
        sys.stderr.write("Usage: %s -o OUTPUT_FILE INPUT_FILE_1 INPUT_FILE_2\n" % program_name)
        # sys.exit instead of the interactive-only builtin exit()
        sys.exit(1)
    output_path = sys.argv[2]
    input_path_1 = sys.argv[3]
    input_path_2 = sys.argv[4]
    generate_protocol_externs(output_path, input_path_1, input_path_2)
| |
from __future__ import unicode_literals
import collections
import datetime
import decimal
import inspect
import math
import os
import re
import sys
import types
from importlib import import_module
from django.apps import apps
from django.db import migrations, models
from django.db.migrations.loader import MigrationLoader
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
# re.compile() returns an instance of a private class; capture its type once
# so compiled patterns can be recognized with isinstance() in serialize().
COMPILED_REGEX_TYPE = type(re.compile(''))
class SettingsReference(str):
    """
    Special subclass of string which actually references a current settings
    value. It's treated as the value in memory, but serializes out to a
    settings.NAME attribute reference.
    """
    def __new__(cls, value, setting_name):
        # `cls` (not `self`) is the conventional name for __new__'s implicit
        # first argument; the setting_name argument is consumed by __init__.
        return str.__new__(cls, value)

    def __init__(self, value, setting_name):
        # Name of the setting this string stands in for, e.g. "AUTH_USER_MODEL".
        self.setting_name = setting_name
class OperationWriter(object):
    """
    Serializes a single migration Operation (deconstructed into name, args
    and kwargs) into an indented ``migrations.X(...)`` source snippet plus
    the set of import statements that snippet needs.
    """
    # Base indentation depth in 4-space steps; operations sit two levels
    # deep inside the generated Migration class body.
    indentation = 2

    def __init__(self, operation):
        self.operation = operation
        self.buff = []  # accumulated output lines, joined by render()

    def serialize(self):
        """Return ``(rendered_source, imports)`` for ``self.operation``."""

        def _write(_arg_name, _arg_value):
            # Arguments named in serialization_expand_args are expanded one
            # element per line; everything else is serialized inline.
            if (_arg_name in self.operation.serialization_expand_args and
                    isinstance(_arg_value, (list, tuple, dict))):
                if isinstance(_arg_value, dict):
                    self.feed('%s={' % _arg_name)
                    self.indent()
                    for key, value in _arg_value.items():
                        key_string, key_imports = MigrationWriter.serialize(key)
                        arg_string, arg_imports = MigrationWriter.serialize(value)
                        self.feed('%s: %s,' % (key_string, arg_string))
                        imports.update(key_imports)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('},')
                else:
                    self.feed('%s=[' % _arg_name)
                    self.indent()
                    for item in _arg_value:
                        arg_string, arg_imports = MigrationWriter.serialize(item)
                        self.feed('%s,' % arg_string)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('],')
            else:
                arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
                self.feed('%s=%s,' % (_arg_name, arg_string))
                imports.update(arg_imports)

        imports = set()
        name, args, kwargs = self.operation.deconstruct()
        # NOTE(review): inspect.getargspec is deprecated on Python 3 in
        # favour of getfullargspec, but is kept here for Python 2 support.
        argspec = inspect.getargspec(self.operation.__init__)
        # See if this operation is in django.db.migrations. If it is,
        # We can just use the fact we already have that imported,
        # otherwise, we need to add an import for the operation class.
        if getattr(migrations, name, None) == self.operation.__class__:
            self.feed('migrations.%s(' % name)
        else:
            imports.add('import %s' % (self.operation.__class__.__module__))
            self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
        self.indent()
        # Start at one because argspec includes "self"
        for i, arg in enumerate(args, 1):
            arg_value = arg
            arg_name = argspec.args[i]
            _write(arg_name, arg_value)
        i = len(args)
        # Only iterate over remaining arguments
        for arg_name in argspec.args[i + 1:]:
            if arg_name in kwargs:
                arg_value = kwargs[arg_name]
                _write(arg_name, arg_value)
        self.unindent()
        self.feed('),')
        return self.render(), imports

    def indent(self):
        # Deepen subsequent feed() lines by one level.
        self.indentation += 1

    def unindent(self):
        self.indentation -= 1

    def feed(self, line):
        # Append a line at the current indentation (4 spaces per level).
        self.buff.append(' ' * (self.indentation * 4) + line)

    def render(self):
        return '\n'.join(self.buff)
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
    def __init__(self, migration):
        # Migration instance whose file contents will be produced.
        self.migration = migration
        # Flipped to True by as_string() when functions are imported from
        # other migration files and must be copied over by hand.
        self.needs_manual_porting = False
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"replaces_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
# Format dependencies and write out swappable dependencies right
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
# No need to output bytestrings for dependencies
dependency = tuple(force_text(s) for s in dependency)
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely, swapping imports of functions from migration files
# for comments
migration_imports = set()
for line in list(imports):
if re.match("^import (.*)\.\d+[^\s]*$", line):
migration_imports.add(line.split("import")[1].strip())
imports.remove(line)
self.needs_manual_porting = True
imports.discard("from django.db import models")
items["imports"] = "\n".join(imports) + "\n" if imports else ""
if migration_imports:
items["imports"] += (
"\n\n# Functions from the following migrations need manual "
"copying.\n# Move them and any dependencies into this file, "
"then update the\n# RunPython operations to refer to the local "
"versions:\n# %s"
) % "\n# ".join(migration_imports)
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
return (MIGRATION_TEMPLATE % items).encode("utf8")
@staticmethod
def serialize_datetime(value):
"""
Returns a serialized version of a datetime object that is valid,
executable python code. It converts timezone-aware values to utc with
an 'executable' utc representation of tzinfo.
"""
if value.tzinfo is not None and value.tzinfo != utc:
value = value.astimezone(utc)
value_repr = repr(value).replace("<UTC>", "utc")
if isinstance(value, datetime_safe.datetime):
value_repr = "datetime.%s" % value_repr
return value_repr
@property
def filename(self):
return "%s.py" % self.migration.name
    @property
    def path(self):
        """
        Absolute filesystem path this migration should be written to.

        Locates the app's migrations package directory by importing it,
        falling back to the app directory, or — for a MIGRATION_MODULES
        package that doesn't exist yet — creating the package on disk.
        """
        migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)
        # See if we can import the migrations module directly
        try:
            migrations_module = import_module(migrations_package_name)
            # Python 3 fails when the migrations directory does not have a
            # __init__.py file
            if not hasattr(migrations_module, '__file__'):
                raise ImportError
            basedir = os.path.dirname(upath(migrations_module.__file__))
        except ImportError:
            app_config = apps.get_app_config(self.migration.app_label)
            migrations_package_basename = migrations_package_name.split(".")[-1]
            # Alright, see if it's a direct submodule of the app
            if '%s.%s' % (app_config.name, migrations_package_basename) == migrations_package_name:
                basedir = os.path.join(app_config.path, migrations_package_basename)
            else:
                # In case of using MIGRATION_MODULES setting and the custom
                # package doesn't exist, create one.
                package_dirs = migrations_package_name.split(".")
                create_path = os.path.join(upath(sys.path[0]), *package_dirs)
                if not os.path.isdir(create_path):
                    os.makedirs(create_path)
                # Ensure every package level has an __init__.py so the new
                # package is importable.
                for i in range(1, len(package_dirs) + 1):
                    init_dir = os.path.join(upath(sys.path[0]), *package_dirs[:i])
                    init_path = os.path.join(init_dir, "__init__.py")
                    if not os.path.isfile(init_path):
                        open(init_path, "w").close()
                return os.path.join(create_path, self.filename)
        return os.path.join(basedir, self.filename)
@classmethod
def serialize_deconstructed(cls, path, args, kwargs):
name, imports = cls._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = cls.serialize(arg)
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = cls.serialize(arg)
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@classmethod
def _serialize_path(cls, path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
@classmethod
def serialize(cls, value):
"""
Serializes the value to a string that's parsable by Python, along
with any needed imports to make that string work.
More advanced than repr() as it can encode things
like datetime.datetime.now.
"""
# FIXME: Ideally Promise would be reconstructible, but for now we
# use force_text on them and defer to the normal string serialization
# process.
if isinstance(value, Promise):
value = force_text(value)
# Sequences
if isinstance(value, (list, set, tuple)):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
if isinstance(value, set):
# Don't use the literal "{%s}" as it doesn't support empty set
format = "set([%s])"
elif isinstance(value, tuple):
# When len(value)==0, the empty tuple should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
format = "(%s)" if len(value) != 1 else "(%s,)"
else:
format = "[%s]"
return format % (", ".join(strings)), imports
# Dictionaries
elif isinstance(value, dict):
imports = set()
strings = []
for k, v in value.items():
k_string, k_imports = cls.serialize(k)
v_string, v_imports = cls.serialize(v)
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
# Datetimes
elif isinstance(value, datetime.datetime):
value_repr = cls.serialize_datetime(value)
imports = ["import datetime"]
if value.tzinfo is not None:
imports.append("from django.utils.timezone import utc")
return value_repr, set(imports)
# Dates
elif isinstance(value, datetime.date):
value_repr = repr(value)
if isinstance(value, datetime_safe.date):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
# Times
elif isinstance(value, datetime.time):
value_repr = repr(value)
if isinstance(value, datetime_safe.time):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
# Settings references
elif isinstance(value, SettingsReference):
return "settings.%s" % value.setting_name, {"from django.conf import settings"}
# Simple types
elif isinstance(value, float):
if math.isnan(value) or math.isinf(value):
return 'float("{}")'.format(value), set()
return repr(value), set()
elif isinstance(value, six.integer_types + (bool, type(None))):
return repr(value), set()
elif isinstance(value, six.binary_type):
value_repr = repr(value)
if six.PY2:
# Prepend the `b` prefix since we're importing unicode_literals
value_repr = 'b' + value_repr
return value_repr, set()
elif isinstance(value, six.text_type):
value_repr = repr(value)
if six.PY2:
# Strip the `u` prefix since we're importing unicode_literals
value_repr = value_repr[1:]
return value_repr, set()
# Decimal
elif isinstance(value, decimal.Decimal):
return repr(value), {"from decimal import Decimal"}
# Django fields
elif isinstance(value, models.Field):
attr_name, path, args, kwargs = value.deconstruct()
return cls.serialize_deconstructed(path, args, kwargs)
# Classes
elif isinstance(value, type):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is value:
return string, set(imports)
if hasattr(value, "__module__"):
module = value.__module__
if module == six.moves.builtins.__name__:
return value.__name__, set()
else:
return "%s.%s" % (module, value.__name__), {"import %s" % module}
elif isinstance(value, models.manager.BaseManager):
as_manager, manager_path, qs_path, args, kwargs = value.deconstruct()
if as_manager:
name, imports = cls._serialize_path(qs_path)
return "%s.as_manager()" % name, imports
else:
return cls.serialize_deconstructed(manager_path, args, kwargs)
# Anything that knows how to deconstruct itself.
elif hasattr(value, 'deconstruct'):
return cls.serialize_deconstructed(*value.deconstruct())
# Functions
elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
# @classmethod?
if getattr(value, "__self__", None) and isinstance(value.__self__, type):
klass = value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, value.__name__), {"import %s" % module}
# Further error checking
if value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
if value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % value)
# Python 3 is a lot easier, and only uses this branch if it's not local.
if getattr(value, "__qualname__", None) and getattr(value, "__module__", None):
if "<" not in value.__qualname__: # Qualname can include <locals>
return "%s.%s" % (value.__module__, value.__qualname__), {"import %s" % value.__module__}
# Python 2/fallback version
module_name = value.__module__
# Make sure it's actually there and not an unbound method
module = import_module(module_name)
if not hasattr(module, value.__name__):
raise ValueError(
"Could not find function %s in %s.\n"
"Please note that due to Python 2 limitations, you cannot "
"serialize unbound method functions (e.g. a method "
"declared and used in the same class body). Please move "
"the function into the main module body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (value.__name__, module_name, get_docs_version()))
return "%s.%s" % (module_name, value.__name__), {"import %s" % module_name}
# Other iterables
elif isinstance(value, collections.Iterable):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
# When len(strings)==0, the empty iterable should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
format = "(%s)" if len(strings) != 1 else "(%s,)"
return format % (", ".join(strings)), imports
# Compiled regex
elif isinstance(value, COMPILED_REGEX_TYPE):
imports = {"import re"}
regex_pattern, pattern_imports = cls.serialize(value.pattern)
regex_flags, flag_imports = cls.serialize(value.flags)
imports.update(pattern_imports)
imports.update(flag_imports)
args = [regex_pattern]
if value.flags:
args.append(regex_flags)
return "re.compile(%s)" % ', '.join(args), imports
# Uh oh.
else:
raise ValueError(
"Cannot serialize: %r\nThere are some values Django cannot serialize into "
"migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
"topics/migrations/#migration-serializing" % (value, get_docs_version())
)
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| |
# Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import logging
import tempfile
from ConfigParser import RawConfigParser
from ovs.extensions.db.arakoon.ArakoonManagement import ArakoonManagementEx
from ovs.extensions.generic.remote import Remote
from ovs.extensions.generic.sshclient import SSHClient
from ovs.extensions.generic.system import System
from ovs.extensions.services.service import ServiceManager
from StringIO import StringIO
from ovs.log.logHandler import LogHandler
# Module-level logger; propagation is disabled so installer messages are
# emitted only through the LogHandler's own handlers and not duplicated
# by ancestor loggers.
logger = LogHandler.get('extensions', name='arakoon_installer')
logger.logger.propagate = False
class ArakoonNodeConfig(object):
    """
    Configuration parameters for a single arakoon cluster node.

    Two node configs are considered equal when they carry the same node
    name; all other attributes are ignored for identity purposes.
    """
    def __init__(self, name, ip, client_port, messaging_port, log_dir, home, tlog_dir):
        """
        Initializes a new config entry for a single node.
        """
        # Caller-supplied node identity and endpoints.
        self.name = name
        self.ip = ip
        self.client_port = client_port
        self.messaging_port = messaging_port
        # Filesystem locations for this node's data.
        self.log_dir = log_dir
        self.home = home
        self.tlog_dir = tlog_dir
        # Fixed defaults applied to every node.
        self.tlog_compression = 'snappy'
        self.log_level = 'info'
        self.fsync = True
    def __hash__(self):
        """
        Hashing is based solely on the node name.
        """
        return hash(self.name)
    def __eq__(self, other):
        """
        Two ArakoonNodeConfig objects are equal when their names match.
        """
        if isinstance(other, ArakoonNodeConfig):
            return self.__hash__() == other.__hash__()
        return False
    def __ne__(self, other):
        """
        Inverse of __eq__; anything that is not an ArakoonNodeConfig differs.
        """
        return not (isinstance(other, ArakoonNodeConfig) and self.__eq__(other))
class ArakoonClusterConfig(object):
    """
    Contains cluster config parameters for one arakoon cluster.

    The configuration is persisted as an ini-style file at
    ``/opt/OpenvStorage/config/arakoon/<cluster>/<cluster>.cfg`` on each node.
    """
    ARAKOON_CONFIG_DIR = '/opt/OpenvStorage/config/arakoon/{0}'
    ARAKOON_CONFIG_FILE = '/opt/OpenvStorage/config/arakoon/{0}/{0}.cfg'
    def __init__(self, cluster_id, plugins=None):
        """
        Initializes an empty cluster config.

        :param cluster_id: name of the cluster
        :param plugins: a single plugin name (string) or a list of plugin names
        """
        self.cluster_id = cluster_id
        self._dir = ArakoonClusterConfig.ARAKOON_CONFIG_DIR.format(self.cluster_id)
        self._filename = ArakoonClusterConfig.ARAKOON_CONFIG_FILE.format(self.cluster_id)
        self.nodes = []
        self._plugins = []
        if isinstance(plugins, list):
            self._plugins = plugins
        elif isinstance(plugins, basestring):
            self._plugins.append(plugins)
    def load_config(self, client):
        """
        Reads a configuration from reality: parses the config file found on
        the node behind the given client and rebuilds self.nodes from it.

        :param client: SSHClient connected to a node holding the config file
        """
        contents = client.file_read(self._filename)
        parser = RawConfigParser()
        parser.readfp(StringIO(contents))
        if parser.has_option('global', 'plugins'):
            self._plugins = [plugin.strip() for plugin in parser.get('global', 'plugins').split(',')]
        # The 'cluster' option lists the node names; each node has its own section.
        for node in parser.get('global', 'cluster').split(','):
            node = node.strip()
            self.nodes.append(ArakoonNodeConfig(name=node,
                                                ip=parser.get(node, 'ip'),
                                                client_port=parser.get(node, 'client_port'),
                                                messaging_port=parser.get(node, 'messaging_port'),
                                                log_dir=parser.get(node, 'log_dir'),
                                                home=parser.get(node, 'home'),
                                                tlog_dir=parser.get(node, 'tlog_dir')))
    def export(self):
        """
        Exports the current configuration to a python dict keyed per
        ini section ('global' plus one section per node).
        """
        data = {'global': {'cluster_id': self.cluster_id,
                           'cluster': ','.join(sorted(node.name for node in self.nodes)),
                           'plugins': ','.join(sorted(self._plugins))}}
        for node in self.nodes:
            data[node.name] = {'name': node.name,
                               'ip': node.ip,
                               'client_port': node.client_port,
                               'messaging_port': node.messaging_port,
                               'tlog_compression': node.tlog_compression,
                               'log_level': node.log_level,
                               'log_dir': node.log_dir,
                               'home': node.home,
                               'tlog_dir': node.tlog_dir,
                               'fsync': 'true' if node.fsync else 'false'}
        return data
    def write_config(self, client):
        """
        Writes the configuration down in the ini format expected by Arakoon
        and uploads it to the node behind the given client.

        :param client: SSHClient connected to the target node
        """
        (temp_handle, temp_filename) = tempfile.mkstemp()
        # BUG FIX: mkstemp returns an open file descriptor which was never
        # closed (fd leak); we reopen the file by name below, so close it now.
        os.close(temp_handle)
        try:
            contents = RawConfigParser()
            data = self.export()
            for section in data:
                contents.add_section(section)
                for item in data[section]:
                    contents.set(section, item, data[section][item])
            with open(temp_filename, 'wb') as config_file:
                contents.write(config_file)
            client.dir_create(self._dir)
            client.file_upload(self._filename, temp_filename)
        finally:
            # Always clean up the local temp file, even when the upload fails.
            os.remove(temp_filename)
    def delete_config(self, client):
        """
        Deletes the configuration directory (and thus the file) on the node
        behind the given client.
        """
        client.dir_delete(self._dir)
class ArakoonInstaller(object):
    """
    Class to dynamically install/(re)configure an arakoon cluster.

    All methods are static; cluster state lives on the remote nodes
    (config files, directories and services), not on this class.
    """
    # Path templates: {0} is the cluster name for the log dir; for home/tlog
    # dirs {0} is the configured base dir and {1} the cluster name.
    ARAKOON_LOG_DIR = '/var/log/arakoon/{0}'
    ARAKOON_HOME_DIR = '{0}/arakoon/{1}'
    ARAKOON_TLOG_DIR = '{0}/tlogs/{1}'
    ARAKOON_CONFIG_DIR = '/opt/OpenvStorage/config/arakoon'
    ARAKOON_CONFIG_FILE = '/opt/OpenvStorage/config/arakoon/{0}/{0}.cfg'
    def __init__(self):
        """
        ArakoonInstaller should not be instantiated
        """
        raise RuntimeError('ArakoonInstaller is a complete static helper class')
    @staticmethod
    def create_cluster(cluster_name, ip, exclude_ports, plugins=None):
        """
        Creates a cluster with a single (initial) node on the given ip.

        :param cluster_name: name of the new cluster
        :param ip: ip of the node on which the cluster is created
        :param exclude_ports: ports that must not be allocated for this node
        :param plugins: optional plugin name or list of plugin names
        :return: dict with the allocated 'client_port' and 'messaging_port'
        """
        logger.debug('Creating cluster {0} on {1}'.format(cluster_name, ip))
        client = SSHClient(ip)
        base_dir = client.config_read('ovs.arakoon.location').rstrip('/')
        port_range = client.config_read('ovs.ports.arakoon')
        # Reserve two free ports: one client port and one messaging port.
        ports = System.get_free_ports(port_range, exclude_ports, 2, client)
        node_name = System.get_my_machine_id(client)
        config = ArakoonClusterConfig(cluster_name, plugins)
        # Only add the node if it is not yet part of the (fresh) config.
        if not [node.name for node in config.nodes if node.name == node_name]:
            config.nodes.append(ArakoonNodeConfig(name=node_name,
                                                  ip=ip,
                                                  client_port=ports[0],
                                                  messaging_port=ports[1],
                                                  log_dir=ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                                                  home=ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name),
                                                  tlog_dir=ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)))
        ArakoonInstaller._deploy(config)
        logger.debug('Creating cluster {0} on {1} completed'.format(cluster_name, ip))
        return {'client_port': ports[0],
                'messaging_port': ports[1]}
    @staticmethod
    def delete_cluster(cluster_name, ip):
        """
        Deletes a complete cluster: services, directories and config files
        are removed on every node listed in the config found on `ip`.

        :param cluster_name: name of the cluster to delete
        :param ip: ip of a node holding the current cluster config
        """
        logger.debug('Deleting cluster {0} on {1}'.format(cluster_name, ip))
        config = ArakoonClusterConfig(cluster_name)
        config.load_config(SSHClient(ip))
        # Cleans up a complete cluster (remove services, directories and configuration files)
        for node in config.nodes:
            ArakoonInstaller._destroy_node(config, node)
        logger.debug('Deleting cluster {0} on {1} completed'.format(cluster_name, ip))
    @staticmethod
    def extend_cluster(master_ip, new_ip, cluster_name, exclude_ports):
        """
        Extends a cluster to a given new node.

        :param master_ip: ip of a node already in the cluster (config source)
        :param new_ip: ip of the node to add
        :param cluster_name: name of the cluster to extend
        :param exclude_ports: ports that must not be allocated on the new node
        :return: dict with the allocated 'client_port' and 'messaging_port'
        """
        logger.debug('Extending cluster {0} from {1} to {2}'.format(cluster_name, master_ip, new_ip))
        client = SSHClient(master_ip)
        config = ArakoonClusterConfig(cluster_name)
        config.load_config(client)
        # Switch to the new node to allocate its ports and resolve its name.
        client = SSHClient(new_ip)
        base_dir = client.config_read('ovs.arakoon.location').rstrip('/')
        port_range = client.config_read('ovs.ports.arakoon')
        ports = System.get_free_ports(port_range, exclude_ports, 2, client)
        node_name = System.get_my_machine_id(client)
        # Idempotent: skip if the node is already part of the cluster config.
        if not [node.name for node in config.nodes if node.name == node_name]:
            config.nodes.append(ArakoonNodeConfig(name=node_name,
                                                  ip=new_ip,
                                                  client_port=ports[0],
                                                  messaging_port=ports[1],
                                                  log_dir=ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                                                  home=ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name),
                                                  tlog_dir=ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)))
        ArakoonInstaller._deploy(config)
        logger.debug('Extending cluster {0} from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
        return {'client_port': ports[0],
                'messaging_port': ports[1]}
    @staticmethod
    def shrink_cluster(remaining_node_ip, deleted_node_ip, cluster_name):
        """
        Removes a node from a cluster; the old node will become a slave
        (it keeps a copy of the config but no longer runs a cluster member).

        :param remaining_node_ip: ip of a node that stays in the cluster
        :param deleted_node_ip: ip of the node to remove
        :param cluster_name: name of the cluster to shrink
        """
        logger.debug('Shrinking cluster {0} from {1}'.format(cluster_name, deleted_node_ip))
        client = SSHClient(remaining_node_ip)
        config = ArakoonClusterConfig(cluster_name)
        config.load_config(client)
        # Iterate over a copy since we mutate config.nodes inside the loop.
        for node in config.nodes[:]:
            if node.ip == deleted_node_ip:
                config.nodes.remove(node)
                ArakoonInstaller._destroy_node(config, node)
        ArakoonInstaller._deploy(config)
        ArakoonInstaller.deploy_to_slave(remaining_node_ip, deleted_node_ip, cluster_name)
        logger.debug('Shrinking cluster {0} from {1} completed'.format(cluster_name, deleted_node_ip))
    @staticmethod
    def _destroy_node(config, node):
        """
        Cleans up a single node: stops and removes its service, deletes its
        data directories and removes its configuration file.

        :param config: ArakoonClusterConfig the node belongs to
        :param node: ArakoonNodeConfig of the node to destroy
        """
        logger.debug('Destroy node {0} in cluster {1}'.format(node.ip, config.cluster_id))
        # Removes services for a cluster on a given node.
        # Two clients: config handling runs as the 'ovs' user, service and
        # directory removal require root.
        ovs_client = SSHClient(node.ip)
        root_client = SSHClient(node.ip, username='root')
        ArakoonInstaller.stop(config.cluster_id, client=root_client)
        ArakoonInstaller.remove(config.cluster_id, client=root_client)
        # Cleans all directories on a given node
        root_client.dir_delete([node.log_dir, node.tlog_dir, node.home])
        # Removes a configuration file from a node
        config.delete_config(ovs_client)
        logger.debug('Destroy node {0} in cluster {1} completed'.format(node.ip, config.cluster_id))
    @staticmethod
    def _deploy(config):
        """
        Deploys a complete cluster: distributes the configuration files,
        creates the data directories and registers the services on every node.

        :param config: ArakoonClusterConfig describing the full cluster
        """
        logger.debug('Deploying cluster {0}'.format(config.cluster_id))
        for node in config.nodes:
            logger.debug('  Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
            ovs_client = SSHClient(node.ip)
            root_client = SSHClient(node.ip, username='root')
            # Distributes a configuration file to all its nodes
            config.write_config(ovs_client)
            # Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
            abs_paths = [node.log_dir, node.tlog_dir, node.home]
            root_client.dir_create(abs_paths)
            root_client.dir_chmod(abs_paths, 0755, recursive=True)
            root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
            # Creates services for/on all nodes in the config
            base_name = 'ovs-arakoon'
            target_name = 'ovs-arakoon-{0}'.format(config.cluster_id)
            ServiceManager.prepare_template(base_name, target_name, ovs_client)
            ServiceManager.add_service(target_name, root_client, params={'CLUSTER': config.cluster_id})
            logger.debug('  Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
    @staticmethod
    def start(cluster_name, client):
        """
        Starts an arakoon cluster service on the node behind `client`,
        unless it is already reported as running.

        NOTE(review): services are created as 'ovs-arakoon-<cluster>' in
        _deploy but addressed as 'arakoon-<cluster>' here — presumably
        ServiceManager normalizes the 'ovs-' prefix; confirm.
        """
        if ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client) is False:
            ServiceManager.start_service('arakoon-{0}'.format(cluster_name), client=client)
    @staticmethod
    def stop(cluster_name, client):
        """
        Stops an arakoon service on the node behind `client`, but only when
        the service exists and is currently running.
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True and \
                ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client) is True:
            ServiceManager.stop_service('arakoon-{0}'.format(cluster_name), client=client)
    @staticmethod
    def remove(cluster_name, client):
        """
        Removes (unregisters) an arakoon service on the node behind `client`,
        if such a service exists.
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True:
            ServiceManager.remove_service('arakoon-{0}'.format(cluster_name), client=client)
    @staticmethod
    def deploy_to_slave(master_ip, slave_ip, cluster_name):
        """
        Deploys the configuration file (read from the master node) to a slave
        node, so the slave can locate the cluster without being a member.
        """
        client = SSHClient(master_ip)
        config = ArakoonClusterConfig(cluster_name)
        config.load_config(client)
        client = SSHClient(slave_ip)
        config.write_config(client)
    @staticmethod
    def remove_from_slave(master_ip, slave_ip, cluster_name):
        """
        Removes everything related to a given cluster from the slave
        (i.e. deletes its copy of the cluster configuration).
        """
        client = SSHClient(master_ip)
        config = ArakoonClusterConfig(cluster_name)
        config.load_config(client)
        client = SSHClient(slave_ip)
        config.delete_config(client)
    @staticmethod
    def wait_for_cluster(cluster_name):
        """
        Waits for an Arakoon cluster to be available (by sending a nop).

        Retries up to 3 times on ArakoonSockReadNoBytes, then re-raises the
        last exception.

        :return: True as soon as the cluster answered the nop
        """
        logger.debug('Waiting for cluster {0}'.format(cluster_name))
        # Imported here to avoid a module-level dependency cycle.
        from ovs.extensions.db.arakoon.arakoon.ArakoonExceptions import ArakoonSockReadNoBytes
        last_exception = None
        tries = 3
        while tries > 0:
            try:
                cluster_object = ArakoonManagementEx().getCluster(str(cluster_name))
                client = cluster_object.getClient()
                client.nop()
                logger.debug('Waiting for cluster {0}: available'.format(cluster_name))
                return True
            except ArakoonSockReadNoBytes as exception:
                last_exception = exception
                tries -= 1
                time.sleep(1)
        raise last_exception
    @staticmethod
    def restart_cluster(cluster_name, master_ip):
        """
        Execute a restart sequence (executed after arakoon and/or alba
        package upgrade).

        With 2 or fewer nodes the whole cluster is stopped and then started
        (a majority cannot be kept anyway); with more nodes the restart is
        done one node at a time so the cluster stays available.
        """
        logger.debug('Restart sequence for {0} via {1}'.format(cluster_name, master_ip))
        client = SSHClient(master_ip)
        config = ArakoonClusterConfig(cluster_name)
        config.load_config(client)
        all_clients = [SSHClient(node.ip) for node in config.nodes if node.ip != master_ip] + [client]
        if len(config.nodes) <= 2:
            logger.debug('  Insufficient nodes in cluster {0}. Full restart'.format(cluster_name))
            for function in [ArakoonInstaller.stop, ArakoonInstaller.start]:
                for client in all_clients:
                    function(cluster_name, client)
            ArakoonInstaller.wait_for_cluster(cluster_name)
        else:
            logger.debug('  Sufficient nodes in cluster {0}. Sequential restart'.format(cluster_name))
            for client in all_clients:
                ArakoonInstaller.stop(cluster_name, client)
                ArakoonInstaller.start(cluster_name, client)
                logger.debug('  Restarted node {0} on cluster {1}'.format(client.ip, cluster_name))
                # Wait after each node so the next stop never breaks quorum.
                ArakoonInstaller.wait_for_cluster(cluster_name)
        logger.debug('Restart sequence for {0} via {1} completed'.format(cluster_name, master_ip))
    @staticmethod
    def restart_cluster_add(cluster_name, current_ips, new_ip):
        """
        Execute a (re)start sequence after adding a new node to a cluster.

        The new node first catches up on the tlogs, then every existing node
        is restarted, and finally the new node itself is started.
        """
        logger.debug('Restart sequence (add) for {0}'.format(cluster_name))
        logger.debug('Current ips: {0}'.format(', '.join(current_ips)))
        logger.debug('New ip: {0}'.format(new_ip))
        logger.debug('Catching up new node {0} for cluster {1}'.format(new_ip, cluster_name))
        with Remote(new_ip, [ArakoonManagementEx], 'ovs') as remote:
            cluster = remote.ArakoonManagementEx().getCluster(cluster_name)
            cluster.catchup_node()
        logger.debug('Catching up new node {0} for cluster {1} completed'.format(new_ip, cluster_name))
        # If the new ip is already listed, one extra running node is needed
        # before the cluster can be expected to answer.
        threshold = 2 if new_ip in current_ips else 1
        for ip in current_ips:
            if ip == new_ip:
                continue
            client = SSHClient(ip, username='root')
            ArakoonInstaller.stop(cluster_name, client=client)
            ArakoonInstaller.start(cluster_name, client=client)
            logger.debug('  Restarted node {0} for cluster {1}'.format(client.ip, cluster_name))
            if len(current_ips) > threshold:  # A two node cluster needs all nodes running
                ArakoonInstaller.wait_for_cluster(cluster_name)
        new_client = SSHClient(new_ip, username='root')
        ArakoonInstaller.start(cluster_name, client=new_client)
        ArakoonInstaller.wait_for_cluster(cluster_name)
        logger.debug('Started node {0} for cluster {1}'.format(new_ip, cluster_name))
    @staticmethod
    def restart_cluster_remove(cluster_name, remaining_ips):
        """
        Execute a restart sequence after removing a node from a cluster:
        every remaining node is restarted one at a time.
        """
        logger.debug('Restart sequence (remove) for {0}'.format(cluster_name))
        logger.debug('Remaining ips: {0}'.format(', '.join(remaining_ips)))
        for ip in remaining_ips:
            client = SSHClient(ip, username='root')
            ArakoonInstaller.stop(cluster_name, client=client)
            ArakoonInstaller.start(cluster_name, client=client)
            logger.debug('  Restarted node {0} for cluster {1}'.format(client.ip, cluster_name))
            if len(remaining_ips) > 2:  # A two node cluster needs all nodes running
                ArakoonInstaller.wait_for_cluster(cluster_name)
        logger.debug('Restart sequence (remove) for {0} completed'.format(cluster_name))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for partitioned_variables.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class PartitionerCreatorsTest(test.TestCase):
  """Tests for the partitioner factory functions in partitioned_variables.

  Each test creates a partitioned variable under a variable scope with the
  partitioner installed, then inspects the resulting shards via the private
  accessors _get_variable_list / _get_partitions.
  """

  def testFixedSizePartitioner(self):
    # A (10, 10) float32 variable split into 5 shards along axis 0.
    with self.cached_session():
      partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
      with variable_scope.variable_scope("root", partitioner=partitioner):
        v0 = variable_scope.get_variable(
            "v0", dtype=dtypes.float32, shape=(10, 10))
        v0_list = v0._get_variable_list()
        v0_part = v0._get_partitions()
        self.assertEqual(len(v0_list), 5)
        self.assertAllEqual(v0_part, (5, 1))

  def testFixedSizePartitionerInt64(self):
    # The partitioner must also handle non-float dtypes (int64 here).
    with self.cached_session():
      partitioner = partitioned_variables.fixed_size_partitioner(4, axis=0)
      with variable_scope.variable_scope("root", partitioner=partitioner):
        v0 = variable_scope.get_variable("v0", dtype=dtypes.int64, shape=[20])
        v0_list = v0._get_variable_list()
        self.assertEqual(len(v0_list), 4)

  def testResourceFixedSizePartitioner(self):
    # Same as testFixedSizePartitioner but with resource variables enabled.
    with self.cached_session():
      partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
      with variable_scope.variable_scope(
          "root", partitioner=partitioner, use_resource=True):
        v0 = variable_scope.get_variable(
            "v0", dtype=dtypes.float32, shape=(10, 10))
        v0_list = v0._get_variable_list()
        v0_part = v0._get_partitions()
        self.assertEqual(len(v0_list), 5)
        self.assertAllEqual(v0_part, (5, 1))

  def _testVariableAxisSizePartitioner(self,
                                       name,
                                       axis,
                                       max_shard_bytes,
                                       expected_axis_shards,
                                       expected_partitions,
                                       max_shards=None):
    """Creates a (4, 8, 16, 32) float32 variable with the given
    variable_axis_size_partitioner settings and asserts the resulting
    shard count and partition tuple."""
    partitioner = partitioned_variables.variable_axis_size_partitioner(
        axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)
    with variable_scope.variable_scope("root", partitioner=partitioner):
      v0 = variable_scope.get_variable(
          name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
      v0_list = v0._get_variable_list()
      v0_part = v0._get_partitions()
      self.assertEqual(len(v0_list), expected_axis_shards)
      self.assertAllEqual(v0_part, expected_partitions)

  def testVariableAxisSizePartitioner(self):
    with self.cached_session():
      # Create a partitioned variable of shape (4, 8, 16, 32) type float32
      # Bytes per slice along the given axes:
      # 8 * 16 * 32 * sizeof(float32) = 16384 / slice on axis 0
      # 4 * 16 * 32 * sizeof(float32) = 8192 / slice on axis 1
      # 4 * 8 * 32 * sizeof(float32) = 4096 / slice on axis 2
      # 4 * 8 * 16 * sizeof(float32) = 2048 / slice on axis 3
      # Now partition it in different ways...

      # No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
      self._testVariableAxisSizePartitioner(
          "v0",
          axis=0,
          max_shard_bytes=131072,
          expected_axis_shards=1,
          expected_partitions=(1, 1, 1, 1))

      # Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
      self._testVariableAxisSizePartitioner(
          "v1",
          axis=1,
          max_shard_bytes=65536,
          expected_axis_shards=1,
          expected_partitions=(1, 1, 1, 1))

      # Slice into 2 parts:
      # bytes_per_slice = 4096
      # slices_per_shard = 32768 / 4096 = 8
      # axis_shards = 16 / 8 = 2
      self._testVariableAxisSizePartitioner(
          "v2",
          axis=2,
          max_shard_bytes=32768,
          expected_axis_shards=2,
          expected_partitions=(1, 1, 2, 1))

      # This partitioner makes sure we maximize the number of shards along
      # axis 3. Slice it into 32 parts:
      # bytes_per_slice = 2048
      # slices_per_shard = 2048 / 2048 = 1
      # axis_shards = 32 / 1 = 32
      self._testVariableAxisSizePartitioner(
          "v3a",
          axis=3,
          max_shard_bytes=2048,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32))

      # This partitioner makes sure we do not go past the bound of allowable
      # number of shards along axis 3.
      # Slice into 32 parts:
      # bytes_per_slice = 2048
      # slices_per_shard = max(1, 1024 / 2048) = 1
      # axis_shards = 32 / 1 = 32
      # Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
      self._testVariableAxisSizePartitioner(
          "v3b",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32))

      # Specify max_shards so that it won't affect sharding.
      self._testVariableAxisSizePartitioner(
          "v3c",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32),
          max_shards=33)

      # Specify max_shards so that it will affect sharding.
      self._testVariableAxisSizePartitioner(
          "v3d",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=2,
          expected_partitions=(1, 1, 1, 2),
          max_shards=2)

      # Use the partitioner with strings
      partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner(  # pylint: disable=line-too-long
          axis=3,
          max_shard_bytes=32768,
          bytes_per_string_element=8)

      with variable_scope.variable_scope(
          "root", partitioner=partitioner_axis3_str):
        v3str = variable_scope.get_variable(
            "v3str",
            initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32),
            dtype=dtypes.string,
            shape=(4, 8, 16, 32))
        v3str_list = v3str._get_variable_list()
        v3str_part = v3str._get_partitions()

        # Now the estimated bytes_per_slice = 4*8*16*bytes_per_string_element
        # which is equal to 4096. Setting a max_shard_bytes of 32768
        # and we should get a split of 4.
        # Slice into 4 parts:
        # bytes_per_slice = 4096
        # slices_per_shard = 32768 / 4096 = 8
        # axis_shards = 32 / 8 = 4
        self.assertEqual(len(v3str_list), 4)
        self.assertAllEqual(v3str_part, (1, 1, 1, 4))

  def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
                                     var_name, var_shape, expected_axis_shards,
                                     expected_partitions):
    """Creates a float32 variable of `var_shape` with the given
    min_max_variable_partitioner settings and asserts the resulting
    shard count and partition list."""
    partitioner = partitioned_variables.min_max_variable_partitioner(
        max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
    with variable_scope.variable_scope("root", partitioner=partitioner):
      v0 = variable_scope.get_variable(
          var_name, dtype=dtypes.float32, shape=var_shape)
      v0_list = v0._get_variable_list()
      v0_part = v0._get_partitions()
      self.assertEqual(len(v0_list), expected_axis_shards)
      self.assertAllEqual(v0_part, expected_partitions)

  def testMinMaxVariablePartitioner(self):
    with self.cached_session():
      # Partitioning a variable of shape=[2048] with a minimum of 2K per slice.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=2 << 10,
          var_name="v0_0",
          var_shape=[2048],
          expected_axis_shards=4,
          expected_partitions=[4])

      # Partitioning a variable of shape=[2048, 1024] with a minimum of 256K per
      # slice.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v0",
          var_shape=[2048, 1024],
          expected_axis_shards=32,
          expected_partitions=[32, 1])

      # max_partitions restricts partitioning of the variable.
      self._testMinMaxVariablePartitioner(
          max_partitions=16,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v1_max",
          var_shape=[2048, 1024],
          expected_axis_shards=16,
          expected_partitions=[16, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=1,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v2_max",
          var_shape=[2048, 1024],
          expected_axis_shards=1,
          expected_partitions=[1, 1])

      # Reducing/Increasing min_slice_size proportionately increases/reduces the
      # number of partitions.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=128 << 10,
          var_name="v3_slice",
          var_shape=[2048, 1024],
          expected_axis_shards=64,
          expected_partitions=[64, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=512 << 10,
          var_name="v4_slice",
          var_shape=[2048, 1024],
          expected_axis_shards=16,
          expected_partitions=[16, 1])

      # Partitioning the variable along a different axis.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=1,
          min_slice_size=256 << 10,
          var_name="v5_axis",
          var_shape=[64, 1024, 1, 3],
          expected_axis_shards=3,
          expected_partitions=[1, 3, 1, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=3,
          min_slice_size=256 << 10,
          var_name="v6_axis",
          var_shape=[64, 1024, 1, 3],
          expected_axis_shards=3,
          expected_partitions=[1, 1, 1, 3])

      # Can not partition the variable more than what its shape allows.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v7_shape",
          var_shape=[16, 128, 1024],
          expected_axis_shards=16,
          expected_partitions=[16, 1, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v8_shape",
          var_shape=[4, 512, 1024],
          expected_axis_shards=4,
          expected_partitions=[4, 1, 1])
def _IotaInitializer(shape, dtype=dtypes.float32, partition_info=None):
  """Initializer whose values encode their own index.

  A 1-D shape yields ``range(shape[0])``; for higher ranks every leading
  index ``i`` scales the recursively built inner values by ``10**i``, so
  each element's magnitude reveals its position. Only float32 is supported.
  """
  assert dtype == dtypes.float32
  if len(shape) > 1:
    inner = _IotaInitializer(shape[1:], dtype)
    return [[(10**idx) * item for item in inner] for idx in range(shape[0])]
  return range(shape[0])
class PartitionedVariablesTestCase(test.TestCase):
def _TestSaveSpec(self, slices, expected_specs):
self.assertEqual(len(expected_specs), len(slices))
for i in xrange(len(expected_specs)):
self.assertEqual(expected_specs[i], slices[i]._save_slice_info.spec)
def testVecConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
def testConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 1)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
def _testNameHelper(self, use_resource=False):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope("hi", use_resource=use_resource):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
self.assertEqual("hi/PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test same variable.
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope(
"hola", use_resource=use_resource) as vs:
vs1 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
with variable_scope.variable_scope(
vs, reuse=True, use_resource=use_resource):
vs2 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
self.assertEqual("hola/PartitionedVariable", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test name_scope
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with ops.name_scope("ola"):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
self.assertEqual("PartitionedVariable", var1_name)
self.assertEqual("PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
@test_util.run_deprecated_v1
def testName(self):
self._testNameHelper(use_resource=False)
def testResourceName(self):
self._testNameHelper(use_resource=True)
  def testRandomInitValue(self):
    # Partitioning a [200, 40] variable into 10 column slices must
    # reproduce the source values exactly when concatenated back together.
    with self.cached_session():
      rnd = variables.Variable(random_ops.random_uniform([200, 40]))
      vs = partitioned_variables.create_partitioned_variables(
          rnd.get_shape(), [1, 10], rnd.initialized_value())
      self.evaluate(variables.global_variables_initializer())
      # Reassemble the slices along axis 1, the partitioned axis.
      val = array_ops.concat(vs, 1)
      rnd = self.evaluate(rnd)
      self.assertAllClose(rnd, val)
      self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
      # Save specs are "<full shape> <offset,length per dim>" strings;
      # 40 columns / 10 slices = 4 columns per slice.
      self._TestSaveSpec(vs, [
          "200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
          "200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
          "200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
          "200 40 0,200:36,4"
      ])
def testRandomInitUnevenPartitions(self):
with self.cached_session():
rnd = variables.Variable(
random_ops.random_uniform([20, 43], dtype=dtypes.float64))
var_lists = [
partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, i], rnd.initialized_value())
for i in xrange(1, 10)
]
self.evaluate(variables.global_variables_initializer())
rnd_val = self.evaluate(rnd)
# Only check the slice save specs for the first 5 tf.
save_specs = [
# One slice
["20 43 0,20:0,43"],
# Two slices
["20 43 0,20:0,22", "20 43 0,20:22,21"],
# Three slices
["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
# Four slices
[
"20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
"20 43 0,20:33,10"
],
# Five slices
[
"20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
"20 43 0,20:27,8", "20 43 0,20:35,8"
]
]
for i, vs in enumerate(var_lists):
var_val = array_ops.concat(vs, 1)
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
def testDegenerate(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
def testSliceSizeOne(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
"10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
"10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
"10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
])
  def testIotaInitializer(self):
    # Sanity-check the iota initializer itself before using it for slices.
    self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
    self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
                        _IotaInitializer([4, 2]))
    with self.cached_session():
      vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
                                                              _IotaInitializer)
      self.evaluate(variables.global_variables_initializer())
      # 13 rows split into 3 row-partitions -> slices of 5, 4 and 4 rows.
      # Each slice is initialized independently with its own shape.
      slice0 = _IotaInitializer([5, 5])
      slice1 = _IotaInitializer([4, 5])
      slice2 = _IotaInitializer([4, 5])
      val = array_ops.concat(vs, 0)
      # `+` concatenates the Python lists row-wise, matching axis-0 concat.
      self.assertAllClose(slice0 + slice1 + slice2, val)
      self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
  @test_util.run_deprecated_v1
  def testRandomInitializer(self):
    # Sanity check that the slices uses a different seed when using a random
    # initializer function.
    with self.cached_session():
      var0, var1 = partitioned_variables.create_partitioned_variables(
          [20, 12], [1, 2], init_ops.random_uniform_initializer())
      self.evaluate(variables.global_variables_initializer())
      val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
      # Unseeded: the two slices should be initialized differently.
      self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
    # Negative test that proves that slices have the same values if
    # the random initializer uses a seed.
    with self.cached_session():
      var0, var1 = partitioned_variables.create_partitioned_variables(
          [20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
      self.evaluate(variables.global_variables_initializer())
      val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
      self.assertAllClose(val0, val1)
  def testSomeErrors(self):
    # Each call below passes an invalid shape/partitioning combination and
    # is expected to raise ValueError.
    with self.cached_session():
      rnd = variables.Variable(random_ops.random_uniform([10, 43]))
      with self.assertRaises(ValueError):
        # Rank mismatch: 1-D shape, 2-element partition list.
        partitioned_variables.create_partitioned_variables(
            [10], [1, 1], rnd.initialized_value())
      with self.assertRaises(ValueError):
        # Partition list shorter than the shape's rank.
        partitioned_variables.create_partitioned_variables(
            [10, 20], [1], rnd.initialized_value())
      with self.assertRaises(ValueError):
        partitioned_variables.create_partitioned_variables(
            [10, 43], [1], rnd.initialized_value())
      with self.assertRaises(ValueError):
        # Partition list longer than the shape's rank.
        partitioned_variables.create_partitioned_variables(
            [10, 43], [1, 2, 3], rnd.initialized_value())
      with self.assertRaises(ValueError):
        # More row partitions than rows (11 > 10).
        partitioned_variables.create_partitioned_variables(
            [10, 43], [11, 1], rnd.initialized_value())
      with self.assertRaises(ValueError):
        partitioned_variables.create_partitioned_variables(
            [10, 43], [20, 1], rnd.initialized_value())
      with self.assertRaises(ValueError):
        # More column partitions than columns (50 > 43).
        partitioned_variables.create_partitioned_variables(
            [10, 43], [1, 50], rnd.initialized_value())
  @test_util.run_deprecated_v1
  def testControlDepsNone(self):
    with self.cached_session() as session:
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # d get the control dependency.
        d = constant_op.constant(2.0)
        # Partitioned variables do not.
        var_x = variable_scope.get_variable(
            "x",
            shape=[2],
            initializer=init_ops.ones_initializer(),
            partitioner=partitioned_variables.variable_axis_size_partitioner(4))
        # Snapshot the graph's ops so we can isolate the ops created by
        # the read below.
        ops_before_read = session.graph.get_operations()
        var_x.as_tensor()  # Caches the ops for subsequent reads.
        reading_ops = [
            op for op in session.graph.get_operations()
            if op not in ops_before_read
        ]
      self.assertEqual([c.op], d.op.control_inputs)
      # Tests that no control dependencies are added to reading a partitioned
      # variable which is similar to reading a variable.
      for op in reading_ops:
        self.assertEqual([], op.control_inputs)
  @test_util.run_deprecated_v1
  def testConcat(self):
    with self.cached_session() as session:
      var_x = variable_scope.get_variable(
          "x",
          initializer=constant_op.constant([1., 2.]),
          partitioner=partitioned_variables.variable_axis_size_partitioner(4))
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # Snapshot the graph's ops so we can isolate the ops created by
        # _concat() below.
        ops_before_concat = session.graph.get_operations()
        value = var_x._concat()  # pylint: disable=protected-access
        concat_ops = [
            op for op in session.graph.get_operations()
            if op not in ops_before_concat
        ]
      # Flatten all control inputs of the ops created by _concat().
      concat_control_inputs = [
          ci for op in concat_ops for ci in op.control_inputs
      ]
      self.assertTrue(
          c.op in concat_control_inputs,
          "var_x._concat() should get control dependencies from its scope.")
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(value, var_x.as_tensor())
  def testMetaGraphSaveLoad(self):
    # Round-trip a partitioned variable through a MetaGraph checkpoint and
    # verify the restored values match what was saved.
    save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
    save_graph = ops.Graph()
    with save_graph.as_default(), self.session(
        graph=save_graph) as session:
      partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
      with variable_scope.variable_scope("root", partitioner=partitioner):
        v0 = variable_scope.get_variable(
            "v0", dtype=dtypes.float32, shape=(10, 10))
        v0_list = v0._get_variable_list()
        v0_part = v0._get_partitions()
        self.assertEqual(len(v0_list), 5)
        self.assertAllEqual(v0_part, (5, 1))
        self.evaluate(variables.global_variables_initializer())
        # Track the partitioned variable in a collection so it survives
        # the MetaGraph export/import below.
        save_graph.get_collection_ref("partvar").append(v0)
        saver = saver_lib.Saver()
        save_graph.finalize()
        save_path = saver.save(sess=session, save_path=save_prefix)
        previous_value = session.run(
            save_graph.get_tensor_by_name(v0.name + ":0"))
    restore_graph = ops.Graph()
    with restore_graph.as_default(), self.session(
        graph=restore_graph) as session:
      saver = saver_lib.import_meta_graph(save_path + ".meta")
      saver.restore(sess=session, save_path=save_path)
      v0, = save_graph.get_collection_ref("partvar")
      self.assertIsInstance(v0, variables.PartitionedVariable)
      self.assertAllEqual(
          previous_value,
          session.run(restore_graph.get_tensor_by_name(v0.name + ":0")))
if __name__ == "__main__":
  # Run all tests via the TensorFlow test runner.
  test.main()
| |
from datetime import datetime, timedelta
import logging
from django.contrib.auth.models import AnonymousUser, User
from django.utils import timezone
from djblets.testing.decorators import add_fixtures
from djblets.util.dates import get_tz_aware_utcnow
from kgb import SpyAgency, spy_on
from reviewboard.reviews.errors import RevokeShipItError
from reviewboard.reviews.models import Review, ReviewRequest
from reviewboard.reviews.signals import (review_ship_it_revoked,
review_ship_it_revoking)
from reviewboard.testing import TestCase
class ReviewTests(SpyAgency, TestCase):
    """Unit tests for reviewboard.reviews.models.Review."""
    fixtures = ['test_users', 'test_scmtools']
    def test_duplicate_reviews(self):
        """Testing consolidation of duplicate reviews"""
        body_top = 'This is the body_top.'
        body_bottom = 'This is the body_bottom.'
        comment_text_1 = 'Comment text 1'
        comment_text_2 = 'Comment text 2'
        comment_text_3 = 'Comment text 3'
        # Some objects we need.
        user = User.objects.get(username='doc')
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        diffset = self.create_diffset(review_request)
        filediff = self.create_filediff(diffset)
        # Create the first review.
        master_review = self.create_review(review_request, user=user,
                                           body_top=body_top,
                                           body_bottom='')
        self.create_diff_comment(master_review, filediff, text=comment_text_1,
                                 first_line=1, num_lines=1)
        # Create the second review.
        review = self.create_review(review_request, user=user,
                                    body_top='', body_bottom='')
        self.create_diff_comment(review, filediff, text=comment_text_2,
                                 first_line=1, num_lines=1)
        # Create the third review.
        review = self.create_review(review_request, user=user,
                                    body_top='',
                                    body_bottom=body_bottom)
        self.create_diff_comment(review, filediff, text=comment_text_3,
                                 first_line=1, num_lines=1)
        # Now that we've made a mess, see if we get a single review back.
        logging.disable(logging.WARNING)
        review = review_request.get_pending_review(user)
        self.assertTrue(review)
        self.assertEqual(review.id, master_review.id)
        # The surviving review keeps the non-empty body fields and all
        # comments from the duplicates, in creation order.
        self.assertEqual(review.body_top, body_top)
        self.assertEqual(review.body_bottom, body_bottom)
        comments = list(review.comments.all())
        self.assertEqual(len(comments), 3)
        self.assertEqual(comments[0].text, comment_text_1)
        self.assertEqual(comments[1].text, comment_text_2)
        self.assertEqual(comments[2].text, comment_text_3)
    def test_all_participants_with_replies(self):
        """Testing Review.all_participants with replies"""
        user1 = User.objects.create_user(username='aaa',
                                         email='user1@example.com')
        user2 = User.objects.create_user(username='bbb',
                                         email='user2@example.com')
        user3 = User.objects.create_user(username='ccc',
                                         email='user3@example.com')
        user4 = User.objects.create_user(username='ddd',
                                         email='user4@example.com')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, user=user1)
        self.create_reply(review, user=user2, public=True)
        self.create_reply(review, user=user1, public=True)
        self.create_reply(review, user=user4, public=False)
        self.create_reply(review, user=user3, public=True)
        self.create_reply(review, user=user2, public=True)
        # user4's reply is not public, so user4 must be excluded from the
        # participants set.
        with self.assertNumQueries(2):
            self.assertEqual(review.all_participants, {user1, user2, user3})
    def test_all_participants_with_no_replies(self):
        """Testing Review.all_participants with no replies"""
        user = User.objects.create_user(username='aaa',
                                        email='user1@example.com')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, user=user)
        with self.assertNumQueries(1):
            self.assertEqual(review.all_participants, {user})
    def test_all_participants_with_only_owner_reply(self):
        """Testing Review.all_participants with only review owner replied"""
        user = User.objects.create_user(username='aaa',
                                        email='user1@example.com')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, user=user)
        self.create_reply(review, user=user, public=True)
        with self.assertNumQueries(1):
            self.assertEqual(review.all_participants, {user})
    def test_is_accessible_by_with_public_and_anonymous_user(self):
        """Testing Review.is_accessible_by with public and anonymous user"""
        user = User.objects.get(username='doc')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user,
                                    public=True)
        self.assertTrue(review.is_accessible_by(AnonymousUser()))
    def test_is_accessible_by_with_public_and_private_review_request(self):
        """Testing Review.is_accessible_by with public review and private
        review request
        """
        user = User.objects.get(username='doc')
        other_user = User.objects.get(username='dopey')
        # The review request is never published, so it stays private even
        # though the repository below is made public.
        review_request = self.create_review_request(create_repository=True)
        review = self.create_review(review_request,
                                    user=user,
                                    public=True)
        review_request.repository.public = True
        review_request.repository.save(update_fields=('public',))
        self.assertFalse(review.is_accessible_by(other_user))
    def test_is_accessible_by_with_private_and_anonymous_user(self):
        """Testing Review.is_accessible_by with private and anonymous user"""
        user = User.objects.get(username='doc')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user)
        self.assertFalse(review.is_accessible_by(AnonymousUser()))
    def test_is_accessible_by_with_private_and_owner(self):
        """Testing Review.is_accessible_by with private and owner"""
        user = User.objects.get(username='doc')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user)
        self.assertTrue(review.is_accessible_by(user))
    def test_is_accessible_by_with_private_and_superuser(self):
        """Testing Review.is_accessible_by with private and superuser"""
        user = User.objects.get(username='doc')
        admin = User.objects.get(username='admin')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user)
        self.assertTrue(review.is_accessible_by(admin))
    def test_is_mutable_by_with_public_and_owner(self):
        """Testing Review.is_mutable_by with public and owner"""
        user = User.objects.get(username='doc')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user,
                                    public=True)
        # Published reviews cannot be modified, even by their owner.
        self.assertFalse(review.is_mutable_by(user))
    def test_is_mutable_by_with_private_and_anonymous_user(self):
        """Testing Review.is_mutable_by with private and anonymous user"""
        user = User.objects.get(username='doc')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user)
        self.assertFalse(review.is_mutable_by(AnonymousUser()))
    def test_is_mutable_by_with_private_and_owner(self):
        """Testing Review.is_mutable_by with private and owner"""
        user = User.objects.get(username='doc')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user)
        self.assertTrue(review.is_mutable_by(user))
    def test_is_mutable_by_with_private_and_superuser(self):
        """Testing Review.is_mutable_by with private and superuser"""
        user = User.objects.get(username='doc')
        admin = User.objects.get(username='admin')
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request,
                                    user=user)
        self.assertTrue(review.is_mutable_by(admin))
    def test_is_new_for_user_with_non_owner(self):
        """Testing Review.is_new_for_user with non-owner"""
        user1 = User.objects.create_user(username='test-user-1',
                                         email='user1@example.com')
        user2 = User.objects.create_user(username='test-user-2',
                                         email='user2@example.com')
        review = Review(
            user=user1,
            timestamp=datetime(2017, 9, 7, 15, 27, 0))
        # New only when the review's timestamp is strictly after the
        # user's last visit.
        self.assertTrue(review.is_new_for_user(
            user=user2,
            last_visited=datetime(2017, 9, 7, 10, 0, 0)))
        self.assertFalse(review.is_new_for_user(
            user=user2,
            last_visited=datetime(2017, 9, 7, 16, 0, 0)))
        self.assertFalse(review.is_new_for_user(
            user=user2,
            last_visited=datetime(2017, 9, 7, 15, 27, 0)))
    def test_is_new_for_user_with_owner(self):
        """Testing Review.is_new_for_user with owner"""
        user = User.objects.create_user(username='test-user',
                                        email='user@example.com')
        review = Review(
            user=user,
            timestamp=datetime(2017, 9, 7, 15, 27, 0))
        self.assertFalse(review.is_new_for_user(
            user=user,
            last_visited=datetime(2017, 9, 7, 16, 0, 0)))
    def test_can_user_revoke_ship_it_with_owner(self):
        """Testing Review.can_user_revoke_ship_it with review owner"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        self.assertTrue(review.can_user_revoke_ship_it(review.user))
    def test_can_user_revoke_ship_it_with_non_owner(self):
        """Testing Review.can_user_revoke_ship_it with non-owner"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        user = User.objects.get(username='doc')
        self.assertNotEqual(review.user, user)
        self.assertFalse(review.can_user_revoke_ship_it(user))
    def test_can_user_revoke_ship_it_with_superuser(self):
        """Testing Review.can_user_revoke_ship_it with superuser"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        user = User.objects.get(username='admin')
        self.assertNotEqual(review.user, user)
        self.assertTrue(review.can_user_revoke_ship_it(user))
    @add_fixtures(['test_site'])
    def test_can_user_revoke_ship_it_with_local_site_admin(self):
        """Testing Review.can_user_revoke_ship_it with LocalSite admin"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True,
                                                    with_local_site=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        user = User.objects.create_user(username='new-site-admin',
                                        email='new_site_admin@example.com')
        review_request.local_site.admins.add(user)
        review_request.local_site.users.add(user)
        self.assertTrue(review.can_user_revoke_ship_it(user))
    def test_can_user_revoke_ship_it_with_anonymous(self):
        """Testing Review.can_user_revoke_ship_it with anonymous user"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        self.assertFalse(review.can_user_revoke_ship_it(AnonymousUser()))
    def test_can_user_revoke_ship_it_with_unpublished(self):
        """Testing Review.can_user_revoke_ship_it with unpublished review"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True)
        self.assertFalse(review.can_user_revoke_ship_it(review.user))
    def test_can_user_revoke_ship_it_with_no_ship_it(self):
        """Testing Review.can_user_revoke_ship_it with no Ship It"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request)
        self.assertFalse(review.can_user_revoke_ship_it(review.user))
    def test_revoke_ship_it(self):
        """Testing Review.revoke_ship_it"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        # Spy on the revocation signals (kgb) so we can assert they fired
        # with the expected arguments.
        self.spy_on(review_ship_it_revoking.send)
        self.spy_on(review_ship_it_revoked.send)
        self.assertEqual(review_request.shipit_count, 1)
        review.revoke_ship_it(review.user)
        # Make sure the signals fired.
        self.assertTrue(review_ship_it_revoking.send.called_with(
            sender=Review, user=review.user, review=review))
        self.assertTrue(review_ship_it_revoked.send.called_with(
            sender=Review, user=review.user, review=review))
        # Check the state of the fields.
        self.assertEqual(review.body_top, Review.REVOKED_SHIP_IT_TEXT)
        self.assertFalse(review.ship_it)
        self.assertTrue(review.extra_data.get('revoked_ship_it'))
        self.assertEqual(review_request.shipit_count, 0)
        # Make sure they persisted to the database.
        review = Review.objects.get(pk=review.pk)
        self.assertEqual(review.body_top, Review.REVOKED_SHIP_IT_TEXT)
        self.assertFalse(review.ship_it)
        self.assertTrue(review.extra_data.get('revoked_ship_it'))
    def test_revoke_ship_it_with_no_ship_it(self):
        """Testing Review.revoke_ship_it with no Ship It"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    publish=True)
        expected_error = 'This review is not marked Ship It!'
        with self.assertRaisesMessage(RevokeShipItError, expected_error):
            review.revoke_ship_it(review.user)
        # A failed revocation must leave the review untouched.
        self.assertEqual(review.body_top, Review.SHIP_IT_TEXT)
        self.assertFalse(review.ship_it)
    def test_revoke_ship_it_with_custom_body_top(self):
        """Testing Review.revoke_ship_it with custom existing body_top"""
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top='This is a test',
                                    ship_it=True,
                                    publish=True)
        review.revoke_ship_it(review.user)
        # A custom body_top is preserved; only the stock Ship It text gets
        # replaced on revocation.
        self.assertEqual(review.body_top, 'This is a test')
        self.assertFalse(review.ship_it)
        self.assertTrue(review.extra_data.get('revoked_ship_it'))
    def test_revoke_ship_it_with_revoking_signal_exception(self):
        """Testing Review.revoke_ship_it with exception in
        review_ship_it_revoking handler
        """
        def on_revoking(**kwargs):
            raise Exception('oh no')
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        try:
            review_ship_it_revoking.connect(on_revoking)
            expected_error = 'Error revoking the Ship It: oh no'
            with self.assertRaisesMessage(RevokeShipItError, expected_error):
                review.revoke_ship_it(review.user)
        finally:
            review_ship_it_revoking.disconnect(on_revoking)
        # A pre-revoke handler error aborts the revocation entirely.
        self.assertEqual(review.body_top, Review.SHIP_IT_TEXT)
        self.assertTrue(review.ship_it)
        self.assertNotIn('revoked_ship_it', review.extra_data)
    def test_revoke_ship_it_with_revoked_signal_exception(self):
        """Testing Review.revoke_ship_it with exception in
        review_ship_it_revoked handler
        """
        def on_revoked(**kwargs):
            raise Exception('oh no')
        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    ship_it=True,
                                    publish=True)
        try:
            review_ship_it_revoked.connect(on_revoked)
            review.revoke_ship_it(review.user)
        finally:
            review_ship_it_revoked.disconnect(on_revoked)
        # A post-revoke handler error does not roll the revocation back.
        self.assertEqual(review.body_top, Review.REVOKED_SHIP_IT_TEXT)
        self.assertFalse(review.ship_it)
        self.assertTrue(review.extra_data.get('revoked_ship_it'))
    def test_revoke_ship_it_timestamp(self):
        """Testing Review.revoke_ship_it does not modify the review timestamp
        """
        # ReviewRequest.last_update is a
        # django.db.fields.ModificationTimestampField, which retrieves its
        # value from datetime.utcnow().replace(tzinfo=utc).
        #
        # django.utils.timezone.now has the same implementation.
        #
        # Unfortunately, we cannot spy on datetime.utcnow since it is a
        # builtin. So we replace get_tz_aware_utcnow with timezone.now and we
        # will replace that with a constant function in the spy_on calls below.
        self.spy_on(get_tz_aware_utcnow, call_fake=lambda: timezone.now())
        creation_timestamp = datetime.fromtimestamp(0, timezone.utc)
        review_timestamp = creation_timestamp + timedelta(hours=1)
        revoke_timestamp = review_timestamp + timedelta(hours=1)
        with spy_on(timezone.now, call_fake=lambda: creation_timestamp):
            review_request = self.create_review_request(publish=True)
        with spy_on(timezone.now, call_fake=lambda: review_timestamp):
            review = self.create_review(review_request,
                                        body_top=Review.SHIP_IT_TEXT,
                                        ship_it=True,
                                        publish=True)
        review_request = ReviewRequest.objects.get(pk=review_request.pk)
        self.assertEqual(review_request.time_added, creation_timestamp)
        self.assertEqual(review_request.last_updated, review_timestamp)
        self.assertEqual(review.timestamp, review_timestamp)
        with spy_on(timezone.now, call_fake=lambda: revoke_timestamp):
            review.revoke_ship_it(review.user)
        # Timestamps must be unchanged after the revocation.
        review = Review.objects.get(pk=review.pk)
        review_request = ReviewRequest.objects.get(pk=review_request.pk)
        self.assertEqual(review_request.time_added, creation_timestamp)
        self.assertEqual(review_request.last_updated, review_timestamp)
        self.assertEqual(review.timestamp, review_timestamp)
| |
import json
from contextlib import closing, contextmanager
from io import BytesIO
from django.conf import settings
from django.contrib import messages
from django.http import (
Http404,
HttpRequest,
HttpResponse,
HttpResponseBadRequest,
HttpResponseRedirect,
)
from django.http.response import HttpResponseServerError
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.utils.html import escape
from braces.views import JSONResponseMixin
from memoized import memoized
from couchexport.models import Format
from dimagi.utils.dates import DateSpan
from dimagi.utils.modules import to_function
from dimagi.utils.web import json_request
from soil import DownloadBase
from soil.exceptions import TaskFailedError
from soil.util import get_download_context
from corehq.apps.domain.decorators import track_domain_request
from corehq.apps.domain.views.base import BaseDomainView
from corehq.apps.hqwebapp.crispy import CSS_ACTION_CLASS
from corehq.apps.hqwebapp.decorators import (
use_datatables,
use_daterangepicker,
use_jquery_ui,
use_nvd3,
)
from corehq.apps.locations.permissions import conditionally_location_safe
from corehq.apps.reports.datatables import DataTablesHeader
from corehq.apps.reports.dispatcher import ReportDispatcher
from corehq.apps.reports.util import DatatablesParams
from corehq.apps.reports_core.exceptions import FilterException
from corehq.apps.reports_core.filters import Choice, PreFilter
from corehq.apps.saved_reports.models import ReportConfig
from corehq.apps.userreports.const import (
DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE,
REPORT_BUILDER_EVENTS_KEY,
)
from corehq.apps.userreports.exceptions import (
BadSpecError,
DataSourceConfigurationNotFoundError,
TableNotFoundWarning,
UserReportsError,
UserReportsFilterError,
)
from corehq.apps.userreports.models import (
CUSTOM_REPORT_PREFIX,
ReportConfiguration,
StaticReportConfiguration,
report_config_id_is_static,
)
from corehq.apps.userreports.reports.data_source import (
ConfigurableReportDataSource,
)
from corehq.apps.userreports.reports.util import (
ReportExport,
report_has_location_filter,
)
from corehq.apps.userreports.tasks import export_ucr_async
from corehq.apps.userreports.util import (
can_delete_report,
can_edit_report,
default_language,
get_referring_apps,
get_ucr_class_name,
has_report_builder_access,
has_report_builder_trial, wrap_report_config_by_type, get_report_config_or_not_found,
)
from corehq.toggles import DISABLE_COLUMN_LIMIT_IN_UCR
from corehq.util.couch import (
DocumentNotFound,
get_document_or_404,
get_document_or_not_found,
)
from corehq.util.view_utils import reverse
from no_exceptions.exceptions import Http403
def get_filter_values(filters, request_dict, user=None):
    """
    Return a dictionary mapping filter ids to specified values

    :param filters: A list of corehq.apps.reports_core.filters.BaseFilter
        objects (or subclasses)
    :param request_dict: key word arguments from the request
    :param user: optional user, passed through to each filter's get_value()
    :return: a dict of {css_id: value}
    :raises UserReportsFilterError: if any filter rejects its input
    """
    try:
        # `filter_` (not `filter`) so the builtin is not shadowed.
        return {
            filter_.css_id: filter_.get_value(request_dict, user)
            for filter_ in filters
        }
    except FilterException as e:
        # Re-raise as the UCR-level error, keeping the original exception
        # as the cause for easier debugging.
        raise UserReportsFilterError(str(e)) from e
def query_dict_to_dict(query_dict, domain, string_type_params):
    """
    Transform the given QueryDict to a normal dict where each value has been
    converted from a string to a dict (if the value is JSON). Params with
    values 'true' or 'false' or numbers are cast to the respective datatypes,
    unless the key is specified in ``string_type_params``.
    Also add the domain to the dict.
    :param query_dict: a QueryDict
    :param domain: the domain name, injected into the result dict
    :param string_type_params: list of params that should not be autocast
        to booleans/numbers
    :return: a dict
    """
    request_dict = json_request(query_dict)
    request_dict['domain'] = domain
    # json.loads casts strings 'true'/'false' to booleans, so undo it
    # by restoring the raw request values for string-typed keys.
    for key in string_type_params:
        if key in query_dict:
            vals = query_dict.getlist(key)
            # Multi-valued params stay a list; single values are unwrapped.
            if len(vals) > 1:
                request_dict[key] = vals
            else:
                request_dict[key] = vals[0]
    return request_dict
@contextmanager
def delete_report_config(report_config):
    """Yield ``report_config``, then delete it when the block completes.

    NOTE(review): the delete only runs if the ``with`` body exits without
    raising; on an exception the config is left in place — confirm this
    best-effort behavior is intended (a try/finally would delete always).
    """
    yield report_config
    report_config.delete()
def _ucr_view_is_safe(view_fn, *args, **kwargs):
    """Location-safety predicate: a UCR view is safe when its report has a
    location filter."""
    config_id = kwargs.get('subreport_slug')
    domain = kwargs.get('domain')
    return report_has_location_filter(config_id=config_id, domain=domain)
@conditionally_location_safe(_ucr_view_is_safe)
class ConfigurableReportView(JSONResponseMixin, BaseDomainView):
    # Navigation section this view lives under.
    section_name = ugettext_noop("Reports")
    template_name = 'userreports/configurable_report.html'
    slug = "configurable"
    prefix = slug
    # Report capability flags (email rendering, export, etc.).
    emailable = True
    is_exportable = True
    exportable_all = True
    show_filters = True
    # Backing field for the `domain` property override below.
    _domain = None
    @property
    def domain(self):
        # An explicitly assigned _domain wins over the one derived by
        # BaseDomainView — presumably for callers operating outside a
        # normal request cycle; confirm against call sites.
        if self._domain is not None:
            return self._domain
        return super(ConfigurableReportView, self).domain
    @use_daterangepicker
    @use_jquery_ui
    @use_datatables
    @use_nvd3
    @track_domain_request(calculated_prop='cp_n_viewed_ucr_reports')
    def dispatch(self, request, *args, **kwargs):
        # Report-builder reports require report builder access; send the
        # user to the paywall page otherwise.
        if self.should_redirect_to_paywall(request):
            from corehq.apps.userreports.views import paywall_home
            return HttpResponseRedirect(paywall_home(self.domain))
        else:
            original = super(ConfigurableReportView, self).dispatch(request, *args, **kwargs)
            return original
def should_redirect_to_paywall(self, request):
spec = self.get_spec_or_404()
return spec.report_meta.created_by_builder and not has_report_builder_access(request)
    @property
    def section_url(self):
        # Breadcrumb target: the Reports landing page for this domain.
        return reverse('reports_home', args=(self.domain, ))
    @property
    def is_static(self):
        # True when the id refers to a code-defined (static) report config.
        return report_config_id_is_static(self.report_config_id)
    @property
    def is_custom_rendered(self):
        # Custom-rendered reports are namespaced with CUSTOM_REPORT_PREFIX.
        return self.report_config_id.startswith(CUSTOM_REPORT_PREFIX)
    @property
    @memoized
    def spec(self):
        """The report configuration backing this view.

        Static reports are looked up in StaticReportConfiguration; all
        others come from the database. May raise DocumentNotFound or
        BadSpecError — see get_spec_or_404() for the 404-converting wrapper.
        """
        if self.is_static:
            return StaticReportConfiguration.by_id(self.report_config_id, domain=self.domain)
        else:
            return get_report_config_or_not_found(self.domain, self.report_config_id)
    def get_spec_or_404(self):
        """Return the report spec, converting load failures into a 404."""
        try:
            return self.spec
        except (DocumentNotFound, BadSpecError) as e:
            # Surface the underlying error to the user before 404ing.
            messages.error(self.request, e)
            raise Http404()
def has_viable_configuration(self):
try:
self.spec
except (DocumentNotFound, BadSpecError):
return False
else:
return True
    @property
    def title(self):
        # Report title, as declared in the spec.
        return self.spec.title
    @property
    def page_name(self):
        # The page heading mirrors the report title.
        return self.spec.title
    @property
    @memoized
    def data_source(self):
        # Build the configurable data source once per view instance and
        # localize it to the active language.
        report = ConfigurableReportDataSource.from_spec(self.spec, include_prefilters=True)
        report.lang = self.lang
        return report
@property
@memoized
def request_dict(self):
string_type_params = [
filter.name
for filter in self.filters
if getattr(filter, 'datatype', 'string') == "string"
]
query_dict = self.request.GET if self.request.method == 'GET' else self.request.POST
return query_dict_to_dict(query_dict, self.domain, string_type_params)
@property
@memoized
def request_user(self):
try:
return self.request.couch_user
except AttributeError:
return None
    @property
    @memoized
    def filter_values(self):
        """Mapping of filter css_id -> validated value from the request."""
        return get_filter_values(self.filters, self.request_dict, user=self.request_user)
@property
@memoized
def filter_context(self):
return {
filter.css_id: filter.context(self.request_dict, self.request_user, self.lang)
for filter in self.filters
}
    @property
    @memoized
    def filters(self):
        """UI filter definitions declared by the report spec."""
        return self.spec.ui_filters
    # Backing field for the `report_config_id` property; presumably
    # assigned by subclasses or non-URL entry points — confirm at call sites.
    _report_config_id = None
    @property
    def report_config_id(self):
        # An explicitly assigned id wins over the URL kwarg.
        if self._report_config_id is not None:
            return self._report_config_id
        return self.kwargs['subreport_slug']
    # Backing field for the `lang` property; allows an explicit override.
    _lang = None
    @property
    def lang(self):
        # Fall back to the user's language, then the project default.
        if self._lang is not None:
            return self._lang
        return self.request.couch_user.language or default_language()
    def get(self, request, *args, **kwargs):
        """Render the report, dispatching on the requested output format.

        Order matters: permission check, spec existence, then special
        renderings (email / excel / export / ajax-json) before falling
        through to the normal HTML page render.
        """
        if self.has_permissions(self.domain, request.couch_user):
            self.get_spec_or_404()
            if kwargs.get('render_as') == 'email':
                return self.email_response
            elif kwargs.get('render_as') == 'excel':
                return self.excel_response
            elif request.GET.get('format', None) == "export":
                return self.export_response
            elif request.is_ajax() or request.GET.get('format', None) == 'json':
                return self.get_ajax(self.request.GET)
            self.content_type = None
            try:
                # Probing the data source here surfaces configuration errors
                # before the page template renders.
                self.add_warnings(self.request)
            except UserReportsError as e:
                details = ''
                if isinstance(e, DataSourceConfigurationNotFoundError):
                    error_message = DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE
                else:
                    error_message = _(
                        'It looks like there is a problem with your report. '
                        'You may need to delete and recreate the report. '
                        'If you believe you are seeing this message in error, please report an issue.'
                    )
                    details = str(e)
                # Swap in the error template and short-circuit the render.
                self.template_name = 'userreports/report_error.html'
                context = {
                    'report_id': self.report_config_id,
                    'is_static': self.is_static,
                    'error_message': error_message,
                    'details': details,
                }
                context.update(self.main_context)
                return self.render_to_response(context)
            return super(ConfigurableReportView, self).get(request, *args, **kwargs)
        else:
            raise Http403()
    def post(self, request, *args, **kwargs):
        """Handle ajax data requests; any non-ajax POST is rejected."""
        if self.has_permissions(self.domain, request.couch_user):
            self.get_spec_or_404()
            if request.is_ajax():
                return self.get_ajax(self.request.POST)
            else:
                return HttpResponseBadRequest()
        else:
            raise Http403()
    def has_permissions(self, domain, user):
        """True when *user* may view this report in *domain*."""
        return _has_permission(domain, user, self.report_config_id)
    def add_warnings(self, request):
        """Attach data-source column warnings to the request's messages."""
        for warning in self.data_source.column_warnings:
            messages.warning(request, warning)
    @property
    def page_context(self):
        """Template context for the report page."""
        context = {
            'report': self,
            'report_table': {'default_rows': 25},
            'filter_context': self.filter_context,
            'url': self.url,
            'method': 'POST',
            'headers': self.headers,
            'can_edit_report': can_edit_report(self.request, self),
            'can_delete_report': can_delete_report(self.request, self),
            'referring_apps': get_referring_apps(self.domain, self.report_config_id),
            'has_report_builder_trial': has_report_builder_trial(self.request),
            'report_filter_form_action_css_class': CSS_ACTION_CLASS,
        }
        context.update(self.saved_report_context_data)
        context.update(self.pop_report_builder_context_data())
        # Map reports page through more rows at a time.
        if isinstance(self.spec, ReportConfiguration) and self.spec.report_meta.builder_report_type == 'map':
            context['report_table']['default_rows'] = 100
        if self.request.couch_user.is_staff and hasattr(self.data_source, 'data_source'):
            # Staff-only debugging aid: expose the raw query strings.
            context['queries'] = self.data_source.data_source.get_query_strings()
        return context
    def pop_report_builder_context_data(self):
        """
        Pop any report builder data stored on the session and return a dict to
        be included in the template context.

        Popping (rather than reading) ensures the events are only reported once.
        """
        return {
            'report_builder_events': self.request.session.pop(REPORT_BUILDER_EVENTS_KEY, [])
        }
    @property
    def saved_report_context_data(self):
        """Context for the "saved reports" UI: the user's saved configs plus
        the one selected via ?config_id, and the available datespan filters."""
        def _get_context_for_saved_report(report_config):
            # Merge the concrete date range into the stored filters so the
            # UI shows resolved dates; fall back to an empty default config.
            if report_config:
                report_config_data = report_config.to_json()
                report_config_data['filters'].update(report_config.get_date_range())
                return report_config_data
            else:
                return ReportConfig.default()
        saved_report_config_id = self.request.GET.get('config_id')
        saved_report_config = get_document_or_404(ReportConfig, self.domain, saved_report_config_id) \
            if saved_report_config_id else None
        return {
            'report_configs': [
                _get_context_for_saved_report(saved_report)
                for saved_report in ReportConfig.by_domain_and_owner(
                    self.domain, self.request.couch_user._id, report_slug=self.slug
                )
            ],
            'default_config': _get_context_for_saved_report(saved_report_config),
            'datespan_filters': ReportConfig.datespan_filter_choices(self.datespan_filters, self.lang),
        }
    @property
    def has_datespan(self):
        """True when the spec defines at least one date filter."""
        return bool(self.datespan_filters)
    @property
    def datespan_filters(self):
        """Filter specs of type 'date' from the report configuration."""
        return [
            f for f in self.spec.filters
            if f['type'] == 'date'
        ]
    @property
    def headers(self):
        """DataTables header built from the data source's expanded columns."""
        return DataTablesHeader(*[col.data_tables_column for col in self.data_source.inner_columns])
@classmethod
def sanitize_page(cls, page):
result = []
for row in page:
result.append({k: cls._sanitize_column(v) for (k, v) in row.items()})
return result
@classmethod
def _sanitize_column(cls, col):
if isinstance(col, str):
return escape(col)
return col
    def get_ajax(self, params):
        """Return the DataTables JSON payload for one page of report data.

        *params* is a QueryDict (GET or POST) using the classic DataTables
        server-side protocol (iSortCol_0, sSortDir_0, sEcho, ...).
        """
        sort_column = params.get('iSortCol_0')
        sort_order = params.get('sSortDir_0', 'ASC')
        echo = int(params.get('sEcho', 1))
        datatables_params = DatatablesParams.from_request_dict(params)
        try:
            data_source = self.data_source
            if len(data_source.inner_columns) > 50 and not DISABLE_COLUMN_LIMIT_IN_UCR.enabled(self.domain):
                raise UserReportsError(_("This report has too many columns to be displayed"))
            data_source.set_filter_values(self.filter_values)
            # echo == 1 is the initial draw; keep the spec's default ordering then.
            if sort_column and echo != 1:
                data_source.set_order_by(
                    [(data_source.top_level_columns[int(sort_column)].column_id, sort_order.upper())]
                )
            page = list(data_source.get_data(start=datatables_params.start, limit=datatables_params.count))
            page = self.sanitize_page(page)
            total_records = data_source.get_total_records()
            total_row = data_source.get_total_row() if data_source.has_total_row else None
        except UserReportsError as e:
            if settings.DEBUG:
                raise
            # Report the error in-band so DataTables renders an empty table.
            return self.render_json_response({
                'error': str(e),
                'aaData': [],
                'iTotalRecords': 0,
                'iTotalDisplayRecords': 0,
            })
        except TableNotFoundWarning:
            if self.spec.report_meta.created_by_builder:
                msg = _(
                    "The database table backing your report does not exist yet. "
                    "Please wait while the report is populated."
                )
            else:
                msg = _(
                    "The database table backing your report does not exist yet. "
                    "You must rebuild the data source before viewing the report."
                )
            return self.render_json_response({
                'warning': msg
            })
        json_response = {
            'aaData': page,
            "sEcho": params.get('sEcho', 0),
            "iTotalRecords": total_records,
            "iTotalDisplayRecords": total_records,
        }
        if total_row is not None:
            json_response["total_row"] = total_row
        return self.render_json_response(json_response)
    def _get_initial(self, request, **kwargs):
        # Intentionally a no-op: this view has no initial form data.
        pass
    @classmethod
    def url_pattern(cls):
        """URL pattern capturing the report config id as ``subreport_slug``."""
        from django.conf.urls import url
        pattern = r'^{slug}/(?P<subreport_slug>[\w\-:]+)/$'.format(slug=cls.slug)
        return url(pattern, cls.as_view(), name=cls.slug)
    @property
    def type(self):
        """
        Used to populate ReportConfig.report_type
        """
        return self.prefix
    @property
    def sub_slug(self):
        """
        Used to populate ReportConfig.subreport_slug
        """
        return self.report_config_id
    @classmethod
    def get_report(cls, domain, slug, report_config_id):
        """Build a report view instance programmatically (outside a request).

        Returns None when the configuration cannot be loaded.
        """
        report = cls()
        report._domain = domain
        report._report_config_id = report_config_id
        if not report.has_viable_configuration():
            return None
        report.name = report.title
        return report
    @property
    def url(self):
        """Canonical URL for this report within the domain."""
        return reverse(self.slug, args=[self.domain, self.report_config_id])
def _get_filter_export_format(self, filter_value):
if isinstance(filter_value, list):
values = []
for value in filter_value:
if isinstance(value, Choice):
values.append(value.display)
else:
values.append(str(value))
return ', '.join(values)
elif isinstance(filter_value, DateSpan):
return filter_value.default_serialization()
else:
if isinstance(filter_value, Choice):
return filter_value.display
else:
return str(filter_value)
    @property
    @memoized
    def report_export(self):
        """ReportExport helper bound to this report's spec and filter values."""
        return ReportExport(self.domain, self.title, self.spec, self.lang, self.filter_values)
    @property
    def export_table(self):
        """Full report data as an export table structure."""
        return self.report_export.get_table()
    @property
    @memoized
    def email_response(self):
        """JSON response wrapping the report rendered as HTML for email."""
        with closing(BytesIO()) as temp:
            try:
                self.report_export.create_export(temp, Format.HTML)
            except UserReportsError as e:
                return self.render_json_response({'error': str(e)})
            return HttpResponse(json.dumps({
                'report': temp.getvalue().decode('utf-8'),
            }), content_type='application/json')
@property
@memoized
def excel_response(self):
file = BytesIO()
self.report_export.create_export(file, Format.XLS_2007)
return file
    @property
    @memoized
    def export_response(self):
        """Kick off an async export task and redirect to its status page."""
        download = DownloadBase()
        res = export_ucr_async.delay(self.report_export, download.download_id, self.request.couch_user)
        download.set_task(res)
        return redirect(DownloadUCRStatusView.urlname, self.domain, download.download_id, self.report_config_id)
@classmethod
def sanitize_export_table(cls, table):
result = []
for row in table:
result.append([cls._sanitize_column(x) for x in row])
return result
    @classmethod
    def report_preview_data(cls, domain, report_config):
        """Return sanitized preview data for a report config, or None on error."""
        try:
            # Preview always uses English and no filter values.
            export = ReportExport(domain, report_config.title, report_config, "en", {})
            return {
                "table": cls.sanitize_export_table(export.get_table_data()),
                "map_config": report_config.map_config,
                "chart_configs": report_config.charts,
                "aaData": cls.sanitize_page(export.get_data()),
            }
        except UserReportsError:
            # User posted an invalid report configuration
            return None
        except DataSourceConfigurationNotFoundError:
            # A temporary data source has probably expired
            # TODO: It would be more helpful just to quietly recreate the data source config from GET params
            return None
# Base class for classes that provide custom rendering for UCRs
class CustomConfigurableReport(ConfigurableReportView):
    """Base view for UCR reports that supply their own rendering."""
    # Ensures that links in saved reports will hit CustomConfigurableReportDispatcher
    slug = 'custom_configurable'
class CustomConfigurableReportDispatcher(ReportDispatcher):
    """Routes custom-rendered UCRs to their project-declared view class."""
    slug = prefix = 'custom_configurable'
    map_name = 'CUSTOM_UCR'
    @staticmethod
    def _report_class(domain, config_id):
        """Resolve the dotted path of the custom report class and import it."""
        class_path = StaticReportConfiguration.report_class_by_domain_and_id(
            domain, config_id
        )
        return to_function(class_path)
    def dispatch(self, request, domain, subreport_slug, **kwargs):
        """Dispatch to the custom report view, 404ing on unknown/bad configs."""
        report_config_id = subreport_slug
        try:
            report_class = self._report_class(domain, report_config_id)
        except (BadSpecError, DocumentNotFound):
            raise Http404
        return report_class.as_view()(request, domain=domain, subreport_slug=report_config_id, **kwargs)
    @classmethod
    def get_report(cls, domain, slug, config_id):
        """Instantiate the custom report, or return None on a bad spec."""
        try:
            report_class = cls._report_class(domain, config_id)
        except BadSpecError:
            return None
        return report_class.get_report(domain, slug, config_id)
    @classmethod
    def url_pattern(cls):
        """URL pattern capturing the report config id as ``subreport_slug``."""
        from django.conf.urls import url
        pattern = r'^{slug}/(?P<subreport_slug>[\w\-:]+)/$'.format(slug=cls.slug)
        return url(pattern, cls.as_view(), name=cls.slug)
@conditionally_location_safe(_ucr_view_is_safe)
class DownloadUCRStatusView(BaseDomainView):
    """Status page for an asynchronous UCR export download."""
    urlname = 'download_ucr_status'
    page_title = ugettext_noop('Download UCR Status')
    section_name = ugettext_noop("Reports")
    @property
    def section_url(self):
        """URL of the Reports section landing page for this domain."""
        return reverse('reports_home', args=(self.domain, ))
    def get(self, request, *args, **kwargs):
        """Render the polling page for the export task's progress."""
        if _has_permission(self.domain, request.couch_user, self.report_config_id):
            context = super(DownloadUCRStatusView, self).main_context
            # NOTE(review): ``params`` is not a kwarg of django.urls.reverse —
            # presumably a project-local reverse that appends a querystring; confirm.
            context.update({
                'domain': self.domain,
                'download_id': kwargs['download_id'],
                'poll_url': reverse('ucr_download_job_poll',
                                    args=[self.domain, kwargs['download_id']],
                                    params={'config_id': self.report_config_id}),
                'title': _("Download Report Status"),
                'progress_text': _("Preparing report download."),
                'error_text': _("There was an unexpected error! Please try again or report an issue."),
                'next_url': reverse(ConfigurableReportView.slug, args=[self.domain, self.report_config_id]),
                'next_url_text': _("Go back to report"),
            })
            return render(request, 'hqwebapp/soil_status_full.html', context)
        else:
            raise Http403()
    def page_url(self):
        """URL of this status page."""
        return reverse(self.urlname, args=self.args, kwargs=self.kwargs)
    @property
    def parent_pages(self):
        """Breadcrumb linking back to the report being exported."""
        return [{
            'title': self.spec.title,
            'url': reverse(ConfigurableReportView.slug, args=[self.domain, self.report_config_id]),
        }]
    @property
    @memoized
    def spec(self):
        """Load the report configuration backing the export."""
        if self.is_static:
            return StaticReportConfiguration.by_id(self.report_config_id, domain=self.domain)
        else:
            return get_report_config_or_not_found(self.domain, self.report_config_id)
    @property
    def is_static(self):
        """True when the report config id refers to a static report."""
        return report_config_id_is_static(self.report_config_id)
    @property
    @memoized
    def report_config_id(self):
        """Report config id taken from the URL kwargs."""
        return self.kwargs['subreport_slug']
def _safe_download_poll(view_fn, request, domain, download_id, *args, **kwargs):
    """Location-safety predicate: allow polling when the report has a location filter."""
    return report_has_location_filter(request.GET.get('config_id'), domain)
@conditionally_location_safe(_safe_download_poll)
def ucr_download_job_poll(request, domain,
                          download_id,
                          template="hqwebapp/partials/shared_download_status.html"):
    """Poll endpoint returning the current status of a UCR export task."""
    config_id = request.GET.get('config_id')
    if config_id and _has_permission(domain, request.couch_user, config_id):
        try:
            context = get_download_context(download_id, 'Preparing download')
            context.update({'link_text': _('Download Report')})
        except TaskFailedError as e:
            return HttpResponseServerError(e.errors)
        return render(request, template, context)
    else:
        raise Http403()
def _has_permission(domain, user, config_id):
if domain is None:
return False
if not user.is_active:
return False
return user.can_view_report(domain, get_ucr_class_name(config_id))
| |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert DETR checkpoints."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import torch
from PIL import Image
import requests
from huggingface_hub import cached_download, hf_hub_url
from transformers import DetrConfig, DetrFeatureExtractor, DetrForObjectDetection, DetrForSegmentation
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# The DETR checkpoints use 6 encoder and 6 decoder layers.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move the entry stored under *old* to *new*, mutating state_dict in place."""
    state_dict[new] = state_dict.pop(old)
def rename_backbone_keys(state_dict):
    """Return a copy of *state_dict* with torchvision backbone prefixes
    ("backbone.0.body") rewritten to the HF naming ("backbone.conv_encoder.model"),
    preserving key order."""
    renamed = OrderedDict()
    for old_key, tensor in state_dict.items():
        if "backbone.0.body" in old_key:
            renamed[old_key.replace("backbone.0.body", "backbone.conv_encoder.model")] = tensor
        else:
            renamed[old_key] = tensor
    return renamed
def read_in_q_k_v(state_dict, is_panoptic=False):
    """
    Split each fused q/k/v input projection into separate q/k/v entries, in place.

    PyTorch's MultiheadAttention stores one (3*dim, dim) ``in_proj_weight`` and
    one (3*dim,) ``in_proj_bias`` per attention module; the HF implementation
    expects separate q_proj/k_proj/v_proj. The hidden size is derived from the
    checkpoint (rows // 3) instead of being hard-coded to 256, so non-standard
    widths also work. Panoptic checkpoints carry a "detr." key prefix.
    """
    prefix = "detr." if is_panoptic else ""

    def _split(weight, bias, dest):
        # Rows [0:dim) are the query, [dim:2*dim) the key, [2*dim:3*dim) the value.
        dim = weight.shape[0] // 3
        state_dict[f"{dest}.q_proj.weight"] = weight[:dim, :]
        state_dict[f"{dest}.q_proj.bias"] = bias[:dim]
        state_dict[f"{dest}.k_proj.weight"] = weight[dim:2 * dim, :]
        state_dict[f"{dest}.k_proj.bias"] = bias[dim:2 * dim]
        state_dict[f"{dest}.v_proj.weight"] = weight[-dim:, :]
        state_dict[f"{dest}.v_proj.bias"] = bias[-dim:]

    # first: transformer encoder (self-attention only)
    for i in range(6):
        _split(
            state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight"),
            state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias"),
            f"encoder.layers.{i}.self_attn",
        )
    # next: transformer decoder (self-attention + cross-attention)
    for i in range(6):
        _split(
            state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight"),
            state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias"),
            f"decoder.layers.{i}.self_attn",
        )
        _split(
            state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"),
            state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias"),
            f"decoder.layers.{i}.encoder_attn",
        )
# We will verify our results on an image of cute cats
def prepare_img():
    """Fetch the standard COCO val2017 cats image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw)
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our DETR structure.

    Args:
        model_name: torch-hub name of the DETR model to convert, e.g.
            "detr_resnet50"; "resnet101", "dc5" and "panoptic" variants are
            recognized from the name.
        pytorch_dump_folder_path: directory the converted HuggingFace model
            and feature extractor are written to.
    """
    # load default config and adapt it to the requested variant
    config = DetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "datasets/huggingface/label-files"
    filename = "coco-detection-id2label.json"
    # use a context manager so the label file handle is closed
    # (the original json.load(open(...)) leaked it)
    with open(cached_download(hf_hub_url(repo_id, filename)), "r") as id2label_file:
        id2label = json.load(id2label_file)
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load feature extractor (local renamed so it no longer shadows builtin `format`)
    extractor_format = "coco_panoptic" if is_panoptic else "coco_detection"
    feature_extractor = DetrFeatureExtractor(format=extractor_format)
    # prepare image
    img = prepare_img()
    encoding = feature_extractor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    detr = torch.hub.load("facebookresearch/detr", model_name, pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                # segmentation-head keys already have the right names
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion against the original model's outputs
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and feature extractor (create missing parent dirs as well)
    logger.info(f"Saving PyTorch model and feature extractor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a torch-hub DETR checkpoint to the HF format.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name", default="detr_resnet50", type=str, help="Name of the DETR model you'd like to convert."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| |
import os
import shutil
import tempfile
import unittest
from subprocess import call, check_output, PIPE, Popen, STDOUT
class TestGitSnapshot(unittest.TestCase):
def _status(self):
return check_output(('git', '-c', 'color.ui=never', 'status', '--short'))
def _stashes(self):
return check_output(('git', 'stash', 'list')).splitlines()
def _output(self, command):
proc = Popen(command, stdout=PIPE, stderr=STDOUT)
return proc.communicate()[0].strip()
def setUp(self):
self.dirpath = tempfile.mkdtemp()
os.chdir(self.dirpath)
call('git init --quiet'.split())
call('touch CHANGELOG.md'.split())
call('touch CONTRIBUTING.md'.split())
call('git add -A'.split())
call(['git', 'commit', '--quiet', '-m', 'Initial commit'])
with open('CHANGELOG.md', 'w') as a_file:
a_file.write('changelog\n')
def tearDown(self):
shutil.rmtree(self.dirpath)
def test_snapshot(self):
# run
stdout, stderr = Popen('git snapshot'.split(), stdout=PIPE, stderr=PIPE).communicate()
# verify
self.assertRegexpMatches(stdout.strip(), 'Saved working directory and index state WIP on master: \w+ Initial commit')
self.assertFalse(stderr)
self.assertEqual(self._status(), " M CHANGELOG.md\n")
stashes = self._stashes()
self.assertEqual(len(stashes), 1)
self.assertRegexpMatches(stashes[0], 'stash@\{0\}: WIP on master: [0-9a-f]+ Initial commit')
call('git reset --hard --quiet'.split())
call('git stash pop --quiet'.split())
self.assertEqual(self._status(), " M CHANGELOG.md\n")
def test_snapshot_quiet(self):
# run
stdout, stderr = Popen('git snapshot --quiet'.split(), stdout=PIPE, stderr=PIPE).communicate()
# verify
self.assertFalse(stdout)
self.assertFalse(stderr)
self.assertEqual(self._status(), " M CHANGELOG.md\n")
stashes = self._stashes()
self.assertEqual(len(stashes), 1)
self.assertRegexpMatches(stashes[0], 'stash@\{0\}: WIP on master: [0-9a-f]+ Initial commit')
call('git reset --hard --quiet'.split())
call('git stash pop --quiet'.split())
self.assertEqual(self._status(), " M CHANGELOG.md\n")
def test_snapshot_complex(self):
# setup
call('touch CONTRIBUTING.md'.split())
call('touch README.md'.split())
call('touch file1.txt'.split())
call('touch file2.txt'.split())
with open('CONTRIBUTING.md', 'w') as a_file:
a_file.write('contributing\n')
with open('README.md', 'w') as a_file:
a_file.write('readme\n')
with open('file0.txt', 'w') as a_file:
a_file.write('file0\n')
with open('file1.txt', 'w') as a_file:
a_file.write('file1\n')
with open('file2.txt', 'w') as a_file:
a_file.write('file2\n')
call('git add -A'.split())
call(['git', 'commit', '--quiet', '--message', 'Initial commit'])
call('rm file0.txt'.split())
call(['git', 'commit', '--all', '--quiet', '--message', 'Remove file0.txt'])
with open('CHANGELOG.md', 'a') as a_file:
a_file.write('changelog\n')
call('git add -- CHANGELOG.md'.split())
with open('CHANGELOG.md', 'a') as a_file:
a_file.write('changelog\n')
with open('CONTRIBUTING.md', 'a') as a_file:
a_file.write('contributing\n')
call('git add -- CONTRIBUTING.md'.split())
with open('README.md', 'a') as a_file:
a_file.write('readme\n')
call('git rm --quiet -- file1.txt'.split())
call('rm file2.txt'.split())
call('touch file3.txt'.split())
call('git add -- file3.txt'.split())
call('touch file4.txt'.split())
# MM CHANGELOG.md
# M CONTRIBUTING.md
# M README.md
# D file1.txt
# D file2.txt
# A file3.txt
# ?? file4.txt
# run
call('git snapshot --quiet'.split())
# verify
call('git reset --hard --quiet'.split())
call('git clean --force --quiet'.split())
call('git stash pop --index --quiet'.split())
self.assertEqual(self._status(), """MM CHANGELOG.md
M CONTRIBUTING.md
M README.md
D file1.txt
D file2.txt
A file3.txt
?? file4.txt
""")
def test_snapshot_withMessage(self):
# setup
message = "My snapshot message"
# run
stdout, stderr = Popen(('git', 'snapshot', message), stdout=PIPE, stderr=PIPE).communicate()
# verify
self.assertEqual(stdout.strip(), 'Saved working directory and index state On master: My snapshot message')
self.assertFalse(stderr)
self.assertEqual(self._status(), " M CHANGELOG.md\n")
stashes = self._stashes()
self.assertEqual(len(stashes), 1)
self.assertRegexpMatches(stashes[0], 'stash@\{0\}: On master: ' + message)
call('git reset --hard --quiet'.split())
call('git stash pop --quiet'.split())
self.assertEqual(self._status(), " M CHANGELOG.md\n")
def test_snapshot_quiet(self):
# run
stdout, stderr = Popen('git snapshot --quiet'.split(), stdout=PIPE, stderr=PIPE).communicate()
# verify
self.assertFalse(stdout)
self.assertFalse(stderr)
def test_snapshot_quiet_shortOption(self):
# run
stdout, stderr = Popen('git snapshot -q'.split(), stdout=PIPE, stderr=PIPE).communicate()
# verify
self.assertFalse(stdout)
self.assertFalse(stderr)
def test_snapshot_specificFiles(self):
# setup
with open('README.md', 'w') as a_file:
a_file.write('readme\n')
# run
call('git snapshot --quiet -- CHANGELOG.md'.split())
# verify
self.assertEqual(self._status(), " M CHANGELOG.md\n?? README.md\n")
call('git reset --hard --quiet'.split())
call('git clean --force --quiet'.split())
call('git stash pop --quiet'.split())
self.assertEqual(self._status(), " M CHANGELOG.md\n")
def test_snapshot_noChanges(self):
# setup
call('git reset --hard --quiet'.split())
# run
result = check_output('git snapshot'.split())
# verify
self.assertEqual(result.strip(), 'No local changes to save. No snapshot created.')
def test_snapshot_noChanges_quiet(self):
# setup
call('git reset --hard --quiet'.split())
# run
stdout, stderr = Popen('git snapshot --quiet'.split(), stdout=PIPE, stderr=PIPE).communicate()
# verify
self.assertFalse(stdout)
self.assertFalse(stderr)
def test_snapshot_noChanges_quiet_shortOption(self):
# setup
call('git reset --hard --quiet'.split())
# run
stdout, stderr = Popen('git snapshot -q'.split(), stdout=PIPE, stderr=PIPE).communicate()
# verify
self.assertFalse(stdout)
self.assertFalse(stderr)
def test_snapshot_nonGitRepository(self):
# setup
os.mkdir(self.dirpath + '/dir')
os.chdir(self.dirpath + '/dir')
# run
p = Popen('git snapshot'.split(), stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
# verify
expected = "error: '{}' not a git repository".format(os.path.realpath(self.dirpath) + '/dir')
self.assertEqual(expected, stderr.strip())
self.assertFalse(stdout)
def test_snapshot_version(self):
# expect
self.assertRegexpMatches(self._output('git snapshot -v'.split()), 'git-snapshot \\d+\\.\\d+\\.\\d+')
self.assertRegexpMatches(self._output('git snapshot --version'.split()), 'git-snapshot \\d+\\.\\d+\\.\\d+')
def test_snapshot_help(self):
# expect
self.assertTrue(self._output('git snapshot -h'.split()))
self.assertTrue(self._output('git snapshot --help'.split()))
def test_snapshot_replace(self):
# given
message = "My snapshot message"
self._output('git reset --hard'.split())
self._output('git stash drop'.split())
with open('CONTRIBUTING.md', 'w') as a_file:
a_file.write('contributing\n')
# when
stdout, stderr = Popen(('git', 'snapshot', '--replace', message), stdout=PIPE, stderr=PIPE).communicate()
# then
self.assertEqual(stdout.strip(), 'Saved working directory and index state On master: My snapshot message')
self.assertFalse(stderr)
self.assertEqual(self._status(), " M CONTRIBUTING.md\n")
stashes = self._stashes()
self.assertEqual(len(stashes), 1)
self.assertRegexpMatches(stashes[0], 'stash@\{0\}: On master: ' + message)
call('git reset --hard --quiet'.split())
call('git stash pop --quiet'.split())
self.assertEqual(self._status(), " M CONTRIBUTING.md\n")
def test_snapshot_replaceWithoutMessage(self):
    """--replace without a positional message is rejected as a usage error."""
    # when
    stdout, stderr = Popen(('git', 'snapshot', '--replace'), stdout=PIPE, stderr=PIPE).communicate()
    # then: usage on stdout, the argparse error on stderr
    self.assertEqual(stdout.strip(), 'usage: git snapshot [MESSAGE] [-h] [-v] [-r] [-q] [-- FILE [FILE ...]]')
    self.assertEqual(stderr.strip(), 'git snapshot: error: argument -r/--replace: not allowed without positional argument message')
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of CONF for use of fakes, and some black magic for
inline callbacks.
"""
import os
import shutil
import tempfile
import uuid
import fixtures
import logging
import mox
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_conffixture
import stubout
import testtools
from testtools import matchers
from cinder.common import config # noqa Need to register global_opts
from cinder.db import migration
from cinder.openstack.common.db.sqlalchemy import session
from cinder.openstack.common import log as oslo_logging
from cinder.openstack.common import timeutils
from cinder import rpc
from cinder import service
from cinder.tests import conf_fixture
from cinder.tests import fake_notifier
# Extra config options registered only for the test suite.
test_opts = [
    cfg.StrOpt('sqlite_clean_db',
               default='clean.sqlite',
               help='File name of clean sqlite db'), ]
CONF = cfg.CONF
CONF.register_opts(test_opts)
LOG = oslo_logging.getLogger(__name__)
# Module-level cache so the (expensive) database schema setup runs only
# once per test run; populated lazily in TestCase.setUp().
_DB_CACHE = None
class TestingException(Exception):
    """Generic exception raised deliberately from within tests."""
    pass
class Database(fixtures.Fixture):
    """Fixture that gives every test a pristine database.

    On first construction the schema is migrated once; for in-memory
    sqlite ("sqlite://") the migrated state is cached as a SQL dump
    string, for file-backed sqlite a "clean" copy of the db file is
    kept.  setUp() then restores that pristine state cheaply before
    each test.
    """

    def __init__(self, db_session, db_migrate, sql_connection,
                 sqlite_db, sqlite_clean_db):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db
        self.engine = db_session.get_engine()
        # Drop any pooled connections left over from a previous run.
        self.engine.dispose()
        conn = self.engine.connect()
        if sql_connection == "sqlite://":
            # Already migrated past the initial version: nothing to do.
            if db_migrate.db_version() > db_migrate.db_initial_version():
                return
        else:
            # File-backed sqlite: reuse an existing test database file.
            testdb = os.path.join(CONF.state_path, sqlite_db)
            if os.path.exists(testdb):
                return
        db_migrate.db_sync()
        # self.post_migrations()
        if sql_connection == "sqlite://":
            conn = self.engine.connect()
            # Cache the full schema + data as one SQL script for replay.
            self._DB = "".join(line for line in conn.connection.iterdump())
            self.engine.dispose()
        else:
            # Keep a pristine copy of the migrated database on disk.
            cleandb = os.path.join(CONF.state_path, sqlite_clean_db)
            shutil.copyfile(testdb, cleandb)

    def setUp(self):
        super(Database, self).setUp()
        if self.sql_connection == "sqlite://":
            conn = self.engine.connect()
            # Replay the cached dump into the fresh in-memory database.
            conn.connection.executescript(self._DB)
            self.addCleanup(self.engine.dispose)
        else:
            # Restore the clean on-disk copy over the working database.
            shutil.copyfile(
                os.path.join(CONF.state_path, self.sqlite_clean_db),
                os.path.join(CONF.state_path, self.sqlite_db))
class TestCase(testtools.TestCase):
    """Test case base class for all unit tests."""

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        # Per-test timeout taken from the environment; an invalid value
        # disables the timeout instead of breaking the run.
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        # Optional stdout/stderr capture, controlled by environment flags.
        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        self.log_fixture = self.useFixture(fixtures.FakeLogger(
            level=logging.DEBUG))
        # Route RPC over the fake in-process messaging transport.
        rpc.add_extra_exmods("cinder.tests")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)
        # NOTE(review): 'fs' appears unused below -- leftover format string?
        fs = '%(levelname)s [%(name)s] %(message)s'
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)
        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])
        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()
        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False)
        # NOTE(review): this replaces the DEBUG-level FakeLogger installed
        # above with a default-level one -- presumably intentional; verify.
        self.log_fixture = self.useFixture(fixtures.FakeLogger())
        # Build (or reuse) the module-level database fixture cache.
        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(session, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        # Timers registered by tests; stopped in tearDown().
        self.injected = []
        # Services started via start_service(); killed in tearDown().
        self._services = []
        fake_notifier.stub_notifier(self.stubs)
        # Fail loudly on malformed exception format strings.
        CONF.set_override('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        CONF.set_override('lock_path', tempfile.mkdtemp())

    def tearDown(self):
        """Runs after each test method to tear down test environment."""
        # Stop any timers
        for x in self.injected:
            try:
                x.stop()
            except AssertionError:
                pass
        # Kill any services
        for x in self._services:
            try:
                x.kill()
            except Exception:
                pass
        # Delete attributes that don't start with _ so they don't pin
        # memory around unnecessarily for the duration of the test
        # suite
        for key in [k for k in self.__dict__.keys() if k[0] != '_']:
            del self.__dict__[key]
        super(TestCase, self).tearDown()

    def flags(self, **kw):
        """Override CONF variables for a test."""
        # NOTE: dict.iteritems is Python 2 only; this codebase targets py2.
        for k, v in kw.iteritems():
            CONF.set_override(k, v)

    def log_level(self, level):
        """Set logging level to the specified value."""
        log_root = logging.getLogger(None).logger
        log_root.setLevel(level)

    def start_service(self, name, host=None, **kwargs):
        """Create and start a cinder service; it is killed in tearDown()."""
        host = host and host or uuid.uuid4().hex
        kwargs.setdefault('host', host)
        kwargs.setdefault('binary', 'cinder-%s' % name)
        svc = service.Service.create(**kwargs)
        svc.start()
        self._services.append(svc)
        return svc

    # Useful assertions
    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
        """Assert two dicts are equivalent.
        This is a 'deep' match in the sense that it handles nested
        dictionaries appropriately.
        NOTE:
            If you don't care (or don't know) a given value, you can specify
            the string DONTCARE as the value. This will cause that dict-item
            to be skipped.
        """
        def raise_assertion(msg):
            # Build a readable failure message with both dicts included.
            d1str = d1
            d2str = d2
            base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
                        'd2: %(d2str)s' %
                        {'msg': msg, 'd1str': d1str, 'd2str': d2str})
            raise AssertionError(base_msg)
        d1keys = set(d1.keys())
        d2keys = set(d2.keys())
        if d1keys != d2keys:
            d1only = d1keys - d2keys
            d2only = d2keys - d1keys
            raise_assertion('Keys in d1 and not d2: %(d1only)s. '
                            'Keys in d2 and not d1: %(d2only)s' %
                            {'d1only': d1only, 'd2only': d2only})
        for key in d1keys:
            d1value = d1[key]
            d2value = d2[key]
            try:
                # Numeric-looking values may compare within a tolerance.
                error = abs(float(d1value) - float(d2value))
                within_tolerance = error <= tolerance
            except (ValueError, TypeError):
                # If both values aren't convertable to float, just ignore
                # ValueError if arg is a str, TypeError if it's something else
                # (like None)
                within_tolerance = False
            if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
                self.assertDictMatch(d1value, d2value)
            elif 'DONTCARE' in (d1value, d2value):
                continue
            elif approx_equal and within_tolerance:
                continue
            elif d1value != d2value:
                raise_assertion("d1['%(key)s']=%(d1value)s != "
                                "d2['%(key)s']=%(d2value)s" %
                                {
                                    'key': key,
                                    'd1value': d1value,
                                    'd2value': d2value,
                                })

    def assertGreater(self, first, second, msg=None):
        """Python < v2.7 compatibility. Assert 'first' > 'second'."""
        try:
            f = super(TestCase, self).assertGreater
        except AttributeError:
            self.assertThat(first,
                            matchers.GreaterThan(second),
                            message=msg or '')
        else:
            f(first, second, msg=msg)

    def assertGreaterEqual(self, first, second, msg=None):
        """Python < v2.7 compatibility. Assert 'first' >= 'second'."""
        try:
            f = super(TestCase, self).assertGreaterEqual
        except AttributeError:
            # testtools has no GreaterThanOrEqual matcher; negate LessThan.
            self.assertThat(first,
                            matchers.Not(matchers.LessThan(second)),
                            message=msg or '')
        else:
            f(first, second, msg=msg)
| |
# Code borrowed from 'Cuckoo Sandbox'
# https://github.com/cuckoosandbox/cuckoo
# Credits go to @jbremer (Jurriaan Bremer) and the Cuckoo team
import binascii
import configparser as ConfigParser
import click
import os
import logging
import re
from findex_common.exceptions import ConfigError
from findex_gui.bin.misc import cwd
log = logging.getLogger(__name__)
# Python 3 removed the basestring builtin; emulate it for isinstance checks.
basestring = (str, bytes)
# Cache of parsed Config objects, keyed on (file_name, cfg, cwd, flags);
# see config() below.
_cache = {}
def parse_bool(value):
    """Attempt to parse a boolean value.

    Recognizes the usual textual spellings; anything else falls back to
    a numeric interpretation (raising for non-numeric input).
    """
    truthy = ("true", "True", "yes", "1", "on")
    falsy = ("false", "False", "None", "no", "0", "off")
    if value in truthy:
        return True
    if value in falsy:
        return False
    return bool(int(value))
class Dictionary(dict):
    """Findex custom dict: attribute access maps onto item access.

    Missing attributes resolve to None instead of raising AttributeError.
    """

    def __getattr__(self, key):
        # Only invoked when normal attribute lookup fails.
        return self.get(key, None)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]
class Type(object):
    """Base Class for Type Definitions"""

    def __init__(self, default=None, required=True, sanitize=False,
                 allow_empty=False):
        # required: the key must be present in the configuration.
        self.required = required
        # sanitize: black out this value in Config.from_confdir(sanitize=True).
        self.sanitize = sanitize
        self.allow_empty = allow_empty
        # Store the default in canonical form by running it through the
        # subclass parser.  NOTE: subclasses must assign any attributes
        # their parse() reads BEFORE calling this __init__ (see Path).
        self.default = self.parse(default)

    def parse(self, value):
        """Parse a raw input value."""

    def check(self, value):
        """Checks the type of the value."""

    def emit(self, value):
        """String-readable version of this object"""
class Int(Type):
    """Integer Type Definition class."""

    def parse(self, value):
        # Falls through (returns None) when the value is neither an int
        # nor a digit-only string.
        if isinstance(value, int):
            return value
        if isinstance(value, basestring) and value.isdigit():
            return int(value)

    def check(self, value):
        if self.allow_empty and not value:
            return True
        try:
            # Delegate integer validation to click's INT param type.
            click.INT(value)
            return True
        except:
            return False

    def emit(self, value):
        # None emits as the empty string.
        return "%d" % value if value is not None else ""
class String(Type):
    """String Type Definition class."""

    def parse(self, value):
        # Empty/None input parses to None rather than "".
        return value.strip() if value else None

    def check(self, value):
        if self.allow_empty and not value:
            return True
        return isinstance(value, basestring)

    def emit(self, value):
        return value or ""
class Path(String):
    """Path Type Definition class."""

    def __init__(self, default=None, exists=False, writable=False,
                 readable=False, required=True, allow_empty=False,
                 sanitize=False):
        # Assign the path constraints first: Type.__init__ calls
        # self.parse(default), which reads these attributes.
        self.exists = exists
        self.writable = writable
        self.readable = readable
        super(Path, self).__init__(default, required, sanitize, allow_empty)

    def parse(self, value):
        if self.allow_empty and not value:
            return
        try:
            c = click.Path(
                exists=self.exists,
                writable=self.writable,
                readable=self.readable
            )
            return c.convert(value, None, None)
        except Exception:
            # Constraint failure keeps the raw value; check() is where
            # validity gets reported.
            return value

    def check(self, value):
        if self.allow_empty and not value:
            return True
        try:
            c = click.Path(
                exists=self.exists,
                writable=self.writable,
                readable=self.readable
            )
            c.convert(value, None, None)
            return True
        except:
            return False

    def emit(self, value):
        return value or ""
class Boolean(Type):
    """Boolean Type Definition class."""

    def parse(self, value):
        # Logs and returns None when the value is not a recognizable bool.
        try:
            return parse_bool(value)
        except:
            log.error("Incorrect Boolean %s", value)

    def check(self, value):
        try:
            parse_bool(value)
            return True
        except:
            return False

    def emit(self, value):
        return "yes" if value else "no"
class UUID(Type):
    """UUID Type Definition class."""

    def parse(self, value):
        # Logs and returns None when the value is not a valid UUID.
        try:
            c = click.UUID(value)
            return str(c)
        except:
            log.error("Incorrect UUID %s", value)

    def check(self, value):
        """Checks if the value is of type UUID."""
        try:
            click.UUID(value)
            return True
        except:
            return False

    def emit(self, value):
        return value
class List(Type):
    """List Type Definition class."""

    def __init__(self, subclass, default, sep=",", strip=True):
        # subclass: the element Type class used to parse each entry.
        self.subclass = subclass
        self.sep = sep
        self.strip = strip
        super(List, self).__init__(default)

    def parse(self, value):
        if value is None:
            return []
        try:
            ret = []
            if isinstance(value, (tuple, list)):
                for entry in value:
                    ret.append(self.subclass().parse(entry))
                return ret
            # sep may contain several separator characters; the character
            # class splits on any of them.
            for entry in re.split("[%s]" % self.sep, value):
                if self.strip:
                    entry = entry.strip()
                if not entry:
                    continue
                ret.append(self.subclass().parse(entry))
            return ret
        except:
            # Logs and returns None on failure.
            log.error("Incorrect list: %s", value)

    def check(self, value):
        try:
            value.split(self.sep)
            return True
        except:
            return False

    def emit(self, value):
        # Join using the first separator character (", " for commas).
        return (", " if self.sep[0] == "," else self.sep[0]).join(value or "")
class Config(object):
    """Configuration file parser."""

    # Schema: file name -> section -> key -> Type instance.  Values read
    # from disk are parsed against these declarations.
    configuration = {
        "findex": {
            "findex": {
                "application_root": String("/"),
                # Random 64-hex-char token generated once at import time.
                "secret_token": String(binascii.hexlify(open("/dev/urandom", "rb").read(32)).decode("UTF-8")),
                "version_check": Boolean(False),
                "debug": Boolean(False),
                "async": Boolean(True),
            },
            "database": {
                # sanitize=True: blacked out by from_confdir(sanitize=True).
                "connection": String(sanitize=True),
                "debug": Boolean(False),
            },
            "elasticsearch": {
                "enabled": Boolean(True),
                "host": String("http://localhost:9200/")
            },
            "users": {
                "default_root_password": String("changeme"),
                "default_anon_password": String("changeme")
            }
        }
    }

    def get_section_types(self, file_name, section, strict=False, loose=False):
        """Get types for a section entry."""
        # Delegates to the module-level get_section_types() helper.
        section_types = get_section_types(file_name, section)
        if not section_types and not loose:
            log.error(
                "Config section %s:%s not found!", file_name, section
            )
            if strict:
                raise ConfigError(
                    "Config section %s:%s not found!", file_name, section
                )
            return
        return section_types

    def __init__(self, file_name="findex", cfg=None, strict=False,
                 loose=False, raw=False):
        """
        @param file_name: file name without extension.
        @param cfg: configuration file path.
        @param strict: raise on unknown sections.
        @param loose: tolerate unknown files/sections/keys.
        @param raw: keep raw string values instead of parsing by Type.
        """
        # Expose FINDEX_* environment variables (plus cwd/app name) to
        # ConfigParser so config values can interpolate them.
        env = {}
        for key, value in os.environ.items():
            if key.startswith("FINDEX_"):
                env[key] = value
        env["FINDEX_CWD"] = cwd()
        env["FINDEX_APP"] = os.environ.get("FINDEX_APP", "")
        config = ConfigParser.ConfigParser(env)
        # Lower-cased env keys, used below to skip the defaults that
        # ConfigParser injects into every section.
        self.env_keys = []
        for key in env.keys():
            self.env_keys.append(key.lower())
        self.sections = {}
        try:
            config.read(cfg or cwd("conf", "%s.conf" % file_name))
        except ConfigParser.ParsingError as e:
            raise ConfigError(
                "There was an error reading in the $CWD/conf/%s.conf "
                "configuration file. Most likely there are leading "
                "whitespaces in front of one of the key=value lines defined. "
                "More information from the original exception: %s" %
                (file_name, e)
            )
        if file_name not in self.configuration and not loose:
            log.error("Unknown config file %s.conf", file_name)
            return
        for section in config.sections():
            types = self.get_section_types(file_name, section, strict, loose)
            if types is None:
                continue
            self.sections[section] = Dictionary()
            # Sections are also reachable as attributes,
            # e.g. cfg.database.connection.
            setattr(self, section, self.sections[section])
            try:
                items = config.items(section)
            except ConfigParser.InterpolationMissingOptionError as e:
                log.error("Missing environment variable(s): %s", e)
                raise ConfigError(
                    "Missing environment variable: %s" % e
                )
            for name, raw_value in items:
                # Skip the env defaults injected into every section.
                if name in self.env_keys:
                    continue
                if "\n" in raw_value:
                    # A multi-line value indicates a mis-indented key.
                    wrong_key = "???"
                    try:
                        wrong_key = raw_value.split("\n", 1)[1].split()[0]
                    except:
                        pass
                    raise ConfigError(
                        "There was an error reading in the $CWD/conf/%s.conf "
                        "configuration file. Namely, there are one or more "
                        "leading whitespaces before the definition of the "
                        "'%s' key/value pair in the '%s' section. Please "
                        "remove those leading whitespaces as Python's default "
                        "configuration parser is unable to handle those "
                        "properly." % (file_name, wrong_key, section)
                    )
                if not raw and name in types:
                    # TODO Is this the area where we should be checking the
                    # configuration values?
                    # if not types[name].check(raw_value):
                    #     print file_name, section, name, raw_value
                    #     raise
                    value = types[name].parse(raw_value)
                else:
                    if not loose:
                        log.error(
                            "Type of config parameter %s:%s:%s not found! "
                            "This may indicate that you've incorrectly filled "
                            "out the findex configuration, please double "
                            "check it.", file_name, section, name
                        )
                    value = raw_value
                self.sections[section][name] = value

    def get(self, section):
        """Get option.
        @param section: section to fetch.
        @raise ConfigError: if section not found.
        @return: option value.
        """
        if section not in self.sections:
            raise ConfigError(
                "Option %s is not found in configuration" % section
            )
        return self.sections[section]

    @staticmethod
    def from_confdir(dirpath, loose=False, sanitize=False):
        """Reads all the configuration from a configuration directory. If
        `sanitize` is set, then black out sensitive fields."""
        ret = {}
        for filename in os.listdir(dirpath):
            if not filename.endswith(".conf"):
                continue
            config_name = filename.rsplit(".", 1)[0]
            cfg = Config(
                config_name, cfg=os.path.join(dirpath, filename), loose=loose
            )
            ret[config_name] = {}
            for section, values in cfg.sections.items():
                ret[config_name][section] = {}
                types = cfg.get_section_types(
                    config_name, section, loose=loose
                ) or {}
                for key, value in values.items():
                    # Black out fields whose Type declares sanitize=True.
                    if sanitize and key in types and types[key].sanitize:
                        value = "*"*8
                    ret[config_name][section][key] = value
        return ret
def parse_options(options):
    """Parse the analysis options field to a dictionary.

    Fields are comma-separated "key=value" pairs; fields without an '='
    are silently ignored, and only the first '=' splits key from value.
    """
    parsed = {}
    for chunk in options.split(","):
        key, sep, value = chunk.partition("=")
        if not sep:
            continue
        parsed[key.strip()] = value.strip()
    return parsed
def emit_options(options):
    """Emit the analysis options from a dictionary to a string.

    Pairs are sorted by key for a deterministic result.
    """
    pairs = sorted(options.items())
    return ",".join("%s=%s" % (key, value) for key, value in pairs)
def config(s, cfg=None, strict=False, raw=False, loose=False, check=False):
    """Fetch a configuration value, denoted as file:section:key.

    @param s: "file:section:key" locator.
    @param cfg: explicit configuration file path.
    @param strict: raise ConfigError on unknown/missing entries.
    @param raw: return raw string values, skipping Type parsing.
    @param loose: tolerate unknown files/sections.
    @param check: also validate the value against its declared Type.
    """
    if s.count(":") != 2:
        raise RuntimeError("Invalid configuration entry: %s" % s)
    file_name, section, key = s.split(":")
    if check:
        # Checking implies the strictest reading of the configuration.
        strict = raw = loose = True
    type_ = Config.configuration.get(file_name, {}).get(section, {}).get(key)
    if strict and type_ is None:
        raise ConfigError(
            "No such configuration value exists: %s" % s
        )
    required = type_ is not None and type_.required
    # Parsed Config objects are cached per (file, path, cwd, flags).
    index = file_name, cfg, cwd(), strict, raw, loose
    if index not in _cache:
        _cache[index] = Config(
            file_name, cfg=cfg, strict=strict, raw=raw, loose=loose
        )
    config = _cache[index]
    if strict and required and section not in config.sections:
        raise ConfigError(
            "Configuration value %s not present! This may indicate that "
            "you've incorrectly filled out the Findex configuration, "
            "please double check it." % s
        )
    section = config.sections.get(section, {})
    if strict and required and key not in section:
        raise ConfigError(
            "Configuration value %s not present! This may indicate that "
            "you've incorrectly filled out the Findex configuration, "
            "please double check it." % s
        )
    # Fall back to the declared Type default when the key is absent.
    value = section.get(key, type_.default if type_ else None)
    if check and not type_.check(value):
        raise ConfigError(
            "The configuration value %r found for %s is invalid. Please "
            "update your configuration!" % (value, s)
        )
    return value
def get_section_types(file_name, section, strict=False):
    """Return the Type declarations for file_name:section, or {}.

    Falls back to the "*" wildcard section when the schema declares one,
    optionally gated through a "__star__" (section, key) indirection.
    """
    if section in Config.configuration.get(file_name, {}):
        return Config.configuration[file_name][section]
    if "__star__" not in Config.configuration.get(file_name, {}):
        return {}
    if strict:
        # __star__ names a (section, key) whose configured value lists
        # the sections allowed to use the wildcard types.
        section_, key = Config.configuration[file_name]["__star__"]
        if section not in config("%s:%s:%s" % (file_name, section_, key)):
            return {}
    if "*" in Config.configuration.get(file_name, {}):
        section_types = Config.configuration[file_name]["*"]
        # If multiple default values have been provided, pick one.
        if isinstance(section_types, (tuple, list)):
            section_types = section_types[0]
        return section_types
    return {}
def config2(file_name, section):
    """Fetch a whole configuration section as a Dictionary.

    @raise ConfigError: when the section is not declared in the schema.
    """
    keys = get_section_types(file_name, section, strict=True)
    if not keys:
        raise ConfigError(
            "No such configuration section exists: %s:%s" %
            (file_name, section)
        )
    ret = Dictionary()
    for key in keys:
        # Skip schema meta-entries.
        if key == "__star__" or key == "*":
            continue
        ret[key] = config("%s:%s:%s" % (file_name, section, key))
    return ret
def cast(s, value):
    """Cast a configuration value as per its type.

    @param s: "file:section:key" locator of the declared Type.
    @param value: raw value to parse.
    @raise ConfigError: when no Type is declared for the key.
    """
    if s.count(":") != 2:
        raise RuntimeError("Invalid configuration entry: %s" % s)
    file_name, section, key = s.split(":")
    type_ = get_section_types(file_name, section).get(key)
    if type_ is None:
        raise ConfigError(
            "No such configuration value exists: %s" % s
        )
    return type_.parse(value)
def read_kv_conf(filepath):
    """Reads a flat Findex key/value configuration file.

    Each non-comment line has the form "file.section.key = value"; the
    result is a nested {file: {section: {key: parsed_value}}} dict.
    @raise ConfigError: on malformed lines or unparseable values.
    """
    ret = {}
    for line in open(filepath, "r"):
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        if "=" not in line:
            raise ConfigError(
                "Invalid flat configuration line: %s (missing '=' character)" %
                line
            )
        key, raw_value = line.split("=", 1)
        # Dotted keys become the colon locators understood by cast().
        key, raw_value = key.replace(".", ":").strip(), raw_value.strip()
        try:
            value = cast(key, raw_value)
        except (ConfigError, RuntimeError) as e:
            raise ConfigError(
                "Invalid flat configuration line: %s (error %s)" % (line, e)
            )
        if raw_value and value is None:
            # The Type parser rejected a non-empty value.
            raise ConfigError(
                "Invalid flat configuration entry: %s is None" % key
            )
        a, b, c = key.split(":")
        ret[a] = ret.get(a, {})
        ret[a][b] = ret[a].get(b, {})
        ret[a][b][c] = value
    return ret
def generate_crawl_config(bot_name: str,
                          db_host: str,
                          db_port: int,
                          db_name: str,
                          db_user: str,
                          db_pass: str,
                          db_max_bulk_inserts: int,
                          amqp_username: str = "",
                          amqp_password: str = "",
                          amqp_vhost: str = "",
                          amqp_host: str = "",
                          amqp_queue_name: str = "",
                          amqp_queue_size: int = 0) -> str:
    """Render the crawl bot configuration from the Jinja2 template.

    BUG FIX: the amqp_* names were passed to render() but never defined
    anywhere, so every call raised NameError.  They are now optional
    keyword parameters (defaulting to empty values), which keeps the
    original positional call signature backward-compatible.

    @param bot_name: name the crawler bot identifies itself with.
    @param db_host/db_port/db_name/db_user/db_pass: database settings.
    @param db_max_bulk_inserts: batch size for bulk inserts.
    @param amqp_*: optional AMQP/RabbitMQ queueing settings.
    @return: the rendered crawl.conf contents as a string.
    """
    from jinja2 import Environment
    from findex_gui.bin.misc import cwd

    # Context manager guarantees the template file handle is closed.
    with open(cwd("conf/crawl.conf"), "r") as f:
        template = f.read()
    return Environment().from_string(template).render(
        bot_name=bot_name,
        db_host=db_host,
        db_port=db_port,
        db_db=db_name,
        db_user=db_user,
        db_pass=db_pass,
        db_max_bulk_inserts=db_max_bulk_inserts,
        amqp_username=amqp_username,
        amqp_password=amqp_password,
        amqp_vhost=amqp_vhost,
        amqp_host=amqp_host,
        amqp_queue_name=amqp_queue_name,
        amqp_queue_size=amqp_queue_size
    )
| |
"""
periodically pull the Kenya medical practitioners' database
Cron entry:
@weekly source /alephdata/srv/env_scrapengine/bin/activate && cd /alephdata/srv/Scrapengine && make scrape scraper=starhealth-register-doctors && curl -fsS --retry 3 https://hchk.io/<ID> > /dev/null
@weekly source /alephdata/srv/env_scrapengine/bin/activate && cd /alephdata/srv/Scrapengine && make scrape scraper=starhealth-register-foreign_doctors && curl -fsS --retry 3 https://hchk.io/<ID> > /dev/null
@weekly source /alephdata/srv/env_scrapengine/bin/activate && cd /alephdata/srv/Scrapengine && make scrape scraper=starhealth-register-clinical_officers && curl -fsS --retry 3 https://hchk.io/<ID> > /dev/null
"""
import uuid, csv, boto3
import os, dataset, requests
from datetime import datetime
from urllib import quote
from Scrapengine.configs import DATABASE, ARCHIVE, SCRAPERS, CLOUDSEARCH_DOCS, CLOUDSEARCH_COS
from Scrapengine import index_template
from BeautifulSoup import BeautifulSoup
# import.io credentials and extraction endpoint.
API_KEY = os.getenv("IMPORTIO_API_KEY", "xx-yy-zz")
API = "https://api.import.io/store/connector/_magic?url={url}&format=JSON&js=false&_apikey={apikey}"
# Paginated source URLs for each register, from the scraper config.
SOURCE = dict(
    doctors=SCRAPERS["medicalboard"]["doctors"],
    foreign_doctors=SCRAPERS["medicalboard"]["foreign_doctors"],
    clinical_officers=SCRAPERS["medicalboard"]["clinical_officers"]
)
TIMEOUT = 15 # Request timeout in seconds
PERSIST = False  # When True, scraped rows are also written to the DB.
OUTPUT_FILE_PREFIX = "starhealth_register"
def get_total_page_numbers(url, default_pages):
    """Scrape the pagination widget on page 1 to learn the page count.

    Falls back to default_pages on any failure (network error, parse
    error, site layout change).  NOTE: Python 2 code (print statement,
    old except syntax).
    """
    try:
        r = requests.get(url % ('1')) # page one
        soup = BeautifulSoup(r.text)
        # The widget text reads "Viewing 1 of N pages."
        row = soup.find("div", {"id": "tnt_pagination"}).getText()
        start_text = "Viewing 1 of "
        i = row.index(start_text)
        start = i + len(start_text)
        end = row.index("pages.")
        return int(row[start:end].strip())
    except Exception, err:
        print "ERROR: get_total_page_numbers() - url: %s - err: %s" % (url, err)
        return default_pages
# Get this from the site
# Total page counts per register, fetched live at import time with
# hard-coded fallbacks when the site cannot be reached.
PAGES = dict(
    doctors=get_total_page_numbers(SCRAPERS["medicalboard"]["doctors"], 409),
    foreign_doctors=get_total_page_numbers(SCRAPERS["medicalboard"]["foreign_doctors"], 51),
    clinical_officers=get_total_page_numbers(SCRAPERS["medicalboard"]["clinical_officers"], 377)
)
class MedicalBoardScraper(object):
    """Scrapes one medical-practitioner register through import.io and
    indexes the rows into AWS CloudSearch.

    NOTE: Python 2 code throughout (print statements, old except syntax).
    """

    def __init__(self, run_id, source):
        # run_id: uuid string for this run, used in archive file names.
        # source: register name; one of SOURCE's keys.
        self.api = API
        self.apikey = API_KEY
        self._id = run_id
        self.source = source
        self.source_url = SOURCE[source]
        # Separate CloudSearch domains for doctors vs clinical officers.
        self.cloudsearch_docs = boto3.client("cloudsearchdomain", **CLOUDSEARCH_DOCS)
        self.cloudsearch_cos = boto3.client("cloudsearchdomain", **CLOUDSEARCH_COS)
        # Mapping of our field names -> import.io column names, per source.
        self.fields = dict(
            doctors=dict(
                name="name_value",
                registration_number="regno_value",
                qualification="qualifications_value",
                address="address_value",
                registration_date="regdate_date/_source",
                specialty="specialty_value",
                sub_specialty="sub_value"
            ),
            foreign_doctors=dict(
                name="name_value",
                registration_number="licence_number/_source",
                qualification="qualifications_value",
                address="address_value",
                facility="facility_value",
                practice_type="practicetype_value",
            ),
            clinical_officers=dict(
                name="name_value",
                registration_number="regnolicence_value",
                qualification="qualifications_label",
                address="address_value",
                registration_date="regdate_value",
            )
        )
        #self.db = dataset.connect("mysql://{username}:{password}@{host}".format(**DATABASE))

    def persist(self, json_data):
        '''
        save to db

        NOTE(review): self.db is never initialized (the dataset.connect
        call above is commented out), so this raises AttributeError until
        the DB wiring is restored -- only reachable when PERSIST is True.
        '''
        dbtable = self.db[DATABASE['table']]
        dbresp = dbtable.insert(json_data)
        print "db said %s for %s" % (str(dbresp), json_data)

    def scrape_page(self, page):
        """Fetch one register page through import.io's magic API.

        @param page: page number as a string (interpolated into the URL).
        @return: (entries, skip_count) tuple, or None when the page-level
                 request/parse fails (the outer except prints and falls
                 through).
        """
        try:
            args = dict(
                url=quote(self.source_url % page),
                apikey=self.apikey
            )
            print "Getting page: %s" % args["url"]
            start = datetime.now()
            response = requests.get(self.api.format(**args), timeout=TIMEOUT)
            print "timer - http - %s seconds to GET %s" % ((datetime.now() - start).seconds, args["url"])
            response.raise_for_status()
            resp = response.json()
            results = resp['tables'][0]['results']
            skip_count = 0 # keep track of how many entries have been skipped
            all_entries = []
            for result in results:
                try:
                    # Map the import.io columns onto our field names.
                    doctor_payload = {}
                    for attr in self.fields[self.source]:
                        doctor_payload[attr] = result.get(self.fields[self.source][attr], "None").decode("string_escape").replace('\\','')
                    doctor_payload["type"] = self.source
                    start = datetime.now()
                    if PERSIST:
                        # for DB
                        self.persist(result)
                        print "timer - db - %s seconds" % (datetime.now() - start).seconds
                    all_entries.append(doctor_payload)
                except Exception, err:
                    # Skip malformed rows but keep scraping the page.
                    skip_count += 1
                    print "ERROR: (page %s): Skipped %s -- REASON: %s" % (page, result, str(err))
            return all_entries, skip_count
        except Exception, err:
            print "ERROR: Failed to scrape data from page %s -- %s" % (page, err)

    def write_to_json(self, results=[]):
        """
        This function saves the data in a template ready for bulk addition or bulk deletion in json files.

        NOTE(review): mutable default argument; harmless here because the
        list itself is never mutated, but callers should pass explicitly.
        :param results:
        :return: a tuple of the file names
        """
        outputfile = "%s/%s-%s-%s-add.json" % (ARCHIVE, OUTPUT_FILE_PREFIX, self._id, self.source) #Serves as a record of items last uploaded
        deletefile = "%s/%s-%s-%s-delete.json" % (ARCHIVE, OUTPUT_FILE_PREFIX, self._id, self.source)
        with open(outputfile, 'a') as f, open(deletefile, 'a') as d:
            try:
                for i, item in enumerate(results):
                    # The registration number doubles as the CloudSearch id.
                    item["id"] = item["registration_number"].strip().replace(" ", "")
                    item["type"] = self.source
                    deletion_index = item.get("id", "").encode('utf-8') + '\n'
                    d.write(deletion_index) #Save a list of
                    f.write(str(item))
            except Exception, err:
                print "ERROR - writing to json() - %s- %s - %s" % (outputfile, deletefile, err)
        return outputfile, deletefile

    def index_for_search(self, payload):
        """Bulk-upload the scraped entries to the matching CloudSearch domain."""
        try:
            payload_index = ''
            for i, item in enumerate(payload):
                item["id"] = item["registration_number"].strip().replace(" ", "")
                item["type"] = self.source
                item = self.custom_corrections(item)
                # One JSON document per entry, built from the shared template.
                payload_index += index_template.template % (
                    item.get("id", ""),
                    item.get("address", "").replace("\"","'"),
                    item.get("facility", ""),
                    item.get("name", ""),
                    item.get("practice_type", ""),
                    item.get("qualification", "").replace("\\", ""),
                    item.get("registration_date", ""),
                    item.get("registration_number", ""),
                    item.get("specialty", ""),
                    item.get("sub_specialty", ""),
                    item.get("type", "")
                )
                if i < (len(payload) - 1): payload_index += ', '
            payload_index = '[%s]' % payload_index
            # clinical officers live in their own CloudSearch domain.
            if self.source == 'clinical_officers':
                resp = self.cloudsearch_cos.upload_documents(
                    documents=payload_index, contentType="application/json"
                )
            else:
                resp = self.cloudsearch_docs.upload_documents(
                    documents=payload_index, contentType="application/json"
                )
            print "DEBUG - index_for_search() - %s - %s" % (len(payload), resp.get("status"))
        except Exception, err:
            print "ERROR - index_for_search() - %s - %s" % (len(payload), err)

    def delete_records(self, file):
        """Delete previously-indexed ids listed in a *-delete.json file.

        Uploads deletions in batches of 100, then removes both the delete
        file and its matching add file.
        @return: the size of the last batch processed.
        """
        with open(file, 'r') as f:
            rows = f.readlines()
        batches = list(self.chunkify(rows, 100))
        for batch in batches:
            no_of_items = len(batch)
            payload_index = ''
            for i, row in enumerate(batch):
                row = row.replace('/n','').strip()
                payload_index += index_template.delete_template % ( row )
                if i < (len(batch) - 1): payload_index += ', '
            payload_index = '[%s]' % payload_index
            if self.source == 'clinical_officers':
                resp = self.cloudsearch_cos.upload_documents(
                    documents=payload_index, contentType="application/json"
                )
            else:
                resp = self.cloudsearch_docs.upload_documents(
                    documents=payload_index, contentType="application/json"
                )
            print "DEBUG - delete_records() - %s" % (resp.get("status"))
        os.remove(file)
        os.remove(file.replace('delete.json', 'add.json')) #To avoid loss of space
        return no_of_items

    def chunkify(self, l, n):
        """Yield successive n-sized slices of list l (as a generator)."""
        n = max(1, n)
        return (l[i:i + n] for i in xrange(0, len(l), n))

    def custom_corrections(self, item):
        #Some problems just won't go away
        # Hard-coded repair for one qualification string that breaks the
        # JSON index template.
        if "MD(LINKOPING" in item.get("qualification"):
            item["qualification"] = "MD(LINKOPING)02011"
        return item
def _encode(_unicode):
    """Encode a unicode string as UTF-8 bytes."""
    return _unicode.encode('utf-8')
def main(source):
    """
    Execute scraper
    @param source: register name; one of "doctors", "foreign_doctors",
                   "clinical_officers".
    """
    run_id = str(uuid.uuid4())
    medboardscraper = MedicalBoardScraper(run_id, source)
    #Flush the repository off of old data if the delete.json file exists
    for file in os.listdir(ARCHIVE):
        if file.endswith(source + "-delete.json"):
            no_of_items_flushed = medboardscraper.delete_records(ARCHIVE + '/' + file)
            print "[%s]: FLUSHED CLOUDSEARCH : %s" % (datetime.now(), no_of_items_flushed)
            break
    doc_results = []
    print "[%s]: START RUN ID: %s" % (datetime.now(), run_id)
    for page in range(0, PAGES[source]+1):
        print "scraping page %s" % str(page)
        try:
            results = medboardscraper.scrape_page(str(page))
        except Exception, err:
            print "ERROR: main() - source: %s - page: %s - %s" % (source, page, err)
            continue
        # NOTE(review): scrape_page returns None when the page-level fetch
        # fails, which would raise TypeError on results[0] below -- verify.
        print "Scraped %s entries from page %s | Skipped %s entries" % (len(results[0]), page, results[1])
        doc_results.extend(results[0])
    # Index everything scraped this run, then archive it to disk.
    medboardscraper.index_for_search(doc_results)
    files = medboardscraper.write_to_json(doc_results)
    print "Written page %s to %s" % (page, files)
    print "[%s]: STOP RUN ID: %s" % (datetime.now(), run_id)
if __name__ == "__main__":
    # Intentionally a no-op: main(source) is invoked by the Scrapengine
    # make targets with an explicit source argument.
    pass
| |
# urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import socket
import errno
from socket import error as SocketError, timeout as SocketTimeout
from .util import resolve_cert_reqs, resolve_ssl_version, assert_fingerprint
try: # Python 3
from http.client import HTTPConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
try: # Compiled with SSL?
HTTPSConnection = object
BaseSSLError = None
ssl = None
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .request import RequestMethods
from .response import HTTPResponse
from .util import get_host, is_connection_dropped, ssl_wrap_socket
from .exceptions import (
ClosedPoolError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages import six
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': HTTP_PORT,
'https': HTTPS_PORT,
}
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    An ``httplib.HTTPSConnection`` variant that wraps its socket with SSL
    certificate verification when connecting.
    """
    # Class-level defaults; normally overridden per instance via set_cert().
    cert_reqs = None
    ca_certs = None
    ssl_version = None
    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None):
        """Store the SSL credential/verification options used by connect()."""
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
    def connect(self):
        """Open a TCP connection and upgrade it to a verified SSL socket."""
        raw_sock = socket.create_connection((self.host, self.port), self.timeout)
        reqs = resolve_cert_reqs(self.cert_reqs)
        version = resolve_ssl_version(self.ssl_version)
        # Wrap the plain socket, verifying against the trusted root certs.
        self.sock = ssl_wrap_socket(raw_sock, self.key_file, self.cert_file,
                                    cert_reqs=reqs,
                                    ca_certs=self.ca_certs,
                                    server_hostname=self.host,
                                    ssl_version=version)
        if reqs == ssl.CERT_NONE:
            # Verification disabled: nothing further to check.
            return
        if self.assert_fingerprint:
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        else:
            match_hostname(self.sock.getpeercert(),
                           self.assert_hostname or self.host)
## Pool objects
class ConnectionPool(object):
    """
    Abstract base for connection pools, e.g.
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """
    # Subclasses override the scheme; the queue class is pluggable for tests.
    scheme = None
    QueueCls = LifoQueue
    def __init__(self, host, port=None):
        self.host = host
        self.port = port
    def __str__(self):
        cls_name = type(self).__name__
        return '{0}(host={1!r}, port={2!r})'.format(cls_name, self.host, self.port)
class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.
    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`httplib.HTTPConnection`.
    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`httplib.HTTPConnection`.
    :param strict:
        Causes BadStatusLine to be raised if the status line can't be parsed
        as a valid HTTP/1.0 or 1.1 status line, passed into
        :class:`httplib.HTTPConnection`.
    :param timeout:
        Socket timeout for each individual connection, can be a float. None
        disables timeout.
    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to false, more
        connections will be created but they will not be saved once they've
        been used.
    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """
    scheme = 'http'
    def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
                 block=False, headers=None):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)
        self.strict = strict
        self.timeout = timeout
        self.pool = self.QueueCls(maxsize)
        self.block = block
        # Fill the queue up so that doing get() on it will block properly
        # (None entries act as "empty slot" sentinels; _get_conn replaces them
        # with real connections lazily).
        for _ in xrange(maxsize):
            self.pool.put(None)
        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTP connection (%d): %s" %
                 (self.num_connections, self.host))
        return HTTPConnection(host=self.host,
                              port=self.port,
                              strict=self.strict)
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.
        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.
        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError: # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then
        # If this is a persistent connection, check if it got disconnected
        # (conn may also be the None sentinel placed in __init__).
        if conn and is_connection_dropped(conn):
            log.info("Resetting dropped connection: %s" % self.host)
            conn.close()
        return conn or self._new_conn()
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.
        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.
        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.
        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except Full:
            # This should never happen if self.block == True
            log.warning("HttpConnectionPool is full, discarding connection: %s"
                        % self.host)
        # Connection never got put back into the pool, close it.
        # (conn may be None here; NOTE(review): a None conn would raise
        # AttributeError on .close() in the Full/closed-pool paths -- confirm
        # callers never pass None when the pool is full or closed.)
        conn.close()
    def _make_request(self, conn, method, url, timeout=_Default,
                      **httplib_request_kw):
        """
        Perform a request on a given httplib connection object taken from our
        pool.
        """
        self.num_requests += 1
        if timeout is _Default:
            timeout = self.timeout
        conn.timeout = timeout # This only does anything in Py26+
        conn.request(method, url, **httplib_request_kw)
        # Set timeout
        # NOTE(review): the socket timeout is applied only *after*
        # conn.request() has already sent the request, so the send itself is
        # not bounded by ``timeout`` on Py<2.6 -- confirm this is intended.
        sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr.
        if sock:
            sock.settimeout(timeout)
        try: # Python 2.7+, use buffering of HTTP responses
            httplib_response = conn.getresponse(buffering=True)
        except TypeError: # Python 2.6 and older
            httplib_response = conn.getresponse()
        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
                                          httplib_response.status,
                                          httplib_response.length))
        return httplib_response
    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Disable access to the pool
        old_pool, self.pool = self.pool, None
        try:
            while True:
                conn = old_pool.get(block=False)
                if conn:
                    conn.close()
        except Empty:
            pass # Done.
    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        # Relative URLs are, by definition, on the same host.
        if url.startswith('/'):
            return True
        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)
        if self.port and not port:
            # Use explicit default port for comparison when none is given.
            port = port_by_scheme.get(scheme)
        return (scheme, host, port) == (self.scheme, self.host, self.port)
    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.
        .. note::
           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.
        .. note::
           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.
        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)
        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).
        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.
        :param retries:
            Number of retries to allow before raising a MaxRetryError exception.
        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307). Each redirect counts as a retry.
        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.
        :param timeout:
            If specified, overrides the default timeout for this one request.
        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.
        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.
        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers
        if retries < 0:
            raise MaxRetryError(self, url)
        if timeout is _Default:
            timeout = self.timeout
        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)
        # Check host
        if assert_same_host and not self.is_same_host(url):
            host = "%s://%s" % (self.scheme, self.host)
            if self.port:
                host = "%s:%d" % (host, self.port)
            raise HostChangedError(self, url, retries - 1)
        conn = None
        try:
            # Request a connection from the queue
            conn = self._get_conn(timeout=pool_timeout)
            # Make the request on the httplib connection object
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout,
                                                  body=body, headers=headers)
            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn
            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)
            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)
        except Empty as e:
            # Timed out by queue
            # NOTE(review): _get_conn() converts Empty into ClosedPoolError /
            # EmptyPoolError, so this handler appears unreachable -- confirm.
            raise TimeoutError(self, "Request timed out. (pool_timeout=%s)" %
                               pool_timeout)
        except SocketTimeout as e:
            # Timed out by socket
            raise TimeoutError(self, "Request timed out. (timeout=%s)" %
                               timeout)
        except BaseSSLError as e:
            # SSL certificate error
            raise SSLError(e)
        except CertificateError as e:
            # Name mismatch
            raise SSLError(e)
        except (HTTPException, SocketError) as e:
            # Connection broken, discard. It will be replaced next _get_conn().
            conn = None
            # This is necessary so we can access e below
            err = e
            if retries == 0:
                raise MaxRetryError(self, url, e)
        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)
        if not conn:
            # Try again
            # NOTE(review): log.warn is a deprecated alias of log.warning.
            log.warn("Retrying (%d attempts remain) after connection "
                     "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries - 1,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)
        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                # 303 See Other: the redirected request must be a GET.
                method = 'GET'
            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                                retries - 1, redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)
        return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but for HTTPS.
    When Python is compiled with the :mod:`ssl` module,
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`httplib.HTTPSConnection`.
    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
    ``ssl_version`` parameters are only used if :mod:`ssl` is available and
    are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the
    connection socket into an SSL socket.
    """
    scheme = 'https'
    def __init__(self, host, port=None,
                 strict=False, timeout=None, maxsize=1,
                 block=False, headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None):
        HTTPConnectionPool.__init__(self, host, port,
                                    strict, timeout, maxsize,
                                    block, headers)
        # SSL-specific settings, handed to each new connection via set_cert().
        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_certs = ca_certs
        self.cert_reqs = cert_reqs
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
    def _new_conn(self):
        """
        Return a fresh HTTPS connection: verified when :mod:`ssl` is
        available, a plain :class:`httplib.HTTPSConnection` otherwise.
        """
        self.num_connections += 1
        log.info("Starting new HTTPS connection (%d): %s"
                 % (self.num_connections, self.host))
        if not ssl: # Platform-specific: Python compiled without +ssl
            if not HTTPSConnection or HTTPSConnection is object:
                raise SSLError("Can't connect to HTTPS URL because the SSL "
                               "module is not available.")
            return HTTPSConnection(host=self.host,
                                   port=self.port,
                                   strict=self.strict)
        conn = VerifiedHTTPSConnection(host=self.host, port=self.port,
                                       strict=self.strict)
        conn.set_cert(key_file=self.key_file, cert_file=self.cert_file,
                      cert_reqs=self.cert_reqs, ca_certs=self.ca_certs,
                      assert_hostname=self.assert_hostname,
                      assert_fingerprint=self.assert_fingerprint)
        conn.ssl_version = self.ssl_version
        return conn
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.
    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.
    :param url:
        Absolute URL string that must include the scheme. Port is optional.
    :param \**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.
    Example: ::
        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    # Pick the pool class from the parsed scheme; anything non-https is http.
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
| |
# -*- coding: utf-8 -*-
from nose.tools import assert_equals, assert_raises_regexp, \
assert_raises, assert_true, assert_is_instance
from diylisp.ast import is_list
from diylisp.evaluator import evaluate
from diylisp.parser import parse
from diylisp.types import Closure, LispError, Environment
"""
This part is all about defining and using functions.
We'll start by implementing the `lambda` form which is used to create function closures.
"""
def test_lambda_evaluates_to_closure():
    """The lambda form should evaluate to a Closure"""
    lambda_ast = ["lambda", [], 42]
    result = evaluate(lambda_ast, Environment())
    assert_is_instance(result, Closure)
def test_lambda_closure_keeps_defining_env():
    """The closure should keep a copy of the environment where it was defined.
    Once we start calling functions later, we'll need access to the environment
    from when the function was created in order to resolve all free variables."""
    defining_env = Environment({"foo": 1, "bar": 2})
    closure = evaluate(["lambda", [], 42], defining_env)
    assert_equals(closure.env, defining_env)
def test_lambda_closure_holds_function():
    """The closure contains the parameter list and function body too."""
    closure = evaluate(parse("(lambda (x y) (+ x y))"), Environment())
    assert_equals(closure.params, ["x", "y"])
    assert_equals(closure.body, ["+", "x", "y"])
def test_lambda_arguments_are_lists():
    """The parameters of a `lambda` should be a list."""
    params = evaluate(parse("(lambda (x y) (+ x y))"), Environment()).params
    assert_true(is_list(params))
    # A non-list parameter form must be rejected.
    with assert_raises(LispError):
        evaluate(parse("(lambda not-a-list (body of fn))"), Environment())
def test_lambda_number_of_arguments():
    """The `lambda` form should expect exactly two arguments."""
    # Three arguments here (params plus two body forms) must be rejected.
    with assert_raises_regexp(LispError, "number of arguments"):
        evaluate(parse("(lambda (foo) (bar) (baz))"), Environment())
def test_defining_lambda_with_error_in_body():
    """The function body should not be evaluated when the lambda is defined.
    The call to `lambda` should return a function closure holding, among other things
    the function body. The body should not be evaluated before the function is called."""
    broken_body_ast = parse("""
            (lambda (x y)
                (function body ((that) would never) work))
    """)
    result = evaluate(broken_body_ast, Environment())
    assert_is_instance(result, Closure)
"""
Now that we have the `lambda` form implemented, let's see if we can call some functions.
When evaluating ASTs which are lists, if the first element isn't one of the special forms
we have been working with so far, it is a function call. The first element of the list is
the function, and the rest of the elements are arguments.
"""
def test_evaluating_call_to_closure():
    """The first case we'll handle is when the AST is a list with an actual closure
    as the first element.
    In this first test, we'll start with a closure with no arguments and no free
    variables. All we need to do is to evaluate and return the function body."""
    fn = evaluate(parse("(lambda () (+ 1 2))"), Environment())
    assert_equals(3, evaluate([fn], Environment()))
def test_evaluating_call_to_closure_with_arguments():
    """The function body must be evaluated in an environment where the parameters are bound.
    Create an environment where the function parameters (which are stored in the closure)
    are bound to the actual argument values in the function call. Use this environment
    when evaluating the function body."""
    env = Environment()
    fn = evaluate(parse("(lambda (a b) (+ a b))"), env)
    assert_equals(9, evaluate([fn, 4, 5], env))
def test_creating_closure_with_environment():
    """The function parameters must properly shadow the outer scope's bindings.
    When the same bindings exist in the environment and function parameters,
    the function parameters must properly overwrite the environment bindings."""
    outer_env = Environment({"a": 42, "b": "foo"})
    shadowing_fn = evaluate(parse("(lambda (a b) (+ a b))"), outer_env)
    assert_equals(9, evaluate([shadowing_fn, 4, 5], outer_env))
def test_call_to_function_should_evaluate_arguments():
    """Call to function should evaluate all arguments.
    When a function is applied, the arguments should be evaluated before being bound
    to the parameter names."""
    env = Environment()
    fn = evaluate(parse("(lambda (a) (+ a 5))"), env)
    unevaluated_arg = parse("(if #f 0 (+ 10 10))")
    assert_equals(25, evaluate([fn, unevaluated_arg], env))
def test_evaluating_call_to_closure_with_free_variables():
    """The body should be evaluated in the environment from the closure.
    The function's free variables, i.e. those not specified as part of the parameter list,
    should be looked up in the environment from where the function was defined. This is
    the environment included in the closure. Make sure this environment is used when
    evaluating the body."""
    defining_env = Environment({"y": 1})
    fn = evaluate(parse("(lambda (x) (+ x y))"), defining_env)
    # `y` must resolve to 1 (closure env), not 2 (calling env).
    result = evaluate([fn, 0], Environment({"y": 2}))
    assert_equals(1, result)
"""
Okay, now we're able to evaluate ASTs with closures as the first element. But normally
the closures don't just happen to be there all by themselves. Generally we'll find some
expression, evaluate it to a closure, and then evaluate a new AST with the closure just
like we did above.
(some-exp arg1 arg2 ...) -> (closure arg1 arg2 ...) -> result-of-function-call
"""
def test_calling_very_simple_function_in_environment():
    """A call to a symbol corresponds to a call to its value in the environment.
    When a symbol is the first element of the AST list, it is resolved to its value in
    the environment (which should be a function closure). An AST with the variables
    replaced with its value should then be evaluated instead."""
    env = Environment()
    evaluate(parse("(define add (lambda (x y) (+ x y)))"), env)
    assert_is_instance(env.lookup("add"), Closure)
    assert_equals(3, evaluate(parse("(add 1 2)"), env))
def test_calling_lambda_directly():
    """It should be possible to define and call functions directly.
    A lambda definition in the call position of an AST should be evaluated, and then
    evaluated as before."""
    identity_call = parse("((lambda (x) x) 42)")
    assert_equals(42, evaluate(identity_call, Environment()))
def test_calling_complex_expression_which_evaluates_to_function():
    """Actually, all ASTs that are not atoms should be evaluated and then called.
    In this test, a call is done to the if-expression. The `if` should be evaluated,
    which will result in a `lambda` expression. The lambda is evaluated, giving a
    closure. The result is an AST with a `closure` as the first element, which we
    already know how to evaluate."""
    env = Environment({'y': 3})
    call_ast = parse("""
        ((if #f
             wont-evaluate-this-branch
             (lambda (x) (+ x y)))
         2)
    """)
    assert_equals(5, evaluate(call_ast, env))
"""
Now that we have the happy cases working, let's see what should happen when
function calls are done incorrectly.
"""
def test_calling_atom_raises_exception():
    """A function call to a non-function should result in an error."""
    # Both a boolean and a number in call position must be rejected.
    for source in ["(#t 'foo 'bar)", "(42)"]:
        with assert_raises_regexp(LispError, "not a function"):
            evaluate(parse(source), Environment())
def test_make_sure_arguments_to_functions_are_evaluated():
    """The arguments passed to functions should be evaluated
    We should accept parameters that are produced through function
    calls. If you are seeing stack overflows, e.g.
    RuntimeError: maximum recursion depth exceeded while calling a Python object
    then you should double-check that you are properly evaluating the passed
    function arguments."""
    result = evaluate(parse("((lambda (x) x) (+ 1 2))"), Environment())
    assert_equals(result, 3)
def test_calling_with_wrong_number_of_arguments():
    """Functions should raise exceptions when called with wrong number of arguments."""
    env = Environment()
    evaluate(parse("(define fn (lambda (p1 p2) 'whatever))"), env)
    expected_message = "wrong number of arguments, expected 2 got 3"
    with assert_raises_regexp(LispError, expected_message):
        evaluate(parse("(fn 1 2 3)"), env)
def test_calling_nothing():
    """Calling nothing should fail (remember to quote empty data lists)"""
    # An unquoted empty list has no function in call position.
    with assert_raises(LispError):
        evaluate(parse("()"), Environment())
"""
One final test to see that recursive functions are working as expected.
The good news: this should already be working by now :)
"""
def test_calling_function_recursively():
    """Tests that a named function is included in the environment
    where it is evaluated."""
    environment = Environment()
    evaluate(parse("""
        (define my-fn
            ;; A meaningless, but recursive, function
            (lambda (x)
                (if (eq x 0)
                    42
                    (my-fn (- x 1)))))
    """), environment)
    # Base case first, then a call that actually recurses.
    assert_equals(42, evaluate(parse("(my-fn 0)"), environment))
    assert_equals(42, evaluate(parse("(my-fn 10)"), environment))
| |
from functools import update_wrapper
from django.http import Http404, HttpResponseRedirect
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy, ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
system_check_errors = []
class AlreadyRegistered(Exception):
    # Raised by AdminSite.register() when the model is already in the registry.
    pass
class NotRegistered(Exception):
    # Raised by AdminSite.unregister() when the model was never registered.
    pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
    def __init__(self, name='admin'):
        self._registry = {}  # model_class class -> admin_class instance
        # Instance name; used as the URL instance namespace in urls/get_urls.
        self.name = name
        self._actions = {'delete_selected': actions.delete_selected}
        # Snapshot of all registered actions; disable_action() only removes
        # from _actions, so get_action() can still reach disabled ones here.
        self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_class.check(model))
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
    def is_registered(self, model):
        """
        Check if a model class is registered with this `AdminSite`.
        """
        # Simple membership test against the model -> ModelAdmin registry.
        return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
    def disable_action(self, name):
        """
        Disable a globally-registered action. Raises KeyError for invalid names.
        """
        # Only the active mapping is touched; the action remains reachable
        # through get_action() via _global_actions.
        del self._actions[name]
    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raises KeyError for invalid names.
        """
        # Reads the snapshot kept by __init__/add_action, so actions removed
        # with disable_action() are still returned here.
        return self._global_actions[name]
    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        # six.iteritems keeps this lazy on both Python 2 and 3.
        return six.iteritems(self._actions)
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigured(
"Put 'django.contrib.admin' in your INSTALLED_APPS "
"setting in order to use the admin application.")
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
"setting in order to use the admin application.")
try:
default_template_engine = Engine.get_default()
except ImproperlyConfigured:
# Skip the check if the user has a non-trivial TEMPLATES setting
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors):
raise ImproperlyConfigured(
"Enable 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATES setting in order to use the admin "
"application.")
    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.
        You'll want to use this from within ``AdminSite.get_urls()``:
            class MyAdminSite(AdminSite):
                def get_urls(self):
                    from django.conf.urls import url
                    urls = super(MyAdminSite, self).get_urls()
                    urls += [
                        url(r'^my_view/$', self.admin_view(some_view))
                    ]
                    return urls
        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                # A permission-less hit on the logout URL is sent to the
                # index rather than bounced to the login page.
                if request.path == reverse('admin:logout', current_app=self.name):
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)
    def get_urls(self):
        """Build the URLconf for the whole site: site-wide views plus one
        sub-URLconf per registered model, and an app_index per app label."""
        from django.conf.urls import url, include
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views
        if settings.DEBUG:
            self.check_dependencies()
        def wrap(view, cacheable=False):
            # Bind a bound-method view through admin_view (permission/csrf
            # wrapping) while preserving its metadata via update_wrapper.
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)
        # Admin-site-wide views.
        # NOTE(review): self.index/login/logout/... handlers are defined on
        # this class outside the current excerpt.
        urlpatterns = [
            url(r'^$', wrap(self.index), name='index'),
            url(r'^login/$', self.login, name='login'),
            url(r'^logout/$', wrap(self.logout), name='logout'),
            url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
            url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
                name='password_change_done'),
            url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
                name='view_on_site'),
        ]
        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in six.iteritems(self._registry):
            urlpatterns += [
                url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)
        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                url(regex, wrap(self.app_index), name='app_list'),
            ]
        return urlpatterns
@property
def urls(self):
    """Return the (urlpatterns, app_namespace, instance_namespace) triple
    expected by Django's include()."""
    patterns = self.get_urls()
    return patterns, 'admin', self.name
def each_context(self, request):
    """
    Returns a dictionary of variables to put in the template context for
    *every* page in the admin site.
    """
    context = {}
    context['site_title'] = self.site_title
    context['site_header'] = self.site_header
    context['site_url'] = self.site_url
    context['has_permission'] = self.has_permission(request)
    return context
def password_change(self, request, extra_context=None):
    """
    Handles the "change password" task -- both form display and validation.
    """
    from django.contrib.admin.forms import AdminPasswordChangeForm
    from django.contrib.auth.views import password_change
    url = reverse('admin:password_change_done', current_app=self.name)
    # Site-wide context first, then any caller-provided overrides.
    context = dict(self.each_context(request), **(extra_context or {}))
    kwargs = {
        'current_app': self.name,
        'password_change_form': AdminPasswordChangeForm,
        'post_change_redirect': url,
        'extra_context': context,
    }
    if self.password_change_template is not None:
        kwargs['template_name'] = self.password_change_template
    return password_change(request, **kwargs)
def password_change_done(self, request, extra_context=None):
    """
    Displays the "success" page after a password change.
    """
    from django.contrib.auth.views import password_change_done
    # Site-wide context first, then any caller-provided overrides.
    context = dict(self.each_context(request), **(extra_context or {}))
    kwargs = {
        'current_app': self.name,
        'extra_context': context,
    }
    if self.password_change_done_template is not None:
        kwargs['template_name'] = self.password_change_done_template
    return password_change_done(request, **kwargs)
def i18n_javascript(self, request):
    """
    Displays the i18n JavaScript that the Django admin requires.

    This takes into account the USE_I18N setting. If it's set to False, the
    generated JavaScript will be leaner and faster.
    """
    if settings.USE_I18N:
        from django.views.i18n import javascript_catalog
    else:
        from django.views.i18n import null_javascript_catalog as javascript_catalog
    packages = ['django.conf', 'django.contrib.admin']
    return javascript_catalog(request, packages=packages)
@never_cache
def logout(self, request, extra_context=None):
    """
    Logs out the user for the given HttpRequest.

    This should *not* assume the user is already logged in.
    """
    from django.contrib.auth.views import logout
    # Site-wide context first, then any caller-provided overrides.
    context = dict(self.each_context(request), **(extra_context or {}))
    kwargs = {
        'current_app': self.name,
        'extra_context': context,
    }
    if self.logout_template is not None:
        kwargs['template_name'] = self.logout_template
    return logout(request, **kwargs)
@never_cache
def login(self, request, extra_context=None):
    """
    Displays the login form for the given HttpRequest.
    """
    if request.method == 'GET' and self.has_permission(request):
        # Already logged-in, redirect to admin index
        index_path = reverse('admin:index', current_app=self.name)
        return HttpResponseRedirect(index_path)

    from django.contrib.auth.views import login
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level,
    # and django.contrib.admin.forms eventually imports User.
    from django.contrib.admin.forms import AdminAuthenticationForm
    context = dict(self.each_context(request),
        title=_('Log in'),
        app_path=request.get_full_path(),
    )
    # Preserve the post-login redirect target across the form round-trip.
    if (REDIRECT_FIELD_NAME not in request.GET and
            REDIRECT_FIELD_NAME not in request.POST):
        context[REDIRECT_FIELD_NAME] = request.get_full_path()
    context.update(extra_context or {})

    defaults = {
        'extra_context': context,
        'current_app': self.name,
        'authentication_form': self.login_form or AdminAuthenticationForm,
        'template_name': self.login_template or 'admin/login.html',
    }
    return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
    """
    Displays the main admin index page, which lists all of the installed
    apps that have been registered in this site.
    """
    app_dict = {}
    for model, model_admin in self._registry.items():
        app_label = model._meta.app_label
        has_module_perms = model_admin.has_module_permission(request)

        if has_module_perms:
            perms = model_admin.get_model_perms(request)

            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True in perms.values():
                info = (app_label, model._meta.model_name)
                model_dict = {
                    'name': capfirst(model._meta.verbose_name_plural),
                    'object_name': model._meta.object_name,
                    'perms': perms,
                }
                # URLs are optional: a model may be registered without
                # the corresponding named routes.
                if perms.get('change', False):
                    try:
                        model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if perms.get('add', False):
                    try:
                        model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if app_label in app_dict:
                    app_dict[app_label]['models'].append(model_dict)
                else:
                    # First model seen for this app: create the app entry.
                    app_dict[app_label] = {
                        'name': apps.get_app_config(app_label).verbose_name,
                        'app_label': app_label,
                        'app_url': reverse(
                            'admin:app_list',
                            kwargs={'app_label': app_label},
                            current_app=self.name,
                        ),
                        'has_module_perms': has_module_perms,
                        'models': [model_dict],
                    }

    # Sort the apps alphabetically.
    app_list = list(six.itervalues(app_dict))
    app_list.sort(key=lambda x: x['name'].lower())

    # Sort the models alphabetically within each app.
    for app in app_list:
        app['models'].sort(key=lambda x: x['name'])

    context = dict(
        self.each_context(request),
        title=self.index_title,
        app_list=app_list,
    )
    context.update(extra_context or {})
    request.current_app = self.name

    return TemplateResponse(request, self.index_template or
                            'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
    """
    Displays the index page for a single app, listing the app's models
    that the user has any permission for. Raises PermissionDenied when
    the user lacks module permission, Http404 when nothing is visible.
    """
    app_name = apps.get_app_config(app_label).verbose_name
    app_dict = {}
    for model, model_admin in self._registry.items():
        if app_label == model._meta.app_label:
            has_module_perms = model_admin.has_module_permission(request)
            if not has_module_perms:
                raise PermissionDenied

            perms = model_admin.get_model_perms(request)

            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True in perms.values():
                info = (app_label, model._meta.model_name)
                model_dict = {
                    'name': capfirst(model._meta.verbose_name_plural),
                    'object_name': model._meta.object_name,
                    'perms': perms,
                }
                # URLs are optional: the named routes may not exist.
                if perms.get('change'):
                    try:
                        model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if perms.get('add'):
                    try:
                        model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if app_dict:
                    # Fixed: the original statement ended with a stray
                    # trailing comma, turning it into a throwaway 1-tuple.
                    app_dict['models'].append(model_dict)
                else:
                    # First time around, now that we know there's
                    # something to display, add in the necessary meta
                    # information.
                    app_dict = {
                        'name': app_name,
                        'app_label': app_label,
                        'app_url': '',
                        'has_module_perms': has_module_perms,
                        'models': [model_dict],
                    }
    if not app_dict:
        raise Http404('The requested admin page does not exist.')
    # Sort the models alphabetically within each app.
    app_dict['models'].sort(key=lambda x: x['name'])
    context = dict(self.each_context(request),
        title=_('%(app)s administration') % {'app': app_name},
        app_list=[app_dict],
        app_label=app_label,
    )
    context.update(extra_context or {})
    request.current_app = self.name

    return TemplateResponse(request, self.app_index_template or [
        'admin/%s/app_index.html' % app_label,
        'admin/app_index.html'
    ], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
# ModelAdmins registered against this instance populate its `_registry`,
# which get_urls()/index() iterate to build the admin.
site = AdminSite()
| |
from dns.resolver import Resolver
import socket
import ipaddress
import os
DEBUG = os.environ.get("EEHDEBUG_SPF")
__version__ = "0.0.1"

# SPF evaluation results (the qualifier outcomes of RFC 7208 section 2.6).
PASS = "pass"
FAIL = "fail"
SOFTFAIL = "softfail"
NEUTRAL = "neutral"
PERMERROR = "permerror"
TEMPERROR = "temperror"
NONE = "none"

# SMTP reply code used to reject a sender outright.
SMTP_PERM_FAIL = 550

# Maps each SPF result to the (SMTP code, human-readable text) pair that
# _status() folds into its return tuple.
# NOTE(review): PERMERROR/TEMPERROR both map to 450 (a transient code) --
# confirm whether permerror was meant to be a 5xx rejection.
statusmap = {
    PASS: (250, "sender SPF authorized"),
    FAIL: (SMTP_PERM_FAIL, "SPF fail - not authorized"),
    NEUTRAL: (250, "access neither permitted nor denied"),
    SOFTFAIL: (250, "domain owner discourages use of this host"),
    PERMERROR: (450, "permanent error"),
    TEMPERROR: (450, "temporary error"),
    NONE: (250, "equivocal SPF header"),
}
def _status(code):
    """Return the (result, smtp_code, message) triple for an SPF result."""
    smtp_code, message = statusmap[code]
    return (code, smtp_code, message)
def handle_all(arg, domain):
    """The "all" mechanism: matches every client unconditionally."""
    def validate(_client):
        return True
    return validate
def handle_ip(arg, domain):
    """The "ip4"/"ip6" mechanism: match the client against an address or CIDR.

    ``arg`` is a single address or a network in CIDR notation. Returns a
    validate(client) predicate; the client may be a hostname or a literal IP.
    """
    def validate(c):
        try:
            # Resolve hostnames; a literal IP passes through unchanged.
            c = socket.gethostbyname(c)
        except OSError:
            # Fixed: was a bare `except:`; only resolution failures
            # (socket.gaierror is an OSError subclass) mean "no match".
            return False
        if "/" in arg:
            # strict=False tolerates host bits set in the network spec.
            return ipaddress.ip_address(c) in ipaddress.ip_network(
                arg, False)
        else:
            return c == arg
    return validate
def handle_a(arg, domain):
    """The "a" mechanism: match the client against the domain's A record,
    optionally widened to a network by a "/len" suffix."""
    network = "/" in arg
    if network:
        arg, length = arg.split("/")
    if not arg:
        arg = domain
    ip = socket.gethostbyname(arg)
    if network:
        return handle_ip(ip + "/" + length, domain)
    return handle_ip(ip, domain)
def handle_mx(arg, domain):
    """The "mx" mechanism: match the client against the domain's MX hosts.

    With a "/len" suffix each MX address is treated as a network of that
    prefix length; otherwise the client must resolve to one of the MX
    addresses exactly.
    """
    if "/" in arg:
        arg, length = arg.split("/")
        network = True
    else:
        network = False
    if not arg:
        arg = domain
    a = Resolver().query(arg, "MX")
    # Fixed: the original kept the result of map(), a one-shot iterator in
    # Python 3 -- it was exhausted by the first validation, so every later
    # call silently saw an empty sequence. Materialize the list once.
    ips = [socket.gethostbyname(mx.exchange.to_text(True)) for mx in a]
    if network:
        def validate(c):
            client = ipaddress.ip_address(socket.gethostbyname(c))
            for i in ips:
                if client in ipaddress.ip_network(i + "/" + length, False):
                    return True
            return False
        return validate
    else:
        return lambda c: socket.gethostbyname(c) in ips
def handle_ptr(arg, domain):
    """The "ptr" mechanism: forward-confirmed reverse DNS.

    The client's PTR hostnames are resolved forward again; the mechanism
    matches if some hostname ending in ``arg`` resolves back to the client.
    """
    if not arg:
        arg = domain
    def validate(c):
        try:
            name, aliases, _addresses = socket.gethostbyaddr(c)
        except OSError:
            return False
        # Fixed: the original compared the undefined name ``n`` against the
        # address *list* (NameError at runtime), and returned False after
        # the first non-matching hostname instead of trying the aliases.
        for hostname in [name] + aliases:
            try:
                res = socket.gethostbyname(hostname)
            except OSError:
                continue
            if hostname.endswith(arg) and res == c:
                return True
        return False
    return validate
def handle_include(arg, domain):
    """The "include" mechanism: delegate to the SPF policy of ``arg``."""
    def validate(c):
        status = spf(arg, c)
        # Matches unless the included policy produced a hard SMTP failure.
        return status[1] != SMTP_PERM_FAIL
    return validate
def handle_exp(arg, domain):
    """The "exp" modifier only carries an explanation; it never matches."""
    def validate(_client):
        return False
    return validate
def handle_exists(arg, domain):
    """The "exists" mechanism: match if ``arg`` resolves to any A record.

    Fixed: per RFC 7208 the lookup is performed on the mechanism's domain
    argument; the original mistakenly resolved the connecting client and
    ignored ``arg`` entirely.
    """
    def validate(_client):
        try:
            socket.gethostbyname(arg)
        except OSError:
            # Was a bare `except:`; only resolution failures mean no match.
            return False
        else:
            return True
    return validate
# Dispatch table from SPF mechanism/modifier name to its handler factory.
# Each handler(arg, domain) returns a validate(client) predicate.
# NOTE(review): "ip6" shares handle_ip, which resolves via the IPv4-only
# gethostbyname -- confirm IPv6 support before relying on it.
MECHANISMS = {
    "all": handle_all,
    "ip4": handle_ip,
    "ip6": handle_ip,
    "a": handle_a,
    "mx": handle_mx,
    "ptr": handle_ptr,
    "include": handle_include,
    "exists": handle_exists,
    "exp": handle_exp,
}
def spf(domain, greeting):
    """Evaluate the SPF policy of ``domain`` against the client ``greeting``.

    Returns the (result, smtp_code, message) triple produced by _status().
    """
    r = Resolver()
    answers = r.query(domain, "TXT")
    # Find the first TXT record that carries an SPF policy.
    for answer in answers:
        if DEBUG:
            print(answer.strings[0])
        if answer.strings[0].startswith(b"v=spf"):
            policy = answer.strings[0]
            break
    else:
        return _status(NEUTRAL)
    # Drop the "v=spf1" version tag and walk the directives left to right.
    spfp = policy.decode().lower().split(" ")[1:]
    for action in spfp:
        # A leading qualifier selects the result if the mechanism matches.
        if action.startswith("+"):
            action = action[1:]
            verb = PASS
        elif action.startswith("-"):
            action = action[1:]
            verb = FAIL
        elif action.startswith("~"):
            action = action[1:]
            verb = SOFTFAIL
        elif action.startswith("?"):
            action = action[1:]
            verb = NEUTRAL
        else:
            verb = PASS
        if DEBUG:
            print(action)
        # Mechanisms use "name:arg"; modifiers use "name=arg".
        if ":" in action:
            action, _, param = action.partition(":")
        elif "=" in action:
            action, _, param = action.partition("=")
        else:
            param = ""
        if DEBUG:
            print(param)
        if action == "redirect":
            return spf(param, greeting)
        elif action not in MECHANISMS:
            # Fixed: the original built this status but dropped it instead
            # of returning, so unknown mechanisms were silently skipped.
            return _status(PERMERROR)
        else:
            if DEBUG:
                print(verb, action, param, MECHANISMS[action](param, domain)(greeting))
            if MECHANISMS[action](param, domain)(greeting):
                return _status(verb)
    else:
        # No directive matched.
        return _status(NONE)
| |
#! /usr/bin/env python
#
# Provide some simple capabilities from number theory.
#
# Version of 2008.11.14.
#
# Written in 2005 and 2006 by Peter Pearson and placed in the public domain.
# Revision history:
# 2008.11.14: Use pow( base, exponent, modulus ) for modular_exp.
# Make gcd and lcm accept arbitrarly many arguments.
import math
import types
class Error( Exception ):
    """Base class for exceptions in this module."""
    pass
class SquareRootError( Error ):
    """Raised when an argument has no square root modulo the given prime."""
    pass
class NegativeExponentError( Error ):
    """Raised when a negative exponent is passed to modular_exp()."""
    pass
def modular_exp( base, exponent, modulus ):
    """Raise base to exponent, reducing by modulus.

    Delegates to the C-accelerated three-argument pow(). Raises
    NegativeExponentError if exponent < 0.
    """
    if exponent < 0:
        raise NegativeExponentError( "Negative exponents (%d) not allowed" \
                                     % exponent )
    # The superseded pure-Python square-and-multiply loop that used to sit
    # here (commented out) has been removed; pow() is its exact equivalent.
    return pow( base, exponent, modulus )
def polynomial_reduce_mod( poly, polymod, p ):
    """Reduce poly by polymod, integer arithmetic modulo p.

    Polynomials are represented as lists of coefficients
    of increasing powers of x."""
    # Just to make this easy, require a monic polynomial:
    assert polymod[-1] == 1
    assert len( polymod ) > 1

    degree = len( polymod )
    while len( poly ) >= degree:
        lead = poly[-1]
        if lead != 0:
            # Subtract lead * polymod shifted to align leading terms.
            for k in range( 2, degree + 1 ):
                poly[-k] = ( poly[-k] - lead * polymod[-k] ) % p
        poly = poly[:-1]
    return poly
def polynomial_multiply_mod( m1, m2, polymod, p ):
    """Polynomial multiplication modulo a polynomial over ints mod p.

    Polynomials are represented as lists of coefficients
    of increasing powers of x."""
    # Accumulate the convolution of the two coefficient lists, mod p.
    prod = [0] * ( len( m1 ) + len( m2 ) - 1 )
    for i, c1 in enumerate( m1 ):
        for j, c2 in enumerate( m2 ):
            prod[i+j] = ( prod[i+j] + c1 * c2 ) % p
    # Then reduce the raw product by the modulus polynomial.
    return polynomial_reduce_mod( prod, polymod, p )
def polynomial_exp_mod( base, exponent, polymod, p ):
    """Polynomial exponentiation modulo a polynomial over ints mod p.

    Square-and-multiply, after the Handbook of Applied Cryptography,
    algorithm 2.227. Polynomials are lists of coefficients of increasing
    powers of x."""
    assert exponent < p

    if exponent == 0:
        return [ 1 ]

    G = base
    k = exponent
    s = G if k % 2 == 1 else [ 1 ]
    while k > 1:
        k //= 2
        G = polynomial_multiply_mod( G, G, polymod, p )
        if k % 2 == 1:
            s = polynomial_multiply_mod( G, s, polymod, p )
    return s
def jacobi( a, n ):
    """Jacobi symbol (a/n) for odd n >= 3 (HAC algorithm 2.149)."""
    assert n >= 3
    assert n % 2 == 1
    a %= n
    if a == 0:
        return 0
    if a == 1:
        return 1
    # Strip the factors of two from a, tracking the induced sign.
    a1, e = a, 0
    while a1 % 2 == 0:
        a1 //= 2
        e += 1
    if e % 2 == 0 or n % 8 in ( 1, 7 ):
        s = 1
    else:
        s = -1
    if a1 == 1:
        return s
    # Quadratic reciprocity flip.
    if n % 4 == 3 and a1 % 4 == 3:
        s = -s
    return s * jacobi( n % a1, a1 )
def square_root_mod_prime( a, p ):
    """Modular square root of a, mod p, p prime.

    Based on the Handbook of Applied Cryptography, algorithms 3.34 to 3.39.
    Raises SquareRootError when a is a quadratic non-residue mod p.
    """
    assert 0 <= a < p
    assert 1 < p

    if a == 0:
        return 0
    if p == 2:
        return a

    jac = jacobi( a, p )
    if jac == -1:
        raise SquareRootError( "%d has no square root modulo %d" \
                               % ( a, p ) )

    if p % 4 == 3:
        return modular_exp( a, (p+1)//4, p )

    if p % 8 == 5:
        d = modular_exp( a, (p-1)//4, p )
        if d == 1:
            return modular_exp( a, (p+3)//8, p )
        if d == p-1:
            return ( 2 * a * modular_exp( 4*a, (p-5)//8, p ) ) % p
        # Fixed: Python 2-only "raise E, msg" syntax replaced with the
        # call form, which is valid in both Python 2 and Python 3.
        raise RuntimeError( "Shouldn't get here." )

    # p % 8 == 1: find b with (b*b - 4a) a non-residue, then use
    # polynomial arithmetic modulo x**2 - b*x + a.
    for b in range( 2, p ):
        if jacobi( b*b-4*a, p ) == -1:
            f = ( a, -b, 1 )
            ff = polynomial_exp_mod( ( 0, 1 ), (p+1)//2, f, p )
            assert ff[1] == 0
            return ff[0]
    raise RuntimeError( "No b found." )
def inverse_mod( a, m ):
    """Inverse of a mod m (extended Euclid, after Ferguson and Schneier)."""
    if a < 0 or m <= a:
        a %= m

    c, d = a, m
    uc, vc, ud, vd = 1, 0, 0, 1
    while c != 0:
        q, r = divmod( d, c )
        c, d = r, c
        uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc

    # d is now gcd(a, m); the inverse exists only when it is 1,
    # and then ud*a + vd*m == 1, so ud is the inverse (mod m).
    assert d == 1
    return ud if ud > 0 else ud + m
def gcd2(a, b):
    """Greatest common divisor of two integers, by Euclid's algorithm."""
    while a != 0:
        a, b = b % a, a
    return b
def gcd( *a ):
    """Greatest common divisor.

    Usage: gcd( [ 2, 4, 6 ] )
    or:    gcd( 2, 4, 6 )
    """
    # Fixed: reduce is a builtin only in Python 2; functools.reduce exists
    # in both Python 2.6+ and Python 3.
    from functools import reduce
    if len( a ) > 1: return reduce( gcd2, a )
    if hasattr( a[0], "__iter__" ): return reduce( gcd2, a[0] )
    return a[0]
def lcm2(a, b):
    """Least common multiple of two integers."""
    g = gcd(a, b)
    return (a * b) // g
def lcm( *a ):
    """Least common multiple.

    Usage: lcm( [ 3, 4, 5 ] )
    or:    lcm( 3, 4, 5 )
    """
    # Fixed: reduce is a builtin only in Python 2; functools.reduce exists
    # in both Python 2.6+ and Python 3.
    from functools import reduce
    if len( a ) > 1: return reduce( lcm2, a )
    if hasattr( a[0], "__iter__" ): return reduce( lcm2, a[0] )
    return a[0]
def factorization( n ):
    """Decompose n into a list of (prime,exponent) pairs."""
    # NOTE(review): types.IntType/LongType exist only in Python 2; this
    # assert raises AttributeError on Python 3.
    assert isinstance( n, types.IntType ) or isinstance( n, types.LongType )

    if n < 2: return []

    result = []
    d = 2

    # Test the small primes:
    for d in smallprimes:
        if d > n: break
        q, r = divmod( n, d )
        if r == 0:
            # d divides n; divide it out repeatedly, counting multiplicity.
            count = 1
            while d <= n:
                n = q
                q, r = divmod( n, d )
                if r != 0: break
                count = count + 1
            result.append( ( d, count ) )

    # If n is still greater than the last of our small primes,
    # it may require further work:
    if n > smallprimes[-1]:
        if is_prime( n ):           # If what's left is prime, it's easy:
            result.append( ( n, 1 ) )
        else:                       # Ugh. Search stupidly for a divisor:
            d = smallprimes[-1]
            while 1:
                d = d + 2                   # Try the next divisor.
                q, r = divmod( n, d )
                if q < d: break             # n < d*d means we're done, n = 1 or prime.
                if r == 0:                  # d divides n. How many times?
                    count = 1
                    n = q
                    while d <= n:           # As long as d might still divide n,
                        q, r = divmod( n, d )   # see if it does.
                        if r != 0: break
                        n = q               # It does. Reduce n, increase count.
                        count = count + 1
                    result.append( ( d, count ) )
            # Whatever remains after trial division is a prime factor.
            if n > 1: result.append( ( n, 1 ) )

    return result
def phi( n ):
    """Return the Euler totient function of n."""
    assert isinstance( n, types.IntType ) or isinstance( n, types.LongType )

    if n < 3: return 1

    # Each prime p with exponent e contributes p**(e-1) * (p-1).
    result = 1
    for prime, power in factorization( n ):
        if power > 1:
            result = result * prime ** (power-1) * ( prime - 1 )
        else:
            result = result * ( prime - 1 )
    return result
def carmichael( n ):
    """Return Carmichael function of n.

    Carmichael(n) is the smallest integer x such that
    m**x = 1 mod n for all m relatively prime to n.
    """
    factors = factorization( n )
    return carmichael_of_factorized( factors )
def carmichael_of_factorized( f_list ):
    """Return the Carmichael function of a number that is
    represented as a list of (prime,exponent) pairs.
    """
    if not f_list:
        return 1

    # Fold lcm over the Carmichael value of each prime power.
    result = carmichael_of_ppower( f_list[0] )
    for pp in f_list[1:]:
        result = lcm( result, carmichael_of_ppower( pp ) )
    return result
def carmichael_of_ppower( pp ):
    """Carmichael function of the given power of the given prime.
    """
    p, a = pp
    # Powers of two above 4 are the one case where the value is half of
    # Euler's phi; otherwise it equals phi(p**a).
    if p == 2 and a > 2:
        return 2 ** (a-2)
    return (p-1) * p ** (a-1)
def order_mod( x, m ):
    """Return the order of x in the multiplicative group mod m.
    """
    # Warning: this implementation is not very clever, and will
    # take a long time if m is very large.
    if m <= 1:
        return 0

    assert gcd( x, m ) == 1

    # Multiply by x until we cycle back to 1, counting the steps.
    result = 1
    z = x
    while z != 1:
        z = ( z * x ) % m
        result += 1
    return result
def largest_factor_relatively_prime( a, b ):
    """Return the largest factor of a relatively prime to b.
    """
    # Repeatedly strip from a every prime it shares with b.
    while True:
        common = gcd( a, b )
        if common <= 1:
            break
        b = common
        # Divide out all powers of the shared factor from a.
        while a % common == 0:
            a //= common
    return a
def kinda_order_mod( x, m ):
    """Return the order of x in the multiplicative group mod m',
    where m' is the largest factor of m relatively prime to x.
    """
    reduced_modulus = largest_factor_relatively_prime( m, x )
    return order_mod( x, reduced_modulus )
def is_prime( n ):
    """Return True if x is prime, False otherwise.

    We use the Miller-Rabin test, as given in Menezes et al. p. 138.
    This test is not exact: there are composite values n for which
    it returns True.

    In testing the odd numbers from 10000001 to 19999999,
    about 66 composites got past the first test,
    5 got past the second test, and none got past the third.
    Since factors of 2, 3, 5, 7, and 11 were detected during
    preliminary screening, the number of numbers tested by
    Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
    = 4.57 million.
    """

    # (This is used to study the risk of false positives:)
    global miller_rabin_test_count

    miller_rabin_test_count = 0

    # Small candidates are answered directly from the prime table.
    if n <= smallprimes[-1]:
        if n in smallprimes: return True
        else: return False

    # Cheap screen: reject multiples of the first five primes outright.
    if gcd( n, 2*3*5*7*11 ) != 1: return False

    # Choose a number of iterations sufficient to reduce the
    # probability of accepting a composite below 2**-80
    # (from Menezes et al. Table 4.4):
    t = 40
    n_bits = 1 + int( math.log( n, 2 ) )
    for k, tt in ( ( 100, 27 ),
                   ( 150, 18 ),
                   ( 200, 15 ),
                   ( 250, 12 ),
                   ( 300, 9 ),
                   ( 350, 8 ),
                   ( 400, 7 ),
                   ( 450, 6 ),
                   ( 550, 5 ),
                   ( 650, 4 ),
                   ( 850, 3 ),
                   ( 1300, 2 ),
                 ):
        if n_bits < k: break
        t = tt

    # Write n - 1 as 2**s * r with r odd, then run the test t times:
    s = 0
    r = n - 1
    while ( r % 2 ) == 0:
        s = s + 1
        r = r // 2
    # Fixed: range() replaces the Python 2-only xrange(); t <= 40, so the
    # cost is negligible and the function now also runs on Python 3.
    for i in range( t ):
        a = smallprimes[ i ]
        y = modular_exp( a, r, n )
        if y != 1 and y != n-1:
            j = 1
            while j <= s - 1 and y != n - 1:
                y = modular_exp( y, 2, n )
                if y == 1:
                    # Nontrivial square root of 1 found: n is composite.
                    miller_rabin_test_count = i + 1
                    return False
                j = j + 1
            if y != n-1:
                miller_rabin_test_count = i + 1
                return False

    return True
def next_prime( starting_value ):
    "Return the smallest prime larger than the starting value."
    if starting_value < 2:
        return 2
    # Start at the first odd number strictly above starting_value.
    candidate = ( starting_value + 1 ) | 1
    while not is_prime( candidate ):
        candidate += 2
    return candidate
# All primes up to 1229. Used as trial divisors in factorization(), as the
# small-candidate lookup table and Miller-Rabin bases in is_prime().
smallprimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
               43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
               101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
               151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
               199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
               263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
               317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
               383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
               443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
               503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
               577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
               641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
               701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
               769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
               839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
               911, 919, 929, 937, 941, 947, 953, 967, 971, 977,
               983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033,
               1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,
               1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
               1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229]

# Set by is_prime(): number of Miller-Rabin rounds run before the most
# recent composite was detected (0 otherwise). For diagnostics only.
miller_rabin_test_count = 0
def __main__():
    """Self-test driver: exercises gcd, lcm, next_prime,
    square_root_mod_prime, jacobi, and inverse_mod, printing failures and
    raising FailedTest if any were detected.

    NOTE(review): uses Python 2-only print statements and xrange; this
    module cannot run under Python 3 as written.
    """

    # Making sure locally defined exceptions work:
    # p = modular_exp( 2, -2, 3 )
    # p = square_root_mod_prime( 2, 3 )

    print "Testing gcd..."
    assert gcd( 3*5*7, 3*5*11, 3*5*13 ) == 3*5
    assert gcd( [ 3*5*7, 3*5*11, 3*5*13 ] ) == 3*5
    assert gcd( 3 ) == 3

    print "Testing lcm..."
    assert lcm( 3, 5*3, 7*3 ) == 3*5*7
    assert lcm( [ 3, 5*3, 7*3 ] ) == 3*5*7
    assert lcm( 3 ) == 3

    print "Testing next_prime..."
    # Consecutive primes just below one million; next_prime of each must
    # land exactly on its successor.
    bigprimes = ( 999671,
                  999683,
                  999721,
                  999727,
                  999749,
                  999763,
                  999769,
                  999773,
                  999809,
                  999853,
                  999863,
                  999883,
                  999907,
                  999917,
                  999931,
                  999953,
                  999959,
                  999961,
                  999979,
                  999983 )

    for i in xrange( len( bigprimes ) - 1 ):
        assert next_prime( bigprimes[i] ) == bigprimes[ i+1 ]

    error_tally = 0

    # Test the square_root_mod_prime function:
    for p in smallprimes:
        print "Testing square_root_mod_prime for modulus p = %d." % p
        squares = []

        # Every residue root*root must have a recoverable square root.
        for root in range( 0, 1+p//2 ):
            sq = ( root * root ) % p
            squares.append( sq )
            calculated = square_root_mod_prime( sq, p )
            if ( calculated * calculated ) % p != sq:
                error_tally = error_tally + 1
                print "Failed to find %d as sqrt( %d ) mod %d. Said %d." % \
                      ( root, sq, p, calculated )

        # Every non-residue must raise SquareRootError.
        for nonsquare in range( 0, p ):
            if nonsquare not in squares:
                try:
                    calculated = square_root_mod_prime( nonsquare, p )
                except SquareRootError:
                    pass
                else:
                    error_tally = error_tally + 1
                    print "Failed to report no root for sqrt( %d ) mod %d." % \
                          ( nonsquare, p )

    # Test the jacobi function:
    for m in range( 3, 400, 2 ):
        print "Testing jacobi for modulus m = %d." % m
        if is_prime( m ):
            # For prime m, the Jacobi symbol equals the Legendre symbol:
            # +1 on squares, -1 on non-squares.
            squares = []
            for root in range( 1, m ):
                if jacobi( root * root, m ) != 1:
                    error_tally = error_tally + 1
                    print "jacobi( %d * %d, %d ) != 1" % ( root, root, m )
                squares.append( root * root % m )
            for i in range( 1, m ):
                if not i in squares:
                    if jacobi( i, m ) != -1:
                        error_tally = error_tally + 1
                        print "jacobi( %d, %d ) != -1" % ( i, m )
        else:       # m is not prime.
            # Composite m: check multiplicativity over the factorization.
            f = factorization( m )
            for a in range( 1, m ):
                c = 1
                for i in f:
                    c = c * jacobi( a, i[0] ) ** i[1]
                if c != jacobi( a, m ):
                    error_tally = error_tally + 1
                    print "%d != jacobi( %d, %d )" % ( c, a, m )

    # Test the inverse_mod function:
    print "Testing inverse_mod . . ."
    import random
    n_tests = 0
    for i in range( 100 ):
        m = random.randint( 20, 10000 )
        for j in range( 100 ):
            a = random.randint( 1, m-1 )
            if gcd( a, m ) == 1:
                n_tests = n_tests + 1
                inv = inverse_mod( a, m )
                if inv <= 0 or inv >= m or ( a * inv ) % m != 1:
                    error_tally = error_tally + 1
                    print "%d = inverse_mod( %d, %d ) is wrong." % ( inv, a, m )
    assert n_tests > 1000
    print n_tests, " tests of inverse_mod completed."

    class FailedTest(Exception): pass
    print error_tally, "errors detected."
    if error_tally != 0:
        raise FailedTest("%d errors detected" % error_tally)
# Run the self-test suite when executed as a script.
if __name__ == '__main__':
    __main__()
| |
from ..docgen.doc import Doc
from ..elements import registry
from .. import bbcode
from .. import pilot
from .. import namespaces
from ..compat import text_type
from fs.path import join, splitext
from collections import defaultdict
from operator import itemgetter
class Extracter(object):
    """Extract documentation from code and external files"""

    def __init__(self, archive, fs):
        # archive: the library archive being documented;
        # fs: the output filesystem everything is written to.
        self.archive = archive
        self.fs = fs

    def _namespace_to_path(self, ns):
        """Map a namespace URI to a filesystem path under xmlns/."""
        if "://" in ns:
            ns = ns.split("://", 1)[-1]
        ns = ns.replace("/", "_").replace(".", "_dot_")
        return "xmlns/" + ns

    def _get_namespace_fs(self, ns):
        """Open (creating if necessary) the output fs for a namespace."""
        path = self._namespace_to_path(ns)
        namespace_fs = self.fs.makedirs(path)
        return namespace_fs

    def _get_lib_fs(self, libname):
        """Open (creating if necessary) the docs output fs for a library."""
        libname = libname.replace(".", "_")
        path = "libs/{libname}/docs".format(libname=libname)
        lib_fs = self.fs.makedirs(path)
        return lib_fs

    def slugify_namespace(self, ns):
        """Reduce a namespace URI to a filename-safe slug."""
        if ":" in ns:
            ns = ns.split(":", 1)[-1].lstrip("/")
        return ns.replace("/", "_").replace(".", "_dot_")

    def extract_docs(self, libname, docs_fs, const_data=None):
        """Render every *.txt document in docs_fs into docs/<libname>."""
        docs = docs_fs.listdir(wildcard="*.txt")
        index = []
        docs_output_fs = self.fs.makedirs("docs/{}".format(libname))
        with pilot.console.progress(
            "extracting {} docs".format(libname), len(docs)
        ) as progress:
            for doc_name in progress(docs):
                default_name = splitext(doc_name)[0]
                code = docs_fs.gettext(doc_name)
                html, data = bbcode.parser.render(code)
                # A [name] in the doc body overrides the filename.
                doc_name = data.get("name", default_name)
                data.update(
                    body=code,
                    name=doc_name,
                    libname=libname,
                    source=docs_fs.getsyspath(doc_name),
                )
                doc = Doc("doc", doc_name, "doc")
                doc.add_reference("index")
                doc.data.update(const_data or {})
                doc.data.update(data)
                data["id"] = doc.id
                doc.write(docs_output_fs)
                index.append(data)
        # NOTE(review): the sorted index is built but never written or
        # returned -- confirm whether persisting it was intended.
        index.sort(key=lambda d: (d.get("section", ""), d["name"]))

    def extract_site_docs(self, docs_fs, const_data=None, dirname="docs"):
        """Render every *.txt site document in docs_fs into ``dirname``."""
        docs = docs_fs.listdir(wildcard="*.txt")
        index = []
        docs_output_fs = self.fs.makedirs(dirname)
        with pilot.console.progress("extracting site docs", len(docs)) as progress:
            for doc_name in progress(docs):
                default_name = splitext(doc_name)[0]
                code = docs_fs.gettext(doc_name)
                html, data = bbcode.parser.render(
                    code, path=docs_fs.getsyspath(doc_name)
                )
                doc_name = data.get("name", default_name)
                data.update(body=code, name=doc_name)
                # Site docs may declare their own doc class.
                doc_class = data.get("class", "doc")
                doc = Doc("doc", doc_name, doc_class)
                doc.add_reference("index")
                doc.data.update(const_data or {})
                doc.data.update(data)
                data["id"] = doc.id
                doc.write(docs_output_fs)
                index.append(data)
        # NOTE(review): same as extract_docs -- index sorted but unused.
        index.sort(key=lambda d: (d.get("section", ""), d["name"]))

    def extract_namespace(self, ns, const_data=None):
        """Extract docs for every tag registered in namespace ``ns``."""
        namespace_tags = registry.get_elements_in_xmlns(ns).items()
        self.extract_tags(namespace_tags, const_data=const_data)

    def extract_tags(self, elements, const_data=None):
        """Write a Doc per tag element plus per-namespace and global indices."""
        indices = defaultdict(list)
        tags = []
        tag_namespaces = set()
        with pilot.console.progress("extracting tags", len(elements) + 1) as progress:
            for element in progress(elements):
                tag_namespaces.add(element.xmlns)
                element_name = element._tag_name
                doc_namespace = "xmlns.{}".format(element.xmlns)
                if element._get_help("undocumented", False):
                    continue
                doc = Doc(doc_namespace, element_name, "tag")
                doc.data.update(const_data or {})
                doc.data.update(element.extract_doc_info())
                doc.data.update(
                    namespace=element.xmlns,
                    namespace_slug=self.slugify_namespace(element.xmlns),
                    name=element_name,
                    lib=element._lib_long_name,
                )
                # Re-apply const_data so it wins over extracted info.
                # Fixed: guarded with `or {}` -- dict.update(None) raised
                # TypeError whenever no const_data was supplied.
                doc.data.update(const_data or {})
                doc.add_reference("doc.index")
                doc.add_reference("tags.index")
                indices[element.xmlns].append(doc.data)
                tags.append(doc.data)
                with self._get_namespace_fs(element.xmlns) as namespace_fs:
                    doc.write(namespace_fs)

            tags_index = Doc("tags", "index", "tags_index")
            tags_index.data.update(const_data or {})
            tags_index.data.update(namespaces=sorted(tag_namespaces))
            tags_index.data.update(
                tags=sorted(tags, key=lambda t: (t["tag_name"], t["namespace"]))
            )
            tags_index.data["by_namespace"] = {}
            tags_index.data["namespaces"] = []
            for ns in tag_namespaces:
                tags_index.data["by_namespace"][ns] = []
                tags_index.data["namespaces"].append(
                    (ns, namespaces.namespace_docs.get(ns))
                )
            tags_index.data["namespaces"].sort()

            for tag in tags:
                tags_index.data["by_namespace"][tag["namespace"]].append(tag)

            # Link each tag to its alphabetical neighbours within the
            # namespace for prev/next navigation.
            for index in tags_index.data["by_namespace"].values():
                index.sort(key=lambda t: (t["tag_name"], t["namespace"]))
                for i, tag in enumerate(index):
                    try:
                        tag["next_tag"] = index[i + 1]["tag_name"]
                    except IndexError:
                        pass
                    try:
                        # NOTE(review): at i == 0 this reads index[-1]
                        # (no IndexError), so the first tag's prev_tag is
                        # the last tag -- confirm the wrap is intended.
                        tag["prev_tag"] = index[i - 1]["tag_name"]
                    except IndexError:
                        pass

            tags_index.add_reference("doc.index")
            tags_index.write(self.fs)

            for ns in tag_namespaces:
                ns_slug = self.slugify_namespace(ns)
                ns_index = Doc(
                    "xmlns.{}".format(ns), "{}_index".format(ns_slug), "xmlns_index"
                )
                ns_index.data.update(const_data or {})
                ns_index.data.update(
                    tags=sorted(
                        tags_index.data["by_namespace"][ns], key=lambda t: t["tag_name"]
                    ),
                    namespace=ns,
                    namespace_doc=namespaces.namespace_docs.get(ns),
                )
                ns_index.write(self.fs)
            progress.step()

    def extract_commands(self, elements, const_data=None):
        """Write a Doc per command element plus a sorted command index."""
        commands_output_fs = self.fs.makedir("commands")
        command_index = []
        with pilot.console.progress(
            "extracting commands", len(elements) + 1
        ) as progress:
            for element in progress(elements):
                doc = Doc("command", "command_{}".format(element.libname), "command")
                doc.data.update(const_data or {})
                doc.data.update(element.extract_doc_info())
                doc.data.update(
                    namespace=element.xmlns,
                    namespace_slug=self.slugify_namespace(element.xmlns),
                    name=element.libname,
                    doc=element._doc or "",
                    signature=element._signature,
                    synopsis=element._synopsis,
                )
                command_index.append(doc.data)
                doc.write(commands_output_fs)

            command_index.sort(key=itemgetter("name"))
            doc = Doc("command_index", "index", "command_index")
            doc.data.update(const_data or {})
            doc.data["commands"] = command_index
            doc.write(commands_output_fs)
            progress.step()

    def extract_lib(self, long_name):
        """Extract the cover, docs, tags, and commands for one library."""
        lib = self.archive.libs[long_name]
        # Shared context merged into every Doc written for this library.
        const_data = {
            "long_name": long_name,
            "lib": {
                "title": lib.title,
                "url": lib.url,
                "version": text_type(lib.version),
            },
            "author": lib.author,
        }
        lib_cover = Doc("cover", "cover", doc_class="cover")
        lib_cover.add_reference("doc.index")
        lib_cover.data.update(const_data)
        lib_cover.write(self.fs)
        if lib.documentation_location is not None:
            with lib.load_fs.opendir(lib.documentation_location) as docs_fs:
                self.extract_docs(long_name, docs_fs, const_data)
        lib_tags = self.archive.registry.get_elements_in_lib(long_name)
        self.extract_tags(lib_tags, const_data=const_data)
        commands = self.archive.get_elements_by_type(namespaces.default, "command")
        self.extract_commands(commands, const_data=const_data)
| |
#!/usr/bin/python
import sys
import itertools
import signal
from time import sleep
from threading import Thread
from mininet.net import Mininet
from mininet.log import setLogLevel
from mininet.node import RemoteController, Node
from mininet.log import info, debug, output, error
from mininet.link import TCLink
from mininet.cli import CLI
# This is the program that each host will call
import gratuitousArp
ARP_PATH = gratuitousArp.__file__.replace('.pyc', '.py')
class ONOSMininet( Mininet ):
    """Mininet network wired to one or more remote ONOS controllers.

    Adds helpers for gratuitous ARP (to aid controller host discovery),
    an endless ping loop and background iperf sessions between hosts.
    """

    def __init__( self, controllers=None, gratuitousArp=True, build=True, *args, **kwargs ):
        """Create Mininet object for ONOS.
        controllers: List of controller IP addresses
        gratuitousArp: Send an ARP from each host to aid controller's host discovery"""
        # Fixed: avoid a shared mutable default argument.
        if controllers is None:
            controllers = []
        # discarding provided controller (if any),
        # using list of remote controller IPs instead
        kwargs[ 'controller' ] = None
        # delay building for a second
        kwargs[ 'build' ] = False
        Mininet.__init__(self, *args, **kwargs )
        self.gratArp = gratuitousArp
        info ( '*** Adding controllers\n' )
        ctrl_count = 0
        for controllerIP in controllers:
            self.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
            info( ' c%d (%s)\n' % ( ctrl_count, controllerIP ) )
            ctrl_count = ctrl_count + 1
        if self.topo and build:
            self.build()

    def start( self ):
        """Start the network, then optionally help host discovery with ARPs."""
        Mininet.start( self )
        if self.gratArp:
            self.waitConnected( timeout=5 )
            info ( '*** Sending a gratuitious ARP from each host\n' )
            self.gratuitousArp()

    def verifyHosts( self, hosts ):
        """Normalize *hosts* in place: replace host names with Node objects
        and drop entries that are neither known host names nor Nodes.

        Fixed: the original deleted list items while iterating forward by
        index, which skipped elements and could raise IndexError.
        """
        verified = []
        for h in hosts:
            if isinstance( h, str ):
                if h in self:
                    verified.append( self[ h ] )
                else:
                    info( '*** ERROR: %s is not a host\n' % h )
            elif isinstance( h, Node ):
                verified.append( h )
        # Slice-assign so the caller's list object is updated, as before.
        hosts[:] = verified

    def gratuitousArp( self, hosts=None ):
        "Send an ARP from each host to aid controller's host discovery; fallback to ping if necessary"
        # Fixed mutable default; copy so verifyHosts does not mutate the
        # caller's list (or self.hosts).
        hosts = list( hosts ) if hosts else list( self.hosts )
        self.verifyHosts( hosts )
        for host in hosts:
            info( '%s ' % host.name )
            info( host.cmd( ARP_PATH ) )
        info ( '\n' )

    def pingloop( self ):
        "Loop forever pinging the full mesh of hosts"
        # Silence per-ping output; restore the log level on ^C.
        setLogLevel( 'error' )
        try:
            while True:
                self.ping()
        finally:
            setLogLevel( 'info' )

    def bgIperf( self, hosts=None, seconds=10 ):
        """Run an all-pairs iperf between *hosts* for *seconds* seconds.

        ^C aborts; ^Z lets the transfers finish in a background thread.
        """
        # Fixed mutable default; copy so verifyHosts does not mutate the
        # caller's list.
        hosts = list( hosts ) if hosts else []
        self.verifyHosts( hosts )
        servers = [ host.popen("iperf -s") for host in hosts ]
        clients = []
        for s, d in itertools.combinations(hosts, 2):
            info ( '%s <--> %s\n' % ( s.name, d.name ))
            cmd = 'iperf -c %s -t %s -y csv' % (d.IP(), seconds)
            p = s.popen(cmd)
            p.s = s.name
            p.d = d.name
            clients.append(p)

        def handler (_signum, _frame):
            # ^Z (SIGTSTP) triggers the background path below.
            raise BackgroundException()
        oldSignal = signal.getsignal(signal.SIGTSTP)
        signal.signal(signal.SIGTSTP, handler)

        def finish( verbose=True ):
            # Collect client results (bandwidth is field 9 of iperf's csv),
            # then shut the servers down.
            for c in clients:
                out, err = c.communicate()
                if verbose:
                    if err:
                        info( err )
                    else:
                        bw = out.split( ',' )[8]
                        info( '%s <--> %s: %s\n' % ( c.s, c.d, formatBw(bw) ) )
            for s in servers:
                s.terminate()

        try:
            info ( 'Press ^Z to continue in background or ^C to abort\n')
            progress( seconds )
            finish()
        except KeyboardInterrupt:
            for c in clients:
                c.terminate()
            for s in servers:
                s.terminate()
        except BackgroundException:
            info( '\n*** Continuing in background...\n' )
            t = Thread( target=finish, args=[ False ] )
            t.start()
        finally:
            #Disable custom background signal
            signal.signal(signal.SIGTSTP, oldSignal)
def progress(t):
    """Print one dot per second for roughly *t* seconds, then a newline."""
    remaining = t
    while remaining > 0:
        sys.stdout.write( '.' )
        sys.stdout.flush()
        sleep(1)
        remaining -= 1
    print
def formatBw( bw ):
    """Format a bandwidth value (bits/s, number or string) with a unit.

    Divides by 1000 while the value exceeds 1000, stepping through
    bps -> Kbps -> Mbps -> Gbps.
    """
    value = float(bw)
    for unit in ( 'bps', 'Kbps', 'Mbps', 'Gbps' ):
        if value <= 1000 or unit == 'Gbps':
            return '%.2f %s' % ( value, unit )
        value /= 1000
class BackgroundException( Exception ):
    """Raised by the SIGTSTP handler in bgIperf to continue in the background."""
    pass
def do_bgIperf( self, line ):
    """CLI command: run an all-pairs background iperf between named hosts.

    Usage: bgIperf host1 host2 [...] [-t seconds]
    """
    args = line.split()
    if not args:
        output( 'Provide a list of hosts.\n' )
        # Fixed: previously fell through and ran bgIperf anyway.
        return
    # Parse and strip the optional '-t <seconds>' argument.  Fixed: the old
    # code deleted from args while enumerating it, which skipped the host
    # name that followed '-t <n>'.
    seconds = 10
    hostArgs = []
    i = 0
    while i < len(args):
        if args[i] == '-t':
            if i + 1 < len(args):
                try:
                    seconds = int(args[i + 1])
                except ValueError:
                    error( 'Could not parse number of seconds: %s', args[i+1] )
                i += 2
            else:
                # Trailing '-t' with no value: just drop it.
                i += 1
        else:
            hostArgs.append(args[i])
            i += 1
    hosts = []
    err = False
    for arg in hostArgs:
        if arg not in self.mn:
            err = True
            error( "node '%s' not in network\n" % arg )
        else:
            hosts.append( self.mn[ arg ] )
    # Only run when every named host resolved and the network supports it.
    if "bgIperf" in dir(self.mn) and not err:
        self.mn.bgIperf( hosts, seconds=seconds )
def do_gratuitousArp( self, line ):
    """CLI command: gratuitous ARP from the listed hosts (all if none given)."""
    hostNames = line.split()
    if "gratuitousArp" not in dir( self.mn ):
        output( 'Gratuitous ARP is not supported.\n' )
        return
    self.mn.gratuitousArp( hostNames )
# Register the extra commands on the Mininet CLI.
CLI.do_bgIperf = do_bgIperf
CLI.do_gratuitousArp = do_gratuitousArp
def run( topo, controllers=None, link=TCLink, autoSetMacs=True ):
    """Build and run an ONOSMininet network, then drop into the CLI.

    topo: Mininet topology object
    controllers: list of controller IPs; taken from sys.argv[1:] if omitted
    link: link class (default TCLink)
    autoSetMacs: assign MACs automatically (default True)
    """
    if not controllers and len( sys.argv ) > 1:
        controllers = sys.argv[ 1: ]
    # Fixed: the original combined the argv fallback and the error in one
    # if/else, so passing controllers explicitly always exited with an error.
    if not controllers:
        print( 'Need to provide a topology and list of controllers' )
        exit( 1 )
    setLogLevel( 'info' )
    net = ONOSMininet( topo=topo, controllers=controllers, link=link, autoSetMacs=autoSetMacs )
    net.start()
    CLI( net )
    net.stop()
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import encodeutils
import six
from keystone.common import config
from keystone.i18n import _
from keystone.openstack.common import log
# Global configuration and module logger.
CONF = config.CONF
LOG = log.getLogger(__name__)

# Tests use this to make exception message format errors fatal
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(Exception):
    """Base error class.

    Child classes should define an HTTP status code, title, and a
    message_format.
    """

    code = None            # HTTP status code
    title = None           # short HTTP reason phrase
    message_format = None  # %-style template rendered with kwargs

    def __init__(self, message=None, **kwargs):
        try:
            message = self._build_message(message, **kwargs)
        except KeyError:
            # if you see this warning in your logs, please raise a bug report
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise
            else:
                LOG.warning(_('missing exception kwargs (programmer error)'))
                # Fall back to the raw template rather than failing outright.
                message = self.message_format

        super(Error, self).__init__(message)

    def _build_message(self, message, **kwargs):
        """Builds and returns an exception message.

        :raises: KeyError given insufficient kwargs
        """
        if not message:
            try:
                message = self.message_format % kwargs
            except UnicodeDecodeError:
                try:
                    # Retry with every kwarg value safely decoded to unicode.
                    kwargs = dict([(k, encodeutils.safe_decode(v)) for k, v in
                                   six.iteritems(kwargs)])
                except UnicodeDecodeError:
                    # NOTE(jamielennox): This is the complete failure case
                    # at least by showing the template we have some idea
                    # of where the error is coming from
                    message = self.message_format
                else:
                    message = self.message_format % kwargs

        return message
class ValidationError(Error):
    """A required attribute was missing or malformed in the request."""
    message_format = _("Expecting to find %(attribute)s in %(target)s -"
                       " the server could not comply with the request"
                       " since it is either malformed or otherwise"
                       " incorrect. The client is assumed to be in error.")
    code = 400
    title = 'Bad Request'


class SchemaValidationError(ValidationError):
    """Schema validation failed; the detail text is supplied by the caller."""
    # NOTE(lbragstad): For whole OpenStack message consistency, this error
    # message has been written in a format consistent with WSME.
    message_format = _("%(detail)s")


class ValidationTimeStampError(Error):
    """A timestamp in the request could not be parsed."""
    message_format = _("Timestamp not in expected format."
                       " The server could not comply with the request"
                       " since it is either malformed or otherwise"
                       " incorrect. The client is assumed to be in error.")
    code = 400
    title = 'Bad Request'
class StringLengthExceeded(ValidationError):
    """A string attribute is longer than the backing column allows."""
    # Fixed missing space after the period in the user-visible message
    # ("exceeded.The" -> "exceeded. The").
    message_format = _("String length exceeded. The length of"
                       " string '%(string)s' exceeded the limit"
                       " of column %(type)s(CHAR(%(length)d)).")
class ValidationSizeError(Error):
    """A request attribute exceeded its maximum allowed size."""
    message_format = _("Request attribute %(attribute)s must be"
                       " less than or equal to %(size)i. The server"
                       " could not comply with the request because"
                       " the attribute size is invalid (too large)."
                       " The client is assumed to be in error.")
    code = 400
    title = 'Bad Request'


class PasswordVerificationError(Error):
    """The supplied password exceeds the maximum allowed length."""
    message_format = _("The password length must be less than or equal "
                       "to %(size)i. The server could not comply with the "
                       "request because the password is invalid.")
    code = 403
    title = 'Forbidden'


class TwoFactorSecurityAnswerVerificationError(Error):
    """The two-factor security answer exceeds the maximum allowed length."""
    message_format = _("The security answer length must be less than or equal "
                       "to %(size)i. The server could not comply with the "
                       "request because the security answer is invalid.")
    code = 403
    title = 'Forbidden'


class RegionDeletionError(Error):
    """A region cannot be deleted while endpoints reference it."""
    message_format = _("Unable to delete region %(region_id)s because it or "
                       "its child regions have associated endpoints.")
    code = 403
    title = 'Forbidden'


class PKITokenExpected(Error):
    """Signing certificates were requested but are not available."""
    message_format = _('The certificates you requested are not available. '
                       'It is likely that this server does not use PKI tokens '
                       'otherwise this is the result of misconfiguration.')
    code = 403
    title = 'Cannot retrieve certificates'
class SecurityError(Error):
    """Avoids exposing details of security failures, unless in debug mode."""
    amendment = _('(Disable debug mode to suppress these details.)')

    def _build_message(self, message, **kwargs):
        """Only returns detailed messages in debug mode."""
        if CONF.debug:
            # Debug deployments get the caller-supplied message (or the
            # rendered template) plus a reminder of why the detail is shown.
            return _('%(message)s %(amendment)s') % {
                'message': message or self.message_format % kwargs,
                'amendment': self.amendment}
        else:
            # Production: always use the generic template only.
            return self.message_format % kwargs
class Unauthorized(SecurityError):
    """Authentication is required or has failed (HTTP 401)."""
    message_format = _("The request you have made requires authentication.")
    code = 401
    title = 'Unauthorized'


class AuthPluginException(Unauthorized):
    """Base class for errors raised by authentication plugins."""
    message_format = _("Authentication plugin error.")

    def __init__(self, *args, **kwargs):
        super(AuthPluginException, self).__init__(*args, **kwargs)
        # Plugins may populate this with extra authentication payload.
        self.authentication = {}


class MissingGroups(Unauthorized):
    """A federation mapping produced no valid groups."""
    message_format = _("Unable to find valid groups while using "
                       "mapping %(mapping_id)s")


class AuthMethodNotSupported(AuthPluginException):
    """The requested authentication method is not configured."""
    message_format = _("Attempted to authenticate with an unsupported method.")

    def __init__(self, *args, **kwargs):
        super(AuthMethodNotSupported, self).__init__(*args, **kwargs)
        # Advertise the supported methods back to the client.
        self.authentication = {'methods': CONF.auth.methods}


class AdditionalAuthRequired(AuthPluginException):
    """Multi-step authentication: further steps are still required."""
    message_format = _("Additional authentications steps required.")

    def __init__(self, auth_response=None, **kwargs):
        super(AdditionalAuthRequired, self).__init__(message=None, **kwargs)
        self.authentication = auth_response
class Forbidden(SecurityError):
    """The caller is authenticated but not authorized (HTTP 403)."""
    message_format = _("You are not authorized to perform the"
                       " requested action.")
    code = 403
    title = 'Forbidden'


class ForbiddenAction(Forbidden):
    """Forbidden, naming the specific action that was denied."""
    message_format = _("You are not authorized to perform the"
                       " requested action: %(action)s")


class ImmutableAttributeError(Forbidden):
    """An attempt was made to change an attribute that cannot be changed."""
    message_format = _("Could not change immutable attribute(s) "
                       "'%(attributes)s' in target %(target)s")


class CrossBackendNotAllowed(Forbidden):
    """Group membership may not span identity backends."""
    message_format = _("Group membership across backend boundaries is not "
                       "allowed, group in question is %(group_id)s, "
                       "user is %(user_id)s")


class InvalidPolicyAssociation(Forbidden):
    """The combination of entities in a policy association is not allowed."""
    message_format = _("Invalid mix of entities for policy association - "
                       "only Endpoint, Service or Region+Service allowed. "
                       "Request was - Endpoint: %(endpoint_id)s, "
                       "Service: %(service_id)s, Region: %(region_id)s")
class NotFound(Error):
    """A referenced resource does not exist (HTTP 404)."""
    message_format = _("Could not find: %(target)s")
    code = 404
    title = 'Not Found'


# One NotFound subclass per resource type; each names the missing id.

class EndpointNotFound(NotFound):
    message_format = _("Could not find endpoint: %(endpoint_id)s")


class MetadataNotFound(NotFound):
    """(dolph): metadata is not a user-facing concept,
    so this exception should not be exposed
    """
    message_format = _("An unhandled exception has occurred:"
                       " Could not find metadata.")


class PolicyNotFound(NotFound):
    message_format = _("Could not find policy: %(policy_id)s")


class PolicyAssociationNotFound(NotFound):
    message_format = _("Could not find policy association")


class RoleNotFound(NotFound):
    message_format = _("Could not find role: %(role_id)s")


class RegionNotFound(NotFound):
    message_format = _("Could not find region: %(region_id)s")


class ServiceNotFound(NotFound):
    message_format = _("Could not find service: %(service_id)s")


class DomainNotFound(NotFound):
    message_format = _("Could not find domain: %(domain_id)s")


class ProjectNotFound(NotFound):
    message_format = _("Could not find project: %(project_id)s")


class TokenNotFound(NotFound):
    message_format = _("Could not find token: %(token_id)s")


class UserNotFound(NotFound):
    message_format = _("Could not find user: %(user_id)s")


class GroupNotFound(NotFound):
    message_format = _("Could not find group: %(group_id)s")


class MappingNotFound(NotFound):
    message_format = _("Could not find mapping: %(mapping_id)s")


class TrustNotFound(NotFound):
    message_format = _("Could not find trust: %(trust_id)s")


# NOTE: 403 rather than 404 — the trust exists but is exhausted.
class TrustUseLimitReached(Forbidden):
    message_format = _("No remaining uses for trust: %(trust_id)s")


class CredentialNotFound(NotFound):
    message_format = _("Could not find credential: %(credential_id)s")


class VersionNotFound(NotFound):
    message_format = _("Could not find version: %(version)s")


class EndpointGroupNotFound(NotFound):
    message_format = _("Could not find Endpoint Group: %(endpoint_group_id)s")


class IdentityProviderNotFound(NotFound):
    message_format = _("Could not find Identity Provider: %(idp_id)s")


class FederatedProtocolNotFound(NotFound):
    message_format = _("Could not find federated protocol %(protocol_id)s for"
                       " Identity Provider: %(idp_id)s")


class PublicIDNotFound(NotFound):
    # This is used internally and mapped to either User/GroupNotFound or,
    # Assertion before the exception leaves Keystone.
    message_format = "%(id)s"


class Conflict(Error):
    """The request conflicts with existing state (HTTP 409)."""
    message_format = _("Conflict occurred attempting to store %(type)s -"
                       " %(details)s")
    code = 409
    title = 'Conflict'


class RequestTooLarge(Error):
    """The request body exceeds the allowed size (HTTP 413)."""
    message_format = _("Request is too large.")
    code = 413
    title = 'Request is too large.'
class UnexpectedError(SecurityError):
    """Avoids exposing details of failures, unless in debug mode."""
    _message_format = _("An unexpected error prevented the server "
                        "from fulfilling your request.")

    debug_message_format = _("An unexpected error prevented the server "
                             "from fulfilling your request: %(exception)s")

    @property
    def message_format(self):
        """Return the generic message format string unless debug is enabled."""
        if CONF.debug:
            return self.debug_message_format
        return self._message_format

    def _build_message(self, message, **kwargs):
        if CONF.debug and 'exception' not in kwargs:
            # Ensure that exception has a value to be extra defensive for
            # substitutions and make sure the exception doesn't raise an
            # exception.
            kwargs['exception'] = ''
        return super(UnexpectedError, self)._build_message(message, **kwargs)

    # NOTE: class attributes intentionally follow the methods here.
    code = 500
    title = 'Internal Server Error'
# Subclasses of UnexpectedError override only the debug-mode message;
# the production message stays generic.

class TrustConsumeMaximumAttempt(UnexpectedError):
    debug_message_format = _("Unable to consume trust %(trust_id)s, unable to "
                             "acquire lock.")


class CertificateFilesUnavailable(UnexpectedError):
    debug_message_format = _("Expected signing certificates are not available "
                             "on the server. Please check Keystone "
                             "configuration.")


class MalformedEndpoint(UnexpectedError):
    debug_message_format = _("Malformed endpoint URL (%(endpoint)s),"
                             " see ERROR log for details.")


class MappedGroupNotFound(UnexpectedError):
    debug_message_format = _("Group %(group_id)s returned by mapping "
                             "%(mapping_id)s was not found in the backend.")


class MetadataFileError(UnexpectedError):
    message_format = _("Error while reading metadata file, %(reason)s")


class NotImplemented(Error):
    """The requested action exists in the API but is not implemented (501)."""
    message_format = _("The action you have requested has not"
                       " been implemented.")
    code = 501
    title = 'Not Implemented'


class Gone(Error):
    """The requested service has been removed from this server (410)."""
    message_format = _("The service you have requested is no"
                       " longer available on this server.")
    code = 410
    title = 'Gone'


class ConfigFileNotFound(UnexpectedError):
    debug_message_format = _("The Keystone configuration file %(config_file)s "
                             "could not be found.")


class MultipleSQLDriversInConfig(UnexpectedError):
    message_format = _('The Keystone domain configuration file '
                       '%(config_file)s defines an additional SQL driver - '
                       'only one is permitted.')


class MigrationNotProvided(Exception):
    """A module was expected to ship database migrations but does not."""
    def __init__(self, mod_name, path):
        super(MigrationNotProvided, self).__init__(_(
            "%(mod_name)s doesn't provide database migrations. The migration"
            " repository path at %(path)s doesn't exist or isn't a directory."
        ) % {'mod_name': mod_name, 'path': path})


class UnsupportedTokenVersionException(Exception):
    """Token version is unrecognizable or unsupported."""
    pass


class SAMLSigningError(UnexpectedError):
    debug_message_format = _('Unable to sign SAML assertion. It is likely '
                             'that this server does not have xmlsec1 '
                             'installed, or this is the result of '
                             'misconfiguration. Reason %(reason)s')
    title = 'Error signing SAML assertion'
| |
import matplotlib.pyplot as plt
import matplotlib as mpl
import itertools
import palettable
import pandas as pd
import statsmodels.api as sm
import numpy as np
from myfisher import *
from objhist import *
from custom_legends import *
try:
import networkx as nx
from networkx.drawing.layout import spring_layout, spectral_layout
NETWORKX = True
except ImportError:
NETWORKX = False
try:
import plotly.plotly as py
import plotly.graph_objs as pygo
PLOTLY = True
except ImportError:
PLOTLY = False
__all__ = ['catcorr',
'layouts',
'generate_test_data',
'test_edge',
'cull_rows',
'compute_relations']
layouts = ['twopi', 'fdp', 'circo', 'neato', 'dot', 'spring', 'spectral']
def color2str(col):
    """Convert an RGB tuple of floats in [0, 1] to a plotly 'rgb(r, g, b)' string."""
    # PEP 8: a def instead of a lambda bound to a name.
    return 'rgb' + str(tuple((np.array(col) * 256).round().astype(int)))
def compute_relations(df, weight_col=None, min_n=10):
    """Test for associations between categorical variables (columns)
    in df by testing pairs of values within pairs of columns using
    Fisher's exact test. This is not the best way to model associations
    with multinomial distributed variables, but it will work as an initial screen.

    Parameters
    ----------
    df : pd.DataFrame
        Data contains columns of categorical variables.
    weight_col : str
        Column name indicating counts for each row. If None
        will use one count per row. The weight column itself is
        excluded from the tested pairs.
    min_n : int
        Minimum number of counts required for testing.

    Returns
    -------
    res : pd.DataFrame
        Results, one row per test, with multiplicity adjustment"""
    # Fixed: weight_col was previously accepted but silently ignored.
    cols = [c for c in df.columns if c != weight_col]
    res = []
    for col1, col2 in itertools.combinations(cols, 2):
        for val1, val2 in itertools.product(df[col1].unique(), df[col2].unique()):
            mask = (df[col1] == val1) & (df[col2] == val2)
            if weight_col is None:
                w = mask.sum()
            else:
                w = (mask * df[weight_col]).sum()
            if w > min_n:
                OR, pvalue = test_edge(df, (col1, val1), (col2, val2),
                                       weight_col=weight_col)
                res.append({'OR':OR, 'pvalue':pvalue, col1:val1, col2:val2})
    resDf = pd.DataFrame(res)
    # FDR (Benjamini-Hochberg) adjustment across all performed tests.
    resDf.loc[:, 'qvalue'] = sm.stats.multipletests(resDf['pvalue'].values, method='fdr_bh')[1]
    resDf = resDf.sort_values(by='pvalue', ascending=True)
    return resDf
def compute_graph(df):
    """Compute odds-ratios, p-values and FDR-adjusted q-values for each edge"""
    # First pass: test every (column, value) pair with a positive joint
    # count; untested edges keep OR = 1, p = 1 and are excluded from the
    # multiplicity adjustment.
    edgeKeys = []
    pvalueArr = []
    ORArr = []
    tested = []
    for col1, col2 in itertools.combinations(df.columns, 2):
        for val1, val2 in itertools.product(df[col1].unique(), df[col2].unique()):
            w = ((df[col1] == val1) & (df[col2] == val2)).sum()
            if w > 0:
                OR, pvalue = test_edge(df, (col1, val1), (col2, val2))
                tested.append(True)
            else:
                pvalue = 1.
                OR = 1.
                tested.append(False)
            edgeKeys.append(((col1, val1), (col2, val2)))
            pvalueArr.append(pvalue)
            ORArr.append(OR)

    pvalueArr, tested, ORArr = np.array(pvalueArr), np.array(tested), np.array(ORArr)
    # Adjust only the edges that were actually tested.
    qvalueArr = np.ones(pvalueArr.shape)
    qvalueArr[tested] = sm.stats.multipletests(pvalueArr[tested], method='fdr_bh')[1]

    g = nx.Graph()
    """Add a node for each unique value in each column with name: col_value"""
    for col in df.columns:
        for val in df[col].unique():
            freq = (df[col] == val).sum() / df.shape[0]
            g.add_node((col, val), freq=freq)
    """Add edges for each unique pair of values
    with edgewidth proportional to frequency of pairing"""
    for col1, col2 in itertools.combinations(df.columns, 2):
        for val1, val2 in itertools.product(df[col1].unique(), df[col2].unique()):
            w = ((df[col1]==val1) & (df[col2]==val2)).sum()
            if w > 0:
                # Look up the statistics computed in the first pass.
                key = edgeKeys.index(((col1, val1), (col2, val2)))
                dat = dict(weight=w/df.shape[0])
                dat['OR'] = ORArr[key]
                dat['pvalue'] = pvalueArr[key]
                dat['qvalue'] = qvalueArr[key]
                g.add_edge((col1, val1), (col2, val2), **dat)
    return g
def catcorr(df, layout='spring', mode='mpl', titleStr='', testSig=0.05, sRange=(50, np.inf), wRange=(0.5, np.inf), labelThresh=0.05, fontsize=14):
    """Make a network plot showing the correlations among the
    categorical variables in the columns of df.

    Each node is a unique value in one of the columns
    (Node is specified as a tuple (column, value))
    Node size is proportional to the value's frequency.

    Each edge is a unique pair of values in two columns.
    Edge width is proportional to the frequency of the pairing.

    Parameters
    ----------
    df : pandas.DataFrame
        Nodes will be created for each unique value within
        each column of this object
    layout : str
        Choose one of [twopi, fdp, circo, neato, dot]
        to change the layout of the nodes.
        See Graphviz for details about each layout.
    mode : str
        Specifies whether the resulting plot will be a
        matplotlib figure (default: 'mpl')
        OR if any other value it specifies the filename
        of a figure to be posted to plot.ly
        (user needs to be logged in previously).
    titleStr : str
        Printed at the top of the plot.
    testSig : float
        If non-zero then testSig is used as the significance cutoff for plotting a highlighted edge.
        For each edge, tests the statistical hypothesis that number of observed pairings
        between values in two columns is significantly different than what one would expect
        based on their marginal frequencies. Note: there is FDR-adjustment for multiple comparisons.
    sRange,wRange : tuples of length 2
        Contains the min and max node sizes or edge widths in points, for scaling
    labelThresh : float
        Only nodes with frequency >= labelThresh get a text label (mpl mode).
    fontsize : int
        Font size of the node labels (mpl mode).

    Examples
    --------
    >>> import plotly.plotly as py

    >>> py.sign_in([username], [api_key])

    >>> df = generate_test_data()

    >>> catcorr(df, layout = 'neato', mode = 'catcorr_example')

    [Posts a catcorr plot to plot.ly]
    """
    """Compute odds-ratios, p-values and FDR-adjusted q-values for each edge"""
    g = compute_graph(df)

    """Compute attributes of edges and nodes"""
    edgewidth = np.array([d['weight'] for n1, n2, d in g.edges(data=True)])
    nodesize = np.array([d['freq'] for n, d in g.nodes(data=True)])

    # One qualitative color per column (3 <= nColors <= 9, Set1 limits).
    nColors = np.min([np.max([len(df.columns), 3]), 9])
    colors = palettable.colorbrewer.get_map('Set1', 'Qualitative', nColors).mpl_colors
    cmap = {c:color for c, color in zip(df.columns, itertools.cycle(colors))}
    nodecolors = [cmap[n[0]] for n in g.nodes()]
    if layout == 'twopi':
        """If using this layout specify the most common node as the root"""
        freq = {n:d['freq'] for n, d in g.nodes(data=True)}
        pos = nx.graphviz_layout(g, prog=layout, root=np.max(list(freq.keys()), key=freq.get))
    elif layout == 'spring':
        pos = spring_layout(g)
    elif layout == 'spectral':
        pos = spectral_layout(g)
    else:
        pos = nx.graphviz_layout(g, prog=layout)

    """Use either matplotlib or plot.ly to plot the network"""
    if mode == 'mpl':
        plt.clf()
        figh = plt.gcf()
        axh = figh.add_axes([0.04, 0.04, 0.92, 0.92])
        axh.axis('off')
        figh.set_facecolor('white')

        #nx.draw_networkx_edges(g,pos,alpha=0.5,width=sznorm(edgewidth,mn=0.5,mx=10), edge_color='k')
        #nx.draw_networkx_nodes(g,pos,node_size=sznorm(nodesize,mn=500,mx=5000),node_color=nodecolors,alpha=1)
        ew = szscale(edgewidth, mn=wRange[0], mx=wRange[1])

        for es, e in zip(ew, g.edges()):
            x1, y1=pos[e[0]]
            x2, y2=pos[e[1]]
            props = dict(color='black', alpha=0.4, zorder=1)
            # Significant edges are highlighted: orange for OR > 1,
            # green for OR < 1.
            if testSig and g[e[0]][e[1]]['qvalue'] < testSig:
                if g[e[0]][e[1]]['OR'] > 1.:
                    props['color']='orange'
                else:
                    props['color']='green'
                props['alpha']=0.8
            plt.plot([x1, x2], [y1, y2], '-', lw=es, **props)

        plt.scatter(x=[pos[s][0] for s in g.nodes()],
                    y=[pos[s][1] for s in g.nodes()],
                    s=szscale(nodesize, mn=sRange[0], mx=sRange[1]), #Units for scatter is (size in points)**2
                    c=nodecolors,
                    alpha=1, zorder=2)
        for n, d in g.nodes(data=True):
            if d['freq'] >= labelThresh:
                plt.annotate(n[1],
                             xy=pos[n],
                             fontname='Bitstream Vera Sans',
                             size=fontsize,
                             weight='bold',
                             color='black',
                             va='center',
                             ha='center')
        colorLegend(labels=df.columns,
                    colors=[c for x, c in zip(df.columns, colors)],
                    loc=0,
                    title='N = %1.0f' % (~df.isnull()).all(axis=1).sum(axis=0))
        plt.title(titleStr)
    elif PLOTLY:
        """Send the plot to plot.ly"""
        data = []
        for es, e in zip(szscale(edgewidth, mn=wRange[0], mx=wRange[1]), g.edges()):
            x1, y1=pos[e[0]]
            x2, y2=pos[e[1]]
            props = dict(color='black', opacity=0.4)
            if testSig and g[e[0]][e[1]]['qvalue'] < testSig:
                if g[e[0]][e[1]]['OR'] > 1.:
                    props['color']='orange'
                else:
                    props['color']='green'
                props['opacity']=0.8
            tmp = pygo.Scatter(x=[x1, x2],
                               y=[y1, y2],
                               mode='lines',
                               line=pygo.Line(width=es, **props),
                               showlegend=False)
            data.append(tmp)
        """May need to add sqrt() to match mpl plots"""
        nodesize = szscale(nodesize, mn=sRange[0], mx=sRange[1]) #Units for plotly.Scatter is (size in points)
        # One scatter trace per column so the legend groups by column.
        for col in list(cmap.keys()):
            ind = [nodei for nodei, node in enumerate(g.nodes()) if node[0]==col]
            tmp = pygo.Scatter(x=[pos[s][0] for nodei, s in enumerate(g.nodes()) if nodei in ind],
                               y=[pos[s][1] for nodei, s in enumerate(g.nodes()) if nodei in ind],
                               mode='markers',
                               name=col,
                               text=[node[1] for nodei, node in enumerate(g.nodes()) if nodei in ind],
                               textposition='middle center',
                               marker=pygo.Marker(size=nodesize[ind],
                                                  color=[color2str(nc) for nodei, nc in enumerate(nodecolors) if nodei in ind]))
            data.append(tmp)
        layout = pygo.Layout(title=titleStr,
                             showlegend=True,
                             xaxis=pygo.XAxis(showgrid=False, zeroline=False),
                             yaxis=pygo.YAxis(showgrid=False, zeroline=False))
        fig = pygo.Figure(data=data, layout=layout)
        plot_url = py.plot(fig, filename='catcorr_'+mode)
def generate_test_data(nrows=100):
    """Generate a pd.DataFrame() with correlations that can be visualized by catcorr()

    Parameters
    ----------
    nrows : int
        Number of rows in the generated frame.

    Returns
    -------
    testDf : pd.DataFrame
        Columns ColA/ColB/ColC of correlated categorical values."""
    # Fixed: bare `zeros` raised NameError; qualify with np.
    testDf = pd.DataFrame(np.zeros((nrows, 3), dtype=object), columns=['ColA', 'ColB', 'ColC'])
    # Use objhist to generate specific frequencies of value triples.
    oh = objhist([])
    oh.update({('X', 'A', 'foo'):2,
               ('X', 'A', 'bar'):5,
               ('X', 'B', 'foo'):1,
               ('X', 'B', 'bar'):10,
               ('Y', 'A', 'bar'):10,
               ('Y', 'B', 'bar'):7})
    for i, v in enumerate(oh.generateRandomSequence(nrows)):
        # Frame-level .loc avoids pandas chained-assignment pitfalls.
        testDf.loc[i, 'ColA'] = v[0]
        testDf.loc[i, 'ColB'] = v[1]
        testDf.loc[i, 'ColC'] = v[2]
    return testDf
def sznorm(vec, mx=1, mn=0):
    """Normalize values of vec to [mn, mx] interval.

    Fixed: no longer mutates the caller's array (the original shifted it
    in place with ``vec -= nanmin`` before reassigning locally, leaving
    the caller with a half-transformed array).

    Parameters
    ----------
    vec : array-like of float
    mx, mn : float
        Upper/lower bound of the output interval.

    Returns
    -------
    np.ndarray
        New array; NaNs and underflows are clamped to mn.
    """
    vec = np.asarray(vec, dtype=float) - np.nanmin(vec)
    vec = vec / np.nanmax(vec)
    vec = vec * (mx - mn) + mn
    # Constant input yields 0/0 -> NaN; clamp those (and underflow) to mn.
    vec[np.isnan(vec)] = mn
    vec[vec < mn] = mn
    return vec
def szscale(vec, mx=np.inf, mn=1):
    """Normalize values of vec to [mn, mx] interval
    such that sz ratios remain representative."""
    # Scale so the smallest (non-NaN) value lands exactly on mn.
    scaled = vec * (mn / np.nanmin(vec))
    # Cap at mx; NaNs propagate through minimum and are replaced by mn.
    scaled = np.minimum(scaled, mx)
    scaled[np.isnan(scaled)] = mn
    return scaled
def cull_rows(df, cols, freq):
    """Remove all rows from df that contain any column
    with a value that is less frequent than freq.

    Parameters
    ----------
    df : pandas.DataFrame
    cols : list
        List of column indices in df
    freq : float
        Frequency threshold for row removal.

    Returns
    -------
    outDf : pandas.DataFrame
        A copy of df with rows removed."""
    outDf = df.copy()
    # Frequencies are always computed from the original df, so the order
    # of filtering does not affect the result.
    for c in cols:
        valueFreqs = objhist(df[c]).freq()
        allowed = set(v for v in valueFreqs.keys() if valueFreqs[v] > freq)
        outDf = outDf.loc[outDf[c].map(lambda v: v in allowed)]
    return outDf
def test_edge(df, node1, node2, weight_col=None, verbose=False):
    """Test if the occurence of nodeA paired with nodeB is more/less common than expected.

    Parameters
    ----------
    node1, node2 : tuple (column, value)
        Specify each node by its column name and the value.
    weight_col : str
        Optional column of per-row counts/weights; one count per row if None.
    verbose : bool
        Print the contingency table and test result.

    Returns
    -------
    OR : float
        Odds-ratio associated with the 2x2 contingency table
    pvalue : float
        P-value associated with the Fisher's exact test that H0: OR = 1"""
    col1, val1 = node1
    col2, val2 = node2

    if weight_col is None:
        tmp = df[[col1, col2]].dropna()
        weight_col = 'weights'
        tmp = tmp.assign(weights=np.ones(tmp.shape[0]))
    else:
        tmp = df[[col1, col2, weight_col]].dropna()

    # Weighted 2x2 contingency table: rows = node1 -/+, cols = node2 -/+.
    tab = np.zeros((2, 2))
    tab[0, 0] = (((tmp[col1]!=val1) & (tmp[col2]!=val2)) * tmp[weight_col]).sum()
    tab[0, 1] = (((tmp[col1]!=val1) & (tmp[col2]==val2)) * tmp[weight_col]).sum()
    tab[1, 0] = (((tmp[col1]==val1) & (tmp[col2]!=val2)) * tmp[weight_col]).sum()
    tab[1, 1] = (((tmp[col1]==val1) & (tmp[col2]==val2)) * tmp[weight_col]).sum()

    # Add 1 to zero cells to keep the odds-ratio finite.
    # Fixed: the verbose message referenced an undefined name `ind`.
    nzero = (tab == 0).sum()
    if nzero:
        if verbose:
            print('Adding one to %d cells with zero counts.' % nzero)
            print()
        tab[tab == 0] = 1

    OR, pvalue = fisherTest(tab)

    if verbose:
        print('Node1: %s, %s' % node1)
        print('Node2: %s, %s' % node2)
        print()
        print(pd.DataFrame(tab, index=['Node1(-)', 'Node1(+)'], columns = ['Node2(-)', 'Node2(+)']))
        print('\nOR: %1.2f\nP-value: %1.3f' % (OR, pvalue))
    return OR, pvalue
| |
"""===========================
Variant annotation pipeline
===========================
:Author: Andreas Heger & David Sims
:Release: $Id$
:Date: |today|
:Tags: Python
The Variants pipeline attempts to annotate variants in
a :term:`vcf` formatted file. It computes
1. The effects of SNPs on transcripts and genes
2. The effects of indels on transcripts and genes
This pipeline works on a single genome.
Overview
========
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
The sphinxreport report requires a :file:`conf.py` and
:file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start
with, use the files supplied with the Example_ data.
Input
-----
Variants
++++++++
Variants are read from a :term:`vcf` formatted file called
:file:`variants.vcf.gz`. The file is assumed to have been compressed
with :file:`bgzip` and indexed with tabix.
The tracks are taken from the headers in the :term:`vcf` file. Please
avoid any special characters like ``_][*.+-`` within strain names.
The pipeline expects the following information within the genotype
field in the :term:`vcf` file:
GT
The genotype
DP
The read depth
Optional inputs
+++++++++++++++
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
+--------------------+-------------------+------------------------------------------------+
|*Program* |*Version* |*Purpose* |
+--------------------+-------------------+------------------------------------------------+
|polyphen_ |>=2.0.23 |prediction of deleterious substitutions |
+--------------------+-------------------+------------------------------------------------+
Pipeline output
===============
The major output is in the database file :file:`csvdb`.
Example
=======
Example data is available at
http://www.cgat.org/~andreas/sample_data/pipeline_variants.tgz. To
run the example, simply unpack and untar::
wget http://www.cgat.org/~andreas/sample_data/pipeline_variants.tgz
tar -xvzf pipeline_variants.tgz
cd pipeline_variants
python <srcdir>/pipeline_variants.py make full
.. note::
For the pipeline to run, install the :doc:`pipeline_annotations` as well.
Glossary
========
.. glossary::
polyphen
polyphen_ - a program to predict the deleteriousness of substitutions
.. _polyphen: http://genetics.bwh.harvard.edu/pph2/dokuwiki/start
Code
====
"""
from ruffus import *
import sys
import gzip
import os
import itertools
import re
import math
import collections
import sqlite3
import CGAT.Experiment as E
import CGAT.Database as Database
import scipy.stats
import CGAT.Stats as Stats
import pysam
import CGATPipelines.PipelineTracks as PipelineTracks
###################################################################
###################################################################
###################################################################
# Pipeline configuration
# Parameters are resolved in order: <srcdir>/pipeline.ini, ../pipeline.ini,
# ./pipeline.ini — later files override earlier ones.
import CGATPipelines.Pipeline as P
P.getParameters(["%s/pipeline.ini" %
                 os.path.splitext(__file__)[0], "../pipeline.ini",
                 "pipeline.ini"])
PARAMS = P.PARAMS
# Parameters exported by the companion annotation pipeline (read-only here).
PARAMS_ANNOTATIONS = P.peekParameters(
    PARAMS["annotations_dir"],
    "pipeline_annotations.py")
# Separator used when joining compound track names.
SEPARATOR = "|"
###################################################################
###################################################################
###################################################################
# Helper functions mapping tracks to conditions, etc
class TracksVCF (PipelineTracks.Tracks):
    """Track collection initialised from the sample columns of a
    :term:`vcf` file."""

    def load(self, filename, exclude=None):
        '''load tracks from a vcf file.

        :param filename: path to a (possibly compressed) vcf file; a
            missing file yields an empty track collection.
        :param exclude: optional iterable of regular-expression patterns;
            samples matching any pattern are skipped.
        :return: self, with ``self.tracks`` populated.
        '''
        tracks = []
        v = pysam.VCF()
        v.setversion(40)
        if not os.path.exists(filename):
            self.tracks = tracks
            return self
        v.connect(filename)
        if exclude:
            to_exclude = [re.compile(x) for x in exclude]
        for sample in v.getsamples():
            if exclude:
                # BUGFIX: "skip" was never initialised, so the first
                # non-matching sample raised NameError and a True value
                # leaked from one sample to the next; reset it per sample.
                skip = False
                for x in to_exclude:
                    if x.search(sample):
                        skip = True
                        break
                if skip:
                    continue
            tracks.append(self.factory(sample))
        self.tracks = tracks
        return self
TRACKS = TracksVCF(PipelineTracks.Sample).load("variants.vcf.gz")
###################################################################
###################################################################
###################################################################
# Database connectivity
def connect():
    '''Open the project database and attach the annotation database.

    Returns an sqlite3 connection on which annotation tables are
    reachable under the ``annotations.`` prefix.
    '''
    dbh = sqlite3.connect(PARAMS["database_name"])
    attach_sql = "ATTACH DATABASE '%s' as annotations" % (
        PARAMS["annotations_database"])
    cursor = dbh.cursor()
    cursor.execute(attach_sql)
    cursor.close()
    return dbh
###################################################################
###################################################################
# Annotations
###################################################################
###################################################################
@files([("variants.vcf.gz", "%s.annotations.gz" % x, x) for x in TRACKS])
def buildAnnotations(infile, outfile, sample):
    """Annotate the SNPs of one sample with genomic context.

    Runs ``cgat snp2table`` on *sample*'s column of the shared VCF and
    writes a gzipped annotation table to *outfile*.
    """
    to_cluster = True
    bases = "annotations_bases"
    # NOTE: P.run() interpolates %(...)s placeholders from this frame's
    # locals (infile, sample, bases, outfile) and from PARAMS
    # (genome_dir, genome) — the local variable names must not change.
    statement = """cgat snp2table
    --input-format=vcf
    --vcf-file=%(infile)s
    --vcf-sample=%(sample)s
    --genome-file=%(genome_dir)s/%(genome)s
    --annotations-tsv-file=%(bases)s
    --log=%(outfile)s.log
    | gzip > %(outfile)s """
    P.run()
###################################################################
###################################################################
###################################################################
@transform(buildAnnotations,
           suffix('.annotations.gz'),
           '_annotations.load')
def loadAnnotations(infile, outfile):
    '''Load one track's variant annotations into the database.'''
    # keep gene_id textual and indexed; base qualities stay raw text
    load_options = " ".join((
        "--map=gene_id:str",
        "--add-index=gene_id",
        "--map=base_qualities:text",
    )) + " "
    P.load(infile, outfile, options=load_options)
@merge(buildAnnotations, 'annotations.load')
def mergeAnnotations(infiles, outfile):
    '''Concatenate all per-track annotation files and load them into a
    single database table.

    A "track" column (derived from each file name) is prepended to every
    data row so the origin of each record stays identifiable.
    '''
    outf = open('anno.txt', 'w')
    first = True
    for f in infiles:
        track = P.snip(os.path.basename(f), ".annotations.gz")
        if not os.path.exists(f):
            E.warn("File %s missing" % f)
            continue
        # NOTE(review): gzip.open(..., "r") yields bytes on Python 3;
        # confirm the interpreter or switch to "rt".
        with gzip.open(f, "r") as inf:
            lines = inf.readlines()
        if not lines:
            # robustness: skip empty files instead of IndexError on lines[0]
            continue
        if first:
            # header comes from the first non-empty file
            outf.write("%s\t%s" % ("track", lines[0]))
            first = False
        for i in range(1, len(lines)):
            outf.write("%s\t%s" % (track, lines[i]))
    outf.close()
    # BUGFIX: the data are written to 'anno.txt' but were previously
    # loaded from the non-existent 'anno.text'.
    P.load('anno.txt',
           outfile)
@transform(buildAnnotations,
           suffix('.annotations.gz'),
           '_annotations.summary')
def summarizeAnnotations(infile, outfile):
    '''Count substitutions per (code, reference_base, genotype) category
    for one track's annotation file.'''
    # count substitutions for each category; the %% escapes survive the
    # %(...)s interpolation performed by P.run() from this frame's locals.
    statement = '''gunzip
    < %(infile)s
    | cgat csv_cut code reference_base genotype variant_type
    | awk '$4 == "variant_type" { printf("%%s-%%s-%%s\\tcounts\\n", $1,$2,$3); }
           $4 == "E" || $4 == "O" {printf("%%s-%%s-%%s\\t1\\n", $1,$2,$3)}'
    | sort
    | uniq -c
    | awk 'BEGIN{ printf("code-reference_base-genotype\\tcounts\\n" ); } \
           $2 !~ /^code/ {printf("%%s\\t%%i\\n",$2,$1);}'
    | perl -p -i -e "s/-/\\t/g unless (/^#/)"
    > %(outfile)s
    '''
    P.run()
@transform(summarizeAnnotations,
           suffix('_annotations.summary'),
           '_annotations_summary.load')
def loadAnnotationsSummary(infile, outfile):
    '''Load a per-track annotation summary into the database.'''
    # index the substitution code for fast per-code queries
    opts = "--add-index=code"
    P.load(infile, outfile, options=opts)
###################################################################
###################################################################
# Effects
###################################################################
###################################################################
@files([("variants.vcf.gz", "%s.effects.gz" % x, x) for x in TRACKS])
def buildEffects(infile, outfile, sample):
    """Predict transcript-level effects of one sample's SNPs.

    Runs ``cgat snp2counts`` in transcript-effects mode; besides the
    main gzipped table it emits per-section files named
    ``<outfile>.<section>.gz`` via --output-filename-pattern.
    """
    seleno = "seleno.list"
    transcripts = os.path.join(
        PARAMS["annotations_dir"], PARAMS_ANNOTATIONS["interface_geneset_cds_gtf"])
    # P.run() interpolates the placeholders from this frame's locals and
    # PARAMS — keep the local variable names as they are.
    statement = """cgat snp2counts
    --genome-file=%(genome_dir)s/%(genome)s
    --vcf-file=%(infile)s
    --input-format=vcf
    --vcf-sample=%(sample)s
    --module=transcript-effects
    --seleno-tsv-file=%(seleno)s
    --exons-file=%(transcripts)s
    --output-filename-pattern=%(outfile)s.%%s.gz
    --log=%(outfile)s.log
    | gzip > %(outfile)s """
    P.run()
###################################################################
###################################################################
###################################################################
@transform(buildEffects, suffix(".effects.gz"), "_effects.load")
def loadEffects(infile, outfile):
    '''Load one track's transcript effects into database tables.'''
    track_root = infile[:-len(".effects.gz")]
    P.load(infile,
           outfile,
           tablename=track_root + "_effects",
           options="--add-index=transcript_id")
    sub_options = ("--add-index=transcript_id "
                   "--allow-empty-file "
                   "--ignore-column=seq_na "
                   "--ignore-column=seq_aa")
    # NOTE(review): every sub-table is loaded from the same *infile*
    # although buildEffects writes per-section files — confirm intended.
    for section in ("cds", "intron", "splicing", "translation"):
        P.load(infile,
               outfile,
               tablename=track_root + "_effects_" + section,
               options=sub_options)
@merge(buildEffects, "effects.load")
def mergeEffects(infiles, outfile):
    '''Concatenate all per-track effect tables (and their cds/intron/
    splicing/translation/genes sub-tables) and load each into a single
    database table, prefixing every row with a "track" column.

    Fixes two defects: the main table was loaded from the misspelled
    "effect.txt", and the sub-table name used the undefined variable
    ``tabelname`` (NameError).
    '''
    tablename = P.toTable(outfile)
    outf = open('effects.txt', 'w')
    first = True
    for f in infiles:
        track = P.snip(os.path.basename(f), ".effects.gz")
        if not os.path.exists(f):
            E.warn("File %s missing" % f)
            continue
        # NOTE(review): gzip.open(..., "r") yields bytes on Python 3 —
        # confirm interpreter or use "rt".
        with gzip.open(f, "r") as inf:
            lines = inf.readlines()
        if not lines:
            # robustness: empty file would otherwise IndexError on lines[0]
            continue
        if first:
            outf.write("%s\t%s" % ("track", lines[0]))
            first = False
        for i in range(1, len(lines)):
            outf.write("%s\t%s" % (track, lines[i]))
    outf.close()
    # BUGFIX: load the file written above, not the misspelled "effect.txt"
    P.load('effects.txt',
           outfile,
           options="--add-index=transcript_id")
    for section in ("cds", "intron", "splicing", "translation", "genes"):
        outf = open('effects.' + section + '.txt', 'w')
        first = True
        for f in infiles:
            track = P.snip(os.path.basename(f), ".effects.gz")
            statfile = f + "." + section + ".gz"
            if not os.path.exists(statfile):
                E.warn("File %s missing" % statfile)
                continue
            with gzip.open(statfile, "r") as inf:
                lines = inf.readlines()
            if not lines:
                continue
            if first:
                outf.write("%s\t%s" % ("track", lines[0]))
                first = False
            for i in range(1, len(lines)):
                outf.write("%s\t%s" % (track, lines[i]))
        outf.close()
        # BUGFIX: "tabelname" -> tablename
        P.load(outf.name,
               outfile,
               tablename=tablename + "_" + section,
               options="--add-index=transcript_id "
               "--allow-empty-file "
               "--ignore-column=seq_na "
               "--ignore-column=seq_aa")
@transform(loadEffects, suffix("_effects.load"), "_effects_genes.load")
def summarizeEffectsPerGene(infile, outfile):
    '''summarize effects on a per-gene level.

    Aggregates one track's transcript-level effects into a per-gene
    table (allele counts, stop-codon extrema and NMD knockout/affected
    transcript counts), then indexes it by gene_id.
    '''
    tablename = outfile[:-len(".load")]
    track = infile[:-len("_effects.load")]
    dbhandle = connect()
    # the SQL is interpolated via locals(): tablename/track names are fixed
    statement = '''
    CREATE TABLE %(tablename)s AS
    SELECT DISTINCT
           gene_id,
           COUNT(*) AS ntranscripts,
           MIN(e.nalleles) AS min_nalleles,
           MAX(e.nalleles) AS max_nalleles,
           MIN(e.stop_min) AS min_stop_min,
           MAX(e.stop_min) AS max_stop_min,
           MIN(e.stop_max) AS min_stop_max,
           MAX(e.stop_max) AS max_stop_max,
           SUM( CASE WHEN stop_min > 0 AND cds_len - stop_min * 3 < last_exon_start THEN 1
                     ELSE 0 END) AS nmd_knockout,
           SUM( CASE WHEN stop_max > 0 AND cds_len - stop_max * 3 < last_exon_start THEN 1
                     ELSE 0 END) AS nmd_affected
    FROM annotations.transcript_info as i,
         %(track)s_effects AS e
    WHERE i.transcript_id = e.transcript_id
    GROUP BY i.gene_id
    ''' % locals()
    # drop-and-recreate so reruns are idempotent
    Database.executewait(
        dbhandle, "DROP TABLE IF EXISTS %(tablename)s" % locals())
    Database.executewait(dbhandle, statement)
    Database.executewait(
        dbhandle, "CREATE INDEX %(tablename)s_gene_id ON %(tablename)s (gene_id)" % locals())
    dbhandle.commit()
    P.touch(outfile)
@merge(mergeEffects, "effects_genes.load")
def mergeEffectsPerGene(infile, outfile):
    '''summarize effects on a per-gene level.

    Same aggregation as summarizeEffectsPerGene but over the merged
    ``effects`` table, keeping the track column in the grouping.
    '''
    tablename = outfile[:-len(".load")]
    dbhandle = connect()
    statement = '''
    CREATE TABLE %(tablename)s AS
    SELECT DISTINCT
           track,
           gene_id,
           COUNT(*) AS ntranscripts,
           MIN(e.nalleles) AS min_nalleles,
           MAX(e.nalleles) AS max_nalleles,
           MIN(e.stop_min) AS min_stop_min,
           MAX(e.stop_min) AS max_stop_min,
           MIN(e.stop_max) AS min_stop_max,
           MAX(e.stop_max) AS max_stop_max,
           SUM( CASE WHEN stop_min > 0 AND cds_len - stop_min * 3 < last_exon_start THEN 1
                     ELSE 0 END) AS nmd_knockout,
           SUM( CASE WHEN stop_max > 0 AND cds_len - stop_max * 3 < last_exon_start THEN 1
                     ELSE 0 END) AS nmd_affected
    FROM annotations.transcript_info as i, effects AS e
    WHERE i.transcript_id = e.transcript_id
    GROUP BY i.gene_id, track
    ''' % locals()
    # drop-and-recreate so reruns are idempotent
    Database.executewait(
        dbhandle, "DROP TABLE IF EXISTS %(tablename)s" % locals())
    Database.executewait(dbhandle, statement)
    Database.executewait(
        dbhandle, "CREATE INDEX %(tablename)s_gene_id ON %(tablename)s (gene_id)" % locals())
    dbhandle.commit()
    P.touch(outfile)
###################################################################
###################################################################
# Polyphen
###################################################################
###################################################################
@merge(loadEffects, "polyphen.input")
def buildPolyphenInput(infiles, outfile):
    '''build polyphen input file.

    SNPS across all species are aggregated into a single
    file to avoid multiple submissions for the same variant.

    Mapping to Uniprot ids was not successful - 40% of the
    SNPs would have been lost. Hence I map to ensembl protein
    identifiers. Note that the sequence file is then to be
    submitted to POLYPHEN as well.

    Note that this method outputs 1-based coordinates for polyphen,
    while the coordinates in the .map file are still 0-based.

    SNPs are assigned a snp_id and a locus_id. The snp_id refers
    to the SNP within a peptide sequence while the locus_id refers
    to the genomic location. If there are alternative
    transcripts overlapping a SNP, the same SNP will get two
    snp_ids, but the same locus_id. As the peptide background might
    be different for the same SNP depending on the transcript,
    its effect needs to be predicted twice.
    '''
    # homozygous coding SNPs only; %(table)s is filled per track below
    statement = '''SELECT
        transcript_id,
        cds_start,
        cds_end,
        orig_codons,
        variant_codons,
        orig_na,
        variant_na,
        contig,
        snp_position
    FROM %(table)s_cds
    WHERE variant_code = '=' AND code = 'N'
    '''
    dbhandle = connect()
    cc = dbhandle.cursor()
    infiles.sort()
    # ensembl mapping
    map_transcript2id = dict(
        cc.execute("SELECT transcript_id, protein_id FROM annotations.transcript_info WHERE protein_id IS NOT NULL").fetchall())
    total_counts = E.Counter()
    notfound, found = set(), set()
    outf_map = open(outfile + ".map", "w")
    outf_map.write(
        "snp_id\ttrack\ttranscript_id\tprotein_id\tprotein_pos\tlocus_id\tcontig\tpos\tphase\n")
    outf = open(outfile, "w")
    snps = {}
    locus_ids = {}
    for infile in infiles:
        table = P.toTable(infile)
        track = table[:-len("_effects")]
        # debug: echo the SQL being executed for this track
        print(statement % locals())
        cc.execute(statement % locals())
        counts = E.Counter()
        snp_id = 0
        for transcript_id, cds_start, cds_end, orig_codons, variant_codons, orig_na, variant_na, contig, pos in cc:
            counts.input += 1
            if transcript_id not in map_transcript2id:
                notfound.add(transcript_id)
                counts.not_found += 1
                continue
            if "," in variant_codons:
                counts.heterozygous += 1
                continue
            # find the first differing base within the codon; note that
            # `phase` deliberately leaks out of this loop and is written
            # to the map file below
            for phase in range(0, 3):
                if orig_na[phase].lower() != variant_na[phase].lower():
                    break
            pid = map_transcript2id[transcript_id]
            # one-based coordinates
            peptide_pos = int(math.floor(cds_start / 3.0)) + 1
            key = "%s-%i-%s" % (pid, peptide_pos, variant_codons)
            # each unique (protein, position, variant) gets one snp_id
            if key in snps:
                snp_id = snps[key]
            else:
                snp_id = len(snps)
                snps[key] = snp_id
                outf.write("snp%010i\t%s\t%i\t%s\t%s\n" %
                           (snp_id,
                            pid,
                            peptide_pos,
                            orig_codons,
                            variant_codons,
                            ))
                counts.output += 1
            # each unique genomic location gets one locus_id
            locus_key = "%s-%i-%s" % (contig, pos, variant_codons)
            if locus_key not in locus_ids:
                locus_ids[locus_key] = len(locus_ids)
            # use 0-based coordinates throughout, including peptide pos
            outf_map.write("snp%010i\t%s\t%s\t%s\t%i\tloc%010i\t%s\t%i\t%i\n" %
                           (snp_id,
                            track,
                            transcript_id,
                            pid,
                            peptide_pos - 1,
                            locus_ids[locus_key],
                            contig,
                            pos,
                            phase))
            found.add(transcript_id)
        total_counts += counts
        E.info("%s: %s" % (table, str(counts)))
    outf.close()
    outf_map.close()
    # NOTE(review): `table` here is the last track's table — the summary
    # label is misleading for the aggregate counts; confirm intent.
    E.info("%s: transcripts: %s found, %i not found" % (table,
                                                        len(found),
                                                        len(notfound)))
    E.info("total=%s, snp_ids=%i, locus_ids=%i" %
           (str(total_counts), len(snps), len(locus_ids)))
    if notfound:
        E.warn("%i transcripts had SNPS that were ignored because there was no uniprot accession" %
               len(notfound))
        E.warn("notfound: %s" % ",".join(notfound))
    # polyphen requires the input sorted by protein and position
    statement = '''sort -k2,2 -k3,3n %(outfile)s > %(outfile)s.tmp; mv %(outfile)s.tmp %(outfile)s'''
    P.run()
###################################################################
###################################################################
###################################################################
@transform(buildPolyphenInput, suffix(".input"), ".features")
def buildPolyphenFeatures(infile, outfile):
    '''run polyphen on the cluster.

    To do this, first send uniref to all nodes:

    python ~/cgat/cluster_distribute.py
           --collection=andreas
           /net/cpp-group/tools/polyphen-2.0.18/nrdb/uniref100*.{pin,psd,psi,phr,psq,pal}
    '''
    nsnps = len([x for x in open(infile)])
    to_cluster = True
    # split the input into SGE array-job chunks of at least 1000 snps
    stepsize = max(int(nsnps / 200000.0), 1000)
    job_array = (0, nsnps, stepsize)
    E.info("running array jobs on %i snps" % nsnps)
    # mkdir may fail if the directory already exists — that is fine
    scratchdir = os.path.join(os.path.abspath("."), "scratch")
    try:
        os.mkdir(scratchdir)
    except OSError:
        pass
    resultsdir = outfile + ".dir"
    try:
        os.mkdir(resultsdir)
    except OSError:
        pass
    filename_peptides = os.path.join(PARAMS["annotations_dir"],
                                     PARAMS_ANNOTATIONS["interface_peptides_fasta"])
    # each array task writes its own chunk, keyed by $SGE_TASK_ID
    statement = '''
    %(polyphen_home)s/bin/run_pph.pl
       -s %(filename_peptides)s
       -b %(polyphen_blastdb)s
       -d %(scratchdir)s
       %(infile)s > %(resultsdir)s/%(outfile)s.$SGE_TASK_ID 2> %(resultsdir)s/%(outfile)s.err.$SGE_TASK_ID
    '''
    P.run()
    # collect the chunks locally (no cluster, no array)
    to_cluster = False
    job_array = None
    statement = '''find %(resultsdir)s -name "*.err.*" -exec cat {} \; > %(outfile)s.log'''
    P.run()
    statement = '''find %(resultsdir)s -not -name "*.err.*" -exec cat {} \; > %(outfile)s'''
    P.run()
###################################################################
###################################################################
###################################################################
# do not run in parallel. run_weka.pl creates a $testfile
# that is not unique. run_weka.pl and pph2arff.pl could either
# be patched or the following jobs run in sequence.
@jobs_limit(1, "polyphen")
@files([(buildPolyphenFeatures, "polyphen_%s.output.gz" % x, x)
        for x in P.asList(PARAMS["polyphen_models"])])
def runPolyphen(infile, outfile, model):
    '''run POLYPHEN on feature tables to classify SNPs.

    One invocation per prediction model listed in
    PARAMS["polyphen_models"]; results are gzipped into *outfile*.
    '''
    # options
    # -f: feature set, default is F11
    # -c: classifier, default is NBd (Naive Bayes with discretization)
    # -l: model name, default is HumDiv
    statement = '''
    %(polyphen_home)s/bin/run_weka.pl
       -l %(polyphen_home)s/models/%(model)s.UniRef100.NBd.f11.model
       %(infile)s
    | gzip
    > %(outfile)s
    2> %(outfile)s.log
    '''
    P.run()
@transform(buildPolyphenInput, suffix(".input"), "_map.load")
def loadPolyphenMap(infile, outfile):
    '''Load the polyphen snp-to-locus map into the database.'''
    # index every lookup path used downstream
    index_columns = ("snp_id", "track,transcript_id", "contig,pos",
                     "protein_id", "transcript_id")
    opts = "".join("--add-index=%s " % col for col in index_columns)
    P.load(infile + ".map", outfile, options=opts)
@transform(runPolyphen, suffix(".output.gz"), ".load")
def loadPolyphen(infile, outfile):
    '''load polyphen results.

    The perl filter renames the o_acc column to protein_id, strips
    spaces and removes the leading comment marker from the header.
    '''
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=snp_id "
        "--add-index=protein_id "
        "--map=effect:str")
    # P.run() interpolates infile/load_statement/outfile from locals
    statement = '''
    gunzip
    < %(infile)s
    | perl -p -e "s/o_acc/protein_id/; s/ +//g; s/^#//;"
    | %(load_statement)s
    > %(outfile)s
    '''
    P.run()
###################################################################
###################################################################
###################################################################
@transform(loadPolyphen, suffix(".load"), ".genestats")
def analysePolyphen(infile, outfile):
    '''compute enrichment of SNPs within genes
    and deleterious SNPs within SNPs within genes.

    del: enrichment of deleterious snps within snps per gene
    len: enrichment of snps within genes
    com: enrichment of deleterious snps within gene

    P-values are binomial survival probabilities against genome-wide
    background rates; q-values are FDR estimates, computed only when
    more than 10 tests are available.

    Fixes: the FDR result for "com" was assigned to the wrong name
    (``com_q``), leaving ``com_qvalues`` undefined, and the "len"
    warning message said "del".
    '''
    table = P.toTable(infile)
    tablename_map = "polyphen_map"
    dbhandle = connect()
    cc = dbhandle.cursor()
    # per-gene counts of segregating loci, damaging loci and the longest
    # protein length
    statement = '''
    SELECT i.gene_id,
           COUNT(DISTINCT map.locus_id) as nsnps,
           COUNT(DISTINCT case t.prediction when 'possiblydamaging' then map.locus_id when 'probablydamaging' then map.locus_id else NULL end) AS ndeleterious,
           MAX(s.length)
    FROM %(table)s as t,
         %(tablename_map)s as map,
         annotations.protein_stats as s,
         annotations.transcript_info as i
    WHERE map.snp_id = t.snp_id AND
          i.transcript_id = map.transcript_id AND
          s.protein_id = map.protein_id
    GROUP BY i.gene_id
    ''' % locals()
    data = cc.execute(statement).fetchall()
    statement = '''SELECT DISTINCT i.gene_id, MAX(s.length)
    FROM annotations.transcript_info AS i, annotations.protein_stats AS s
    WHERE s.protein_id = i.protein_id
    GROUP BY i.gene_id'''
    gene_ids = cc.execute(statement).fetchall()
    # genome-wide background rates
    total_nsnps = sum([x[1] for x in data])
    total_ndel = sum([x[2] for x in data])
    total_length = sum([x[1] for x in gene_ids])
    del_p = float(total_ndel) / total_nsnps
    len_p = float(total_nsnps) / total_length
    com_p = float(total_ndel) / total_length
    E.info("del: background probability: %i/%i = %f" %
           (total_ndel, total_nsnps, del_p))
    E.info("len: background probability: %i/%i = %f" %
           (total_nsnps, total_length, len_p))
    E.info("com: background probability: %i/%i = %f" %
           (total_ndel, total_length, com_p))
    outf = open(outfile, "w")
    outf.write("\t".join(("gene_id", "code",
                          "length", "nsnps", "ndel",
                          "del_p", "del_pvalue", "del_qvalue",
                          "len_p", "len_pvalue", "len_qvalue",
                          "com_p", "com_pvalue", "com_qvalue", )) + "\n")
    del_pvalues, len_pvalues, com_pvalues = [], [], []
    for gene_id, nsnps, ndel, length in data:
        # use -1, because I need P( x >= X)
        # sf = 1 - cdf and cdf = P( x <= X ), thus sf = 1 - P( x <= X ) = P (x
        # > X ).
        del_pvalues.append(scipy.stats.binom.sf(ndel - 1, nsnps, del_p))
        len_pvalues.append(
            scipy.stats.binom.sf(nsnps - 1, int(round(length)), len_p))
        com_pvalues.append(
            scipy.stats.binom.sf(ndel - 1, int(round(length)), com_p))
    if len(del_pvalues) > 10:
        del_qvalues = Stats.doFDR(del_pvalues).mQValues
    else:
        E.warn("no FDR computed for del")
        del_qvalues = del_pvalues
    if len(len_pvalues) > 10:
        len_qvalues = Stats.doFDR(len_pvalues).mQValues
    else:
        # BUGFIX: message previously said "del"
        E.warn("no FDR computed for len")
        len_qvalues = len_pvalues
    if len(com_pvalues) > 10:
        # BUGFIX: was assigned to "com_q", so com_qvalues raised a
        # NameError whenever this branch was taken
        com_qvalues = Stats.doFDR(com_pvalues).mQValues
    else:
        E.warn("no FDR computed for com")
        com_qvalues = com_pvalues
    fdr = PARAMS["polyphen_fdr"]
    found = set()
    for a, del_pvalue, del_qvalue, len_pvalue, len_qvalue, com_pvalue, com_qvalue in \
        zip(data,
            del_pvalues, del_qvalues,
            len_pvalues, len_qvalues,
            com_pvalues, com_qvalues,
            ):
        gene_id, nsnps, ndel, length = a
        found.add(gene_id)
        del_p = float(ndel) / nsnps
        len_p = float(nsnps) / length
        # three-digit code: 1 per significant statistic (del/len/com)
        code = "".join([str(int(x < fdr))
                        for x in (del_qvalue, len_qvalue, com_qvalue)])
        outf.write("\t".join((gene_id,
                              code,
                              "%i" % int(round(length)),
                              "%i" % int(nsnps),
                              "%i" % int(ndel),
                              "%6.4f" % del_p,
                              "%6.4g" % del_pvalue,
                              "%6.4g" % del_qvalue,
                              "%6.4f" % len_p,
                              "%6.4g" % len_pvalue,
                              "%6.4g" % len_qvalue,
                              "%6.4f" % com_p,
                              "%6.4g" % com_pvalue,
                              "%6.4g" % com_qvalue,
                              )) + "\n")
    # add missing genes:
    code = "---"
    for gene_id, length in gene_ids:
        if gene_id in found:
            continue
        outf.write("\t".join((gene_id,
                              code,
                              "%i" % int(round(length)),
                              "%i" % 0,
                              "%i" % 0,
                              "%6.4f" % 0,
                              "%6.4g" % 1,
                              "%6.4g" % 1,
                              "%6.4f" % 0,
                              "%6.4g" % 1,
                              "%6.4g" % 1,
                              "%6.4f" % 0,
                              "%6.4g" % 1,
                              "%6.4g" % 1,
                              )) + "\n")
    outf.close()
@transform(analysePolyphen, suffix(".genestats"), "_genestats.load")
def loadPolyphenAnalysis(infile, outfile):
    '''Load the per-gene polyphen enrichment statistics.'''
    opts = "--add-index=gene_id --map=code:str"
    P.load(infile, outfile, options=opts)
@split(loadPolyphenMap, ("counts_shared.matrix",
                         "counts_segregation.matrix",
                         "counts_pid.matrix",
                         "counts_distance.matrix",
                         "counts.tree"
                         ))
def buildSharedSNPMatrix(infiles, outfiles):
    '''build matrix of shared coding nonsynonymous SNPs.

    Counts are per locus id.

    Percent identities are only within coding segregating loci
    and thus do not reflect the real divergence.
    '''
    dbhandle = connect()
    cc = dbhandle.cursor()
    segregating_sites = cc.execute(
        'SELECT COUNT( DISTINCT locus_id) FROM polyphen_map').fetchone()[0]
    statement = '''SELECT DISTINCT locus_id, track FROM polyphen_map ORDER BY locus_id'''
    cc.execute(statement)
    # matrix[(a, b)] = number of loci segregating in both tracks a and b;
    # the diagonal counts loci per track
    matrix = collections.defaultdict(int)
    for k, vals in itertools.groupby(cc, key=lambda x: x[0]):
        tracks = [x[1] for x in list(vals)]
        for t1 in tracks:
            matrix[(t1, t1)] += 1
        if len(tracks) > 1:
            for t1, t2 in itertools.combinations(tracks, 2):
                matrix[(t1, t2)] += 1
                matrix[(t2, t1)] += 1
    # NOTE(review): all_tracks is a set, so row/column order is not
    # deterministic across runs — confirm this is acceptable downstream.
    all_tracks = set([x[0] for x in list(matrix.keys())] + [x[1]
                                                            for x in list(matrix.keys())])
    # output matrix with shared SNPs.
    outf = open(outfiles[0], "w")
    outf.write("track\t%s\n" % "\t".join(all_tracks))
    for track1 in all_tracks:
        outf.write("%s" % track1)
        for track2 in all_tracks:
            outf.write("\t%i" % matrix[(track1, track2)])
        outf.write("\n")
    outf.close()
    # output matrix with shared segregating sites as
    # distance matrix
    outf = open(outfiles[1], "w")
    outf.write("track\t%s\n" % "\t".join(all_tracks))
    for track1 in all_tracks:
        outf.write("%s" % track1)
        for track2 in all_tracks:
            if track1 == track2:
                outf.write("\t%i" % 0)
            else:
                outf.write("\t%i" %
                           (segregating_sites - matrix[(track1, track2)]))
        outf.write("\n")
    outf.close()
    # output matrix as percent identity matrix
    # percent identity is given as
    # segregating sites - sites where strains differ = segregating_sites - (matrix[i,i] + matrix[j,j] - 2 * matrix[i,j])
    # simplifies to:
    # segsites - matrix[i,i] -matrix[j,j] +
    # divided by the total number of segregating sites
    outf = open(outfiles[2], "w")
    outf.write("track\t%s\n" % "\t".join(all_tracks))
    pids = {}
    for track1 in all_tracks:
        outf.write("%s" % track1)
        for track2 in all_tracks:
            a = segregating_sites - \
                (matrix[(track1, track1)] + matrix[(track2, track2)] -
                 2 * matrix[(track1, track2)])
            pid = 100.0 * a / segregating_sites
            outf.write("\t%6.4f" % pid)
            pids[(track1, track2)] = pid
        outf.write("\n")
    outf.close()
    # distance matrix
    outf = open(outfiles[3], "w")
    outf.write("track\t%s\n" % "\t".join(all_tracks))
    for track1 in all_tracks:
        outf.write("%s" % track1)
        for track2 in all_tracks:
            val = 100.0 - pids[(track1, track2)]
            outf.write("\t%6.4f" % val)
        outf.write("\n")
    outf.close()
    outfile_distance, outfile_tree = outfiles[3], outfiles[4]
    # build tree (neighbour joining on the distance matrix)
    statement = '''cgat matrix2matrix
    --output-format=phylip
    < %(outfile_distance)s
    | cgat matrix2tree
    --method=nj
    > %(outfile_tree)s
    '''
    P.run()
###################################################################
###################################################################
###################################################################
@follows(buildAnnotations, loadAnnotations, mergeAnnotations, summarizeAnnotations, loadAnnotationsSummary)
def annotations():
    """Meta-task: build and load all SNP annotation tables."""
    pass
@follows(buildEffects, loadEffects, mergeEffects, summarizeEffectsPerGene)
def effects():
    """Meta-task: build and load all transcript-effect tables."""
    pass
@follows(buildPolyphenInput, buildPolyphenFeatures, runPolyphen, loadPolyphen, loadPolyphenMap, analysePolyphen, loadPolyphenAnalysis)
def polyphen():
    """Meta-task: run the full polyphen prediction and analysis chain."""
    pass
@follows(annotations, effects, polyphen)
def full():
    """Meta-task: run the entire pipeline."""
    pass
@follows(mkdir("report"))
def build_report():
    '''build report from scratch.'''
    E.info("starting documentation build process from scratch")
    P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
    '''update report.'''
    E.info("updating documentation")
    P.run_report(clean=False)
if __name__ == "__main__":
    # P.checkFiles( ("genome.fasta", "genome.idx" ) )
    # delegate command-line handling (make/show/plot ...) to the
    # pipeline framework
    sys.exit(P.main(sys.argv))
| |
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import time
import uuid
from nova.openstack.common.gettextutils import _
# Fake host capability values reported by this fake libvirt driver.
# Tests may monkey-patch them before opening a connection.

# Allow passing None to the various connect methods
# (i.e. allow the client to rely on default URLs)
allow_default_uri_connection = True

# string indicating the CPU arch
node_arch = 'x86_64'  # or 'i686' (or whatever else uname -m might return)

# memory size in kilobytes
node_kB_mem = 4096

# the number of active CPUs
node_cpus = 2

# expected CPU frequency
node_mhz = 800

# the number of NUMA cell, 1 for unusual NUMA topologies or uniform
# memory access; check capabilities XML for the actual NUMA topology
node_nodes = 1  # NUMA nodes

# number of CPU sockets per node if nodes > 1, total number of CPU
# sockets otherwise
node_sockets = 1

# number of cores per socket
node_cores = 2

# number of threads per core
node_threads = 1

# CPU model
node_cpu_model = "Penryn"

# CPU vendor
node_cpu_vendor = "Intel"

# Has libvirt connection been used at least once
connection_used = False
def _reset():
    """Restore module-level test toggles to their defaults (test helper)."""
    global allow_default_uri_connection
    allow_default_uri_connection = True
# Constants mirroring the libvirt Python binding's module-level enums.

# virDomainState
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6

VIR_DOMAIN_XML_SECURE = 1

# domain lifecycle event ids/types
VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0

VIR_DOMAIN_EVENT_DEFINED = 0
VIR_DOMAIN_EVENT_UNDEFINED = 1
VIR_DOMAIN_EVENT_STARTED = 2
VIR_DOMAIN_EVENT_SUSPENDED = 3
VIR_DOMAIN_EVENT_RESUMED = 4
VIR_DOMAIN_EVENT_STOPPED = 5
VIR_DOMAIN_EVENT_SHUTDOWN = 6
VIR_DOMAIN_EVENT_PMSUSPENDED = 7

VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1

VIR_DOMAIN_AFFECT_CURRENT = 0
VIR_DOMAIN_AFFECT_LIVE = 1
VIR_DOMAIN_AFFECT_CONFIG = 2

VIR_CPU_COMPARE_ERROR = -1
VIR_CPU_COMPARE_INCOMPATIBLE = 0
VIR_CPU_COMPARE_IDENTICAL = 1
VIR_CPU_COMPARE_SUPERSET = 2

VIR_CRED_USERNAME = 1
VIR_CRED_AUTHNAME = 2
VIR_CRED_LANGUAGE = 3
VIR_CRED_CNONCE = 4
VIR_CRED_PASSPHRASE = 5
VIR_CRED_ECHOPROMPT = 6
VIR_CRED_NOECHOPROMPT = 7
VIR_CRED_REALM = 8
VIR_CRED_EXTERNAL = 9

VIR_MIGRATE_PEER2PEER = 2
VIR_MIGRATE_UNDEFINE_SOURCE = 16

VIR_NODE_CPU_STATS_ALL_CPUS = -1

VIR_DOMAIN_START_PAUSED = 1

# libvirtError enums
# (Intentionally different from what's in libvirt. We do this to check,
# that consumers of the library are using the symbolic names rather than
# hardcoding the numerical values)
VIR_FROM_QEMU = 100
VIR_FROM_DOMAIN = 200
VIR_FROM_NWFILTER = 330
VIR_FROM_REMOTE = 340
VIR_FROM_RPC = 345
VIR_ERR_NO_SUPPORT = 3
VIR_ERR_XML_DETAIL = 350
VIR_ERR_NO_DOMAIN = 420
VIR_ERR_OPERATION_INVALID = 55
VIR_ERR_OPERATION_TIMEOUT = 68
VIR_ERR_NO_NWFILTER = 620
VIR_ERR_SYSTEM_ERROR = 900
VIR_ERR_INTERNAL_ERROR = 950

# Readonly
VIR_CONNECT_RO = 1

# virConnectBaselineCPU flags
VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1

# snapshotCreateXML flags
VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
def _parse_disk_info(element):
disk_info = {}
disk_info['type'] = element.get('type', 'file')
disk_info['device'] = element.get('device', 'disk')
driver = element.find('./driver')
if driver is not None:
disk_info['driver_name'] = driver.get('name')
disk_info['driver_type'] = driver.get('type')
source = element.find('./source')
if source is not None:
disk_info['source'] = source.get('file')
if not disk_info['source']:
disk_info['source'] = source.get('dev')
if not disk_info['source']:
disk_info['source'] = source.get('path')
target = element.find('./target')
if target is not None:
disk_info['target_dev'] = target.get('dev')
disk_info['target_bus'] = target.get('bus')
return disk_info
class libvirtError(Exception):
    """This class was copied and slightly modified from
    `libvirt-python:libvirt-override.py`.

    Since a test environment will use the real `libvirt-python` version of
    `libvirtError` if it's installed and not this fake, we need to maintain
    strict compatability with the original class, including `__init__` args
    and instance-attributes.

    To create a libvirtError instance you should:

        # Create an unsupported error exception
        exc = libvirtError('my message')
        exc.err = (libvirt.VIR_ERR_NO_SUPPORT,)

    self.err is a tuple of form:
        (error_code, error_domain, error_message, error_level, str1, str2,
         str3, int1, int2)

    Alternatively, you can use the `make_libvirtError` convenience function
    to allow you to specify these attributes in one shot.
    """

    def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
                 vol=None):
        Exception.__init__(self, defmsg)
        self.err = None

    def _err_field(self, index):
        # Every accessor returns None until self.err has been populated.
        if self.err is None:
            return None
        return self.err[index]

    def get_error_code(self):
        return self._err_field(0)

    def get_error_domain(self):
        return self._err_field(1)

    def get_error_message(self):
        return self._err_field(2)

    def get_error_level(self):
        return self._err_field(3)

    def get_str1(self):
        return self._err_field(4)

    def get_str2(self):
        return self._err_field(5)

    def get_str3(self):
        return self._err_field(6)

    def get_int1(self):
        return self._err_field(7)

    def get_int2(self):
        return self._err_field(8)
class NWFilter(object):
    """Minimal stand-in for a libvirt network filter object."""

    def __init__(self, connection, xml):
        self._connection = connection
        self._xml = xml
        self._parse_xml(xml)

    def _parse_xml(self, xml):
        # only the filter's name attribute is needed by the fake connection
        root = etree.fromstring(xml).find('.')
        self._name = root.get('name')

    def undefine(self):
        self._connection._remove_filter(self)
class Domain(object):
    def __init__(self, connection, xml, running=False, transient=False):
        """Fake domain backed by a parsed XML definition.

        :param connection: owning fake connection; the domain registers
            itself as running with it when *running* is True.
        :param xml: domain XML, parsed by _parse_definition.
        :param transient: mirrors libvirt's transient/persistent split.
        """
        self._connection = connection
        if running:
            connection._mark_running(self)
        # mimic libvirt's state reporting
        self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF
        self._transient = transient
        self._def = self._parse_definition(xml)
        self._has_saved_state = False
        self._snapshots = {}
        # numeric id taken from the connection's running counter
        self._id = self._connection._id_counter
    def _parse_definition(self, xml):
        """Parse a domain XML document into the nested dict stored as
        self._def (keys: name, uuid, vcpu, memory, os, features, devices).

        Raises a libvirtError with VIR_ERR_XML_DETAIL on malformed XML.
        """
        try:
            tree = etree.fromstring(xml)
        # NOTE(review): ParseError matches the stdlib ElementTree API;
        # confirm the lxml version in use exposes this name as well.
        except etree.ParseError:
            raise make_libvirtError(
                libvirtError, "Invalid XML.",
                error_code=VIR_ERR_XML_DETAIL,
                error_domain=VIR_FROM_DOMAIN)
        definition = {}
        name = tree.find('./name')
        if name is not None:
            definition['name'] = name.text
        uuid_elem = tree.find('./uuid')
        if uuid_elem is not None:
            definition['uuid'] = uuid_elem.text
        else:
            # generate one when the caller's XML omits it
            definition['uuid'] = str(uuid.uuid4())
        vcpu = tree.find('./vcpu')
        if vcpu is not None:
            definition['vcpu'] = int(vcpu.text)
        memory = tree.find('./memory')
        if memory is not None:
            definition['memory'] = int(memory.text)
        # <os> sub-document: type/arch plus optional boot chain entries
        os = {}
        os_type = tree.find('./os/type')
        if os_type is not None:
            os['type'] = os_type.text
            os['arch'] = os_type.get('arch', node_arch)
        os_kernel = tree.find('./os/kernel')
        if os_kernel is not None:
            os['kernel'] = os_kernel.text
        os_initrd = tree.find('./os/initrd')
        if os_initrd is not None:
            os['initrd'] = os_initrd.text
        os_cmdline = tree.find('./os/cmdline')
        if os_cmdline is not None:
            os['cmdline'] = os_cmdline.text
        os_boot = tree.find('./os/boot')
        if os_boot is not None:
            os['boot_dev'] = os_boot.get('dev')
        definition['os'] = os
        features = {}
        acpi = tree.find('./features/acpi')
        if acpi is not None:
            features['acpi'] = True
        definition['features'] = features
        # devices: disks (via _parse_disk_info) and network interfaces
        devices = {}
        device_nodes = tree.find('./devices')
        if device_nodes is not None:
            disks_info = []
            disks = device_nodes.findall('./disk')
            for disk in disks:
                disks_info += [_parse_disk_info(disk)]
            devices['disks'] = disks_info
            nics_info = []
            nics = device_nodes.findall('./interface')
            for nic in nics:
                nic_info = {}
                nic_info['type'] = nic.get('type')
                mac = nic.find('./mac')
                if mac is not None:
                    nic_info['mac'] = mac.get('address')
                source = nic.find('./source')
                if source is not None:
                    if nic_info['type'] == 'network':
                        nic_info['source'] = source.get('network')
                    elif nic_info['type'] == 'bridge':
                        nic_info['source'] = source.get('bridge')
                nics_info += [nic_info]
            devices['nics'] = nics_info
        definition['devices'] = devices
        return definition
def create(self):
    """Start the defined domain with no flags (virDomainCreate)."""
    self.createWithFlags(0)
def createWithFlags(self, flags):
    """Start the domain and register it as running on the connection.

    Any managed-save image is considered consumed by the boot.
    """
    # FIXME: Not handling flags at the moment
    self._state = VIR_DOMAIN_RUNNING
    self._connection._mark_running(self)
    self._has_saved_state = False
def isActive(self):
    """Return 1 while the domain is running, 0 otherwise (libvirt returns int)."""
    return 1 if self._state == VIR_DOMAIN_RUNNING else 0
def undefine(self):
    """Remove this domain's persistent definition from the connection."""
    self._connection._undefine(self)
def undefineFlags(self, flags):
    """Undefine the domain; with VIR_DOMAIN_UNDEFINE_MANAGED_SAVE also
    remove any managed-save image (virDomainUndefineFlags semantics).
    """
    self.undefine()
    if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
        if self.hasManagedSaveImage(0):
            # Bug fix: managedSaveRemove(self, flags) requires a flags
            # argument; the previous no-arg call raised TypeError.
            self.managedSaveRemove(0)
def destroy(self):
    """Hard-stop the domain and deregister it from the running set."""
    self._state = VIR_DOMAIN_SHUTOFF
    self._connection._mark_not_running(self)
def ID(self):
    """Return the transient numeric domain id (-1 when not running)."""
    return self._id
def name(self):
    """Return the domain name from the parsed definition."""
    return self._def['name']
def UUIDString(self):
    """Return the domain UUID as a string."""
    return self._def['uuid']
def interfaceStats(self, device):
    """Canned virDomainInterfaceStats result: (rx_bytes, rx_packets,
    rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop).
    """
    canned_stats = [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]
    return canned_stats
def blockStats(self, device):
    """Canned virDomainBlockStats result: (rd_req, rd_bytes, wr_req,
    wr_bytes, errs) -- identical for every device.
    """
    canned_stats = [2, 10000242400, 234, 2343424234, 34]
    return canned_stats
def suspend(self):
    """Pause the domain (stays in the running-domains table)."""
    self._state = VIR_DOMAIN_PAUSED
def shutdown(self):
    """Gracefully shut the domain down and deregister it."""
    self._state = VIR_DOMAIN_SHUTDOWN
    self._connection._mark_not_running(self)
def reset(self, flags):
    """Reset the domain; it comes back running immediately."""
    # FIXME: Not handling flags at the moment
    self._state = VIR_DOMAIN_RUNNING
    self._connection._mark_running(self)
def info(self):
return [self._state,
long(self._def['memory']),
long(self._def['memory']),
self._def['vcpu'],
123456789L]
def migrateToURI(self, desturi, flags, dname, bandwidth):
    """Always fail: the fake driver deliberately does not support migration."""
    raise make_libvirtError(
        libvirtError,
        "Migration always fails for fake libvirt!",
        error_code=VIR_ERR_INTERNAL_ERROR,
        error_domain=VIR_FROM_QEMU)
def attachDevice(self, xml):
    """Parse a <disk> XML fragment and hot-plug it into the definition.

    The '_attached' marker lets detachDevice recognise the disk later.
    """
    new_disk = _parse_disk_info(etree.fromstring(xml))
    new_disk['_attached'] = True
    self._def['devices']['disks'].append(new_disk)
    return True
def attachDeviceFlags(self, xml, flags):
    """attachDevice with flags; AFFECT_LIVE is only valid while running."""
    wants_live = bool(flags & VIR_DOMAIN_AFFECT_LIVE)
    if wants_live and self._state != VIR_DOMAIN_RUNNING:
        raise make_libvirtError(
            libvirtError,
            "AFFECT_LIVE only allowed for running domains!",
            error_code=VIR_ERR_INTERNAL_ERROR,
            error_domain=VIR_FROM_QEMU)
    self.attachDevice(xml)
def detachDevice(self, xml):
    """Report whether the disk described by ``xml`` is currently attached.

    NOTE: this fake only checks membership; it does not actually remove
    the disk from the definition.
    """
    wanted = _parse_disk_info(etree.fromstring(xml))
    wanted['_attached'] = True
    return wanted in self._def['devices']['disks']
def detachDeviceFlags(self, xml, _flags):
    """detachDevice ignoring flags (return value discarded, as in original)."""
    self.detachDevice(xml)
def XMLDesc(self, flags):
    """Render the tracked definition back into libvirt-style domain XML.

    Only the fields kept in self._def (name, uuid, memory, vcpu, arch,
    disks, nics) are substituted; all other elements are fixed
    boilerplate mimicking a typical KVM guest.
    """
    disks = ''
    for disk in self._def['devices']['disks']:
        disks += '''<disk type='%(type)s' device='%(device)s'>
  <driver name='%(driver_name)s' type='%(driver_type)s'/>
  <source file='%(source)s'/>
  <target dev='%(target_dev)s' bus='%(target_bus)s'/>
  <address type='drive' controller='0' bus='0' unit='0'/>
</disk>''' % disk
    nics = ''
    for nic in self._def['devices']['nics']:
        # The source attribute name intentionally reuses the nic type
        # (source network=... / source bridge=...).
        nics += '''<interface type='%(type)s'>
  <mac address='%(mac)s'/>
  <source %(type)s='%(source)s'/>
  <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>''' % nic

    return '''<domain type='kvm'>
  <name>%(name)s</name>
  <uuid>%(uuid)s</uuid>
  <memory>%(memory)s</memory>
  <currentMemory>%(memory)s</currentMemory>
  <vcpu>%(vcpu)s</vcpu>
  <os>
    <type arch='%(arch)s' machine='pc-0.12'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    %(disks)s
    <controller type='ide' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01'
function='0x1'/>
    </controller>
    %(nics)s
    <serial type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </serial>
    <serial type='pty'>
      <source pty='/dev/pts/27'/>
      <target port='1'/>
    </serial>
    <console type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </console>
    <input type='tablet' bus='usb'/>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes'/>
    <graphics type='spice' port='-1' autoport='yes'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02'
function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
function='0x0'/>
    </memballoon>
  </devices>
</domain>''' % {'name': self._def['name'],
                'uuid': self._def['uuid'],
                'memory': self._def['memory'],
                'vcpu': self._def['vcpu'],
                'arch': self._def['os']['arch'],
                'disks': disks,
                'nics': nics}
def managedSave(self, flags):
    """Stop the domain and remember that a managed-save image exists."""
    self._connection._mark_not_running(self)
    self._has_saved_state = True
def managedSaveRemove(self, flags):
    """Discard the managed-save image, if any."""
    self._has_saved_state = False
def hasManagedSaveImage(self, flags):
    """Return 1 if a managed-save image exists, else 0 (libvirt returns int)."""
    return int(self._has_saved_state)
def resume(self):
    """Unpause the domain (counterpart to suspend)."""
    self._state = VIR_DOMAIN_RUNNING
def snapshotCreateXML(self, xml, flags):
    """Create a DomainSnapshot named by the <name> element and register it."""
    snap_name = etree.fromstring(xml).find('./name').text
    snapshot = DomainSnapshot(snap_name, self)
    self._snapshots[snap_name] = snapshot
    return snapshot
def vcpus(self):
vcpus = ([], [])
for i in range(0, self._def['vcpu']):
vcpus[0].append((i, 1, 120405L, i))
vcpus[1].append((True, True, True, True))
return vcpus
def memoryStats(self):
    """virDomainMemoryStats stand-in: no statistics are tracked."""
    return dict()
def maxMemory(self):
    """Return the configured memory from the definition."""
    return self._def['memory']
def blockJobInfo(self, disk, flags):
    """virDomainGetBlockJobInfo stand-in: never any active block job."""
    return dict()
class DomainSnapshot(object):
    """Minimal stand-in for libvirt's virDomainSnapshot.

    A snapshot only knows its name and owning domain; deleting it simply
    removes it from the domain's snapshot registry.
    """

    def __init__(self, name, domain):
        self._name = name
        self._domain = domain

    def delete(self, flags):
        """Remove this snapshot from its domain (flags are ignored)."""
        self._domain._snapshots.pop(self._name)
class Connection(object):
    """Fake of libvirt's virConnect.

    Tracks defined domains in ``_vms`` (name -> Domain) and running
    domains in ``_running_vms`` (id -> Domain, ids handed out by a
    monotonically increasing counter), and dispatches registered
    lifecycle-event callbacks.  Written for Python 2 (note
    dict.iteritems below).
    """

    def __init__(self, uri=None, readonly=False, version=9007):
        if not uri or uri == '':
            if allow_default_uri_connection:
                uri = 'qemu:///session'
            else:
                raise ValueError("URI was None, but fake libvirt is "
                                 "configured to not accept this.")
        uri_whitelist = ['qemu:///system',
                         'qemu:///session',
                         'xen:///system',
                         'uml:///system',
                         'test:///default']
        if uri not in uri_whitelist:
            raise make_libvirtError(
                libvirtError,
                "libvirt error: no connection driver "
                "available for No connection for URI %s" % uri,
                error_code=5, error_domain=0)
        self.readonly = readonly
        self._uri = uri
        # name -> Domain for every defined (persistent) domain.
        self._vms = {}
        # id -> Domain for currently running domains only.
        self._running_vms = {}
        self._id_counter = 1  # libvirt reserves 0 for the hypervisor.
        self._nwfilters = {}
        self._event_callbacks = {}
        self.fakeLibVersion = version
        self.fakeVersion = version

    def _add_filter(self, nwfilter):
        self._nwfilters[nwfilter._name] = nwfilter

    def _remove_filter(self, nwfilter):
        del self._nwfilters[nwfilter._name]

    def _mark_running(self, dom):
        # Register under a fresh id and notify lifecycle listeners.
        self._running_vms[self._id_counter] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
        self._id_counter += 1

    def _mark_not_running(self, dom):
        if dom._transient:
            # Transient domains cease to exist entirely once stopped.
            self._undefine(dom)

        dom._id = -1

        # NOTE: dict.iteritems() is Python 2 only; mutating during
        # iteration is tolerable here because we return immediately.
        for (k, v) in self._running_vms.iteritems():
            if v == dom:
                del self._running_vms[k]
                self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
                return

    def _undefine(self, dom):
        del self._vms[dom.name()]
        if not dom._transient:
            self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)

    def getInfo(self):
        """Mimic virNodeGetInfo using the module-level node_* constants."""
        return [node_arch,
                node_kB_mem,
                node_cpus,
                node_mhz,
                node_nodes,
                node_sockets,
                node_cores,
                node_threads]

    def numOfDomains(self):
        return len(self._running_vms)

    def listDomainsID(self):
        return self._running_vms.keys()

    def lookupByID(self, id):
        if id in self._running_vms:
            return self._running_vms[id]
        raise make_libvirtError(
            libvirtError,
            'Domain not found: no domain with matching id %d' % id,
            error_code=VIR_ERR_NO_DOMAIN,
            error_domain=VIR_FROM_QEMU)

    def lookupByName(self, name):
        if name in self._vms:
            return self._vms[name]
        raise make_libvirtError(
            libvirtError,
            'Domain not found: no domain with matching name "%s"' % name,
            error_code=VIR_ERR_NO_DOMAIN,
            error_domain=VIR_FROM_QEMU)

    def _emit_lifecycle(self, dom, event, detail):
        # Only VIR_DOMAIN_EVENT_ID_LIFECYCLE callbacks are supported.
        if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
            return

        cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
        callback = cbinfo[0]
        opaque = cbinfo[1]
        callback(self, dom, event, detail, opaque)

    def defineXML(self, xml):
        """Create a persistent (non-running) domain from XML."""
        dom = Domain(connection=self, running=False, transient=False, xml=xml)
        self._vms[dom.name()] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
        return dom

    def createXML(self, xml, flags):
        """Create and immediately run a transient domain from XML."""
        dom = Domain(connection=self, running=True, transient=True, xml=xml)
        self._vms[dom.name()] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
        return dom

    def getType(self):
        # NOTE(review): implicitly returns None for non-system URIs --
        # presumably only qemu:///system is exercised; confirm callers.
        if self._uri == 'qemu:///system':
            return 'QEMU'

    def getLibVersion(self):
        return self.fakeLibVersion

    def getVersion(self):
        return self.fakeVersion

    def getHostname(self):
        return 'compute1'

    def domainEventRegisterAny(self, dom, eventid, callback, opaque):
        self._event_callbacks[eventid] = [callback, opaque]

    def registerCloseCallback(self, cb, opaque):
        pass

    def getCapabilities(self):
        """Return spoofed capabilities."""
        return '''<capabilities>
  <host>
    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
    <cpu>
      <arch>x86_64</arch>
      <model>Penryn</model>
      <vendor>Intel</vendor>
      <topology sockets='1' cores='2' threads='1'/>
      <feature name='xtpr'/>
      <feature name='tm2'/>
      <feature name='est'/>
      <feature name='vmx'/>
      <feature name='ds_cpl'/>
      <feature name='monitor'/>
      <feature name='pbe'/>
      <feature name='tm'/>
      <feature name='ht'/>
      <feature name='ss'/>
      <feature name='acpi'/>
      <feature name='ds'/>
      <feature name='vme'/>
    </cpu>
    <migration_features>
      <live/>
      <uri_transports>
        <uri_transport>tcp</uri_transport>
      </uri_transports>
    </migration_features>
    <secmodel>
      <model>apparmor</model>
      <doi>0</doi>
    </secmodel>
  </host>
  <guest>
    <os_type>hvm</os_type>
    <arch name='i686'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu</emulator>
      <machine>pc-0.14</machine>
      <machine canonical='pc-0.14'>pc</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/kvm</emulator>
        <machine>pc-0.14</machine>
        <machine canonical='pc-0.14'>pc</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <pae/>
      <nonpae/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='x86_64'>
      <wordsize>64</wordsize>
      <emulator>/usr/bin/qemu-system-x86_64</emulator>
      <machine>pc-0.14</machine>
      <machine canonical='pc-0.14'>pc</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/kvm</emulator>
        <machine>pc-0.14</machine>
        <machine canonical='pc-0.14'>pc</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='arm'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-arm</emulator>
      <machine>integratorcp</machine>
      <machine>vexpress-a9</machine>
      <machine>syborg</machine>
      <machine>musicpal</machine>
      <machine>mainstone</machine>
      <machine>n800</machine>
      <machine>n810</machine>
      <machine>n900</machine>
      <machine>cheetah</machine>
      <machine>sx1</machine>
      <machine>sx1-v1</machine>
      <machine>beagle</machine>
      <machine>beaglexm</machine>
      <machine>tosa</machine>
      <machine>akita</machine>
      <machine>spitz</machine>
      <machine>borzoi</machine>
      <machine>terrier</machine>
      <machine>connex</machine>
      <machine>verdex</machine>
      <machine>lm3s811evb</machine>
      <machine>lm3s6965evb</machine>
      <machine>realview-eb</machine>
      <machine>realview-eb-mpcore</machine>
      <machine>realview-pb-a8</machine>
      <machine>realview-pbx-a9</machine>
      <machine>versatilepb</machine>
      <machine>versatileab</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='mips'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-mips</emulator>
      <machine>malta</machine>
      <machine>mipssim</machine>
      <machine>magnum</machine>
      <machine>pica61</machine>
      <machine>mips</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='mipsel'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-mipsel</emulator>
      <machine>malta</machine>
      <machine>mipssim</machine>
      <machine>magnum</machine>
      <machine>pica61</machine>
      <machine>mips</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='sparc'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-sparc</emulator>
      <machine>SS-5</machine>
      <machine>leon3_generic</machine>
      <machine>SS-10</machine>
      <machine>SS-600MP</machine>
      <machine>SS-20</machine>
      <machine>Voyager</machine>
      <machine>LX</machine>
      <machine>SS-4</machine>
      <machine>SPARCClassic</machine>
      <machine>SPARCbook</machine>
      <machine>SS-1000</machine>
      <machine>SS-2000</machine>
      <machine>SS-2</machine>
      <domain type='qemu'>
      </domain>
    </arch>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='ppc'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-ppc</emulator>
      <machine>g3beige</machine>
      <machine>virtex-ml507</machine>
      <machine>mpc8544ds</machine>
      <machine canonical='bamboo-0.13'>bamboo</machine>
      <machine>bamboo-0.13</machine>
      <machine>bamboo-0.12</machine>
      <machine>ref405ep</machine>
      <machine>taihu</machine>
      <machine>mac99</machine>
      <machine>prep</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
</capabilities>'''

    def compareCPU(self, xml, flags):
        """Crude virConnectCompareCPU: arch/model/vendor must match the node."""
        tree = etree.fromstring(xml)

        arch_node = tree.find('./arch')
        if arch_node is not None:
            if arch_node.text not in ['x86_64', 'i686']:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        model_node = tree.find('./model')
        if model_node is not None:
            if model_node.text != node_cpu_model:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        vendor_node = tree.find('./vendor')
        if vendor_node is not None:
            if vendor_node.text != node_cpu_vendor:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        # The rest of the stuff libvirt implements is rather complicated
        # and I don't think it adds much value to replicate it here.
        return VIR_CPU_COMPARE_IDENTICAL

    def getCPUStats(self, cpuNum, flag):
        """Canned per-CPU stats for cpus 0 and 1; error for anything else."""
        if cpuNum < 2:
            return {'kernel': 5664160000000L,
                    'idle': 1592705190000000L,
                    'user': 26728850000000L,
                    'iowait': 6121490000000L}
        else:
            raise make_libvirtError(
                libvirtError,
                "invalid argument: Invalid cpu number",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)

    def nwfilterLookupByName(self, name):
        try:
            return self._nwfilters[name]
        except KeyError:
            raise make_libvirtError(
                libvirtError,
                "no nwfilter with matching name %s" % name,
                error_code=VIR_ERR_NO_NWFILTER,
                error_domain=VIR_FROM_NWFILTER)

    def nwfilterDefineXML(self, xml):
        nwfilter = NWFilter(self, xml)
        self._add_filter(nwfilter)

    def listDefinedDomains(self):
        return []

    def listDevices(self, cap, flags):
        return []

    def baselineCPU(self, cpu, flag):
        """Add new libvirt API."""
        return """<cpu mode='custom' match='exact'>
  <model fallback='allow'>Westmere</model>
  <vendor>Intel</vendor>
  <feature policy='require' name='aes'/>
</cpu>"""
def openAuth(uri, auth, flags):
    """Module-level fake of libvirt.openAuth().

    Validates the shape of the ``auth`` argument like the real binding
    (``[credtype_list, callback, ...]``), records that a connection has
    been handed out (checked by virEventRegisterDefaultImpl), and
    returns a Connection, read-only when VIR_CONNECT_RO is passed.
    """
    if type(auth) != list:
        raise Exception(_("Expected a list for 'auth' parameter"))

    # Bug fix: the check is for a list (the credential-type list), so the
    # message now says "list" instead of the previous, wrong, "function".
    if type(auth[0]) != list:
        raise Exception(
            _("Expected a list in 'auth[0]' parameter"))

    if not callable(auth[1]):
        raise Exception(
            _("Expected a function in 'auth[1]' parameter"))

    # Bug fix: without ``global`` this assignment created a local and the
    # module-level flag read by virEventRegisterDefaultImpl never changed.
    global connection_used
    connection_used = True

    return Connection(uri, (flags == VIR_CONNECT_RO))
def virEventRunDefaultImpl():
    # Fake one event-loop iteration: just block briefly like a real poll.
    time.sleep(1)
def virEventRegisterDefaultImpl():
    # Like real libvirt, event-loop registration must happen before any
    # connection is opened (openAuth sets connection_used).
    if connection_used:
        raise Exception(_("virEventRegisterDefaultImpl() must be \
called before connection is used."))
def registerErrorHandler(handler, ctxt):
    """Accepted for API compatibility; the fake library never reports errors."""
    return None
def make_libvirtError(error_class, msg, error_code=None,
                      error_domain=None, error_message=None,
                      error_level=None, str1=None, str2=None, str3=None,
                      int1=None, int2=None):
    """Convenience function for creating `libvirtError` exceptions which
    allow you to specify arguments in constructor without having to manipulate
    the `err` tuple directly.

    We need to pass in `error_class` to this function because it may be
    `libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on whether
    `libvirt-python` is installed.
    """
    # Assemble the 9-element err tuple first, in libvirt's field order,
    # then attach it to a freshly constructed exception.
    err_fields = (error_code, error_domain, error_message, error_level,
                  str1, str2, str3, int1, int2)
    exc = error_class(msg)
    exc.err = err_fields
    return exc
# Compatibility aliases: the real libvirt-python module exposes its
# classes under these names.
virDomain = Domain

virConnect = Connection
| |
# -*- coding: utf-8 -*-
"""
Simple folder synchronization using FTP.
(c) 2012-2021 Martin Wendt; see https://github.com/mar10/pyftpsync
Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php
Usage examples:
> pyftpsync.py --help
> pyftpsync.py upload . ftps://example.com/myfolder
"""
import argparse
import platform
import sys
from pprint import pprint
from ftpsync import __version__
from ftpsync.cli_common import (
common_parser,
creds_parser,
matcher_parser,
verbose_parser,
)
from ftpsync.run_command import add_run_parser, handle_run_command
from ftpsync.scan_command import add_scan_parser
from ftpsync.synchronizers import (
BiDirSynchronizer,
DownloadSynchronizer,
UploadSynchronizer,
)
from ftpsync.targets import FsTarget, make_target
from ftpsync.tree_command import add_tree_parser
from ftpsync.util import (
DEBUG_FLAGS,
PYTHON_VERSION,
check_cli_verbose,
namespace_to_dict,
set_pyftpsync_logger,
)
# ===============================================================================
# run
# ===============================================================================
def run():
    """CLI main entry point.

    Builds the argparse tree (upload / download / sync / run / scan /
    tree sub-commands), parses the command line, resolves local and
    remote targets, and dispatches to the matching synchronizer.
    Exits with status 3 on KeyboardInterrupt.
    """
    # Use print() instead of logging when running in CLI mode:
    set_pyftpsync_logger(None)

    parser = argparse.ArgumentParser(
        description="Synchronize folders over FTP.",
        epilog="See also https://github.com/mar10/pyftpsync",
        parents=[verbose_parser],
    )

    # Note: we want to allow --version to be combined with --verbose. However
    # on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
    if check_cli_verbose(3) > 3:
        version_info = "pyftpsync/{} Python/{} {}".format(
            __version__, PYTHON_VERSION, platform.platform()
        )
    else:
        version_info = "{}".format(__version__)
    parser.add_argument("-V", "--version", action="version", version=version_info)

    subparsers = parser.add_subparsers(help="sub-command help")

    # --- Create the parser for the "upload" command ---------------------------

    sp = subparsers.add_parser(
        "upload",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files to remote folder",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite remote files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["local", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove remote files if they don't exist locally",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove remote files if they don't exist locally "
        "or don't match the current filter (implies '--delete' option)",
    )

    sp.set_defaults(command="upload")

    # --- Create the parser for the "download" command -------------------------

    sp = subparsers.add_parser(
        "download",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files from remote folder to local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite local files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove local files if they don't exist on remote target",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove local files if they don't exist on remote target "
        "or don't match the current filter (implies '--delete' option)",
    )

    sp.set_defaults(command="download")

    # --- Create the parser for the "sync" command -----------------------------

    sp = subparsers.add_parser(
        "sync",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="synchronize new and modified files between remote folder and local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["old", "new", "local", "remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )

    sp.set_defaults(command="sync")

    # --- Create the parser for the "run" command -----------------------------

    add_run_parser(subparsers)

    # --- Create the parser for the "scan" command -----------------------------

    add_scan_parser(subparsers)

    # --- Create the parser for the "tree" command -----------------------------

    add_tree_parser(subparsers)

    # --- Parse command line ---------------------------------------------------

    args = parser.parse_args()

    # Net verbosity: every -q cancels one -v.
    args.verbose -= args.quiet
    del args.quiet

    # print("verbose", args.verbose)
    ftp_debug = 0
    if args.verbose >= 6:
        ftp_debug = 1

    if args.debug:
        if args.verbose < 4:
            parser.error("'--debug' requires verbose level >= 4")
        DEBUG_FLAGS.update(args.debug)

    # Modify the `args` from the `pyftpsync.yaml` config:
    if getattr(args, "command", None) == "run":
        handle_run_command(parser, args)

    # Sub-commands registered with a callable `command` (e.g. scan/tree)
    # handle everything themselves and return here.
    if callable(getattr(args, "command", None)):
        # scan_handler
        try:
            return args.command(parser, args)
        except KeyboardInterrupt:
            print("\nAborted by user.", file=sys.stderr)
            sys.exit(3)
    elif not hasattr(args, "command"):
        parser.error(
            "missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
        )

    # Post-process and check arguments
    if hasattr(args, "delete_unmatched") and args.delete_unmatched:
        args.delete = True

    args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})

    if args.remote == ".":
        parser.error("'.' is expected to be the local target (not remote)")
    args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
    if not isinstance(args.local_target, FsTarget) and isinstance(
        args.remote_target, FsTarget
    ):
        parser.error("a file system target is expected to be local")

    # Let the command handler do its thing
    opts = namespace_to_dict(args)
    if args.command == "upload":
        s = UploadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "download":
        s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "sync":
        s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
    else:
        parser.error("unknown command '{}'".format(args.command))

    s.is_script = True

    try:
        s.run()
    except KeyboardInterrupt:
        print("\nAborted by user.", file=sys.stderr)
        sys.exit(3)
    finally:
        # Prevent sporadic exceptions in ftplib, when closing in __del__
        s.local.close()
        s.remote.close()

    stats = s.get_stats()
    if args.verbose >= 5:
        pprint(stats)
    elif args.verbose >= 1:
        if args.dry_run:
            print("(DRY-RUN) ", end="")
        print(
            "Wrote {}/{} files in {} directories, skipped: {}.".format(
                stats["files_written"],
                stats["local_files"],
                stats["local_dirs"],
                stats["conflict_files_skipped"],
            ),
            end="",
        )
        if stats["interactive_ask"]:
            print()
        else:
            print(" Elap: {}.".format(stats["elap_str"]))

    return
# Script entry point
if __name__ == "__main__":
    # freeze_support() is a no-op except in frozen Windows executables,
    # where it must run before any multiprocessing use -- "just in case".
    from multiprocessing import freeze_support

    freeze_support()
    run()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from toscaparser.tests.base import TestCase
import translator.common.utils
class CommonUtilsTest(TestCase):
MemoryUnit = translator.common.utils.MemoryUnit
cmpUtils = translator.common.utils.CompareUtils
yamlUtils = translator.common.utils.YamlUtils
UrlUtils = translator.common.utils.UrlUtils
def test_convert_unit_size_to_num(self):
size = '1 TB'
num_to_convert = 'GB'
expected_output = 1000
output = self.MemoryUnit.convert_unit_size_to_num(size, num_to_convert)
self.assertEqual(output, expected_output)
size = '40 GB'
num_to_convert = 'MB'
expected_output = 40000
output = self.MemoryUnit.convert_unit_size_to_num(size, num_to_convert)
self.assertEqual(output, expected_output)
size = '20 B'
num_to_convert = None
expected_output = 20
output = self.MemoryUnit.convert_unit_size_to_num(size, num_to_convert)
self.assertEqual(output, expected_output)
def test_validate_unit(self):
unit = 'AB'
exp_msg = ('Provided unit "{0}" is not valid. The valid units are '
'{1}').format(unit, self.MemoryUnit.UNIT_SIZE_DICT.keys())
try:
self.MemoryUnit.validate_unit(unit)
except Exception as err:
self.assertTrue(
isinstance(err, ValueError))
self.assertEqual(exp_msg, err.__str__())
def test_unit_size_conversion_to_GNU_standard(self):
unit = 'gB'
standard_unit = 'GB'
converted_unit = self.MemoryUnit.validate_unit(unit)
self.assertEqual(converted_unit, standard_unit)
unit = 'KB'
standard_unit = 'kB'
converted_unit = self.MemoryUnit.validate_unit(unit)
self.assertEqual(converted_unit, standard_unit)
unit = 'kb'
standard_unit = 'kB'
converted_unit = self.MemoryUnit.validate_unit(unit)
self.assertEqual(converted_unit, standard_unit)
unit = 'kB'
standard_unit = 'kB'
converted_unit = self.MemoryUnit.validate_unit(unit)
self.assertEqual(converted_unit, standard_unit)
unit = 'MIB'
standard_unit = 'MiB'
converted_unit = self.MemoryUnit.validate_unit(unit)
self.assertEqual(converted_unit, standard_unit)
def test_str_to_num_value_error(self):
str_to_convert = '55063.000000'
expected_output = 55063.0
output = translator.common.utils.str_to_num(str_to_convert)
self.assertEqual(output, expected_output)
def test_compare_dicts_unequal(self):
dict1 = {'allowed_values': [1, 2, 4, 8],
'server3': {'depends_on': ['server1', 'server2']}}
dict2 = {'allowed_values': [1, 2, 4, 8],
'server3': {'depends_on': ['server2', 'server1']}}
self.assertFalse(self.cmpUtils.compare_dicts(dict1, dict2))
def test_dicts_equivalent_empty_dicts(self):
self.assertTrue(self.cmpUtils.compare_dicts(None, None))
self.assertFalse(self.cmpUtils.compare_dicts(None, {}))
self.assertFalse(self.cmpUtils.compare_dicts(None, {'x': '2'}))
def test_compareutils_reorder(self):
dic = {'output': {'website_url': {'value': {'get_attr':
['server', 'networks',
'private', 0]}}},
'allowed_values': [2, 8, 1, 4],
'server3': {'depends_on': ['server2', 'server1']}}
reordered_dic = {'output': {'website_url': {'value': {'get_attr':
['server', 'networks',
'private', 0]}}},
'allowed_values': [1, 2, 4, 8],
'server3': {'depends_on': ['server1', 'server2']}}
self.assertEqual(reordered_dic, self.cmpUtils.reorder(dic))
def test_compareutils_diff_dicts_both_null(self):
expected = None
provided = None
self.assertEqual({},
self.cmpUtils.diff_dicts(expected, provided))
def test_compareutils_diff_dicts_one_null(self):
expected = {'keyname': 'userkey'}
provided = None
self.assertEqual(
{self.cmpUtils.MISMATCH_VALUE1_LABEL: {'keyname': 'userkey'},
self.cmpUtils.MISMATCH_VALUE2_LABEL: None},
self.cmpUtils.diff_dicts(expected, provided))
def test_compareutils_diff_dicts_missing_key(self):
expected = {'server3': {'depends_on': ['server1', 'server2'],
'keyname': 'userkey'}}
provided = {'server3': {'depends_on': ['server2', 'server1']}}
self.assertEqual(
{'server3': {'keyname':
{self.cmpUtils.MISMATCH_VALUE1_LABEL: 'userkey',
self.cmpUtils.MISMATCH_VALUE2_LABEL: None}}},
self.cmpUtils.diff_dicts(expected, provided))
def test_compareutils_diff_dicts_missing_key_other_dict(self):
expected = {'server3': {'depends_on': ['server1', 'server2']}}
provided = {'server3': {'depends_on': ['server2', 'server1'],
'keyname': 'userkey'}}
self.assertEqual(
{'server3': {'keyname':
{self.cmpUtils.MISMATCH_VALUE1_LABEL: None,
self.cmpUtils.MISMATCH_VALUE2_LABEL: 'userkey'}}},
self.cmpUtils.diff_dicts(expected, provided))
def test_compareutils_diff_dicts_value_diff(self):
expected = \
{'output':
{'website_url':
{'value':
{'get_attr': ['server', 'networks', 'private', 0]}}},
'server3': {'depends_on': ['server2', 'server1']}}
provided = \
{'output':
{'website_url':
{'value':
{'get_attr': ['server', 'networks', 'public', 0]}}},
'server3': {'depends_on': ['server2', 'server1']}}
self.assertEqual(
{'output':
{'website_url':
{'value':
{'get_attr':
{self.cmpUtils.MISMATCH_VALUE1_LABEL:
['server', 'networks', 'private', 0],
self.cmpUtils.MISMATCH_VALUE2_LABEL:
['server', 'networks', 'public', 0]}}}}},
self.cmpUtils.diff_dicts(expected, provided))
def test_yamlutils_get_dict_missing_file(self):
self.assertIsNone(self.yamlUtils.get_dict('./no_file.yaml'))
def test_yamlutils_get_dict(self):
yaml_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../tests/data/custom_types/rsyslog.yaml')
dict = \
{'tosca_definitions_version': 'tosca_simple_yaml_1_0',
'description':
'RSYSLOG is the Rocket-fast SYStem for LOG processing.\n',
'node_types':
{'tosca.nodes.SoftwareComponent.Rsyslog':
{'derived_from': 'tosca.nodes.SoftwareComponent',
'requirements':
[{'log_endpoint':
{'capability': 'tosca.capabilities.Endpoint',
'node': 'tosca.nodes.SoftwareComponent.Logstash',
'relationship': 'tosca.relationships.ConnectsTo'}}]}}}
self.assertEqual(dict, self.yamlUtils.get_dict(yaml_file))
def test_yamlutils_compare_yamls(self):
yaml_file1 = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../tests/data/custom_types/kibana.yaml')
yaml_file2 = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../tests/data/custom_types/collectd.yaml')
self.assertTrue(self.yamlUtils.compare_yamls(yaml_file1, yaml_file1))
self.assertFalse(self.yamlUtils.compare_yamls(yaml_file1, yaml_file2))
def test_yamlutils_compare_yaml_dict(self):
yaml_file1 = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../tests/data/custom_types/rsyslog.yaml')
yaml_file2 = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../tests/data/custom_types/collectd.yaml')
dict = \
{'tosca_definitions_version': 'tosca_simple_yaml_1_0',
'description':
'RSYSLOG is the Rocket-fast SYStem for LOG processing.\n',
'node_types':
{'tosca.nodes.SoftwareComponent.Rsyslog':
{'derived_from': 'tosca.nodes.SoftwareComponent',
'requirements':
[{'log_endpoint':
{'capability': 'tosca.capabilities.Endpoint',
'node': 'tosca.nodes.SoftwareComponent.Logstash',
'relationship': 'tosca.relationships.ConnectsTo'}}]}}}
self.assertEqual({}, self.cmpUtils.diff_dicts(
self.yamlUtils.get_dict(yaml_file1), dict))
self.assertFalse(self.yamlUtils.compare_yaml_dict(yaml_file2, dict))
def test_assert_value_is_num(self):
value = 1
output = translator.common.utils.str_to_num(value)
self.assertEqual(value, output)
def test_urlutils_validate_url(self):
self.assertTrue(self.UrlUtils.validate_url("http://www.github.com/"))
self.assertTrue(
self.UrlUtils.validate_url("https://github.com:81/a/2/a.b"))
self.assertTrue(self.UrlUtils.validate_url("ftp://github.com"))
self.assertFalse(self.UrlUtils.validate_url("github.com"))
self.assertFalse(self.UrlUtils.validate_url("123"))
self.assertFalse(self.UrlUtils.validate_url("a/b/c"))
def test_get_dict_value(self):
    """get_dict_value collects every value stored under a given key."""
    single_snippet = \
        {'nodejs_create_config':
         {'type': 'tosca.nodes.SoftwareConfig',
          'properties':
          {'config':
           {'get_file': 'create.sh'}}}}
    actual_output_single_snippet = []
    ex_output_single_snippet = ['create.sh']
    # Results are appended into the caller-supplied list argument.
    translator.common.utils.get_dict_value(single_snippet, "get_file",
                                           actual_output_single_snippet)
    self.assertEqual(actual_output_single_snippet,
                     ex_output_single_snippet)
    multi_snippet = \
        {'resources':
         {'nodejs_create_config':
          {'type': 'tosca.nodes.SoftwareConfig',
           'properties':
           {'config':
            {'get_file': 'nodejs/create.sh'}}},
          'mongodb_create_config':
          {'type': 'tosca.nodes.SoftwareConfig',
           'properties':
           {'config':
            {'get_file': 'mongodb/create.sh'}}}}}
    actual_output_multi_snippet = []
    # Expected list is in sorted order; traversal order is not guaranteed,
    # hence the sorted() on the actual output below.
    ex_output_multi_snippet = ['mongodb/create.sh',
                               'nodejs/create.sh']
    translator.common.utils.get_dict_value(multi_snippet, "get_file",
                                           actual_output_multi_snippet)
    self.assertEqual(sorted(actual_output_multi_snippet),
                     ex_output_multi_snippet)
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from cinderclient.v2.contrib import list_extensions as cinder_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)

# API static values
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'

# Available consumer choices associated with QOS Specs
CONSUMER_CHOICES = (
    ('back-end', _('back-end')),
    ('front-end', _('front-end')),
    ('both', pgettext_lazy('Both of front-end and back-end', u'both')),
)

# Volume API version manager; v2 is registered below when available.
VERSIONS = base.APIVersionManager("volume", preferred_version=2)

try:
    from cinderclient.v2 import client as cinder_client_v2
    VERSIONS.load_supported_version(2, {"client": cinder_client_v2,
                                        "version": 2})
except ImportError:
    # cinderclient built without v2 support; only v1 stays registered.
    pass
class BaseCinderAPIResourceWrapper(base.APIResourceWrapper):
    """Wrapper smoothing over v1/v2 attribute-name differences."""

    @property
    def name(self):
        """Resource name, falling back to display_name, then to id."""
        value = getattr(self._apiresource, 'name', None)
        if not value:
            value = getattr(self._apiresource, 'display_name', None)
        if not value:
            value = getattr(self._apiresource, 'id', None)
        return value

    @property
    def description(self):
        """Description, falling back to the v1 display_description."""
        text = getattr(self._apiresource, 'description', None)
        if text:
            return text
        return getattr(self._apiresource, 'display_description', None)
class Volume(BaseCinderAPIResourceWrapper):
    """A cinder volume resource."""

    # Attributes copied through from the underlying API resource.
    _attrs = ['id', 'name', 'description', 'size', 'status', 'created_at',
              'volume_type', 'availability_zone', 'imageRef', 'bootable',
              'snapshot_id', 'source_volid', 'attachments', 'tenant_name',
              'os-vol-host-attr:host', 'os-vol-tenant-attr:tenant_id',
              'metadata', 'volume_image_metadata', 'encrypted', 'transfer']

    @property
    def is_bootable(self):
        # The API reports bootable as the string 'true'/'false'.
        return self.bootable == 'true'
class VolumeSnapshot(BaseCinderAPIResourceWrapper):
    """A cinder volume snapshot resource."""

    _attrs = ['id', 'name', 'description', 'size', 'status',
              'created_at', 'volume_id',
              'os-extended-snapshot-attributes:project_id']


class VolumeType(BaseCinderAPIResourceWrapper):
    """A cinder volume type resource."""

    _attrs = ['id', 'name', 'extra_specs', 'created_at',
              'os-extended-snapshot-attributes:project_id']
class VolumeBackup(BaseCinderAPIResourceWrapper):
    """A cinder volume backup resource.

    The source Volume wrapper (when known) is attached via the ``volume``
    property by callers; it is not part of the API resource itself.
    """

    _attrs = ['id', 'name', 'description', 'container', 'size', 'status',
              'created_at', 'volume_id', 'availability_zone']
    # Backing Volume wrapper, populated lazily by callers.
    _volume = None

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, value):
        self._volume = value
class VolTypeExtraSpec(object):
    """One extra-spec key/value pair attached to a volume type."""

    def __init__(self, type_id, key, val):
        # The key doubles as the row identifier in dashboard tables.
        self.value = val
        self.key = key
        self.id = key
        self.type_id = type_id
class QosSpec(object):
    """One key/value entry of a QOS spec."""

    def __init__(self, id, key, val):
        # Note: 'id' intentionally mirrors the parent spec's id.
        self.value = val
        self.key = key
        self.id = id
class VolumeTransfer(base.APIResourceWrapper):
    """A cinder volume-transfer resource."""

    _attrs = ['id', 'name', 'created_at', 'volume_id', 'auth_key']
@memoized
def cinderclient(request):
    """Build (and memoize per request) a cinderclient bound to the token.

    Resolves the 'volumev2' endpoint when v2 is active and reuses the
    keystone token instead of re-authenticating.
    """
    api_version = VERSIONS.get_active_version()

    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    cinder_url = ""
    try:
        # The cinder client assumes that the v2 endpoint type will be
        # 'volumev2'.
        if api_version['version'] == 2:
            try:
                cinder_url = base.url_for(request, 'volumev2')
            except exceptions.ServiceCatalogException:
                # Fall through with an empty URL; the warning tells the
                # operator why v2 calls will not work.
                LOG.warning("Cinder v2 requested but no 'volumev2' service "
                            "type available in Keystone catalog.")
    except exceptions.ServiceCatalogException:
        LOG.debug('no volume service configured.')
        raise
    c = api_version['client'].Client(request.user.username,
                                     request.user.token.id,
                                     project_id=request.user.tenant_id,
                                     auth_url=cinder_url,
                                     insecure=insecure,
                                     cacert=cacert,
                                     http_log_debug=settings.DEBUG)
    # Reuse the existing keystone token rather than re-authenticating.
    c.client.auth_token = request.user.token.id
    c.client.management_url = cinder_url
    return c
def _replace_v2_parameters(data):
    """Rewrite v2-style 'name'/'description' keys for v1 endpoints.

    Mutates and returns *data*: on API versions older than v2 the keys
    are renamed to 'display_name'/'display_description'.
    """
    if VERSIONS.active >= 2:
        return data
    data['display_name'] = data.pop('name')
    data['display_description'] = data.pop('description')
    return data
def version_get():
    """Return the integer version number of the active volume API."""
    api_version = VERSIONS.get_active_version()
    return api_version['version']
def volume_list(request, search_opts=None):
    """List Volume wrappers, each annotated with any pending transfer.

    To see all volumes in the cloud as an admin, pass in the special
    search option: {'all_tenants': 1}.
    """
    client = cinderclient(request)
    if client is None:
        return []
    # Index pending transfers by volume id so each volume can carry its own.
    transfers = {}
    for xfer in transfer_list(request, search_opts=search_opts):
        transfers[xfer.volume_id] = xfer
    wrapped = []
    for vol in client.volumes.list(search_opts=search_opts):
        vol.transfer = transfers.get(vol.id)
        wrapped.append(Volume(vol))
    return wrapped
def volume_get(request, volume_id):
    """Fetch one volume, resolving attachment names and pending transfer."""
    volume_data = cinderclient(request).volumes.get(volume_id)

    for attachment in volume_data.attachments:
        if "server_id" in attachment:
            instance = nova.server_get(request, attachment['server_id'])
            attachment['instance_name'] = instance.name
        else:
            # Nova volume can occasionally send back error'd attachments
            # that lack a server_id property; to work around that we'll
            # give the attached instance a generic name.
            attachment['instance_name'] = _("Unknown instance")

    volume_data.transfer = None
    if volume_data.status == 'awaiting-transfer':
        # Attach the matching pending transfer so the UI can display it.
        for transfer in transfer_list(request):
            if transfer.volume_id == volume_id:
                volume_data.transfer = transfer
                break

    return Volume(volume_data)
def volume_create(request, size, name, description, volume_type,
                  snapshot_id=None, metadata=None, image_id=None,
                  availability_zone=None, source_volid=None):
    """Create a volume of *size* GiB and return it wrapped as a Volume."""
    fields = dict(name=name,
                  description=description,
                  volume_type=volume_type,
                  snapshot_id=snapshot_id,
                  metadata=metadata,
                  imageRef=image_id,
                  availability_zone=availability_zone,
                  source_volid=source_volid)
    # Translate key names for v1 endpoints if necessary.
    fields = _replace_v2_parameters(fields)
    return Volume(cinderclient(request).volumes.create(size, **fields))
def volume_extend(request, volume_id, new_size):
    """Grow a volume to *new_size*."""
    return cinderclient(request).volumes.extend(volume_id, new_size)


def volume_delete(request, volume_id):
    """Delete a volume."""
    return cinderclient(request).volumes.delete(volume_id)


def volume_retype(request, volume_id, new_type, migration_policy):
    """Change a volume's type, migrating according to *migration_policy*."""
    return cinderclient(request).volumes.retype(volume_id,
                                                new_type,
                                                migration_policy)


def volume_set_bootable(request, volume_id, bootable):
    """Mark or unmark a volume as bootable."""
    return cinderclient(request).volumes.set_bootable(volume_id,
                                                      bootable)
def volume_update(request, volume_id, name, description):
    """Update a volume's name and description."""
    fields = _replace_v2_parameters({'name': name,
                                     'description': description})
    return cinderclient(request).volumes.update(volume_id, **fields)
def volume_reset_state(request, volume_id, state):
    """Administratively force a volume into *state*."""
    return cinderclient(request).volumes.reset_state(volume_id, state)


def volume_upload_to_image(request, volume_id, force, image_name,
                           container_format, disk_format):
    """Upload a volume's contents to the image service as a new image."""
    return cinderclient(request).volumes.upload_to_image(volume_id,
                                                         force,
                                                         image_name,
                                                         container_format,
                                                         disk_format)


def volume_get_encryption_metadata(request, volume_id):
    """Fetch the encryption metadata for a volume."""
    return cinderclient(request).volumes.get_encryption_metadata(volume_id)
def volume_snapshot_get(request, snapshot_id):
    """Fetch one snapshot wrapped as a VolumeSnapshot."""
    snapshot = cinderclient(request).volume_snapshots.get(snapshot_id)
    return VolumeSnapshot(snapshot)


def volume_snapshot_list(request, search_opts=None):
    """List snapshots; returns [] when no volume service is configured."""
    c_client = cinderclient(request)
    if c_client is None:
        return []
    return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list(
        search_opts=search_opts)]
def volume_snapshot_create(request, volume_id, name,
                           description=None, force=False):
    """Snapshot *volume_id* and return the wrapped VolumeSnapshot."""
    fields = _replace_v2_parameters({'name': name,
                                     'description': description,
                                     'force': force})
    snapshot = cinderclient(request).volume_snapshots.create(
        volume_id, **fields)
    return VolumeSnapshot(snapshot)
def volume_snapshot_delete(request, snapshot_id):
    """Delete a snapshot."""
    return cinderclient(request).volume_snapshots.delete(snapshot_id)


def volume_snapshot_update(request, snapshot_id, name, description):
    """Rename/re-describe a snapshot (v1/v2 key names handled)."""
    snapshot_data = {'name': name,
                     'description': description}
    snapshot_data = _replace_v2_parameters(snapshot_data)
    return cinderclient(request).volume_snapshots.update(snapshot_id,
                                                         **snapshot_data)


def volume_snapshot_reset_state(request, snapshot_id, state):
    """Administratively force a snapshot into *state*."""
    return cinderclient(request).volume_snapshots.reset_state(
        snapshot_id, state)
@memoized
def volume_backup_supported(request):
    """Determine whether cinder backups are enabled for this deployment."""
    # TODO(lcheng) Cinder does not expose the information if cinder
    # backup is configured yet. This is a workaround until that
    # capability is available.
    # https://bugs.launchpad.net/cinder/+bug/1334856
    cinder_config = getattr(settings, 'OPENSTACK_CINDER_FEATURES', {})
    return cinder_config.get('enable_backup', False)
def volume_backup_get(request, backup_id):
    """Fetch one backup wrapped as a VolumeBackup."""
    backup = cinderclient(request).backups.get(backup_id)
    return VolumeBackup(backup)


def volume_backup_list(request):
    """List backups; returns [] when no volume service is configured."""
    c_client = cinderclient(request)
    if c_client is None:
        return []
    return [VolumeBackup(b) for b in c_client.backups.list()]
def volume_backup_create(request,
                         volume_id,
                         container_name,
                         name,
                         description):
    """Back up *volume_id* into *container_name* and wrap the result."""
    client = cinderclient(request)
    backup = client.backups.create(volume_id,
                                   container=container_name,
                                   name=name,
                                   description=description)
    return VolumeBackup(backup)
def volume_backup_delete(request, backup_id):
    """Delete a backup."""
    return cinderclient(request).backups.delete(backup_id)


def volume_backup_restore(request, backup_id, volume_id):
    """Restore *backup_id* onto *volume_id*."""
    return cinderclient(request).restores.restore(backup_id=backup_id,
                                                  volume_id=volume_id)


def tenant_quota_get(request, tenant_id):
    """Fetch a tenant's quotas; empty QuotaSet if cinder is unavailable."""
    c_client = cinderclient(request)
    if c_client is None:
        return base.QuotaSet()
    return base.QuotaSet(c_client.quotas.get(tenant_id))


def tenant_quota_update(request, tenant_id, **kwargs):
    """Update a tenant's quotas."""
    return cinderclient(request).quotas.update(tenant_id, **kwargs)


def default_quota_get(request, tenant_id):
    """Fetch the default quotas for a tenant."""
    return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
def volume_type_list_with_qos_associations(request):
    """Return all volume types, each annotated with its QOS spec name.

    Types without an association get an empty associated_qos_spec.
    """
    types = volume_type_list(request)
    # Index the types by id so associations can be attached in O(1).
    by_id = {}
    for vtype in types:
        vtype.associated_qos_spec = ""
        by_id[vtype.id] = vtype
    for spec in qos_spec_list(request):
        # Stamp the spec's name onto every type it is associated with.
        for assoc in qos_spec_get_associations(request, spec.id):
            by_id[assoc.id].associated_qos_spec = spec.name
    return types
def default_quota_update(request, **kwargs):
    """Update the 'default' quota class."""
    cinderclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)


def volume_type_list(request):
    """List all volume types."""
    return cinderclient(request).volume_types.list()


def volume_type_create(request, name):
    """Create a new volume type named *name*."""
    return cinderclient(request).volume_types.create(name)


def volume_type_delete(request, volume_type_id):
    """Delete a volume type."""
    return cinderclient(request).volume_types.delete(volume_type_id)


def volume_type_get(request, volume_type_id):
    """Fetch one volume type."""
    return cinderclient(request).volume_types.get(volume_type_id)


def volume_encryption_type_create(request, volume_type_id, data):
    """Attach encryption settings *data* to a volume type."""
    return cinderclient(request).volume_encryption_types.create(volume_type_id,
                                                                specs=data)


def volume_encryption_type_delete(request, volume_type_id):
    """Remove the encryption settings of a volume type."""
    return cinderclient(request).volume_encryption_types.delete(volume_type_id)


def volume_encryption_type_get(request, volume_type_id):
    """Fetch the encryption settings of a volume type."""
    return cinderclient(request).volume_encryption_types.get(volume_type_id)


def volume_encryption_type_list(request):
    """List the encryption settings of all volume types."""
    return cinderclient(request).volume_encryption_types.list()
def volume_type_extra_get(request, type_id, raw=False):
    """Fetch a volume type's extra specs.

    Returns the raw dict when *raw* is true, otherwise a list of
    VolTypeExtraSpec wrappers.
    """
    specs = volume_type_get(request, type_id).get_keys()
    if raw:
        return specs
    return [VolTypeExtraSpec(type_id, k, v) for k, v in specs.items()]
def volume_type_extra_set(request, type_id, metadata):
    """Set extra-spec *metadata* on a volume type.

    Returns None without touching the API when *metadata* is empty.
    """
    # Check before fetching the type: previously an empty update still
    # issued a useless GET against cinder before returning None.
    if not metadata:
        return None
    vol_type = volume_type_get(request, type_id)
    return vol_type.set_keys(metadata)
def volume_type_extra_delete(request, type_id, keys):
    """Remove an extra spec from a volume type.

    NOTE(review): despite the plural name, *keys* is treated as a single
    key here — it is wrapped into a one-element list. Confirm against
    callers before passing an actual list.
    """
    vol_type = volume_type_get(request, type_id)
    return vol_type.unset_keys([keys])
def qos_spec_list(request):
    """List all QOS specs."""
    return cinderclient(request).qos_specs.list()


def qos_spec_get(request, qos_spec_id):
    """Fetch one QOS spec."""
    return cinderclient(request).qos_specs.get(qos_spec_id)


def qos_spec_delete(request, qos_spec_id):
    """Delete a QOS spec even if it still has associations (force=True)."""
    return cinderclient(request).qos_specs.delete(qos_spec_id, force=True)


def qos_spec_create(request, name, specs):
    """Create a QOS spec with the given key/value *specs*."""
    return cinderclient(request).qos_specs.create(name, specs)
def qos_spec_get_keys(request, qos_spec_id, raw=False):
    """Fetch a QOS spec's key/value pairs.

    Returns the spec object itself when *raw* is true, otherwise a list
    of QosSpec wrappers.
    """
    spec = qos_spec_get(request, qos_spec_id)
    if raw:
        return spec
    return [QosSpec(qos_spec_id, k, v) for k, v in spec.specs.items()]
def qos_spec_set_keys(request, qos_spec_id, specs):
    """Add or update key/value pairs on a QOS spec."""
    return cinderclient(request).qos_specs.set_keys(qos_spec_id, specs)


def qos_spec_unset_keys(request, qos_spec_id, specs):
    """Remove key/value pairs from a QOS spec."""
    return cinderclient(request).qos_specs.unset_keys(qos_spec_id, specs)


def qos_spec_associate(request, qos_specs, vol_type_id):
    """Associate a QOS spec with a volume type."""
    return cinderclient(request).qos_specs.associate(qos_specs, vol_type_id)


def qos_spec_disassociate(request, qos_specs, vol_type_id):
    """Break the association between a QOS spec and a volume type."""
    return cinderclient(request).qos_specs.disassociate(qos_specs, vol_type_id)


def qos_spec_get_associations(request, qos_spec_id):
    """List the volume types associated with a QOS spec."""
    return cinderclient(request).qos_specs.get_associations(qos_spec_id)
@memoized
def tenant_absolute_limits(request):
    """Return the tenant's absolute cinder limits as a name -> value dict.

    Negative values are normalized: stale total.*Used counters become 0,
    while the -1 "unlimited" quota marker becomes float('inf').
    """
    results = {}
    for limit in cinderclient(request).limits.get().absolute:
        value = limit.value
        if value < 0:
            # In some cases, the absolute limits data in Cinder can get
            # out of sync, leaving negative usage counters; clamp those
            # to 0 and translate -1 quota markers to infinity.
            is_used_counter = (limit.name.startswith('total') and
                               limit.name.endswith('Used'))
            value = 0 if is_used_counter else float("inf")
        results[limit.name] = value
    return results
def service_list(request):
    """List cinder services (admin only)."""
    return cinderclient(request).services.list()


def availability_zone_list(request, detailed=False):
    """List block-storage availability zones."""
    return cinderclient(request).availability_zones.list(detailed=detailed)


@memoized
def list_extensions(request):
    """List the API extensions advertised by cinder (memoized)."""
    return cinder_list_extensions.ListExtManager(cinderclient(request))\
        .show_all()
@memoized
def extension_supported(request, extension_name):
    """Return whether cinder advertises the named API extension."""
    return any(extension.name == extension_name
               for extension in list_extensions(request))
def transfer_list(request, detailed=True, search_opts=None):
    """List volume transfers as VolumeTransfer wrappers.

    To see all volume transfers as an admin, pass in the special
    search option: {'all_tenants': 1}.
    """
    c_client = cinderclient(request)
    # Guard against a missing volume service, matching volume_list(),
    # volume_snapshot_list() and volume_backup_list().
    if c_client is None:
        return []
    return [VolumeTransfer(v) for v in c_client.transfers.list(
        detailed=detailed, search_opts=search_opts)]
def transfer_get(request, transfer_id):
    """Fetch one volume transfer."""
    transfer_data = cinderclient(request).transfers.get(transfer_id)
    return VolumeTransfer(transfer_data)


def transfer_create(request, transfer_id, name):
    """Create a transfer for a volume.

    NOTE(review): the first positional argument of transfers.create()
    is a volume id; the local name 'transfer_id' looks misleading —
    confirm against callers.
    """
    volume = cinderclient(request).transfers.create(transfer_id, name)
    return VolumeTransfer(volume)


def transfer_accept(request, transfer_id, auth_key):
    """Accept a pending transfer using its auth key."""
    return cinderclient(request).transfers.accept(transfer_id, auth_key)


def transfer_delete(request, transfer_id):
    """Cancel/delete a pending transfer."""
    return cinderclient(request).transfers.delete(transfer_id)
| |
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
import itertools
import json
import multiprocessing
import optparse
import os
from os.path import getmtime, isdir, join
import platform
import random
import shlex
import subprocess
import sys
import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local.variants import ALL_VARIANTS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context
# Base dir of the v8 checkout to be used as cwd.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Default GN output directory, scanned by the --gn auto-detection.
DEFAULT_OUT_GN = "out.gn"

# Architecture of the host machine, used for 'auto'/'native' --arch.
ARCH_GUESS = utils.DefaultArch()

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
  # This needs to stay in sync with test/bot_default.isolate.
  "bot_default": [
    "mjsunit",
    "cctest",
    "debugger",
    "inspector",
    "webkit",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
  ],
  # This needs to stay in sync with test/default.isolate.
  "default": [
    "mjsunit",
    "cctest",
    "debugger",
    "inspector",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
  ],
  # This needs to stay in sync with test/optimize_for_size.isolate.
  "optimize_for_size": [
    "mjsunit",
    "cctest",
    "debugger",
    "inspector",
    "webkit",
    "intl",
  ],
  "unittests": [
    "unittests",
  ],
}
# Per-test timeout in seconds (scaled per mode/arch elsewhere).
TIMEOUT_DEFAULT = 60

# Variants run by default.
VARIANTS = ["default", "turbofan", "ignition_staging"]

# Additional variants, selected via the 'more' alias or
# --exhaustive-variants.
MORE_VARIANTS = [
  "ignition",
  "stress",
  "turbofan_opt",
  "asm_wasm",
  "wasm_traps",
]

EXHAUSTIVE_VARIANTS = VARIANTS + MORE_VARIANTS

VARIANT_ALIASES = {
  # The default for developer workstations.
  "dev": VARIANTS,
  # Additional variants, run on all bots.
  "more": MORE_VARIANTS,
  # Additional variants, run on a subset of bots.
  "extra": ["nocrankshaft"],
}

# Flags added to every test run in debug builds.
DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
               "--nofold-constants", "--enable-slow-asserts",
               "--verify-heap"]
# Flags added to every test run in release builds.
RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants"]

# Build/run configurations: flags, timeout scaling, which status-file
# section applies, and where the build output lives.
MODES = {
  "debug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "debug",
  },
  "optdebug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "optdebug",
  },
  "release": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "release",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # Normal trybot release configuration. There, dchecks are always on which
  # implies debug is set. Hence, the status file needs to assume debug-like
  # behavior/timeouts.
  "tryrelease": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "slowrelease": {
    "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
    "timeout_scalefactor": 2,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
}

# Extra flags applied when --gc-stress is given.
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "android_x64",
                   "arm",
                   "ia32",
                   "x87",
                   "mips",
                   "mipsel",
                   "mips64",
                   "mips64el",
                   "s390",
                   "s390x",
                   "ppc",
                   "ppc64",
                   "x64",
                   "x32",
                   "arm64"]

# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_arm64",
              "android_ia32",
              "android_x64",
              "arm",
              "mips",
              "mipsel",
              "mips64",
              "mips64el",
              "s390",
              "s390x",
              "x87",
              "arm64"]
def BuildOptions():
  """Build the optparse.OptionParser declaring every test-runner flag."""
  result = optparse.OptionParser()
  result.usage = '%prog [options] [tests]'
  result.description = """TESTS: %s""" % (TEST_MAP["default"])
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect: %s"
                          % SUPPORTED_ARCHS),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--sancov-dir",
                    help="Directory where to collect coverage data")
  result.add_option("--cfi-vptr",
                    help="Run tests with UBSAN cfi_vptr option.",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--dcheck-always-on",
                    help="Indicates that V8 was compiled with DCHECKs enabled",
                    default=False, action="store_true")
  result.add_option("--novfp3",
                    help="Indicates that V8 was compiled without VFP3 support",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--gcov-coverage",
                    help="Uses executables instrumented for gcov coverage",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--download-data-only",
                    help="Deprecated",
                    default=False, action="store_true")
  result.add_option("--enable-inspector",
                    help="Indicates a build with inspector support",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated,"
                    " uppercase for ninja and buildbot builds): %s"
                    % MODES.keys(),
                    default="release,debug")
  result.add_option("--no-harness", "--noharness",
                    help="Run without test harness of a given suite",
                    default=False, action="store_true")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks (deprecated)',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants;"
                    " default: \"%s\"" % ",".join(VARIANTS))
  result.add_option("--exhaustive-variants",
                    default=False, action="store_true",
                    help="Use exhaustive set of default variants:"
                    " \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("--gn", help="Scan out.gn for the last built configuration",
                    default=False, action="store_true")
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(),
                    default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow tests)"))
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--swarming",
                    help="Indicates running test driver on swarming.",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=TIMEOUT_DEFAULT, type="int")
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  result.add_option("--random-seed", default=0, dest="random_seed", type="int",
                    help="Default seed for initializing random generator")
  result.add_option("--random-seed-stress-count", default=1, type="int",
                    dest="random_seed_stress_count",
                    help="Number of runs with different random seeds")
  result.add_option("--msan",
                    help="Regard test expectations for MSAN",
                    default=False, action="store_true")
  return result
def RandomSeed():
  """Return a non-zero random seed in the signed 32-bit integer range."""
  while True:
    seed = random.SystemRandom().randint(-2147483648, 2147483647)
    if seed:
      return seed
def BuildbotToV8Mode(config):
  """Convert buildbot build configs to configs understood by the v8 runner.

  V8 configs are always lower case and without the additional _x64 suffix
  used for 64 bit builds on windows with ninja.
  """
  if config.endswith('_x64'):
    config = config[:-4]
  return config.lower()
def SetupEnvironment(options):
  """Setup additional environment variables."""
  # Many tests assume an English interface.
  os.environ['LANG'] = 'en_US.UTF-8'
  # Shared llvm-symbolizer path handed to every sanitizer runtime below.
  symbolizer = 'external_symbolizer_path=%s' % (
      os.path.join(
          BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
          'llvm-symbolizer',
      )
  )

  if options.asan:
    os.environ['ASAN_OPTIONS'] = symbolizer

  if options.sancov_dir:
    assert os.path.exists(options.sancov_dir)
    # NOTE(review): this overwrites the ASAN_OPTIONS value set just above
    # when both --asan and --sancov-dir are given; the symbolizer is
    # re-included, so the net effect appears intentional — confirm.
    os.environ['ASAN_OPTIONS'] = ":".join([
        'coverage=1',
        'coverage_dir=%s' % options.sancov_dir,
        symbolizer,
    ])

  if options.cfi_vptr:
    os.environ['UBSAN_OPTIONS'] = ":".join([
        'print_stacktrace=1',
        'print_summary=1',
        'symbolize=1',
        symbolizer,
    ])

  if options.msan:
    os.environ['MSAN_OPTIONS'] = symbolizer

  if options.tsan:
    suppressions_file = os.path.join(
        BASE_DIR, 'tools', 'sanitizers', 'tsan_suppressions.txt')
    os.environ['TSAN_OPTIONS'] = " ".join([
        symbolizer,
        'suppressions=%s' % suppressions_file,
        'exit_code=0',
        'report_thread_leaks=0',
        'history_size=7',
        'report_destroy_locked=0',
    ])
def ProcessOptions(options):
  """Validate and post-process the parsed command-line options.

  Mutates |options| in place (and the global VARIANTS list). Returns False
  when an option combination is invalid, True otherwise.
  """
  global VARIANTS
  # First try to auto-detect configurations based on the build if GN was
  # used. This can't be overridden by cmd-line arguments.
  options.auto_detect = False
  if options.gn:
    gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
    latest_timestamp = -1
    latest_config = None
    # Pick the most recently modified GN configuration directory.
    for gn_config in os.listdir(gn_out_dir):
      gn_config_dir = os.path.join(gn_out_dir, gn_config)
      if not isdir(gn_config_dir):
        continue
      if os.path.getmtime(gn_config_dir) > latest_timestamp:
        latest_timestamp = os.path.getmtime(gn_config_dir)
        latest_config = gn_config
    if latest_config:
      print(">>> Latest GN build found is %s" % latest_config)
      options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config)
  if options.buildbot:
    build_config_path = os.path.join(
        BASE_DIR, options.outdir, options.mode, "v8_build_config.json")
  else:
    build_config_path = os.path.join(
        BASE_DIR, options.outdir, "v8_build_config.json")
  if os.path.exists(build_config_path):
    try:
      with open(build_config_path) as f:
        build_config = json.load(f)
    except Exception:
      print ("%s exists but contains invalid json. Is your build up-to-date?" %
             build_config_path)
      return False
    options.auto_detect = True
    # In auto-detect mode the outdir is always where we found the build config.
    # This ensures that we'll also take the build products from there.
    options.outdir = os.path.dirname(build_config_path)
    options.arch_and_mode = None
    options.arch = build_config["v8_target_cpu"]
    if options.arch == 'x86':
      # TODO(machenbach): Transform all to x86 eventually.
      options.arch = 'ia32'
    # Mirror the sanitizer/feature flags recorded by the build into options.
    options.asan = build_config["is_asan"]
    options.dcheck_always_on = build_config["dcheck_always_on"]
    options.enable_inspector = build_config["v8_enable_inspector"]
    options.mode = 'debug' if build_config["is_debug"] else 'release'
    options.msan = build_config["is_msan"]
    options.no_i18n = not build_config["v8_enable_i18n_support"]
    options.no_snap = not build_config["v8_use_snapshot"]
    options.tsan = build_config["is_tsan"]
  # Architecture and mode related stuff.
  if options.arch_and_mode:
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if not BuildbotToV8Mode(mode) in MODES:
      print "Unknown mode %s" % mode
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if not arch in SUPPORTED_ARCHS:
      print "Unknown architecture %s" % arch
      return False
  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)
  # Special processing of other options, sorted alphabetically.
  if options.buildbot:
    options.no_network = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = shlex.split(options.extra_flags)
  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS
  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")
    options.extra_flags.append("--omit-quit")
  if options.novfp3:
    options.extra_flags.append("--noenable-vfp3")
  if options.exhaustive_variants:
    # This is used on many bots. It includes a larger set of default variants.
    # Other options for manipulating variants still apply afterwards.
    VARIANTS = EXHAUSTIVE_VARIANTS
  # TODO(machenbach): Figure out how to test a bigger subset of variants on
  # msan and tsan.
  if options.msan:
    VARIANTS = ["default"]
  if options.tsan:
    VARIANTS = ["default"]
  if options.j == 0:
    # 0 means: use one parallel job per available CPU.
    options.j = multiprocessing.cpu_count()
  if options.random_seed_stress_count <= 1 and options.random_seed == 0:
    options.random_seed = RandomSeed()
  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return reduce(lambda x, y: x + y, args) <= 1
  if not excl(options.no_variants, bool(options.variants)):
    print("Use only one of --no-variants or --variants.")
    return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if options.no_variants:
    VARIANTS = ["default"]
  if options.variants:
    VARIANTS = options.variants.split(",")
    # Resolve variant aliases.
    VARIANTS = reduce(
        list.__add__,
        (VARIANT_ALIASES.get(v, [v]) for v in VARIANTS),
        [],
    )
    if not set(VARIANTS).issubset(ALL_VARIANTS):
      print "All variants must be in %s" % str(ALL_VARIANTS)
      return False
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")
  # Dedupe.
  VARIANTS = list(set(VARIANTS))
  if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)
  def CheckTestMode(name, option):
    """Returns True iff |option| is a valid test-mode value; prints otherwise."""
    if not option in ["run", "skip", "dontcare"]:
      print "Unknown %s mode %s" % (name, option)
      return False
    return True
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if options.no_i18n:
    # Without i18n support the intl suite cannot run.
    TEST_MAP["bot_default"].remove("intl")
    TEST_MAP["default"].remove("intl")
  if not options.enable_inspector:
    # The inspector and debugger suites require inspector support in the build.
    TEST_MAP["default"].remove("inspector")
    TEST_MAP["bot_default"].remove("inspector")
    TEST_MAP["optimize_for_size"].remove("inspector")
    TEST_MAP["default"].remove("debugger")
    TEST_MAP["bot_default"].remove("debugger")
    TEST_MAP["optimize_for_size"].remove("debugger")
  return True
def ShardTests(tests, options):
  """Return the subset of tests that belongs to this shard.

  Shard configuration comes from the GTEST_* environment variables when
  present (e.g. set by swarming), otherwise from the command-line options.
  """
  # Read gtest shard configuration from environment (e.g. set by swarming).
  # If none is present, use values passed on the command line.
  shard_count = int(os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
  shard_run = os.environ.get('GTEST_SHARD_INDEX')
  if shard_run is None:
    shard_run = options.shard_run
  else:
    # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
    shard_run = int(shard_run) + 1
  if options.shard_count > 1:
    # Log if a value was passed on the cmd line and it differs from the
    # environment variables.
    if options.shard_count != shard_count:
      print("shard_count from cmd line differs from environment variable "
            "GTEST_TOTAL_SHARDS")
    if options.shard_run > 1 and options.shard_run != shard_run:
      print("shard_run from cmd line differs from environment variable "
            "GTEST_SHARD_INDEX")
  if shard_count < 2:
    return tests
  if not (1 <= shard_run <= shard_count):
    print("shard-run not a valid number, should be in [1:shard-count]")
    print("defaulting back to running all tests")
    return tests
  # Round-robin assignment: test i goes to shard (i % shard_count) + 1.
  return [test for position, test in enumerate(tests)
          if position % shard_count == shard_run - 1]
def Main():
  """Parse options, load the test suites and run them per arch/mode pair.

  Returns the process exit code: 0 on success, 1 on bad options or test
  failures, 2 when interrupted.
  """
  # Use the v8 root as cwd as some test cases use "load" with relative paths.
  os.chdir(BASE_DIR)
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  SetupEnvironment(options)
  if options.swarming:
    # Swarming doesn't print how isolated commands are called. Lets make this
    # less cryptic by printing it ourselves.
    print ' '.join(sys.argv)
  exit_code = 0
  suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
  # Use default tests if no test configuration was provided at the cmd line.
  if len(args) == 0:
    args = ["default"]
  # Expand arguments with grouped tests. The args should reflect the list of
  # suites as otherwise filters would break.
  def ExpandTestGroups(name):
    # Expands a test-group alias (e.g. "default") into its suite names.
    if name in TEST_MAP:
      return [suite for suite in TEST_MAP[name]]
    else:
      return [name]
  args = reduce(lambda x, y: x + y,
                [ExpandTestGroups(arg) for arg in args],
                [])
  args_suites = OrderedDict() # Used as set
  for arg in args:
    args_suites[arg.split('/')[0]] = True
  # Keep only suites that actually exist on disk, in argument order.
  suite_paths = [ s for s in args_suites if s in suite_paths ]
  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(BASE_DIR, "test", root))
    if suite:
      suites.append(suite)
  if options.download_data or options.download_data_only:
    for s in suites:
      s.DownloadData()
  if options.download_data_only:
    return exit_code
  for s in suites:
    s.PrepareSources()
  # Run every (arch, mode) combination; keep the first non-zero exit code.
  for (arch, mode) in options.arch_and_mode:
    try:
      code = Execute(arch, mode, args, options, suites)
    except KeyboardInterrupt:
      return 2
    exit_code = exit_code or code
  return exit_code
def Execute(arch, mode, args, options, suites):
  """Run all test suites for one (arch, mode) combination.

  Returns the runner's exit code (0 on success).
  """
  print(">>> Running tests for %s.%s" % (arch, mode))
  shell_dir = options.shell_dir
  if not shell_dir:
    if options.auto_detect:
      # If an output dir with a build was passed, test directly in that
      # directory.
      shell_dir = os.path.join(BASE_DIR, options.outdir)
    elif options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
      mode = BuildbotToV8Mode(mode)
    else:
      shell_dir = os.path.join(
          BASE_DIR,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  if not os.path.exists(shell_dir):
      raise Exception('Could not find shell_dir: "%s"' % shell_dir)
  # Populate context object.
  mode_flags = MODES[mode]["flags"]
  # Simulators are slow, therefore allow a longer timeout.
  if arch in SLOW_ARCHS:
    options.timeout *= 2
  options.timeout *= MODES[mode]["timeout_scalefactor"]
  if options.predictable:
    # Predictable mode is slower.
    options.timeout *= 2
  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
                        mode_flags, options.verbose,
                        options.timeout,
                        options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness,
                        use_perf_data=not options.swarming,
                        sancov_dir=options.sancov_dir)
  # TODO(all): Combine "simulator" and "simulator_run".
  # TODO(machenbach): In GN we can derive simulator run from
  # target_arch != v8_target_arch in the dumped build config.
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  # These variables are consumed by the suites' status-file expressions.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "gcov_coverage": options.gcov_coverage,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "predictable": options.predictable,
    "byteorder": sys.byteorder,
  }
  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    # First filtering by status applying the generic rules (independent of
    # variants).
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    # Expand every test into one copy per configured variant / flag set.
    variant_gen = s.CreateVariantGenerator(VARIANTS)
    variant_tests = [ t.CopyAddingFlags(v, flags)
                      for t in s.tests
                      for v in variant_gen.FilterVariantsByTest(t)
                      for flags in variant_gen.GetFlagSets(t, v) ]
    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        # Yields one flag list per stress run.
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in execution.py)
          # or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(t.variant, flags)
        for t in variant_tests
        for flags in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests
    # Second filtering by status applying the variant-dependent rules.
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests, variants=True)
    s.tests = ShardTests(s.tests, options)
    num_tests += len(s.tests)
  if options.cat:
    return 0  # We're done here.
  if options.report:
    verbose.PrintReport(all_tests)
  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results, arch, MODES[mode]["execution_mode"],
        ctx.random_seed))
  run_networked = not options.no_network
  if not run_networked:
    if options.verbose:
      print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False
  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, BASE_DIR)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)
  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time
  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  if num_tests == 0:
    print("Warning: no tests were run!")
  if exit_code == 1 and options.json_test_results:
    print("Force exit code 0 after failures. Json test results file generated "
          "with failure information.")
    exit_code = 0
  if options.sancov_dir:
    # If tests ran with sanitizer coverage, merge coverage files in the end.
    try:
      print "Merging sancov files."
      subprocess.check_call([
        sys.executable,
        join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
        "--coverage-dir=%s" % options.sancov_dir])
    except:
      print >> sys.stderr, "Error: Merging sancov files failed."
      exit_code = 1
  return exit_code
# Script entry point: propagate the runner's exit code to the shell.
if __name__ == "__main__":
  sys.exit(Main())
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
from frappe.model import (
display_fieldtypes,
no_value_fields,
table_fields as table_fieldtypes,
)
from frappe.utils.csvutils import build_csv_response
from frappe.utils.xlsxutils import build_xlsx_response
class Exporter:
	"""Builds a CSV/XLSX export of a DocType's records for use with Importer."""

	def __init__(
		self,
		doctype,
		export_fields=None,
		export_data=False,
		export_filters=None,
		export_page_length=None,
		file_type="CSV",
	):
		"""
		Exports records of a DocType for use with Importer
		:param doctype: Document Type to export
		:param export_fields=None: One of 'All', 'Mandatory' or {'DocType': ['field1', 'field2'], 'Child DocType': ['childfield1']}
		:param export_data=False: Whether to export data as well
		:param export_filters=None: The filters (dict or list) which is used to query the records
		:param export_page_length=None: Maximum number of parent records to fetch (None fetches all)
		:param file_type: One of 'Excel' or 'CSV'
		"""
		self.doctype = doctype
		self.meta = frappe.get_meta(doctype)
		self.export_fields = export_fields
		self.export_filters = export_filters
		self.export_page_length = export_page_length
		self.file_type = file_type
		# this will contain the csv content
		self.csv_array = []
		# fields that get exported
		self.exportable_fields = self.get_all_exportable_fields()
		self.fields = self.serialize_exportable_fields()
		self.add_header()
		if export_data:
			self.data = self.get_data_to_export()
		else:
			self.data = []
		self.add_data()

	def get_all_exportable_fields(self):
		"""Map the parent doctype and each child-table fieldname to its exportable DocFields."""
		# fieldnames of all child-table fields defined on the parent doctype
		child_table_fields = [
			df.fieldname for df in self.meta.fields if df.fieldtype in table_fieldtypes
		]
		meta = frappe.get_meta(self.doctype)
		exportable_fields = frappe._dict({})
		# NOTE(review): assumes export_fields is the dict form
		# ({doctype: [fieldnames]}); the 'All'/'Mandatory' string forms
		# mentioned in __init__'s docstring would fail here — confirm callers.
		for key, fieldnames in self.export_fields.items():
			if key == self.doctype:
				# parent fields
				exportable_fields[key] = self.get_exportable_fields(key, fieldnames)
			elif key in child_table_fields:
				# child fields
				child_df = meta.get_field(key)
				child_doctype = child_df.options
				exportable_fields[key] = self.get_exportable_fields(child_doctype, fieldnames)
		return exportable_fields

	def serialize_exportable_fields(self):
		"""Flatten exportable_fields into one ordered list of field dicts."""
		fields = []
		for key, exportable_fields in self.exportable_fields.items():
			for _df in exportable_fields:
				# make a copy of df dict to avoid reference mutation
				if isinstance(_df, frappe.core.doctype.docfield.docfield.DocField):
					df = _df.as_dict()
				else:
					df = _df.copy()
				df.is_child_table_field = key != self.doctype
				if df.is_child_table_field:
					# remember which table field of the parent this column belongs to
					df.child_table_df = self.meta.get_field(key)
				fields.append(df)
		return fields

	def get_exportable_fields(self, doctype, fieldnames):
		"""Return the DocFields of doctype (for fieldnames) that can hold a value."""
		meta = frappe.get_meta(doctype)

		def is_exportable(df):
			# layout/display-only fields carry no data and are skipped
			return df and df.fieldtype not in (display_fieldtypes + no_value_fields)

		# add name field
		name_field = frappe._dict(
			{
				"fieldtype": "Data",
				"fieldname": "name",
				"label": "ID",
				"reqd": 1,
				"parent": doctype,
			}
		)
		fields = [meta.get_field(fieldname) for fieldname in fieldnames]
		fields = [df for df in fields if is_exportable(df)]
		if "name" in fieldnames:
			# "name" is not a regular DocField, so prepend the synthetic ID column
			fields = [name_field] + fields
		return fields or []

	def get_data_to_export(self):
		"""Build the flat list of export rows (parent rows plus child rows)."""
		frappe.permissions.can_export(self.doctype, raise_exception=True)
		data_to_export = []
		table_fields = [f for f in self.exportable_fields if f != self.doctype]
		data = self.get_data_as_docs()
		for doc in data:
			rows = []
			rows = self.add_data_row(self.doctype, None, doc, rows, 0)
			if table_fields:
				# add child table data
				for f in table_fields:
					for i, child_row in enumerate(doc[f]):
						table_df = self.meta.get_field(f)
						child_doctype = table_df.options
						rows = self.add_data_row(child_doctype, child_row.parentfield, child_row, rows, i)
			data_to_export += rows
		return data_to_export

	def add_data_row(self, doctype, parentfield, doc, rows, row_idx):
		"""Write doc's values into rows[row_idx], growing rows when needed."""
		if len(rows) < row_idx + 1:
			rows.append([""] * len(self.fields))
		row = rows[row_idx]
		for i, df in enumerate(self.fields):
			if df.parent == doctype:
				if df.is_child_table_field and df.child_table_df.fieldname != parentfield:
					# the same child doctype can appear under several table
					# fields; only fill the column belonging to this parentfield
					continue
				row[i] = doc.get(df.fieldname, "")
		return rows

	def get_data_as_docs(self):
		"""Fetch parent records with their child rows attached as lists."""
		def format_column_name(df):
			# fully qualified column name for SQL: `tabDoctype`.`fieldname`
			return "`tab{0}`.`{1}`".format(df.parent, df.fieldname)

		filters = self.export_filters
		if self.meta.is_nested_set():
			# preserve tree order for nested-set doctypes
			order_by = "`tab{0}`.`lft` ASC".format(self.doctype)
		else:
			order_by = "`tab{0}`.`creation` DESC".format(self.doctype)
		parent_fields = [
			format_column_name(df) for df in self.fields if df.parent == self.doctype
		]
		parent_data = frappe.db.get_list(
			self.doctype,
			filters=filters,
			fields=["name"] + parent_fields,
			limit_page_length=self.export_page_length,
			order_by=order_by,
			as_list=0,
		)
		parent_names = [p.name for p in parent_data]
		child_data = {}
		for key in self.exportable_fields:
			if key == self.doctype:
				continue
			child_table_df = self.meta.get_field(key)
			child_table_doctype = child_table_df.options
			child_fields = ["name", "idx", "parent", "parentfield"] + list(
				set(
					[format_column_name(df) for df in self.fields if df.parent == child_table_doctype]
				)
			)
			data = frappe.db.get_list(
				child_table_doctype,
				filters={
					"parent": ("in", parent_names),
					"parentfield": child_table_df.fieldname,
					"parenttype": self.doctype,
				},
				fields=child_fields,
				order_by="idx asc",
				as_list=0,
			)
			child_data[key] = data
		return self.merge_data(parent_data, child_data)

	def merge_data(self, parent_data, child_data):
		"""Attach each parent's child rows under the matching table fieldname."""
		for doc in parent_data:
			for table_field, table_rows in child_data.items():
				doc[table_field] = [row for row in table_rows if row.parent == doc.name]
		return parent_data

	def add_header(self):
		"""Append the column-label header row to the csv content."""
		header = []
		for df in self.fields:
			is_parent = not df.is_child_table_field
			if is_parent:
				label = df.label
			else:
				label = "{0} ({1})".format(df.label, df.child_table_df.label)
			if label in header:
				# this label is already in the header,
				# which means two fields with the same label
				# add the fieldname to avoid clash
				if is_parent:
					label = "{0}".format(df.fieldname)
				else:
					label = "{0}.{1}".format(df.child_table_df.fieldname, df.fieldname)
			header.append(label)
		self.csv_array.append(header)

	def add_data(self):
		"""Append the exported data rows to the csv content."""
		self.csv_array += self.data

	def get_csv_array(self):
		"""Return the header + data rows as a list of lists."""
		return self.csv_array

	def get_csv_array_for_export(self):
		"""Return the csv rows for download, padding empty exports with blank rows."""
		csv_array = self.csv_array
		if not self.data:
			# add 2 empty rows
			csv_array += [[]] * 2
		return csv_array

	def build_response(self):
		"""Send the export to the client in the requested file format."""
		if self.file_type == "CSV":
			self.build_csv_response()
		elif self.file_type == "Excel":
			self.build_xlsx_response()

	def build_csv_response(self):
		"""Stream the export as a CSV download."""
		build_csv_response(self.get_csv_array_for_export(), self.doctype)

	def build_xlsx_response(self):
		"""Stream the export as an XLSX download."""
		build_xlsx_response(self.get_csv_array_for_export(), self.doctype)
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality to create recommendations for a user."""
from __future__ import division
from datetime import datetime
from datetime import timedelta
from recommender import feeds
from recommender import items
from recommender import models
from recommender import past_recommendations
from recommender import time_periods
from recommender import url_util
from google.appengine.ext import ndb
# How many sources to include for each recommendation.
MAX_TOP_SOURCES = 10
# Baseline weight applied to any rating when include_popular is set; keeps
# ratings from unconnected users from being dropped entirely.
NOMINAL_USER_VOTE_WEIGHT = 0.0001
# Accepted values for RecommendationsOnDemand's source_type argument.
SOURCE_TYPE_ANY = None
SOURCE_TYPE_FEED = 'feed'
SOURCE_TYPE_USER = 'user'
def RecommendationsOnDemand(
    user,
    time_period,
    category_id,
    any_category,
    include_popular,
    limit,
    connection_version,
    decay_rate=1,
    source_type=SOURCE_TYPE_ANY,
    exclude_urls=frozenset(),
    save_past_recommendations=False,
    exclude_past_recommendations=False,
    exclude_past_recommendations_from_all_time_periods=False,
    external_connections=None,
    exclude_rated_items=True,
    diversify=False):
  """Calculates recommendations for a user on demand.

  The recommendations are calculated from raw user ratings, feed items and top
  connection of this user to other users and other feeds.

  Args:
    user: The user we are making recommendations for.
    time_period: The time period that the user is interested in
    category_id: The category that user wants recommendations for.
    any_category: Whether the user wants recommendations for all categories.
    include_popular: Whether to include recommendations from other users that
      this users is not connected to.
    limit: Pagination size.
    connection_version: The version of connections to use.
    decay_rate: How much to penalize older items from the same source.
    source_type: Whether to return recommendations only from feeds, users or
      both.
    exclude_urls: The set of items that should not be returned.
    save_past_recommendations: Whether to save the returned recommendations as
      past recommendations.
    exclude_past_recommendations: Whether to exclude past committed
      recommendations.
    exclude_past_recommendations_from_all_time_periods: Whether to exclude past
      recommendations that were shown for other time periods. Otherwise only
      excludes past recommendations that were shown for the same time period as
      time_period.
    external_connections: If None, then these models.Connection objects will be
      used instead of getting them from Datastore for this user.
    exclude_rated_items: Whether to exclude items already rated by the user from
      the returned recommendations.
    diversify: If True, then recommendations from the same sources will be
      forced to be separated with recommendations from other sources.

  Returns:
    A list of recommendations.
  """
  exclude_item_ids = set(items.UrlsToItemIds(exclude_urls).values())
  subscriber_id = models.UserKey(user).id()
  now = datetime.now()
  since_time = _GetSinceTime(time_period, now)
  # Kick off asynchronous datastore lookups first so they overlap with the
  # synchronous work below.
  past_recommendation_item_ids_future = None
  past_recommendation_item_ids = frozenset()
  if exclude_past_recommendations:
    past_recommendation_item_ids_future = (
        past_recommendations.GetPastRecommendationItemIdsAsync(
            subscriber_id,
            (None if exclude_past_recommendations_from_all_time_periods else
             time_period)))
  if exclude_rated_items:
    recent_rated_item_ids_future = models.GetRatedItemIdsAsync(subscriber_id)
  else:
    # Use a pre-resolved future so later code can treat both cases alike.
    recent_rated_item_ids_future = ndb.Future()
    recent_rated_item_ids_future.set_result([])
  # Pick the smallest configured "active days" bucket that covers the
  # requested time period.
  connection_active_days = None
  for days in models.CONNECTION_ALL_ACTIVE_DAYS:
    if timedelta(days=days) >= now - since_time:
      connection_active_days = days
      break
  if source_type == SOURCE_TYPE_FEED or source_type == SOURCE_TYPE_ANY:
    feed_info_future = _GetRecommendedFeedItems(
        user, since_time, category_id, any_category, connection_version,
        connection_active_days, external_connections)
  else:
    feed_info_future = ndb.Future()
    feed_info_future.set_result(([], []))
  def GetConnections(connection_version=connection_version):
    # Fetches this user's strongest user-to-user connections, unless the
    # caller supplied external connections (then an empty resolved future).
    if external_connections:
      promise = ndb.Future()
      promise.set_result([])
      return promise
    query = models.Connection.query(
        models.Connection.publisher_type == models.SOURCE_TYPE_USER,
        models.Connection.subscriber_id == subscriber_id,
        models.Connection.version == connection_version).order(
            -models.Connection.weight)
    if connection_active_days:
      query = query.filter(
          models.Connection.active_days == connection_active_days)
    if not any_category:
      subscriber_category = models.CategoryKey(category_id, user)
      query = query.filter(
          models.Connection.subscriber_category == subscriber_category)
    return query.fetch_async(100)
  connections_future = GetConnections()
  query = models.PageRating.query(
      projection=['item_id', 'user_id', 'rating', 'category', 'date'])
  if since_time != datetime.min:
    query = query.filter(models.PageRating.date > since_time)
  user_ratings = query.order(-models.PageRating.date).fetch(1000)
  if past_recommendation_item_ids_future:
    past_recommendation_item_ids = (
        past_recommendation_item_ids_future.get_result())
  # First pass over ratings: record which (publisher, category) sources rated
  # an eligible item positively or negatively.
  positive_sources = set()
  negative_sources = set()
  for r in user_ratings:
    if r.rating == 0:
      continue
    if r.item_id in past_recommendation_item_ids:
      continue
    if r.item_id in exclude_item_ids:
      continue
    # Skip the subscriber's own ratings.
    if r.key.parent().id() == subscriber_id:
      continue
    publisher_id = r.key.parent().id()
    publisher_category_id = models.GetCategoryId(r.category)
    source = (publisher_id, publisher_category_id)
    if r.rating < 0:
      negative_sources.add(source)
    else:
      positive_sources.add(source)
    r.rating_source = source
  positive_source_to_connection = {}
  negative_source_to_connection = {}
  connections = connections_future.get_result()
  if external_connections:
    connections = [
        c for c in external_connections
        if c.publisher_type == models.SOURCE_TYPE_USER
    ]
  for connection in connections:
    source = (connection.publisher_id, connection.PublisherCategoryId())
    # The user may be subscribed to the same source from multiple collections.
    # We only count the strongest connection here.
    if (connection.positive and source in positive_sources and
        source not in positive_source_to_connection):
      positive_source_to_connection[source] = connection
    if (not connection.positive and source in negative_sources and
        source not in negative_source_to_connection):
      negative_source_to_connection[source] = connection
  item_id_to_recommendation = {}
  recent_rated_item_ids = set(recent_rated_item_ids_future.get_result())
  nominal_weight = NOMINAL_USER_VOTE_WEIGHT if include_popular else 0
  num_matched_past_recommendations = 0
  # Keyed by (user_id, category_id).
  seen_items_from_user = {}
  # Second pass: fold every eligible rating into a per-(item, category)
  # recommendation, weighted by the connection to the rater.
  for r in user_ratings:
    item_id = r.item_id
    assert item_id
    if r.rating == 0:
      continue
    if r.item_id in past_recommendation_item_ids:
      num_matched_past_recommendations += 1
      continue
    if r.item_id in exclude_item_ids:
      continue
    if r.item_id in recent_rated_item_ids:
      continue
    if r.key.parent().id() == subscriber_id:
      continue
    if r.rating > 0:
      connection = positive_source_to_connection.get(r.rating_source, None)
    else:
      connection = negative_source_to_connection.get(r.rating_source, None)
    if connection:
      category_id = connection.SubscriberCategoryId()
      category = connection.subscriber_category
    else:
      category = None
      category_id = None
    key = (r.item_id, category_id)
    if key in item_id_to_recommendation:
      (recommendation, top_sources, source_users,
       feed_connections) = item_id_to_recommendation[key]
    else:
      recommendation = models.Recommendation(
          item_id=r.item_id,
          source_category=category,
          first_seen_datetime=r.date)
      top_sources = {}
      source_users = {}
      feed_connections = []
      item_id_to_recommendation[key] = (recommendation, top_sources,
                                        source_users, feed_connections)
    recommendation.first_seen_datetime = min(recommendation.first_seen_datetime,
                                             r.date)
    weight = nominal_weight
    if connection:
      connection_weight = connection.weight
      connection_top_sources = connection.top_sources
      publisher_id = connection.publisher_id
      if r.rating > 0 and connection_weight > 0:
        for source in connection_top_sources:
          if source.url not in top_sources:
            top_sources[source.url] = models.RecommendationSourcePage(
                source.url)
          top_sources[source.url].weight += connection_weight
          top_sources[source.url].user_count += 1
        source_users[publisher_id] = connection_weight
        recommendation.connection_key_components.append(
            connection.KeyComponents())
      # Skip positive recommendations if the publisher has no positive
      # recommendations in common with the subscriber
      # (ie, num_shared_items == 0).
      if r.rating > 0 and connection.num_shared_items == 0:
        pass
      else:
        if decay_rate < 1:
          key = (publisher_id, connection.PublisherCategoryId())
          seen_items = seen_items_from_user.get(key, 0)
          seen_items_from_user[key] = seen_items + 1
          # We are processing user ratings in most-recent first order. The most
          # recent rated item gets the full weight of the connection, the older
          # ones get progressively smaller weight.
          # We apply the decay rate only to the earned connection weight and
          # leave the nominal weight alone. That way a new user will see purely
          # popularity based ranking where each rating has the same weight, no
          # matter who those ratings came from.
          connection_weight *= decay_rate ** seen_items
        weight += connection_weight
    if weight > 0:
      recommendation.weight += r.rating * weight
      if r.rating > 0:
        recommendation.user_count += 1
  (feed_items, feed_url_to_connection) = feed_info_future.get_result()
  seen_items_from_feed = {}
  if decay_rate < 1:
    # We need to sort so that the decay rate is applied from most recent items
    # to less recent items.
    feed_items.sort(key=lambda item: item.published_date, reverse=True)
  num_feed_items_matched_past_recommendations = 0
  # Fold recent feed items into the same recommendation map.
  for item in feed_items:
    item_id = item.item_id
    if item_id in recent_rated_item_ids:
      continue
    if item_id in past_recommendation_item_ids:
      num_feed_items_matched_past_recommendations += 1
      continue
    connection = feed_url_to_connection.get(item.feed_url, None)
    if not connection:
      continue
    seen_items = 0
    # We need to count urls in the exclude list before we ignore them.
    if decay_rate < 1:
      seen_items = seen_items_from_feed.get(item.feed_url, 0)
      seen_items_from_feed[item.feed_url] = seen_items + 1
    if item_id in exclude_item_ids:
      continue
    category = connection['category']
    key = (item_id, models.GetCategoryId(category))
    if key in item_id_to_recommendation:
      (recommendation, top_sources, source_users,
       feed_connections) = item_id_to_recommendation[key]
    else:
      recommendation = models.Recommendation(
          item_id=item_id,
          source_category=category,
          first_seen_datetime=item.published_date)
      top_sources = {}
      source_users = {}
      feed_connections = []
      item_id_to_recommendation[key] = (recommendation, top_sources,
                                        source_users, feed_connections)
    recommendation.first_seen_datetime = min(recommendation.first_seen_datetime,
                                             item.published_date)
    weight = connection['weight']
    if decay_rate < 1:
      weight = connection['weight'] * (decay_rate ** seen_items)
    feed_connections.append(connection)
    recommendation.weight += weight
    recommendation.connection_key_components.append(
        connection['key_components'])
  # Finalize per-recommendation aggregates (top sources and top feeds).
  for _, (recommendation, top_sources, source_users,
          feed_connections) in item_id_to_recommendation.iteritems():
    recommendation.source_count = len(top_sources)
    recommendation.top_sources = sorted(
        top_sources.values(), key=lambda v: v.weight,
        reverse=True)[:MAX_TOP_SOURCES]
    feed_connections = sorted(
        feed_connections, key=lambda c: c['weight'], reverse=True)
    unique_feed_urls = set(
        url_util.DeduplicateUrls([c['publisher_id'] for c in feed_connections]))
    recommendation.top_feed_urls = _GetTopFeedUrls(feed_connections,
                                                   unique_feed_urls)
    recommendation.feed_count = len(unique_feed_urls)
  result = [
      r for (r, _, _, _) in item_id_to_recommendation.values() if r.weight > 0
  ]
  result.sort(key=lambda v: (v.weight, v.first_seen_datetime), reverse=True)
  seen = set()
  seen_add = seen.add
  # Remove duplicate items that may have been recommended under different
  # categories.
  result = [r for r in result if not (r.item_id in seen or seen_add(r.item_id))]
  if diversify:
    result = _DiversifyByKey(result, limit, lambda r: r.ConnectionsHash())
  result = result[:limit]
  # The recommendations only have item_id populated. We need to add
  # destination_url.
  item_id_to_url = items.ItemIdsToUrls([r.item_id for r in result])
  for r in result:
    r.destination_url = item_id_to_url.get(r.item_id, '#invalid_item')
  if save_past_recommendations:
    past_recommendations.SavePastRecommendations(subscriber_id, time_period,
                                                 result)
  return models.DecorateRecommendations(subscriber_id, result)
MAX_TOP_FEEDS = 10
def _GetTopFeedUrls(feed_connections, unique_feed_urls):
urls = None
for c in feed_connections:
url = c['publisher_id']
if url in unique_feed_urls:
if urls is None:
urls = [url]
else:
urls.append(url)
if len(urls) >= MAX_TOP_FEEDS:
break
return urls
def _DiversifyByKey(all_items, limit, key):
"""Returns a subset of all_items such that no two items next to each other have the same key."""
if len(all_items) < 2:
return all_items
if len(all_items) < limit:
limit = len(all_items)
diverse_items = []
previous_key = None
added_indexes = set([])
for _ in range(0, limit):
found_diverse = False
first_not_added_index = -1
for index, item in enumerate(all_items):
if index in added_indexes:
continue
if first_not_added_index == -1:
first_not_added_index = index
current_key = key(item)
if current_key != previous_key:
diverse_items.append(item)
added_indexes.add(index)
previous_key = current_key
found_diverse = True
break
# If we didn't find a new item, then we add the next not added item with
# the same hash.
if not found_diverse:
diverse_items.append(all_items[first_not_added_index])
added_indexes.add(first_not_added_index)
return diverse_items
@ndb.tasklet
def _GetRecommendedFeedItems(user, since_time, category_id, any_category,
                             connection_version, connection_active_days,
                             external_connections):
  """Finds recent items from feeds that the user is connected to most.

  Args:
    user: user entity the recommendations are computed for.
    since_time: datetime lower bound; feeds not updated since then are
        skipped entirely.
    category_id: subscriber category to restrict connections to; unused when
        any_category is true.
    any_category: bool; when true, connections from every category are used
        and the category is read from the projection instead of the filter.
    connection_version: Connection.version value to query.
    connection_active_days: when set, positive connections are additionally
        filtered to this active_days value (negative ones are not; see the
        comment inside GetConnections).
    external_connections: optional pre-fetched Connection entities; when
        provided, the datastore query is skipped and these are used instead.

  Returns (via ndb.Return):
    (feed_items, feed_url_to_connection) where feed_items is the flat list
    of item ids across all matching feeds and feed_url_to_connection maps
    each feed URL to a connection dict with weights aggregated across
    duplicate connections to the same feed.
  """
  subscriber_id = models.UserKey(user).id()
  # subscriber_category is only meaningful when restricted to one category.
  subscriber_category = None if any_category else models.CategoryKey(
      category_id, user)
  # Default arguments bind the enclosing values at definition time; this is
  # a closure helper, not part of the public interface.
  def GetConnections(connection_version=connection_version,
                     subscriber_id=subscriber_id,
                     count=400,
                     any_category=any_category,
                     negative=False):
    # With external connections there is nothing to query; return an
    # already-resolved future so the caller can still "yield" it.
    if external_connections:
      promise = ndb.Future()
      promise.set_result([])
      return promise
    default_properties = ['publisher_id', 'weight', 'updated_datetime']
    properties = default_properties
    query = models.Connection.query(
        models.Connection.version == connection_version,
        models.Connection.publisher_type == models.SOURCE_TYPE_FEED,
        models.Connection.subscriber_id == subscriber_id)
    if negative:
      query = query.filter(models.Connection.weight < 0).order(
          models.Connection.weight)
    else:
      query = query.filter(
          models.Connection.weight > 0).order(-models.Connection.weight)
    # When we filter by subscriber category we cannot include it in the
    # projection. Otherwise we get this error:
    # BadRequestError: Cannot use projection on a property with an equality
    # filter.
    if any_category:
      properties.append('subscriber_category')
    else:
      query = query.filter(
          models.Connection.subscriber_category == subscriber_category)
    # We do not filter negative connections out by active_days because we do
    # not update "active_days" field for negative connections.
    if connection_active_days and not negative:
      query = query.filter(
          models.Connection.active_days == connection_active_days)
    return query.fetch_async(count, projection=properties)
  def ConnectionsToDict(connections):
    return [ConnectionToDict(c) for c in connections]
  def ConnectionToDict(connection):
    # We do not use connection.KeyComponents() because the connection object is
    # a projection that does not have all the fields that KeyComponents()
    # accesses.
    key_components = models.ConnectionKeyComponents(
        models.SOURCE_TYPE_FEED, connection.publisher_id, None, subscriber_id,
        connection.SubscriberCategoryId() if any_category else
        models.GetCategoryId(subscriber_category), connection_version)
    return {
        'weight': connection.weight,
        'category': (connection.subscriber_category
                     if any_category else subscriber_category),
        'updated_datetime': connection.updated_datetime,
        'publisher_id': connection.publisher_id,
        'key_components': key_components
    }
  connections_future = GetConnections()
  connections = yield connections_future
  if external_connections:
    # External connections may contain non-feed sources; keep feeds only.
    connections = [
        c for c in external_connections
        if c.publisher_type == models.SOURCE_TYPE_FEED
    ]
  connections = ConnectionsToDict(connections)
  feed_url_to_connection = {}
  feed_urls = []
  for connection in connections:
    # The connection weight is updated each time new items are added to the
    # feed. It means that there is no point looking up items for a feed that
    # was updated before the time period we are interested in.
    if connection['updated_datetime'] < since_time:
      continue
    weight = connection['weight']
    feed_url = connection['publisher_id']
    if feed_url not in feed_url_to_connection:
      feed_url_to_connection[feed_url] = connection
      feed_urls.append(feed_url)
    else:
      # Duplicate connection to the same feed (e.g. different categories):
      # fold its weight into the first connection seen for that feed.
      feed_url_to_connection[feed_url]['weight'] += weight
  feed_url_to_items = yield feeds.GetBulkItemIdsAsync(feed_urls, since_time)
  feed_items = []
  for feed_url, item_list in feed_url_to_items.iteritems():
    feed_items.extend(item_list)
  raise ndb.Return((feed_items, feed_url_to_connection))
def _GetSinceTime(time_period, now):
  """Returns the earliest datetime covered by time_period, relative to now."""
  if time_period != time_periods.ALL:
    return now - time_periods.Get(time_period)['timedelta']
  # ALL means no lower bound at all.
  return datetime.min
| |
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for HDS HNAS NFS storage.
"""
import os
import time
from oslo.config import cfg
from xml.etree import ElementTree as ETree
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
from cinder.volume.drivers import nfs
# Driver version reported in volume stats and sent to the backend.
HDS_HNAS_NFS_VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
# Extra oslo.config option for this driver: path to the HNAS XML config file.
NFS_OPTS = [
    cfg.StrOpt('hds_hnas_nfs_config_file',
               default='/opt/hds/hnas/cinder_nfs_conf.xml',
               help='configuration file for HDS NFS cinder plugin'), ]
CONF = cfg.CONF
CONF.register_opts(NFS_OPTS)
# Fallback values used when the XML config omits optional parameters.
HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc'}
def _xml_read(root, element, check=None):
    """Read an xml element.

    :param root: XML object
    :param element: string desired tag
    :param check: string if present, throw exception if element missing
    :returns: stripped element text, or None when absent/empty
    """
    try:
        val = root.findtext(element)
        LOG.info(_("%(element)s: %(val)s")
                 % {'element': element, 'val': val})
        if val:
            return val.strip()
        # Element missing or empty: either fail loudly or report None.
        if check:
            raise exception.ParameterNotFound(param=element)
        return None
    except ETree.ParseError:
        if not check:
            LOG.info(_("XML exception reading parameter: %s") % element)
            return None
        # Mandatory parameter: log, then let the parse error propagate.
        with excutils.save_and_reraise_exception():
            LOG.error(_("XML exception reading parameter: %s") % element)
def _read_config(xml_config_file):
    """Read hds driver specific xml config file.

    :param xml_config_file: string filename containing XML configuration
    :returns: dict with keys 'mgmt_ip0', 'username', 'password',
        'hnas_cmd', 'hdp' and 'services'
    :raises: NotFound when the file cannot be parsed, ParameterNotFound
        when a mandatory parameter or every service entry is missing
    """
    try:
        root = ETree.parse(xml_config_file).getroot()
    except Exception:
        raise exception.NotFound(message='config file not found: '
                                 + xml_config_file)
    # mandatory parameters
    config = {}
    arg_prereqs = ['mgmt_ip0', 'username', 'password']
    for req in arg_prereqs:
        config[req] = _xml_read(root, req, 'check')
    # optional parameters
    config['hnas_cmd'] = _xml_read(root, 'hnas_cmd') or\
        HNAS_DEFAULT_CONFIG['hnas_cmd']
    config['hdp'] = {}
    config['services'] = {}
    # min one needed
    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
        if _xml_read(root, svc) is None:
            continue
        service = {'label': svc}
        # none optional
        for arg in ['volume_type', 'hdp']:
            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
        config['services'][service['volume_type']] = service
        config['hdp'][service['hdp']] = service['hdp']
    # At least one service is required. NOTE: the previous check
    # "config['services'].keys() is None" could never be true because
    # dict.keys() never returns None; test for an empty mapping instead.
    if not config['services']:
        raise exception.ParameterNotFound(param="No service found")
    return config
def factory_bend():
    """Factory over-ride in self-tests.

    Returns a fresh HnasBackend; self-tests presumably replace this
    function to inject a mock backend -- confirm against the test suite.
    """
    return HnasBackend()
class HDSNFSDriver(nfs.NfsDriver):
    """Base class for Hitachi NFS driver.

    Executes commands relating to Volumes.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(vish): db is set by Manager
        self._execute = None
        self.context = None
        self.configuration = kwargs.get('configuration', None)
        if self.configuration:
            self.configuration.append_config_values(NFS_OPTS)
            # Parse the driver-specific XML file; raises if mandatory
            # parameters are missing.
            self.config = _read_config(
                self.configuration.hds_hnas_nfs_config_file)
        super(HDSNFSDriver, self).__init__(*args, **kwargs)
        self.bend = factory_bend()
        # NOTE(review): self.config is only assigned when a configuration
        # object is passed, yet _array_info_get() always reads it -- confirm
        # the driver is never instantiated without 'configuration'.
        (self.arid, self.nfs_name, self.lumax) = self._array_info_get()
    def _array_info_get(self):
        """Get array parameters.

        Returns a tuple (array id, 'nfs_' + array id, lu max) parsed from
        the whitespace-split output of the backend get_version call.
        """
        out = self.bend.get_version(self.config['hnas_cmd'],
                                    HDS_HNAS_NFS_VERSION,
                                    self.config['mgmt_ip0'],
                                    self.config['username'],
                                    self.config['password'])
        # Tokens 1 and 6 of the output are used; presumably array id and
        # maximum LU count -- confirm against HnasBackend.get_version output.
        inf = out.split()
        return inf[1], 'nfs_' + inf[1], inf[6]
    def _id_to_vol(self, volume_id):
        """Given the volume id, retrieve the volume object from database.

        :param volume_id: string volume id
        :returns: volume model object
        """
        vol = self.db.volume_get(self.context, volume_id)
        return vol
    def _get_service(self, volume):
        """Get the available service parameters for a given volume using
        its type.

        :param volume: dictionary volume reference
        :returns: (hdp, path, fslabel) tuple for the matched service
        :raises: ParameterNotFound if neither the volume type nor 'default'
            has a configured service
        """
        label = None
        if volume['volume_type']:
            label = volume['volume_type']['name']
        label = label or 'default'
        if label not in self.config['services'].keys():
            # default works if no match is found
            label = 'default'
        if label in self.config['services'].keys():
            svc = self.config['services'][label]
            # NOTE(review): 'path' and 'fslabel' are populated by do_setup()
            # from the live export info; this method assumes do_setup()
            # already ran.
            LOG.info("Get service: %s->%s" % (label, svc['fslabel']))
            service = (svc['hdp'], svc['path'], svc['fslabel'])
        else:
            LOG.info(_("Available services: %s")
                     % self.config['services'].keys())
            LOG.error(_("No configuration found for service: %s") % label)
            raise exception.ParameterNotFound(param=label)
        return service
    def set_execute(self, execute):
        # Injects the command executor used by _execute/_try_execute.
        self._execute = execute
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param volume: dictionary volume reference
        :param new_size: int size in GB to extend
        :raises: InvalidResults if the image did not reach the new size
        """
        nfs_mount = self._get_provider_location(volume['id'])
        path = self._get_volume_path(nfs_mount, volume['name'])
        # Resize the image file on share to new size.
        LOG.debug('Checking file for resize')
        if self._is_file_size_equal(path, new_size):
            # Already at the requested size; nothing to do.
            return
        else:
            LOG.info(_('Resizing file to %sG'), new_size)
            image_utils.resize_image(path, new_size)
            if self._is_file_size_equal(path, new_size):
                LOG.info(_("LUN %(id)s extended to %(size)s GB.")
                         % {'id': volume['id'], 'size': new_size})
                return
            else:
                raise exception.InvalidResults(
                    _('Resizing image file failed.'))
    def _is_file_size_equal(self, path, size):
        """Checks if file size at path is equal to size.

        :param path: string local image path
        :param size: int expected size in GB
        """
        data = image_utils.qemu_img_info(path)
        # Integer division: only exact whole-GB matches return True.
        virt_size = data.virtual_size / units.Gi
        if virt_size == size:
            return True
        else:
            return False
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        :param volume: dictionary volume reference
        :param snapshot: dictionary snapshot reference
        :returns: dict with the provider_location of the new volume
        :raises: CinderException when sizes differ (resize on create is not
            supported here)
        """
        LOG.debug('create_volume_from %s', volume)
        vol_size = volume['size']
        snap_size = snapshot['volume_size']
        if vol_size != snap_size:
            msg = _('Cannot create volume of size %(vol_size)s from '
                    'snapshot of size %(snap_size)s')
            msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size}
            raise exception.CinderException(msg % msg_fmt)
        self._clone_volume(snapshot['name'],
                           volume['name'],
                           snapshot['volume_id'])
        share = self._get_volume_location(snapshot['volume_id'])
        return {'provider_location': share}
    def create_snapshot(self, snapshot):
        """Create a snapshot.

        :param snapshot: dictionary snapshot reference
        :returns: dict with the provider_location of the snapshot
        """
        self._clone_volume(snapshot['volume_name'],
                           snapshot['name'],
                           snapshot['volume_id'])
        share = self._get_volume_location(snapshot['volume_id'])
        LOG.debug('Share: %s', share)
        # returns the mount point (not path)
        return {'provider_location': share}
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot.

        :param snapshot: dictionary snapshot reference
        """
        nfs_mount = self._get_provider_location(snapshot['volume_id'])
        # Treat an already-absent snapshot as successfully deleted.
        if self._volume_not_present(nfs_mount, snapshot['name']):
            return True
        self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']),
                      run_as_root=True)
    def _get_volume_location(self, volume_id):
        """Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>.

        :param volume_id: string volume id
        """
        nfs_server_ip = self._get_host_ip(volume_id)
        export_path = self._get_export_path(volume_id)
        return nfs_server_ip + ':' + export_path
    def _get_provider_location(self, volume_id):
        """Returns provider location for given volume.

        :param volume_id: string volume id
        """
        volume = self.db.volume_get(self.context, volume_id)
        # same format as _get_volume_location
        return volume.provider_location
    def _get_host_ip(self, volume_id):
        """Returns IP address for the given volume.

        :param volume_id: string volume id
        """
        return self._get_provider_location(volume_id).split(':')[0]
    def _get_export_path(self, volume_id):
        """Returns NFS export path for the given volume.

        :param volume_id: string volume id
        """
        return self._get_provider_location(volume_id).split(':')[1]
    def _volume_not_present(self, nfs_mount, volume_name):
        """Check if volume exists.

        :param nfs_mount: string nfs mount (share) the volume lives on
        :param volume_name: string volume name
        :returns: True when the backing file is NOT present
        """
        try:
            self._try_execute('ls', self._get_volume_path(nfs_mount,
                                                          volume_name))
        except processutils.ProcessExecutionError:
            # If the volume isn't present
            return True
        return False
    def _try_execute(self, *command, **kwargs):
        # NOTE(vish): Volume commands can partially fail due to timing, but
        #             running them a second time on failure will usually
        #             recover nicely.
        tries = 0
        while True:
            try:
                self._execute(*command, **kwargs)
                return True
            except processutils.ProcessExecutionError:
                tries += 1
                if tries >= self.configuration.num_shell_tries:
                    raise
                LOG.exception(_("Recovering from a failed execute.  "
                                "Try number %s"), tries)
                # Quadratic backoff between retries.
                time.sleep(tries ** 2)
    def _get_volume_path(self, nfs_share, volume_name):
        """Get volume path (local fs path) for given volume name on given nfs
        share.

        :param nfs_share string, example 172.18.194.100:/var/nfs
        :param volume_name string,
            example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
        """
        return os.path.join(self._get_mount_point_for_share(nfs_share),
                            volume_name)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        :param volume: dictionary volume reference
        :param src_vref: dictionary src_vref reference
        :returns: dict with the provider_location of the clone
        :raises: CinderException when sizes differ
        """
        vol_size = volume['size']
        src_vol_size = src_vref['size']
        if vol_size != src_vol_size:
            msg = _('Cannot create clone of size %(vol_size)s from '
                    'volume of size %(src_vol_size)s')
            msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size}
            raise exception.CinderException(msg % msg_fmt)
        self._clone_volume(src_vref['name'], volume['name'], src_vref['id'])
        share = self._get_volume_location(src_vref['id'])
        return {'provider_location': share}
    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        if 'refresh' is True, update the stats first.
        """
        _stats = super(HDSNFSDriver, self).get_volume_stats(refresh)
        be_name = self.configuration.safe_get('volume_backend_name')
        _stats["volume_backend_name"] = be_name or 'HDSNFSDriver'
        _stats["vendor_name"] = 'HDS'
        _stats["driver_version"] = HDS_HNAS_NFS_VERSION
        _stats["storage_protocol"] = 'NFS'
        return _stats
    def _get_nfs_info(self):
        # Builds a dict keyed by "<ip>:<export>" describing each NFS export
        # reported by the backend.
        out = self.bend.get_nfs_info(self.config['hnas_cmd'],
                                     self.config['mgmt_ip0'],
                                     self.config['username'],
                                     self.config['password'])
        lines = out.split('\n')
        # dict based on NFS exports addresses
        conf = {}
        for line in lines:
            if 'Export' in line:
                inf = line.split()
                (export, path, fslabel, hdp, evs, ip1) = \
                    inf[1], inf[3], inf[5], inf[7], inf[9], inf[11]
                # 9, 10, etc are IP addrs; only the first IP is used as key
                key = ip1 + ':' + export
                conf[key] = {}
                conf[key]['path'] = path
                conf[key]['hdp'] = hdp
                conf[key]['fslabel'] = fslabel
                msg = _('nfs_info: %(key)s: %(path)s, HDP: \
                    %(fslabel)s FSID: %(hdp)s')
                LOG.info(msg
                         % {'key': key,
                            'path': path,
                            'fslabel': fslabel,
                            'hdp': hdp})
        return conf
    def do_setup(self, context):
        """Perform internal driver setup.

        Loads the shares config and enriches each configured service with
        'path', 'fsid' and 'fslabel' taken from the live NFS export info.
        """
        self.context = context
        self._load_shares_config(getattr(self.configuration,
                                         self.driver_prefix +
                                         '_shares_config'))
        LOG.info("Review shares: %s" % self.shares)
        nfs_info = self._get_nfs_info()
        for share in self.shares:
            if share in nfs_info.keys():
                LOG.info("share: %s -> %s" % (share, nfs_info[share]['path']))
                for svc in self.config['services'].keys():
                    if share == self.config['services'][svc]['hdp']:
                        self.config['services'][svc]['path'] = \
                            nfs_info[share]['path']
                        # don't overwrite HDP value
                        self.config['services'][svc]['fsid'] = \
                            nfs_info[share]['hdp']
                        self.config['services'][svc]['fslabel'] = \
                            nfs_info[share]['fslabel']
                        LOG.info("Save service info for %s -> %s, %s"
                                 % (svc, nfs_info[share]['hdp'],
                                    nfs_info[share]['path']))
                        break
                # NOTE(review): this relies on the loop variable 'svc' after
                # the for-loop: it only checks the LAST service iterated (or
                # the one we broke on). With multiple services a share that
                # matched none of them may escape detection, and an empty
                # services dict would leave 'svc' undefined -- verify.
                if share != self.config['services'][svc]['hdp']:
                    LOG.error("NFS share %s has no service entry: %s -> %s"
                              % (share, svc,
                                 self.config['services'][svc]['hdp']))
                    raise exception.ParameterNotFound(param=svc)
            else:
                LOG.info("share: %s incorrect entry" % share)
    def _clone_volume(self, volume_name, clone_name, volume_id):
        """Clones mounted volume using the HNAS file_clone.

        :param volume_name: string volume name
        :param clone_name: string clone name (or snapshot)
        :param volume_id: string volume id
        """
        export_path = self._get_export_path(volume_id)
        # volume-ID snapshot-ID, /cinder
        LOG.info("Cloning with volume_name %s clone_name %s export_path %s"
                 % (volume_name, clone_name, export_path))
        source_vol = self._id_to_vol(volume_id)
        # sps; added target
        (_hdp, _path, _fslabel) = self._get_service(source_vol)
        target_path = '%s/%s' % (_path, clone_name)
        source_path = '%s/%s' % (_path, volume_name)
        out = self.bend.file_clone(self.config['hnas_cmd'],
                                   self.config['mgmt_ip0'],
                                   self.config['username'],
                                   self.config['password'],
                                   _fslabel, source_path, target_path)
        return out
| |
##########################################################################
#
# Copyright (c) 2010-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import os
import nuke
import imath
import IECore
import IECoreScene
import IECoreNuke
class ParameterisedHolderTest( IECoreNuke.TestCase ) :
	"""Tests transfer of IECore parameter values to and from Nuke knobs."""
	# Recursively asserts that each parameter's value matches its knob.
	# NOTE: 'ignore' is a frozenset (immutable) default rather than set() so
	# the shared default can never be mutated between calls.
	def __checkParameterKnobs( self, parameter, node, knobName=None, parameterPath=None, ignore=frozenset() ) :
		if knobName is None :
			knobName = "parm"
		if parameterPath in ignore :
			return
		if isinstance( parameter, IECore.CompoundParameter ) :
			for k in parameter.keys() :
				childKnobName = knobName + "_" + parameter[k].name
				if not parameterPath :
					childParameterPath = k
				else :
					childParameterPath = parameterPath + "." + k
				self.__checkParameterKnobs( parameter[k], node, childKnobName, childParameterPath, ignore )
		else :
			knob = node.knob( knobName )
			self.failUnless( knob is not None )
			if isinstance( knob, nuke.Enumeration_Knob ) :
				self.assertEqual( knob.value(), parameter.getCurrentPresetName() )
			else :
				knobValue = None
				try :
					knobValue = IECoreNuke.getKnobValue( knob )
					if isinstance( parameter, IECore.V2dParameter ) :
						# getKnobValue defaults to V2f
						knobValue = imath.V2d( knobValue[0], knobValue[1] )
					elif isinstance( parameter, IECore.V3dParameter ) :
						knobValue = imath.V3d( knobValue[0], knobValue[1], knobValue[2] )
				except :
					# not all knob types have accessors yet. some of the numeric
					# knobs don't have them because nuke has bugs and returns those
					# knobs as the wrong type. try to get the value another way.
					knobValue = knob.getValue()
				self.assertEqual( parameter.getValue().value, knobValue )
	def testModifyParametersAndTransferToKnobs( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "mult", "maths/multiply", 2 )
		self.assertEqual( fnOH.node().knob( "parm_a" ).getValue(), 1 )
		self.assertEqual( fnOH.node().knob( "parm_b" ).getValue(), 2 )
		with fnOH.parameterModificationContext() as parameters :
			parameters["a"].setNumericValue( 10 )
			parameters["b"].setNumericValue( 20 )
		self.assertEqual( fnOH.node().knob( "parm_a" ).getValue(), 10 )
		self.assertEqual( fnOH.node().knob( "parm_b" ).getValue(), 20 )
	def testModifyParametersAndUndo( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "mult", "maths/multiply", 2 )
		self.assertEqual( fnOH.node().knob( "parm_a" ).getValue(), 1 )
		self.assertEqual( fnOH.node().knob( "parm_b" ).getValue(), 2 )
		with IECoreNuke.UndoEnabled() :
			with fnOH.parameterModificationContext() as parameters :
				parameters["a"].setNumericValue( 10 )
				parameters["b"].setNumericValue( 20 )
		self.assertEqual( fnOH.node().knob( "parm_a" ).getValue(), 10 )
		self.assertEqual( fnOH.node().knob( "parm_b" ).getValue(), 20 )
		nuke.undo()
		self.assertEqual( fnOH.node().knob( "parm_a" ).getValue(), 1 )
		self.assertEqual( fnOH.node().knob( "parm_b" ).getValue(), 2 )
	def testClassParameterSetClass( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "test", "classParameterTest", 1 )
		with fnOH.parameterModificationContext() as parameterised :
			parameterised["cp"].setClass( "maths/multiply", 2 )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node() )
		self.assertEqual( parameterised.parameters().getValue(), fnOH.getParameterised()[0].parameters().getValue() )
	def testClassParameterSetClassAndValues( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "test", "classParameterTest", 1 )
		with fnOH.parameterModificationContext() as parameterised :
			parameterised["cp"].setClass( "maths/multiply", 2 )
			parameterised["cp"]["a"].setNumericValue( 10 )
			# fixed: the second value was previously assigned to "a" again,
			# leaving "b" untouched.
			parameterised["cp"]["b"].setNumericValue( 20 )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node() )
		self.assertEqual( parameterised.parameters().getValue(), fnOH.getParameterised()[0].parameters().getValue() )
	# Renamed from testClassParameterSetClassAndValues: the duplicate name
	# silently shadowed the method above, so it never ran. This variant also
	# checks the values survive a copy/paste (serialisation) round trip.
	def testClassParameterSetClassAndValuesAfterCopyPaste( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "test", "classParameterTest", 1 )
		with fnOH.parameterModificationContext() as parameterised :
			parameterised["cp"].setClass( "maths/multiply", 2 )
			parameterised["cp"]["a"].setNumericValue( 10 )
			# fixed: the second value was previously assigned to "a" again.
			parameterised["cp"]["b"].setNumericValue( 20 )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node() )
		nuke.nodeCopy( "test/IECoreNuke/parameterisedHolder.nk" )
		nuke.scriptClear()
		n = nuke.nodePaste( "test/IECoreNuke/parameterisedHolder.nk" )
		fnOH = IECoreNuke.FnOpHolder( n )
		parameterised2 = fnOH.getParameterised()[0]
		self.assertEqual( parameterised.parameters().getValue(), parameterised2.parameters().getValue() )
	def testNestedClassParameterSetClass( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "test", "classParameterTest", 1 )
		with fnOH.parameterModificationContext() as parameterised :
			parameterised["cp"].setClass( "classParameterTest", 1 )
			parameterised["cp"]["cp"].setClass( "maths/multiply", 2 )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node() )
		self.assertEqual( parameterised.parameters().getValue(), fnOH.getParameterised()[0].parameters().getValue() )
	def testClassVectorParameter( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "test", "classVectorParameterTest", 1 )
		with fnOH.parameterModificationContext() as parameterised :
			cv = parameterised["cv"]
			cv.setClasses( [
				( "p0", "maths/multiply", 1 ),
				( "p1", "floatParameter", 1 ),
			] )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node() )
		self.assertEqual( parameterised.parameters().getValue(), fnOH.getParameterised()[0].parameters().getValue() )
	def testNestedClassVectorParameter( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "test", "classVectorParameterTest", 1 )
		with fnOH.parameterModificationContext() as parameterised :
			cv = parameterised["cv"]
			cv.setClasses( [
				( "p0", "classParameterTest", 1 ),
			] )
			cp = parameterised["cv"]["p0"]["cp"]
			cp.setClass( "maths/multiply", 2 )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node() )
		self.assertEqual( parameterised.parameters().getValue(), fnOH.getParameterised()[0].parameters().getValue() )
	def testMoreNestedClassVectorParameter( self ) :
		fnOH = IECoreNuke.FnOpHolder.create( "test", "classVectorParameterTest", 1 )
		with fnOH.parameterModificationContext() as parameterised :
			cv = parameterised["cv"]
			cv.setClasses( [
				( "p0", "classVectorParameterTest", 1 ),
			] )
			cv2 = cv["p0"]["cv"]
			cv2.setClasses( [
				( "p0", "classParameterTest", 1 ),
			] )
			cp = cv2["p0"]["cp"]
			cp.setClass( "maths/multiply", 2 )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node() )
		self.assertEqual( parameterised.parameters().getValue(), fnOH.getParameterised()[0].parameters().getValue() )
	def testParameterTypes( self ) :
		# the parameters for which we know we have no handler
		unsupported = { "c", "e", "f", "compound.k", "m", "s", "u", "v", "x", "y", "p1", "p4" }
		# the parameters for which we'll do our own testing because they are not straightforward to deal with in __checkParameterKnobs
		notEasy = set ( ( "p2", "p3" ) )
		mh = IECore.CapturingMessageHandler()
		with mh :
			fnOH = IECoreNuke.FnOpHolder.create( "test", "parameterTypes", 1 )
		# make sure there's an error reported for each unsupported parameter
		self.assertEqual( len( mh.messages ), len( unsupported ) )
		for n in unsupported :
			found = False
			t = "for parameter \"%s\"" % n.split( "." )[-1]
			for m in mh.messages :
				if t in m.message :
					found = True
					break
			self.assertEqual( found, True )
		self.__checkParameterKnobs( fnOH.getParameterised()[0].parameters(), fnOH.node(), ignore=unsupported | notEasy )
		self.assertEqual( fnOH.node().knob( "parm_p2Start" ).getValue(), [ 1, 1, 1 ] )
		self.assertEqual( fnOH.node().knob( "parm_p2End" ).getValue(), [ 1, 1, 1 ] )
		with fnOH.parameterModificationContext() as parameterised :
			parameterised.parameters()["d"].setTypedValue( "lalal" )
			parameterised.parameters()["p2"].setTypedValue( IECore.LineSegment3f( imath.V3f( 10, 11, 12 ), imath.V3f( 12, 10, 9 ) ) )
		self.__checkParameterKnobs( parameterised.parameters(), fnOH.node(), ignore=unsupported | notEasy )
		self.__checkParameterKnobs( fnOH.getParameterised()[0].parameters(), fnOH.node(), ignore=unsupported | notEasy )
		self.assertEqual( fnOH.node().knob( "parm_p2Start" ).getValue(), [ 10, 11, 12 ] )
		self.assertEqual( fnOH.node().knob( "parm_p2End" ).getValue(), [ 2, -1, -3 ] )
	def testDefaultExpression( self ) :
		# create opholder and check the default expression we asked for works
		fnOH = IECoreNuke.FnOpHolder.create( "op", "add", 1 )
		self.assertEqual( fnOH.node().knob( "parm_a" ).toScript(), '{"frame * 2"}' )
		self.failUnless( fnOH.node().knob( "parm_a" ).isAnimated() )
		self.assertEqual( nuke.frame(), 1 )
		self.assertEqual( fnOH.node().knob( "parm_a" ).getValue(), 2 )
		# remove the expression, cut and paste the node, and make sure
		# it doesn't reappear
		fnOH.node().knob( "parm_a" ).clearAnimated()
		self.failIf( fnOH.node().knob( "parm_a" ).isAnimated() )
		nuke.nodeCopy( "test/IECoreNuke/parameterisedHolder.nk" )
		nuke.scriptClear()
		n = nuke.nodePaste( "test/IECoreNuke/parameterisedHolder.nk" )
		fnOH = IECoreNuke.FnOpHolder( n )
		self.assertEqual( fnOH.node().knob( "parm_a" ).toScript(), "2" )
	def tearDown( self ) :
		# Remove any scratch scripts written by the copy/paste tests.
		for f in [
			"test/IECoreNuke/parameterisedHolder.nk",
		] :
			if os.path.exists( f ) :
				os.remove( f )
# Allow running this test file directly; defers to unittest's CLI runner.
if __name__ == "__main__":
    unittest.main()
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import (
conversion_value_rule_service,
)
from .base import ConversionValueRuleServiceTransport, DEFAULT_CLIENT_INFO
class ConversionValueRuleServiceGrpcTransport(
ConversionValueRuleServiceTransport
):
"""gRPC backend transport for ConversionValueRuleService.
Service to manage conversion value rules.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_conversion_value_rules(
self,
) -> Callable[
[conversion_value_rule_service.MutateConversionValueRulesRequest],
conversion_value_rule_service.MutateConversionValueRulesResponse,
]:
r"""Return a callable for the mutate conversion value rules method over gRPC.
Creates, updates, or removes conversion value rules.
Operation statuses are returned.
Returns:
Callable[[~.MutateConversionValueRulesRequest],
~.MutateConversionValueRulesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_conversion_value_rules" not in self._stubs:
self._stubs[
"mutate_conversion_value_rules"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.ConversionValueRuleService/MutateConversionValueRules",
request_serializer=conversion_value_rule_service.MutateConversionValueRulesRequest.serialize,
response_deserializer=conversion_value_rule_service.MutateConversionValueRulesResponse.deserialize,
)
return self._stubs["mutate_conversion_value_rules"]
def close(self):
self.grpc_channel.close()
__all__ = ("ConversionValueRuleServiceGrpcTransport",)
| |
import mock
from tests.compat import unittest
from tests.utils import APITestCase
import evelink.eve as evelink_eve
class EVETestCase(APITestCase):
    """Tests for evelink's EVE-wide endpoint wrappers.

    Each test stubs ``self.api.get`` with a canned XML fixture, then checks
    both the parsed result and the exact parameters of the underlying API
    call. All fixtures report current time 12345 and expiry 67890.
    """

    def setUp(self):
        super(EVETestCase, self).setUp()
        self.eve = evelink_eve.EVE(api=self.api)

    def test_character_names_from_ids(self):
        """Bulk character ID -> name lookup."""
        self.api.get.return_value = self.make_api_result("eve/character_name.xml")
        result, current, expires = self.eve.character_names_from_ids(set([1,2]))
        self.assertEqual(result, {1:"EVE System", 2:"EVE Central Bank"})
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/CharacterName', params={'IDs': set([1,2])}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_character_name_from_id(self):
        """Single character ID -> name lookup."""
        self.api.get.return_value = self.make_api_result("eve/character_name_single.xml")
        result, current, expires = self.eve.character_name_from_id(1)
        self.assertEqual(result, "EVE System")
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/CharacterName', params={'IDs': [1]}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_character_ids_from_names(self):
        """Bulk character name -> ID lookup."""
        self.api.get.return_value = self.make_api_result("eve/character_id.xml")
        result, current, expires = self.eve.character_ids_from_names(set(["EVE System", "EVE Central Bank"]))
        self.assertEqual(result, {"EVE System":1, "EVE Central Bank":2})
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/CharacterID', params={'names': set(["EVE System","EVE Central Bank"])}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_character_id_from_name(self):
        """Single character name -> ID lookup."""
        self.api.get.return_value = self.make_api_result("eve/character_id_single.xml")
        result, current, expires = self.eve.character_id_from_name("EVE System")
        self.assertEqual(result, 1)
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/CharacterID', params={'names': ["EVE System"]}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_affiliations_for_characters(self):
        """Bulk affiliation lookup parses corp/alliance/faction per character."""
        self.api.get.return_value = self.make_api_result("eve/character_affiliation.xml")
        result, current, expires = self.eve.affiliations_for_characters(set([92168909, 401111892, 1979087900]))
        self.assertEqual(result, {
            1979087900: {
                'id': 1979087900,
                'name': 'Marcel Devereux',
                'faction': {
                    'id': 500004,
                    'name': 'Gallente Federation'
                },
                'corp': {
                    'id': 1894214152,
                    'name': 'Aideron Robotics'
                }
            },
            401111892: {
                'id': 401111892,
                'name': 'ShadowMaster',
                'alliance': {
                    'id': 99000652,
                    'name': 'RvB - BLUE Republic'
                },
                'corp': {
                    'id': 1741770561,
                    'name': 'Blue Republic'
                }
            },
            92168909: {
                'id': 92168909,
                'name': 'CCP FoxFour',
                'alliance': {
                    'id': 434243723,
                    'name': 'C C P Alliance'
                },
                'corp': {
                    'id': 109299958,
                    'name': 'C C P'
                }
            }
        })
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/CharacterAffiliation', params={'ids': set([92168909, 401111892, 1979087900])})
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_affiliations_for_character(self):
        """Single-character affiliation lookup."""
        self.api.get.return_value = self.make_api_result("eve/character_affiliation_single.xml")
        result, current, expires = self.eve.affiliations_for_character(92168909)
        self.assertEqual(result, {
            'id': 92168909,
            'name': 'CCP FoxFour',
            'alliance': {
                'id': 434243723,
                'name': 'C C P Alliance'
            },
            'corp': {
                'id': 109299958,
                'name': 'C C P'
            }
        })
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/CharacterAffiliation', params={'ids': [92168909]})
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_character_info_from_id(self):
        """Public character info; private fields come back as None."""
        self.api.get.return_value = self.make_api_result("eve/character_info.xml")
        result, current, expires = self.eve.character_info_from_id(1234)
        self.assertEqual(result, {
            'alliance': {'id': None, 'name': None, 'timestamp': None},
            'bloodline': 'Civire',
            'corp': {'id': 2345, 'name': 'Test Corporation', 'timestamp': 1338689400},
            'history': [
                {'corp_id': 1, 'corp_name': 'test_one', 'start_ts': 1338603000},
                {'corp_id': 2, 'corp_name': 'test_two', 'start_ts': 1318422896}
            ],
            'id': 1234,
            'isk': None,
            'location': None,
            'name': 'Test Character',
            'race': 'Caldari',
            'sec_status': 2.5,
            'ship': {'name': None, 'type_id': None, 'type_name': None},
            'skillpoints': None,
        })
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/CharacterInfo', params={'characterID': 1234}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_alliances(self):
        """Alliance list including member corporations and join timestamps."""
        self.api.get.return_value = self.make_api_result("eve/alliances.xml")
        result, current, expires = self.eve.alliances()
        self.assertEqual(result, {
            1: {
                'executor_id': 2,
                'id': 1,
                'member_corps': {
                    2: {'id': 2, 'timestamp': 1289250660},
                    3: {'id': 3, 'timestamp': 1327728960},
                    4: {'id': 4, 'timestamp': 1292440500},
                },
                'member_count': 123,
                'name': 'Test Alliance',
                'ticker': 'TEST',
                'timestamp': 1272717240,
            }
        })
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/AllianceList', params={}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_errors(self):
        """API error-code table parses into an id -> message dict."""
        self.api.get.return_value = self.make_api_result("eve/errors.xml")
        result, current, expires = self.eve.errors()
        self.assertEqual(result, {1:"Foo", 2:"Bar"})
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/ErrorList', params={}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_faction_warfare_stats(self):
        """Global FW stats: totals, per-faction stats and the war matrix."""
        self.api.get.return_value = self.make_api_result("eve/faction_warfare_stats.xml")
        result, current, expires = self.eve.faction_warfare_stats()
        self.assertEqual(result, {
            'kills': {'total': 232772, 'week': 3246, 'yesterday': 677},
            'points': {'total': 44045189, 'week': 414049, 'yesterday': 55087},
            'factions': {
                500001: {
                    'id': 500001,
                    'kills': {'total': 59239, 'week': 627, 'yesterday': 115},
                    'name': 'Caldari State',
                    'pilots': 5324,
                    'points': {'total': 4506493, 'week': 64548, 'yesterday': 9934},
                    'systems': 61,
                },
                500002: {
                    'id': 500002,
                    'kills': {'total': 56736, 'week': 952, 'yesterday': 213},
                    'name': 'Minmatar Republic',
                    'pilots': 4068,
                    'points': {'total': 3627522, 'week': 51211, 'yesterday': 2925},
                    'systems': 0,
                },
                500003: {
                    'id': 500003,
                    'kills': {'total': 55717, 'week': 1000, 'yesterday': 225},
                    'name': 'Amarr Empire',
                    'pilots': 3960,
                    'points': {'total': 3670190, 'week': 50518, 'yesterday': 3330},
                    'systems': 11,
                },
                500004: {
                    'id': 500004,
                    'kills': {'total': 61080, 'week': 667, 'yesterday': 124},
                    'name': 'Gallente Federation',
                    'pilots': 3663,
                    'points': {'total': 4098366, 'week': 62118, 'yesterday': 10343},
                    'systems': 0,
                },
            },
            'wars': [
                    {
                        'against': {'id': 500002, 'name': 'Minmatar Republic'},
                        'faction': {'id': 500001, 'name': 'Caldari State'},
                    },
                    {
                        'against': {'id': 500004, 'name': 'Gallente Federation'},
                        'faction': {'id': 500001, 'name': 'Caldari State'},
                    },
                    {
                        'against': {'id': 500001, 'name': 'Caldari State'},
                        'faction': {'id': 500002, 'name': 'Minmatar Republic'},
                    },
                    {
                        'against': {'id': 500003, 'name': 'Amarr Empire'},
                        'faction': {'id': 500002, 'name': 'Minmatar Republic'},
                    },
                    {
                        'against': {'id': 500002, 'name': 'Minmatar Republic'},
                        'faction': {'id': 500003, 'name': 'Amarr Empire'},
                    },
                    {
                        'against': {'id': 500004, 'name': 'Gallente Federation'},
                        'faction': {'id': 500003, 'name': 'Amarr Empire'},
                    },
                    {
                        'against': {'id': 500001, 'name': 'Caldari State'},
                        'faction': {'id': 500004, 'name': 'Gallente Federation'},
                    },
                    {
                        'against': {'id': 500003, 'name': 'Amarr Empire'},
                        'faction': {'id': 500004, 'name': 'Gallente Federation'},
                    }
                ],
        })
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/FacWarStats', params={}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_faction_warfare_leaderboard(self):
        """FW leaderboards for characters, corporations and factions."""
        self.api.get.return_value = self.make_api_result("eve/faction_warfare_leaderboard.xml")
        result, current, expires = self.eve.faction_warfare_leaderboard()
        self.assertEqual(result, {
            'char': {
                'kills': {
                    'total': [{'id': 673662188, 'kills': 451, 'name': 'Val Erian'}],
                    'week': [{'id': 187452523, 'kills': 52, 'name': 'Tigrana Blanque'}],
                    'yesterday': [
                        {'id': 1007512845, 'kills': 14, 'name': 'StonedBoy'},
                        {'id': 646053002, 'kills': 11, 'name': 'Erick Voliffe'},
                    ],
                },
                'points': {
                    'total': [{'id': 395923478, 'name': 'sasawong', 'points': 197046}],
                    'week': [{'id': 161929388, 'name': 'Ankhesentapemkah', 'points': 20851}],
                    'yesterday': [{'id': 774720050, 'name': 'v3nd3tt4', 'points': 3151}],
                },
            },
            'corp': {
                'kills': {
                    'total': [{'id': 673662188, 'kills': 451, 'name': 'Val Erian'}],
                    'week': [{'id': 187452523, 'kills': 52, 'name': 'Tigrana Blanque'}],
                    'yesterday': [
                        {'id': 1007512845, 'kills': 14, 'name': 'StonedBoy'},
                        {'id': 646053002, 'kills': 11, 'name': 'Erick Voliffe'},
                    ],
                },
                'points': {
                    'total': [{'id': 395923478, 'name': 'sasawong', 'points': 197046}],
                    'week': [{'id': 161929388, 'name': 'Ankhesentapemkah', 'points': 20851}],
                    'yesterday': [{'id': 774720050, 'name': 'v3nd3tt4', 'points': 3151}],
                },
            },
            'faction': {
                'kills': {
                    'total': [{'id': 500004, 'kills': 104, 'name': 'Gallente Federation'}],
                    'week': [{'id': 500004, 'kills': 105, 'name': 'Gallente Federation'}],
                    'yesterday': [{'id': 500004, 'kills': 106, 'name': 'Gallente Federation'}],
                },
                'points': {
                    'total': [{'id': 500004, 'points': 101, 'name': 'Gallente Federation'}],
                    'week': [{'id': 500004, 'points': 102, 'name': 'Gallente Federation'}],
                    'yesterday': [{'id': 500004, 'points': 103, 'name': 'Gallente Federation'}],
                },
            },
        })
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/FacWarTopStats', params={}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_conquerable_stations(self):
        """Conquerable station list with owning corporation details."""
        self.api.get.return_value = self.make_api_result("eve/conquerable_stations.xml")
        result, current, expires = self.eve.conquerable_stations()
        self.assertEqual(result, {
            1:{ 'id':1,
                'name':"Station station station",
                'type_id':123,
                'system_id':512,
                'corp':{
                    'id':444,
                    'name':"Valkyries of Night" }
                },
            2:{ 'id':2,
                'name':"Station the station",
                'type_id':42,
                'system_id':503,
                'corp':{
                    'id':400,
                    'name':"Deus Fides Empire"}
                }
            })
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/ConquerableStationlist', params={}),
            ])
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)

    def test_skill_tree(self):
        """Skill tree parsing: groups, skills, bonuses and prerequisites."""
        self.api.get.return_value = self.make_api_result("eve/skill_tree.xml")
        result, current, expires = self.eve.skill_tree()
        self.assertEqual(result, {
                255: {
                    'id': 255,
                    'name': 'Gunnery',
                    'skills': {
                        3300: {
                            'attributes': {
                                'primary': 'perception',
                                'secondary': 'willpower',
                            },
                            'bonuses': {
                                'turretSpeeBonus': {
                                    'type': 'turretSpeeBonus',
                                    'value': -2.0,
                                },
                            },
                            'description': "Basic turret operation skill. 2% Bonus to weapon turrets' rate of fire per skill level.",
                            'group_id': 255,
                            'id': 3300,
                            'name': 'Gunnery',
                            'published': True,
                            'rank': 1,
                            'required_skills': {},
                        },
                        3301: {
                            'attributes': {
                                'primary': 'perception',
                                'secondary': 'willpower',
                            },
                            'bonuses': {
                                'damageMultiplierBonus': {
                                    'type': 'damageMultiplierBonus',
                                    'value': 5.0,
                                },
                            },
                            'description': 'Operation of small hybrid turrets. 5% Bonus to small hybrid turret damage per level.',
                            'group_id': 255,
                            'id': 3301,
                            'name': 'Small Hybrid Turret',
                            'published': True,
                            'rank': 1,
                            'required_skills': {
                                3300: {
                                    'id': 3300,
                                    'level': 1,
                                    'name': 'Gunnery',
                                },
                            },
                        },
                    },
                },
                266: {
                    'id': 266,
                    'name': 'Corporation Management',
                    'skills': {
                        11584 : {
                            'id': 11584,
                            'group_id': 266,
                            'name': 'Anchoring',
                            'description': 'Skill at Anchoring Deployables. Can not be trained on Trial Accounts.',
                            'published': True,
                            'rank': 3,
                            'attributes': {
                                'primary': 'memory',
                                'secondary': 'charisma',
                            },
                            'required_skills': {},
                            'bonuses': {
                                'canNotBeTrainedOnTrial': {
                                    'type': 'canNotBeTrainedOnTrial',
                                    'value': 1.0,
                                }
                            }
                        },
                        3369 : {
                            'id': 3369,
                            'group_id': 266,
                            'name': 'CFO Training',
                            'description': 'Skill at managing corp finances. 5% discount on all fees at non-hostile NPC station if acting as CFO of a corp. ',
                            'published': False,
                            'rank': 3,
                            'attributes': {
                                'primary': 'memory',
                                'secondary': 'charisma',
                            },
                            'required_skills': {
                                3363 : { 'id' : 3363, 'level' : 2, 'name' : None },
                                3444 : { 'id' : 3444, 'level' : 3, 'name' : None },
                            },
                            'bonuses': {}
                        }
                    }
                }
            })
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/SkillTree', params={})
            ])

    def test_reference_types(self):
        """Wallet journal reference types parse into an id -> label dict."""
        self.api.get.return_value = self.make_api_result("eve/reference_types.xml")
        result, current, expires = self.eve.reference_types()
        self.assertEqual(result, {
                0: 'Undefined',
                1: 'Player Trading',
                2: 'Market Transaction',
                3: 'GM Cash Transfer',
                4: 'ATM Withdraw',
                5: 'ATM Deposit'
            })
        self.assertEqual(current, 12345)
        self.assertEqual(expires, 67890)
        self.assertEqual(self.api.mock_calls, [
                mock.call.get('eve/RefTypes', params={})
            ])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| |
from argparse import ArgumentParser
import datetime
import os.path
import requests
from config import EcobeeConfig
# Global verbosity flag; set from the command line in __main__.
VERBOSE = False
# First values here are Ecobee's request column names, second are readable names used for the CSV file header
COLUMNS = (
    ("auxHeat1", "Aux Heat (sec)"),
    ("auxHeat2", "Aux Heat Stage 2 (sec)"),
    ("auxHeat3", "Aux Heat Stage 3 (sec)"),
    ("compCool1", "Cool Stage 1 (sec)"),
    ("compCool2", "Cool Stage 2 (sec)"),
    ("compHeat1", "Heat Stage 1 (sec)"),
    ("compHeat2", "Heat Stage 2 (sec)"),
    ("dehumidifier", "Dehumidifier (sec)"),
    ("dmOffset", "Demand Mgmt Offset (F)"),
    ("economizer", "Economizer Runtime (sec)"),
    ("fan", "Fan (sec)"),
    ("humidifier", "Humidifier (sec)"),
    ("hvacMode", "HVAC Mode"),
    ("outdoorHumidity", "Outdoor Humidity (%)"),
    ("outdoorTemp", "Outdoor Temp (F)"),
    ("sky", "Sky Cover"),
    ("ventilator", "Ventilator (sec)"),
    ("wind", "Wind Speed (km/h)"),
    ("zoneAveTemp", "Indoor Temp Avg (F)"),
    ("zoneCalendarEvent", "Override Event"),
    ("zoneClimate", "Climate Mode"),
    ("zoneCoolTemp", "Zone Cool Temp"),
    ("zoneHeatTemp", "Zone Heat Temp"),
    ("zoneHumidity", "Humidity Avg (%)"),
    ("zoneHumidityHigh", "Humidity High (%)"),
    ("zoneHumidityLow", "Humidity Low (%)"),
    ("zoneHvacMode", "HVAC System Mode"),
    ("zoneOccupancy", "Zone Occupancy")
)
# CSV rows always begin with Date and Time, followed by the readable names.
CSV_HEADER_ROW = "Date,Time," + ','.join([column[1] for column in COLUMNS])
class EcobeeCSV:
    """Downloads Ecobee runtime-report history and maintains it as a CSV file.

    The CSV layout is ``Date,Time`` followed by the readable column names from
    ``COLUMNS``, one row per report interval. The file location and API
    credentials come from the supplied config object.
    """

    def __init__(self, config):
        # config: EcobeeConfig-like object holding tokens, thermostat ids and
        # the CSV output path.
        self.config = config

    # Fetch history at given days and save to CSV, overwriting previous fetched data for range
    def update(self, days_ago_start, days_ago_end):
        """Fetch history for [days_ago_start, days_ago_end] and merge it into
        the CSV, replacing any previously saved rows for the same timestamps."""
        self.__refresh_tokens()
        self.__fetch_thermostats()
        new_data = self.__fetch_data(days_ago_start=days_ago_start, days_ago_end=days_ago_end)
        existing_data = self.__read_csv()
        updated_data = self.__update_data(existing_data=existing_data, new_data=new_data)
        self.__write_csv(csv_lines=updated_data)

    # Fetch all history and save to CSV, overwriting all
    def update_all_history(self):
        """Fetch all available history and rewrite the CSV from scratch."""
        # Verify that they want to do this - will overwrite any old data
        choice = input("This will overwrite any existing file at " + self.config.csv_location + ", continue? (y/n) ")
        if choice.lower() != "y":
            return
        self.__refresh_tokens()
        self.__fetch_thermostats()
        data = self.__fetch_all_data()
        self.__write_csv(csv_lines=data)

    # Refresh access and refresh tokens
    def __refresh_tokens(self):
        """Exchange the stored refresh token for new access/refresh tokens
        and persist them to the config."""
        print("***Refreshing tokens***")
        refresh_req_data = {
            'grant_type': 'refresh_token',
            'code': self.config.refresh_token,
            'client_id': self.config.api_key
        }
        response = requests.post('https://api.ecobee.com/token', data=refresh_req_data)
        refresh_json = response.json()
        if VERBOSE:
            print("Refresh token JSON:")
            print(refresh_json)
        self.config.access_token = refresh_json['access_token']
        self.config.refresh_token = refresh_json['refresh_token']
        self.config.save()

    # Fetch list of thermostats and store ids
    def __fetch_thermostats(self):
        """Fetch all registered thermostats and save their ids to the config."""
        print("***Fetching thermostats***")
        url = 'https://api.ecobee.com/1/thermostat?format=json&body={"selection":{"selectionType":"registered",' \
              '"selectionMatch":""}} '
        response = requests.get(url, headers=self.config.auth_header())
        thermostat_json = response.json()
        if VERBOSE:
            print("Thermostat JSON:")
            print(thermostat_json)
        thermostat_ids = []
        for thermostat in thermostat_json["thermostatList"]:
            thermostat_ids.append(thermostat["identifier"])
        self.config.thermostat_ids = thermostat_ids
        self.config.save()

    # Fetch historical data for first thermostat
    def __fetch_data(self, days_ago_start, days_ago_end):
        """Fetch runtime-report rows for the given day range.

        Raises:
            ValueError: if the range is inverted/empty or longer than the
                30 days the runtimeReport endpoint allows per request.
        """
        if days_ago_start <= days_ago_end:
            raise ValueError("Days ago start must be greater than days ago end!")
        # BUG FIX: the original checked days_ago_end - days_ago_start, which is
        # always negative here (start > end is enforced above), so over-long
        # ranges were never rejected.
        if days_ago_start - days_ago_end > 30:
            raise ValueError("Range to fetch must not exceed 30 days!")
        start_date = date_string(days_ago=days_ago_start)
        end_date = date_string(days_ago=days_ago_end)
        print("***Fetching data from " + start_date + " to " + end_date + "***")
        thermostat_ids_csv = self.config.thermostat_ids_csv()
        columns_csv = ','.join([column[0] for column in COLUMNS])
        url = 'https://api.ecobee.com/1/runtimeReport?format=json&body={"startDate":"' + start_date + '"'\
              + ',"endDate":"' + end_date + '"'\
              + ',"columns":"' + columns_csv + '"'\
              + ',"selection":{"selectionType":"thermostats","selectionMatch":"' + thermostat_ids_csv + '"}}'
        if VERBOSE:
            print("Data fetch URL:")
            print(url)
        response = requests.get(url, headers=self.config.auth_header())
        report_json = response.json()
        # Only using first thermostats data, change this in the future to accept more
        data = report_json["reportList"][0]["rowList"]
        if VERBOSE:
            print("Report had " + str(len(data)) + " rows")
        return data

    # Find earliest month with data and then download all data from that point until now
    def __fetch_all_data(self):
        """Probe backwards in 30-day steps to find where history begins, then
        download everything from that point up to today."""
        print("***Fetching all data***")
        history_days_ago = 30
        print("Attempting to find when thermostat history began")
        # Keep looking for data until we hit two years max or break because we found data start
        while history_days_ago < 730:
            # Fetch only one day's worth of data per month to move fast
            sample_month_data = self.__fetch_data(days_ago_start=history_days_ago, days_ago_end=history_days_ago-1)
            start_index = actual_data_start_index(data=sample_month_data)
            if start_index == -1:
                break
            history_days_ago += 30
        # Should now have max number of days to fetch. Start from history_days_ago, subtract 30 and fetch until we hit 0
        all_data = []
        is_first = True
        print("Downloading history starting " + str(history_days_ago) + " days ago")
        while history_days_ago > 0:
            month_data = self.__fetch_data(days_ago_start=history_days_ago, days_ago_end=history_days_ago-30)
            # First month fetched will have garbage data, remove it
            if is_first:
                month_data = month_data[actual_data_start_index(data=month_data):]
                is_first = False
            all_data.extend(month_data)
            history_days_ago = history_days_ago - 30
        return all_data

    # Read existing CSV data if exists
    def __read_csv(self):
        """Return the current CSV contents as a list of stripped lines, or an
        empty list when the file does not exist yet."""
        print("***Reading CSV from " + self.config.csv_location + "***")
        existing_data = []
        if not os.path.exists(self.config.csv_location):
            return []
        with open(self.config.csv_location, 'r') as csv_file:
            for line in csv_file.readlines():
                existing_data.append(line.rstrip())
        return existing_data

    # Override any old data with new data, sort it and return it
    @staticmethod
    def __update_data(existing_data, new_data):
        """Merge new rows over old ones keyed by the "date,time" prefix,
        returning the combined rows sorted chronologically."""
        print("***Updating data***")
        updated_data_dict = {}
        comma_index = 19  # "YYYY-MM-DD,HH:MM:SS" is 19 chars; split key/values there
        # Skip existing_data[0]: it is the header row, rewritten on save.
        for row in existing_data[1:]:
            updated_data_dict[row[0:comma_index]] = row[comma_index:]
        for row in new_data:
            updated_data_dict[row[0:comma_index]] = row[comma_index:]
        updated_data = []
        for item in updated_data_dict.items():
            updated_data.append(item[0] + item[1])
        # ISO date/time prefixes make a plain lexicographic sort chronological.
        updated_data.sort()
        return updated_data

    # Write out data to the CSV
    def __write_csv(self, csv_lines):
        """Write the header plus all data lines, replacing any existing file."""
        print("***Writing CSV to " + self.config.csv_location + "***")
        if VERBOSE:
            print("Writing " + str(len(csv_lines)) + " lines to file")
        with open(self.config.csv_location, 'w') as csv_file:
            csv_file.write(CSV_HEADER_ROW + "\n")
            for line in csv_lines:
                csv_file.write(line + "\n")
# Converts days ago int to date string like 2017-08-07
def date_string(days_ago):
    """Return the date ``days_ago`` days before today as 'YYYY-MM-DD'."""
    target = datetime.date.today() - datetime.timedelta(days=days_ago)
    return target.strftime("%Y-%m-%d")
# Returns index of first row when "actual" thermostat data exists (not just weather)
def actual_data_start_index(data):
    """Return the index of the first row with real thermostat data.

    Returns -1 when no such row exists (the report is weather-only).
    """
    # enumerate + early return replaces the original manual index counter.
    for index, row in enumerate(data):
        if is_actual_data_row(row=row):
            return index
    return -1
# If row has actual thermostat data and not just weather data
def is_actual_data_row(row):
    """Return True when a CSV row carries real thermostat data.

    Weather-only rows leave most thermostat columns blank, so a row with
    fewer than five empty columns is treated as actual data.
    """
    columns = row.split(',')
    # Renamed from the original, misleading `non_empty_count`: this counts
    # EMPTY columns.
    empty_count = columns.count('')
    # Arbitrary number of "empty" columns to look for, may need to change this.
    # Current data shows only these consistently empty columns: dmOffset,skyCover,zoneCalendarEvent
    return empty_count < 5
if __name__ == '__main__':
    # Command-line entry point: either sync a small recent window (default)
    # or rebuild the entire history file with --all-time.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    parser.add_argument("-d1", "--days-ago-start", type=int, default=1,
                        help="Days ago to start history fetch (max 30 days total length)")
    parser.add_argument("-d2", "--days-ago-end", type=int, default=0,
                        help="Days ago to end history fetch (max 30 days total length)")
    parser.add_argument("--all-time", action="store_true", help="Download all data 30 days at a time and save to file")
    args = parser.parse_args()
    # Module-level flag read by the EcobeeCSV helpers.
    VERBOSE = args.verbose
    ecobee = EcobeeCSV(EcobeeConfig())
    if args.all_time:
        ecobee.update_all_history()
    else:
        ecobee.update(days_ago_start=args.days_ago_start, days_ago_end=args.days_ago_end)
| |
#!/usr/bin/python
####################################################
## Run this file before doing a simics sync_char run.
## It produces the sync_char.map file that is read by
## The simics sync_char module.
import sys, os, re, sets, shutil, getopt, sync_common, fsm as fsm_mod
# Make sure . is in the path - common case
path = os.environ.get('PATH')
path += ':.'
os.environ['PATH'] = path
# External tool command lines used to disassemble objects and dump symbols.
objdump = 'objdump-osa --disassemble --line-numbers '
nm = 'nm'
#######################################################################
## Here is the strategy.
## 1. Find the instruction that manipulates the lock and emit
#information for how to find the address of the lock.
## 2. The runtime will need to check the contents of the lock address
#before and after the instruction. It needs to know the semantics of
#the lock to determine if the lock aquire/release was successful
## 3. Marking the return instruction for some routines that can take a
#non-trivial amount of time is difficult to do statically because of
#multiple return instructions and most of all tail call optimization.
## XXX These must agree with values in sync_char.cc and sync_char_post.py
## Flags. Properties of each locking routine
F_LOCK = 0x1 # It locks
F_UNLOCK = 0x2 # It unlocks
F_TRYLOCK = 0x4 # It tries to get the lock
F_INLINED = 0x8 # Inlined function
F_EAX = 0x10 # Address in 0(%EAX)
F_TIME_RET = 0x20 # Once this instruction executes, note time until
                  # ret (allowing for additional call/ret pairs
                  # before ret) NOT USED
F_NOADDR = 0x40 # No address for this lock, it is acq/rel at this pc
F_LOOP_UNROLL = 0x80 # The lock instruction is in a loop that the
                     # compiler unrolls. This is only used for
                     # sync_char_pre, not in syncchar proper.
# Type of lock (the 'sync_id' values used in noninlined_funcs below;
# read/write variants inferred from the R/W prefix — confirm against
# sync_char.cc)
L_SEMA = 1
L_RSEMA = 2
L_WSEMA = 3
L_SPIN = 4
L_RSPIN = 5
L_WSPIN = 6
L_MUTEX = 7
L_COMPL = 8
L_RCU = 9
L_FUTEX = 10
L_CXA = 11
L_CXE = 12
# Regular expressions to find the PC and address of the lock.
# Applied to objdump disassembly lines; named groups: 'addr' = instruction
# address, 'bytes' = raw opcode bytes, 'offset'/'reg' = the memory operand
# of the locked access (when present).
re_lock_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock .*?(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
re_xcas_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\txcas.*')
#re_ret_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+.*?\tret\s+$')
#re_jmp_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+.*?jmp\s+')
#re_pause_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+.*?pause')
re_addr = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+')
# Matches the "movb $0x1, <lock>" store that releases a raw spinlock.
re_raw_spin_unlock_i = re.compile(r'^\s*(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tmovb\s+\$0x1,(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
## Table of locking routines that appear as real (non-inlined) functions in
## the disassembly.  Each entry names the regexp that locates the
## instruction manipulating the lock word ('re_lkaddr_i'), what the routine
## does ('flags'), and which kind of synchronization object it is
## ('sync_id').  'flmatch' is only meaningful for inlined functions and so
## is empty here.
def _ni_entry(flags, sync_id, regex, flmatch=''):
    # Build one table entry as a fresh dict.
    return {'flmatch'     : flmatch,
            'flags'       : flags,
            'sync_id'     : sync_id,
            're_lkaddr_i' : regex,
            }

noninlined_funcs = {}

# Stock and Tx ("c"-prefixed) spinlock acquire flavors.
for _fn in ('_spin_lock', '_spin_lock_irqsave', '_spin_lock_irq',
            '_spin_lock_bh',
            '_cspin_lock', '_cspin_lock_irqsave', '_cspin_lock_irq',
            '_cspin_lock_bh'):
    noninlined_funcs[_fn] = _ni_entry(F_LOCK|F_TIME_RET, L_SPIN, re_lock_i)

# Reader/writer spinlock trylocks (kernel/spinlock.c).
noninlined_funcs['_read_trylock']  = _ni_entry(F_TRYLOCK, L_RSPIN, re_lock_i)
noninlined_funcs['_write_trylock'] = _ni_entry(F_TRYLOCK, L_WSPIN, re_lock_i)

# Non-inlined reader-lock entry points; looked up to learn about callers.
for _fn in ('_read_lock_irqsave', '_read_lock_irq', '_read_lock_bh',
            '_read_lock'):
    noninlined_funcs[_fn] = _ni_entry(F_LOCK|F_TIME_RET, L_RSPIN, re_lock_i)

# Non-inlined writer-lock entry points.
for _fn in ('_write_lock_irqsave', '_write_lock_irq', '_write_lock_bh',
            '_write_lock'):
    noninlined_funcs[_fn] = _ni_entry(F_LOCK|F_TIME_RET, L_WSPIN, re_lock_i)

#######################################################
## Mutexes.  NB: mutex_lock_interruptible's slow path is reached via jmp.
for _fn in ('mutex_lock', 'mutex_lock_interruptible'):
    noninlined_funcs[_fn] = _ni_entry(F_LOCK|F_TIME_RET, L_MUTEX, re_lock_i)
noninlined_funcs['mutex_trylock'] = _ni_entry(F_TRYLOCK, L_MUTEX, re_lock_i)
noninlined_funcs['mutex_unlock']  = _ni_entry(F_UNLOCK|F_TIME_RET, L_MUTEX,
                                              re_lock_i)

#######################################################
## Semaphore slow path; the locked instruction sits in an unrolled loop.
noninlined_funcs['__down'] = _ni_entry(F_LOCK|F_TIME_RET|F_LOOP_UNROLL,
                                       L_SEMA, re_lock_i)

# XXX Completion is not like other locks: there is no single word whose
# state encapsulates the state of the completion.  Also, the struct
# completion* seems to alias the spin lock in the wait_queue_head_t (it is
# the first member), even though the done field should be at the struct
# completion* address.  For now, just count them.  Many of these functions
# do some work and are tail call optimized.
for _fn in ('complete', 'complete_all'):
    noninlined_funcs[_fn] = _ni_entry(F_UNLOCK|F_EAX|F_TIME_RET, L_COMPL,
                                      re_addr)
for _fn in ('wait_for_completion', 'wait_for_completion_timeout',
            'wait_for_completion_interruptible_timeout',
            'wait_for_completion_interruptible'):
    noninlined_funcs[_fn] = _ni_entry(F_LOCK|F_EAX, L_COMPL, re_addr)

# 'sys_futex' is deliberately absent: maybe no caller info is needed,
# since it is a syscall.

####################################
# cxspinlocks.
for _fn in ('_cx_atomic', '_cx_atomic_bh', '_cx_atomic_irq',
            '_cx_atomic_irqsave'):
    noninlined_funcs[_fn] = _ni_entry(F_LOCK|F_EAX|F_LOOP_UNROLL, L_CXA,
                                      re_xcas_i)
for _fn in ('_cx_exclusive', '_cx_exclusive_bh', '_cx_exclusive_irq',
            '_cx_exclusive_irqsave'):
    noninlined_funcs[_fn] = _ni_entry(F_LOCK|F_EAX, L_CXE, re_xcas_i)
noninlined_funcs['_cx_unlock_wait'] = _ni_entry(F_EAX, L_CXA, re_xcas_i)
for _fn in ('_cx_atomic_trylock', '_cx_atomic_trylock_bh'):
    noninlined_funcs[_fn] = _ni_entry(F_TRYLOCK|F_EAX, L_CXA, re_xcas_i)
for _fn in ('_cx_exclusive_trylock', '_cx_exclusive_trylock_bh'):
    noninlined_funcs[_fn] = _ni_entry(F_TRYLOCK|F_EAX, L_CXE, re_xcas_i)
# We only track cx_exclusive unlocks.
for _fn in ('_cx_end', '_cx_end_bh', '_cx_end_irq', '_cx_end_irqrestore'):
    noninlined_funcs[_fn] = _ni_entry(F_UNLOCK, L_CXE, re_raw_spin_unlock_i)
del _fn
# Per-instruction patterns for the *inlined* lock primitives below.  Each
# matches one objdump line and captures the instruction address plus the
# offset/register pair that locates the lock word.
# Semaphore down: "lock decl <lockword>".
re_down_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock decl (?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# rwsem down_read: "lock incl" with the operand pinned to (%eax).
re_down_read_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock incl (?P<offset>[A-Fa-f0-9x]+)?\(\%(?P<reg>eax)\)$')
# rwsem down_read_trylock: "lock cmpxchg" on the lock word.
re_down_read_trylock_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock cmpxchg.*?(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# Semaphore up: "lock incl <lockword>".
re_up_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock incl (?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# A raw read unlock uses the same "lock incl" form as up().
re_raw_read_unlock_i = re_up_i
# rwsem down_write: "lock xadd" on the count word.
re_down_write_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock xadd.*?(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# up_write and up_read share the down_write instruction shape.
re_up_write_i = re_up_read_i = re_down_write_i
# raw write unlock restores the RW_LOCK_BIAS: "lock addl $0x1000000,...".
re_raw_write_unlock_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock addl \$0x1000000,(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# Spinlock trylock: an "xchg" against the lock byte.
re_raw_spin_trylock_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*\txchg .*?(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# Reader spinlock acquire: "lock subl $0x1,...".
re_raw_read_lock_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock subl \$0x1,(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# Writer spinlock acquire: subtract the whole RW_LOCK_BIAS.
re_raw_write_lock_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock subl \$0x1000000,(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# Spinlock acquire: "lock decb" on the lock byte; leading whitespace allowed.
re_raw_spin_lock_i = re.compile(r'^\s*(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock decb\s+(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# rwsem_atomic_add: any "lock add" whose destination is the lock word.
re_rwsem_atomic_add_i =re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tlock add.*?,(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# rcu_read_lock compiles to nothing; match the placeholder nop.
re_nop_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*\tnop\s+$')
# Mutex fast path: plain "xchg" on the count.
re_xchg_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*\txchg .*?(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# Mutex unlock slow path: "movl $0x1,<count>".
re_movl_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+(?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\tmovl\s+\$0x1,(?P<offset>[A-Fa-f0-9x]+)?\(?\%?(?P<reg>[a-z][a-z][a-z])?\)?$')
# INLINED functions.  Each record pairs an instruction pattern ('i') with
# the function name objdump prints for the inlined body ('func_name') and
# the file:line tag that disambiguates it ('flmatch'; the sentinel
# 'only match func_name' means only the name is checked).  Order is
# preserved from the original table because the scanner walks it linearly.
def _inl_entry(regex, func_name, flmatch, flags, sync_id):
    # One inlined-function table entry as a fresh dict.
    return {'i'         : regex,
            'func_name' : func_name,
            'flmatch'   : flmatch,
            'flags'     : flags,
            'sync_id'   : sync_id,
            }

re_i_s = [
    # Semaphores (include/asm/semaphore.h).
    _inl_entry(re_down_i, 'down', 'include/asm/semaphore.h:100', F_INLINED|F_LOCK, L_SEMA),
    _inl_entry(re_down_i, 'down_interruptible', 'include/asm/semaphore.h:124', F_INLINED|F_LOCK, L_SEMA),
    # Inlined public interface; calls __down_failed_trylock, which calls
    # __down_trylock in lib/semaphore-sleepers.c:158.
    _inl_entry(re_down_i, 'down_trylock', 'include/asm/semaphore.h:149', F_INLINED|F_TRYLOCK, L_SEMA),
    _inl_entry(re_up_i, 'up', 'include/asm/semaphore.h:174', F_INLINED|F_UNLOCK, L_SEMA),
    # Reader/writer semaphores (include/asm/rwsem.h).
    _inl_entry(re_down_read_i, '__down_read', 'include/asm/rwsem.h:101', F_INLINED|F_LOCK, L_RSEMA),
    _inl_entry(re_down_read_trylock_i, '__down_read_trylock', 'include/asm/rwsem.h:127', F_INLINED|F_TRYLOCK, L_RSEMA),
    _inl_entry(re_down_write_i, '__down_write', 'include/asm/rwsem.h:152', F_INLINED|F_LOCK, L_WSEMA),
    _inl_entry(re_up_read_i, '__up_read', 'include/asm/rwsem.h:190', F_INLINED|F_UNLOCK, L_RSEMA),
    _inl_entry(re_up_write_i, '__up_write', 'include/asm/rwsem.h:215', F_INLINED|F_UNLOCK, L_WSEMA),
    # Inlined into __reacquire_kernel_lock, _spin_trylock_bh, _spin_trylock.
    _inl_entry(re_raw_spin_trylock_i, '__raw_spin_trylock', 'include/asm/spinlock.h:69', F_INLINED|F_TRYLOCK, L_SPIN),
    # Both inlined into many functions, e.g. _spin_unlock{,_bh,_irqrestore,...}.
    _inl_entry(re_raw_spin_unlock_i, '__raw_spin_unlock', 'include/asm/spinlock.h:92', F_INLINED|F_UNLOCK, L_SPIN),
    _inl_entry(re_raw_spin_lock_i, '__raw_spin_lock', 'include/asm/spinlock.h:54', F_INLINED|F_LOCK, L_SPIN),
    # Covers several callers, including generic__raw_read_trylock.
    # 2.6.16 unmodified kernel, then the Tx kernel variant.
    _inl_entry(re_raw_read_lock_i, '__raw_read_lock', 'include/asm/spinlock.h:153', F_INLINED|F_LOCK|F_TIME_RET, L_RSPIN),
    _inl_entry(re_raw_read_lock_i, '__raw_read_lock', 'include/asm/spinlock.h:167', F_INLINED|F_LOCK|F_TIME_RET, L_RSPIN),
    # All flavors of _write_lock share a retry loop (unmod, then Tx).
    _inl_entry(re_raw_write_lock_i, '__raw_write_lock', 'include/asm/spinlock.h:158', F_INLINED|F_LOCK|F_TIME_RET, L_WSPIN),
    _inl_entry(re_raw_write_lock_i, '__raw_write_lock', 'include/asm/spinlock.h:172', F_INLINED|F_LOCK|F_TIME_RET, L_WSPIN),
    # write_lock / read_lock slow paths.
    _inl_entry(re_raw_write_lock_i, '__write_lock_failed', 'only match func_name', F_LOCK|F_INLINED, L_WSPIN),
    _inl_entry(re_down_i, '__read_lock_failed', 'only match func_name', F_LOCK|F_INLINED, L_RSPIN),
    # rwsem_atomic_add grants read locks en masse in rwsem.c.
    _inl_entry(re_rwsem_atomic_add_i, 'rwsem_atomic_add', 'include/asm/rwsem.h:266', F_LOCK|F_INLINED, L_RSEMA),
    # rwsem_atomic_update could be either lock or unlock, hence no L/U flag.
    _inl_entry(re_down_write_i, 'rwsem_atomic_update', 'include/asm/rwsem.h:279', F_INLINED, L_WSEMA),
    # Reader/writer spin unlocks: 2.6.16 unmodified, then Tx line numbers.
    _inl_entry(re_raw_read_unlock_i, '__raw_read_unlock', 'include/asm/spinlock.h:182', F_UNLOCK|F_INLINED, L_RSPIN),
    _inl_entry(re_raw_write_unlock_i, '__raw_write_unlock', 'include/asm/spinlock.h:187', F_UNLOCK|F_INLINED, L_WSPIN),
    _inl_entry(re_raw_read_unlock_i, '__raw_read_unlock', 'include/asm/spinlock.h:196', F_UNLOCK|F_INLINED, L_RSPIN),
    _inl_entry(re_raw_write_unlock_i, '__raw_write_unlock', 'include/asm/spinlock.h:201', F_UNLOCK|F_INLINED, L_WSPIN),
    # rcu_read_lock emits no code, so the pc itself is the "lock".
    _inl_entry(re_nop_i, 'rcu_read_lock', 'include/linux/rcupdate.h:168', F_INLINED|F_LOCK|F_NOADDR, L_RCU),
    # Mutex fast/slow paths.
    _inl_entry(re_xchg_i, '__mutex_lock_common', 'only match func_name', F_INLINED|F_LOCK, L_MUTEX),
    _inl_entry(re_movl_i, '__mutex_unlock_slowpath', 'kernel/mutex.c:238', F_INLINED|F_LOCK, L_MUTEX),
    ##################### 2.4 kernel spinlocks and rwlocks
    _inl_entry(re_raw_spin_trylock_i, 'spin_trylock', 'include/asm/spinlock.h:224', F_INLINED|F_TRYLOCK, L_SPIN),
    _inl_entry(re_raw_spin_unlock_i, 'spin_unlock', 'include/asm/spinlock.h:193', F_INLINED|F_UNLOCK, L_SPIN),
    _inl_entry(re_raw_spin_lock_i, 'spin_lock', 'include/asm/spinlock.h:241', F_INLINED|F_LOCK, L_SPIN),
    _inl_entry(re_raw_spin_trylock_i, 'raw_spin_trylock', 'include/asm/spinlock.h:127', F_INLINED|F_TRYLOCK, L_SPIN),
    _inl_entry(re_raw_spin_unlock_i, 'raw_spin_unlock', 'include/asm/spinlock.h:96', F_INLINED|F_UNLOCK, L_SPIN),
    _inl_entry(re_raw_spin_lock_i, 'raw_spin_lock', 'include/asm/spinlock.h:144', F_INLINED|F_LOCK, L_SPIN),
    _inl_entry(re_raw_read_lock_i, 'read_lock', 'include/asm/spinlock.h:294', F_INLINED|F_LOCK|F_TIME_RET, L_RSPIN),
    _inl_entry(re_raw_write_lock_i, 'write_lock', 'include/asm/spinlock.h:303', F_INLINED|F_LOCK|F_TIME_RET, L_WSPIN),
    _inl_entry(re_raw_write_lock_i, '__write_lock_failed', 'only match func_name', F_LOCK|F_INLINED, L_WSPIN),
    _inl_entry(re_down_i, '__read_lock_failed', 'only match func_name', F_LOCK|F_INLINED, L_RSPIN),
    _inl_entry(re_raw_read_unlock_i, 'read_unlock', 'include/asm/spinlock.h:320', F_UNLOCK|F_INLINED, L_RSPIN),
    _inl_entry(re_raw_write_unlock_i, 'write_unlock', 'include/asm/spinlock.h:325', F_UNLOCK|F_INLINED, L_WSPIN),
    ################ End 2.4 locks
]
##################################################################
## Sync_char_pre works in two passes.  The first collects information
## from objdump on vmlinux into an array.  The second phase resolves
## dependencies among array entries, and prints the array to the
## output file
## Function record base class, and subclasses
class func_rec :
    """Base record for one instrumentation point found in the disassembly.

    Stores the pc of the enclosing function, the function's name, and the
    pc of the instruction of interest.
    """
    def __init__(self, func_pc, func_name, lock_pc) :
        self.func_pc = func_pc
        self.func_name = func_name
        self.lock_pc = lock_pc

    def resolve(self) :
        # Vestige of an earlier design that had inter-instruction
        # dependencies; nothing to do now.
        pass

    def pr(self, ostr) :
        # Base records emit no output; subclasses print the real lines.
        pass
class fr_lock(func_rec) :
    """Record for a lock-manipulating instruction.

    Adds the offset/register pair that locates the lock word at runtime,
    plus the F_* flags and L_* sync_id describing the operation.
    """
    def __init__(self, func_pc, func_name, lock_pc, lock_off, lock_reg,
                 flags, sync_id) :
        func_rec.__init__(self, func_pc, func_name, lock_pc)
        self.lock_off = lock_off
        self.lock_reg = lock_reg
        self.flags = flags
        self.sync_id = sync_id

    def resolve(self) :
        # No cross-record dependencies to resolve.
        pass

    def pr(self, ostr) :
        # One map line per instrumentation point, consumed by the syncchar
        # runtime; format must stay in sync with sync_char.cc.
        ostr.write('0x%x %10s %s %d %#4x 0x%8s %s' %
                   (int(self.lock_pc, 16), self.lock_off, self.lock_reg,
                    self.sync_id, self.flags, self.func_pc, self.func_name)
                   + '\n')
## This class parses the output of objdump using a state machine.
class SCAN_OBJ:
    '''Scan output of objdump using a state machine to look for important kernel features'''
    # Class-level alias of the non-inlined function table defined above.
    ni_funcs = noninlined_funcs
    def _init_fsm(self) :
        '''Initialize the finite state machine that scans objdump --disassemble --line-number output.'''
        self.fsm = fsm_mod.FSM()
        self.fsm.start('scan')
        # Start state: scan
        # 'in_func' entered at a function definition where we find
        # a non-inlined function in s_func dict. While in
        # 'in_func' we do not look for inlined functions, but we do
        # look for lock/unlock instructions to find how their address
        # is generated. Return to scan on finding the next func_def.
        # 'inlined' entered when we find an inlined function in s_func
        # We look for lock/unlock instructions, and return to 'scan'
        # when we find one.
        # If we are not interested, transition to 'scan' by raising RestartError
        self.fsm.add('scan', self.re_func_def, 'in_func', self.in_func)
        # Record inlined function name
        self.fsm.add('scan', self.re_func, 'inlined',self.set_func_name)
        # Record filename & line number info
        self.fsm.add('scan', self.re_fl, 'inlined',self.set_fl)
        # Default scan action, keep scanning
        self.fsm.add('scan', None, 'scan', None)
        self.fsm.add('inlined', self.re_fl, 'inlined',self.set_fl)
        # Allow inlined func name to change while looking for inlined func
        self.fsm.add('inlined', self.re_func, 'inlined',self.set_func_name)
        # First check for any inst in case we are in nexti and inlined
        # found_nexti also searches for any important instruction
        self.fsm.add('inlined', self.re_anyinst_i, 'inlined',self.found_nexti)
        # If we don't find lock addr instruction, go back to scanning
        self.fsm.add('inlined', self.re_func_def, 'in_func', self.in_func)
        self.fsm.add('inlined', None, 'inlined', None)
    # Our fsm implementation does not support dynamically changing the regexps
    # that form the transition arcs, so each time we change the regex, install
    # all transition rules for that state.
    def _fsm_in_func(self, re_lockaddr_i, unrolled_loop) :
        '''Reinstall the 'in_func' state transitions for the regexp that
        locates the current function's lock instruction.'''
        self.fsm.del_state('in_func')
        # Store this here in case we need to print it later
        self.re_lockaddr_i = re_lockaddr_i
        # If we are within a function and find a function definition,
        # see if we are interested, otherwise switch back to 'scan'
        self.fsm.add('in_func', self.re_func_def, 'in_func', self.in_func)
        # Search for instruction that has lock address
        # NB: We would pick up an inlined function after
        # the lock instruction for this non-inlined function, but we
        # don't pick up another lockaddr_i.
        # In the case of loop unrolling that involves the locked
        # instruction (e.g. __down), we can't quit on the first lock
        # instruction. Instead, we use the empty line objdump inserts
        # between functions.
        if unrolled_loop :
            self.fsm.add('in_func', self.re_lockaddr_i,'in_func',self.found_lock_addr)
            self.fsm.add('in_func', self.re_whitespace,'scan',None)
        else :
            self.fsm.add('in_func', self.re_lockaddr_i,'scan',self.found_lock_addr)
        self.fsm.add('in_func', None, 'in_func',None)
    def __init__(self) :
        '''Initialize SCAN_OBJ object.'''
        # Name of the (inlined or non-inlined) function we are in.
        self.func_name = ''
        # Where the last function (not inlined) started
        self.func_pc = '0'
        # Current inline filename & line number
        self.file_line = 'bogus file and line'
        self.lock_ra = '0' # nexti is associated with this lock RA
        self.insts = [] # List of instrumentation points
        # Function definition header, e.g. "c0100000 <name>:".
        self.re_func_def = re.compile(r'^(?P<addr>[A-Fa-f0-9]+) <(?P<func_name>[A-Za-z0-9_$#@./]+)>:')
        # Inlined function marker, e.g. "name():".
        self.re_func = re.compile(r'^(?P<func_name>[A-Za-z0-9_$#@./]+)\(\):')
        # Source location line, e.g. "file.c:123".
        self.re_fl = re.compile(r'^(?P<file_line>[A-Za-z0-9_$#@./-]+:[0-9]+)')
        self.re_whitespace = re.compile(r'^\s*$')
        # Instruction must have opcode.  A line might be a
        # continuation of previous line, e.g.,
        # c016baa9:       f0 81 05 6c 9b 2e c0    lock addl $0x1000000,0xc02e9b6c
        # c016bab0:       00 00 00 01
        self.re_anyinst_i = re.compile(r'''
        ^\s*(?P<addr>[A-Fa-f0-9]+):\s+  # The instruction address, can be prefixed with whitespace in a smaller app
        (?P<bytes>([0-9a-z][0-9a-z][ \t])+)\s*?\t[a-z]+.*?$
        ''', re.VERBOSE)
        # Detect direct and indirect calls, e.g., call   *0x38(%edx)
        # call   c02a01da <mutex_unlock>, call   *%eax
        # self.re_call_i = re.compile(r'^(?P<addr>[A-Fa-f0-9]+):\s+.*?\tcall\s+(\*[a-zA-F0-9x%]+|[a-fA-F0-9x]+ )')
        # This gets set dynamically to 're_lkaddr_i' of function we are looking at
        self._init_fsm()
    # Remember the function name
    def set_func_name(self, state, input, m) :
        '''FSM action: record the name of the (inlined) function just seen.'''
        # Special case for the xchg in the mutex implementation.
        # Since we don't actually care about the inlined __xchg()
        # function, we just don't set its name as such.
        if m.group('func_name') != '__xchg' :
            self.func_name = m.group('func_name')
    def record_lock_info(self, m, func_name, flags, sync_id) :
        '''Append an fr_lock record for a matched lock instruction,
        extracting the optional offset/register operand from the match.'''
        assert func_name != '', func_name
        assert m.group('addr') != '0', (func_name, m.group(0))
        # Defaults when the pattern has no/empty operand groups.
        addr_off = '0x0'
        addr_reg = 'nil'
        # Not all patterns define these groups; IndexError keeps defaults.
        try :
            addr_off = m.group('offset')
            if addr_off == None: addr_off = '0x0'
        except IndexError : pass
        try:
            addr_reg = m.group('reg')
            if addr_reg == None: addr_reg = 'nil'
        except IndexError : pass
        # Override match for F_EAX flag
        if flags & F_EAX :
            addr_reg = 'eax'
        if (flags & F_NOADDR) == 0 :
            # No atheists in foxholes and no locks at address 0
            assert not (addr_off == '0x0' and addr_reg == 'nil'), func_name
        # Append the record
        self.insts.append(fr_lock(self.func_pc, func_name, m.group('addr'),
                                  addr_off, addr_reg, flags, sync_id))
        del addr_off, addr_reg
    def real_found_lock_addr(self, state, input, m, func_name, lock_ra, flags,
                             sync_id) :
        '''Record the lock instruction and, unless F_EAX, queue a
        placeholder whose pc is filled in by the next instruction seen.'''
        self.record_lock_info(m, func_name, flags, sync_id)
        self.lock_ra = lock_ra
        # F_EAX means mark this address, the first address of the function
        if (flags & F_EAX) == 0 :
            # Insert record with bogus "zero" address. We will look
            # for this flag when we fine the next instruction. If
            # something gets messed up, the zero will show up in the output as an
            # address (e.g., "0x zero") indicating something is seriously wrong.
            self.insts.append(func_rec(self.lock_ra, func_name, "zero"))
    def found_lock_addr(self, state, input, m) :
        '''FSM action for 'in_func': lock instruction of a non-inlined
        routine; look up its flags/sync_id in the table.'''
        self.real_found_lock_addr(state, input, m, self.func_name,
                                  m.group('addr'),
                                  self.ni_funcs[self.func_name]['flags'],
                                  self.ni_funcs[self.func_name]['sync_id'])
    def found_nexti(self, state, input, m) :
        '''FSM action: any instruction line.  First patch the pending
        "zero" placeholder, then test the line against every inlined
        pattern in re_i_s.'''
        try:
            if self.insts[len(self.insts) - 1].lock_pc == "zero" :
                self.insts[len(self.insts) - 1].lock_pc = m.group('addr')
        except IndexError:
            # Transient when self.insts is empty
            pass
        # Do work of text parsing state machine manually, doing this transition
        lock_ra = m.group('addr')
        for re_i in re_i_s :
            m = re_i['i'].match(input)
            if m :
                if self.func_name == re_i['func_name'] \
                   or self.file_line.find(re_i['flmatch']) != -1 :
                    if opt_verbose :
                        print self.func_name, self.file_line
                        print input,
                    self.real_found_lock_addr(state, input, m, re_i['func_name'],
                                              lock_ra, re_i['flags'],
                                              re_i['sync_id'])
    def in_func(self, state, input, m) :
        '''FSM action: function definition header.  If the function is in
        the non-inlined table, start looking for its lock instruction;
        otherwise restart scanning.'''
        self.func_name = m.group('func_name')
        self.func_pc = m.group('addr')
        try :
            fr = self.ni_funcs[self.func_name]
            # Ignore all inlined calls until next function
            if opt_verbose :
                print 'IN_FUNC %s %s %s' % (self.func_name,
                                            self.func_pc,
                                            fr['flmatch'])
            # Look for this instruction in this function
            self._fsm_in_func(self.ni_funcs[self.func_name]['re_lkaddr_i'], fr['flags'] & F_LOOP_UNROLL)
        except KeyError :
            self.func_name = '' # 'scan' continues,
            # but we are not interested in this function
            raise fsm_mod.RestartException('scan')
    def set_fl(self, state, input, m) :
        '''Call this when we see source line information in the input'''
        self.file_line = m.group('file_line')
    def inlined_err(self, state, input, m) :
        '''Diagnostic: expected an inlined lock instruction but saw
        something else.'''
        if self.re_lockaddr_i == None :
            print 'Inlined func %s\n\tFound:%s'%(
                self.func_name, m.group(0))
        else :
            print 'Inlined func %s, but no instr matching\n\t%s\n\tFound:%s'%(
                self.func_name, self.re_lockaddr_i.pattern, m.group(0))
    def nexti_err(self, state, input, m) :
        '''Diagnostic: could not find the instruction following a lock.'''
        print 'Nexti error %s\n%s' %(self.func_name, m.group(0))
    def scan(self, istr, ostr) :
        '''Scan the input stream for functions and their return instructions.'''
        for line in istr :
            try:
                self.fsm.execute(line)
            except fsm_mod.FSMError:
                print 'Invalid input: %s' % line,
        ##fsm.execute(fsm_mod.FSMEOF) ## No cleanup action needed
        ## Now actually resolve and print records
        for inst in self.insts :
            inst.resolve()
            inst.pr(ostr)
def get_static_locks(ostr, nm, vmlinux) :
    '''Emit map lines for statically allocated kernel locks.

    Runs nm on the kernel image, collects the symbol table via
    sync_common.GET_NM, and prints one "0x<addr> 4 1 <name>" line for
    every known static lock symbol that is present.  Output order follows
    the hard-coded name list below.
    '''
    nm_sym = {}
    # NOTE(review): GET_NM presumably maps symbol name -> address; the
    # True argument's meaning is defined in sync_common -- confirm there.
    sync_common.GET_NM(nm_sym, os.popen('%s %s' % (nm, vmlinux)), True)
    # Known statically allocated locks, by symbol name.
    spinlock_names = ['kernel_flag', 'pool_lock', 'vfsmount_lock',
                      'dcache_lock', 'logbuf_lock', 'i8259A_lock',
                      'files_lock', 'mmlist_lock', 'unix_table_lock',
                      'inet_peer_unused_lock', 'pci_bus_lock', 'kmap_lock',
                      'inode_lock', 'i8253_lock', 'rtc_lock',
                      'tty_ldisc_lock', 'vga_lock', 'ide_lock',
                      'call_lock', 'sb_lock', 'unnamed_dev_lock',
                      # This is actually a timer_base_t object that is
                      # used for non-per-cpu timers.  The first field
                      # is a spinlock, so we can use its address to
                      # get these static locks.
                      '__init_timer_base',
                      'proc_inum_lock', 'set_atomicity_lock',
                      'ioapic_lock', 'workqueue_lock', 'net_family_lock',
                      'pci_config_lock', 'sequence_lock', 'pci_lock',
                      'uidhash_lock', 'pdflush_lock', 'bdev_lock',
                      'swap_lock', 'mb_cache_spinlock',
                      'cache_list_lock', 'sysctl_lock', 'elv_list_lock',
                      'cpa_lock', 'pgd_lock', 'serio_event_lock',
                      'i8042_lock', 'lweventlist_lock', 'net_todo_list_lock',
                      'cdrom_lock', 'inet_proto_lock', 'inetsw_lock',
                      'ptype_lock', 'tcp_cong_list_lock', 'inet_diag_register_lock',
                      'cdev_lock', 'tlbstate_lock', 'redirect_lock',
                      'rt_flush_lock', 'task_capability_lock',
                      'rtc_task_lock', 'acpi_device_lock', 'acpi_prt_lock',
                      # STAMP
                      'globalLock',
                      # From here on: 2.4 kernel symbols.
                      'kernel_flag_cacheline',
                      'log_wait', # It is wait_queue_head_t whose first field is lock
                      'timerlist_lock',
                      'global_bh_lock',
                      'contig_page_data',
                      'lru_list_lock_cacheline',
                      'proc_alloc_map_lock',
                      'runqueue_lock',
                      'tasklist_lock',
                      'lastpid_lock',
                      'nl_table_wait',
                      'context_task_wq',
                      'kswapd_wait',
                      'swaplock',
                      'emergency_lock',
                      'bdflush_wait',
                      'kupdate_wait',
                      'nls_lock',
                      'kbd_controller_lock',
                      'ime_lock',
                      'io_request_lock',
                      'pagecache_lock_cacheline',
                      'pagemap_lru_lock_cacheline',
                      'unused_list_lock',
                      'tqueue_lock',
                      'page_uptodate_lock.0',
                      'buffer_wait',
                      'random_write_wait',
                      'random_read_wait',
                      'context_task_done',
                      'journal_datalist_lock',
                      'arbitration_lock',
                      'tty_ldisc_wait',
                      'kmap_lock_cacheline',
                      'modlist_lock',
                      'shmem_ilock',
                      'semaphore_lock',
                      'jh_splice_lock',
                      'hash_wait',
                      'ip_lock',
                      ]
    for name in spinlock_names :
        # Only symbols that actually exist in this kernel build.
        if name in nm_sym :
            ostr.write('0x%x 4 1 %s\n' % (nm_sym[name], name))
#############################################################
# Main program starts here
def usage() :
    '''Write the command-line usage summary to stdout.'''
    sys.stdout.write(sys.argv[0] +
''': [-v verbose output (false)]
    [-h this help message]''' + '\n')
###################################################################
## Main program starts here
## Get options
try :
    # -v: verbose, -h: help, -x <image>: kernel/app binary to scan.
    opts, args = getopt.getopt(sys.argv[1:],'hvx:')
except getopt.GetoptError :
    usage()
    sys.exit(2)
opt_verbose = False
# Default binary to disassemble if -x is not given.
vmlinux = 'vmlinux'
for o, a in opts :
    if o == '-v' :
        opt_verbose = True
    if o == '-h' :
        usage()
        sys.exit(2)
    if o == '-x' :
        vmlinux = a
# Please change these to the local location in your file system.
outf = 'sync_char.map'
vstr = ''
# Figure out if we are dealing with linux or not
vmlinux_re = re.compile('''vmlinux''')
m = vmlinux_re.match(vmlinux)
if m :
    # For a kernel image, derive the version either from the file name
    # (vmlinux-<version>) or from the .kernelrelease file.
    version_re = re.compile('''.*vmlinux-(?P<version>.*)''')
    m = version_re.match(vmlinux)
    if m :
        vstr = m.group('version')
    else :
        version_file = open('.kernelrelease', 'r')
        vstr = version_file.readline()
        version_file.close()
        vstr = vstr.rstrip()
    # Kernel maps are archived under ../sws by version.
    init1 = '../sws/%s.%s' % (outf, vstr)
else :
    # Non-kernel binary: archive next to the output, keyed by binary name.
    init1 = '%s.%s' % (outf, vmlinux)
# Fail early if the archive destination exists but is not writable.
if not os.access(init1, os.W_OK) and os.access(init1, os.F_OK) :
    raise AssertionError, 'Cannot write %s\nPlease change this path in %s'%\
          (init1, sys.argv[0])
#############################################################
## Open files
ostr = open(outf, 'w')
estr = sys.stderr
# Scan the disassembly and write the instrumentation map.
scan_obj = SCAN_OBJ()
scan_obj.scan(os.popen('%s %s' % (objdump, vmlinux)), ostr)
# Now print certain static lock addresses if they are present
get_static_locks(ostr, nm, vmlinux)
ostr.close() # Flush those lines
# Keep an archived copy of the map alongside the working one.
shutil.copy(outf, init1)
##################################################################
## Notes on the syntax of objdump
# Most of the time objdump prints something like this
#down():
#include/asm/semaphore.h:100
#c0259a2b: f0 ff 4e 54 lock decl 0x54(%esi)
#c0259a2f: 0f 88 cd 01 00 00 js c0259c02 <.text.lock.tty_ioctl+0x1a>
# But sometimes it looks like this (file:line is optional),
# this is in tty_ioctl.c:100
#down():
#c025952e: f0 ff 4d 54 lock decl 0x54(%ebp)
#c0259532: 0f 88 b0 06 00 00 js c0259be8 <.text.lock.tty_ioctl>
# For some functions, objdump prints multiple function identifiers
# with different line numbers. If the flmatch is '', take
# anything, otherwise match that reg exp on the line after the
# function name appears. Use flmatch for INLINED FUNCTIONS ONLY.
# If the function is not inlined flmatch should either be '', or a
# flag value like 'ignore_call'
# Sometimes the file_line appears by itself, and it is wrong. This is not an "up"
#include/asm/semaphore.h:174
#c0106f4c: 31 d2 xor %edx,%edx
#arch/i386/kernel/ldt.c:107
# Sometimes the func_name is wrong. This is not an "up". I think
# this happens when the up/down is the last call before the end of
# the function.
#up():
#fs/lockd/svclock.c:375
#c01e3a5e: b8 00 00 00 02 mov $0x2000000,%eax
#nlmsvc_lock():
#fs/lockd/svclock.c:376
# Objdump does not mark "up" consistently at its start
# instruction, which is "lock incl ". Sometimes there are
# instructions before the "lock incl " instruction.
# This is a problem. Is it really a read_unlock? There is no locked increment
# __raw_read_unlock():
#include/asm/spinlock.h:181
#c0116242: c7 44 24 08 00 00 00 movl $0x0,0x8(%esp)
#c0116249: 00
#current_thread_info():
#include/asm/thread_info.h:91
#c011624a: be 00 e0 ff ff mov $0xffffe000,%esi
# Ugh, sometimes the function name does not appear.
#c015bba3: 75 f6 jne c015bb9b <register_binfmt+0x32>
#include/asm/spinlock.h:186
#c015bba5: f0 81 05 c4 95 2e c0 lock addl $0x1000000,0xc02e95c4
#c015bbac: 00 00 00 01
#c015bbb0: b8 f0 ff ff ff mov $0xfffffff0,%eax
#fs/exec.c:90
# Hell and tarnation. I have to change the down detection algorithm
# to deal with this.
#down():
#include/asm/semaphore.h:100
#c02138ca: f0 ff 8a d4 00 00 00 lock decl 0xd4(%edx)
#c02138d1: 0f 88 33 01 00 00 js c0213a0a <.text.lock.dd+0x1c>
#drivers/base/dd.c:167
#c02138d7: f0 ff 8b d4 00 00 00 lock decl 0xd4(%ebx)
#c02138de: 0f 88 36 01 00 00 js c0213a1a <.text.lock.dd+0x2c>
# Watch out for the back to back inlined functions. 'inlined' overlaps 'nexti'
#__raw_spin_unlock():
#include/asm/spinlock.h:91
#c011752b: c6 80 04 05 00 00 01 movb $0x1,0x504(%eax)
#__raw_write_unlock():
#include/asm/spinlock.h:186
#c0117532: f0 81 05 80 18 32 c0 lock addl $0x1000000,0xc0321880
#c0117539: 00 00 00 01
# There can be multiple inlined funcs without the inlined name
#d_validate():
#include/asm/spinlock.h:91
#c01697a8: c6 05 00 92 32 c0 01 movb $0x1,0xc0329200
#c01697af: b8 01 00 00 00 mov $0x1,%eax
#fs/dcache.c:1283
#c01697b4: 5b pop %ebx
#c01697b5: 5e pop %esi
#c01697b6: c3 ret
#include/asm/spinlock.h:91
#c01697b7: c6 05 00 92 32 c0 01 movb $0x1,0xc0329200
#c01697be: 31 c0 xor %eax,%eax
# Sometimes there are multiple file name/lineno records, but the second one is bogus
# fs/ext3/acl.c:149
# c019864d: 89 3b mov %edi,(%ebx)
# include/asm/spinlock.h:91
# c019864f: c6 46 70 01 movb $0x1,0x70(%esi)
# c0198653: 31 db xor %ebx,%ebx
# c0198655: e9 d5 fe ff ff jmp c019852f <ext3_set_acl+0x34>
# fs/ext3/acl.c:254
# c019865a: 3d 00 40 00 00 cmp $0x4000,%eax
# c019865f: 0f 84 d6 00 00 00 je c019873b <ext3_set_acl+0x240>
# fs/ext3/acl.c:255
# c0198665: bb f3 ff ff ff mov $0xfffffff3,%ebx
# c019866a: 85 ff test %edi,%edi
# c019866c: 0f 85 bd fe ff ff jne c019852f <ext3_set_acl+0x34>
# include/asm/spinlock.h:91
# c0198672: 31 db xor %ebx,%ebx
# c0198674: e9 b6 fe ff ff jmp c019852f <ext3_set_acl+0x34>
# ext3_acl_to_disk():
# Can't be too lazy about allowing func_name & file_line to persist
# The second movb is not a spin_unlock
#__raw_spin_unlock():
#include/asm/spinlock.h:91
#c014ca2f: c6 47 18 01 movb $0x1,0x18(%edi)
#mm/shmem.c:370
#c014ca33: 8b 54 24 10 mov 0x10(%esp),%edx
#c014ca37: c6 02 01 movb $0x1,(%edx)
| |
from __future__ import absolute_import
from itertools import izip_longest
import Queue
import MySQLdb as mysql
from MySQLdb.cursors import DictCursor
from dejavu.database import Database
class SQLDatabase(Database):
    """
    MySQL-backed fingerprint store for dejavu.

    Handy diagnostic queries:

    1) Find duplicates (shouldn't be any, though):

        select `hash`, `song_id`, `offset`, count(*) cnt
        from fingerprints
        group by `hash`, `song_id`, `offset`
        having cnt > 1
        order by cnt asc;

    2) Get number of hashes by song:

        select song_id, song_name, count(song_id) as num
        from fingerprints
        natural join songs
        group by song_id
        order by count(song_id) desc;

    3) Get hashes with highest number of collisions:

        select
            hash,
            count(distinct song_id) as n
        from fingerprints
        group by `hash`
        order by n DESC;

    => 26 different songs with same fingerprint (392 times):

        select songs.song_name, fingerprints.offset
        from fingerprints natural join songs
        where fingerprints.hash = "08d3c833b71c60a7b620322ac0c0aba7bf5a3e73";
    """
    type = "mysql"

    # tables
    FINGERPRINTS_TABLENAME = "fingerprints"
    SONGS_TABLENAME = "songs"

    # fields
    FIELD_HASH = "hash"
    FIELD_SONG_ID = "song_id"
    FIELD_OFFSET = "offset"
    FIELD_SONGNAME = "song_name"
    FIELD_FINGERPRINTED = "fingerprinted"

    # creates
    CREATE_FINGERPRINTS_TABLE = """
        CREATE TABLE IF NOT EXISTS `%s` (
             `%s` binary(10) not null,
             `%s` mediumint unsigned not null,
             `%s` int unsigned not null,
         INDEX (%s),
         UNIQUE KEY `unique_constraint` (%s, %s, %s),
         FOREIGN KEY (%s) REFERENCES %s(%s) ON DELETE CASCADE
    ) ENGINE=INNODB;""" % (
        FINGERPRINTS_TABLENAME, FIELD_HASH,
        FIELD_SONG_ID, FIELD_OFFSET, FIELD_HASH,
        FIELD_SONG_ID, FIELD_OFFSET, FIELD_HASH,
        FIELD_SONG_ID, SONGS_TABLENAME, FIELD_SONG_ID
    )

    CREATE_SONGS_TABLE = """
        CREATE TABLE IF NOT EXISTS `%s` (
            `%s` mediumint unsigned not null auto_increment,
            `%s` varchar(250) not null,
            `%s` tinyint default 0,
        PRIMARY KEY (`%s`),
        UNIQUE KEY `%s` (`%s`)
    ) ENGINE=INNODB;""" % (
        SONGS_TABLENAME, FIELD_SONG_ID, FIELD_SONGNAME, FIELD_FINGERPRINTED,
        FIELD_SONG_ID, FIELD_SONG_ID, FIELD_SONG_ID,
    )

    # inserts (ignores duplicates)
    INSERT_FINGERPRINT = """
        INSERT IGNORE INTO %s (%s, %s, %s) values
            (UNHEX(%%s), %%s, %%s);
    """ % (FINGERPRINTS_TABLENAME, FIELD_HASH, FIELD_SONG_ID, FIELD_OFFSET)

    INSERT_SONG = "INSERT INTO %s (%s) values (%%s);" % (
        SONGS_TABLENAME, FIELD_SONGNAME)

    # selects
    SELECT = """
        SELECT %s, %s FROM %s WHERE %s = UNHEX(%%s);
    """ % (FIELD_SONG_ID, FIELD_OFFSET, FINGERPRINTS_TABLENAME, FIELD_HASH)

    SELECT_MULTIPLE = """
        SELECT HEX(%s), %s, %s FROM %s WHERE %s IN (%%s);
    """ % (FIELD_HASH, FIELD_SONG_ID, FIELD_OFFSET,
           FINGERPRINTS_TABLENAME, FIELD_HASH)

    SELECT_ALL = """
        SELECT %s, %s FROM %s;
    """ % (FIELD_SONG_ID, FIELD_OFFSET, FINGERPRINTS_TABLENAME)

    SELECT_SONG = """
        SELECT %s FROM %s WHERE %s = %%s
    """ % (FIELD_SONGNAME, SONGS_TABLENAME, FIELD_SONG_ID)

    SELECT_NUM_FINGERPRINTS = """
        SELECT COUNT(*) as n FROM %s
    """ % (FINGERPRINTS_TABLENAME)

    SELECT_UNIQUE_SONG_IDS = """
        SELECT COUNT(DISTINCT %s) as n FROM %s WHERE %s = 1;
    """ % (FIELD_SONG_ID, SONGS_TABLENAME, FIELD_FINGERPRINTED)

    SELECT_SONGS = """
        SELECT %s, %s FROM %s WHERE %s = 1;
    """ % (FIELD_SONG_ID, FIELD_SONGNAME, SONGS_TABLENAME, FIELD_FINGERPRINTED)

    # drops
    DROP_FINGERPRINTS = "DROP TABLE IF EXISTS %s;" % FINGERPRINTS_TABLENAME
    DROP_SONGS = "DROP TABLE IF EXISTS %s;" % SONGS_TABLENAME

    # update
    UPDATE_SONG_FINGERPRINTED = """
        UPDATE %s SET %s = 1 WHERE %s = %%s
    """ % (SONGS_TABLENAME, FIELD_FINGERPRINTED, FIELD_SONG_ID)

    # delete
    DELETE_UNFINGERPRINTED = """
        DELETE FROM %s WHERE %s = 0;
    """ % (SONGS_TABLENAME, FIELD_FINGERPRINTED)

    def __init__(self, **options):
        """Store the connection options and build a cursor factory from them."""
        super(SQLDatabase, self).__init__()
        self.cursor = cursor_factory(**options)
        self._options = options

    def after_fork(self):
        # Clear the cursor cache, we don't want any stale connections from
        # the previous process.
        Cursor.clear_cache()

    def setup(self):
        """
        Creates any non-existing tables required for dejavu to function.

        This also removes all songs that have been added but have no
        fingerprints associated with them.
        """
        with self.cursor() as cur:
            cur.execute(self.CREATE_SONGS_TABLE)
            cur.execute(self.CREATE_FINGERPRINTS_TABLE)
            cur.execute(self.DELETE_UNFINGERPRINTED)

    def empty(self):
        """
        Drops tables created by dejavu and then creates them again
        by calling `SQLDatabase.setup`.

        .. warning:
            This will result in a loss of data
        """
        with self.cursor() as cur:
            cur.execute(self.DROP_FINGERPRINTS)
            cur.execute(self.DROP_SONGS)

        self.setup()

    def delete_unfingerprinted_songs(self):
        """
        Removes all songs that have no fingerprints associated with them.
        """
        with self.cursor() as cur:
            cur.execute(self.DELETE_UNFINGERPRINTED)

    def get_num_songs(self):
        """
        Returns number of songs the database has fingerprinted.
        """
        with self.cursor() as cur:
            cur.execute(self.SELECT_UNIQUE_SONG_IDS)

            for count, in cur:
                return count
            return 0

    def get_num_fingerprints(self):
        """
        Returns number of fingerprints the database has fingerprinted.
        """
        with self.cursor() as cur:
            cur.execute(self.SELECT_NUM_FINGERPRINTS)

            for count, in cur:
                return count
            return 0

    def set_song_fingerprinted(self, sid):
        """
        Set the fingerprinted flag to TRUE (1) once a song has been completely
        fingerprinted in the database.
        """
        with self.cursor() as cur:
            cur.execute(self.UPDATE_SONG_FINGERPRINTED, (sid,))

    def get_songs(self):
        """
        Return songs that have the fingerprinted flag set TRUE (1).
        Yields dict rows (song_id, song_name).
        """
        with self.cursor(cursor_type=DictCursor) as cur:
            cur.execute(self.SELECT_SONGS)
            for row in cur:
                yield row

    def get_song_by_id(self, sid):
        """
        Returns song by its ID, or None if the ID is unknown.
        """
        with self.cursor(cursor_type=DictCursor) as cur:
            cur.execute(self.SELECT_SONG, (sid,))
            return cur.fetchone()

    def insert(self, hash, sid, offset):
        """
        Insert a (sha1, song_id, offset) row into database.
        """
        with self.cursor() as cur:
            cur.execute(self.INSERT_FINGERPRINT, (hash, sid, offset))

    def insert_song(self, songname):
        """
        Inserts song in the database and returns the ID of the inserted record.
        """
        with self.cursor() as cur:
            cur.execute(self.INSERT_SONG, (songname,))
            return cur.lastrowid

    def query(self, hash):
        """
        Return all (song_id, offset) tuples associated with hash.

        If hash is None, returns all entries in the
        database (be careful with that one!).
        """
        # select all if no key
        query = self.SELECT_ALL if hash is None else self.SELECT

        with self.cursor() as cur:
            # BUG FIX: the original always called cur.execute(query) with no
            # arguments, so the parameterized SELECT raised a ProgrammingError
            # for every non-None hash.  Pass the hash when one is supplied.
            if hash is None:
                cur.execute(query)
            else:
                cur.execute(query, (hash,))
            for sid, offset in cur:
                yield (sid, offset)

    def get_iterable_kv_pairs(self):
        """
        Returns all tuples in database.
        """
        return self.query(None)

    def insert_hashes(self, sid, hashes):
        """
        Insert series of hash => song_id, offset
        values into the database.
        """
        values = []
        for hash, offset in hashes:
            values.append((hash, sid, offset))

        with self.cursor() as cur:
            # Batch the inserts so a single statement never carries an
            # unbounded number of rows.
            for split_values in grouper(values, 1000):
                cur.executemany(self.INSERT_FINGERPRINT, split_values)

    def return_matches(self, hashes):
        """
        Return the (song_id, offset_diff) tuples associated with
        a list of (sha1, sample_offset) values.
        """
        # Create a dictionary of hash => offset pairs for later lookups.
        # Hashes are upper-cased because SELECT_MULTIPLE returns HEX(...),
        # which is upper-case in MySQL.
        mapper = {}
        for hash, offset in hashes:
            mapper[hash.upper()] = offset

        # Get an iteratable of all the hashes we need
        values = mapper.keys()

        with self.cursor() as cur:
            for split_values in grouper(values, 1000):
                # Create our IN part of the query
                query = self.SELECT_MULTIPLE
                query = query % ', '.join(['UNHEX(%s)'] * len(split_values))

                cur.execute(query, split_values)

                for hash, sid, offset in cur:
                    # (sid, db_offset - song_sampled_offset)
                    yield (sid, offset - mapper[hash])

    def __getstate__(self):
        # Only the connection options are picklable; the cursor factory is
        # rebuilt in __setstate__.
        return (self._options,)

    def __setstate__(self, state):
        self._options, = state
        self.cursor = cursor_factory(**self._options)
def grouper(iterable, n, fillvalue=None):
    """Split *iterable* into lists of at most *n* items.

    The final chunk is shorter when the input length is not a multiple
    of *n*.

    BUG FIX: the original used ``filter(None, values)``, which strips
    every falsy element (0, '', empty tuples, ...) from a chunk, not just
    the ``izip_longest`` padding.  We instead drop only items that are
    (by identity) the fill value.  Chunks are returned as lists so
    callers may still take ``len()`` of each one.
    """
    args = [iter(iterable)] * n
    return ([v for v in values if v is not fillvalue]
            for values in izip_longest(fillvalue=fillvalue, *args))
def cursor_factory(**factory_options):
    """Build a Cursor constructor pre-bound to *factory_options*.

    The returned callable accepts additional keyword arguments at call
    time; on a key collision the factory-level options win.
    """
    def make_cursor(**call_options):
        merged = dict(call_options)
        merged.update(factory_options)  # factory options take precedence
        return Cursor(**merged)
    return make_cursor
class Cursor(object):
    """
    Establishes a connection to the database and returns an open cursor.

    ```python
    # Use as context manager
    with Cursor() as cur:
        cur.execute(query)
    ```
    """
    # Small pool of reusable connections shared by every Cursor instance.
    _cache = Queue.Queue(maxsize=5)

    def __init__(self, cursor_type=mysql.cursors.Cursor, **options):
        super(Cursor, self).__init__()

        try:
            # Reuse a pooled connection when one is available.
            conn = self._cache.get_nowait()
        except Queue.Empty:
            conn = mysql.connect(**options)
        else:
            # Ping the connection before using it from the cache.
            conn.ping(True)

        self.conn = conn
        self.conn.autocommit(False)
        self.cursor_type = cursor_type

    @classmethod
    def clear_cache(cls):
        # Drop all pooled connections, e.g. after a fork, so inherited
        # sockets are never shared with the parent process.
        cls._cache = Queue.Queue(maxsize=5)

    def __enter__(self):
        self.cursor = self.conn.cursor(self.cursor_type)
        return self.cursor

    def __exit__(self, extype, exvalue, traceback):
        # If we had a MySQL related error we try to rollback the transaction.
        # BUG FIX: the original tested `extype is mysql.MySQLError`, which
        # misses subclasses such as OperationalError, and it called
        # rollback() on the *cursor* -- DB-API cursors have no rollback();
        # transactions are rolled back on the connection.
        if extype is not None and issubclass(extype, mysql.MySQLError):
            self.conn.rollback()

        self.cursor.close()
        self.conn.commit()

        # Put it back on the queue
        try:
            self._cache.put_nowait(self.conn)
        except Queue.Full:
            self.conn.close()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import os
import pickle
from collections import OrderedDict
import logging
from utils import *
logger = logging.getLogger(__name__)
class Base(object):
    """Basic wrapper for the symbols

    Manages "buckets": one (symbol, executor) pair per bucket key, where all
    executors share memory through `shared_exec`.  Parameters, gradients and
    auxiliary states live on `ctx` and are shared across buckets.

    Parameters
    ----------
    data_shapes : dict
        The shapes of tensor variables, name -> shape tuple.
    sym_gen : mx.sym.Symbol
        Symbol of the network; may also be a callable that builds a symbol
        from the bucket kwargs.
    params : None or dict, optional
        Initial parameter NDArrays; copied onto `ctx`.  If None, parameters
        are allocated and initialized lazily in `switch_bucket`.
    params_grad : None or dict, optional
    aux_states:
        Auxiliary state NDArrays; copied onto `ctx` when given.
    initializer:
        Used only when `params` is None to fill the freshly allocated
        parameters.
    ctx:
        Device context all arrays are placed on.
    name:
        Used as a prefix when saving/loading parameters.
    """
    def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
                 default_bucket_kwargs=None, learn_init_keys=None,
                 initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian", magnitude=2),
                 ctx=mx.gpu(), name='Net'):
        self.sym_gen = sym_gen
        # Copy so the caller's dict is never mutated.
        bucket_kwargs = default_bucket_kwargs.copy() if \
            default_bucket_kwargs is not None else dict()
        self.curr_bucket_key = None
        self.ctx = ctx
        self.name = name
        self.initializer = initializer
        if params is None:
            # Deferred: switch_bucket() allocates and initializes them.
            self.params = None
            self.params_grad = None
        else:
            # Copy caller-supplied params to our context and allocate
            # matching (uninitialized) gradient buffers.
            self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
            self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
                                            for n, v in self.params.items()])
        if aux_states is not None:
            self.aux_states = OrderedDict([(k, v.copyto(ctx)) for k, v in aux_states.items()])
        else:
            self.aux_states = None
        self._buckets = dict()
        # Inputs listed here are treated as learnable parameters instead of
        # per-call data (their shapes are fixed at construction time).
        self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
        self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
        # Bind the first executor for the default bucket.
        self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
        self.acc_grad = None

    @property
    def exe(self):
        """Get the current executor

        Returns
        -------
        exe : mxnet.executor.Executor
            The executor of the current bucket bound for the current
            data shapes.
        """
        return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]

    @property
    def data_shapes(self):
        # Shapes the current bucket was most recently switched to.
        return self._buckets[self.curr_bucket_key]['data_shapes']

    @property
    def sym(self):
        # Symbol generated for the current bucket.
        return self._buckets[self.curr_bucket_key]['sym']

    def switch_bucket(self, bucket_kwargs=None, data_shapes=None):
        """Switch to (creating if needed) the bucket for `bucket_kwargs`.

        Existing buckets reuse or reshape their executor for the given
        `data_shapes`; a new bucket generates its symbol, allocates and
        initializes parameters on first use, and binds an executor sharing
        memory with previously created ones.
        """
        if bucket_kwargs is not None:
            self.curr_bucket_key = get_bucket_key(bucket_kwargs=bucket_kwargs)
        # 1. Check if bucket key exists
        if self.curr_bucket_key in self._buckets:
            if data_shapes is not None:
                # One executor is cached per distinct data-shape tuple.
                if tuple(data_shapes.items()) not in self._buckets[self.curr_bucket_key]['exe']:
                    #TODO Optimize the reshaping functionality!
                    self._buckets[self.curr_bucket_key]['exe'][tuple(data_shapes.items())] = \
                        self.exe.reshape(partial_shaping=True, allow_up_sizing=True, **data_shapes)
                    self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
                else:
                    self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
            return
        # 2. If the bucket key does not exist, create new symbol + executor
        assert data_shapes is not None, "Must set data_shapes for new bucket!"
        if isinstance(self.sym_gen, mx.symbol.Symbol):
            sym = self.sym_gen
        else:
            # sym_gen is a factory; the bucket key doubles as its kwargs.
            sym = self.sym_gen(**dict(self.curr_bucket_key))
        arg_names = sym.list_arguments()
        aux_names = sym.list_auxiliary_states()
        # Everything that is not per-call data (plus the learned initial
        # states) counts as a parameter.
        param_names = [n for n in arg_names
                       if n in self.learn_init_keys or (n not in data_shapes.keys())]
        for k, v in data_shapes.items():
            assert isinstance(v, tuple), "Data_shapes must be tuple! Find k=%s, v=%s, " \
                                         "data_shapes=%s" % (k, str(v), str(data_shapes))
        arg_shapes, _, aux_shapes = sym.infer_shape(**data_shapes)
        arg_name_shape = OrderedDict([(k, s) for k, s in zip(arg_names, arg_shapes)])
        if self.params is None:
            # First bucket: allocate parameters/gradients from the inferred
            # shapes and run the initializer.
            self.params = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
                                       for n in param_names])
            self.params_grad = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
                                            for n in param_names])
            if len(self.params) > 0:
                assert self.initializer is not None, \
                    'We must set the initializer if we donnot initialize' \
                    'manually the free parameters of the network!!'
                for k, v in self.params.items():
                    self.initializer(k, v)
        else:
            # Sanity check: the new bucket's shapes must agree with the
            # already-allocated parameters.  NOTE: `items() + [...]` is a
            # Python 2 idiom (dict.items() returns a list there).
            assert set(arg_name_shape.items()) == \
                   set(data_shapes.items() + [(k, v.shape) for k, v in self.params.items()])
        if self.aux_states is None:
            self.aux_states = OrderedDict([(k, nd.empty(s, ctx=self.ctx))
                                           for k, s in zip(aux_names, aux_shapes)])
        # Input buffers for the genuine data arguments (learned-init inputs
        # are bound through self.params instead).
        data_inputs = {k: mx.nd.empty(data_shapes[k], ctx=self.ctx)
                       for k in set(data_shapes.keys()) - set(self.learn_init_keys)}
        if len(self._buckets) > 0:
            # Share executor memory with any previously bound executor.
            shared_exe = list(list(self._buckets.values())[0]['exe'].values())[0]
        else:
            shared_exe = None
        self._buckets[self.curr_bucket_key] = {
            'exe': {tuple(data_shapes.items()):
                        sym.bind(ctx=self.ctx,
                                 args=dict(self.params, **data_inputs),
                                 args_grad=dict(self.params_grad.items()),
                                 aux_states=self.aux_states,
                                 shared_exec=shared_exe)
                    },
            'data_shapes': data_shapes,
            'sym': sym
        }

    def save_params(self, dir_path="", epoch=None):
        """Save params/aux states plus a misc file with the data shapes."""
        param_saving_path = save_params(dir_path=dir_path, name=self.name, epoch=epoch,
                                        params=self.params,
                                        aux_states=self.aux_states)
        misc_saving_path = save_misc(dir_path=dir_path, epoch=epoch, name=self.name,
                                     content={'data_shapes': {k: map(int, v) for k, v in self.data_shapes.items()}})
        logging.info('Saving %s, params: \"%s\", misc: \"%s\"',
                     self.name, param_saving_path, misc_saving_path)

    def load_params(self, name="", dir_path="", epoch=None):
        """Load saved params/aux states into the live NDArrays (in place)."""
        params, aux_states, param_loading_path = load_params(dir_path=dir_path, epoch=epoch, name=name)
        logging.info('Loading params from \"%s\" to %s' % (param_loading_path, self.name))
        for k, v in params.items():
            if k in self.params:
                logging.debug('   Loading %s %s' %(k, str(v.shape)))
                self.params[k][:] = v
            else:
                # Saved file may contain params this symbol does not use.
                logging.warn("Found unused param in the saved model file: %s" % k)
        for k, v in aux_states.items():
            self.aux_states[k][:] = v

    @property
    def internal_sym_names(self):
        # Names of every internal output of the current symbol.
        return self.sym.get_internals().list_outputs()

    @property
    def output_keys(self):
        return self.sym.list_outputs()

    def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
        """
        View the internal symbols using the forward function.

        Binds a throwaway executor (sharing memory with the main one) whose
        single output is the internal symbol `sym_name`.

        :param sym_name: name of the internal output to compute
        :param bucket_kwargs: optional bucket selector
        :param input_dict: input NDArrays, passed as keyword arguments
        :return: the internal output NDArray
        """
        data_shapes = {k: v.shape for k, v in arg_dict.items()}
        self.switch_bucket(bucket_kwargs=bucket_kwargs,
                           data_shapes=data_shapes)
        internal_sym = self.sym.get_internals()[sym_name]
        # Restrict inputs/params/aux states to those the sub-graph needs.
        data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
                       for k, v in self.data_shapes.items()
                       if k in internal_sym.list_arguments()}
        params = {k: v for k, v in self.params.items() if
                  k in internal_sym.list_arguments()}
        aux_states = {k: v for k, v in self.aux_states.items()
                      if k in internal_sym.list_auxiliary_states()}
        exe = internal_sym.bind(ctx=self.ctx,
                                args=dict(params, **data_inputs),
                                args_grad=None,
                                grad_req='null',
                                aux_states=aux_states,
                                shared_exec=self.exe)
        for k, v in arg_dict.items():
            exe.arg_dict[k][:] = v
        exe.forward(is_train=False)
        assert 1 == len(exe.outputs)
        for output in exe.outputs:
            output.wait_to_read()
        return exe.outputs[0]

    def forward(self, is_train=False, bucket_kwargs=None, **arg_dict):
        """Run a forward pass; returns the list of output NDArrays."""
        #import time
        #start = time.time()
        data_shapes = {k: v.shape for k, v in arg_dict.items()}
        # Learned-init inputs keep their fixed construction-time shapes.
        for name in self.learn_init_keys:
            data_shapes[name] = self.learn_init_key_shapes[name]
        self.switch_bucket(bucket_kwargs=bucket_kwargs,
                           data_shapes=data_shapes)
        #end = time.time()
        #print 'Swith Bucket:', end - start
        #start = time.time()
        for k, v in arg_dict.items():
            assert self.exe.arg_dict[k].shape == v.shape,\
                "Shape not match: key %s, need %s, received %s" \
                %(k, str(self.exe.arg_dict[k].shape), str(v.shape))
            self.exe.arg_dict[k][:] = v
        self.exe.forward(is_train=is_train)
        # Block until the outputs are actually computed.
        for output in self.exe.outputs:
            output.wait_to_read()
        #end = time.time()
        #print 'Forward:', end - start
        return self.exe.outputs

    def backward(self, out_grads=None, **arg_dict):
        """Run a backward pass on the current executor (call forward first)."""
        for k, v in arg_dict.items():
            assert self.exe.arg_dict[k].shape == v.shape, \
                "Shape not match: key %s, need %s, received %s" \
                % (k, str(self.exe.arg_dict[k].shape), str(v.shape))
            self.exe.arg_dict[k][:] = v
        self.exe.backward(out_grads=out_grads)

    def forward_backward(self, bucket_kwargs=None, out_grads=None, **arg_dict):
        """Forward then backward in one call; returns the output NDArrays."""
        data_shapes = {k: v.shape for k, v in arg_dict.items()}
        for name in self.learn_init_keys:
            data_shapes[name] = self.learn_init_key_shapes[name]
        self.switch_bucket(bucket_kwargs=bucket_kwargs,
                           data_shapes=data_shapes)
        for k, v in arg_dict.items():
            self.exe.arg_dict[k][:] = v
        self.exe.forward(is_train=True)
        self.exe.backward(out_grads=out_grads)
        for output in self.exe.outputs:
            output.wait_to_read()
        return self.exe.outputs

    def update(self, updater, params_grad=None):
        """Apply `updater` to every parameter with the given gradients
        (defaults to this net's own accumulated `params_grad`)."""
        if params_grad is None:
            params_grad = self.params_grad
        # OrderedDict required: the updater index must be stable across calls.
        assert type(params_grad) is OrderedDict
        for ind, k in enumerate(self.params.keys()):
            updater(index=ind, grad=params_grad[k], weight=self.params[k])

    def update_acc_grad(self):
        """Accumulate the current gradients into `acc_grad` (allocated
        lazily and zero-filled on first use)."""
        if self.acc_grad is None:
            self.acc_grad = OrderedDict([(n, nd.zeros(v.shape, ctx=self.ctx))
                                         for n, v in self.params_grad.items()])
        for k, v in self.acc_grad.items():
            v[:] = v + self.params_grad[k]

    def reset_acc_grad(self):
        """Zero the accumulated gradients in place."""
        for v in self.acc_grad.values():
            v[:] = 0

    def copy(self, name=None, ctx=None):
        """Return a new Base with copies of this net's params/aux states
        (e.g. a target network), optionally on a different context."""
        if ctx is None:
            ctx = self.ctx
        if name is None:
            name = self.name + '-copy-' + str(ctx)
        return Base(data_shapes=self.data_shapes,
                    sym_gen=self.sym_gen,
                    default_bucket_kwargs=dict(self.curr_bucket_key),
                    params=self.params,
                    aux_states=self.aux_states, ctx=ctx, name=name)

    def copy_params_to(self, dst):
        """Copy this net's parameter values into `dst`'s buffers in place."""
        for k, v in self.params.items():
            dst.params[k][:] = v
            # TODO `wait_to_read()` here seems unnecessary, remove it in the future!
            dst.params[k].wait_to_read()

    @property
    def total_param_num(self):
        # Total number of scalar weights across all parameters.
        return sum(v.size for v in self.params.values())

    def print_stat(self):
        """Log the parameter/aux-state shapes and total parameter count."""
        logging.info("Name: %s" % self.name)
        assert self.params is not None, "Fatal Error!"
        logging.info("Params: ")
        for k, v in self.params.items():
            logging.info("   %s: %s" % (k, v.shape))
        if self.aux_states is None or 0 == len(self.aux_states):
            logging.info("Aux States: None")
        else:
            logging.info("Aux States: " + ' '.join(
                ["%s:%s" % (str(k), str(v.shape)) for k, v in self.aux_states.items()]))
        logging.info("Total Parameter Num: " + str(self.total_param_num))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.