repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
jyeatman/dipy | scratch/very_scratch/eddy_currents.py | 22 | 1282 | import numpy as np
import dipy as dp
import nibabel as ni
dname = '/home/eg01/Data_Backup/Data/Eleftherios/CBU090133_METHODS/20090227_145404/Series_003_CBU_DTI_64D_iso_1000'
#dname = '/home/eg01/Data_Backup/Data/Frank_Eleftherios/frank/20100511_m030y_cbu100624/08_ep2d_advdiff_101dir_DSI'
data,affine,bvals,gradients=dp.load_dcm_dir(dname)
'''
rot=np.array([[1,0,0,0],
[0,np.cos(np.pi/2),-np.sin(np.pi/2),0],
[0,np.sin(np.pi/2), np.cos(np.pi/2),0],
[0,0,0,1]])
from scipy.ndimage import affine_transform as aff
naffine=np.dot(affine,rot)
'''
data[:,:,:,1]
source=ni.Nifti1Image(data[:,:,:,1],affine)
target=ni.Nifti1Image(data[:,:,:,0],affine)
#similarity 'cc', 'cr', 'crl1', 'mi', je', 'ce', 'nmi', 'smi'. 'cr'
similarity='cr'
#interp 'pv', 'tri'
interp = 'tri'
#subsampling None or sequence (3,)
subsampling=None
#search 'affine', 'rigid', 'similarity' or ['rigid','affine']
search='affine'
#optimizer 'simplex', 'powell', 'steepest', 'cg', 'bfgs' or
#sequence of optimizers
optimizer= 'powell'
T=dp.volume_register(source,target,similarity,\
interp,subsampling,search,)
sourceT=dp.volume_transform(source, T.inv(), reference=target)
s=source.get_data()
t=target.get_data()
sT=sourceT.get_data()
| bsd-3-clause |
niieani/rethinkdb | test/interface/table_readiness.py | 19 | 10020 | #!/usr/bin/env python
# Copyright 2014 RethinkDB, all rights reserved.

"""This test checks that waiting for a table at different levels of readiness returns at the right time."""

import threading, os, sys, time

# Make the shared test helpers in ../common importable.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse

r = utils.import_python_driver()

# Parse the standard scenario command-line mode flags.
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))

db, _ = utils.get_test_db_table()

# Set once the cluster is up; all helper queries are routed through it.
query_server = None
def create_tables(conn):
    """Create the 'single' and 'majority' write-ack test tables.

    Both tables get one shard spread over one primary, two voting
    replicas and three non-voting replicas (selected by server tag),
    then the function blocks until all replicas are ready.
    """
    r.db(db).table_create('single').run(conn)
    r.db(db).table_create('majority').run(conn)
    r.db(db).reconfigure(
        replicas={'primary': 1, 'replica': 2, 'nonvoting': 3},
        primary_replica_tag='primary',
        nonvoting_replica_tags=['nonvoting'],
        shards=1
        ).run(conn)
    # The two tables differ only in their write-ack setting.
    r.db(db).table('single').config().update({'write_acks':'single'}).run(conn)
    r.db(db).table('majority').config().update({'write_acks':'majority'}).run(conn)
    r.db(db).wait(wait_for='all_replicas_ready').run(conn)
def make_expected(default=False, **kwargs):
    """Build a map from readiness state name to expected wait success.

    Every known readiness level starts at `default`; keyword arguments
    override individual entries after being validated.
    """
    # RSI(raft): `ready_for_reads` is deliberately left out of the checked
    # states because it's sort of ill-defined. See GitHub issue #4320.
    states = ['ready_for_outdated_reads',
              'ready_for_writes',
              'all_replicas_ready']
    expected = dict.fromkeys(states, default)
    for state, flag in kwargs.items():
        assert state in expected, 'Unrecognized readiness state: %s' % str(state)
        assert isinstance(flag, bool)
        expected[state] = flag
    return expected
def do_async_wait(query, expected_success, error_event):
    """Run a wait `query` on a fresh connection and check its outcome.

    Sets `error_event` when the query's success does not match
    `expected_success`; intended to run inside a worker thread, so
    failures are signalled through the event as well as by raising.
    """
    conn = r.connect('localhost', query_server.driver_port)
    try:
        query.run(conn)
        if not expected_success:
            error_event.set()
            assert False, 'Query should have failed but did not: %s' % str(query)
    except r.ReqlRuntimeError as ex:
        # A runtime error (e.g. wait timeout) counts as failure; only a
        # problem if the wait was expected to succeed.
        if expected_success:
            error_event.set()
            raise
def start_waits(expected_wait_result, error_event):
    """Spawn one thread per (table, readiness state) wait query.

    `expected_wait_result` maps table name -> {readiness state ->
    expected success of the wait}.  Returns the list of started
    threads; each thread reports a mismatch through `error_event`.
    """
    threads = []
    for table, readinesses in expected_wait_result.items():
        for readiness, success in readinesses.items():
            # 10 second timeout: an "expected to fail" wait fails by timing out.
            threads.append(threading.Thread(target=do_async_wait,
                                            args=(r.db(db).table(table).wait(wait_for=readiness, timeout=10),
                                                  success,
                                                  error_event)))
            threads[-1].start()
    return threads
def transition_cluster(servers, state):
    """Start or stop servers so the cluster matches the requested state.

    servers -- dict with 'primary' (a single server object), 'replicas'
        (list of servers) and 'nvrs' (list of non-voting replica servers).
    state -- dict with the same keys mapping to 'up'/'down' values
        ('primary' is a single value, the other two are parallel lists).
    """
    # Validate every key this function actually uses; the original check
    # omitted 'nvrs' even though both dicts are accessed via that key below.
    assert all((x in servers and x in state) for x in ['primary', 'replicas', 'nvrs'])
    assert len(servers['replicas']) == len(state['replicas'])
    assert len(servers['nvrs']) == len(state['nvrs'])
    assert all(x in ['up', 'down'] for x in [state['primary']] + state['replicas'] + (state['nvrs'] or []))

    # Build a human-readable description of the target state for the log.
    message_parts = []
    if state['primary'] == 'up':
        message_parts.append('primary')
    if 'up' in state['replicas']:
        count = state['replicas'].count('up')
        message_parts.append('%d replica' % count + ('s' if count > 1 else ''))
    if 'up' in state['nvrs']:
        count = state['nvrs'].count('up')
        message_parts.append('%d nonvoting replica' % count + ('s' if count > 1 else ''))
    if len(message_parts) == 0:
        message_parts.append('no replicas')
    utils.print_with_time("Transitioning to %s up" % ' and '.join(message_parts))

    def up_down_server(server, new_state):
        # Only touch the process when its running state actually changes.
        if new_state == 'up' and not server.running:
            server.start()
        elif new_state == 'down' and server.running:
            server.stop()

    up_down_server(servers['primary'], state['primary'])
    for server, server_state in zip(servers['replicas'], state['replicas']):
        up_down_server(server, server_state)
    for server, server_state in zip(servers['nvrs'], state['nvrs']):
        up_down_server(server, server_state)
# Waits for the query server to see the currently-running servers
# This does not wait for disconnections or for reactors to reach a stable state
def wait_for_transition(cluster):
    """Poll until every running server's UUID is visible in `server_status`
    as seen from the query server."""
    running_procs = [x for x in cluster[:] if x.running]
    [x.wait_until_ready() for x in running_procs]
    uuids = [proc.uuid for proc in running_procs]
    conn = r.connect(host=query_server.host, port=query_server.driver_port)
    # One round-trip per poll: map each uuid to "row exists" and require all.
    while not all(r.expr(uuids).map(r.db('rethinkdb').table('server_status').get(r.row).ne(None)).run(conn)):
        time.sleep(0.1)
def test_wait(cluster, servers, states, expected_wait_result):
    """Drive the cluster through `states`, then verify that table waits
    succeed or time out exactly as described by `expected_wait_result`."""
    # Transition through each state excluding the final state
    for state in states:
        transition_cluster(servers, state)
    wait_for_transition(cluster)

    utils.print_with_time("Collecting table wait results")
    error_event = threading.Event()
    wait_threads = start_waits(expected_wait_result, error_event)
    [x.join() for x in wait_threads]

    if error_event.is_set():
        # Dump the table statuses to make the failure diagnosable.
        conn = r.connect('localhost', query_server.driver_port)
        statuses = list(r.db('rethinkdb').table('table_status').run(conn))
        assert False, 'Wait failed, table statuses: %s' % str(statuses)
utils.print_with_time("Spinning up seven servers")
# One query router plus six data servers: a primary, two voting replicas
# and three non-voting replicas.
serverNames = ['query', 'primary', 'r1', 'r2', 'nv1', 'nv2', 'nv3']
with driver.Cluster(initial_servers=serverNames, output_folder='.', command_prefix=command_prefix, extra_options=serve_options) as cluster:
    cluster.check()
    query_server = cluster[0]
    table_servers = {
        'primary': cluster[1],
        'replicas': cluster[2:4],
        'nvrs': cluster[4:] }

    utils.print_with_time("Establishing ReQL connection")
    conn = r.connect("localhost", query_server.driver_port)

    # Set the server tags
    r.db('rethinkdb').table('server_config').get(table_servers['primary'].uuid).update({'tags':['primary']}).run(conn)
    for replica in table_servers['replicas']:
        r.db('rethinkdb').table('server_config').get(replica.uuid).update({'tags':['replica']}).run(conn)
    for nv in table_servers['nvrs']:
        r.db('rethinkdb').table('server_config').get(nv.uuid).update({'tags':['nonvoting']}).run(conn)

    if db not in r.db_list().run(conn):
        utils.print_with_time("Creating db")
        r.db_create(db).run(conn)

    utils.print_with_time("Creating 'single' and 'majority' write_ack tables")
    create_tables(conn)

    # Everything down: no readiness level should be reached.
    test_wait(cluster, table_servers,
              [{'primary': 'down', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']},
               {'primary': 'down', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']}],
              {'single': make_expected(default=False),
               'majority': make_expected(default=False)})
    # A single voting replica up: only outdated reads are possible.
    test_wait(cluster, table_servers,
              [{'primary': 'down', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']},
               {'primary': 'down', 'replicas': ['up', 'down'], 'nvrs': ['down', 'down', 'down']}],
              {'single': make_expected(default=False, ready_for_outdated_reads=True),
               'majority': make_expected(default=False, ready_for_outdated_reads=True)})
    # A single non-voting replica up: still only outdated reads.
    test_wait(cluster, table_servers,
              [{'primary': 'down', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']},
               {'primary': 'down', 'replicas': ['down', 'down'], 'nvrs': ['up', 'down', 'down']}],
              {'single': make_expected(default=False, ready_for_outdated_reads=True),
               'majority': make_expected(default=False, ready_for_outdated_reads=True)})
    # Only the primary up: again only outdated reads.
    test_wait(cluster, table_servers,
              [{'primary': 'down', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']},
               {'primary': 'up', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']}],
              {'single': make_expected(default=False, ready_for_outdated_reads=True),
               'majority': make_expected(default=False, ready_for_outdated_reads=True)})
    # Full recovery after a total outage: everything ready.
    test_wait(cluster, table_servers,
              [{'primary': 'down', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']},
               {'primary': 'up', 'replicas': ['up', 'up'], 'nvrs': ['up', 'up', 'up']}],
              {'single': make_expected(default=True),
               'majority': make_expected(default=True)})
    # Steady state with everything up: everything ready.
    test_wait(cluster, table_servers,
              [{'primary': 'up', 'replicas': ['up', 'up'], 'nvrs': ['up', 'up', 'up']}],
              {'single': make_expected(default=True),
               'majority': make_expected(default=True)})
    # Voting set complete but non-voting replicas down: everything except
    # all_replicas_ready.
    test_wait(cluster, table_servers,
              [{'primary': 'up', 'replicas': ['up', 'up'], 'nvrs': ['down', 'down', 'down']}],
              {'single': make_expected(default=True, all_replicas_ready=False),
               'majority': make_expected(default=True, all_replicas_ready=False)})
    # Only the primary up: only outdated reads.
    test_wait(cluster, table_servers,
              [{'primary': 'up', 'replicas': ['down', 'down'], 'nvrs': ['down', 'down', 'down']}],
              {'single': make_expected(default=False, ready_for_outdated_reads=True),
               'majority': make_expected(default=False, ready_for_outdated_reads=True)})
    # Primary plus non-voting replicas only: still no voting majority.
    test_wait(cluster, table_servers,
              [{'primary': 'up', 'replicas': ['down', 'down'], 'nvrs': ['up', 'up', 'up']}],
              {'single': make_expected(default=False, ready_for_outdated_reads=True),
               'majority': make_expected(default=False, ready_for_outdated_reads=True)})
    # Primary plus one voting replica: writable, but not all replicas ready.
    test_wait(cluster, table_servers,
              [{'primary': 'up', 'replicas': ['up', 'down'], 'nvrs': ['down', 'down', 'down']}],
              {'single': make_expected(default=True, all_replicas_ready=False),
               'majority': make_expected(default=True, all_replicas_ready=False)})

    utils.print_with_time("Cleaning up")
utils.print_with_time("Done.")
| agpl-3.0 |
muLAn-project/muLAn | muLAn/models/fhexaBL.py | 1 | 2911 | # -*-coding:Utf-8 -*
# ====================================================================
# ====================================================================
# Packages
# ====================================================================
import sys
import numpy as np
from muLAn.models.multipole import hexamag
# ====================================================================
# Functions
# ====================================================================
def magnifcalc(t, param, Ds=None, tb=None):
    """Return the hexadecapolar approximation of the magnification.

    :param t: numpy array of dates.
    :param param: dict of binary-lens model parameters (t0, u0, tE, rho,
        gamma, q, piEN, piEE, alpha, s, dadt, dsdt).
    :param Ds: dict with 'N' and 'E' parallax shift components --
        presumably evaluated at the dates `t` (TODO confirm with caller).
    :param tb: reference date for the linear orbital-motion development.
    :return: numpy array of magnifications, one per date.
    """
    ### Get parameters
    t0 = param['t0']
    u0 = param['u0']
    tE = param['tE']
    rho = param['rho']
    gamma = param['gamma']
    q = param['q']
    piEN = param['piEN']
    piEE = param['piEE']
    alpha0 = param['alpha']
    s0 = param['s']
    dalpha = param['dadt']
    ds = param['dsdt']
    ### Lens orbital motion
    alpha, s = lens_rotation(alpha0, s0, dalpha, ds, t, tb)
    ### Parallax
    DsN = Ds['N']
    DsE = Ds['E']
    tau = (t-t0)/tE + piEN * DsN + piEE * DsE
    beta = u0 + piEN * DsE - piEE * DsN
    # Rotate the rectilinear trajectory by alpha into the lens frame.
    x, y = binrot(alpha, tau, beta)
    ### Conversion center of mass to Cassan (2008)
    x = x - s*q/(1.+q)
    ### Compute magnification
    zeta0 = x + 1j*y
    return np.array([hexamag(s[i], q, rho, gamma, zeta0[i]) for i in range(len(x))])
# --------------------------------------------------------------------
def binrot(theta, x_old, y_old):
    """Rotate 2-D coordinates by the angle `theta`.

    :param theta: float, rotation angle in radians.
    :param x_old: numpy array, x coordinate in the old frame.
    :param y_old: numpy array, y coordinate in the old frame.
    :return x_new: numpy array, x coordinate in the new frame.
    :return y_new: numpy array, y coordinate in the new frame.
    """
    c, s = np.cos(theta), np.sin(theta)
    return x_old * c - y_old * s, x_old * s + y_old * c
# --------------------------------------------------------------------
def lens_rotation(alpha0, s0, dalpha, ds, t, tb):
    """Linear development of the lens orbital motion.

    Computes the binary-axis angle and projected separation at each
    date from their values at the reference date `tb` and their
    (per-year) rates of change.

    :param alpha0: angle alpha at date tb (float).
    :param s0: projected separation at date tb (float).
    :param dalpha: angular velocity at date tb, radians.year^-1 (float).
    :param ds: change rate of the separation, year^-1 (float).
    :param t: dates (numpy array, days).
    :param tb: reference date of the linear development (float).
    :return: actual alpha and s values at each date.
    :rtype: numpy array, numpy array
    """
    days_per_year = 365.25  # Julian year in days
    # Elapsed time in days; the rates are per year, hence the division.
    elapsed = t - tb
    alpha = alpha0 - elapsed * dalpha / days_per_year
    s = s0 + elapsed * ds / days_per_year
    return alpha, s
| mit |
arnif/CouchPotatoServer | couchpotato/core/providers/nzb/newzbin/main.py | 3 | 5554 | from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from dateutil.parser import parse
import base64
import time
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
class Newzbin(NZBProvider, RSS):
    """NZB provider for the newzbin2.es index.

    Searches the site's RSS feed by IMDB id and downloads reports
    through the DNZB API using the configured account credentials.
    (Python 2 code: uses `except Exception, e` and `iteritems`.)
    """

    # Endpoint URLs for the DNZB download API and the RSS search page.
    urls = {
        'download': 'https://www.newzbin2.es/api/dnzb/',
        'search': 'https://www.newzbin2.es/search/',
    }

    # Maps newzbin's numeric source-format bitmask values to quality ids.
    format_ids = {
        2: ['scr'],
        1: ['cam'],
        4: ['tc'],
        8: ['ts'],
        1024: ['r5'],
    }

    # (bitmask values, quality ids) pairs used for the video-format filter.
    cat_ids = [
        ([262144], ['bd50']),
        ([2097152], ['1080p']),
        ([524288], ['720p']),
        ([262144], ['brrip']),
        ([2], ['dvdr']),
    ]
    # Fallback id used when no format matches.
    cat_backup_id = -1

    # Rate limit between HTTP requests.
    http_time_between_calls = 3 # Seconds

    def search(self, movie, quality):
        """Search newzbin for releases of `movie` matching `quality`.

        Returns a list of result dicts (possibly empty); results are
        filtered through the 'searcher.correct_movie' event and scored.
        """
        results = []
        if self.isDisabled():
            return results

        # NOTE(review): `type` here is the builtin, not a local variable --
        # looks like a quality identifier was meant; confirm upstream.
        format_id = self.getFormatId(type)
        cat_id = self.getCatId(type)

        # RSS search query: look the movie up by its IMDB identifier.
        arguments = tryUrlencode({
            'searchaction': 'Search',
            'u_url_posts_only': '0',
            'u_show_passworded': '0',
            'q_url': 'imdb.com/title/' + movie['library']['identifier'],
            'sort': 'ps_totalsize',
            'order': 'asc',
            'u_post_results_amt': '100',
            'feed': 'rss',
            'category': '6',
            'ps_rb_video_format': str(cat_id),
            'ps_rb_source': str(format_id),
            'u_post_larger_than': quality.get('size_min'),
            'u_post_smaller_than': quality.get('size_max'),
        })

        url = "%s?%s" % (self.urls['search'], arguments)
        cache_key = str('newzbin.%s.%s.%s' % (movie['library']['identifier'], str(format_id), str(cat_id)))

        data = self.getCache(cache_key)
        if not data:
            # HTTP basic auth with the configured account; strip the
            # trailing newline encodestring appends.
            headers = {
                'Authorization': "Basic %s" % base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password')))[:-1]
            }

            try:
                data = self.urlopen(url, headers = headers)
                self.setCache(cache_key, data)
            except:
                return results

        if data:
            try:
                try:
                    data = XMLTree.fromstring(data)
                    nzbs = self.getElements(data, 'channel/item')
                except Exception, e:
                    log.debug('%s, %s', (self.getName(), e))
                    return results

                for nzb in nzbs:
                    title = self.getTextElement(nzb, "title")
                    if 'error' in title.lower(): continue

                    REPORT_NS = 'http://www.newzbin2.es/DTD/2007/feeds/report/';

                    # Add attributes to name
                    try:
                        for attr in nzb.find('{%s}attributes' % REPORT_NS):
                            title += ' ' + attr.text
                    except:
                        pass

                    id = int(self.getTextElement(nzb, '{%s}id' % REPORT_NS))
                    size = str(int(self.getTextElement(nzb, '{%s}size' % REPORT_NS)) / 1024 / 1024) + ' mb'
                    date = str(self.getTextElement(nzb, '{%s}postdate' % REPORT_NS))

                    # Normalized result record consumed by the searcher.
                    new = {
                        'id': id,
                        'type': 'nzb',
                        'provider': self.getName(),
                        'name': title,
                        'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                        'size': self.parseSize(size),
                        'url': str(self.getTextElement(nzb, '{%s}nzb' % REPORT_NS)),
                        'download': self.download,
                        'detail_url': str(self.getTextElement(nzb, 'link')),
                        'description': self.getTextElement(nzb, "description"),
                        'check_nzb': False,
                    }

                    is_correct_movie = fireEvent('searcher.correct_movie',
                                                 nzb = new, movie = movie, quality = quality,
                                                 imdb_results = True, single = True)

                    if is_correct_movie:
                        new['score'] = fireEvent('score.calculate', new, movie, single = True)
                        results.append(new)
                        self.found(new)

                return results
            except SyntaxError:
                log.error('Failed to parse XML response from newzbin')

        return results

    def download(self, url = '', nzb_id = ''):
        """Fetch the NZB file for report `nzb_id` via the DNZB API.

        Returns the response body, or False when the request fails
        (e.g. no download credit left).
        """
        try:
            log.info('Download nzb from newzbin, report id: %s ', nzb_id)

            return self.urlopen(self.urls['download'], params = {
                'username' : self.conf('username'),
                'password' : self.conf('password'),
                'reportid' : nzb_id
            }, show_error = False)
        except Exception, e:
            log.error('Failed downloading from newzbin, check credit: %s', e)

        return False

    def getFormatId(self, format):
        """Map a quality identifier (e.g. 'cam') to newzbin's numeric
        source-format id; returns cat_backup_id when nothing matches."""
        for id, quality in self.format_ids.iteritems():
            for q in quality:
                if q == format:
                    return id

        return self.cat_backup_id

    def isEnabled(self):
        """Provider is usable only when enabled and credentials are set."""
        return NZBProvider.isEnabled(self) and self.conf('enabled') and self.conf('username') and self.conf('password')
| gpl-3.0 |
jswoboda/SimISR | SimISR/analysisplots.py | 2 | 36407 | #!/usr/bin/env python
"""
Created on Wed May 6 13:55:26 2015
analysisplots.py
This module is used to plot the output from various stages of the simulator to debug
problems. This is also helpful for presentations.
@author: John Swoboda
"""
from . import Path
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import scipy as sp
import scipy.fftpack as scfft
import numpy as np
import seaborn as sns
from .IonoContainer import IonoContainer
from .utilFunctions import readconfigfile,spect2acf,acf2spect
from .specfunctions import ISRspecmakeout#,ISRSfitfunction
def beamvstime(configfile,maindir,params=['Ne'],filetemplate='AltvTime',suptitle = 'Alt vs Time'):
    """ This will create a altitude time image for the data for ionocontainer files
        that are in sphereical coordinates.
        Inputs
            Times - A list of times that will be plotted.
            configfile - The INI file with the simulation parameters that will be useds.
            maindir - The directory the images will be saved in.
            params - List of Parameter names that will be ploted. These need to match
                in the ionocontainer names.
            filetemplate - The first part of a the file names.
            suptitle - The supertitle for the plots.

        Figures are paged 3x3; one image per (parameter, beam) pair, saved
        as '<filetemplate>_NNN.png'.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
#    rc('text', usetex=True)
    (sensdict,simparams) = readconfigfile(configfile)

    paramslower = [ip.lower() for ip in params]
    Np = len(params)
    maindir=Path(maindir)
    inputfile = str(maindir.joinpath('Fitted','fitteddata.h5'))

    Ionofit = IonoContainer.readh5(inputfile)
    times = Ionofit.Time_Vector
    Nt = len(times)
    dataloc = Ionofit.Sphere_Coords
    pnames = Ionofit.Param_Names
    pnameslower = sp.array([ip.lower() for ip in pnames.flatten()])
    # Index of each requested parameter in Param_Names (None if missing).
    p2fit = [sp.argwhere(ip==pnameslower)[0][0] if ip in pnameslower else None for ip in paramslower]

    # Find the unique beams by viewing each (az, el) row as one opaque value.
    angles = dataloc[:,1:]
    b = np.ascontiguousarray(angles).view(np.dtype((np.void, angles.dtype.itemsize * angles.shape[1])))
    _, idx, invidx = np.unique(b, return_index=True,return_inverse=True)

    beamlist = angles[idx]

    Nb = beamlist.shape[0]

    newfig=True
    imcount=0
    ifig=-1
    for iparam in range(Np):
        for ibeam in range(Nb):

            if newfig:
                # Start a new 3x3 page of axes.
                (figmplf, axmat) = plt.subplots(3, 3,figsize=(20, 15), facecolor='w',sharex=True, sharey=True)
                axvec = axmat.flatten()
                newfig=False
                ix=0
                ifig+=1

            ax=axvec[ix]

            curbeam = beamlist[ibeam]
            curparm = paramslower[iparam]
            # Electron density from power is plotted on the Ne scale.
            if curparm == 'nepow':
                curparm = 'ne'

            indxkep = np.argwhere(invidx==ibeam)[:,0]

            # Sort this beam's samples by range and convert to altitude.
            rng_fit= dataloc[indxkep,0]
            rngargs = np.argsort(rng_fit)
            rng_fit = rng_fit[rngargs]
            alt_fit = rng_fit*sp.sin(curbeam[1]*sp.pi/180.)
            curfit = Ionofit.Param_List[indxkep,:,p2fit[iparam]]
            curfit = curfit[rngargs]
            Tmat, Amat =np.meshgrid(times[:,0],alt_fit)
            image = ax.pcolor(Tmat,Amat,curfit.real,cmap='viridis')
            if curparm=='ne':
                # Densities span decades: log color scale.
                image.set_norm(colors.LogNorm(vmin=1e9,vmax=5e12))
                cbarstr = params[iparam] + ' m-3'
            else:
                image.set_norm(colors.PowerNorm(gamma=1.,vmin=500,vmax=3e3))
                cbarstr = params[iparam] + ' K'

            # Only label the outer edges of the 3x3 grid.
            if ix>5:
                ax.set_xlabel("Time in s")
            if sp.mod(ix,3)==0:
                ax.set_ylabel('Alt km')
            ax.set_title('{0} vs Altitude, Az: {1}$^o$ El: {2}$^o$'.format(params[iparam],*curbeam))
            imcount=imcount+1

            ix+=1
            if ix==9 or ibeam+1==Nb:
                # Page is full (or last beam of this parameter): finish and save.
                cbar_ax = figmplf.add_axes([.91, .3, .06, .4])
                cbar = plt.colorbar(image,cax=cbar_ax)
                cbar.set_label(cbarstr)
                figmplf.suptitle(suptitle, fontsize=20)
                figmplf.tight_layout(rect=[0, .05, .9, .95])
                fname= filetemplate+'_{0:0>3}.png'.format(ifig)
                plt.savefig(fname)
                plt.close(figmplf)
                newfig=True
def fitsurfaceplot(paramdict,plotvals,configfile,y_acf,yerr=None,filetemplate='fitsurfs',suptitle = 'Fit Surfaces'):
    """ This will create a fit surface plot.
        Inputs
            paramdict - A dictionary with the followign key value pairs.
                Ne - Array of possible electron density values.
                Te - Array of possible electron tempreture values.
                Ti - Array of possible ion tempreture values.
                frac - Array of possible fraction shares of the ion make up.
            plotvals - A list of dictionaries with key value pairs.
                setparam - A string that describes he parameter thats set.
                xparam - The parameter that's varied along the x axis of the image.
                yparam - The parameter that's varied along the y axis of the image.
                indx - The index from the paramdict for the set variable.
            configfile - The file thats used for the simulation.
            y_acf - the complex ACF used to create the errors.
            yerr - The standard deviation of the acf measurement.
            filetemplate - The template on how the file will be named.
            suptitle - The super title for the plots.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    (sensdict,simparams) = readconfigfile(configfile)
    specs = simparams['species']
    nspecs = len(specs)

    # Build the flat parameter list: [frac_i, Ti_i] per ion, then Ne, Te, Vi.
    paramlist = [[]]*(2*nspecs+1)
    paramlist[2*(nspecs-1)] = paramdict['Ne']
    paramlist[2*(nspecs-1)+1] = paramdict['Te']

    if 'frac' in paramdict.keys():
        frac = paramdict['frac']
    else:
        # Default: split the ion make-up evenly over all ion species.
        frac = [[1./(nspecs-1)]]*(nspecs-1)

    for ispec in range(nspecs-1):
        paramlist[2*ispec] = frac[ispec]
        paramlist[2*ispec+1] = paramdict['Ti'][ispec]

    if 'Vi' in paramdict.keys():
        paramlist[-1] = paramdict['Vi']
    else:
        paramlist[-1] = [0.]

    # Index of each named parameter within paramlist.
    pvals = {'Ne':2*(nspecs-1),'Te':2*(nspecs-1)+1,'Ti':1,'frac':0}
    fitsurfs = makefitsurf(paramlist,y_acf,sensdict,simparams,yerr)
    quad = (3,3)
    i_fig = 0
    for iplt, idict in enumerate(plotvals):
        iaxn = sp.mod(iplt,sp.prod(quad))

        if iaxn==0:
            (figmplf, axmat) = plt.subplots(quad[0],quad[1],figsize=(20, 15), facecolor='w')
            axvec = axmat.flatten()

        setstr = idict['setparam']
        xstr = idict['xparam']
        ystr = idict['yparam']
        mloc = pvals[setstr]
        xdim = pvals[xstr]
        ydim = pvals[ystr]
        # Bug fix: look the set parameter's values up by its numeric index
        # (the original indexed the list with the parameter-name string).
        setval = paramlist[mloc][idict['indx']]
        # Put the fixed axis first and the plotted axes next, then average
        # away any remaining dimensions.
        transarr = sp.arange(2*nspecs+1).tolist()
        transarr.remove(mloc)
        transarr.remove(xdim)
        transarr.remove(ydim)
        transarr = [mloc,ydim,xdim] + transarr
        fitupdate = sp.transpose(fitsurfs,transarr)
        while fitupdate.ndim > 3:
            # Bug fix: nanmean takes `axis`, not `dim`.
            fitupdate = sp.nanmean(fitupdate,axis=-1)
        Z1 = fitupdate[idict['indx']]
        iax = axvec[iaxn]
        xvec = paramdict[xstr]
        yvec = paramdict[ystr]
        [Xmat,Ymat] = sp.meshgrid(xvec,yvec)
        iax.pcolor(Xmat,Ymat,Z1,norm=colors.LogNorm(vmin=Z1.min(), vmax=Z1.max()))
        # Bug fix: use the Axes setter methods instead of assigning to
        # non-callable attributes, and actually show the set value.
        iax.set_xlabel(xstr)
        iax.set_ylabel(ystr)
        iax.set_title('{0} at {1}'.format(setstr,setval))
        # Save when the page is full, or on the last plot so a partially
        # filled final figure is not silently dropped (original bug).
        if iaxn==sp.prod(quad)-1 or iplt==len(plotvals)-1:
            figmplf.suptitle(suptitle, fontsize=20)
            fname = filetemplate+'_{0:0>4}.png'.format(i_fig)
            plt.savefig(fname)
            plt.close(figmplf)
            i_fig+=1
def maketi(Ionoin):
    """ This makes the ion densities, tempretures and velocities and places
        them in the Param_List variable in the ionocontainer object.

        Appends two parameter rows: (Ni, Ti) -- the summed ion density and
        the density-weighted mean ion temperature -- and (Vi,) the Doppler
        velocity.  The container is modified in place and also returned.
    """
    nloc, nt, nion, nppi = Ionoin.Param_List.shape
    # All species except the last (electrons) are ions.
    ions = Ionoin.Param_List[:, :, :-1, :]
    vi = Ionoin.getDoppler()

    # Total ion density and the density-weighted mean ion temperature.
    ni_tot = sp.sum(ions[:, :, :, 0], axis=2)
    ti_ave = sp.sum(ions[:, :, :, 0] * ions[:, :, :, 1], axis=2) / ni_tot

    # Copy the old parameters and append the two derived rows.
    newparams = sp.zeros((nloc, nt, nion + 2, nppi))
    newparams[:, :, :-2, :] = Ionoin.Param_List
    newparams[:, :, -2, 0] = ni_tot
    newparams[:, :, -2, 1] = ti_ave
    newparams[:, :, -1, 0] = vi

    extrarows = sp.array([['Ni', 'Ti'], ['Vi', 'xx']])
    Ionoin.Param_Names = sp.vstack((Ionoin.Param_Names, extrarows))
    Ionoin.Param_List = newparams
    return Ionoin
def plotbeamparametersv2(times, configfile, maindir, fitdir='Fitted', params=['Ne'],
                         filetemplate='params', suptitle='Parameter Comparison',
                         werrors=False, nelog=True):
    """
    This function will plot the desired parameters for each beam along range.
    The values of the input and measured parameters will be plotted
    Inputs
        Times - A list of times that will be plotted.
        configfile - The INI file with the simulation parameters that will be useds.
        maindir - The directory the images will be saved in.
        fitdir - Subdirectory of maindir holding fitteddata.h5.
        params - List of Parameter names that will be ploted. These need to match
            in the ionocontainer names.
        filetemplate - The first part of a the file names.
        suptitle - The supertitle for the plots.
        werrors - A bools that determines if the errors will be plotted.
        nelog - If True, electron density is drawn on a log x axis.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
#    rc('text', usetex=True)
    maindir = Path(maindir)
    ffit = maindir/fitdir/'fitteddata.h5'
    inputfiledir = maindir/'Origparams'
    (sensdict, simparams) = readconfigfile(configfile)

    paramslower = [ip.lower() for ip in params]
    Nt = len(times)
    Np = len(params)

    #Read in fitted data
    Ionofit = IonoContainer.readh5(str(ffit))
    dataloc = Ionofit.Sphere_Coords
    pnames = Ionofit.Param_Names
    pnameslower = sp.array([ip.lower() for ip in pnames.flatten()])
    # Index of each requested parameter in the fitted Param_Names.
    p2fit = [sp.argwhere(ip == pnameslower)[0][0]
             if ip in pnameslower else None for ip in paramslower]
    time2fit = [None]*Nt
    # Have to fix this because of time offsets
    # NOTE(review): this mutates `times` in place when it is a numpy array.
    if times[0] == 0:
        times += Ionofit.Time_Vector[0, 0]
    # Map each requested time to the closest fitted-data time index.
    for itn, itime in enumerate(times):
        filear = sp.argwhere(Ionofit.Time_Vector[:, 0] >= itime)
        if len(filear) == 0:
            filenum = len(Ionofit.Time_Vector)-1
        else:
            filenum = sp.argmin(sp.absolute(Ionofit.Time_Vector[:, 0]-itime))
        time2fit[itn] = filenum
    times_int = [Ionofit.Time_Vector[i] for i in time2fit]

    # determine the beams: view each (az, el) row as one opaque value so
    # np.unique can find the distinct beams.
    angles = dataloc[:, 1:]
    rng = sp.unique(dataloc[:, 0])
    b_arr = np.ascontiguousarray(angles).view(np.dtype((np.void,
                                                        angles.dtype.itemsize * angles.shape[1])))
    _, idx, invidx = np.unique(b_arr, return_index=True, return_inverse=True)

    beamlist = angles[idx]

    Nb = beamlist.shape[0]

    # Determine which imput files are to be used.
    dirlist = sorted(inputfiledir.glob('*.h5'))
    dirliststr = [str(i) for i in dirlist]
    sortlist, outime, outfilelist,timebeg,timelist_s = IonoContainer.gettimes(dirliststr)
    timelist = timebeg.copy()
    time2file = [None]*Nt

    time2intime = [None]*Nt
    # go through times find files and then times in files
    for itn, itime in enumerate(times):

        filear = sp.argwhere(timelist >= itime)
        if len(filear) == 0:
            filenum = [len(timelist)-1]
        else:
            filenum = filear[0]

        flist1 = []
        timeinflist = []
        for ifile in filenum:
            filetimes = timelist_s[ifile]
            # Keep every record whose interval overlaps the fitted interval
            # (starts inside, ends inside, contains it, or is contained).
            log1 = (filetimes[:, 0] >= times_int[itn][0]) & (filetimes[:, 0] < times_int[itn][1])
            log2 = (filetimes[:, 1] > times_int[itn][0]) & (filetimes[:, 1] <= times_int[itn][1])
            log3 = (filetimes[:, 0] <= times_int[itn][0]) & (filetimes[:, 1] > times_int[itn][1])
            log4 = (filetimes[:, 0] > times_int[itn][0]) & (filetimes[:, 1] < times_int[itn][1])
            curtimes1 = sp.where(log1|log2|log3|log4)[0].tolist()
            flist1 = flist1+ [ifile]*len(curtimes1)
            timeinflist = timeinflist+curtimes1
        time2intime[itn] = timeinflist
        time2file[itn] = flist1

    nfig = int(sp.ceil(Nt*Nb))

    imcount = 0
    curfilenum = -1
    # Loop for the figures
    for i_fig in range(nfig):
        lines = [None]*2
        labels = [None]*2
        (figmplf, axmat) = plt.subplots(int(sp.ceil(Np/2)), 2, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        # loop that goes through each axis loops through each parameter, beam
        # then time.
        for ax in axvec:
            if imcount >= Nt*Nb*Np:
                break
            # Decode (time, parameter, beam) indices from the flat counter.
            imcount_f = float(imcount)
            itime = int(sp.floor(imcount_f/Nb/Np))
            iparam = int(imcount_f/Nb-Np*itime)
            ibeam = int(imcount_f-(itime*Np*Nb+iparam*Nb))
            curbeam = beamlist[ibeam]

            altlist = sp.sin(curbeam[1]*sp.pi/180.)*rng

            curparm = paramslower[iparam]
            # Use Ne from input to compare the ne derived from the power.
            if curparm == 'nepow':
                curparm_in = 'ne'
            else:
                curparm_in = curparm

            curcoord = sp.zeros(3)
            curcoord[1:] = curbeam

            for iplot, filenum in enumerate(time2file[itime]):

                # Only reload the input container when the file changes.
                if curfilenum != filenum:
                    curfilenum = filenum
                    datafilename = dirlist[filenum]
                    Ionoin = IonoContainer.readh5(str(datafilename))
                    if ('ti' in paramslower) or ('vi' in paramslower):
                        Ionoin = maketi(Ionoin)
                    pnames = Ionoin.Param_Names
                    pnameslowerin = sp.array([ip.lower() for ip in pnames.flatten()])

                prmloc = sp.argwhere(curparm_in == pnameslowerin)
                if prmloc.size != 0:
                    curprm = prmloc[0][0]
                # build up parameter vector bs the range values by finding the closest point in space in the input
                curdata = sp.zeros(len(rng))
                for irngn, irng in enumerate(rng):
                    curcoord[0] = irng
                    tempin = Ionoin.getclosestsphere(curcoord)[0][time2intime[itime]]
                    Ntloc = tempin.shape[0]
                    tempin = sp.reshape(tempin, (Ntloc, len(pnameslowerin)))
                    curdata[irngn] = tempin[0, curprm]
                #actual plotting of the input data
                lines[0] = ax.plot(curdata, altlist, marker='o', c='b', linewidth=2)[0]
                labels[0] = 'Input Parameters'

            # Plot fitted data for the axis
            indxkep = np.argwhere(invidx == ibeam)[:, 0]
            curfit = Ionofit.Param_List[indxkep, time2fit[itime], p2fit[iparam]]
            rng_fit = dataloc[indxkep, 0]
            alt_fit = rng_fit*sp.sin(curbeam[1]*sp.pi/180.)
            # Error columns are named 'n<param>' in the fitted data.
            errorexist = 'n'+paramslower[iparam] in pnameslower
            if errorexist and werrors:
                eparam = sp.argwhere('n'+paramslower[iparam] == pnameslower)[0][0]
                curerror = Ionofit.Param_List[indxkep, time2fit[itime], eparam]
                lines[1] = ax.errorbar(curfit, alt_fit, xerr=curerror, fmt='-.',
                                       c='g', linewidth=2)[0]
            else:
                lines[1] = ax.plot(curfit, alt_fit, marker='o', c='g', linewidth=2)[0]
            labels[1] = 'Fitted Parameters'
            # get and plot the input data

            numplots = len(time2file[itime])

            # set the limit for the parameter
            if curparm == 'vi':
                ax.set(xlim=[-1.25*sp.nanmax(sp.absolute(curfit)), 1.25*sp.nanmax(sp.absolute(curfit))])
            elif curparm_in != 'ne':
                ax.set(xlim=[0.75*sp.nanmin(curfit), sp.minimum(1.25*sp.nanmax(curfit), 8000.)])
            elif (curparm_in == 'ne') and nelog:
                ax.set_xscale('log')

            ax.set_xlabel(params[iparam])
            ax.set_ylabel('Alt km')
            ax.set_title('{0} vs Altitude, Time: {1}s Az: {2}$^o$ El: {3}$^o$'.format(params[iparam], times[itime], *curbeam))
            imcount += 1
        # save figure
        figmplf.suptitle(suptitle, fontsize=20)
        if None in labels:
            labels.remove(None)
            lines.remove(None)
        plt.figlegend(lines, labels, loc = 'lower center', ncol=5, labelspacing=0.)
        fname = filetemplate+'_{0:0>3}.png'.format(i_fig)
        plt.savefig(fname)
        plt.close(figmplf)
def plotspecs(coords, times, configfile, maindir, cartcoordsys=True, indisp=True, acfdisp=True,
              fitdisp=True, filetemplate='spec', suptitle='Spectrum Comparison'):
    """ This will create a set of images that compare the input ISR spectrum to the
        output ISR spectrum from the simulator.
        Inputs
        coords - An Nx3 numpy array that holds the coordinates of the desired points.
        times - A numpy list of times in seconds.
        configfile - The name of the configuration file used.
        maindir - Simulation directory; 'Spectrums', 'ACF' and 'Fitted'
            subdirectories are read from it.
        cartcoordsys - (default True)A bool, if true then the coordinates are given in cartisian if
            false then it is assumed that the coords are given in sphereical coordinates.
        indisp/acfdisp/fitdisp - Toggles for plotting the input spectra, the
            spectra derived from the estimated ACFs, and the fitted spectra.
        filetemplate (default 'spec') This is the beginning string used to save the images.
        suptitle - Super title placed on each saved figure.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    maindir=Path(maindir).expanduser()
    acfname = maindir.joinpath('ACF','00lags.h5')
    ffit = maindir.joinpath('Fitted','fitteddata.h5')
    specsfiledir = maindir.joinpath('Spectrums')
    (sensdict,simparams) = readconfigfile(configfile)
    simdtype = simparams['dtype']
    npts = simparams['numpoints']*3.0
    amb_dict = simparams['amb_dict']
    # Promote a single coordinate to a 1-row 2-D array so indexing is uniform.
    if sp.ndim(coords)==1:
        coords = coords[sp.newaxis,:]
    Nt = len(times)
    Nloc = coords.shape[0]
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    if indisp:
        # Spectrum files are named by time; pick, for each requested time, the
        # first file at or after it (falling back to the last file).
        dirlist = [i.name for i in specsfiledir.glob('*.h5')]
        timelist = sp.array([float(i.split()[0]) for i in dirlist])
        for itn,itime in enumerate(times):
            filear = sp.argwhere(timelist>=itime)
            if len(filear)==0:
                filenum = len(timelist)-1
            else:
                filenum = filear[0][0]
            specsfilename = specsfiledir.joinpath(dirlist[filenum])
            Ionoin = IonoContainer.readh5(str(specsfilename))
            if itn==0:
                # Allocate the output buffer on the first file, once the
                # spectrum length (npts) is known.
                specin = sp.zeros((Nloc,Nt,Ionoin.Param_List.shape[-1])).astype(Ionoin.Param_List.dtype)
                omeg = Ionoin.Param_Names
                npts = Ionoin.Param_List.shape[-1]
            for icn, ic in enumerate(coords):
                if cartcoordsys:
                    tempin = Ionoin.getclosest(ic,times)[0]
                else:
                    tempin = Ionoin.getclosestsphere(ic,times)[0]
                specin[icn,itn] = tempin[0,:]/npts
    fs = sensdict['fs']
    if acfdisp:
        Ionoacf = IonoContainer.readh5(str(acfname))
        ACFin = sp.zeros((Nloc,Nt,Ionoacf.Param_List.shape[-1])).astype(Ionoacf.Param_List.dtype)
        ts = sensdict['t_s']
        # Frequency axis matching an npts-point FFT of the lag data.
        omeg = sp.arange(-sp.ceil((npts-1.)/2.),sp.floor((npts-1.)/2.)+1)/ts/npts
        for icn, ic in enumerate(coords):
            if cartcoordsys:
                tempin = Ionoacf.getclosest(ic,times)[0]
            else:
                tempin = Ionoacf.getclosestsphere(ic,times)[0]
            if sp.ndim(tempin)==1:
                tempin = tempin[sp.newaxis,:]
            ACFin[icn] = tempin
        # Transform the estimated ACFs to spectra for comparison plots.
        specout = scfft.fftshift(scfft.fft(ACFin,n=npts,axis=-1),axes=-1)
    if fitdisp:
        Ionofit = IonoContainer.readh5(str(ffit))
        (omegfit,outspecsfit) =ISRspecmakeout(Ionofit.Param_List,sensdict['fc'],sensdict['fs'],simparams['species'],npts)
        Ionofit.Param_List= outspecsfit
        Ionofit.Param_Names = omegfit
        specfit = sp.zeros((Nloc,Nt,npts))
        for icn, ic in enumerate(coords):
            if cartcoordsys:
                tempin = Ionofit.getclosest(ic,times)[0]
            else:
                tempin = Ionofit.getclosestsphere(ic,times)[0]
            if sp.ndim(tempin)==1:
                tempin = tempin[sp.newaxis,:]
            specfit[icn] = tempin/npts/npts
    # Six panels per figure (2x3 grid); one panel per (location, time) pair.
    nfig = int(sp.ceil(Nt*Nloc/6.0))
    imcount = 0
    for i_fig in range(nfig):
        lines = [None]*3
        labels = [None]*3
        (figmplf, axmat) = plt.subplots(2, 3,figsize=(16, 12), facecolor='w')
        axvec = axmat.flatten()
        for iax,ax in enumerate(axvec):
            if imcount>=Nt*Nloc:
                break
            iloc = int(sp.floor(imcount/Nt))
            itime = int(imcount-(iloc*Nt))
            maxvec = []
            if fitdisp:
                curfitspec = specfit[iloc,itime]
                rcsfit = curfitspec.sum()
                (taufit,acffit) = spect2acf(omegfit,curfitspec)
                # Apply the ambiguity function and rescale so lag zero carries
                # the original power before transforming back to a spectrum.
                guess_acffit = sp.dot(amb_dict['WttMatrix'],acffit)
                guess_acffit = guess_acffit*rcsfit/guess_acffit[0].real
                spec_intermfit = scfft.fftshift(scfft.fft(guess_acffit,n=npts))
                lines[1]= ax.plot(omeg*1e-3,spec_intermfit.real,label='Fitted Spectrum',linewidth=5)[0]
                labels[1] = 'Fitted Spectrum'
            if indisp:
                # apply ambiguity function to spectrum
                curin = specin[iloc,itime]
                rcs = curin.real.sum()
                (tau,acf) = spect2acf(omeg,curin)
                guess_acf = sp.dot(amb_dict['WttMatrix'],acf)
                guess_acf = guess_acf*rcs/guess_acf[0].real
                # fit to spectrums
                spec_interm = scfft.fftshift(scfft.fft(guess_acf,n=npts))
                maxvec.append(spec_interm.real.max())
                lines[0]= ax.plot(omeg*1e-3,spec_interm.real,label='Input',linewidth=5)[0]
                labels[0] = 'Input Spectrum With Ambiguity Applied'
            if acfdisp:
                lines[2]=ax.plot(omeg*1e-3,specout[iloc,itime].real,label='Output',linewidth=5)[0]
                labels[2] = 'Estimated Spectrum'
                maxvec.append(specout[iloc,itime].real.max())
            ax.set_xlabel('f in kHz')
            ax.set_ylabel('Amp')
            ax.set_title('Location {0}, Time {1}'.format(coords[iloc],times[itime]))
            ax.set_ylim(0.0,max(maxvec)*1)
            ax.set_xlim([-fs*5e-4,fs*5e-4])
            imcount=imcount+1
        figmplf.suptitle(suptitle, fontsize=20)
        # Drop legend slots for any series that was not plotted.
        if None in labels:
            labels.remove(None)
            lines.remove(None)
        plt.figlegend( lines, labels, loc = 'lower center', ncol=5, labelspacing=0. )
        fname= filetemplate+'_{0:0>3}.png'.format(i_fig)
        plt.savefig(fname)
        plt.close(figmplf)
def plotacfs(coords,times,configfile,maindir,cartcoordsys = True, indisp=True,acfdisp= True,
             fitdisp=True, filetemplate='acf',suptitle = 'ACF Comparison',invacf=''):
    """ This will create a set of images that compare the input ISR acf to the
        output ISR acfs from the simulator.
        Inputs
        coords - An Nx3 numpy array that holds the coordinates of the desired points.
        times - A numpy list of times in seconds.
        configfile - The name of the configuration file used.
        maindir - Simulation directory; 'Spectrums', 'ACF', 'ACFInv' and
            'Fitted' subdirectories are read from it.
        cartcoordsys - (default True)A bool, if true then the coordinates are given in cartisian if
            false then it is assumed that the coords are given in sphereical coordinates.
        indisp/acfdisp/fitdisp - Toggles for the input, estimated and fitted ACFs.
        filetemplate (default 'acf') This is the beginning string used to save the images.
        suptitle - Super title placed on each saved figure.
        invacf - Suffix of an inverted-ACF file under 'ACFInv' to overlay;
            empty string disables that overlay.
    """
    # indisp = specsfilename is not None
    # acfdisp = acfname is not None
    maindir=Path(maindir).expanduser()
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    acfname = maindir.joinpath('ACF','00lags.h5')
    ffit = maindir.joinpath('Fitted','fitteddata.h5')
    specsfiledir = maindir.joinpath('Spectrums')
    (sensdict,simparams) = readconfigfile(configfile)
    simdtype = simparams['dtype']
    npts = simparams['numpoints']*3.0
    amb_dict = simparams['amb_dict']
    # Promote a single coordinate to a 1-row 2-D array so indexing is uniform.
    if sp.ndim(coords)==1:
        coords = coords[sp.newaxis,:]
    Nt = len(times)
    Nloc = coords.shape[0]
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    pulse = simparams['Pulse']
    ts = sensdict['t_s']
    # Lag-time axis (seconds) for the plots, one sample per pulse lag.
    tau1 = sp.arange(pulse.shape[-1])*ts
    if indisp:
        # Spectrum files are named by time; pick, for each requested time, the
        # first file at or after it (falling back to the last file).
        dirlist = [i.name for i in specsfiledir.glob('*.h5')]
        timelist = sp.array([float(i.split()[0]) for i in dirlist])
        for itn,itime in enumerate(times):
            filear = sp.argwhere(timelist>=itime)
            if len(filear)==0:
                filenum = len(timelist)-1
            else:
                filenum = filear[0][0]
            specsfilename = specsfiledir.joinpath(dirlist[filenum])
            Ionoin = IonoContainer.readh5(str(specsfilename))
            if itn==0:
                # Allocate the buffer once the spectrum length is known.
                specin = sp.zeros((Nloc,Nt,Ionoin.Param_List.shape[-1])).astype(Ionoin.Param_List.dtype)
                omeg = Ionoin.Param_Names
                npts = Ionoin.Param_List.shape[-1]
            for icn, ic in enumerate(coords):
                if cartcoordsys:
                    tempin = Ionoin.getclosest(ic,times)[0]
                else:
                    tempin = Ionoin.getclosestsphere(ic,times)[0]
#                if sp.ndim(tempin)==1:
#                    tempin = tempin[sp.newaxis,:]
                specin[icn,itn] = tempin[0,:]
    if acfdisp:
        Ionoacf = IonoContainer.readh5(str(acfname))
        ACFin = sp.zeros((Nloc,Nt,Ionoacf.Param_List.shape[-1])).astype(Ionoacf.Param_List.dtype)
        omeg = sp.arange(-sp.ceil((npts+1)/2),sp.floor((npts+1)/2))/ts/npts
        for icn, ic in enumerate(coords):
            if cartcoordsys:
                tempin = Ionoacf.getclosest(ic,times)[0]
            else:
                tempin = Ionoacf.getclosestsphere(ic,times)[0]
            if sp.ndim(tempin)==1:
                tempin = tempin[sp.newaxis,:]
            ACFin[icn] = tempin
    # Determine the inverse ACF stuff
    if len(invacf)==0:
        invacfbool = False
    else:
        invacfbool = True
        invfile=maindir.joinpath('ACFInv','00lags'+invacf+'.h5')
        Ionoacfinv=IonoContainer.readh5(str(invfile))
        ACFinv = sp.zeros((Nloc,Nt,Ionoacfinv.Param_List.shape[-1])).astype(Ionoacfinv.Param_List.dtype)
        for icn, ic in enumerate(coords):
            if cartcoordsys:
                tempin = Ionoacfinv.getclosest(ic,times)[0]
            else:
                tempin = Ionoacfinv.getclosestsphere(ic,times)[0]
            if sp.ndim(tempin)==1:
                tempin = tempin[sp.newaxis,:]
            ACFinv[icn] = tempin
    if fitdisp:
        Ionofit = IonoContainer.readh5(str(ffit))
        (omegfit,outspecsfit) = ISRspecmakeout(Ionofit.Param_List,sensdict['fc'],
                                               sensdict['fs'], simparams['species'],
                                               npts)
        Ionofit.Param_List = outspecsfit
        Ionofit.Param_Names = omegfit
        specfit = sp.zeros((Nloc,Nt,npts))
        for icn, ic in enumerate(coords):
            if cartcoordsys:
                tempin = Ionofit.getclosest(ic,times)[0]
            else:
                tempin = Ionofit.getclosestsphere(ic,times)[0]
            if sp.ndim(tempin)==1:
                tempin = tempin[sp.newaxis,:]
            specfit[icn] = tempin/npts/npts
    # Three (location, time) pairs per figure; each row holds the real and
    # imaginary parts of one pair's ACFs side by side.
    nfig = int(sp.ceil(Nt*Nloc/3.))
    imcount = 0
    for i_fig in range(nfig):
        lines = [None]*4
        labels = [None]*4
        lines_im = [None]*4
        labels_im = [None]*4
        (figmplf, axmat) = plt.subplots(3, 2,figsize=(16, 12), facecolor='w')
        for ax in axmat:
            if imcount>=Nt*Nloc:
                break
            iloc = int(sp.floor(imcount/Nt))
            itime = int(imcount-(iloc*Nt))
            maxvec = []
            minvec = []
            if indisp:
                # apply ambiguity funciton to spectrum
                curin = specin[iloc,itime]
                (tau,acf) = spect2acf(omeg,curin)
                acf1 = scfft.ifftshift(acf)[:len(pulse)]*len(curin)
                rcs = acf1[0].real
                guess_acf = sp.dot(amb_dict['WttMatrix'],acf)
                # Rescale so lag zero carries the original power.
                guess_acf = guess_acf*rcs/guess_acf[0].real
                # fit to spectrums
                maxvec.append(guess_acf.real.max())
                maxvec.append(guess_acf.imag.max())
                minvec.append(acf1.real.min())
                minvec.append(acf1.imag.min())
                lines[0]= ax[0].plot(tau1*1e6,guess_acf.real,label='Input',linewidth=5)[0]
                labels[0] = 'Input ACF With Ambiguity Applied'
                lines_im[0]= ax[1].plot(tau1*1e6,guess_acf.imag,label='Input',linewidth=5)[0]
                labels_im[0] = 'Input ACF With Ambiguity Applied'
            if fitdisp:
                curinfit = specfit[iloc,itime]
                (taufit,acffit) = spect2acf(omegfit,curinfit)
                rcsfit=curinfit.sum()
                guess_acffit = sp.dot(amb_dict['WttMatrix'],acffit)
                guess_acffit = guess_acffit*rcsfit/guess_acffit[0].real
                lines[1]= ax[0].plot(tau1*1e6,guess_acffit.real,label='Input',linewidth=5)[0]
                labels[1] = 'Fitted ACF'
                lines_im[1]= ax[1].plot(tau1*1e6,guess_acffit.imag,label='Input',linewidth=5)[0]
                labels_im[1] = 'Fitted ACF'
            if acfdisp:
                lines[2]=ax[0].plot(tau1*1e6,ACFin[iloc,itime].real,label='Output',linewidth=5)[0]
                labels[2] = 'Estimated ACF'
                lines_im[2]=ax[1].plot(tau1*1e6,ACFin[iloc,itime].imag,label='Output',linewidth=5)[0]
                labels_im[2] = 'Estimated ACF'
                maxvec.append(ACFin[iloc,itime].real.max())
                maxvec.append(ACFin[iloc,itime].imag.max())
                minvec.append(ACFin[iloc,itime].real.min())
                minvec.append(ACFin[iloc,itime].imag.min())
            if invacfbool:
                lines[3]=ax[0].plot(tau1*1e6,ACFinv[iloc,itime].real,label='Output',linewidth=5)[0]
                labels[3] = 'Reconstructed ACF'
                lines_im[3]=ax[1].plot(tau1*1e6,ACFinv[iloc,itime].imag,label='Output',linewidth=5)[0]
                labels_im[3] = 'Reconstructed ACF'
            ax[0].set_xlabel(r'$\tau$ in $\mu$s')
            ax[0].set_ylabel('Amp')
            ax[0].set_title('Real Part')# Location {0}, Time {1}'.format(coords[iloc],times[itime]))
            ax[0].set_ylim(min(minvec),max(maxvec)*1)
            ax[0].set_xlim([tau1.min()*1e6,tau1.max()*1e6])
            ax[1].set_xlabel(r'$\tau$ in $\mu$s')
            ax[1].set_ylabel('Amp')
            ax[1].set_title('Imag Part')# Location {0}, Time {1}'.format(coords[iloc],times[itime]))
            ax[1].set_ylim(min(minvec),max(maxvec)*1)
            ax[1].set_xlim([tau1.min()*1e6,tau1.max()*1e6])
            imcount=imcount+1
        figmplf.suptitle(suptitle, fontsize=20)
        # Drop legend slots for any series that was not plotted.
        if None in labels:
            labels.remove(None)
            lines.remove(None)
        plt.figlegend( lines, labels, loc = 'lower center', ncol=5, labelspacing=0. )
        fname= filetemplate+'_{0:0>3}.png'.format(i_fig)
        plt.savefig(fname,dpi=300)
        plt.close(figmplf)
def plotspecsgen(timeomeg, speclist, needtrans, specnames=None, filename='specs.png', n=None):
    """ Plot a collection of spectra on a single axis and save the figure.
    Inputs
        timeomeg - Frequency basis shared by all spectra, or a list holding one
            basis per spectrum. Entries flagged in needtrans are treated as
            lag/time bases and passed through acf2spect first.
        speclist - List of spectra (or ACFs when the matching needtrans entry
            is True) to plot; only the real part is drawn.
        needtrans - Sequence of bools; True means the matching entry must be
            transformed to a spectrum via acf2spect.
        specnames - Optional legend labels; defaults to 'Spec <i>'.
        filename - (default 'specs.png') Path of the saved image.
        n - FFT length handed to acf2spect.
    """
    fig1 = plt.figure()
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    lines = []
    if specnames is None:
        specnames = ['Spec {0}'.format(i) for i in range(len(speclist))]
    labels = specnames
    # Running axis limits; start at +/-inf so any real value replaces them.
    # float('inf') avoids the deprecated sp.Inf alias (removed in NumPy 2.0).
    xlims = [float('inf'), -float('inf')]
    ylims = [float('inf'), -float('inf')]
    for ispecn, ispec in enumerate(speclist):
        # isinstance is the idiomatic (and subclass-safe) type test.
        if isinstance(timeomeg, list):
            curbasis = timeomeg[ispecn]
        else:
            curbasis = timeomeg
        if needtrans[ispecn]:
            curbasis, ispec = acf2spect(curbasis, ispec, n=n)
        lines.append(plt.plot(curbasis*1e-3, ispec.real, linewidth=5)[0])
        xlims = [min(xlims[0], min(curbasis)*1e-3), max(xlims[1], max(curbasis)*1e-3)]
        ylims = [min(ylims[0], min(ispec.real)), max(ylims[1], max(ispec.real))]
    plt.xlabel('f in kHz')
    plt.ylabel('Amp')
    plt.title('Output Spectrums')
    plt.xlim(xlims)
    plt.ylim(ylims)
    plt.legend(lines, labels)
    plt.savefig(filename)
    plt.close(fig1)
def analysisdump(maindir,configfile,suptitle=None, params = ['Ne','Nepow','Te','Ti','Vi']):
    """ This function will perform all of the plotting functions in this module
        given the main directory that all of the files live.
        Inputs
        maindir - The directory for the simulation.
        configfile - The name of the configuration file used.
        suptitle - The supertitle used on the files.
        params - Plasma parameters to plot (reduced to ['Ne'] for Barker pulses).
    """
    maindir = Path(maindir)
    plotdir = maindir.joinpath('AnalysisPlots')
    if not plotdir.is_dir():
        plotdir.mkdir()
    #plot spectrums
    filetemplate1 = str(maindir.joinpath('AnalysisPlots', 'Spec'))
    filetemplate3 = str(maindir.joinpath('AnalysisPlots', 'ACF'))
    filetemplate4 = str(maindir.joinpath('AnalysisPlots', 'AltvTime'))
    (sensdict, simparams) = readconfigfile(configfile)
    angles = simparams['angles']
    ang_data = sp.array([[iout[0], iout[1]] for iout in angles])
    # For radars other than RISR/PFISR, collapse each configured beam group to
    # its mean pointing direction.
    if not sensdict['Name'].lower() in ['risr', 'pfisr']:
        ang_data_temp = ang_data.copy()
        beamlistlist = sp.array(simparams['outangles']).astype(int)
        ang_data = sp.array([ang_data_temp[i].mean(axis=0) for i in beamlistlist])
    # Use the highest-elevation beam and four evenly spaced range gates as the
    # sample locations for the spectrum/ACF comparison plots.
    zenang = ang_data[sp.argmax(ang_data[:, 1])]
    rnggates = simparams['Rangegatesfinal']
    rngchoices = sp.linspace(sp.amin(rnggates), sp.amax(rnggates), 4)
    angtile = sp.tile(zenang, (len(rngchoices), 1))
    coords = sp.column_stack((sp.transpose(rngchoices), angtile))
    times = simparams['Timevec']
    filetemplate2 = str(maindir.joinpath('AnalysisPlots', 'Params'))
    # Barker-coded pulses only support density estimates, so skip the
    # spectrum/ACF comparisons and plot Ne alone.
    if simparams['Pulsetype'].lower() == 'barker':
        params = ['Ne']
        if suptitle is None:
            plotbeamparametersv2(times, configfile, maindir, params=params,
                                 filetemplate=filetemplate2, werrors=True)
        else:
            plotbeamparametersv2(times, configfile, maindir, params=params,
                                 filetemplate=filetemplate2, suptitle=suptitle,
                                 werrors=True)
    else:
        if suptitle is None:
            plotspecs(coords, times, configfile, maindir, cartcoordsys=False,
                      filetemplate=filetemplate1)
            plotacfs(coords, times, configfile, maindir, cartcoordsys=False,
                     filetemplate=filetemplate3)
            plotbeamparametersv2(times, configfile, maindir, params=params,
                                 filetemplate=filetemplate2, werrors=True)
            plotbeamparametersv2(times, configfile, maindir, params=params,
                                 filetemplate=filetemplate2+'Noerrors', werrors=False)
            beamvstime(configfile, maindir, params=params, filetemplate=filetemplate4)
        else:
            plotspecs(coords, times, configfile, maindir, cartcoordsys=False,
                      filetemplate=filetemplate1, suptitle=suptitle)
            plotacfs(coords, times, configfile, maindir, cartcoordsys=False,
                     filetemplate=filetemplate3, suptitle=suptitle)
            plotbeamparametersv2(times, configfile, maindir, params=params,
                                 filetemplate=filetemplate2, suptitle=suptitle, werrors=True)
            plotbeamparametersv2(times, configfile, maindir, params=params,
                                 filetemplate=filetemplate2+'Noerrors', suptitle=suptitle,
                                 werrors=False)
            beamvstime(configfile, maindir, params=params, filetemplate=filetemplate4,
                       suptitle=suptitle)
| mit |
nivertech/peru | tests/test_test_shared.py | 4 | 1794 | import os
from pathlib import Path
import unittest
import shared
class SharedTestCodeTest(unittest.TestCase):
    """Tests for the helper functions in the test-support `shared` module."""
    def test_create_dir(self):
        # An argument-less call must yield an empty directory.
        empty_dir = shared.create_dir()
        self.assertListEqual([], os.listdir(empty_dir))
        content = {Path('foo'): 'a', Path('bar/baz'): 'b'}
        content_dir = shared.create_dir(content)
        # Don't use read_dir, because the read_dir test relies on create_dir.
        actual_content = {}
        for p in Path(content_dir).glob('**/*'):
            if p.is_dir():
                continue
            with p.open() as f:
                actual_content[p.relative_to(content_dir)] = f.read()
        self.assertDictEqual(content, actual_content)
    def test_read_dir(self):
        content = {Path('foo'): 'a', Path('bar/baz'): 'b'}
        test_dir = shared.create_dir(content)
        read_content = shared.read_dir(test_dir)
        self.assertDictEqual(content, read_content)
        # Excluding either the directory or the file inside it should drop it.
        self.assertDictEqual({Path('foo'): 'a'},
                             shared.read_dir(test_dir, excludes=['bar']))
        self.assertDictEqual({Path('foo'): 'a'},
                             shared.read_dir(test_dir, excludes=['bar/baz']))
    def test_assert_contents(self):
        content = {'foo': 'a', 'bar/baz': 'b'}
        test_dir = shared.create_dir(content)
        shared.assert_contents(test_dir, content)
        shared.write_files(test_dir, {'bing': 'c'})
        with self.assertRaises(AssertionError):
            shared.assert_contents(test_dir, content)
        shared.assert_contents(test_dir, content, excludes=['bing'])
        try:
            shared.assert_contents(test_dir, content, excludes=['foo'])
        except AssertionError as e:
            assert e.args[0].startswith('EXPECTED FILES WERE EXCLUDED')
        else:
            # Previously a missing AssertionError passed silently; excluding a
            # file the caller expects must be reported.
            self.fail('assert_contents did not flag an excluded expected file')
lisael/pg-django | django/contrib/localflavor/ca/forms.py | 86 | 5019 | """
Canada-specific Form helpers
"""
from __future__ import absolute_import
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, CharField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
sin_re = re.compile(r"^(\d{3})-(\d{3})-(\d{3})$")
class CAPostalCodeField(CharField):
    """
    Canadian postal code field.
    Rejects the letters D, F, I, O, Q, U anywhere, plus W and Z in the
    first position, following Canada Post addressing rules:
    http://www.canadapost.ca/tools/pg/manual/PGaddress-e.asp#1402170
    """
    default_error_messages = {
        'invalid': _(u'Enter a postal code in the format XXX XXX.'),
    }
    postcode_regex = re.compile(r'^([ABCEGHJKLMNPRSTVXY]\d[ABCEGHJKLMNPRSTVWXYZ]) *(\d[ABCEGHJKLMNPRSTVWXYZ]\d)$')
    def clean(self, value):
        """Validate the code and return it normalized to 'XXX XXX'."""
        cleaned = super(CAPostalCodeField, self).clean(value)
        if cleaned in EMPTY_VALUES:
            return u''
        match = self.postcode_regex.match(cleaned.strip().upper())
        if match is None:
            raise ValidationError(self.default_error_messages['invalid'])
        return "%s %s" % match.groups()
class CAPhoneNumberField(Field):
    """Canadian phone number field."""
    default_error_messages = {
        'invalid': u'Phone numbers must be in XXX-XXX-XXXX format.',
    }
    def clean(self, value):
        """Strip parentheses/whitespace and normalize to XXX-XXX-XXXX."""
        super(CAPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        stripped = re.sub('(\(|\)|\s+)', '', smart_unicode(value))
        match = phone_digits_re.search(stripped)
        if match is None:
            raise ValidationError(self.error_messages['invalid'])
        return u'%s-%s-%s' % match.groups()
class CAProvinceField(Field):
    """
    A form field that validates its input is a Canadian province name or abbreviation.
    It normalizes the input to the standard two-letter postal service
    abbreviation for the given province.
    """
    default_error_messages = {
        'invalid': u'Enter a Canadian province or territory.',
    }
    def clean(self, value):
        """Return the two-letter abbreviation, u'' for empty input, or raise."""
        super(CAProvinceField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            value = value.strip().lower()
        except AttributeError:
            # Non-string input falls through to the ValidationError below.
            pass
        else:
            # Load data in memory only when it is required, see also #17275
            from django.contrib.localflavor.ca.ca_provinces import PROVINCES_NORMALIZED
            try:
                # `value` was already stripped/lowered above; the previous
                # second .strip().lower() call was redundant.
                return PROVINCES_NORMALIZED[value].decode('ascii')
            except KeyError:
                pass
        raise ValidationError(self.error_messages['invalid'])
class CAProvinceSelect(Select):
    """
    A Select widget that uses a list of Canadian provinces and
    territories as its choices.
    """
    def __init__(self, attrs=None):
        # Load data in memory only when it is required, see also #17275
        from django.contrib.localflavor.ca.ca_provinces import PROVINCE_CHOICES
        # Delegate to Select with the fixed province/territory choice list.
        super(CAProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class CASocialInsuranceNumberField(Field):
    """
    A Canadian Social Insurance Number (SIN).
    A value is accepted when it:
    * conforms to the XXX-XXX-XXX format; and
    * passes the Luhn mod-10 check digit test.
    See: http://en.wikipedia.org/wiki/Social_Insurance_Number
    """
    default_error_messages = {
        'invalid': _('Enter a valid Canadian Social Insurance number in XXX-XXX-XXX format.'),
    }
    def clean(self, value):
        """Return the SIN formatted as 'XXX-XXX-XXX' or raise ValidationError."""
        super(CASocialInsuranceNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = re.match(sin_re, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        groups = match.groups()
        if not self.luhn_checksum_is_valid(u'%s%s%s' % groups):
            raise ValidationError(self.error_messages['invalid'])
        return u'%s-%s-%s' % groups
    def luhn_checksum_is_valid(self, number):
        """
        True if *number* (a string of digits) passes the Luhn mod-10 checksum.
        See: http://en.wikipedia.org/wiki/Luhn_algorithm
        """
        total = 0
        # Walk from the rightmost digit; double every second digit and fold
        # two-digit results back into one (x -> x - 9).
        for position, char in enumerate(reversed(number)):
            digit = int(char)
            if position % 2 == 1:
                digit *= 2
                if digit > 9:
                    digit -= 9
            total += digit
        return total % 10 == 0
| bsd-3-clause |
ShawnMurd/MetPy | src/metpy/interpolate/slices.py | 1 | 6857 | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools for interpolating to a vertical slice/cross section through data."""
import numpy as np
import xarray as xr
from ..package_tools import Exporter
from ..units import units
from ..xarray import check_axis
exporter = Exporter(globals())
@exporter.export
def interpolate_to_slice(data, points, interp_type='linear'):
    r"""Obtain an interpolated slice through data using xarray.
    Utilizing the interpolation functionality in `xarray`, this function takes a slice the
    given data (currently only regular grids are supported), which is given as an
    `xarray.DataArray` so that we can utilize its coordinate metadata.
    Parameters
    ----------
    data: `xarray.DataArray` or `xarray.Dataset`
        Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
        DataArray in the Dataset) must have been parsed by MetPy and include both an x and
        y coordinate dimension.
    points: (N, 2) array_like
        A list of x, y points in the data projection at which to interpolate the data
    interp_type: str, optional
        The interpolation method, either 'linear' or 'nearest' (see
        `xarray.DataArray.interp()` for details). Defaults to 'linear'.
    Returns
    -------
    `xarray.DataArray` or `xarray.Dataset`
        The interpolated slice of data, with new index dimension of size N.
    See Also
    --------
    cross_section
    """
    try:
        x, y = data.metpy.coordinates('x', 'y')
    except AttributeError:
        raise ValueError('Required coordinate information not available. Verify that '
                         'your data has been parsed by MetPy with proper x and y '
                         'dimension coordinates.')
    # Interpolate both horizontal coordinates onto a shared 'index' dimension,
    # which collapses the 2-D grid to a 1-D path through the points.
    data_sliced = data.interp({
        x.name: xr.DataArray(points[:, 0], dims='index', attrs=x.attrs),
        y.name: xr.DataArray(points[:, 1], dims='index', attrs=y.attrs)
    }, method=interp_type)
    data_sliced.coords['index'] = range(len(points))
    # Bug in xarray: interp strips units
    if (
        isinstance(data.data, units.Quantity)
        and not isinstance(data_sliced.data, units.Quantity)
    ):
        # Re-attach the original units to the interpolated result.
        data_sliced.data = units.Quantity(data_sliced.data, data.data.units)
    return data_sliced
@exporter.export
def geodesic(crs, start, end, steps):
    r"""Construct a geodesic path between two points.
    This function acts as a wrapper for the geodesic construction available in `pyproj`.
    Parameters
    ----------
    crs: `cartopy.crs`
        Cartopy Coordinate Reference System to use for the output
    start: (2, ) array_like
        A latitude-longitude pair designating the start point of the geodesic (units are
        degrees north and degrees east).
    end: (2, ) array_like
        A latitude-longitude pair designating the end point of the geodesic (units are degrees
        north and degrees east).
    steps: int, optional
        The number of points along the geodesic between the start and the end point
        (including the end points).
    Returns
    -------
    `numpy.ndarray`
        The list of x, y points in the given CRS of length `steps` along the geodesic.
    See Also
    --------
    cross_section
    """
    import cartopy.crs as ccrs
    from pyproj import Geod
    # Geod.npts only gives points *in between* the start and end, and we want to include
    # the endpoints.
    g = Geod(crs.proj4_init)
    # Note: inputs are (lat, lon) but pyproj expects (lon, lat), hence the
    # [::-1] reversals. Renamed from `geodesic`, which shadowed this
    # function's own name.
    geodesic_points = np.concatenate([
        np.array(start[::-1])[None],
        np.array(g.npts(start[1], start[0], end[1], end[0], steps - 2)),
        np.array(end[::-1])[None]
    ]).transpose()
    points = crs.transform_points(ccrs.Geodetic(), *geodesic_points)[:, :2]
    return points
@exporter.export
def cross_section(data, start, end, steps=100, interp_type='linear'):
    r"""Obtain an interpolated cross-sectional slice through gridded data.
    Utilizing the interpolation functionality in `xarray`, this function takes a vertical
    cross-sectional slice along a geodesic through the given data on a regular grid, which is
    given as an `xarray.DataArray` so that we can utilize its coordinate and projection
    metadata.
    Parameters
    ----------
    data: `xarray.DataArray` or `xarray.Dataset`
        Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
        DataArray in the Dataset) must have been parsed by MetPy and include both an x and
        y coordinate dimension and the added `crs` coordinate.
    start: (2, ) array_like
        A latitude-longitude pair designating the start point of the cross section (units are
        degrees north and degrees east).
    end: (2, ) array_like
        A latitude-longitude pair designating the end point of the cross section (units are
        degrees north and degrees east).
    steps: int, optional
        The number of points along the geodesic between the start and the end point
        (including the end points) to use in the cross section. Defaults to 100.
    interp_type: str, optional
        The interpolation method, either 'linear' or 'nearest' (see
        `xarray.DataArray.interp()` for details). Defaults to 'linear'.
    Returns
    -------
    `xarray.DataArray` or `xarray.Dataset`
        The interpolated cross section, with new index dimension along the cross-section.
    See Also
    --------
    interpolate_to_slice, geodesic
    """
    if isinstance(data, xr.Dataset):
        # Recursively apply to dataset
        return data.map(cross_section, True, (start, end), steps=steps,
                        interp_type=interp_type)
    elif data.ndim == 0:
        # This has no dimensions, so it is likely a projection variable. In any case, there
        # are no data here to take the cross section with. Therefore, do nothing.
        return data
    else:
        # Get the projection and coordinates
        try:
            crs_data = data.metpy.cartopy_crs
            x = data.metpy.x
        except AttributeError:
            raise ValueError('Data missing required coordinate information. Verify that '
                             'your data have been parsed by MetPy with proper x and y '
                             'dimension coordinates and added crs coordinate of the '
                             'correct projection for each variable.')
        # Get the geodesic
        points_cross = geodesic(crs_data, start, end, steps)
        # Patch points_cross to match given longitude range, whether [0, 360) or (-180, 180]
        if check_axis(x, 'longitude') and (x > 180).any():
            points_cross[points_cross[:, 0] < 0, 0] += 360.
        # Return the interpolated data
        return interpolate_to_slice(data, points_cross, interp_type=interp_type)
| bsd-3-clause |
cc272309126/panda3d | panda/src/testbed/test_native_net3.py | 8 | 1087 | from libpandaexpress import BufferedDatagramConnection
from libpandaexpress import SocketAddress
from libpandaexpress import SocketIP
from libpandaexpress import SocketTCP
from libpandaexpress import SocketTCPListen
from libpandaexpress import SocketUDPOutgoing
from libpandaexpress import SocketUDPIncoming
from libpandaexpress import Datagram
import time
# Python 2 smoke-test script for Panda3D's buffered datagram networking.
# Initialize the socket layer before any network objects are created.
SocketIP.InitNetworkDriver();
# Target a local endpoint on port 6666.
addr = SocketAddress()
addr.setHost("127.0.0.1",6666)
print addr.getIpPort()
# Connection with large read/write buffers; args per BufferedDatagramConnection.
MyConection = BufferedDatagramConnection(0,4096000,4096000,102400);
#help(BufferedDatagramConnection)
MyConection.AddAddress(addr)
# First test datagram: mixed-width integer payload.
dg = Datagram();
dg.addUint8(1)
dg.addUint64(4001)
dg.addUint16(2001)
dg.addUint64(123456)
MyConection.SendMessage(dg);
# Second datagram (built but only `dg` is resent in the loop below).
dg1 = Datagram();
dg1.addUint8(1)
dg1.addUint64(123456)
dg1.addUint64(12340)
dg1.addUint16(1000)
dg1.addUint16(54321)
# Forever: send, drain any inbound messages, flush, then idle one second.
while 1==1:
    MyConection.SendMessage(dg);
    ##for x in range(120000):
    while MyConection.GetMessage():
        None
    MyConection.Flush();
    time.sleep(1)
    print "loop"
stacywsmith/ansible | lib/ansible/modules/notification/jabber.py | 32 | 4800 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
version_added: "1.2"
module: jabber
short_description: Send a message to jabber user or chat room
description:
- Send a message to jabber
options:
user:
description:
- User as which to connect
required: true
password:
description:
- password for user to connect
required: true
to:
description:
- user ID or name of the room, when using room use a slash to indicate your nick.
required: true
msg:
description:
- The message body.
required: true
default: null
host:
description:
- host to connect, overrides user info
required: false
port:
description:
- port to connect to, overrides default
required: false
default: 5222
encoding:
description:
- message encoding
required: false
# informational: requirements for nodes
requirements:
- python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# send a message to a user
- jabber:
user: mybot@example.net
password: secret
to: friend@example.net
msg: Ansible task finished
# send a message to a room
- jabber:
user: mybot@example.net
password: secret
to: mychaps@conference.example.net/ansiblebot
msg: Ansible task finished
# send a message, specifying the host and port
- jabber:
user: mybot@example.net
host: talk.example.net
port: 5223
password: secret
to: mychaps@example.net
msg: Ansible task finished
'''
import os
import re
import time
HAS_XMPP = True
try:
import xmpp
except ImportError:
HAS_XMPP = False
def main():
    """Ansible entry point: send an XMPP message to a user or MUC room."""
    module = AnsibleModule(
        argument_spec=dict(
            user=dict(required=True),
            password=dict(required=True, no_log=True),
            to=dict(required=True),
            msg=dict(required=True),
            host=dict(required=False),
            port=dict(required=False,default=5222),
            encoding=dict(required=False),
        ),
        supports_check_mode=True
    )
    if not HAS_XMPP:
        module.fail_json(msg="The required python xmpp library (xmpppy) is not installed")
    jid = xmpp.JID(module.params['user'])
    user = jid.getNode()
    server = jid.getDomain()
    port = module.params['port']
    password = module.params['password']
    # 'room@server/nick' means a chat room; a plain JID means a direct message.
    try:
        to, nick = module.params['to'].split('/', 1)
    except ValueError:
        to, nick = module.params['to'], None
    if module.params['host']:
        host = module.params['host']
    else:
        host = server
    if module.params['encoding']:
        # Bug fix: `params` was an undefined name here (NameError); the
        # option lives on module.params.
        xmpp.simplexml.ENCODING = module.params['encoding']
    msg = xmpp.protocol.Message(body=module.params['msg'])
    try:
        conn=xmpp.Client(server, debug=[])
        if not conn.connect(server=(host,port)):
            module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
        if not conn.auth(user,password,'Ansible'):
            module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server))
        # some old servers require this, also the sleep following send
        conn.sendInitPresence(requestRoster=0)
        if nick: # sending to room instead of user, need to join
            msg.setType('groupchat')
            msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
            conn.send(xmpp.Presence(to=module.params['to']))
            time.sleep(1)
        else:
            msg.setType('chat')
        msg.setTo(to)
        if not module.check_mode:
            conn.send(msg)
            time.sleep(1)
        conn.disconnect()
    except Exception:
        e = get_exception()
        module.fail_json(msg="unable to send msg: %s" % e)
    module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
# import module snippets
# (the wildcard import supplies AnsibleModule, per the legacy Ansible module
# pattern used by this file)
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception

# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Charleo85/SIS-Rebuild | models/api/views_model.py | 1 | 11071 | from django.http import JsonResponse, HttpResponse
from django.contrib.auth import hashers
from django.forms.models import model_to_dict
from django.shortcuts import render
from django.core.exceptions import ObjectDoesNotExist
from .models import *
from .forms import *
def _success(data_dict, model_name, code):
    """Return a JSON response with *data_dict* keyed under *model_name*."""
    return JsonResponse({'status_code': code, model_name: data_dict})
def _failure(code, error_msg=''):
    """Return a JSON error payload; error_message is included only when given."""
    payload = {'status_code': code}
    if error_msg != '':
        payload['error_message'] = error_msg
    return JsonResponse(payload)
def index(request):
    """Simple liveness endpoint: always responds with plain-text Success."""
    return HttpResponse("Success!")
def course_detail(request, sisid):
    """GET: return one course as JSON; POST: update it in place.

    Returns 404 when no course has id *sisid*, 400 on any other failure.
    """
    try:
        target_course = Course.objects.get(id=sisid)
    except ObjectDoesNotExist:
        return _failure(404)

    if request.method == 'GET':
        data = model_to_dict(target_course)
        # .count() issues COUNT(*) in the database instead of materializing
        # every related row just to take len() of it.
        data['current_enrolled'] = target_course.student_set.count()
        data['instructor'] = str(data['instructor'])
        return _success(data, 'course', 200)
    elif request.method == 'POST':
        # The posted id must match the URL to guard against mismatched updates.
        if request.POST.get('id') == sisid:
            form = CourseForm(request.POST, instance=target_course)
            if form.is_valid():
                form.save()
                data = form.cleaned_data
                data['current_enrolled'] = target_course.student_set.count()
                data['instructor'] = str(data['instructor'])
                return _success(data, 'course', 201)
    return _failure(400)
def course_create(request):
    """POST: create a new course.

    Status codes follow this file's convention: 401 when a course with the
    posted id already exists, 402 when form validation fails, 201 on success.
    """
    if request.method == 'POST':
        exist = True
        try:
            Course.objects.get(id=request.POST.get('id'))
        except (ObjectDoesNotExist, ValueError):
            exist = False
        status_code = 401
        if not exist:
            form = CourseForm(request.POST)
            status_code = 402
            if form.is_valid():
                form.save()
                data = form.cleaned_data
                data['instructor'] = str(data['instructor'])
                data['current_enrolled'] = 0
                return _success(data, 'course', 201)
        return _failure(status_code)
    # BUG FIX: non-POST requests previously fell off the end and returned
    # None, which Django rejects; respond with a 400 payload instead.
    return _failure(400)
def course_delete(request):
    """POST: delete a course and its enrollments; 400 on any failure."""
    if request.method == 'POST':
        try:
            target_course = Course.objects.get(id=request.POST.get('id'))
        except (ObjectDoesNotExist, ValueError):
            target_course = None
        if target_course is not None:
            # Remove dependent enrollments first, then the course itself.
            # Deleting the already-fetched instance avoids re-querying by id.
            Enrollment.objects.filter(course=target_course).delete()
            target_course.delete()
            return JsonResponse({'status_code': 202})
    return _failure(400)
def course_all(request):
    """GET: list every course with its live enrollment count."""
    if request.method == 'GET':
        course_list = []
        for course in Course.objects.all():
            data = model_to_dict(course)
            # COUNT(*) in the database, not len() over fetched rows.
            data['current_enrolled'] = course.student_set.count()
            course_list.append(data)
        return _success(course_list, 'all_courses', 200)
    return _failure(400)
def instructor_detail(request, compid):
    """GET: one instructor plus taught courses; POST: update; 404 if absent."""
    try:
        ins = Instructor.objects.get(id=compid)
    except ObjectDoesNotExist:
        return _failure(404)

    if request.method == 'GET':
        data = model_to_dict(ins)
        # str() is the idiomatic spelling of the old course.__str__() calls.
        data['teaching_courses'] = [str(course) for course in ins.course_set.all()]
        # Never leak credentials through the API.
        data.pop('username', None)
        data.pop('password', None)
        return _success(data, 'instructor', 200)
    elif request.method == 'POST':
        if request.POST.get('id') == compid:
            form = InstructorForm(request.POST, instance=ins)
            if form.is_valid():
                form.save()
                data = form.cleaned_data
                data.pop('username', None)
                data.pop('password', None)
                return _success(data, 'instructor', 202)
    return _failure(400)
def instructor_create(request):
    """POST: create an instructor; rejects duplicates and invalid form data."""
    if request.method == 'POST':
        exist = True
        try:
            Instructor.objects.get(id=request.POST.get('id'))
        # ValueError added for parity with the delete views and course_create:
        # a malformed id must mean "does not exist", not a 500.
        except (ObjectDoesNotExist, ValueError):
            exist = False
        if exist:
            return _failure(400, 'instructor already exists')
        form = InstructorForm(request.POST)
        if form.is_valid():
            form.save()
            data = form.cleaned_data
            data.pop('username', None)
            data.pop('password', None)
            return _success(data, 'instructor', 201)
        return _failure(400, 'invalid input(s)')
    return _failure(400, 'incorrect request type')
def instructor_delete(request):
    """POST: delete an instructor plus every course (and enrollment) they teach."""
    if request.method != 'POST':
        return _failure(400)
    try:
        ins = Instructor.objects.get(id=request.POST.get('id'))
    except (ObjectDoesNotExist, ValueError):
        return _failure(400)
    # Cascade by hand: enrollments of each taught course, then the courses,
    # then the instructor record itself.
    courses = Course.objects.filter(instructor=ins)
    for course in courses:
        Enrollment.objects.filter(course=course).delete()
    courses.delete()
    Instructor.objects.filter(id=request.POST.get('id')).delete()
    return JsonResponse({'status_code': 202})
def instructor_all(request):
    """GET: list all instructors with credentials stripped."""
    if request.method != 'GET':
        return _failure(400)
    records = []
    for ins in Instructor.objects.all():
        entry = model_to_dict(ins)
        entry.pop('username', None)
        entry.pop('password', None)
        records.append(entry)
    return _success(records, 'all_instructors', 200)
def student_detail(request, compid):
    """GET: fetch one student; POST: update them; 404 when *compid* is unknown."""
    try:
        stud = Student.objects.get(id=compid)
    except ObjectDoesNotExist:
        return _failure(404)

    if request.method == 'GET':
        record = model_to_dict(stud)
        # Credentials are never exposed through the API.
        record.pop('username', None)
        record.pop('password', None)
        return _success(record, 'student', 200)
    if request.method == 'POST' and request.POST.get('id') == compid:
        form = StudentForm(request.POST, instance=stud)
        if form.is_valid():
            form.save()
            record = form.cleaned_data
            record.pop('username', None)
            record.pop('password', None)
            return _success(record, 'student', 201)
    return _failure(400)
def student_create(request):
    """POST: create a student; rejects duplicates and invalid form data."""
    if request.method == 'POST':
        exist = True
        try:
            Student.objects.get(id=request.POST.get('id'))
        # ValueError added for parity with the delete views: a malformed id
        # should be treated as "no such student", not crash the request.
        except (ObjectDoesNotExist, ValueError):
            exist = False
        if exist:
            return _failure(400, 'student already exists')
        form = StudentForm(request.POST)
        if form.is_valid():
            form.save()
            data = form.cleaned_data
            data.pop('username', None)
            data.pop('password', None)
            return _success(data, 'student', 201)
        return _failure(400, 'invalid input(s)')
    return _failure(400, 'incorrect request type')
def student_delete(request):
    """POST: delete a student after removing their enrollments."""
    if request.method != 'POST':
        return _failure(400)
    try:
        stud = Student.objects.get(id=request.POST.get('id'))
    except (ObjectDoesNotExist, ValueError):
        return _failure(400)
    Enrollment.objects.filter(student=stud).delete()
    Student.objects.filter(id=request.POST.get('id')).delete()
    return JsonResponse({'status_code': 202})
def student_all(request):
    """GET: every student, credentials stripped."""
    if request.method != 'GET':
        return _failure(400)
    records = []
    for stud in Student.objects.all():
        entry = model_to_dict(stud)
        entry.pop('username', None)
        entry.pop('password', None)
        records.append(entry)
    return _success(records, 'all_students', 200)
def enrollment_detail(request, enrid):
    """GET: one enrollment by id; POST: update it; 404 when *enrid* is unknown."""
    try:
        enroll = Enrollment.objects.get(id=enrid)
    except ObjectDoesNotExist:
        return _failure(404)
    if request.method == 'GET':
        data = model_to_dict(enroll)
        # Expose the human-readable choice label, not the stored value.
        data['enroll_status'] = enroll.get_enroll_status_display()
        return _success(data, 'enrollment', 200)
    elif request.method == 'POST':
        # NOTE(review): request.POST values are always strings, while
        # .student.id / .course.id may be int-typed fields -- if so these
        # comparisons can never be True and the update path is unreachable.
        # Confirm the id field types in models.py.
        credential1 = (request.POST.get('student') == enroll.student.id)
        credential2 = (request.POST.get('course') == enroll.course.id)
        if credential1 and credential2:
            form = EnrollmentForm(request.POST, instance=enroll)
            if form.is_valid():
                form.save()
                # Re-fetch so the response reflects the saved row.
                enroll = Enrollment.objects.get(
                    student=request.POST.get('student'),
                    course=request.POST.get('course'),
                )
                data = model_to_dict(enroll)
                data['enroll_status'] = enroll.get_enroll_status_display()
                return _success(data, 'enrollment', 202)
    return _failure(400)
def enrollment_create(request):
    """POST: enroll a student in a course unless the pairing already exists."""
    if request.method == 'POST':
        exist = True
        try:
            Enrollment.objects.get(
                student=request.POST.get('student'),
                course=request.POST.get('course'),
            )
        # ValueError added for parity with the other views: malformed ids in
        # the lookup should mean "no such enrollment", not a server error.
        except (ObjectDoesNotExist, ValueError):
            exist = False
        if not exist:
            form = EnrollmentForm(request.POST)
            if form.is_valid():
                form.save()
                # Re-fetch to serialize the stored row (including its pk).
                enroll = Enrollment.objects.get(
                    student=request.POST.get('student'),
                    course=request.POST.get('course'),
                )
                data = model_to_dict(enroll)
                data['enroll_status'] = enroll.get_enroll_status_display()
                return _success(data, 'enrollment', 201)
    return _failure(400)
def enrollment_delete(request):
    """POST: delete one enrollment by id; 400 on anything else."""
    if request.method == 'POST':
        try:
            enr = Enrollment.objects.get(id=request.POST.get('id'))
        except (ObjectDoesNotExist, ValueError):
            enr = None
        if enr is not None:
            # Delete the instance we already fetched instead of issuing a
            # second filtered query for the same id.
            enr.delete()
            return JsonResponse({'status_code': 202})
    return _failure(400)
def enrollment_all(request):
    """GET: list every enrollment with a readable status label."""
    if request.method != 'GET':
        return _failure(400)
    records = []
    for enr in Enrollment.objects.all():
        entry = model_to_dict(enr)
        entry['enroll_status'] = enr.get_enroll_status_display()
        records.append(entry)
    return _success(records, 'all_enrollments', 200)
| bsd-3-clause |
mayk93/python_koans | python2/koans/about_with_statements.py | 69 | 3407 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutSandwichCode in the Ruby Koans
#
from runner.koan import *
import re # For regular expression string comparisons
class AboutWithStatements(Koan):
    """Koan exercises on context managers and the `with` statement.

    NOTE: the `__` placeholders and the `pass` in find_line2 are intentional
    blanks for the student to fill in -- do not "fix" them.
    """

    def count_lines(self, file_name):
        # Classic "sandwich code": try/finally guarantees the file is closed.
        try:
            f = open(file_name)
            try:
                return len(f.readlines())
            finally:
                f.close()
        except IOError:
            # should never happen
            self.fail()

    def test_counting_lines(self):
        self.assertEqual(__, self.count_lines("example_file.txt"))

    # ------------------------------------------------------------------

    def find_line(self, file_name):
        # Same bread (open/close), different meat (search instead of count).
        try:
            f = open(file_name)
            try:
                for line in f.readlines():
                    match = re.search('e', line)
                    if match:
                        return line
            finally:
                f.close()
        except IOError:
            # should never happen
            self.fail()

    def test_finding_lines(self):
        self.assertEqual(__, self.find_line("example_file.txt"))

    ## ------------------------------------------------------------------
    ## THINK ABOUT IT:
    ##
    ## The count_lines and find_line are similar, and yet different.
    ## They both follow the pattern of "sandwich code".
    ##
    ## Sandwich code is code that comes in three parts: (1) the top slice
    ## of bread, (2) the meat, and (3) the bottom slice of bread.
    ## The bread part of the sandwich almost always goes together, but
    ## the meat part changes all the time.
    ##
    ## Because the changing part of the sandwich code is in the middle,
    ## abstracting the top and bottom bread slices to a library can be
    ## difficult in many languages.
    ##
    ## (Aside for C++ programmers: The idiom of capturing allocated
    ## pointers in a smart pointer constructor is an attempt to deal with
    ## the problem of sandwich code for resource allocation.)
    ##
    ## Python solves the problem using Context Managers. Consider the
    ## following code:
    ##

    class FileContextManager():
        # Minimal hand-written context manager: __enter__ opens, __exit__ closes.
        def __init__(self, file_name):
            self._file_name = file_name
            self._file = None

        def __enter__(self):
            self._file = open(self._file_name)
            return self._file

        def __exit__(self, cls, value, tb):
            self._file.close()

    # Now we write:

    def count_lines2(self, file_name):
        with self.FileContextManager(file_name) as f:
            return len(f.readlines())

    def test_counting_lines2(self):
        self.assertEqual(__, self.count_lines2("example_file.txt"))

    # ------------------------------------------------------------------

    def find_line2(self, file_name):
        # Rewrite find_line using the Context Manager.
        pass

    def test_finding_lines2(self):
        self.assertEqual(__, self.find_line2("example_file.txt"))
        self.assertNotEqual(None, self.find_line2("example_file.txt"))

    # ------------------------------------------------------------------

    def count_lines3(self, file_name):
        with open(file_name) as f:
            return len(f.readlines())

    def test_open_already_has_its_own_built_in_context_manager(self):
        self.assertEqual(__, self.count_lines3("example_file.txt"))
| mit |
havard024/prego | venv/lib/python2.7/site-packages/unidecode/x07f.py | 252 | 4664 | data = (
'Zhui ', # 0x00
'Zi ', # 0x01
'Ke ', # 0x02
'Xiang ', # 0x03
'Jian ', # 0x04
'Mian ', # 0x05
'Lan ', # 0x06
'Ti ', # 0x07
'Miao ', # 0x08
'Qi ', # 0x09
'Yun ', # 0x0a
'Hui ', # 0x0b
'Si ', # 0x0c
'Duo ', # 0x0d
'Duan ', # 0x0e
'Bian ', # 0x0f
'Xian ', # 0x10
'Gou ', # 0x11
'Zhui ', # 0x12
'Huan ', # 0x13
'Di ', # 0x14
'Lu ', # 0x15
'Bian ', # 0x16
'Min ', # 0x17
'Yuan ', # 0x18
'Jin ', # 0x19
'Fu ', # 0x1a
'Ru ', # 0x1b
'Zhen ', # 0x1c
'Feng ', # 0x1d
'Shuai ', # 0x1e
'Gao ', # 0x1f
'Chan ', # 0x20
'Li ', # 0x21
'Yi ', # 0x22
'Jian ', # 0x23
'Bin ', # 0x24
'Piao ', # 0x25
'Man ', # 0x26
'Lei ', # 0x27
'Ying ', # 0x28
'Suo ', # 0x29
'Mou ', # 0x2a
'Sao ', # 0x2b
'Xie ', # 0x2c
'Liao ', # 0x2d
'Shan ', # 0x2e
'Zeng ', # 0x2f
'Jiang ', # 0x30
'Qian ', # 0x31
'Zao ', # 0x32
'Huan ', # 0x33
'Jiao ', # 0x34
'Zuan ', # 0x35
'Fou ', # 0x36
'Xie ', # 0x37
'Gang ', # 0x38
'Fou ', # 0x39
'Que ', # 0x3a
'Fou ', # 0x3b
'Kaakeru ', # 0x3c
'Bo ', # 0x3d
'Ping ', # 0x3e
'Hou ', # 0x3f
'[?] ', # 0x40
'Gang ', # 0x41
'Ying ', # 0x42
'Ying ', # 0x43
'Qing ', # 0x44
'Xia ', # 0x45
'Guan ', # 0x46
'Zun ', # 0x47
'Tan ', # 0x48
'Chang ', # 0x49
'Qi ', # 0x4a
'Weng ', # 0x4b
'Ying ', # 0x4c
'Lei ', # 0x4d
'Tan ', # 0x4e
'Lu ', # 0x4f
'Guan ', # 0x50
'Wang ', # 0x51
'Wang ', # 0x52
'Gang ', # 0x53
'Wang ', # 0x54
'Han ', # 0x55
'[?] ', # 0x56
'Luo ', # 0x57
'Fu ', # 0x58
'Mi ', # 0x59
'Fa ', # 0x5a
'Gu ', # 0x5b
'Zhu ', # 0x5c
'Ju ', # 0x5d
'Mao ', # 0x5e
'Gu ', # 0x5f
'Min ', # 0x60
'Gang ', # 0x61
'Ba ', # 0x62
'Gua ', # 0x63
'Ti ', # 0x64
'Juan ', # 0x65
'Fu ', # 0x66
'Lin ', # 0x67
'Yan ', # 0x68
'Zhao ', # 0x69
'Zui ', # 0x6a
'Gua ', # 0x6b
'Zhuo ', # 0x6c
'Yu ', # 0x6d
'Zhi ', # 0x6e
'An ', # 0x6f
'Fa ', # 0x70
'Nan ', # 0x71
'Shu ', # 0x72
'Si ', # 0x73
'Pi ', # 0x74
'Ma ', # 0x75
'Liu ', # 0x76
'Ba ', # 0x77
'Fa ', # 0x78
'Li ', # 0x79
'Chao ', # 0x7a
'Wei ', # 0x7b
'Bi ', # 0x7c
'Ji ', # 0x7d
'Zeng ', # 0x7e
'Tong ', # 0x7f
'Liu ', # 0x80
'Ji ', # 0x81
'Juan ', # 0x82
'Mi ', # 0x83
'Zhao ', # 0x84
'Luo ', # 0x85
'Pi ', # 0x86
'Ji ', # 0x87
'Ji ', # 0x88
'Luan ', # 0x89
'Yang ', # 0x8a
'Mie ', # 0x8b
'Qiang ', # 0x8c
'Ta ', # 0x8d
'Mei ', # 0x8e
'Yang ', # 0x8f
'You ', # 0x90
'You ', # 0x91
'Fen ', # 0x92
'Ba ', # 0x93
'Gao ', # 0x94
'Yang ', # 0x95
'Gu ', # 0x96
'Qiang ', # 0x97
'Zang ', # 0x98
'Gao ', # 0x99
'Ling ', # 0x9a
'Yi ', # 0x9b
'Zhu ', # 0x9c
'Di ', # 0x9d
'Xiu ', # 0x9e
'Qian ', # 0x9f
'Yi ', # 0xa0
'Xian ', # 0xa1
'Rong ', # 0xa2
'Qun ', # 0xa3
'Qun ', # 0xa4
'Qian ', # 0xa5
'Huan ', # 0xa6
'Zui ', # 0xa7
'Xian ', # 0xa8
'Yi ', # 0xa9
'Yashinau ', # 0xaa
'Qiang ', # 0xab
'Xian ', # 0xac
'Yu ', # 0xad
'Geng ', # 0xae
'Jie ', # 0xaf
'Tang ', # 0xb0
'Yuan ', # 0xb1
'Xi ', # 0xb2
'Fan ', # 0xb3
'Shan ', # 0xb4
'Fen ', # 0xb5
'Shan ', # 0xb6
'Lian ', # 0xb7
'Lei ', # 0xb8
'Geng ', # 0xb9
'Nou ', # 0xba
'Qiang ', # 0xbb
'Chan ', # 0xbc
'Yu ', # 0xbd
'Gong ', # 0xbe
'Yi ', # 0xbf
'Chong ', # 0xc0
'Weng ', # 0xc1
'Fen ', # 0xc2
'Hong ', # 0xc3
'Chi ', # 0xc4
'Chi ', # 0xc5
'Cui ', # 0xc6
'Fu ', # 0xc7
'Xia ', # 0xc8
'Pen ', # 0xc9
'Yi ', # 0xca
'La ', # 0xcb
'Yi ', # 0xcc
'Pi ', # 0xcd
'Ling ', # 0xce
'Liu ', # 0xcf
'Zhi ', # 0xd0
'Qu ', # 0xd1
'Xi ', # 0xd2
'Xie ', # 0xd3
'Xiang ', # 0xd4
'Xi ', # 0xd5
'Xi ', # 0xd6
'Qi ', # 0xd7
'Qiao ', # 0xd8
'Hui ', # 0xd9
'Hui ', # 0xda
'Xiao ', # 0xdb
'Se ', # 0xdc
'Hong ', # 0xdd
'Jiang ', # 0xde
'Di ', # 0xdf
'Cui ', # 0xe0
'Fei ', # 0xe1
'Tao ', # 0xe2
'Sha ', # 0xe3
'Chi ', # 0xe4
'Zhu ', # 0xe5
'Jian ', # 0xe6
'Xuan ', # 0xe7
'Shi ', # 0xe8
'Pian ', # 0xe9
'Zong ', # 0xea
'Wan ', # 0xeb
'Hui ', # 0xec
'Hou ', # 0xed
'He ', # 0xee
'He ', # 0xef
'Han ', # 0xf0
'Ao ', # 0xf1
'Piao ', # 0xf2
'Yi ', # 0xf3
'Lian ', # 0xf4
'Qu ', # 0xf5
'[?] ', # 0xf6
'Lin ', # 0xf7
'Pen ', # 0xf8
'Qiao ', # 0xf9
'Ao ', # 0xfa
'Fan ', # 0xfb
'Yi ', # 0xfc
'Hui ', # 0xfd
'Xuan ', # 0xfe
'Dao ', # 0xff
)
| mit |
emamd/django-calaccess-raw-data | example/toolbox/management/commands/samplecalaccessrawdata.py | 30 | 2294 | import os
import shutil
from itertools import chain
from optparse import make_option
from clint.textui import progress
from subsample.file_input import FileInput
from subsample.algorithms import two_pass_sample
from calaccess_raw.management.commands import CalAccessCommand
from calaccess_raw import get_download_directory, get_test_download_directory
# Extra optparse flags appended to CalAccessCommand's standard option_list.
custom_options = (
    make_option(
        "--sample-rows",
        action="store",
        dest="samplerows",
        default=1000,
        help="Number of rows to grab from each table"
    ),
)
class Command(CalAccessCommand):
    help = 'Create smaller sampled TSV files for unit tests'
    option_list = CalAccessCommand.option_list + custom_options

    def set_config(self, *args, **options):
        """Cache directories, sample size and verbosity from the CLI options."""
        self.data_dir = get_download_directory()
        self.test_data_dir = get_test_download_directory()
        self.tsv_dir = os.path.join(self.data_dir, "tsv/")
        self.sample_dir = os.path.join(self.test_data_dir, "tsv/")
        self.sample_rows = int(options['samplerows'])
        self.tsv_list = os.listdir(self.tsv_dir)
        self.verbosity = int(options['verbosity'])

    def handle(self, *args, **options):
        """Write a random sample of each source TSV into the test data dir."""
        self.set_config(*args, **options)
        self.header("Sampling %i rows from %s source files" % (
            self.sample_rows,
            len(self.tsv_list),
        ))
        # Make sure sample dir exists and is empty
        os.path.exists(self.test_data_dir) or os.mkdir(self.test_data_dir)
        os.path.exists(self.sample_dir) and shutil.rmtree(self.sample_dir)
        os.mkdir(self.sample_dir)
        # Loop through all the files in the source directory
        for name in progress.bar(self.tsv_list):
            # `in_file` renamed from `file`, which shadowed the builtin.
            in_file = os.path.join(self.tsv_dir, name)
            out_file = os.path.join(self.sample_dir, name)
            if self.verbosity > 2:
                self.log(" Sampling %s" % in_file)
            # Open the file
            fi = FileInput(in_file, True)
            # Generate our sample
            sample = two_pass_sample(fi, sample_size=self.sample_rows)
            # BUG FIX: the output file was opened but never closed, which can
            # lose buffered data; a context manager guarantees the close.
            with open(out_file, 'wb') as out:
                for line in chain(fi.header, sample):
                    out.write(line)
| mit |
dagwieers/ansible | lib/ansible/modules/cloud/hcloud/hcloud_datacenter_facts.py | 4 | 4230 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: hcloud_datacenter_facts
short_description: Gather facts about the Hetzner Cloud datacenters.
version_added: "2.8"
description:
- Gather facts about your Hetzner Cloud datacenters.
author:
- Lukas Kaemmerling (@lkaemmerling)
options:
id:
description:
- The ID of the datacenter you want to get.
type: int
name:
description:
- The name of the datacenter you want to get.
type: str
extends_documentation_fragment: hcloud
"""
EXAMPLES = """
- name: Gather hcloud datacenter facts
local_action:
module: hcloud_datacenter_facts
- name: Print the gathered facts
debug:
var: ansible_facts.hcloud_datacenter_facts
"""
RETURN = """
hcloud_datacenter_facts:
description: The datacenter facts as list
returned: always
type: complex
contains:
id:
description: Numeric identifier of the location
returned: always
type: int
sample: 1937415
name:
description: Name of the location
returned: always
type: str
sample: fsn1-dc8
description:
description: Detail description of the location
returned: always
type: str
sample: Falkenstein DC 8
location:
description: Name of the location where the datacenter resides in
returned: always
type: str
sample: fsn1
city:
description: City of the location
returned: always
type: str
sample: fsn1
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud
try:
from hcloud import APIException
except ImportError:
pass
class AnsibleHcloudDatacenterFacts(Hcloud):
    """Collects Hetzner Cloud datacenter facts via the hcloud client."""

    def __init__(self, module):
        Hcloud.__init__(self, module, "hcloud_datacenter_facts")
        # Populated by get_datacenters(); a list of hcloud datacenter objects.
        self.hcloud_datacenter_facts = None

    def _prepare_result(self):
        """Convert the fetched datacenter objects into plain serializable dicts.

        NOTE(review): the module's RETURN docs advertise a `city` field, but
        only id/name/description/location are emitted here -- confirm which
        side is authoritative.
        """
        tmp = []

        for datacenter in self.hcloud_datacenter_facts:
            if datacenter is not None:
                tmp.append({
                    "id": to_native(datacenter.id),
                    "name": to_native(datacenter.name),
                    "description": to_native(datacenter.description),
                    "location": to_native(datacenter.location.name)
                })

        return tmp

    def get_datacenters(self):
        """Fetch one datacenter by id or name, or all when neither is given."""
        try:
            if self.module.params.get("id") is not None:
                self.hcloud_datacenter_facts = [self.client.datacenters.get_by_id(
                    self.module.params.get("id")
                )]
            elif self.module.params.get("name") is not None:
                self.hcloud_datacenter_facts = [self.client.datacenters.get_by_name(
                    self.module.params.get("name")
                )]
            else:
                self.hcloud_datacenter_facts = self.client.datacenters.get_all()
        except APIException as e:
            self.module.fail_json(msg=e.message)

    @staticmethod
    def define_module():
        """Build the AnsibleModule describing this module's argument spec."""
        return AnsibleModule(
            argument_spec=dict(
                id={"type": "int"},
                name={"type": "str"},
                **Hcloud.base_module_arguments()
            ),
            supports_check_mode=True,
        )
def main():
    """Module entry point: collect datacenter facts and report them."""
    module = AnsibleHcloudDatacenterFacts.define_module()
    collector = AnsibleHcloudDatacenterFacts(module)
    collector.get_datacenters()
    result = collector.get_result()
    module.exit_json(ansible_facts={
        'hcloud_datacenter_facts': result['hcloud_datacenter_facts'],
    })


if __name__ == "__main__":
    main()
| gpl-3.0 |
espadrine/opera | chromium/src/third_party/chromite/buildbot/cbuildbot_unittest.py | 4 | 20464 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for build stages."""
import glob
import mox
import optparse
import os
import sys
import constants
sys.path.insert(0, constants.SOURCE_ROOT)
from chromite.buildbot import cbuildbot_commands as commands
from chromite.buildbot import cbuildbot_config as config
from chromite.buildbot import cbuildbot_stages as stages
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.scripts import cbuildbot
# pylint: disable=W0212,R0904
class TestExitedException(Exception):
  """Exception used by sys.exit() mock to halt execution."""
class TestHaltedException(Exception):
  """Exception used by mocks to halt execution without indicating failure."""
class TestFailedException(Exception):
  """Exception used by mocks to halt execution and indicate failure."""
class RunBuildStagesTest(cros_test_lib.MoxTempDirTestCase):
  """Tests that SimpleBuilder.Run() manages CHROMEOS_OFFICIAL correctly."""

  def setUp(self):
    self.buildroot = os.path.join(self.tempdir, 'buildroot')
    osutils.SafeMakedirs(self.buildroot)
    # Always stub RunCommmand out as we use it in every method.
    self.bot_id = 'x86-generic-paladin'
    self.build_config = config.config[self.bot_id]
    self.build_config['master'] = False
    self.build_config['important'] = False

    # Use the cbuildbot parser to create properties and populate default values.
    self.parser = cbuildbot._CreateParser()
    argv = ['-r', self.buildroot, '--buildbot', '--debug',
            'x86-generic-paladin']
    (self.options, _) = cbuildbot._ParseCommandLine(self.parser, argv)

    self.options.bootstrap = False
    self.options.clean = False
    self.options.resume = False
    self.options.sync = False
    self.options.build = False
    self.options.uprev = False
    self.options.tests = False
    self.options.archive = False
    self.options.remote_test_status = False
    self.options.patches = None
    self.options.prebuilts = False

    self.mox.StubOutWithMock(stages.SyncStage, 'HandleSkip')
    stages.SyncStage.HandleSkip()

  def _MockBuilderRunCommands(self):
    """Record the two RunCommand calls SimpleBuilder.Run() is expected to make."""
    self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')

    # The re-exec API version probe...
    api = self.mox.CreateMock(cros_build_lib.CommandResult)
    api.returncode = 0
    api.output = constants.REEXEC_API_VERSION
    cros_build_lib.RunCommand(
        [constants.PATH_TO_CBUILDBOT, '--reexec-api-version'],
        cwd=self.buildroot, redirect_stderr=True, redirect_stdout=True,
        error_code_ok=True).AndReturn(api)

    # ...followed by the re-exec of cbuildbot itself.
    result = self.mox.CreateMock(cros_build_lib.CommandResult)
    result.returncode = 0
    cros_build_lib.RunCommand(mox.IgnoreArg(), cwd=self.buildroot,
                              error_code_ok=True,
                              kill_timeout=mox.IgnoreArg()).AndReturn(result)

    self.mox.ReplayAll()

  def _CheckChromeosOfficial(self, official, expect_set):
    """Run SimpleBuilder with chromeos_official=official; verify the env var.

    Args:
      official: value for the build config's chromeos_official flag.
      expect_set: whether CHROMEOS_OFFICIAL should be present afterwards.
    """
    self.build_config['chromeos_official'] = official
    # Start from a clean environment, and clean up even if an assertion
    # fails (the previous version leaked the variable on test failure).
    os.environ.pop('CHROMEOS_OFFICIAL', None)
    try:
      self._MockBuilderRunCommands()
      self.assertFalse('CHROMEOS_OFFICIAL' in os.environ)
      cbuildbot.SimpleBuilder(self.options, self.build_config).Run()
      self.assertEqual(expect_set, 'CHROMEOS_OFFICIAL' in os.environ)
      self.mox.VerifyAll()
    finally:
      os.environ.pop('CHROMEOS_OFFICIAL', None)

  def testChromeosOfficialSet(self):
    """Verify that CHROMEOS_OFFICIAL is set correctly."""
    self._CheckChromeosOfficial(official=True, expect_set=True)

  def testChromeosOfficialNotSet(self):
    """Verify that CHROMEOS_OFFICIAL is not always set."""
    self._CheckChromeosOfficial(official=False, expect_set=False)
class LogTest(cros_test_lib.MoxTestCase):
  """Tests for cbuildbot's log rotation helper (_BackupPreviousLog)."""

  def _generateLogs(self, num):
    """Generates cbuildbot.log and num backups."""
    # cbuildbot.log holds the "current" run; backups count up from .1, and
    # each file's contents encode its expected rotation index.
    with open(os.path.join(self.tempdir, 'cbuildbot.log'), 'w') as f:
      f.write(str(num + 1))
    for i in range(1, num + 1):
      with open(os.path.join(self.tempdir, 'cbuildbot.log.' + str(i)),
                'w') as f:
        f.write(str(i))

  @osutils.TempDirDecorator
  def testZeroToOneLogs(self):
    """Test beginning corner case."""
    self._generateLogs(0)
    cbuildbot._BackupPreviousLog(os.path.join(self.tempdir, 'cbuildbot.log'),
                                 backup_limit=25)
    with open(os.path.join(self.tempdir, 'cbuildbot.log.1')) as f:
      self.assertEquals(f.readline(), '1')

  @osutils.TempDirDecorator
  def testNineToTenLogs(self):
    """Test handling *.log.9 to *.log.10 (correct sorting)."""
    self._generateLogs(9)
    cbuildbot._BackupPreviousLog(os.path.join(self.tempdir, 'cbuildbot.log'),
                                 backup_limit=25)
    with open(os.path.join(self.tempdir, 'cbuildbot.log.10')) as f:
      self.assertEquals(f.readline(), '10')

  @osutils.TempDirDecorator
  def testOverLimit(self):
    """Test going over the limit and having to purge old logs."""
    self._generateLogs(25)
    cbuildbot._BackupPreviousLog(os.path.join(self.tempdir, 'cbuildbot.log'),
                                 backup_limit=25)
    with open(os.path.join(self.tempdir, 'cbuildbot.log.26')) as f:
      self.assertEquals(f.readline(), '26')
    # After rotation, the total number of cbuildbot* files must stay at the
    # backup limit.
    self.assertEquals(len(glob.glob(os.path.join(self.tempdir, 'cbuildbot*'))),
                      25)
class InterfaceTest(cros_test_lib.MoxTestCase):
  _X86_PREFLIGHT = 'x86-generic-paladin'  # any valid bot config name
  _BUILD_ROOT = '/b/test_build1'

  def setUp(self):
    self.parser = cbuildbot._CreateParser()

  def assertDieSysExit(self, *args, **kwargs):
    """Assert that invoking args[0](*args[1:]) aborts via cros_build_lib.Die."""
    self.assertRaises(cros_build_lib.DieSystemExit, *args, **kwargs)
  # Let this test run for a max of 30s; if it takes longer, then it's
  # likely that there is an exec loop in the pathways.
  @cros_build_lib.TimeoutDecorator(30)
  def testDepotTools(self):
    """Test that the entry point used by depot_tools works."""
    # NOTE: exercises the real cbuildbot wrapper scripts on disk, so it
    # requires a full source checkout to run.
    path = os.path.join(constants.SOURCE_ROOT, 'chromite', 'buildbot',
                        'cbuildbot')

    # Verify the tests below actually are testing correct behaviour;
    # specifically that it doesn't always just return 0.
    self.assertRaises(cros_build_lib.RunCommandError,
                      cros_build_lib.RunCommandCaptureOutput,
                      ['cbuildbot', '--monkeys'], cwd=constants.SOURCE_ROOT)

    # Validate depot_tools lookup.
    cros_build_lib.RunCommandCaptureOutput(
        ['cbuildbot', '--help'], cwd=constants.SOURCE_ROOT)

    # Validate buildbot invocation pathway.
    cros_build_lib.RunCommandCaptureOutput(
        [path, '--help'], cwd=constants.SOURCE_ROOT)
  # --- Command-line flag parsing behavior ----------------------------------
  # NOTE(review): assertEquals is a deprecated alias of assertEqual.

  def testDebugBuildBotSetByDefault(self):
    """Test that debug and buildbot flags are set by default."""
    args = ['--local', '-r', self._BUILD_ROOT, self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.debug, True)
    self.assertEquals(options.buildbot, False)

  def testBuildBotOption(self):
    """Test that --buildbot option unsets debug flag."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.debug, False)
    self.assertEquals(options.buildbot, True)

  def testBuildBotWithDebugOption(self):
    """Test that --debug option overrides --buildbot option."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', '--debug',
            self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.debug, True)
    self.assertEquals(options.buildbot, True)

  def testLocalTrybotWithSpacesInPatches(self):
    """Test that we handle spaces in patch arguments."""
    args = ['-r', self._BUILD_ROOT, '--remote', '--local-patches',
            ' proj:br \t  proj2:b2 ',
            self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.local_patches, ['proj:br', 'proj2:b2'])

  def testBuildBotWithRemotePatches(self):
    """Test that --buildbot errors out with patches."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', '-g', '1234',
            self._X86_PREFLIGHT]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testRemoteBuildBotWithRemotePatches(self):
    """Test that --buildbot and --remote errors out with patches."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', '--remote', '-g', '1234',
            self._X86_PREFLIGHT]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testBuildbotDebugWithPatches(self):
    """Test we can test patches with --buildbot --debug."""
    args = ['--remote', '-g', '1234', '--debug', '--buildbot',
            self._X86_PREFLIGHT]
    cbuildbot._ParseCommandLine(self.parser, args)

  def testBuildBotWithoutProfileOption(self):
    """Test that no --profile option gets defaulted."""
    args = ['--buildbot', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.profile, None)

  def testBuildBotWithProfileOption(self):
    """Test that --profile option gets parsed."""
    args = ['--buildbot', '--profile', 'carp', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.profile, 'carp')
  # --- ValidateClobber safety checks ---------------------------------------

  def testValidateClobberUserDeclines_1(self):
    """Test case where user declines in prompt."""
    # Stub the filesystem probe and the interactive prompt via mox.
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(cros_build_lib, 'GetInput')

    os.path.exists(self._BUILD_ROOT).AndReturn(True)
    cros_build_lib.GetInput(mox.IgnoreArg()).AndReturn('No')

    self.mox.ReplayAll()
    self.assertFalse(commands.ValidateClobber(self._BUILD_ROOT))
    self.mox.VerifyAll()

  def testValidateClobberUserDeclines_2(self):
    """Test case where user does not enter the full 'yes' pattern."""
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(cros_build_lib, 'GetInput')

    # First answer is unrecognized, so the prompt repeats before declining.
    os.path.exists(self._BUILD_ROOT).AndReturn(True)
    cros_build_lib.GetInput(mox.IgnoreArg()).AndReturn('asdf')
    cros_build_lib.GetInput(mox.IgnoreArg()).AndReturn('No')

    self.mox.ReplayAll()
    self.assertFalse(commands.ValidateClobber(self._BUILD_ROOT))
    self.mox.VerifyAll()

  def testValidateClobberProtectRunningChromite(self):
    """User should not be clobbering our own source."""
    cwd = os.path.dirname(os.path.realpath(__file__))
    buildroot = os.path.dirname(cwd)
    self.assertDieSysExit(commands.ValidateClobber, buildroot)

  def testValidateClobberProtectRoot(self):
    """User should not be clobbering /"""
    self.assertDieSysExit(commands.ValidateClobber, '/')
def testBuildBotWithBadChromeRevOption(self):
"""chrome_rev can't be passed an invalid option after chrome_root."""
args = ['--local',
'--buildroot=/tmp',
'--chrome_root=.',
'--chrome_rev=%s' % constants.CHROME_REV_TOT,
self._X86_PREFLIGHT]
self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)
def testBuildBotWithBadChromeRootOption(self):
"""chrome_root can't get passed after non-local chrome_rev."""
args = ['--local',
'--buildroot=/tmp',
'--chrome_rev=%s' % constants.CHROME_REV_TOT,
'--chrome_root=.',
self._X86_PREFLIGHT]
self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)
def testBuildBotWithBadChromeRevOptionLocal(self):
"""chrome_rev can't be local without chrome_root."""
args = ['--local',
'--buildroot=/tmp',
'--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
self._X86_PREFLIGHT]
self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)
  def testBuildBotWithGoodChromeRootOption(self):
    """chrome_root can be set without chrome_rev."""
    args = ['--local',
            '--buildroot=/tmp',
            '--chrome_root=.',
            self._X86_PREFLIGHT]
    self.mox.ReplayAll()
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.mox.VerifyAll()
    # Supplying --chrome_root alone implicitly selects a LOCAL chrome_rev.
    self.assertEquals(options.chrome_rev, constants.CHROME_REV_LOCAL)
    self.assertNotEquals(options.chrome_root, None)
  def testBuildBotWithGoodChromeRevAndRootOption(self):
    """chrome_rev can get reset around chrome_root."""
    # A long sequence of repeated/overridden --chrome_rev flags: each later
    # flag overrides the earlier ones, and --chrome_root resets the rev so a
    # final LOCAL rev after it is valid.
    args = ['--local',
            '--buildroot=/tmp',
            '--chrome_rev=%s' % constants.CHROME_REV_LATEST,
            '--chrome_rev=%s' % constants.CHROME_REV_STICKY,
            '--chrome_rev=%s' % constants.CHROME_REV_TOT,
            '--chrome_rev=%s' % constants.CHROME_REV_TOT,
            '--chrome_rev=%s' % constants.CHROME_REV_STICKY,
            '--chrome_rev=%s' % constants.CHROME_REV_LATEST,
            '--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
            '--chrome_root=.',
            '--chrome_rev=%s' % constants.CHROME_REV_TOT,
            '--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
            self._X86_PREFLIGHT]
    self.mox.ReplayAll()
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.mox.VerifyAll()
    # The last flag wins: chrome_rev is LOCAL and chrome_root is set.
    self.assertEquals(options.chrome_rev, constants.CHROME_REV_LOCAL)
    self.assertNotEquals(options.chrome_root, None)
def testPassThroughOptions(self):
"""Test we are building up pass-through list properly."""
args = ['--remote', '-g', '1234', self._X86_PREFLIGHT]
(options, args) = cbuildbot._ParseCommandLine(self.parser, args)
self.assertEquals(options.pass_through_args, ['-g', '1234'])
def testDebugPassThrough(self):
"""Test we are passing --debug through."""
args = ['--remote', '--debug', '--buildbot', self._X86_PREFLIGHT]
(options, args) = cbuildbot._ParseCommandLine(self.parser, args)
self.assertEquals(options.pass_through_args, ['--debug', '--buildbot'])
class FullInterfaceTest(cros_test_lib.MoxTempDirTestCase):
  """Tests that run the cbuildbot.main() function directly.

  Note this explicitly suppresses automatic VerifyAll() calls; thus if you want
  that checked, you have to invoke it yourself.
  """

  mox_suppress_verify_all = True

  def MakeTestRootDir(self, relpath):
    """Create (and return the absolute path of) a dir under the temp root."""
    abspath = os.path.join(self.root, relpath)
    os.makedirs(abspath)
    return abspath

  def setUp(self):
    # Lay out a fake source/build tree under the per-test temp dir.
    self.root = self.tempdir
    self.buildroot = self.MakeTestRootDir('build_root')
    self.sourceroot = self.MakeTestRootDir('source_root')
    self.trybot_root = self.MakeTestRootDir('trybot')
    self.trybot_internal_root = self.MakeTestRootDir('trybot-internal')
    self.external_marker = os.path.join(self.trybot_root, '.trybot')
    self.internal_marker = os.path.join(self.trybot_internal_root, '.trybot')
    os.makedirs(os.path.join(self.sourceroot, '.repo', 'manifests'))
    os.makedirs(os.path.join(self.sourceroot, '.repo', 'repo'))
    # Create the parser before we stub out os.path.exists() - which the parser
    # creation code actually uses.
    parser = cbuildbot._CreateParser()
    # Stub out all relevant methods regardless of whether they are called in the
    # specific test case. We can do this because we don't run VerifyAll() at
    # the end of every test.
    self.mox.StubOutWithMock(optparse.OptionParser, 'error')
    self.mox.StubOutWithMock(cros_build_lib, 'IsInsideChroot')
    self.mox.StubOutWithMock(cbuildbot, '_CreateParser')
    self.mox.StubOutWithMock(sys, 'exit')
    self.mox.StubOutWithMock(cros_build_lib, 'GetInput')
    self.mox.StubOutWithMock(cbuildbot, '_RunBuildStagesWrapper')
    # Default expectations: exits are turned into exceptions so tests can
    # assert on them, and the real build-stage runner is never invoked.
    parser.error(mox.IgnoreArg()).InAnyOrder().AndRaise(TestExitedException())
    cros_build_lib.IsInsideChroot().InAnyOrder().AndReturn(False)
    cbuildbot._CreateParser().InAnyOrder().AndReturn(parser)
    sys.exit(mox.IgnoreArg()).InAnyOrder().AndRaise(TestExitedException())
    cbuildbot._RunBuildStagesWrapper(
        mox.IgnoreArg(),
        mox.IgnoreArg()).InAnyOrder().AndReturn(True)

  def assertMain(self, args, common_options=True):
    """Invoke cbuildbot.main() with args, optionally adding common flags."""
    if common_options:
      # Suppress cgroups code. For cbuildbot invocation, it doesn't hugely
      # care about cgroups- that's a blackbox to it. As such these unittests
      # should not be sensitive to it.
      args.extend(['--sourceroot', self.sourceroot, '--nocgroups',
                   '--notee'])
    return cbuildbot.main(args)

  def testNullArgsStripped(self):
    """Test that null args are stripped out and don't cause error."""
    self.mox.ReplayAll()
    self.assertMain(['--local', '-r', self.buildroot, '', '',
                     'x86-generic-paladin'])

  def testMultipleConfigsError(self):
    """Test that multiple configs cause error if --remote is not used."""
    self.mox.ReplayAll()
    self.assertRaises(cros_build_lib.DieSystemExit, self.assertMain,
                      ['--local',
                       '-r', self.buildroot,
                       'arm-generic-paladin',
                       'x86-generic-paladin'])

  def testDontInferBuildrootForBuildBotRuns(self):
    """Test that we don't infer buildroot if run with --buildbot option."""
    self.mox.ReplayAll()
    self.assertRaises(TestExitedException, self.assertMain,
                      ['--buildbot', 'x86-generic-paladin'])

  def testInferExternalBuildRoot(self):
    """Test that we default to correct buildroot for external config."""
    self.mox.StubOutWithMock(cbuildbot, '_ConfirmBuildRoot')
    (cbuildbot._ConfirmBuildRoot(mox.IgnoreArg()).InAnyOrder()
       .AndRaise(TestHaltedException()))
    self.mox.ReplayAll()
    self.assertRaises(TestHaltedException, self.assertMain,
                      ['--local', 'x86-generic-paladin'])

  def testInferInternalBuildRoot(self):
    """Test that we default to correct buildroot for internal config."""
    self.mox.StubOutWithMock(cbuildbot, '_ConfirmBuildRoot')
    (cbuildbot._ConfirmBuildRoot(mox.IgnoreArg()).InAnyOrder()
       .AndRaise(TestHaltedException()))
    self.mox.ReplayAll()
    self.assertRaises(TestHaltedException, self.assertMain,
                      ['--local', 'mario-paladin'])

  def testInferBuildRootPromptNo(self):
    """Test that a 'no' answer on the prompt halts execution."""
    cros_build_lib.GetInput(mox.IgnoreArg()).InAnyOrder().AndReturn('no')
    self.mox.ReplayAll()
    self.assertRaises(TestExitedException, self.assertMain,
                      ['--local', 'x86-generic-paladin'])

  def testInferBuildRootExists(self):
    """Test that we don't prompt the user if buildroot already exists."""
    # Touch the marker file so the buildroot looks already initialized; any
    # prompt would be a bug, so GetInput is armed to fail the test.
    cros_build_lib.RunCommandCaptureOutput(['touch', self.external_marker])
    os.utime(self.external_marker, None)
    (cros_build_lib.GetInput(mox.IgnoreArg()).InAnyOrder()
       .AndRaise(TestFailedException()))
    self.mox.ReplayAll()
    self.assertMain(['--local', 'x86-generic-paladin'])

  def testBuildbotDiesInChroot(self):
    """Buildbot should quit if run inside a chroot."""
    # Need to do this since a cros_build_lib.IsInsideChroot() call is already
    # queued up in setup() and we can't Reset() an individual mock.
    # pylint: disable=E1102
    new_is_inside_chroot = self.mox.CreateMockAnything()
    new_is_inside_chroot().InAnyOrder().AndReturn(True)
    cros_build_lib.IsInsideChroot = new_is_inside_chroot
    self.mox.ReplayAll()
    self.assertRaises(cros_build_lib.DieSystemExit, self.assertMain,
                      ['--local', '-r', self.buildroot, 'x86-generic-paladin'])
if __name__ == '__main__':
  # Allow running this test module directly.
  cros_test_lib.main()
| bsd-3-clause |
ryanahall/django | tests/gis_tests/gis_migrations/test_commands.py | 52 | 2502 | from __future__ import unicode_literals
from django.core.management import call_command
from django.db import connection
from django.test import TransactionTestCase, skipUnlessDBFeature
@skipUnlessDBFeature("gis_enabled")
class MigrateTests(TransactionTestCase):
    """
    Tests running the migrate command in Geodjango.
    """
    available_apps = ["gis_tests.gis_migrations"]

    def get_table_description(self, table):
        """Return the introspected column description for `table`."""
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)

    def assertTableExists(self, table):
        """Assert that `table` is present in the current database."""
        with connection.cursor() as cursor:
            self.assertIn(table, connection.introspection.table_names(cursor))

    def assertTableNotExists(self, table):
        """Assert that `table` is absent from the current database."""
        with connection.cursor() as cursor:
            self.assertNotIn(table, connection.introspection.table_names(cursor))

    def test_migrate_gis(self):
        """
        Tests basic usage of the migrate command when a model uses Geodjango
        fields. Regression test for ticket #22001:
        https://code.djangoproject.com/ticket/22001

        It's also used to showcase an error in migrations where spatialite is
        enabled and geo tables are renamed resulting in unique constraint
        failure on geometry_columns. Regression for ticket #23030:
        https://code.djangoproject.com/ticket/23030
        """
        # Make sure the right tables exist
        self.assertTableExists("gis_migrations_neighborhood")
        self.assertTableExists("gis_migrations_household")
        self.assertTableExists("gis_migrations_family")
        # Unmigrate everything
        call_command("migrate", "gis_migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("gis_migrations_neighborhood")
        self.assertTableNotExists("gis_migrations_household")
        self.assertTableNotExists("gis_migrations_family")
        # Even geometry columns metadata
        try:
            GeoColumn = connection.ops.geometry_columns()
        except NotImplementedError:
            # Not all GIS backends have geometry columns model
            pass
        else:
            # No stale geometry-column rows may survive the unmigration.
            self.assertEqual(
                GeoColumn.objects.filter(
                    **{'%s__in' % GeoColumn.table_name_col(): ["gis_neighborhood", "gis_household"]}
                ).count(),
                0)
        # Revert the "unmigration"
        call_command("migrate", "gis_migrations", verbosity=0)
| bsd-3-clause |
karthik-suresh/horizon | openstack_dashboard/test/integration_tests/pages/project/data_processing/imageregistrypage.py | 37 | 3406 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class ImageregistryPage(basepage.BaseNavigationPage):
    """Page object for the Data Processing image-registry panel."""

    # Locators for the page's main table and the modal dialogs.
    _image_table_locator = (by.By.CSS_SELECTOR, 'table#image_registry')
    _unregister_form_locator = (by.By.CSS_SELECTOR, 'div.modal-dialog')
    _register_form_locator = (by.By.CSS_SELECTOR, 'div.modal-dialog')

    # Table-level and per-row action names exposed by the image table.
    IMAGE_TABLE_ACTIONS = ("register_image", "unregister_images")
    IMAGE_TABLE_ROW_ACTIONS = {
        tables.ComplexActionRowRegion.PRIMARY_ACTION: "edit_tags",
        tables.ComplexActionRowRegion.SECONDARY_ACTIONS: ("unregister_image",)
    }

    # Column index holding the image name.
    TABLE_IMAGE_COLUMN = 0

    # Field names of the "register image" form.
    REGISTER_FORM_IMAGE = "image"
    REGISTER_FORM_USER_NAME = "user_name"
    REGISTER_FORM_DESCRIPTION = "description"
    REGISTER_FORM_FIELDS = (REGISTER_FORM_IMAGE, REGISTER_FORM_USER_NAME,
                            REGISTER_FORM_DESCRIPTION)

    def __init__(self, driver, conf):
        super(ImageregistryPage, self).__init__(driver, conf)
        self._page_title = "Data Processing"

    def _get_row_with_image_name(self, name):
        """Return the table row whose image-name column equals `name`."""
        return self.image_table.get_row(self.TABLE_IMAGE_COLUMN, name)

    @property
    def image_table(self):
        """The image-registry table region."""
        src_elem = self._get_element(*self._image_table_locator)
        return tables.ComplexActionTableRegion(self.driver, self.conf,
                                               src_elem,
                                               self.IMAGE_TABLE_ACTIONS,
                                               self.IMAGE_TABLE_ROW_ACTIONS)

    @property
    def unregister_form(self):
        """The confirmation dialog shown when unregistering images."""
        src_elem = self._get_element(*self._unregister_form_locator)
        return forms.BaseFormRegion(self.driver, self.conf, src_elem)

    @property
    def register_form(self):
        """The "register image" modal form."""
        src_elem = self._get_element(*self._register_form_locator)
        return forms.FormRegion(self.driver, self.conf, src_elem,
                                self.REGISTER_FORM_FIELDS)

    def is_image_registered(self, name):
        """Return True if an image named `name` appears in the table."""
        return bool(self._get_row_with_image_name(name))

    def unregister_image(self, name):
        """Select the named image, trigger unregister, and confirm the dialog."""
        self._get_row_with_image_name(name).mark()
        self.image_table.unregister_images.click()
        self.unregister_form.submit.click()

    def register_image(self, image, user_name, description):
        """Open the register form, fill in all fields, and submit it."""
        self.image_table.register_image.click()
        self.register_form.image.text = image
        self.register_form.user_name.text = user_name
        self.register_form.description.text = description
        self.register_form.submit.click()

    def wait_until_image_registered(self, name):
        """Block until the named image shows up in the registry table."""
        self._wait_until(lambda x: self.is_image_registered(name))
| apache-2.0 |
tensorflow/lingvo | lingvo/core/step.py | 1 | 27037 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract layer for processing sequences step-by-step.
E.g.::
def ProcessSeq(step, external_inputs, input_batch):
prepared_inputs = step.PrepareExternalInputs(
step.theta, external_inputs)
batch_size, T = tf.shape(input_batch.paddings)[:2]
state = step.ZeroState(
step.theta, prepared_inputs, batch_size)
for t in range(T):
step_inputs = input_batch.Transform(lambda x: x[:, t, ...])
step_outputs, state = step.FProp(
step.theta, prepared_inputs, step_inputs, state)
(processing step_outputs...)
"""
import collections
from lingvo import compat as tf
from lingvo.core import base_layer
from lingvo.core import builder_layers
from lingvo.core import py_utils
from lingvo.core import recurrent
class Step(base_layer.BaseLayer):
  """A layer that processes input sequences step-by-step.

  This can be seen as an RNNCell extended with optional external inputs.
  """

  def PrepareExternalInputs(self, theta, external_inputs):
    """Returns the prepared external inputs, e.g., packed_src for attention."""
    if not external_inputs:
      external_inputs = py_utils.NestedMap()
    # Start from a copy so the caller's NestedMap is never mutated.
    packed = external_inputs.DeepCopy()
    for name, child in self.children.items():
      # Each child may consume its own sub-map of external inputs, keyed by
      # the child's name; missing keys default to an empty NestedMap.
      child_external_inputs = external_inputs.get(name, py_utils.NestedMap())
      if isinstance(child, (tuple, list)):
        output = []
        for i, sub in enumerate(child):
          if isinstance(sub, Step):
            output.append(
                sub.PrepareExternalInputs(theta[name][i],
                                          child_external_inputs))
        if output:
          # A list child must be all-Steps or no-Steps; a mix is an error.
          if len(output) != len(child):
            raise ValueError('Expecting child list to be instances of Step.')
          # Preserve the container type (tuple vs list) of the child.
          packed[name] = type(child)(output)
      elif isinstance(child, Step):
        packed[name] = child.PrepareExternalInputs(theta[name],
                                                   child_external_inputs)
    return packed

  def ZeroState(self, theta, prepared_inputs, batch_size):
    """Returns the initial state given external inputs and batch size.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      prepared_inputs: External inputs returned by PrepareExternalInputs().
      batch_size: An int scalar representing the batch size of per-step inputs.

    Returns:
      A `.NestedMap` representing the initial state, which can be passed to
      FProp() for processing the first time step.
    """
    state0 = py_utils.NestedMap()
    # Mirror the child-walking structure of PrepareExternalInputs: recurse
    # into every Step child (or list/tuple of Step children).
    for name, child in self.children.items():
      if isinstance(child, (tuple, list)):
        output = []
        for i, sub in enumerate(child):
          if isinstance(sub, Step):
            output.append(
                sub.ZeroState(theta[name][i], prepared_inputs[name][i],
                              batch_size))
        if output:
          # A list child must be all-Steps or no-Steps; a mix is an error.
          if len(output) != len(child):
            raise ValueError('Expecting child list to be instances of Step.')
          output[name] = type(child)(output)
      elif isinstance(child, Step):
        state0[name] = child.ZeroState(theta[name], prepared_inputs[name],
                                       batch_size)
    return state0

  def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
    """Forward function.

    step_inputs, state0, step_outputs, and state1 should each be a `.NestedMap`
    of tensor values. Each tensor must be of shape [batch_size ...]. The
    structure of NestedMaps are determined by the implementation. state0 and
    state1 must have exactly the same structure and tensor shapes.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      prepared_inputs: External inputs returned by PrepareExternalInputs().
      step_inputs: The inputs for this time step.
      padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this
        batch element is empty in this step.
      state0: The previous recurrent state.

    Returns:
      A tuple (step_outputs, state1).

      - outputs: The outputs of this step.
      - state1: The next recurrent state.
    """
    # Subclasses must implement the per-step computation.
    raise NotImplementedError(type(self))
class StatelessLayerStep(Step):
  """Allows BaseLayer subclasses to be used as Steps.

  Layers used with this class should be stateless: they should not return
  anything that must be passed back in the next invocation.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('layer', None, 'Params for the layer that this step wraps.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = params
    self.CreateChild('layer', p.layer)

  def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
    """Perform inference on a stateless layer.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      prepared_inputs: unused.
      step_inputs: A NestedMap containing 'inputs', which are passed directly to
        the layer.
      padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this
        batch element is empty in this step.
      state0: unused.

    Returns:
      (output, state1), where output is the output of the layer, and
      state1 is an empty NestedMap.
    """
    del state0
    del prepared_inputs
    args = {}
    # Forward `padding` to the wrapped layer only when the caller supplied one.
    if padding is not None:
      args['padding'] = padding
    output = self.layer.FProp(theta.layer, step_inputs.inputs, **args)
    # Stateless: nothing is carried to the next step.
    return output, py_utils.NestedMap()
class StackStep(Step):
  """A stack of steps.

  Each sub-step is assumed to accept step_inputs of type NestedMap(inputs=[])
  and return a primary output of type NestedMap(output=tensor). The
  output of layer n-1 is sent to input of layer n.

  Per-step context vectors and per-sequence context vectors can also be
  supplied; see FProp for more details.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'sub', [], 'A list of sub-stack params. Each layer is '
        'expected to accept its input as NestedMap(inputs=[]), and '
        'produce output as NestedMap(output=tensor). '
        'The external_inputs parameter is passed directly to the '
        'PrepareExternalInputs method of each sub-step. ')
    p.Define(
        'residual_start', -1, 'An index of the layer where residual '
        'connections start. Setting this parameter to a negative value turns '
        'off residual connections.'
        'More precisely, when i >= residual_start, the output of each step '
        'is defined as: '
        'output[i] = output[i - residual_stride] + sub[i](output[i - 1]) '
        'where output[-1] is the step input.')
    p.Define(
        'residual_stride', 1, 'If residual connections are active, this '
        'is the number of layers that each connection skips. For '
        'instance, setting residual_stride = 2 means the output of layer '
        'n is added to layer n + 2')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = params
    self.sub_steps = []
    self.CreateChildren('sub', p.sub)

  def PrepareExternalInputs(self, theta, external_inputs):
    """Delegates external inputs preparation to sub-layers.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      external_inputs: A `.NestedMap` object. The structure of the internal
        fields is defined by the sub-steps.

    Returns:
      A `.NestedMap` containing a pre-processed version of the external_inputs,
      one per sub-step.
    """
    # Note: every sub-step receives the same (whole) external_inputs map.
    packed = py_utils.NestedMap(sub=[])
    for i in range(len(self.sub)):
      packed.sub.append(self.sub[i].PrepareExternalInputs(
          theta.sub[i], external_inputs))
    return packed

  def ZeroState(self, theta, prepared_inputs, batch_size):
    """Computes a zero state for each sub-step.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      prepared_inputs: An output from PrepareExternalInputs.
      batch_size: The number of items in the batch that FProp will process.

    Returns:
      A `.NestedMap` containing a state0 object for each sub-step.
    """
    state = py_utils.NestedMap(sub=[])
    for i in range(len(self.sub)):
      state.sub.append(self.sub[i].ZeroState(theta.sub[i], prepared_inputs,
                                             batch_size))
    return state

  def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
    """Performs inference on the stack of sub-steps.

    There are three possible ways to feed input to the stack:

    * step_inputs.inputs: These tensors are fed only to the lowest layer.
    * step_inputs.context: [Optional] This tensor is fed to every layer.
    * prepared_inputs: [Optional] This tensor is fed to every layer and
      is assumed to stay constant over all steps.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      prepared_inputs: An output from PrepareExternalInputs.
      step_inputs: A `.NestedMap` containing a list called 'inputs', an
        optionally a tensor called 'context'.
      padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this
        batch element is empty in this step.
      state0: The previous recurrent state.

    Returns:
      A tuple (output, state1):

      - output: A `.NestedMap` containing the output of the top-most step.
      - state1: The recurrent state to feed to next invocation of this graph.
    """
    state1 = py_utils.NestedMap(sub=[])
    inputs = list(step_inputs.inputs)
    # We pretend that the input is the output of layer -1 for the purposes
    # of residual connections.
    residual_inputs = [tf.concat(inputs, axis=1)]
    additional = []
    if 'context' in step_inputs:
      # The optional per-step context is appended to every layer's inputs.
      additional.append(step_inputs.context)
    for i in range(len(self.sub)):
      sub_inputs = py_utils.NestedMap(inputs=inputs + additional)
      sub_output, state1_i = self.sub[i].FProp(theta.sub[i],
                                               prepared_inputs.sub[i],
                                               sub_inputs, padding,
                                               state0.sub[i])
      state1.sub.append(state1_i)
      output = sub_output.output
      if i >= self.params.residual_start >= 0:
        # residual_inputs contains the step input at residual_inputs[0].
        assert i + 1 - self.params.residual_stride < len(residual_inputs)
        output += residual_inputs[i + 1 - self.params.residual_stride]
      residual_inputs.append(output)
      # Each layer's (possibly residual-augmented) output feeds the next one.
      inputs = [output]
    return py_utils.NestedMap(output=output), state1
class ParallelStep(Step):
  """Runs many steps on the same input and concatenates their outputs."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'sub', [], 'A list of step params. Each step is '
        'expected to accept its input as NestedMap(inputs=[]), and '
        'produce output as NestedMap(output=tensor). '
        'The external_inputs parameter is passed directly to the '
        'PrepareExternalInputs method of each sub-step. ')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = params
    self.CreateChildren('sub', p.sub)

  def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
    """Performs inference on N steps at once and concatenates the result.

    Every sub-step receives the identical step_inputs and padding; their
    outputs are concatenated along axis 1.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      prepared_inputs: An output from PrepareExternalInputs.
      step_inputs: A `.NestedMap` containing a list called 'inputs'.
      padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this
        batch element is empty in this step.
      state0: The previous recurrent state.

    Returns:
      A tuple (output, state1):

      - output: A `.NestedMap` containing the concatenated sub-step outputs.
      - state1: The recurrent state to feed to next invocation of this graph.
    """
    sub_outputs = []
    sub_states = []
    for sub, sub_theta, sub_prepared, sub_state0 in zip(
        self.sub, theta.sub, prepared_inputs.sub, state0.sub):
      out_i, state1_i = sub.FProp(sub_theta, sub_prepared, step_inputs,
                                  padding, sub_state0)
      sub_outputs.append(out_i)
      sub_states.append(state1_i)
    output = py_utils.NestedMap(output=tf.concat(sub_outputs, axis=1))
    return output, py_utils.NestedMap(sub=sub_states)
# A sub-step specification for GraphStep:
# signature: A GraphSignature string defining the input and output parameters
#   of this step. For example, (inputs=[a,b])->c means that step_inputs
#   should be NestedMap(inputs=[a,b]), and the output of FProp should be
#   stored in c.
# external_signature: A GraphSignature string defining the input to
#   PrepareExternalInputs. For example, 'external_inputs.foo' means that
#   the tensor external_inputs.foo should be the 'external_inputs' parameter
#   when calling PrepareExternalInputs on this sub-step.
# params: The parameters to use when constructing the sub-step.
SubStep = collections.namedtuple('SubStep',
                                 ['signature', 'external_signature', 'params'])
class GraphStep(Step):
  r"""A step that connects sub-steps in a simple data flow graph.

  This is an adaptation of builder_layers.GraphLayer to support steps.

  Params.sub specifies a list of Specs that define each sub-step.

  A spec contains:

  * step_inputs: The signature describing how to assemble the input and output
    for this step. The input part describes the 'step_inputs' parameter,
    while the output part describes the name of the output. The state0
    input and state1 output are handled automatically and should not be
    specified.
  * external_inputs: if this Step requires external_inputs, this
    is the signature describing how to find those inputs.
    This value can also be set to None.
  * params: the params used to construct the sub-step.

  The format of signature strings is defined in detail in the GraphSignature
  class documentation.

  All inputs to a layer must have been produced by some previous layer. No
  cycles are allowed. All outputs must be uniquely named; no overwriting
  of previous names is allowed.

  Example
    ('(act=[layer_0.output,step_inputs.context])->layer_1',
     'external_inputs.extra',
     step_params)

  This constructs the step defined by step_params. Its FProp method will be
  called with {act=[layer_0.output,step_inputs.context]} as the step_inputs
  parameter. Its PrepareExternalInputs method will be called with
  'external_inputs.extra' as the external_inputs parameter. The output of that
  method will be passed to ZeroState and FProp.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('output_signature', '', 'Signature of the step output.')
    p.Define('sub', [], 'A list of SubSteps (defined above).')
    p.Define('dict_type', py_utils.NestedMap, 'Type of nested dicts.')
    return p

  # Internal per-sub-step record: resolved name, parsed signatures, and the
  # constructed child step.
  _seq = collections.namedtuple(
      '_Seq', ['name', 'signature', 'external_signature', 'step'])

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    self._seq = []
    for i, (signature, external_signature, sub_params) in enumerate(p.sub):
      assert signature
      sig = builder_layers.GraphSignature(signature)
      # Each sub-step takes exactly one input map and must name its output.
      assert len(sig.inputs) == 1
      assert sig.outputs
      external_sig = None
      if external_signature:
        external_sig = builder_layers.GraphSignature(external_signature)
        assert len(external_sig.inputs) == 1
        assert not external_sig.outputs
      name = sub_params.name
      if not name:
        # Derive a unique child name from the output name and position.
        name = '%s_%02d' % (sig.outputs[0], i)
        sub_params.name = name
      self.CreateChild(name, sub_params)
      self._seq.append(
          GraphStep._seq(name, sig, external_sig, self.children[name]))
    self.output_signature = builder_layers.GraphSignature(p.output_signature)

  def PrepareExternalInputs(self, theta, external_inputs):
    """Prepares external inputs for each sub-step.

    The external_inputs parameter of this method is processed by the
    external_inputs of each sub-step, then processed by the sub-step's
    PrepareExternalInputs method.

    Args:
      theta: variables used by sub-steps.
      external_inputs: A NestedMap of [n_batch, ...] tensors.

    Returns:
      A NestedMap of prepared inputs, where the keys are the names of
      each sub-step.
    """
    graph_tensors = builder_layers.GraphTensors()
    graph_tensors.StoreTensor('external_inputs', external_inputs)
    prepared_inputs = py_utils.NestedMap()
    with tf.name_scope(self.params.name):
      for seq in self._seq:
        if seq.external_signature:
          # Resolve the signature against the stored tensors to extract this
          # sub-step's slice of the external inputs.
          template = py_utils.NestedMap(inputs=seq.external_signature.inputs)
          packed = template.Transform(graph_tensors.GetTensor)
          seq_external_inputs = packed.inputs[0]
          prepared_inputs[seq.name] = seq.step.PrepareExternalInputs(
              theta[seq.name], seq_external_inputs)
        else:
          prepared_inputs[seq.name] = py_utils.NestedMap()
    return prepared_inputs

  def ZeroState(self, theta, prepared_inputs, batch_size):
    """Creates a zero state NestedMap for this step.

    Args:
      theta: variables used by sub-steps.
      prepared_inputs: Output from a call to PrepareExternalInputs.
      batch_size: The number of items in the batch that FProp will process.

    Returns:
      A NestedMap of ZeroState results for each sub-step.
    """
    state0 = py_utils.NestedMap()
    with tf.name_scope(self.params.name):
      for seq in self._seq:
        state0[seq.name] = seq.step.ZeroState(theta[seq.name],
                                              prepared_inputs[seq.name],
                                              batch_size)
    return state0

  def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
    """A single inference step for this step graph.

    Args:
      theta: variables used by sub-steps.
      prepared_inputs: A NestedMap containing external_inputs that were
        pre-processed by the PrepareExternalInputs method of each sub-step. The
        keys are the names of the sub-steps.
      step_inputs: A NestedMap of [batch, ...] tensors. The structure of this
        depends on the graph implementation.
      padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this
        batch element is empty in this step.
      state0: A NestedMap of state variables produced by either ZeroState or a
        previous invocation of this FProp step. The keys are the names of the
        sub-steps.

    Returns:
      (output, state1), both of which are NestedMaps.
      output is implementation-dependent and is defined by the output_signature
      parameter.
      state1 is a NestedMap where the keys are names of sub-steps and the values
      are state outputs from their FProp methods.
    """
    p = self.params
    graph_tensors = builder_layers.GraphTensors()
    graph_tensors.StoreTensor('prepared_inputs', prepared_inputs)
    graph_tensors.StoreTensor('step_inputs', step_inputs)
    state1 = py_utils.NestedMap()
    with tf.name_scope(p.name):
      # Run sub-steps in declaration order; each may consume tensors produced
      # by any earlier sub-step via its input signature.
      for seq in self._seq:
        tf.logging.vlog(1, 'GraphStep: call %s', seq.name)
        external = None
        if seq.external_signature:
          external = prepared_inputs[seq.name]
        template = py_utils.NestedMap(inputs=seq.signature.inputs)
        packed = template.Transform(graph_tensors.GetTensor)
        input_args = packed.inputs[0]
        out, seq_state1 = seq.step.FProp(theta[seq.name], external, input_args,
                                         padding, state0[seq.name])
        # Publish this sub-step's output under its declared output name.
        graph_tensors.StoreTensor(seq.signature.outputs[0], out)
        state1[seq.name] = seq_state1
    template = py_utils.NestedMap(inputs=self.output_signature.inputs)
    output_tensors = template.Transform(graph_tensors.GetTensor).inputs[0]
    return output_tensors, state1
class IteratorStep(Step):
  """An iterator over the time dimension of some tensors.

  It's common to have a tensor of shape [batch, time, ...] or
  [time, batch, ...]. This object will step through the time dimension,
  producing tensors of shape [batch, ...] in succession.

  The input tensors are passed to PrepareExternalInputs. The step_inputs
  argument of FProp is unused.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('axis', 1, 'The time dimension of the tensors.')
    return p

  def PrepareExternalInputs(self, theta, external_inputs):
    """Prepares the input for iteration.

    Args:
      theta: unused.
      external_inputs: A NestedMap containing tensors. The time axis of each
        tensor should be params.axis.

    Returns:
      A prepared NestedMap (current the same as the input).
    """
    return external_inputs

  def ZeroState(self, theta, prepared_inputs, batch_size):
    """Returns the initial iterator state.

    Args:
      theta: unused.
      prepared_inputs: Output from a call to PrepareExternalInputs.
      batch_size: The number of items in the batch that FProp will process.

    Returns:
      An initial state NestedMap.
    """
    # The whole iterator state is just the current time index t.
    return py_utils.NestedMap(t=tf.constant(0, dtype=tf.int32))

  def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
    """Returns a A single inference step for this step graph.

    Args:
      theta: unused.
      prepared_inputs: Output from a call to PrepareExternalInputs.
      step_inputs: unused.
      padding: unused.
      state0: A NestedMap of state variables produced by either ZeroState or a
        previous invocation of this FProp step.

    Returns:
      (output, state1), both of which are NestedMaps.
      output is implementation-dependent and is defined by the output_signature
      parameter.
      state1 is a NestedMap where the keys are names of sub-steps and the values
      are state outputs from their FProp methods.
    """
    del theta
    del step_inputs
    del padding

    def _Slice(tensor):
      """Return a slice of this tensor at time=state0.t."""
      shape = py_utils.GetShape(tensor)
      # All zeros except for t in the time dimension.
      # e.g. if params.axis=1, begin is [0, t, 0, 0, 0, ...]
      begin = tf.one_hot(self.params.axis, tf.rank(tensor), on_value=state0.t)
      # Same as shape, but with a 1 in the time dimension.
      # e.g. if params.axis=1, shape is [shape[0], 1, shape[2], shape[3], ...]
      size = tf.concat([
          shape[0:self.params.axis],
          tf.constant([1], dtype=tf.int32), shape[self.params.axis + 1:]
      ],
                       axis=0)
      # Make a slice where the time dimension is fixed at state0.t.
      time_slice = tf.slice(tensor, begin, size)
      # Remove the time dimension.
      return tf.squeeze(time_slice, axis=self.params.axis)

    # Slice every tensor in the prepared inputs at the current time index,
    # then advance the index for the next step.
    output = prepared_inputs.Transform(_Slice)
    state1 = py_utils.NestedMap(t=state0.t + 1)
    return output, state1
class RecurrentStepWrapper(base_layer.BaseLayer):
  """A layer that wraps a step in a recurrent.Recurrent call."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('step', None, 'The step params that this class wraps.')
    return p

  def __init__(self, params):
    super().__init__(params)
    self.CreateChild('step', self.params.step)

  def _CreateChildrenVariables(self):
    # Backwards compatibility: manually call child.InstantiateVariables()
    # outside of tf.variable_scope(p.name).
    self.step.InstantiateVariables()
    super()._CreateChildrenVariables()

  def PrepareExternalInputs(self, theta, external_inputs):
    """See Step.PrepareExternalInputs."""
    return self.step.PrepareExternalInputs(theta.step, external_inputs)

  def ZeroState(self, theta, prepared_inputs, batch_size):
    """See Step.ZeroState."""
    return self.step.ZeroState(theta.step, prepared_inputs, batch_size)

  def FProp(self, theta, prepared_inputs, inputs, padding, state0, **kwargs):
    """Runs a Step layer over multiple timesteps using Recurrent.

    Args:
      theta: A NestedMap containing weights' values of this layer and its
        children layers.
      prepared_inputs: External inputs returned by Step.PrepareExternalInputs().
      inputs: A NestedMap of inputs of shape [time, batch_size, dim].
      padding: A 0/1 float tensor of shape [time, batch_size]; 1.0 means that
        this batch element is empty in this step.
      state0: A NestedMap containing the initial recurrent state.
      **kwargs: Additional kwargs to pass to Recurrent.

    Returns:
      A tuple (outputs, state1).

      - outputs: A NestedMap containing the accumulated outputs of all steps,
        containing Tensors shaped [time, batch_size, dim].
      - state1: A NestedMap containing the accumulated recurrent states,
        containing Tensors shaped [time, batch_size, dim].
    """

    def RnnStep(recurrent_theta, recurrent_state0, recurrent_inputs):
      """Compute a single timestep."""
      output, state1 = self.step.FProp(
          theta=recurrent_theta.theta,
          prepared_inputs=recurrent_theta.prepared_inputs,
          step_inputs=recurrent_inputs.inputs,
          padding=recurrent_inputs.padding,
          state0=recurrent_state0.state)
      recurrent_state1 = py_utils.NestedMap(output=output, state=state1)
      return recurrent_state1, py_utils.NestedMap()

    # In order to pass Step outputs through Recurrent, they need to be
    # included as part of state.
    # NOTE(review): timestep 0 is evaluated here only to obtain a template
    # for the 'output' state entry; its result is discarded by Recurrent's
    # first iteration — confirm against recurrent.Recurrent semantics.
    output0, _ = self.step.FProp(theta.step, prepared_inputs,
                                 inputs.Transform(lambda x: x[0]), padding[0],
                                 state0)
    accumulated_states, _ = recurrent.Recurrent(
        theta=py_utils.NestedMap(
            theta=theta.step, prepared_inputs=prepared_inputs),
        state0=py_utils.NestedMap(output=output0, state=state0),
        inputs=py_utils.NestedMap(inputs=inputs, padding=padding),
        cell_fn=RnnStep,
        **kwargs)
    return accumulated_states.output, accumulated_states.state
| apache-2.0 |
richardcs/ansible | lib/ansible/modules/cloud/rackspace/rax_network.py | 91 | 3691 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_network
short_description: create / delete an isolated network in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud isolated network.
version_added: "1.4"
options:
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
label:
description:
- Label (name) to give the network
cidr:
description:
- cidr of the network being created
author:
- "Christopher H. Laco (@claco)"
- "Jesse Keating (@omgjlk)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build an Isolated Network
gather_facts: False
tasks:
- name: Network create request
local_action:
module: rax_network
credentials: ~/.raxpub
label: my-net
cidr: 192.168.3.0/24
state: present
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def cloud_network(module, state, label, cidr):
    """Ensure a Rackspace isolated network is present or absent.

    Exits via module.exit_json() with the resulting network list, or
    module.fail_json() on any pyrax error.
    """
    has_changed = False
    net = None
    results = []

    # A falsy cloud_networks client usually means a bad region name.
    if not pyrax.cloud_networks:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present':
        if not cidr:
            module.fail_json(msg='missing required arguments: cidr')

        try:
            net = pyrax.cloud_networks.find_network_by_label(label)
        except pyrax.exceptions.NetworkNotFound:
            # Network does not exist yet: create it.
            try:
                net = pyrax.cloud_networks.create(label, cidr=cidr)
                has_changed = True
            except Exception as exc:
                module.fail_json(msg='%s' % exc.message)
        except Exception as exc:
            module.fail_json(msg='%s' % exc.message)

    elif state == 'absent':
        try:
            net = pyrax.cloud_networks.find_network_by_label(label)
            net.delete()
            has_changed = True
        except pyrax.exceptions.NetworkNotFound:
            # Already absent: nothing to do.
            pass
        except Exception as exc:
            module.fail_json(msg='%s' % exc.message)

    if net:
        results.append(dict(id=net.id,
                            label=net.label,
                            cidr=net.cidr))

    module.exit_json(changed=has_changed, networks=results)
def main():
    """Module entry point: parse arguments and converge to desired state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present',
                   choices=['present', 'absent']),
        label=dict(required=True),
        cidr=dict(),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    setup_rax_module(module, pyrax)
    cloud_network(module,
                  module.params.get('state'),
                  module.params.get('label'),
                  module.params.get('cidr'))


if __name__ == '__main__':
    main()
| gpl-3.0 |
unhangout/django-channels-presence | channels_presence/models.py | 1 | 4350 | from __future__ import unicode_literals, absolute_import
import json
from datetime import timedelta
from django.db import models
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from channels import Group
from channels_presence.signals import presence_changed
class PresenceManager(models.Manager):
    """Manager helpers for keeping Presence rows in sync with connections."""

    def touch(self, channel_name):
        # Refresh the keepalive timestamp for this reply channel.
        self.filter(channel_name=channel_name).update(last_seen=now())

    def leave_all(self, channel_name):
        # Detach this connection from every room it is currently in,
        # letting the room broadcast the departure.
        query = self.select_related('room').filter(channel_name=channel_name)
        for pres in query:
            pres.room.remove_presence(presence=pres)
@python_2_unicode_compatible
class Presence(models.Model):
    """One open channel connection present in a Room."""

    # The room this connection is present in.
    room = models.ForeignKey('Room', on_delete=models.CASCADE)
    # Reply channel identifying the connection.
    channel_name = models.CharField(max_length=255,
        help_text="Reply channel for connection that is present")
    # Authenticated owner of the connection; NULL for anonymous connections.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
        on_delete=models.CASCADE)
    # Last keepalive; stale rows are removed by Room.prune_presences.
    last_seen = models.DateTimeField(default=now)

    objects = PresenceManager()

    def __str__(self):
        return self.channel_name

    class Meta:
        # A given connection may be present in a room at most once.
        unique_together = [('room', 'channel_name')]
class RoomManager(models.Manager):
    def add(self, room_channel_name, user_channel_name, user=None):
        """Add connection @user_channel_name (optionally owned by @user) to
        the room named @room_channel_name, creating the room if needed."""
        room, created = Room.objects.get_or_create(channel_name=room_channel_name)
        room.add_presence(user_channel_name, user)
        return room

    def remove(self, room_channel_name, user_channel_name):
        """Remove a connection from a room; silently no-op if the room
        does not exist."""
        try:
            room = Room.objects.get(channel_name=room_channel_name)
        except Room.DoesNotExist:
            return
        room.remove_presence(user_channel_name)

    def prune_presences(self, channel_layer=None, age=None):
        """Drop stale presences (older than @age seconds) in every room."""
        # NOTE(review): channel_layer is accepted but never used here —
        # presumably kept for call-site compatibility; confirm with callers.
        for room in Room.objects.all():
            room.prune_presences(age)

    def prune_rooms(self):
        """Delete rooms that no longer contain any presence."""
        Room.objects.filter(presence__isnull=True).delete()
@python_2_unicode_compatible
class Room(models.Model):
    """A presence-tracked group channel; mirrors membership into a
    channels Group and emits presence_changed signals on changes."""

    channel_name = models.CharField(max_length=255, unique=True,
        help_text="Group channel name for this room")

    objects = RoomManager()

    def __str__(self):
        return self.channel_name

    def add_presence(self, channel_name, user=None):
        """Create a Presence for @channel_name (anonymous unless @user is
        authenticated), add it to the Group and broadcast the change."""
        # Check user.is_authenticated for Django 1.10+ and
        # user.is_authenticated() for prior versions.
        # https://docs.djangoproject.com/en/1.11/ref/contrib/auth/#django.contrib.auth.models.User.is_authenticated
        authenticated = user and (
            user.is_authenticated == True or
            (callable(user.is_authenticated) and user.is_authenticated())
        )
        presence, created = Presence.objects.get_or_create(
            room=self,
            channel_name=channel_name,
            user=user if authenticated else None
        )
        if created:
            # Only notify on genuinely new presences; re-adds are no-ops.
            Group(self.channel_name).add(channel_name)
            self.broadcast_changed(added=presence)

    def remove_presence(self, channel_name=None, presence=None):
        """Remove a presence given either its @channel_name or the
        @presence row itself; silently no-op if it does not exist."""
        if presence is None:
            try:
                presence = Presence.objects.get(room=self, channel_name=channel_name)
            except Presence.DoesNotExist:
                return
        Group(self.channel_name).discard(presence.channel_name)
        presence.delete()
        self.broadcast_changed(removed=presence)

    def prune_presences(self, age_in_seconds=None):
        """Delete presences not seen within @age_in_seconds (defaults to
        settings.CHANNELS_PRESENCE_MAX_AGE, else 60)."""
        if age_in_seconds is None:
            age_in_seconds = getattr(settings, "CHANNELS_PRESENCE_MAX_AGE", 60)
        # QuerySet.delete() returns (total deleted, {model: count}).
        num_deleted, num_per_type = Presence.objects.filter(
            room=self,
            last_seen__lt=now() - timedelta(seconds=age_in_seconds)
        ).delete()
        if num_deleted > 0:
            self.broadcast_changed(bulk_change=True)

    def get_users(self):
        """Return distinct authenticated users present in this room."""
        User = get_user_model()
        return User.objects.filter(presence__room=self).distinct()

    def get_anonymous_count(self):
        """Return the number of anonymous (user-less) presences."""
        return self.presence_set.filter(user=None).count()

    def broadcast_changed(self, added=None, removed=None, bulk_change=False):
        # Fan out a presence_changed signal describing the membership delta.
        presence_changed.send(sender=self.__class__,
                              room=self,
                              added=added,
                              removed=removed,
                              bulk_change=bulk_change)
| mit |
rom1sqr/miasm | miasm2/core/asmbloc.py | 1 | 52468 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import logging
import inspect
from collections import namedtuple
import miasm2.expression.expression as m2_expr
from miasm2.expression.simplifications import expr_simp
from miasm2.expression.modint import moduint, modint
from miasm2.core.utils import Disasm_Exception, pck
from miasm2.core.graph import DiGraph, DiGraphSimplifier, MatchGraphJoker
from miasm2.core.interval import interval
log_asmbloc = logging.getLogger("asmblock")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)-5s: %(message)s"))
log_asmbloc.addHandler(console_handler)
log_asmbloc.setLevel(logging.WARNING)
def is_int(a):
    """Return True when @a is one of the supported integer types."""
    return isinstance(a, (int, long, moduint, modint))
def expr_is_label(e):
    """Return True when @e is an ExprId wrapping an asm_label."""
    if not isinstance(e, m2_expr.ExprId):
        return False
    return isinstance(e.name, asm_label)
def expr_is_int_or_label(e):
    """Return True when @e is an ExprInt or an ExprId wrapping an asm_label."""
    if isinstance(e, m2_expr.ExprInt):
        return True
    return isinstance(e, m2_expr.ExprId) and isinstance(e.name, asm_label)
class asm_label:
    "Stand for an assembly label"

    def __init__(self, name="", offset=None):
        self.fixedblocs = False
        # Integer names are canonicalized to a "loc_<hex>" form.
        if is_int(name):
            name = "loc_%.16X" % (int(name) & 0xFFFFFFFFFFFFFFFF)
        self.name = name
        self.attrib = None
        self.offset = None if offset is None else int(offset)

    def __str__(self):
        # Known offsets render as hex, unresolved ones as-is.
        if isinstance(self.offset, (int, long)):
            return "%s:0x%08x" % (self.name, self.offset)
        return "%s:%s" % (self.name, str(self.offset))

    def __repr__(self):
        pieces = ['<asmlabel ']
        if self.name:
            pieces.append(repr(self.name) + ' ')
        pieces.append('>')
        return ''.join(pieces)
class asm_raw:
    """Container for raw bytes/data embedded directly in an asm block."""

    def __init__(self, raw=""):
        # Raw payload; may be a string or a list of expressions.
        self.raw = raw

    def __str__(self):
        return repr(self.raw)
class asm_constraint(object):
    """Typed link from a block to a destination label."""

    # Constraint kinds: 'c_to' is a branch/jump edge; 'c_next' is the
    # fall-through edge to the next block in memory.
    c_to = "c_to"
    c_next = "c_next"

    def __init__(self, label, c_t=c_to):
        # Sanity check
        assert isinstance(label, asm_label)

        self.label = label
        self.c_t = c_t

    def __str__(self):
        return "%s:%s" % (str(self.c_t), str(self.label))
class asm_constraint_next(asm_constraint):
    """Fall-through ('next') constraint to @label."""

    def __init__(self, label):
        super(asm_constraint_next, self).__init__(
            label, c_t=asm_constraint.c_next)
class asm_constraint_to(asm_constraint):
    """Branch/jump ('to') constraint to @label."""

    def __init__(self, label):
        super(asm_constraint_to, self).__init__(
            label, c_t=asm_constraint.c_to)
class asm_bloc(object):
    """Basic block of assembly: a label, a list of instruction lines, and
    a set of outgoing constraints ('bto') to destination labels."""

    def __init__(self, label, alignment=1):
        assert isinstance(label, asm_label)
        self.bto = set()
        self.lines = []
        self.label = label
        self.alignment = alignment

    def __str__(self):
        out = []
        out.append(str(self.label))
        for l in self.lines:
            out.append(str(l))
        if self.bto:
            lbls = ["->"]
            for l in self.bto:
                if l is None:
                    lbls.append("Unknown? ")
                else:
                    lbls.append(str(l) + " ")
            lbls = '\t'.join(lbls)
            out.append(lbls)
        return '\n'.join(out)

    def addline(self, l):
        """Append an instruction line to the block."""
        self.lines.append(l)

    def addto(self, c):
        """Add an outgoing asm_constraint @c."""
        assert isinstance(self.bto, set)
        self.bto.add(c)

    def split(self, offset, l):
        """Split the block at instruction @offset; label @l becomes the head
        of the new trailing block. Returns the new block, or None if @offset
        does not land on an instruction boundary."""
        log_asmbloc.debug('split at %x', offset)
        i = -1
        offsets = [x.offset for x in self.lines]
        if not l.offset in offsets:
            log_asmbloc.warning(
                'cannot split bloc at %X ' % offset +
                'middle instruction? default middle')
            offsets.sort()
            return None
        new_bloc = asm_bloc(l)
        i = offsets.index(offset)

        self.lines, new_bloc.lines = self.lines[:i], self.lines[i:]
        flow_mod_instr = self.get_flow_instr()
        log_asmbloc.debug('flow mod %r', flow_mod_instr)
        c = asm_constraint(l, asm_constraint.c_next)
        # move dst if flowgraph modifier was in original bloc
        # (usecase: split delayslot bloc)
        if flow_mod_instr:
            for xx in self.bto:
                log_asmbloc.debug('lbl %s', xx)
            c_next = set(
                [x for x in self.bto if x.c_t == asm_constraint.c_next])
            c_to = [x for x in self.bto if x.c_t != asm_constraint.c_next]
            self.bto = set([c] + c_to)
            new_bloc.bto = c_next
        else:
            # Plain split: old block falls through to the new one, which
            # inherits all original destinations.
            new_bloc.bto = self.bto
            self.bto = set([c])
        return new_bloc

    def get_range(self):
        """Returns the offset hull of an asm_bloc"""
        if len(self.lines):
            return (self.lines[0].offset,
                    self.lines[-1].offset + self.lines[-1].l)
        else:
            return 0, 0

    def get_offsets(self):
        """Return the offsets of all instruction lines."""
        return [x.offset for x in self.lines]

    def add_cst(self, offset, c_t, symbol_pool):
        """Add a constraint of type @c_t to @offset, which may be an int
        offset, a label name, or an asm_label; labels are created in
        @symbol_pool when needed."""
        if isinstance(offset, (int, long)):
            l = symbol_pool.getby_offset_create(offset)
        elif isinstance(offset, str):
            l = symbol_pool.getby_name_create(offset)
        elif isinstance(offset, asm_label):
            l = offset
        else:
            raise ValueError('unknown offset type %r' % offset)
        c = asm_constraint(l, c_t)
        self.bto.add(c)

    def get_flow_instr(self):
        """Look for a flow-modifying instruction in the delayslot window."""
        if not self.lines:
            return None
        for i in xrange(-1, -1 - self.lines[0].delayslot - 1, -1):
            # NOTE(review): i is always negative here, so '0 <= i' can never
            # hold and this returns None on the first iteration — looks like
            # the guard was meant for positive indices; confirm intent.
            if not 0 <= i < len(self.lines):
                return None
            l = self.lines[i]
            if l.splitflow() or l.breakflow():
                raise NotImplementedError('not fully functional')

    def get_subcall_instr(self):
        """Return the call instruction in the delayslot window, if any."""
        if not self.lines:
            return None
        delayslot = self.lines[0].delayslot
        end_index = len(self.lines) - 1
        ds_max_index = max(end_index - delayslot, 0)
        for i in xrange(end_index, ds_max_index - 1, -1):
            l = self.lines[i]
            if l.is_subcall():
                return l
        return None

    def get_next(self):
        """Return the label of the fall-through ('next') destination, if any."""
        for x in self.bto:
            if x.c_t == asm_constraint.c_next:
                return x.label
        return None

    @staticmethod
    def _filter_constraint(constraints):
        """Sort and filter @constraints for asm_bloc.bto
        @constraints: non-empty set of asm_constraint instance

        Always the same type -> one of the constraint
        c_next and c_to -> c_next
        """
        # Only one constraint
        if len(constraints) == 1:
            return next(iter(constraints))

        # Constraint type -> set of corresponding constraint
        cbytype = {}
        for cons in constraints:
            cbytype.setdefault(cons.c_t, set()).add(cons)

        # Only one type -> any constraint is OK
        if len(cbytype) == 1:
            return next(iter(constraints))

        # At least 2 types -> types = {c_next, c_to}
        # c_to is included in c_next
        return next(iter(cbytype[asm_constraint.c_next]))

    def fix_constraints(self):
        """Fix next block constraints"""
        # destination -> associated constraints
        dests = {}
        for constraint in self.bto:
            dests.setdefault(constraint.label, set()).add(constraint)

        self.bto = set(self._filter_constraint(constraints)
                       for constraints in dests.itervalues())
class asm_block_bad(asm_bloc):
    """Stand for a *bad* ASM block (malformed, unreachable,
    not disassembled, ...)"""

    # Known error codes and their human-readable descriptions.
    ERROR_TYPES = {-1: "Unknown error",
                   0: "Unable to disassemble",
                   1: "Null starting block",
                   2: "Address forbidden by dont_dis",
                   }

    def __init__(self, label=None, alignment=1, errno=-1, *args, **kwargs):
        """Build an asm_block_bad.

        @label, @alignment: forwarded to asm_bloc.__init__
        @errno: (optional) error code from ERROR_TYPES describing why
        this block is bad
        """
        super(asm_block_bad, self).__init__(label, alignment, *args, **kwargs)
        self._errno = errno

    def __str__(self):
        # Unknown codes fall back to the raw errno value.
        reason = self.ERROR_TYPES.get(self._errno, self._errno)
        return "\n".join([str(self.label),
                          "\tBad block: %s" % reason])

    # A bad block is immutable: all mutation entry points are forbidden.
    def addline(self, *args, **kwargs):
        raise RuntimeError("An asm_block_bad cannot have line")

    def addto(self, *args, **kwargs):
        raise RuntimeError("An asm_block_bad cannot have bto")

    def split(self, *args, **kwargs):
        raise RuntimeError("An asm_block_bad cannot be splitted")
class asm_symbol_pool:
    """Registry of asm_label objects, indexed both by name and by offset.
    Enforces that names and pinned offsets stay unique."""

    def __init__(self):
        self._labels = []
        self._name2label = {}
        self._offset2label = {}
        self._label_num = 0

    def add_label(self, name, offset=None):
        """
        Create and add a label to the symbol_pool
        @name: label's name
        @offset: (optional) label's offset
        """
        label = asm_label(name, offset)

        # Test for collisions
        if (label.offset in self._offset2label and
                label != self._offset2label[label.offset]):
            raise ValueError('symbol %s has same offset as %s' %
                             (label, self._offset2label[label.offset]))
        if (label.name in self._name2label and
                label != self._name2label[label.name]):
            raise ValueError('symbol %s has same name as %s' %
                             (label, self._name2label[label.name]))

        self._labels.append(label)
        if label.offset is not None:
            self._offset2label[label.offset] = label
        if label.name != "":
            self._name2label[label.name] = label
        return label

    def remove_label(self, label):
        """
        Delete a @label
        """
        self._name2label.pop(label.name, None)
        self._offset2label.pop(label.offset, None)
        if label in self._labels:
            self._labels.remove(label)

    def del_label_offset(self, label):
        """Unpin the @label from its offset"""
        self._offset2label.pop(label.offset, None)
        label.offset = None

    def getby_offset(self, offset):
        """Retrieve label using its @offset"""
        return self._offset2label.get(offset, None)

    def getby_name(self, name):
        """Retrieve label using its @name"""
        return self._name2label.get(name, None)

    def getby_name_create(self, name):
        """Get a label from its @name, create it if it doesn't exist"""
        label = self.getby_name(name)
        if label is None:
            label = self.add_label(name)
        return label

    def getby_offset_create(self, offset):
        """Get a label from its @offset, create it if it doesn't exist"""
        label = self.getby_offset(offset)
        if label is None:
            label = self.add_label(offset, offset)
        return label

    def rename_label(self, label, newname):
        """Rename the @label name to @newname"""
        if newname in self._name2label:
            raise ValueError('Symbol already known')
        self._name2label.pop(label.name, None)
        label.name = newname
        self._name2label[label.name] = label

    def set_offset(self, label, offset):
        """Pin the @label from at @offset
        Note that there is a special case when the offset is a list
        it happens when offsets are recomputed in resolve_symbol*
        """
        if label is None:
            raise ValueError('label should not be None')
        if not label.name in self._name2label:
            raise ValueError('label %s not in symbol pool' % label)
        if offset is not None and offset in self._offset2label:
            raise ValueError('Conflict in label %s' % label)
        self._offset2label.pop(label.offset, None)
        label.offset = offset
        # Only pin integer offsets in the index (see docstring: offsets
        # may transiently be lists during symbol resolution).
        if is_int(label.offset):
            self._offset2label[label.offset] = label

    @property
    def items(self):
        """Return all labels"""
        return self._labels

    def __str__(self):
        return reduce(lambda x, y: x + str(y) + '\n', self._labels, "")

    def __getitem__(self, item):
        # Lookup by name first, then by offset.
        if item in self._name2label:
            return self._name2label[item]
        if item in self._offset2label:
            return self._offset2label[item]
        raise KeyError('unknown symbol %r' % item)

    def __contains__(self, item):
        return item in self._name2label or item in self._offset2label

    def merge(self, symbol_pool):
        """Merge with another @symbol_pool"""
        # NOTE(review): no collision check is performed here, unlike
        # add_label — duplicate names/offsets will silently overwrite.
        self._labels += symbol_pool._labels
        self._name2label.update(symbol_pool._name2label)
        self._offset2label.update(symbol_pool._offset2label)

    def gen_label(self):
        """Generate a new unpinned label"""
        label = self.add_label("lbl_gen_%.8X" % (self._label_num))
        self._label_num += 1
        return label
class AsmCFG(DiGraph):

    """Directed graph standing for a ASM Control Flow Graph with:
     - nodes: asm_bloc
     - edges: constraints between blocks, synchronized with asm_bloc's "bto"

    Specialized the .dot export and force the relation between block to be uniq,
    and associated with a constraint.

    Offer helpers on AsmCFG management, such as research by label, sanity
    checking and mnemonic size guessing.
    """

    # Internal structure for pending management
    AsmCFGPending = namedtuple("AsmCFGPending",
                               ["waiter", "constraint"])

    def __init__(self, *args, **kwargs):
        super(AsmCFG, self).__init__(*args, **kwargs)
        # Edges -> constraint
        self.edges2constraint = {}
        # Expected asm_label -> set( (src, dst), constraint )
        self._pendings = {}
        # Label2block built on the fly
        self._label2block = {}

    # Compatibility with old list API
    def append(self, *args, **kwargs):
        raise DeprecationWarning("AsmCFG is a graph, use add_node")

    def remove(self, *args, **kwargs):
        raise DeprecationWarning("AsmCFG is a graph, use del_node")

    def __getitem__(self, *args, **kwargs):
        raise DeprecationWarning("Order of AsmCFG elements is not reliable")

    def __iter__(self):
        """Iterator on asm_bloc composing the current graph"""
        return iter(self._nodes)

    def __len__(self):
        """Return the number of blocks in AsmCFG"""
        return len(self._nodes)

    # Manage graph with associated constraints
    def add_edge(self, src, dst, constraint):
        """Add an edge to the graph
        @src: asm_bloc instance, source
        @dst: asm_block instance, destination
        @constraint: constraint associated to this edge
        """
        # Sanity check
        assert (src, dst) not in self.edges2constraint

        # Add the edge to src.bto if needed
        if dst.label not in [cons.label for cons in src.bto]:
            src.bto.add(asm_constraint(dst.label, constraint))

        # Add edge
        self.edges2constraint[(src, dst)] = constraint
        super(AsmCFG, self).add_edge(src, dst)

    def add_uniq_edge(self, src, dst, constraint):
        """Add an edge from @src to @dst if it doesn't already exist"""
        if (src not in self._nodes_succ or
                dst not in self._nodes_succ[src]):
            self.add_edge(src, dst, constraint)

    def del_edge(self, src, dst):
        """Delete the edge @src->@dst and its associated constraint"""
        # Delete from src.bto
        to_remove = [cons for cons in src.bto if cons.label == dst.label]
        if to_remove:
            assert len(to_remove) == 1
            src.bto.remove(to_remove[0])

        # Del edge
        del self.edges2constraint[(src, dst)]
        super(AsmCFG, self).del_edge(src, dst)

    def add_node(self, block):
        """Add the block @block to the current instance, if it is not already in
        @block: asm_bloc instance

        Edges will be created for @block.bto, if destinations are already in
        this instance. If not, they will be resolved when adding these
        aforementionned destinations.
        `self.pendings` indicates which blocks are not yet resolved.
        """
        status = super(AsmCFG, self).add_node(block)
        if not status:
            return status

        # Update waiters: resolve every pending edge that was waiting on
        # this block's label.
        if block.label in self._pendings:
            for bblpend in self._pendings[block.label]:
                self.add_edge(bblpend.waiter, block, bblpend.constraint)
            del self._pendings[block.label]

        # Synchronize edges with block destinations
        self._label2block[block.label] = block
        for constraint in block.bto:
            dst = self._label2block.get(constraint.label,
                                        None)
            if dst is None:
                # Block is yet unknown, add it to pendings
                to_add = self.AsmCFGPending(waiter=block,
                                            constraint=constraint.c_t)
                self._pendings.setdefault(constraint.label,
                                          set()).add(to_add)
            else:
                # Block is already in known nodes
                self.add_edge(block, dst, constraint.c_t)

        return status

    def del_node(self, block):
        super(AsmCFG, self).del_node(block)
        del self._label2block[block.label]

    def merge(self, graph):
        """Merge with @graph, taking in account constraints"""
        # -> add_edge(x, y, constraint)
        for node in graph._nodes:
            self.add_node(node)
        for edge in graph._edges:
            # Use "_uniq_" beacause the edge can already exist due to add_node
            self.add_uniq_edge(*edge, constraint=graph.edges2constraint[edge])

    def node2lines(self, node):
        """Yield DotCellDescription rows for @node's .dot representation."""
        yield self.DotCellDescription(text=str(node.label.name),
                                      attr={'align': 'center',
                                            'colspan': 2,
                                            'bgcolor': 'grey'})
        if isinstance(node, asm_block_bad):
            yield [self.DotCellDescription(
                text=node.ERROR_TYPES.get(node._errno,
                                          node._errno),
                attr={})]
            # NOTE(review): 'raise StopIteration' inside a generator is
            # deprecated by PEP 479 and raises RuntimeError on Python 3.7+;
            # a plain 'return' would be the modern equivalent.
            raise StopIteration
        for line in node.lines:
            if self._dot_offset:
                yield [self.DotCellDescription(text="%.8X" % line.offset,
                                               attr={}),
                       self.DotCellDescription(text=str(line), attr={})]
            else:
                yield self.DotCellDescription(text=str(line), attr={})

    def node_attr(self, node):
        # Bad blocks are highlighted in red in the .dot output.
        if isinstance(node, asm_block_bad):
            return {'style': 'filled', 'fillcolor': 'red'}
        return {}

    def edge_attr(self, src, dst):
        # Edge colors: blue by default; on multi-successor nodes,
        # red for fall-through ('next') and green for branches.
        cst = self.edges2constraint.get((src, dst), None)
        edge_color = "blue"

        if len(self.successors(src)) > 1:
            if cst == asm_constraint.c_next:
                edge_color = "red"
            else:
                edge_color = "limegreen"

        return {"color": edge_color}

    def dot(self, offset=False):
        """
        @offset: (optional) if set, add the corresponding offsets in each node
        """
        self._dot_offset = offset
        return super(AsmCFG, self).dot()

    # Helpers
    @property
    def pendings(self):
        """Dictionary of label -> set(AsmCFGPending instance) indicating
        which label are missing in the current instance.
        A label is missing if a block which is already in nodes has constraints
        with him (thanks to its .bto) and the corresponding block is not yet in
        nodes
        """
        return self._pendings

    def _build_label2block(self):
        # Rebuild the label -> block cache from scratch.
        self._label2block = {block.label: block
                             for block in self._nodes}

    def label2block(self, label):
        """Return the block corresponding to label @label
        @label: asm_label instance or ExprId(asm_label) instance"""
        return self._label2block[label]

    def rebuild_edges(self):
        """Consider blocks '.bto' and rebuild edges according to them, ie:
        - update constraint type
        - add missing edge
        - remove no more used edge

        This method should be called if a block's '.bto' in nodes have been
        modified without notifying this instance to resynchronize edges.
        """
        self._build_label2block()
        for block in self._nodes:
            edges = []
            # Rebuild edges from bto
            for constraint in block.bto:
                dst = self._label2block.get(constraint.label,
                                            None)
                if dst is None:
                    # Missing destination, add to pendings
                    self._pendings.setdefault(constraint.label,
                                              set()).add(self.AsmCFGPending(block,
                                                                            constraint.c_t))
                    continue
                edge = (block, dst)
                edges.append(edge)
                if edge in self._edges:
                    # Already known edge, constraint may have changed
                    self.edges2constraint[edge] = constraint.c_t
                else:
                    # An edge is missing
                    self.add_edge(edge[0], edge[1], constraint.c_t)

            # Remove useless edges
            for succ in self.successors(block):
                edge = (block, succ)
                if edge not in edges:
                    self.del_edge(*edge)

    def get_bad_blocks(self):
        """Iterator on asm_block_bad elements"""
        # A bad asm block is always a leaf
        for block in self.leaves():
            if isinstance(block, asm_block_bad):
                yield block

    def get_bad_blocks_predecessors(self, strict=False):
        """Iterator on block with an asm_block_bad destination
        @strict: (optional) if set, return block with only bad
        successors
        """
        # Avoid returning the same block
        done = set()
        for badblock in self.get_bad_blocks():
            for predecessor in self.predecessors_iter(badblock):
                if predecessor not in done:
                    if (strict and
                            not all(isinstance(block, asm_block_bad)
                                    for block in self.successors_iter(predecessor))):
                        continue
                    yield predecessor
                    done.add(predecessor)

    def sanity_check(self):
        """Do sanity checks on blocks' constraints:
        * no pendings
        * no multiple next constraint to same block
        * no next constraint to self
        """
        if len(self._pendings) != 0:
            raise RuntimeError("Some blocks are missing: %s" % map(str,
                                                                   self._pendings.keys()))

        next_edges = {edge: constraint
                      for edge, constraint in self.edges2constraint.iteritems()
                      if constraint == asm_constraint.c_next}

        for block in self._nodes:
            # No next constraint to self
            if (block, block) in next_edges:
                raise RuntimeError('Bad constraint: self in next')

            # No multiple next constraint to same block
            pred_next = list(pblock
                             for (pblock, dblock) in next_edges
                             if dblock == block)

            if len(pred_next) > 1:
                raise RuntimeError("Too many next constraints for bloc %r"
                                   "(%s)" % (block.label,
                                             map(lambda x: x.label, pred_next)))

    def guess_blocks_size(self, mnemo):
        """Asm and compute max block size
        Add a 'size' and 'max_size' attribute on each block
        @mnemo: metamn instance"""
        for block in self._nodes:
            size = 0
            for instr in block.lines:
                if isinstance(instr, asm_raw):
                    # for special asm_raw, only extract len
                    if isinstance(instr.raw, list):
                        data = None
                        if len(instr.raw) == 0:
                            l = 0
                        else:
                            l = instr.raw[0].size / 8 * len(instr.raw)
                    elif isinstance(instr.raw, str):
                        data = instr.raw
                        l = len(data)
                    else:
                        raise NotImplementedError('asm raw')
                else:
                    # Assemble the instruction to retrieve its len.
                    # If the instruction uses symbol it will fail
                    # In this case, the max_instruction_len is used
                    try:
                        candidates = mnemo.asm(instr)
                        l = len(candidates[-1])
                    except:
                        l = mnemo.max_instruction_len
                    data = None
                instr.data = data
                instr.l = l
                size += l

            block.size = size
            block.max_size = size
            log_asmbloc.info("size: %d max: %d", block.size, block.max_size)

    def apply_splitting(self, symbol_pool, dis_block_callback=None, **kwargs):
        """Consider @self' bto destinations and split block in @self if one of
        these destinations jumps in the middle of this block.
        In order to work, they must be only one block in @self per label in
        @symbol_pool (which is true if @self come from the same disasmEngine).

        @symbol_pool: asm_symbol_pool instance associated with @self'labels
        @dis_block_callback: (optional) if set, this callback will be called on
        new block destinations
        @kwargs: (optional) named arguments to pass to dis_block_callback
        """
        # Get all possible destinations not yet resolved, with a resolved
        # offset
        block_dst = [label.offset
                     for label in self.pendings
                     if label.offset is not None]

        todo = self.nodes().copy()
        rebuild_needed = False

        while todo:
            # Find a block with a destination inside another one
            cur_block = todo.pop()
            range_start, range_stop = cur_block.get_range()
            for off in block_dst:
                if not (off > range_start and off < range_stop):
                    continue

                # `cur_block` must be splitted at offset `off`
                label = symbol_pool.getby_offset_create(off)
                new_b = cur_block.split(off, label)
                log_asmbloc.debug("Split block %x", off)
                if new_b is None:
                    log_asmbloc.error("Cannot split %x!!", off)
                    continue

                # Remove pending from cur_block
                # Links from new_b will be generated in rebuild_edges
                for dst in new_b.bto:
                    if dst.label not in self.pendings:
                        continue
                    self.pendings[dst.label] = set(pending for pending in self.pendings[dst.label]
                                                   if pending.waiter != cur_block)

                # The new block destinations may need to be disassembled
                if dis_block_callback:
                    offsets_to_dis = set(constraint.label.offset
                                         for constraint in new_b.bto)
                    dis_block_callback(cur_bloc=new_b,
                                       offsets_to_dis=offsets_to_dis,
                                       symbol_pool=symbol_pool, **kwargs)

                # Update structure
                rebuild_needed = True
                self.add_node(new_b)

                # The new block must be considered
                todo.add(new_b)
                range_start, range_stop = cur_block.get_range()

        # Rebuild edges to match new blocks'bto
        if rebuild_needed:
            self.rebuild_edges()
# Out of _merge_blocks to be computed only once
# A block is eligible for merging if it is not a bad block and holds at
# least one instruction.
_acceptable_block = lambda block: (not isinstance(block, asm_block_bad) and
                                   len(block.lines) > 0)
# Pattern: an acceptable parent with exactly one acceptable son
# (restrict_in/restrict_out relax the degree constraints on the outer sides).
_parent = MatchGraphJoker(restrict_in=False, filt=_acceptable_block)
_son = MatchGraphJoker(restrict_out=False, filt=_acceptable_block)
_expgraph = _parent >> _son
def _merge_blocks(dg, graph):
    """Graph simplification merging asm_bloc with one and only one son with this
    son if this son has one and only one parent.

    @dg: DiGraphSimplifier instance driving the pass (unused here, required by
         the pass signature)
    @graph: graph of asm_bloc, mutated in place
    """
    # Blocks to ignore, because they have been removed from the graph
    to_ignore = set()

    for match in _expgraph.match(graph):

        # Get matching blocks
        block, succ = match[_parent], match[_son]

        # Ignore already deleted blocks (a block may appear in several matches
        # computed before the graph was mutated)
        if (block in to_ignore or
            succ in to_ignore):
            continue

        # Remove block last instruction if needed
        last_instr = block.lines[-1]
        if last_instr.delayslot > 0:
            # TODO: delayslot
            raise RuntimeError("Not implemented yet")

        # A call keeps its own block: do not merge through subcalls
        if last_instr.is_subcall():
            continue
        # Drop the explicit jump to the son: after the merge the flow is
        # implicit (fall-through)
        if last_instr.breakflow() and last_instr.dstflow():
            block.lines.pop()

        # Merge block: append the son's lines, re-route the son's out-edges
        # to the merged block, then delete the son
        block.lines += succ.lines
        for nextb in graph.successors_iter(succ):
            graph.add_edge(block, nextb, graph.edges2constraint[(succ, nextb)])

        graph.del_node(succ)
        to_ignore.add(succ)
# Ready-to-use simplifier instance running the straight-line block merge pass
# (_merge_blocks) on an AsmCFG
bbl_simplifier = DiGraphSimplifier()
bbl_simplifier.enable_passes([_merge_blocks])
def conservative_asm(mnemo, instr, symbols, conservative):
    """Assemble @instr with @mnemo, preferring its original encoding.

    @mnemo: machine class providing .asm(instr, symbols)
    @instr: instruction to assemble; may carry its original bytes in .b
    @symbols: symbol resolution context forwarded to the assembler
    @conservative: when True, fall back to any candidate whose length matches
                   the original bytes
    Return (chosen_encoding, all_candidates); raise ValueError if the
    instruction cannot be assembled at all.
    """
    candidates = mnemo.asm(instr, symbols)
    if not candidates:
        raise ValueError('cannot asm:%s' % str(instr))

    # No original bytes known: nothing to preserve, take the first candidate
    if not hasattr(instr, "b"):
        return candidates[0], candidates

    original = instr.b
    # Exact original encoding is still valid: keep it
    if original in candidates:
        return original, candidates

    # Otherwise, optionally keep at least the original length so that
    # surrounding offsets are not disturbed
    if conservative:
        same_length = [cand for cand in candidates
                       if len(cand) == len(original)]
        if same_length:
            return same_length[0], candidates

    return candidates[0], candidates
def fix_expr_val(expr, symbols):
    """Resolve an expression @expr using @symbols.

    Every ExprId is replaced by the offset of its label in @symbols; the
    simplified result must be a constant, otherwise RuntimeError is raised.
    """
    def _substitute(node):
        # Swap label identifiers for their concrete offsets
        if isinstance(node, m2_expr.ExprId):
            label = symbols._name2label[node.name]
            node = m2_expr.ExprInt_from(node, label.offset)
        return node

    resolved = expr_simp(expr.visit(_substitute))
    if not isinstance(resolved, m2_expr.ExprInt):
        raise RuntimeError('Cannot resolve symbol %s' % expr)
    return resolved
def fix_label_offset(symbol_pool, label, offset, modified):
    """Pin @label at @offset through @symbol_pool; when the offset actually
    changes, record @label into @modified.
    @symbol_pool: current symbol_pool
    """
    if label.offset != offset:
        symbol_pool.set_offset(label, offset)
        modified.add(label)
class BlockChain(object):

    """Manage blocks linked with an asm_constraint_next.

    A chain is an ordered list of blocks that must be laid out contiguously.
    At most one block of the chain may be "pinned" (have a concrete integer
    offset); the rest of the chain is positioned relative to it.
    """

    def __init__(self, symbol_pool, blocks):
        # symbol_pool: asm_symbol_pool used to (re)pin labels
        # blocks: ordered list of asm_bloc forming the chain
        self.symbol_pool = symbol_pool
        self.blocks = blocks
        self.place()

    @property
    def pinned(self):
        """Return True iff at least one block is pinned"""
        return self.pinned_block_idx is not None

    def _set_pinned_block_idx(self):
        # Locate the unique block with a concrete offset; error out if the
        # chain carries more than one pinned block
        self.pinned_block_idx = None
        for i, block in enumerate(self.blocks):
            if is_int(block.label.offset):
                if self.pinned_block_idx is not None:
                    raise ValueError("Multiples pinned block detected")
                self.pinned_block_idx = i

    def place(self):
        """Compute the chain's max_size and, when a block is pinned, the
        offset_min/offset_max bounds of the addresses the chain may occupy,
        using the pinned block and the blocks' worst-case (aligned) sizes.
        """
        self._set_pinned_block_idx()
        self.max_size = 0
        # Worst case: every block padded up to its alignment
        for block in self.blocks:
            self.max_size += block.max_size + block.alignment - 1

        # Check if chain has one block pinned
        if not self.pinned:
            return

        offset_base = self.blocks[self.pinned_block_idx].label.offset
        assert(offset_base % self.blocks[self.pinned_block_idx].alignment == 0)

        self.offset_min = offset_base
        # NOTE(review): the extended slice [:idx - 1:-1] walks the list
        # backwards from the END down to index idx (and is empty when
        # idx == 0) — i.e. it visits the pinned block and its successors,
        # not its predecessors. Confirm this is the intended bound.
        for block in self.blocks[:self.pinned_block_idx - 1:-1]:
            self.offset_min -= block.max_size + \
                (block.alignment - block.max_size) % block.alignment

        self.offset_max = offset_base
        for block in self.blocks[self.pinned_block_idx:]:
            self.offset_max += block.max_size + \
                (block.alignment - block.max_size) % block.alignment

    def merge(self, chain):
        """Best effort merge two block chains
        Return the list of resulting blockchains"""
        self.blocks += chain.blocks
        self.place()
        return [self]

    def fix_blocks(self, modified_labels):
        """Propagate a pinned to its blocks' neighbour
        @modified_labels: store new pinned labels"""

        if not self.pinned:
            raise ValueError('Trying to fix unpinned block')

        # Propagate offset to blocks before pinned block
        pinned_block = self.blocks[self.pinned_block_idx]
        offset = pinned_block.label.offset
        if offset % pinned_block.alignment != 0:
            raise RuntimeError('Bad alignment')

        # NOTE(review): same backwards extended slice as in place(); also
        # `offset` is not advanced across iterations here — verify against
        # upstream before relying on multi-block backward propagation.
        for block in self.blocks[:self.pinned_block_idx - 1:-1]:
            new_offset = offset - block.size
            new_offset = new_offset - new_offset % pinned_block.alignment
            fix_label_offset(self.symbol_pool,
                             block.label,
                             new_offset,
                             modified_labels)

        # Propagate offset to blocks after pinned block
        offset = pinned_block.label.offset + pinned_block.size

        last_block = pinned_block
        for block in self.blocks[self.pinned_block_idx + 1:]:
            # Pad up to the previous block's alignment before pinning
            offset += (- offset) % last_block.alignment
            fix_label_offset(self.symbol_pool,
                             block.label,
                             offset,
                             modified_labels)
            offset += block.size
            last_block = block
        return modified_labels
class BlockChainWedge(object):

    """Stand for wedges between blocks: a reserved, immovable address range
    that no chain may overlap."""

    def __init__(self, symbol_pool, offset, size):
        # The wedge mimics the BlockChain placement interface
        # (offset_min/offset_max/max_size) so it can sit in the same lists
        self.symbol_pool = symbol_pool
        self.offset = offset
        self.max_size = size
        self.offset_min, self.offset_max = offset, offset + size

    def merge(self, chain):
        """Best effort merge two block chains
        Return the list of resulting blockchains"""
        # Pin @chain immediately after the wedge, then recompute its bounds
        first_label = chain.blocks[0].label
        self.symbol_pool.set_offset(first_label, self.offset_max)
        chain.place()
        return [self, chain]
def group_constrained_blocks(symbol_pool, blocks):
    """
    Return the BlockChains list built from grouped asm blocks linked by
    asm_constraint_next
    @symbol_pool: asm_symbol_pool forwarded to the created BlockChain instances
    @blocks: a list of asm block
    """
    log_asmbloc.info('group_constrained_blocks')

    # Group adjacent blocks
    remaining_blocks = list(blocks)
    known_block_chains = {}
    # Map label -> block for O(1) successor lookup
    lbl2block = {block.label: block for block in blocks}

    while remaining_blocks:
        # Create a new block chain, seeded with an arbitrary remaining block
        block_list = [remaining_blocks.pop()]

        # Find sons in remainings blocks linked with a next constraint
        while True:
            # Get next block
            next_label = block_list[-1].get_next()
            if next_label is None or next_label not in lbl2block:
                break
            next_block = lbl2block[next_label]

            # Add the block at the end of the current chain; a block already
            # consumed by another chain stops the walk
            if next_block not in remaining_blocks:
                break
            block_list.append(next_block)
            remaining_blocks.remove(next_block)

        # Check if son is in a known block group: if so, splice that group
        # onto the end of the current chain (the walk stopped on its head)
        if next_label is not None and next_label in known_block_chains:
            block_list += known_block_chains[next_label]
            del known_block_chains[next_label]

        # Chains are indexed by the label of their first block
        known_block_chains[block_list[0].label] = block_list

    out_block_chains = []
    for label in known_block_chains:
        chain = BlockChain(symbol_pool, known_block_chains[label])
        out_block_chains.append(chain)
    return out_block_chains
def get_blockchains_address_interval(blockChains, dst_interval):
    """Compute the interval used by the pinned @blockChains
    Check if the placed chains are in the @dst_interval
    Raise ValueError when a pinned chain spills out of @dst_interval.
    """
    allocated = interval()
    # Only pinned chains occupy concrete addresses at this point
    for chain in (c for c in blockChains if c.pinned):
        used = interval([(chain.offset_min, chain.offset_max - 1)])
        if used not in dst_interval:
            raise ValueError('Chain placed out of destination interval')
        allocated += used
    return allocated
def resolve_symbol(blockChains, symbol_pool, dst_interval=None):
    """Place @blockChains in the @dst_interval
    @blockChains: list of BlockChain to lay out; sorted in place
    @dst_interval: (optional) interval of allowed addresses, defaults to the
                   full 64-bit range
    Return the placed chains (wedges filtered out); raise RuntimeError when a
    chain cannot fit between already-placed neighbours.
    """

    log_asmbloc.info('resolve_symbol')
    if dst_interval is None:
        dst_interval = interval([(0, 0xFFFFFFFFFFFFFFFF)])

    # Complement of the destination interval over a slightly widened range,
    # so every hole (and both ends) becomes a forbidden zone
    forbidden_interval = interval(
        [(-1, 0xFFFFFFFFFFFFFFFF + 1)]) - dst_interval
    allocated_interval = get_blockchains_address_interval(blockChains,
                                                          dst_interval)
    log_asmbloc.debug('allocated interval: %s', allocated_interval)

    pinned_chains = [chain for chain in blockChains if chain.pinned]

    # Add wedge in forbidden intervals, so unpinned chains can only be merged
    # into the gaps inside dst_interval
    for start, stop in forbidden_interval.intervals:
        wedge = BlockChainWedge(
            symbol_pool, offset=start, size=stop + 1 - start)
        pinned_chains.append(wedge)

    # Try to place bigger blockChains first
    pinned_chains.sort(key=lambda x: x.offset_min)
    blockChains.sort(key=lambda x: -x.max_size)

    fixed_chains = list(pinned_chains)

    log_asmbloc.debug("place chains")
    # First-fit: slot each unpinned chain into the first gap between two
    # consecutive already-fixed chains that is large enough
    for chain in blockChains:
        if chain.pinned:
            continue
        fixed = False
        for i in xrange(1, len(fixed_chains)):
            prev_chain = fixed_chains[i - 1]
            next_chain = fixed_chains[i]

            if prev_chain.offset_max + chain.max_size < next_chain.offset_min:
                # merge() pins the chain right after prev_chain and returns
                # the chains replacing prev_chain in the ordering
                new_chains = prev_chain.merge(chain)
                fixed_chains[i - 1:i] = new_chains
                fixed = True
                break
        if not fixed:
            raise RuntimeError('Cannot find enough space to place blocks')

    # Drop the wedges: only real BlockChain instances are returned
    return [chain for chain in fixed_chains if isinstance(chain, BlockChain)]
def filter_exprid_label(exprs):
    """Extract labels from list of ExprId @exprs"""
    labels = set()
    for candidate in exprs:
        if isinstance(candidate.name, asm_label):
            labels.add(candidate.name)
    return labels
def get_block_labels(block):
    """Extract labels used by @block
    Return the set of asm_label referenced by the block's instructions.
    """
    referenced = set()
    for instr in block.lines:
        # asm_raw lines may embed expressions in their .raw payload;
        # regular instructions carry expressions in their arguments
        if isinstance(instr, asm_raw):
            sources = instr.raw if isinstance(instr.raw, list) else []
        else:
            sources = instr.args
        for expr in sources:
            referenced.update(m2_expr.get_expr_ids(expr))
    return filter_exprid_label(referenced)
def assemble_block(mnemo, block, symbol_pool, conservative=False):
    """Assemble a @block using @symbol_pool
    @mnemo: machine class providing .asm()
    @block: asm_bloc whose lines are assembled in place (instr.data, instr.l,
            instr.offset and block.size are updated)
    @conservative: (optional) use original bytes when possible
    """
    # Running offset of the current line relative to the block start
    offset_i = 0

    for instr in block.lines:
        if isinstance(instr, asm_raw):
            if isinstance(instr.raw, list):
                # Fix special asm_raw: resolve each embedded expression to a
                # constant and pack it at its size
                data = ""
                for expr in instr.raw:
                    expr_int = fix_expr_val(expr, symbol_pool)
                    data += pck[expr_int.size](expr_int.arg)
                instr.data = data

            # NOTE(review): asm_raw lines get a block-relative offset here,
            # while regular instructions below get label.offset + offset_i
            # (absolute) — confirm this asymmetry is intended.
            instr.offset = offset_i
            offset_i += instr.l
            continue

        # Assemble an instruction
        saved_args = list(instr.args)
        instr.offset = block.label.offset + offset_i

        # Replace instruction's arguments by resolved ones
        instr.args = instr.resolve_args_with_symbols(symbol_pool)
        if instr.dstflow():
            instr.fixDstOffset()

        old_l = instr.l
        cached_candidate, _ = conservative_asm(mnemo, instr, symbol_pool,
                                               conservative)

        # Restore original arguments (the resolved ones were only needed for
        # encoding)
        instr.args = saved_args

        # We need to update the block size: the new encoding may differ in
        # length from the previous one
        block.size = block.size - old_l + len(cached_candidate)
        instr.data = cached_candidate
        instr.l = len(cached_candidate)

        offset_i += instr.l
def asmbloc_final(mnemo, blocks, blockChains, symbol_pool, conservative=False):
    """Resolve and assemble @blockChains using @symbol_pool until fixed point is
    reached.

    Re-pinning a block may change instruction encodings (and thus sizes),
    which in turn moves labels again: iterate until no label moves and no
    block remains to re-assemble.
    @conservative: forwarded to assemble_block (keep original bytes when
                   possible)
    """

    log_asmbloc.debug("asmbloc_final")

    # Init structures
    lbl2block = {block.label: block for block in blocks}
    # Reverse index: label -> set of blocks whose encoding depends on it
    blocks_using_label = {}
    for block in blocks:
        labels = get_block_labels(block)
        for label in labels:
            blocks_using_label.setdefault(label, set()).add(block)

    block2chain = {}
    for chain in blockChains:
        for block in chain.blocks:
            block2chain[block] = chain

    # Init worklist: every block must be assembled at least once
    blocks_to_rework = set(blocks)

    # Fix and re-assemble blocks until fixed point is reached
    while True:

        # Propagate pinned blocks into chains; collect every label that moved
        modified_labels = set()
        for chain in blockChains:
            chain.fix_blocks(modified_labels)

        for label in modified_labels:
            # Retrieve block with modified reference: it must be re-assembled
            if label in lbl2block:
                blocks_to_rework.add(lbl2block[label])

            # Enqueue blocks referencing a modified label
            if label not in blocks_using_label:
                continue
            for block in blocks_using_label[label]:
                blocks_to_rework.add(block)

        # No more work
        if not blocks_to_rework:
            break

        while blocks_to_rework:
            block = blocks_to_rework.pop()
            assemble_block(mnemo, block, symbol_pool, conservative)
def asm_resolve_final(mnemo, blocks, symbol_pool, dst_interval=None):
    """Resolve and assemble @blocks using @symbol_pool into interval
    @dst_interval
    @mnemo: machine class providing .asm()
    @blocks: AsmCFG-like container of asm_bloc
    @dst_interval: (optional) interval of allowed destination addresses
    Return a dict {offset: assembled bytes}; raise RuntimeError when two
    assembled instructions overlap.
    """
    blocks.sanity_check()

    blocks.guess_blocks_size(mnemo)
    blockChains = group_constrained_blocks(symbol_pool, blocks)
    resolved_blockChains = resolve_symbol(
        blockChains, symbol_pool, dst_interval)

    asmbloc_final(mnemo, blocks, resolved_blockChains, symbol_pool)
    patches = {}
    output_interval = interval()

    for block in blocks:
        offset = block.label.offset
        for instr in block.lines:
            if not instr.data:
                # Empty line
                continue
            assert len(instr.data) == instr.l
            patches[offset] = instr.data
            instruction_interval = interval([(offset, offset + instr.l - 1)])
            if not (instruction_interval & output_interval).empty:
                raise RuntimeError("overlapping bytes %X" % int(offset))
            # BUGFIX: record the bytes just emitted; output_interval was
            # previously never updated, so the overlap check above could
            # never trigger
            output_interval += instruction_interval
            instr.offset = offset
            offset += instr.l
    return patches
class disasmEngine(object):

    """Disassembly engine, taking care of disassembler options and multi-block
    strategy.

    Engine options:

    + Object supporting membership test (offset in ..)
     - dont_dis: stop the current disassembly branch if reached
     - split_dis: force a basic block end if reached,
                  with a next constraint on its successor
    + On/Off
     - follow_call: recursively disassemble CALL destinations
     - dontdis_retcall: stop on CALL return addresses
     - dont_dis_nulstart_bloc: stop if a block begin with a few \x00

    + Number
     - lines_wd: maximum block's size (in number of instruction)
     - blocs_wd: maximum number of distinct disassembled block

    + callback(arch, attrib, pool_bin, cur_bloc, offsets_to_dis,
               symbol_pool)
     - dis_bloc_callback: callback after each new disassembled block

    The engine also tracks already handled blocks, for performance and to
    avoid infinite cycling.
    Addresses of disassembled blocks are in the attribute `job_done`.
    To force a new disassembly, the targeted offset must first be removed from
    this structure.
    """

    def __init__(self, arch, attrib, bin_stream, **kwargs):
        """Instanciate a new disassembly engine
        @arch: targeted architecture
        @attrib: architecture attribute
        @bin_stream: bytes source
        @kwargs: (optional) custom options
        """
        self.arch = arch
        self.attrib = attrib
        self.bin_stream = bin_stream
        self.symbol_pool = asm_symbol_pool()
        self.job_done = set()

        # Setup options (see class docstring for their meaning)
        self.dont_dis = []
        self.split_dis = []
        self.follow_call = False
        self.dontdis_retcall = False
        self.lines_wd = None
        self.blocs_wd = None
        self.dis_bloc_callback = None
        self.dont_dis_nulstart_bloc = False

        # Override options if needed; any keyword becomes an attribute
        self.__dict__.update(kwargs)

    def _dis_bloc(self, offset):
        """Disassemble the block at offset @offset
        Return the created asm_bloc and future offsets to disassemble
        """
        lines_cpt = 0
        in_delayslot = False
        delayslot_count = self.arch.delayslot
        offsets_to_dis = set()
        # Set when the fall-through successor must also be disassembled
        add_next_offset = False
        label = self.symbol_pool.getby_offset_create(offset)
        cur_block = asm_bloc(label)
        log_asmbloc.debug("dis at %X", int(offset))
        while not in_delayslot or delayslot_count > 0:

            if in_delayslot:
                delayslot_count -= 1

            # Forbidden offset: close the block (bad block if still empty)
            if offset in self.dont_dis:
                if not cur_block.lines:
                    self.job_done.add(offset)
                    # Block is empty -> bad block
                    cur_block = asm_block_bad(label, errno=2)
                else:
                    # Block is not empty, stop the desassembly pass and add a
                    # constraint to the next block
                    cur_block.add_cst(offset, asm_constraint.c_next,
                                      self.symbol_pool)
                break

            # Forced split point (never on the very first line)
            if lines_cpt > 0 and offset in self.split_dis:
                cur_block.add_cst(offset, asm_constraint.c_next,
                                  self.symbol_pool)
                offsets_to_dis.add(offset)
                break

            lines_cpt += 1
            if self.lines_wd is not None and lines_cpt > self.lines_wd:
                log_asmbloc.debug("lines watchdog reached at %X", int(offset))
                break

            # Offset already disassembled: link to the existing block
            if offset in self.job_done:
                cur_block.add_cst(offset, asm_constraint.c_next,
                                  self.symbol_pool)
                break

            off_i = offset
            try:
                instr = self.arch.dis(self.bin_stream, self.attrib, offset)
            except (Disasm_Exception, IOError), e:
                log_asmbloc.warning(e)
                instr = None

            if instr is None:
                log_asmbloc.warning("cannot disasm at %X", int(off_i))
                if not cur_block.lines:
                    self.job_done.add(offset)
                    # Block is empty -> bad block
                    cur_block = asm_block_bad(label, errno=0)
                else:
                    # Block is not empty, stop the desassembly pass and add a
                    # constraint to the next block
                    cur_block.add_cst(off_i, asm_constraint.c_next,
                                      self.symbol_pool)
                break

            # XXX TODO nul start block option
            if self.dont_dis_nulstart_bloc and instr.b.count('\x00') == instr.l:
                log_asmbloc.warning("reach nul instr at %X", int(off_i))
                if not cur_block.lines:
                    # Block is empty -> bad block
                    cur_block = asm_block_bad(label, errno=1)
                else:
                    # Block is not empty, stop the desassembly pass and add a
                    # constraint to the next block
                    cur_block.add_cst(off_i, asm_constraint.c_next,
                                      self.symbol_pool)
                break

            # special case: flow graph modificator in delayslot
            if in_delayslot and instr and (instr.splitflow() or instr.breakflow()):
                add_next_offset = True
                break

            self.job_done.add(offset)
            log_asmbloc.debug("dis at %X", int(offset))

            offset += instr.l
            log_asmbloc.debug(instr)
            log_asmbloc.debug(instr.args)

            cur_block.addline(instr)
            if not instr.breakflow():
                continue
            # test split: a splitting flow (e.g. conditional jump) keeps a
            # fall-through successor unless it is a call return we skip
            if instr.splitflow() and not (instr.is_subcall() and self.dontdis_retcall):
                add_next_offset = True
                pass
            if instr.dstflow():
                instr.dstflow2label(self.symbol_pool)
                dst = instr.getdstflow(self.symbol_pool)
                # Keep only label destinations (drop computed/indirect ones)
                dstn = []
                for d in dst:
                    if isinstance(d, m2_expr.ExprId) and \
                            isinstance(d.name, asm_label):
                        dstn.append(d.name)
                dst = dstn
                if (not instr.is_subcall()) or self.follow_call:
                    cur_block.bto.update(
                        [asm_constraint(x, asm_constraint.c_to) for x in dst])

            # get in delayslot mode: the next delayslot_count lines still
            # belong to this block
            in_delayslot = True
            delayslot_count = instr.delayslot

        for c in cur_block.bto:
            offsets_to_dis.add(c.label.offset)

        if add_next_offset:
            cur_block.add_cst(offset, asm_constraint.c_next, self.symbol_pool)
            offsets_to_dis.add(offset)

        # Fix multiple constraints
        cur_block.fix_constraints()

        if self.dis_bloc_callback is not None:
            self.dis_bloc_callback(mn=self.arch, attrib=self.attrib,
                                   pool_bin=self.bin_stream, cur_bloc=cur_block,
                                   offsets_to_dis=offsets_to_dis,
                                   symbol_pool=self.symbol_pool)
        return cur_block, offsets_to_dis

    def dis_bloc(self, offset):
        """Disassemble the block at offset @offset and return the created
        asm_bloc
        @offset: targeted offset to disassemble
        """
        current_block, _ = self._dis_bloc(offset)
        return current_block

    def dis_multibloc(self, offset, blocs=None):
        """Disassemble every block reachable from @offset regarding
        specific disasmEngine conditions
        Return an AsmCFG instance containing disassembled blocks
        @offset: starting offset
        @blocs: (optional) AsmCFG instance of already disassembled blocks to
                merge with
        """
        log_asmbloc.info("dis bloc all")
        if blocs is None:
            blocs = AsmCFG()
        todo = [offset]

        bloc_cpt = 0
        while len(todo):
            bloc_cpt += 1
            if self.blocs_wd is not None and bloc_cpt > self.blocs_wd:
                log_asmbloc.debug("blocs watchdog reached at %X", int(offset))
                break

            # BFS over pending offsets; already-done targets are skipped
            target_offset = int(todo.pop(0))
            if (target_offset is None or
                    target_offset in self.job_done):
                continue
            cur_block, nexts = self._dis_bloc(target_offset)
            todo += nexts
            blocs.add_node(cur_block)

        # Split blocks whose interior is targeted by another block
        blocs.apply_splitting(self.symbol_pool,
                              dis_block_callback=self.dis_bloc_callback,
                              mn=self.arch, attrib=self.attrib,
                              pool_bin=self.bin_stream)
        return blocs
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.StorageManagementSystem.DB.StorageManagementDB import THROTTLING_STEPS, THROTTLING_TIME
import re
AGENT_NAME = 'StorageManagement/StageRequestAgent'
class StageRequestAgent(AgentModule):
  def initialize(self):
    """ Agent initialization: create the StorageManager client and read the
        agent options.
    """
    self.stagerClient = StorageManagerClient()
    # self.storageDB = StorageManagementDB()
    # pin lifetime = 1 day
    self.pinLifetime = self.am_getOption('PinLifetime', THROTTLING_TIME)
    # This sets the Default Proxy to be used as that defined under
    # /Operations/Shifter/DataManager;
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption('shifterProxy', 'DataManager')

    return S_OK()
def execute(self):
# Get the current submitted stage space and the amount of pinned space for each storage element
res = self.getStorageUsage()
if not res['OK']:
return res
return self.submitStageRequests()
  def getStorageUsage(self):
    """ Fill the current Status of the SE Caches from the DB.

        Populates self.storageElementUsage (per-SE dict with 'Replicas' and
        'TotalSize') and resets self.storageElementCache.
    """
    self.storageElementCache = {}

    res = self.stagerClient.getSubmittedStagePins()
    if not res['OK']:
      gLogger.fatal(
          "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.",
          res['Message'])
      return res
    self.storageElementUsage = res['Value']
    if self.storageElementUsage:
      gLogger.info("StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:")
      for storageElement in sorted(self.storageElementUsage.keys()):
        seDict = self.storageElementUsage[storageElement]
        # Convert to GB for printout
        # NOTE: this mutates the cached entry, so 'TotalSize' is in GB from
        # here on; __usage()/__cache() comparisons rely on GB units
        seDict['TotalSize'] = seDict['TotalSize'] / (1000 * 1000 * 1000.0)
        gLogger.info("StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB." %
                     (storageElement.ljust(15), str(seDict['Replicas']).rjust(6), seDict['TotalSize']))
    if not self.storageElementUsage:
      gLogger.info("StageRequest.getStorageUsage: No active stage/pin requests found.")

    return S_OK()
  def submitStageRequests(self):
    """ This manages the following transitions of the Replicas
        * Waiting -> Offline (if the file is not found Cached)
        * Waiting -> StageSubmitted (if the file is found Cached)
        * Offline -> StageSubmitted (if there are not more Waiting replicas)
    """
    # Retry Replicas that have not been Staged in a previous attempt
    res = self._getMissingReplicas()
    if not res['OK']:
      gLogger.fatal(
          "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
          res['Message'])
      return res
    seReplicas = res['Value']['SEReplicas']
    allReplicaInfo = res['Value']['AllReplicaInfo']

    if seReplicas:
      gLogger.info("StageRequest.submitStageRequests: Completing partially Staged Tasks")
    for storageElement, seReplicaIDs in seReplicas.items():
      gLogger.debug('Staging at %s:' % storageElement, seReplicaIDs)
      self._issuePrestageRequests(storageElement, seReplicaIDs, allReplicaInfo)

    # Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
    res = self._getOnlineReplicas()
    if not res['OK']:
      gLogger.fatal(
          "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
          res['Message'])
      return res
    seReplicas = res['Value']['SEReplicas']
    allReplicaInfo = res['Value']['AllReplicaInfo']

    # Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
    res = self._getOfflineReplicas()
    if not res['OK']:
      gLogger.fatal(
          "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
          res['Message'])
      return res

    # Merge info from both results
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      seReplicas.setdefault(storageElement, []).extend(seReplicaIDs)
    allReplicaInfo.update(res['Value']['AllReplicaInfo'])

    gLogger.info("StageRequest.submitStageRequests: Obtained %s replicas for staging." % len(allReplicaInfo))
    for storageElement, seReplicaIDs in seReplicas.items():
      gLogger.debug('Staging at %s:' % storageElement, seReplicaIDs)
      self._issuePrestageRequests(storageElement, seReplicaIDs, allReplicaInfo)
    return S_OK()
  def _getMissingReplicas(self):
    """ This recovers Replicas that were not Staged on a previous attempt (the stage request failed or timed out),
        while other Replicas of the same task are already Staged. If left behind they can produce a deadlock.
        All SEs are considered, even if their Cache is full.

        Returns the S_OK/S_ERROR structure produced by __addAssociatedReplicas
        ({'SEReplicas': ..., 'AllReplicaInfo': ...} on success).
    """
    # Get Replicas that are in Staged/StageSubmitted
    gLogger.info('StageRequest._getMissingReplicas: Checking Staged Replicas')

    res = self.__getStagedReplicas()
    if not res['OK']:
      gLogger.fatal(
          "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.",
          res['Message'])
      return res
    seReplicas = {}

    allReplicaInfo = res['Value']['AllReplicaInfo']
    replicasToStage = []
    for seReplicaIDs in res['Value']['SEReplicas'].values():
      # Consider all SEs
      replicasToStage += seReplicaIDs

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas(replicasToStage, seReplicas, allReplicaInfo)
    if not res['OK']:
      gLogger.fatal("StageRequest._getMissingReplicas: Failed to get associated Replicas.", res['Message'])

    return res
  def _getOnlineReplicas(self):
    """ This manages the transition
        * Waiting -> Offline (if the file is not found Cached)
        and returns the list of Cached Replicas for which the pin time has to be extended.
        SEs for which the cache is currently full are not considered.
    """
    # Get all Replicas in Waiting Status associated to Staging Tasks
    gLogger.verbose('StageRequest._getOnlineReplicas: Checking Online Replicas to be handled')

    res = self.__getWaitingReplicas()
    if not res['OK']:
      gLogger.fatal("StageRequest._getOnlineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'])
      return res
    seReplicas = {}
    allReplicaInfo = res['Value']['AllReplicaInfo']
    if not len(allReplicaInfo):
      gLogger.info("StageRequest._getOnlineReplicas: There were no Waiting replicas found")
      return res
    gLogger.info("StageRequest._getOnlineReplicas: Obtained %s replicas Waiting for staging." % len(allReplicaInfo))
    replicasToStage = []
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      if not self.__usage(storageElement) < self.__cache(storageElement):
        gLogger.info(
            'StageRequest._getOnlineReplicas: Skipping %s, current usage above limit ( %s GB )' %
            (storageElement, self.__cache(storageElement)))
        # Do not consider those SE that have the Cache full
        continue
      # Check if the Replica Metadata is OK and find out if they are Online or Offline
      # NOTE(review): __checkIntegrity is defined outside this excerpt; it is
      # expected to return {'Online': [...], 'Offline': [...]} — confirm.
      res = self.__checkIntegrity(storageElement, seReplicaIDs, allReplicaInfo)
      if not res['OK']:
        gLogger.error(
            'StageRequest._getOnlineReplicas: Failed to check Replica Metadata', '(%s): %s' %
            (storageElement, res['Message']))
      else:
        # keep only Online Replicas
        seReplicas[storageElement] = res['Value']['Online']
        replicasToStage += res['Value']['Online']

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas(replicasToStage, seReplicas, allReplicaInfo)
    if not res['OK']:
      gLogger.fatal("StageRequest._getOnlineReplicas: Failed to get associated Replicas.", res['Message'])

    return res
  def _getOfflineReplicas(self):
    """ This checks Replicas in Offline status
        and returns the list of Replicas to be Staged.
        SEs for which the cache is currently full are not considered.
    """
    # Get all Replicas in Waiting Status associated to Staging Tasks
    gLogger.verbose('StageRequest._getOfflineReplicas: Checking Offline Replicas to be handled')

    res = self.__getOfflineReplicas()
    if not res['OK']:
      gLogger.fatal(
          "StageRequest._getOfflineReplicas: Failed to get replicas from StorageManagementDB.",
          res['Message'])
      return res
    seReplicas = {}
    allReplicaInfo = res['Value']['AllReplicaInfo']
    if not len(allReplicaInfo):
      gLogger.info("StageRequest._getOfflineReplicas: There were no Offline replicas found")
      return res
    gLogger.info("StageRequest._getOfflineReplicas: Obtained %s replicas Offline for staging." % len(allReplicaInfo))
    replicasToStage = []
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      if not self.__usage(storageElement) < self.__cache(storageElement):
        gLogger.info(
            'StageRequest._getOfflineReplicas: Skipping %s, current usage above limit ( %s GB )' %
            (storageElement, self.__cache(storageElement)))
        # Do not consider those SE that have the Cache full
        continue
      seReplicas[storageElement] = []
      # Fill the SE cache with replicas (in deterministic order) until full
      for replicaID in sorted(seReplicaIDs):
        seReplicas[storageElement].append(replicaID)
        replicasToStage.append(replicaID)
        self.__add(storageElement, allReplicaInfo[replicaID]['Size'])
        if not self.__usage(storageElement) < self.__cache(storageElement):
          # Stop adding Replicas when the cache is full
          break

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas(replicasToStage, seReplicas, allReplicaInfo)
    if not res['OK']:
      gLogger.fatal("StageRequest._getOfflineReplicas: Failed to get associated Replicas.", res['Message'])

    return res
def __usage(self, storageElement):
""" Retrieve current usage of SE
"""
# Set it if not yet done
self.storageElementUsage.setdefault(storageElement, {'TotalSize': 0.})
return self.storageElementUsage[storageElement]['TotalSize']
def __cache(self, storageElement):
""" Retrieve cache size for SE
"""
if storageElement not in self.storageElementCache:
diskCacheTB = float(StorageElement(storageElement).options.get('DiskCacheTB', 1.0))
self.storageElementCache[storageElement] = diskCacheTB * 1000. / THROTTLING_STEPS
return self.storageElementCache[storageElement]
def __add(self, storageElement, size):
""" Add size (in bytes) to current usage of storageElement (in GB)
"""
self.storageElementUsage.setdefault(storageElement, {'TotalSize': 0.})
size /= 1000. * 1000. * 1000.
self.storageElementUsage[storageElement]['TotalSize'] += size
return size
  def _issuePrestageRequests(self, storageElement, seReplicaIDs, allReplicaInfo):
    """ Make the request to the SE and update the DB.

        Submits prestage (bring-online) requests for the given replica IDs at
        @storageElement, records the returned request IDs in the DB and moves
        the successfully submitted replicas to 'StageSubmitted'.
        NOTE(review): the method's return value is inconsistent (S_ERROR on
        failure, bare None otherwise); callers ignore it — confirm intended.
    """
    # Since we are in a give SE, the lfn is a unique key
    lfnRepIDs = {}
    for replicaID in seReplicaIDs:
      lfn = allReplicaInfo[replicaID]['LFN']
      lfnRepIDs[lfn] = replicaID

    # Now issue the prestage requests for the remaining replicas
    stageRequestMetadata = {}
    updatedLfnIDs = []
    if lfnRepIDs:
      gLogger.info(
          "StageRequest._issuePrestageRequests: Submitting %s stage requests for %s." %
          (len(lfnRepIDs), storageElement))
      res = StorageElement(storageElement).prestageFile(lfnRepIDs, lifetime=self.pinLifetime)
      gLogger.debug("StageRequest._issuePrestageRequests: StorageElement.prestageStorageFile: res=", res)
      # Daniela: fishy result from ReplicaManager!!! Should NOT return OK
      # res= {'OK': True, 'Value': {'Successful': {}, 'Failed': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/2010/RAW/EXPRESS/LHCb/COLLISION10/71476/071476_0000000241.raw': ' SRM2Storage.__gfal_exec: Failed to perform gfal_prestage.[SE][BringOnline][SRM_INVALID_REQUEST] httpg://srm-lhcb.cern.ch:8443/srm/managerv2: User not able to access specified space token\n'}}}
      # res= {'OK': True, 'Value': {'Successful': {'srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63495/063495_0000000001.raw': '-2083846379'}, 'Failed': {}}}
      if not res['OK']:
        gLogger.error(
            "StageRequest._issuePrestageRequests: Completely failed to submit stage requests for replicas.",
            res['Message'])
      else:
        # Group the submitted replica IDs by SRM request ID
        for lfn, requestID in res['Value']['Successful'].items():
          stageRequestMetadata.setdefault(requestID, []).append(lfnRepIDs[lfn])
          updatedLfnIDs.append(lfnRepIDs[lfn])
    if stageRequestMetadata:
      gLogger.info(
          "StageRequest._issuePrestageRequests: %s stage request metadata to be updated." %
          len(stageRequestMetadata))
      res = self.stagerClient.insertStageRequest(stageRequestMetadata, self.pinLifetime)
      if not res['OK']:
        gLogger.error("StageRequest._issuePrestageRequests: Failed to insert stage request metadata.", res['Message'])
        return res
      res = self.stagerClient.updateReplicaStatus(updatedLfnIDs, 'StageSubmitted')
      if not res['OK']:
        gLogger.error("StageRequest._issuePrestageRequests: Failed to insert replica status.", res['Message'])
    return
def __sortBySE(self, replicaDict):
seReplicas = {}
replicaIDs = {}
for replicaID, info in replicaDict.items():
lfn = info['LFN']
storageElement = info['SE']
size = info['Size']
pfn = info['PFN']
replicaIDs[replicaID] = {'LFN': lfn, 'PFN': pfn, 'Size': size, 'StorageElement': storageElement}
seReplicas.setdefault(storageElement, []).append(replicaID)
return S_OK({'SEReplicas': seReplicas, 'AllReplicaInfo': replicaIDs})
def __getStagedReplicas(self):
""" This obtains the Staged replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the Waiting replicas from the Replicas table
res = self.stagerClient.getStagedReplicas()
if not res['OK']:
gLogger.error("StageRequest.__getStagedReplicas: Failed to get replicas with Waiting status.", res['Message'])
return res
if not res['Value']:
gLogger.debug("StageRequest.__getStagedReplicas: No Waiting replicas found to process.")
else:
gLogger.debug("StageRequest.__getStagedReplicas: Obtained %s Waiting replicas(s) to process." % len(res['Value']))
return self.__sortBySE(res['Value'])
def __getWaitingReplicas(self):
""" This obtains the Waiting replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the Waiting replicas from the Replicas table
res = self.stagerClient.getWaitingReplicas()
if not res['OK']:
gLogger.error("StageRequest.__getWaitingReplicas: Failed to get replicas with Waiting status.", res['Message'])
return res
if not res['Value']:
gLogger.debug("StageRequest.__getWaitingReplicas: No Waiting replicas found to process.")
else:
gLogger.debug(
"StageRequest.__getWaitingReplicas: Obtained %s Waiting replicas(s) to process." %
len(
res['Value']))
return self.__sortBySE(res['Value'])
def __getOfflineReplicas(self):
    """ This obtains the Offline replicas from the Replicas table and for each LFN the requested storage element """
    # First obtain the Offline replicas from the Replicas table
    res = self.stagerClient.getOfflineReplicas()
    if not res['OK']:
        # Bug fix: the log messages previously said "Waiting" (copy/paste from
        # __getWaitingReplicas) although this method queries Offline replicas.
        gLogger.error("StageRequest.__getOfflineReplicas: Failed to get replicas with Offline status.", res['Message'])
        return res
    if not res['Value']:
        gLogger.debug("StageRequest.__getOfflineReplicas: No Offline replicas found to process.")
    else:
        gLogger.debug("StageRequest.__getOfflineReplicas: Obtained %s Offline replica(s) to process." % len(res['Value']))
    # Group the result per storage element for the caller.
    return self.__sortBySE(res['Value'])
def __addAssociatedReplicas(self, replicasToStage, seReplicas, allReplicaInfo):
    """ Retrieve the list of Replicas that belong to the same Tasks as the provided list

    Mutates all three arguments in place: replicasToStage and seReplicas are
    extended with the associated replicas, and allReplicaInfo is updated with
    their metadata (entries not selected for staging are pruned at the end).
    """
    # Ask the stager DB for every replica sharing a Task with the ones already selected.
    res = self.stagerClient.getAssociatedReplicas(replicasToStage)
    if not res['OK']:
        gLogger.fatal("StageRequest.__addAssociatedReplicas: Failed to get associated Replicas.", res['Message'])
        return res
    # Bucket the associated replicas by status and storage element;
    # anything not 'Waiting' or 'Offline' is ignored.
    addReplicas = {'Offline': {}, 'Waiting': {}}
    replicaIDs = {}
    for replicaID, info in res['Value'].items():
        lfn = info['LFN']
        storageElement = info['SE']
        size = info['Size']
        pfn = info['PFN']
        status = info['Status']
        if status in ['Waiting', 'Offline']:
            replicaIDs[replicaID] = {'LFN': lfn, 'PFN': pfn, 'Size': size, 'StorageElement': storageElement}
            addReplicas[status].setdefault(storageElement, []).append(replicaID)
    waitingReplicas = addReplicas['Waiting']
    offlineReplicas = addReplicas['Offline']
    newReplicaInfo = replicaIDs
    allReplicaInfo.update(newReplicaInfo)
    # First handle Waiting Replicas for which metadata is to be checked
    for storageElement, seReplicaIDs in waitingReplicas.items():
        # Drop IDs already selected; iterate a list() copy so in-place removal is safe.
        for replicaID in list(seReplicaIDs):
            if replicaID in replicasToStage:
                seReplicaIDs.remove(replicaID)
        res = self.__checkIntegrity(storageElement, seReplicaIDs, allReplicaInfo)
        if not res['OK']:
            gLogger.error(
                'StageRequest.__addAssociatedReplicas: Failed to check Replica Metadata', '(%s): %s' %
                (storageElement, res['Message']))
        else:
            # keep all Replicas (Online and Offline)
            seReplicas.setdefault(storageElement, []).extend(res['Value']['Online'])
            replicasToStage.extend(res['Value']['Online'])
            seReplicas[storageElement].extend(res['Value']['Offline'])
            replicasToStage.extend(res['Value']['Offline'])
    # Then handle Offline Replicas for which metadata is already checked
    for storageElement, seReplicaIDs in offlineReplicas.items():
        # sorted() yields a copy, so removing from the original list is safe here too.
        for replicaID in sorted(seReplicaIDs):
            if replicaID in replicasToStage:
                seReplicaIDs.remove(replicaID)
        seReplicas.setdefault(storageElement, []).extend(seReplicaIDs)
        replicasToStage.extend(seReplicaIDs)
    # Prune metadata for anything that did not make the final staging list
    # (list() copy of the keys because entries are deleted while iterating).
    for replicaID in list(allReplicaInfo):
        if replicaID not in replicasToStage:
            del allReplicaInfo[replicaID]
    # Accounting: log the selected volume per storage element and in total.
    totalSize = 0
    for storageElement in sorted(seReplicas.keys()):
        replicaIDs = seReplicas[storageElement]
        size = 0
        for replicaID in replicaIDs:
            # NOTE(review): self.__add presumably accumulates/converts the size to GB
            # (the log below reports GB) — confirm against its definition.
            size += self.__add(storageElement, allReplicaInfo[replicaID]['Size'])
        gLogger.info(
            'StageRequest.__addAssociatedReplicas: Considering %s GB to be staged at %s' %
            (size, storageElement))
        totalSize += size
    gLogger.info("StageRequest.__addAssociatedReplicas: Obtained %s GB for staging." % totalSize)
    return S_OK({'SEReplicas': seReplicas, 'AllReplicaInfo': allReplicaInfo})
def __checkIntegrity(self, storageElement, seReplicaIDs, allReplicaInfo):
    """ Check the integrity of the files to ensure they are available
        Updates status of Offline Replicas for a later pass
        Return list of Online replicas to be Stage
    """
    if not seReplicaIDs:
        # Nothing to check: empty input yields empty classification.
        return S_OK({'Online': [], 'Offline': []})
    # Since we are with a given SE, the LFN is a unique key
    lfnRepIDs = {}
    for replicaID in seReplicaIDs:
        lfn = allReplicaInfo[replicaID]['LFN']
        lfnRepIDs[lfn] = replicaID
    gLogger.info(
        "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s." %
        (len(lfnRepIDs), storageElement))
    # Query the SE for the current metadata of every candidate LFN.
    res = StorageElement(storageElement).getFileMetadata(lfnRepIDs)
    if not res['OK']:
        gLogger.error("StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.", res['Message'])
        return res
    # Classify each LFN: terminally failed, already online, or still offline.
    terminalReplicaIDs = {}
    onlineReplicaIDs = []
    offlineReplicaIDs = []
    for lfn, metadata in res['Value']['Successful'].items():
        if metadata['Size'] != allReplicaInfo[lfnRepIDs[lfn]]['Size']:
            # SE size disagrees with the catalogue: terminal failure for this replica.
            gLogger.error("StageRequest.__checkIntegrity: LFN StorageElement size does not match FileCatalog", lfn)
            terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN StorageElement size does not match FileCatalog'
            lfnRepIDs.pop(lfn)
        elif metadata.get('Lost', False):
            gLogger.error("StageRequest.__checkIntegrity: LFN has been Lost by the StorageElement", lfn)
            terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN has been Lost by the StorageElement'
            lfnRepIDs.pop(lfn)
        elif metadata.get('Unavailable', False):
            gLogger.error("StageRequest.__checkIntegrity: LFN is declared Unavailable by the StorageElement", lfn)
            terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN is declared Unavailable by the StorageElement'
            lfnRepIDs.pop(lfn)
        elif metadata.get('Cached', metadata['Accessible']):
            # 'Cached' (falling back to 'Accessible' when absent) is treated as a cache hit.
            gLogger.verbose("StageRequest.__checkIntegrity: Cache hit for file.")
            onlineReplicaIDs.append(lfnRepIDs[lfn])
        else:
            offlineReplicaIDs.append(lfnRepIDs[lfn])
    for lfn, reason in res['Value']['Failed'].items():
        if re.search('File does not exist', reason):
            gLogger.error("StageRequest.__checkIntegrity: LFN does not exist in the StorageElement", lfn)
            terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN does not exist in the StorageElement'
            lfnRepIDs.pop(lfn)
    # Update the states of the replicas in the database #TODO Sent status to integrity DB
    if terminalReplicaIDs:
        gLogger.info("StageRequest.__checkIntegrity: %s replicas are terminally failed." % len(terminalReplicaIDs))
        res = self.stagerClient.updateReplicaFailure(terminalReplicaIDs)
        if not res['OK']:
            gLogger.error("StageRequest.__checkIntegrity: Failed to update replica failures.", res['Message'])
    if onlineReplicaIDs:
        gLogger.info("StageRequest.__checkIntegrity: %s replicas found Online." % len(onlineReplicaIDs))
    if offlineReplicaIDs:
        gLogger.info("StageRequest.__checkIntegrity: %s replicas found Offline." % len(offlineReplicaIDs))
        # Mark them 'Offline' so a later pass can retry once staging completes.
        res = self.stagerClient.updateReplicaStatus(offlineReplicaIDs, 'Offline')
    return S_OK({'Online': onlineReplicaIDs, 'Offline': offlineReplicaIDs})
| gpl-3.0 |
jeonghoonkang/BerePi | apps/hue/cycling_light.py | 1 | 1388 |
# -*- coding: utf-8 -*-
# Author : jeonghoonkang, https://github.com/jeonghoonkang
import httplib
import time
# Persistent HTTP connection to the Philips Hue bridge (address redacted in source).
conn = httplib.HTTPConnection("10.xxx.xxx.xxxx")
# Per-bridge API username/token (redacted in source).
hue_uid = "c274b3c285d19cfxxxxxxxxxx"
# Base REST path of the Hue lights resource.
restcmd = "/api"+hue_uid+"/lights"
# NOTE(review): shadows the builtin 'str' and is never read afterwards — candidate for removal.
str = " "
# Rotating palette of four hue values sent to the bulbs.
xhue = [10000,25000,46000,56280]
def shifthue() :
global str
global xhue
xhue.insert(0,xhue[-1])
xhue = xhue[0:4]
print xhue
callurl = restcmd + "/4/state"
try:
conn.request("PUT",callurl ,'{"on":false}')
response = conn.getresponse()
data = response.read()
except:
print "keep goging...."
time.sleep(2)
time.sleep(1)
for num in [3,2,1,4] :
callurl = restcmd + "/%s/state"%(num)
print callurl
huenumber = (xhue[4-num])
try :
conn.request("PUT",callurl ,'{"on":false}')
response = conn.getresponse()
data = response.read()
time.sleep(1)
conn.request("PUT",callurl ,'{"on":true, "sat":254, "bri":254, "hue":%s}'%huenumber)
response = conn.getresponse()
data = response.read()
print data
time.sleep(1)
except:
print "exception conn.getresponse from Hue GW"
time.sleep(2)
if __name__ == "__main__":
    # print web()
    # Cycle the lights forever, shifting the palette every ~5 seconds.
    while True:
        shifthue()
        # Bug fix: the closing parenthesis was missing (SyntaxError).
        time.sleep(5)
| bsd-2-clause |
aaron-fz/neutron_full_sync | neutron/tests/unit/agent/linux/test_ovs_lib.py | 1 | 39055 | # Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo.config import cfg
import testtools
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import jsonutils
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants as const
from neutron.tests import base
from neutron.tests import tools
# Python < 2.7 has no collections.OrderedDict; fall back to the external
# 'ordereddict' backport package so the OrderedDict name is always bound.
try:
    OrderedDict = collections.OrderedDict
except AttributeError:
    import ordereddict
    OrderedDict = ordereddict.OrderedDict
# Per its name, a kernel version string used by tests as one lacking native
# OVS VXLAN support; its use is outside this chunk — confirm against callers.
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
class TestBaseOVS(base.BaseTestCase):
    """Unit tests for ovs_lib.BaseOVS; all ovs-vsctl invocations are mocked."""

    def setUp(self):
        super(TestBaseOVS, self).setUp()
        self.root_helper = 'sudo'
        self.ovs = ovs_lib.BaseOVS(self.root_helper)
        self.br_name = 'bridge1'

    def test_add_bridge(self):
        # add_bridge must be idempotent: '--may-exist' is passed to ovs-vsctl.
        with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
            bridge = self.ovs.add_bridge(self.br_name)
            mock_vsctl.assert_called_with(["--", "--may-exist",
                                           "add-br", self.br_name])
            self.assertEqual(bridge.br_name, self.br_name)
            self.assertEqual(bridge.root_helper, self.ovs.root_helper)

    def test_delete_bridge(self):
        # Deletion is likewise tolerant of a missing bridge ('--if-exists').
        with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
            self.ovs.delete_bridge(self.br_name)
            mock_vsctl.assert_called_with(["--", "--if-exists", "del-br",
                                           self.br_name])

    def test_bridge_exists_returns_true(self):
        with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
            self.assertTrue(self.ovs.bridge_exists(self.br_name))
            mock_vsctl.assert_called_with(['br-exists', self.br_name],
                                          check_error=True)

    def test_bridge_exists_returns_false_for_exit_code_2(self):
        # Exit code 2 from 'ovs-vsctl br-exists' means the bridge is absent.
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError('Exit code: 2\n')):
            self.assertFalse(self.ovs.bridge_exists('bridge1'))

    def test_bridge_exists_raises_unknown_exception(self):
        # Any other failure must propagate unchanged.
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError()):
            with testtools.ExpectedException(RuntimeError):
                self.ovs.bridge_exists('bridge1')

    def test_get_bridge_name_for_port_name_returns_bridge_for_valid_port(self):
        port_name = 'bar'
        with mock.patch.object(self.ovs, 'run_vsctl',
                               return_value=self.br_name) as mock_vsctl:
            bridge = self.ovs.get_bridge_name_for_port_name(port_name)
            self.assertEqual(bridge, self.br_name)
            mock_vsctl.assert_called_with(['port-to-br', port_name],
                                          check_error=True)

    def test_get_bridge_name_for_port_name_returns_none_for_exit_code_1(self):
        # Exit code 1 from 'port-to-br' means the port is unknown -> falsy result.
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError('Exit code: 1\n')):
            self.assertFalse(self.ovs.get_bridge_name_for_port_name('bridge1'))

    def test_get_bridge_name_for_port_name_raises_unknown_exception(self):
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError()):
            with testtools.ExpectedException(RuntimeError):
                self.ovs.get_bridge_name_for_port_name('bridge1')

    def _test_port_exists(self, br_name, result):
        # Helper: port_exists is true iff a bridge name resolves for the port.
        with mock.patch.object(self.ovs,
                               'get_bridge_name_for_port_name',
                               return_value=br_name):
            self.assertEqual(self.ovs.port_exists('bar'), result)

    def test_port_exists_returns_true_for_bridge_name(self):
        self._test_port_exists(self.br_name, True)

    def test_port_exists_returns_false_for_none(self):
        self._test_port_exists(None, False)
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=10"
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def test_set_controller(self):
controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']
self.br.set_controller(controller_names)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-controller', self.BR_NAME,
'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'],
root_helper=self.root_helper)
def test_del_controller(self):
self.br.del_controller()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'del-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_get_controller(self):
self.execute.return_value = 'tcp:127.0.0.1:6633\ntcp:172.17.16.10:5555'
names = self.br.get_controller()
self.assertEqual(names,
['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'])
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'get-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_set_secure_mode(self):
self.br.set_secure_mode()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-fail-mode', self.BR_NAME,
'secure'], root_helper=self.root_helper)
def test_set_protocols(self):
protocols = 'OpenFlow13'
self.br.set_protocols(protocols)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set', 'bridge', self.BR_NAME,
"protocols=%s" % protocols],
root_helper=self.root_helper)
def test_create(self):
self.br.add_bridge(self.BR_NAME)
self.br.create()
def test_destroy(self):
self.br.delete_bridge(self.BR_NAME)
self.br.destroy()
def test_reset_bridge(self):
self.br.destroy()
self.br.create()
self.br.reset_bridge()
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def _test_delete_port(self, exp_timeout=None):
exp_timeout_str = self._build_timeout_opt(exp_timeout)
pname = "tap5"
self.br.delete_port(pname)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
def test_delete_port(self):
self._test_delete_port()
def test_call_command_non_default_timeput(self):
# This test is only for verifying a non-default timeout
# is correctly applied. Does not need to be repeated for
# every ovs_lib method
new_timeout = 5
self.br.vsctl_timeout = new_timeout
self._test_delete_port(new_timeout)
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = OrderedDict([('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = OrderedDict([('priority', 1),
('actions', 'normal')])
flow_dict_3 = OrderedDict([('priority', 2),
('actions', 'drop')])
flow_dict_4 = OrderedDict([('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = OrderedDict([
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = OrderedDict([
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = OrderedDict([
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef"
",actions=strip_vlan,output:0"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,in_port=%s,actions=drop" % ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (vid, ofport, lsw_id)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=3,tun_id=%s,actions="
"mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,nw_src=%s,arp,actions=drop" % cidr],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_add_flow_timeout_set(self):
flow_dict = OrderedDict([('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=1000,idle_timeout=2000,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def test_add_flow_default_priority(self):
flow_dict = OrderedDict([('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def _test_get_port_ofport(self, ofport, expected_result):
pname = "tap99"
self.execute.return_value = ofport
self.assertEqual(self.br.get_port_ofport(pname), expected_result)
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"],
root_helper=self.root_helper)
def test_get_port_ofport_succeeds_for_valid_ofport(self):
self._test_get_port_ofport("6", "6")
def test_get_port_ofport_returns_invalid_ofport_for_non_int(self):
self._test_get_port_ofport("[]", const.INVALID_OFPORT)
def test_get_port_ofport_returns_invalid_ofport_for_none(self):
self._test_get_port_ofport(None, const.INVALID_OFPORT)
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
self.execute.return_value = datapath_id
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.execute.assert_called_once_with(
["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper,
process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"in_port=" + ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"tun_id=%s" % lsw_id],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"dl_vlan=%s" % vid],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_dump_flows(self):
table = 23
nxst_flow = "NXST_FLOW reply (xid=0x4):"
flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
"n_packets=6, n_bytes=468, "
"priority=2,in_port=1 actions=drop",
" cookie=0x0, duration=18027.562s, table=0, "
"n_packets=0, n_bytes=0, "
"priority=3,in_port=1,dl_vlan=100 "
"actions=mod_vlan_vid:1,NORMAL",
" cookie=0x0, duration=18044.351s, table=0, "
"n_packets=9, n_bytes=594, priority=1 "
"actions=NORMAL", " cookie=0x0, "
"duration=18044.211s, table=23, n_packets=0, "
"n_bytes=0, priority=0 actions=drop"])
flow_args = '\n'.join([nxst_flow, flows])
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = [flow_args]
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(flows, retflows)
def test_dump_flows_ovs_dead(self):
table = 23
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = ['']
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(None, retflows)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_defer_apply_flows(self):
flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
flow_expr.side_effect = ['added_flow_1', 'added_flow_2',
'deleted_flow_1']
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_1')
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_2')
self.br.delete_flows(flow='delete_flow_1')
self.br.defer_apply_off()
flow_expr.assert_has_calls([
mock.call({'flow': 'add_flow_1'}, 'add'),
mock.call({'flow': 'add_flow_2'}, 'add'),
mock.call({'flow': 'delete_flow_1'}, 'del')
])
run_ofctl.assert_has_calls([
mock.call('add-flows', ['-'], 'added_flow_1\nadded_flow_2\n'),
mock.call('del-flows', ['-'], 'deleted_flow_1\n')
])
def test_defer_apply_flows_concurrently(self):
flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
flow_expr.side_effect = ['added_flow_1', 'deleted_flow_1',
'modified_flow_1', 'added_flow_2',
'deleted_flow_2', 'modified_flow_2']
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
def run_ofctl_fake(cmd, args, process_input=None):
self.br.defer_apply_on()
if cmd == 'add-flows':
self.br.add_flow(flow='added_flow_2')
elif cmd == 'del-flows':
self.br.delete_flows(flow='deleted_flow_2')
elif cmd == 'mod-flows':
self.br.mod_flow(flow='modified_flow_2')
run_ofctl.side_effect = run_ofctl_fake
self.br.defer_apply_on()
self.br.add_flow(flow='added_flow_1')
self.br.delete_flows(flow='deleted_flow_1')
self.br.mod_flow(flow='modified_flow_1')
self.br.defer_apply_off()
run_ofctl.side_effect = None
self.br.defer_apply_off()
flow_expr.assert_has_calls([
mock.call({'flow': 'added_flow_1'}, 'add'),
mock.call({'flow': 'deleted_flow_1'}, 'del'),
mock.call({'flow': 'modified_flow_1'}, 'mod'),
mock.call({'flow': 'added_flow_2'}, 'add'),
mock.call({'flow': 'deleted_flow_2'}, 'del'),
mock.call({'flow': 'modified_flow_2'}, 'mod')
])
run_ofctl.assert_has_calls([
mock.call('add-flows', ['-'], 'added_flow_1\n'),
mock.call('del-flows', ['-'], 'deleted_flow_1\n'),
mock.call('mod-flows', ['-'], 'modified_flow_1\n'),
mock.call('add-flows', ['-'], 'added_flow_2\n'),
mock.call('del-flows', ['-'], 'deleted_flow_2\n'),
mock.call('mod-flows', ['-'], 'modified_flow_2\n')
])
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = "6"
command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper), None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_fragmented_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = "6"
vxlan_udp_port = "9999"
dont_fragment = False
command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + p_const.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=false",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper), None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
p_const.TYPE_VXLAN, vxlan_udp_port,
dont_fragment),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
# Each element is a tuple of (expected mock call, return_value)
command = ["ovs-vsctl", self.TO, "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=patch", "options:peer=" + peer])
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper),
None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport)
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
"%s\n" % pname),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper),
external_ids),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
if is_xen:
expected_calls_and_values.append(
(mock.call(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper),
vif_id)
)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _encode_ovs_json(self, headings, data):
    # See man ovs-vsctl(8) for the encoding details.
    def _encode_cell(cell):
        """Translate one table cell into its ovs-vsctl JSON representation."""
        if isinstance(cell, (str, int, list)):
            return cell
        if isinstance(cell, dict):
            return ["map", cell.items()]
        if isinstance(cell, set):
            return ["set", cell]
        raise TypeError('%r not int, str, list, set or dict' %
                        type(cell))

    encoded = {"data": [], "headings": headings}
    for row in data:
        encoded["data"].append([_encode_cell(cell) for cell in row])
    return jsonutils.dumps(encoded)
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# A vif port on another bridge:
['tap88', {id_key: 'tap88id', 'attached-mac': 'tap88id'}, 1],
# Non-vif port on this bridge:
['tun22', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\ntun22'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\n'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'\n'.join((iface for iface, tag in data))),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,tag",
"list", "Port"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "clear", "Port", pname, "tag"],
root_helper=self.root_helper)
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
self.execute.return_value = 'br-int'
exp_timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "iface-to-br", iface],
root_helper=root_helper)
    def test_iface_to_br(self):
        # Default ovs_vsctl_timeout.
        self._test_iface_to_br()
    def test_iface_to_br_non_default_timeout(self):
        # An overridden ovs_vsctl_timeout must appear on the command line.
        new_timeout = 5
        cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
        self._test_iface_to_br(new_timeout)
    def test_iface_to_br_handles_ovs_vsctl_exception(self):
        # Any ovs-vsctl failure is swallowed and reported as "no bridge".
        iface = 'tap0'
        root_helper = 'sudo'
        self.execute.side_effect = Exception

        self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
        self.execute.assert_called_once_with(
            ["ovs-vsctl", self.TO, "iface-to-br", iface],
            root_helper=root_helper)
    def test_delete_all_ports(self):
        # delete_ports(all_ports=True) removes every port name on the bridge.
        with mock.patch.object(self.br, 'get_port_name_list',
                               return_value=['port1']) as get_port:
            with mock.patch.object(self.br, 'delete_port') as delete_port:
                self.br.delete_ports(all_ports=True)
        get_port.assert_called_once_with()
        delete_port.assert_called_once_with('port1')
    def test_delete_neutron_ports(self):
        # delete_ports(all_ports=False) must remove only the Neutron (VIF)
        # ports reported by get_vif_ports().
        port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
                                'ca:fe:de:ad:be:ef', 'br')
        port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
                                'ca:ee:de:ad:be:ef', 'br')
        with mock.patch.object(self.br, 'get_vif_ports',
                               return_value=[port1, port2]) as get_ports:
            with mock.patch.object(self.br, 'delete_port') as delete_port:
                self.br.delete_ports(all_ports=False)
        get_ports.assert_called_once_with()
        delete_port.assert_has_calls([
            mock.call('tap1234'),
            mock.call('tap5678')
        ])
    def test_delete_neutron_ports_list_error(self):
        # A failure while listing ports must propagate out of delete_ports().
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                       root_helper=self.root_helper),
             RuntimeError()),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
    def _test_get_bridges(self, exp_timeout=None):
        # Helper: get_bridges() splits "list-br" output into a list and
        # passes the expected vsctl timeout option.
        bridges = ['br-int', 'br-ex']
        root_helper = 'sudo'
        self.execute.return_value = 'br-int\nbr-ex\n'
        timeout_str = self._build_timeout_opt(exp_timeout)
        self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
        self.execute.assert_called_once_with(
            ["ovs-vsctl", timeout_str, "list-br"],
            root_helper=root_helper)
    def test_get_bridges(self):
        # Default ovs_vsctl_timeout.
        self._test_get_bridges()
    def test_get_bridges_not_default_timeout(self):
        # An overridden ovs_vsctl_timeout must appear on the command line.
        new_timeout = 5
        cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
        self._test_get_bridges(new_timeout)
    def test_get_local_port_mac_succeeds(self):
        # The bridge-local port's MAC comes from the IP link device address.
        with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
                        return_value=mock.Mock(address='foo')):
            self.assertEqual('foo', self.br.get_local_port_mac())
    def test_get_local_port_mac_raises_exception_for_missing_mac(self):
        # A device without an address must raise instead of returning None.
        with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
                        return_value=mock.Mock(address=None)):
            with testtools.ExpectedException(Exception):
                self.br.get_local_port_mac()
    def _test_get_vif_port_by_id(self, iface_id, data, br_name=None):
        # Helper: run get_vif_port_by_id() against a canned "find Interface"
        # JSON result and return whatever the method yields.
        headings = ['external_ids', 'name', 'ofport']
        # Each element is a tuple of (expected mock call, return_value)
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "--format=json",
                        "--", "--columns=external_ids,name,ofport",
                        "find", "Interface",
                        'external_ids:iface-id="%s"' % iface_id],
                       root_helper=self.root_helper),
             self._encode_ovs_json(headings, data))]
        if data:
            if not br_name:
                br_name = self.BR_NAME

            # The bridge lookup only happens when a matching row was found.
            expected_calls_and_values.append(
                (mock.call(["ovs-vsctl", self.TO,
                            "iface-to-br", data[0][headings.index('name')]],
                           root_helper=self.root_helper),
                 br_name))
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        vif_port = self.br.get_vif_port_by_id(iface_id)

        tools.verify_mock_calls(self.execute, expected_calls_and_values)
        return vif_port
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
if mac:
external_ids.append(["attached-mac", mac])
data = [[["map", external_ids], "tap99",
ofport if ofport else '["set",[]]']]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port)
return
self.assertEqual(vif_port.vif_id, 'tap99id')
self.assertEqual(vif_port.vif_mac, 'aa:bb:cc:dd:ee:ff')
self.assertEqual(vif_port.port_name, 'tap99')
self.assertEqual(vif_port.ofport, ofport)
    def test_get_vif_by_port_id_with_ofport(self):
        # Valid ofport + mac -> a full VifPort is returned.
        self._test_get_vif_port_by_id_with_data(
            ofport=1, mac="aa:bb:cc:dd:ee:ff")
    def test_get_vif_by_port_id_without_ofport(self):
        # Missing ofport -> lookup yields None.
        self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
    def test_get_vif_by_port_id_with_invalid_ofport(self):
        # An ofport of -1 marks an invalid port -> None.
        self._test_get_vif_port_by_id_with_data(
            ofport=-1, mac="aa:bb:cc:dd:ee:ff")
    def test_get_vif_by_port_id_without_mac(self):
        # Missing attached-mac -> None.
        self._test_get_vif_port_by_id_with_data(ofport=1)
    def test_get_vif_by_port_id_with_no_data(self):
        # No matching Interface row -> None.
        self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
    def test_get_vif_by_port_id_different_bridge(self):
        # A port that lives on another bridge is not this bridge's VIF.
        external_ids = [["iface-id", "tap99id"],
                        ["iface-status", "active"]]
        data = [[["map", external_ids], "tap99", 1]]
        self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
                                                        "br-ext"))
| apache-2.0 |
alajara/servo | tests/wpt/css-tests/tools/html5lib/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
# Character-class source data transcribed from the XML 1.0 specification's
# Name productions.  These " | "-separated strings are parsed by
# charStringToList() below when regenerating the precompiled regexps further
# down; at import time they are only concatenated, never parsed.
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""

# CJK ideographs permitted in XML names.
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""

# Combining marks permitted after the first character of a name.
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""

# Decimal digits from the world's scripts permitted in names.
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
# Extender characters from the XML 1.0 Name productions.
# BUG FIX: the original had a stray "#" before "[#x3031-#x3035]"; that token
# matches neither reChar nor reCharRange, so charStringToList() would trip
# its ``assert len(item) == 1`` if asked to parse this string.
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
# Composite classes for the XML Name productions: "name" is any NameChar,
# "nameFirst" is the stricter set allowed as a Name's first character.
letter = " | ".join([baseChar, ideographic])

name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
                   extender])
nameFirst = " | ".join([letter, "_"])
# Tokens in the character-class source strings: "#xHHHH" single code points
# and "[#xHHHH-#xHHHH]" ranges.
# BUG FIX: the original classes were "[\d|A-F]{4,4}" -- inside [...] the "|"
# is a literal, so "|" was wrongly accepted as a hex digit (and would later
# crash int(x, 16) in hexToInt); {4,4} is simply {4}.
reChar = re.compile(r"#x([\dA-F]{4})")
reCharRange = re.compile(r"\[#x([\dA-F]{4})-#x([\dA-F]{4})\]")
def charStringToList(chars):
    """Parse a " | "-separated character-class source string into a
    normalised list of [low, high] code-point ranges."""
    ranges = []
    for item in (piece.strip() for piece in chars.split(" | ")):
        for pattern in (reChar, reCharRange):
            match = pattern.match(item)
            if match is not None:
                bounds = [hexToInt(group) for group in match.groups()]
                if len(bounds) == 1:
                    # A single code point is represented as a 1-wide range.
                    bounds = bounds * 2
                ranges.append(bounds)
                break
        else:
            # Not a #xHHHH token: must be a literal single character.
            assert len(item) == 1
            ranges.append([ord(item)] * 2)
    return normaliseCharList(ranges)
def normaliseCharList(charList):
    """Sort [low, high] ranges and merge overlapping or adjacent ones.

    Sublists from the input are reused (and may be mutated) in the result,
    matching the original behaviour.
    """
    charList = sorted(charList)
    for item in charList:
        assert item[1] >= item[0]
    rv = []
    i = 0
    while i < len(charList):
        j = 1
        rv.append(charList[i])
        while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
            # BUG FIX: keep the larger high bound.  The original blindly
            # overwrote rv[-1][1], so a range fully contained in the previous
            # one (e.g. [1, 10] then [2, 3]) *shrank* the merged range.
            rv[-1][1] = max(rv[-1][1], charList[i + j][1])
            j += 1
        i += j
    return rv
# We don't really support characters above the BMP :(
max_unicode = 0xFFFF


def missingRanges(charList):
    """Return the [low, high] ranges within 0..max_unicode that are *not*
    covered by charList (a sorted, normalised list of [low, high] pairs).
    """
    rv = []
    # BUG FIX: the original tested ``charList[0] != 0`` -- comparing a
    # [low, high] *list* against 0, which is always true -- so a first range
    # starting at 0 produced a bogus leading [0, -1] entry.
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
def listToRegexpStr(charList):
    """Render a list of [low, high] code-point ranges as a regexp
    character-class string like "[a-cx]"."""
    pieces = []
    for low, high in charList:
        if low == high:
            pieces.append(escapeRegexp(chr(low)))
        else:
            pieces.append("%s-%s" % (escapeRegexp(chr(low)),
                                     escapeRegexp(chr(high))))
    return "[%s]" % "".join(pieces)
def hexToInt(hex_str):
    """Parse a hexadecimal string such as "00C0" into an integer."""
    return int(hex_str, base=16)
def escapeRegexp(string):
    """Backslash-escape every regexp metacharacter occurring in *string*."""
    special = frozenset(".^$*+?{}[]|()-")
    return "".join("\\" + ch if ch in special else ch for ch in string)
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
# Matches any character not allowed in an XML public identifier.
# BUG FIX: made the pattern a raw string -- the original non-raw literal
# contained the invalid escape sequence "\-", which is a DeprecationWarning
# today and a SyntaxError in future Python versions.  The compiled pattern
# is unchanged (the re module interprets \x20 etc. identically).
nonPubidCharRegexp = re.compile(r"[^\x20\x0D\x0Aa-zA-Z0-9\-'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
    """Coerces names, comments, pubids and character data so that HTML
    parse output can be represented in an XML infoset, emitting a
    DataLossWarning whenever information is lost in the process.
    """

    # Matches the "UXXXXX" escapes produced by escapeChar(), so that
    # fromXmlName() can reverse the substitution.
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self, replaceChars=None,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # NOTE(review): replaceChars is accepted but never stored or used.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs

        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd

        self.replaceFormFeedCharacters = replaceFormFeedCharacters

        self.preventSingleQuotePubid = preventSingleQuotePubid

        # Memo of char -> "UXXXXX" replacement, shared by escapeChar().
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        # Attributes named xmlns:* or living in the xmlns namespace cannot
        # be represented; drop them entirely (return None) and warn.
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name, namespace=None):
        return self.toXmlName(name)

    def coerceComment(self, data):
        # XML comments may not contain "--"; break each run up with a space.
        if self.preventDoubleDashComments:
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
        return data

    def coerceCharacters(self, data):
        # U+000C (form feed) is not a valid XML character; warn once per
        # occurrence, then replace them all with spaces.
        if self.replaceFormFeedCharacters:
            for i in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        # Replace characters illegal in a pubid with "UXXXXX" escapes;
        # optionally also escape single quotes.
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        # Escape characters invalid in an XML Name; the first character has
        # a stricter production than the rest.
        # NOTE(review): an empty name raises IndexError here -- presumably
        # callers never pass one; confirm before relying on it.
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst

        nameRestOutput = nameRest
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        # Memoised lookup of the "UXXXXX" escape for char.
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        # Reverse toXmlName(): turn "UXXXXX" escapes back into characters.
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        # Five hex digits are enough for the BMP-only range this module
        # supports; the result is cached for reuse.
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        return chr(int(charcode[1:], 16))
| mpl-2.0 |
yoer/hue | desktop/core/ext-py/markdown/markdown/extensions/rss.py | 131 | 3693 | import markdown
from markdown import etree
# Fallback feed metadata used when no extension configuration is supplied.
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"

# Month-name -> two-digit month number, used by get_time() when parsing
# headings.  NOTE(review): the keys mix abbreviated ("Jan", "Feb") and full
# names ("March", "April", ...) -- presumably matching the heading format of
# the source documents; confirm before extending.
month_map = { "Jan" : "01",
              "Feb" : "02",
              "March" : "03",
              "April" : "04",
              "May" : "05",
              "June" : "06",
              "July" : "07",
              "August" : "08",
              "September" : "09",
              "October" : "10",
              "November" : "11",
              "December" : "12" }
def get_time(heading):
    # Parse a heading like "March 3, 2008 - ..." into an RDF timestamp fixed
    # at noon.  Assumes the month name is a key of month_map and that the
    # text before the first "-" is "<month> <day>, <year>" -- TODO confirm
    # the heading format against the source documents.
    heading = heading.split("-")[0]
    heading = heading.strip().replace(",", " ").replace(".", " ")
    month, date, year = heading.split()
    month = month_map[month]
    return rdftime(" ".join((month, date, year, "12:00:00 AM")))
def rdftime(time):
    """Convert a "MM DD YYYY hh:mm:ss ..." string (":" and "/" are also
    accepted as field separators) into an ISO-style timestamp with a fixed
    -08:00 offset."""
    fields = time.replace(":", " ").replace("/", " ").split()
    return "%s-%s-%sT%s:%s:%s-08:00" % (fields[0], fields[1], fields[2],
                                        fields[3], fields[4], fields[5])
def get_date(text):
    # Stub: always returns the literal string "date", never a real date.
    return "date"
class RssExtension (markdown.Extension):
    """Markdown extension that serialises the document as an RSS 2.0 feed."""

    def extendMarkdown(self, md, md_globals):

        # Default config; each value is [value, description], following the
        # markdown extension configuration convention.
        self.config = { 'URL' : [DEFAULT_URL, "Main URL"],
                        'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"],
                        'TITLE' : [DEFAULT_TITLE, "Feed title"] }

        md.xml_mode = True

        # Insert a tree-processor that would actually add the title tag
        treeprocessor = RssTreeProcessor(md)
        treeprocessor.ext = self
        md.treeprocessors['rss'] = treeprocessor

        md.stripTopLevelTags = 0

        md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
    """Rebuilds the document tree as <rss><channel>...; each h1-h5 heading
    opens a new <item> and the <p> elements that follow it become the item's
    description."""

    def run (self, root):

        rss = etree.Element("rss")
        rss.set("version", "2.0")

        channel = etree.SubElement(rss, "channel")

        # Feed-level metadata from the extension configuration.
        for tag, text in (("title", self.ext.getConfig("TITLE")),
                          ("link", self.ext.getConfig("URL")),
                          ("description", None)):

            element = etree.SubElement(channel, tag)
            element.text = text

        for child in root:

            if child.tag in ["h1", "h2", "h3", "h4", "h5"]:

                heading = child.text.strip()
                item = etree.SubElement(channel, "item")
                link = etree.SubElement(item, "link")
                link.text = self.ext.getConfig("URL")
                title = etree.SubElement(item, "title")
                title.text = heading

                # The guid is the heading reduced to its alphanumerics.
                guid = ''.join([x for x in heading if x.isalnum()])
                guidElem = etree.SubElement(item, "guid")
                guidElem.text = guid
                guidElem.set("isPermaLink", "false")

            elif child.tag in ["p"]:
                # "item" is only bound once a heading has been seen, so a
                # paragraph before the first heading raises UnboundLocalError
                # here and is deliberately dropped.
                try:
                    description = etree.SubElement(item, "description")
                except UnboundLocalError:
                    # Item not defined - moving on
                    pass
                else:
                    if len(child):
                        content = "\n".join([etree.tostring(node)
                                             for node in child])
                    else:
                        content = child.text
                    # Stash the CDATA block so later serialisation steps
                    # leave it untouched.
                    pholder = self.markdown.htmlStash.store(
                                "<![CDATA[ %s]]>" % content)
                    description.text = pholder

        return rss
def makeExtension(configs):
    # Entry point used by markdown.Markdown(extensions=[...]).
    return RssExtension(configs)
| apache-2.0 |
nvoron23/arangodb | 3rdParty/V8-4.3.61/build/detect_v8_host_arch.py | 47 | 2980 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Outputs host CPU architecture in format recognized by gyp."""
import platform
import re
import sys
def main():
  """Print the detected host architecture; return the process exit code."""
  # print(...) with a single argument behaves identically on Python 2 and 3;
  # the original "print DoMain([])" statement is a SyntaxError on Python 3.
  print(DoMain([]))
  return 0
def DoMain(_):
  """Hook to be called from gyp without starting a separate python
  interpreter."""
  machine = platform.machine()
  system = platform.system()

  # Map the kernel's machine name onto gyp's architecture names; anything
  # unrecognised falls through unchanged.
  arch = machine
  if re.match(r'i.86', machine) or machine == 'i86pc':
    arch = 'ia32'
  elif machine in ('x86_64', 'amd64'):
    arch = 'x64'
  elif machine.startswith('arm'):
    arch = 'arm'
  elif machine == 'aarch64':
    arch = 'arm64'
  elif machine == 'mips64':
    arch = 'mips64el'
  elif machine.startswith('mips'):
    arch = 'mipsel'

  # Under AIX the value returned by platform.machine is not the best
  # indicator of the host architecture; AIX 6.1, the lowest supported
  # level, only provides a 64-bit kernel.
  if system == 'AIX':
    arch = 'ppc64'

  # A 64-bit kernel may run a 32-bit userland (e.g. to give the linker
  # slightly more memory), so trust the Python binary's bitness instead.
  if arch == 'x64' and platform.architecture()[0] == '32bit':
    arch = 'ia32'

  return arch
if __name__ == '__main__':
  # Allow running standalone as well as being imported by gyp via DoMain().
  sys.exit(main())
| apache-2.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/migrate/changeset/databases/mysql.py | 75 | 2164 | """
MySQL database specific implementations of changeset classes.
"""
import sqlalchemy
from sqlalchemy.databases import mysql as sa_base
from sqlalchemy import types as sqltypes
from migrate import exceptions
from migrate.changeset import ansisql
from migrate.changeset import util
# Alias: SQLAlchemy's MySQL DDL compiler provides the base DDL generation.
MySQLSchemaGenerator = sa_base.MySQLDDLCompiler


class MySQLColumnGenerator(MySQLSchemaGenerator, ansisql.ANSIColumnGenerator):
    # Column creation on MySQL needs no behaviour beyond the ANSI default.
    pass
class MySQLColumnDropper(ansisql.ANSIColumnDropper):
    # The ANSI "ALTER TABLE ... DROP COLUMN" syntax works as-is on MySQL.
    pass
class MySQLSchemaChanger(MySQLSchemaGenerator, ansisql.ANSISchemaChanger):
    """MySQL-specific ALTER TABLE support: column changes go through
    CHANGE COLUMN, and index renames are unsupported."""

    def visit_column(self, delta):
        # Emit "ALTER TABLE ... CHANGE COLUMN <old> <new colspec>", which
        # covers renames and type changes in a single statement.
        table = delta.table
        colspec = self.get_column_specification(delta.result_column)
        if delta.result_column.autoincrement:
            # CHANGE COLUMN drops AUTO_INCREMENT unless it is restated, so
            # re-append it when this column is the table's auto-incrementing
            # integer primary key.
            primary_keys = [c for c in table.primary_key.columns
                            if (c.autoincrement and
                                isinstance(c.type, sqltypes.Integer) and
                                not c.foreign_keys)]

            if primary_keys:
                first = primary_keys.pop(0)
                if first.name == delta.current_name:
                    colspec += " AUTO_INCREMENT"
        q = util.safe_quote(table)
        old_col_name = self.preparer.quote(delta.current_name, q)

        self.start_alter_table(table)

        self.append("CHANGE COLUMN %s " % old_col_name)
        self.append(colspec)
        self.execute()

    def visit_index(self, param):
        # If MySQL can do this, I can't find how
        raise exceptions.NotSupportedError("MySQL cannot rename indexes")
class MySQLConstraintGenerator(ansisql.ANSIConstraintGenerator):
    # Constraint creation follows the ANSI implementation unchanged.
    pass
class MySQLConstraintDropper(MySQLSchemaGenerator, ansisql.ANSIConstraintDropper):
    def visit_migrate_check_constraint(self, *p, **k):
        # MySQL (pre-8.0.16) parses but ignores CHECK constraints, so there
        # is nothing to drop; refuse explicitly rather than silently no-op.
        raise exceptions.NotSupportedError("MySQL does not support CHECK"
            " constraints, use triggers instead.")
class MySQLDialect(ansisql.ANSIDialect):
    # Wires the MySQL-specific changeset visitors into the dialect.
    columngenerator = MySQLColumnGenerator
    columndropper = MySQLColumnDropper
    schemachanger = MySQLSchemaChanger
    constraintgenerator = MySQLConstraintGenerator
    constraintdropper = MySQLConstraintDropper
| mit |
pandeyop/rally | rally/benchmark/context/keypair.py | 2 | 2215 | # Copyright 2014: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import novaclient.exceptions
from rally.benchmark.context import base
from rally.benchmark.context.cleanup import manager as resource_manager
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import osclients
# Module-level logger.
LOG = logging.getLogger(__name__)
@base.context(name="keypair", order=310)
class Keypair(base.Context):
    """Benchmark context that creates one nova SSH keypair per benchmark
    user on setup, and cleans the keypairs up on exit."""

    # Prefix for generated keypair names; the task UUID is appended.
    KEYPAIR_NAME = "rally_ssh_key"

    def _generate_keypair(self, endpoint):
        # Create a keypair for one endpoint and return its details,
        # including the private key (only retrievable at creation time).
        keypair_name = "%s_%s" % (
            self.KEYPAIR_NAME, self.context["task"]["uuid"])
        nova_client = osclients.Clients(endpoint).nova()

        # NOTE(hughsaunders): If keypair exists, it must be deleted as we can't
        # retrieve the private key
        try:
            nova_client.keypairs.delete(keypair_name)
        except novaclient.exceptions.NotFound:
            pass

        keypair = nova_client.keypairs.create(keypair_name)
        return {"private": keypair.private_key,
                "public": keypair.public_key,
                "name": keypair_name,
                "id": keypair.id}

    @utils.log_task_wrapper(LOG.info, _("Enter context: `keypair`"))
    def setup(self):
        # Attach a freshly generated keypair dict to every benchmark user.
        for user in self.context["users"]:
            user["keypair"] = self._generate_keypair(user["endpoint"])

    @utils.log_task_wrapper(LOG.info, _("Exit context: `keypair`"))
    def cleanup(self):
        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(names=["nova.keypairs"],
                                 users=self.context.get("users", []))
| apache-2.0 |
operepo/ope | libs/paramiko/py3compat.py | 37 | 3894 | import sys
import base64
# Public names re-exported by this Python 2/3 compatibility shim.
__all__ = ['PY2', 'string_types', 'integer_types', 'text_type', 'bytes_types', 'bytes', 'long', 'input',
           'decodebytes', 'encodebytes', 'bytestring', 'byte_ord', 'byte_chr', 'byte_mask',
           'b', 'u', 'b2s', 'StringIO', 'BytesIO', 'is_callable', 'MAXSIZE', 'next']
# True when running under Python 2; selects which set of shims is defined.
PY2 = sys.version_info[0] < 3

if PY2:
    # Python 2: str IS bytes, unicode is the text type.
    string_types = basestring
    text_type = unicode
    bytes_types = str
    bytes = str
    integer_types = (int, long)
    long = long
    input = raw_input
    decodebytes = base64.decodestring
    encodebytes = base64.encodestring

    def bytestring(s): # NOQA
        # Coerce unicode to utf-8 bytes; pass anything else through.
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    byte_ord = ord # NOQA
    byte_chr = chr # NOQA

    def byte_mask(c, mask):
        # AND a single byte (given as a 1-char str) with an integer mask.
        return chr(ord(c) & mask)

    def b(s, encoding='utf8'): # NOQA
        """cast unicode or bytes to bytes"""
        if isinstance(s, str):
            return s
        elif isinstance(s, unicode):
            return s.encode(encoding)
        elif isinstance(s, buffer):
            return s
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)

    def u(s, encoding='utf8'): # NOQA
        """cast bytes or unicode to unicode"""
        if isinstance(s, str):
            return s.decode(encoding)
        elif isinstance(s, unicode):
            return s
        elif isinstance(s, buffer):
            return s.decode(encoding)
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)

    def b2s(s):
        # Bytes and str are the same type on Python 2.
        return s

    try:
        import cStringIO
        StringIO = cStringIO.StringIO # NOQA
    except ImportError:
        import StringIO
        StringIO = StringIO.StringIO # NOQA

    BytesIO = StringIO

    def is_callable(c): # NOQA
        return callable(c)

    def get_next(c): # NOQA
        return c.next

    def next(c):
        return c.next()

    # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
    class X(object):
        def __len__(self):
            return 1 << 31
    try:
        len(X())
    except OverflowError:
        # 32-bit
        MAXSIZE = int((1 << 31) - 1) # NOQA
    else:
        # 64-bit
        MAXSIZE = int((1 << 63) - 1) # NOQA
    del X
else:
import collections
import struct
string_types = str
text_type = str
bytes = bytes
bytes_types = bytes
integer_types = int
class long(int):
pass
input = input
decodebytes = base64.decodebytes
encodebytes = base64.encodebytes
def bytestring(s):
return s
def byte_ord(c):
# In case we're handed a string instead of an int.
if not isinstance(c, int):
c = ord(c)
return c
def byte_chr(c):
assert isinstance(c, int)
return struct.pack('B', c)
def byte_mask(c, mask):
assert isinstance(c, int)
return struct.pack('B', c & mask)
    def b(s, encoding='utf8'):
        """Coerce *s* to ``bytes``.

        ``bytes`` input is returned unchanged; ``str`` input is encoded
        with *encoding* (UTF-8 by default). Any other type raises
        ``TypeError`` to mirror the PY2 branch above.
        """
        if isinstance(s, bytes):
            return s
        elif isinstance(s, str):
            return s.encode(encoding)
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)
    def u(s, encoding='utf8'):
        """Coerce *s* to ``str`` (unicode text).

        ``bytes`` input is decoded with *encoding* (UTF-8 by default);
        ``str`` input is returned unchanged. Any other type raises
        ``TypeError`` to mirror the PY2 branch above.
        """
        if isinstance(s, bytes):
            return s.decode(encoding)
        elif isinstance(s, str):
            return s
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)
def b2s(s):
return s.decode() if isinstance(s, bytes) else s
import io
StringIO = io.StringIO # NOQA
BytesIO = io.BytesIO # NOQA
def is_callable(c):
return isinstance(c, collections.Callable)
def get_next(c):
return c.__next__
next = next
MAXSIZE = sys.maxsize # NOQA
| mit |
shenlong3030/asv-django-guestbook | django/contrib/localflavor/fr/forms.py | 12 | 1798 | """
FR-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^0\d(\s|\.)?(\d{2}(\s|\.)?){3}\d{2}$')
class FRZipCodeField(RegexField):
    """Form field validating a French zip code: exactly five digits."""
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXXX.'),
    }
    def __init__(self, *args, **kwargs):
        # Length is fully constrained by the regex, so the usual
        # max_length/min_length machinery is disabled.
        super(FRZipCodeField, self).__init__(r'^\d{5}$',
            max_length=None, min_length=None, *args, **kwargs)
class FRPhoneNumberField(Field):
    """
    Validate local French phone number (not international ones)
    The correct format is '0X XX XX XX XX'.
    '0X.XX.XX.XX.XX' and '0XXXXXXXXX' validate but are corrected to
    '0X XX XX XX XX'.
    """
    default_error_messages = {
        'invalid': _('Phone numbers must be in 0X XX XX XX XX format.'),
    }
    def clean(self, value):
        """Normalize *value* to '0X XX XX XX XX' or raise ValidationError."""
        super(FRPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Strip dot/whitespace separators before matching. The pattern is a
        # raw string: '\.' and '\s' are not valid *string* escapes and would
        # emit DeprecationWarnings on modern Python versions.
        value = re.sub(r'(\.|\s)', '', smart_unicode(value))
        m = phone_digits_re.search(value)
        if m:
            # Re-insert one space between each pair of digits.
            return u'%s %s %s %s %s' % (value[0:2], value[2:4], value[4:6], value[6:8], value[8:10])
        raise ValidationError(self.error_messages['invalid'])
class FRDepartmentSelect(Select):
    """
    A Select widget that uses a list of FR departments as its choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the (large) choices list is only loaded when
        # the widget is actually instantiated.
        from fr_department import DEPARTMENT_ASCII_CHOICES
        super(FRDepartmentSelect, self).__init__(attrs, choices=DEPARTMENT_ASCII_CHOICES)
| bsd-3-clause |
pquentin/django | django/utils/module_loading.py | 46 | 6700 | # Avoid importing `importlib` from this package.
from __future__ import absolute_import
import copy
import os
import sys
import warnings
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
def import_string(dotted_path):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImportError if the import failed.
    """
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        # No dot at all: the path cannot name an attribute in a module.
        six.reraise(
            ImportError,
            ImportError("%s doesn't look like a module path" % dotted_path),
            sys.exc_info()[2])
    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError:
        # Module imported fine, but the requested name is missing.
        six.reraise(
            ImportError,
            ImportError('Module "%s" does not define a "%s" attribute/class' % (
                module_path, class_name)),
            sys.exc_info()[2])
def import_by_path(dotted_path, error_prefix=''):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImproperlyConfigured if something goes wrong.
    """
    # Deprecated thin wrapper around import_string(): it only converts the
    # ImportError into ImproperlyConfigured while preserving the original
    # traceback.
    warnings.warn(
        'import_by_path() has been deprecated. Use import_string() instead.',
        RemovedInDjango19Warning, stacklevel=2)
    try:
        attr = import_string(dotted_path)
    except ImportError as e:
        msg = '%sError importing module %s: "%s"' % (
            error_prefix, dotted_path, e)
        six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
                    sys.exc_info()[2])
    return attr
def autodiscover_modules(*args, **kwargs):
    """
    Auto-discover INSTALLED_APPS modules and fail silently when
    not present. This forces an import on them to register any admin bits they
    may want.
    You may provide a register_to keyword parameter as a way to access a
    registry. This register_to object must have a _registry instance variable
    to access it.
    """
    from django.apps import apps
    register_to = kwargs.get('register_to')
    for app_config in apps.get_app_configs():
        for module_to_search in args:
            # Attempt to import the app's module.
            try:
                if register_to:
                    # Snapshot the registry so a failed import can be undone.
                    before_import_registry = copy.copy(register_to._registry)
                import_module('%s.%s' % (app_config.name, module_to_search))
            # NOTE: deliberately a bare except — any failure (of any type)
            # must roll back the registry before deciding whether to re-raise.
            except:
                # Reset the registry to the state before the last import
                # as this import will have to reoccur on the next request and
                # this could raise NotRegistered and AlreadyRegistered
                # exceptions (see #8245).
                if register_to:
                    register_to._registry = before_import_registry
                # Decide whether to bubble up this error. If the app just
                # doesn't have the module in question, we can ignore the error
                # attempting to import it, otherwise we want it to bubble up.
                if module_has_submodule(app_config.module, module_to_search):
                    raise
if sys.version_info[:2] >= (3, 3):
if sys.version_info[:2] >= (3, 4):
from importlib.util import find_spec as importlib_find
else:
from importlib import find_loader as importlib_find
    def module_has_submodule(package, module_name):
        """See if 'module' is in 'package' without importing it."""
        try:
            package_name = package.__name__
            package_path = package.__path__
        except AttributeError:
            # package isn't a package.
            return False
        full_module_name = package_name + '.' + module_name
        # find_spec/find_loader locate the submodule via the import
        # machinery without executing it.
        return importlib_find(full_module_name, package_path) is not None
else:
import imp
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
try:
# None indicates a cached miss; see mark_miss() in Python/import.c.
return sys.modules[name] is not None
except KeyError:
pass
try:
package_path = package.__path__ # No __path__, then not a package.
except AttributeError:
# Since the remainder of this function assumes that we're dealing with
# a package (module with a __path__), so if it's not, then bail here.
return False
for finder in sys.meta_path:
if finder.find_module(name, package_path):
return True
for entry in package_path:
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
| bsd-3-clause |
2014cdag2/w17x1 | static/Brython3.1.3-20150514-095342/Lib/getopt.py | 845 | 7488 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
from gettext import gettext as _
except ImportError:
# Bootstrapping Python: gettext's dependencies not built yet
def _(s): return s
class GetoptError(Exception):
    """Raised when the argument list cannot be parsed.

    The offending option (if any) is exposed as the ``opt`` attribute
    and the human-readable message as ``msg``.
    """

    msg = ''
    opt = ''

    def __init__(self, msg, opt=''):
        Exception.__init__(self, msg, opt)
        self.msg = msg
        self.opt = opt

    def __str__(self):
        return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    Parses command line options and parameter list.  args is the
    argument list to be parsed, without the leading reference to the
    running program.  Typically, this means "sys.argv[1:]".  shortopts
    is the string of option letters that the script wants to
    recognize, with options that require an argument followed by a
    colon (i.e., the same format that Unix getopt() uses).  If
    specified, longopts is a list of strings with the names of the
    long options which should be supported.  The leading '--'
    characters should not be included in the option name.  Options
    which require an argument should be followed by an equal sign
    ('=').

    The return value consists of two elements: the first is a list of
    (option, value) pairs; the second is the list of program arguments
    left after the option list was stripped (this is a trailing slice
    of the first argument).  Each option-and-value pair returned has
    the option as its first element, prefixed with a hyphen (e.g.,
    '-x'), and the option argument as its second element, or an empty
    string if the option has no argument.  The options occur in the
    list in the same order in which they were found, thus allowing
    multiple occurrences.  Long and short options may be mixed.
    """
    opts = []
    # Accept a bare string as shorthand for a single long option.
    # isinstance() replaces the unidiomatic type(longopts) == type("")
    # check and matches the style used by gnu_getopt() below.
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    while args and args[0].startswith('-') and args[0] != '-':
        # A lone '--' terminates option processing.
        if args[0] == '--':
            args = args[1:]
            break
        if args[0].startswith('--'):
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        else:
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
    return opts, args
def gnu_getopt(args, shortopts, longopts = []):
    """gnu_getopt(args, options[, long_options]) -> opts, args

    This function works like getopt(), except that GNU style scanning
    mode is used by default. This means that option and non-option
    arguments may be intermixed. The getopt() function stops
    processing options as soon as a non-option argument is
    encountered.

    If the first character of the option string is `+', or if the
    environment variable POSIXLY_CORRECT is set, then option
    processing stops as soon as a non-option argument is encountered.
    """
    opts = []
    prog_args = []
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    # Allow options after non-option arguments?
    if shortopts.startswith('+'):
        shortopts = shortopts[1:]
        all_options_first = True
    elif os.environ.get("POSIXLY_CORRECT"):
        all_options_first = True
    else:
        all_options_first = False
    while args:
        # A lone '--' ends option processing; the rest is positional.
        if args[0] == '--':
            prog_args += args[1:]
            break
        if args[0][:2] == '--':
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        elif args[0][:1] == '-' and args[0] != '-':
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
        else:
            if all_options_first:
                # POSIX mode: first non-option stops option scanning.
                prog_args += args
                break
            else:
                # GNU mode: remember the non-option and keep scanning.
                prog_args.append(args[0])
                args = args[1:]
    return opts, prog_args
def do_longs(opts, opt, longopts, args):
    """Parse one long option ('--name' or '--name=value'), appending to opts."""
    # Split off an inline "=value" argument if present.
    try:
        i = opt.index('=')
    except ValueError:
        optarg = None
    else:
        opt, optarg = opt[:i], opt[i+1:]
    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        if optarg is None:
            # Argument was not inline: consume the next word from args.
            if not args:
                raise GetoptError(_('option --%s requires argument') % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg is not None:
        raise GetoptError(_('option --%s must not have an argument') % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
    """Return (takes_argument, full_option_name) for the long option `opt`.

    `opt` may be an unambiguous prefix of a registered long option; a
    trailing '=' on the registered name means the option takes an argument.
    """
    matches = [candidate for candidate in longopts if candidate.startswith(opt)]
    if not matches:
        raise GetoptError(_('option --%s not recognized') % opt, opt)
    # An exact spelling always beats prefix expansion.
    if opt in matches:
        return False, opt
    if opt + '=' in matches:
        return True, opt
    # A prefix is only acceptable when it expands to a single option.
    if len(matches) > 1:
        # XXX since matches contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
    full = matches[0]
    if full.endswith('='):
        return True, full[:-1]
    return False, full
def do_shorts(opts, optstring, shortopts, args):
    """Parse a cluster of short options (e.g. 'abc' from '-abc'), appending to opts."""
    while optstring != '':
        opt, optstring = optstring[0], optstring[1:]
        if short_has_arg(opt, shortopts):
            if optstring == '':
                # Argument not attached to the cluster: take the next word.
                if not args:
                    raise GetoptError(_('option -%s requires argument') % opt,
                                      opt)
                optstring, args = args[0], args[1:]
            # The remainder of the cluster is the option's argument.
            optarg, optstring = optstring, ''
        else:
            optarg = ''
        opts.append(('-' + opt, optarg))
    return opts, args
def short_has_arg(opt, shortopts):
    """Return True if short option `opt` takes an argument.

    An option takes an argument when its letter in `shortopts` is
    followed by ':'. Raises GetoptError for unknown options.
    """
    for index, letter in enumerate(shortopts):
        if letter == opt and letter != ':':
            return shortopts.startswith(':', index + 1)
    raise GetoptError(_('option -%s not recognized') % opt, opt)
if __name__ == '__main__':
import sys
print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| agpl-3.0 |
MyPhate/PythonMiniProbe | miniprobe/probe.py | 1 | 11513 | #!/usr/bin/env python
# Copyright (c) 2014, Paessler AG <support@paessler.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# PRTG Python Miniprobe
# Miniprobe needs at least Python 2.7 because of "importlib"
# If older python version is used you will have to install "importlib"
# import general modules
import sys
import json
import time
import gc
import logging
import socket
import warnings
from requests.packages.urllib3 import exceptions
# import own modules
sys.path.append('./')
try:
from miniprobe import MiniProbe
import sensors
import requests
import multiprocessing
except Exception as e:
print e
# Implemented for internal testing only. Not for public usage!
http = False
if sys.argv[1:] and sys.argv[1] == "http":
http = True
def main():
    """
    Main routine for MiniProbe (Python)

    Announces the probe (and its sensors) to the PRTG core server, then
    loops forever: fetch tasks, run the matching sensors in subprocesses,
    and post the collected results back to the core.
    """
    # Enable Garbage Collection
    gc.enable()
    # make sure the probe will not stop
    probe_stop = False
    # make sure probe is announced at every start
    announce = False
    # read configuration file (existence check done in probe_controller.py)
    config = mini_probe.read_config('./probe.conf')
    logger = logging.getLogger("")
    # Config values arrive as strings; normalize the booleans and set the
    # log level accordingly.
    if config['debug'] == "True":
        config['debug'] = True
        logger.setLevel(logging.DEBUG)
        logging.warning("DEBUG LOGGING HAS BEEN TURNED ON!!")
        logging.getLogger("requests").setLevel(logging.INFO)
    else:
        config['debug'] = False
        logger.setLevel(logging.INFO)
        logging.info("Debug logging has been turned off!!")
        logging.getLogger("requests").setLevel(logging.WARNING)
    if config['cleanmem'] == "True":
        config['cleanmem'] = True
    else:
        config['cleanmem'] = False
    # Doing some startup logging
    logging.info("PRTG Small Probe '%s' starting on '%s'" % (config['name'], socket.gethostname()))
    logging.info("Connecting to PRTG Core Server at %s:%s" % (config['server'], config['port']))
    # create hash of probe access key
    key_sha1 = mini_probe.hash_access_key(config['key'])
    # get list of all sensors announced in __init__.py in package sensors
    sensor_list = mini_probe.get_import_sensors()
    sensor_announce = mini_probe.build_announce(sensor_list)
    announce_json = json.dumps(sensor_announce)
    url_announce = mini_probe.create_url(config, 'announce', http)
    data_announce = mini_probe.create_parameters(config, announce_json, 'announce')
    logging.debug("Announce Data: %s" % data_announce)
    json_history = []
    # Keep retrying the announce until the core server accepts it.
    while not announce:
        try:
            # announcing the probe and all sensors
            # NOTE: TLS certificate verification is deliberately disabled
            # (verify=False) and its warning suppressed.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
                request_announce = requests.post(url_announce, data=data_announce, verify=False, timeout=30)
            announce = True
            logging.info("ANNOUNCE request successfully sent to PRTG Core Server at %s:%s."
                         % (config["server"], config["port"]))
            logging.debug("Connecting to %s:%s" % (config["server"], config["port"]))
            logging.debug("Status Code: %s | Message: %s" % (request_announce.status_code, request_announce.text))
            request_announce.close()
        except requests.exceptions.Timeout:
            # BUG FIX: this used the undefined name string(); str() is the
            # correct conversion (previously raised NameError on timeout).
            logging.error("ANNOUNCE Timeout - try again in %s seconds" % str(int(config['baseinterval']) / 2))
            time.sleep(int(config['baseinterval']) / 2)
        except Exception as announce_error:
            logging.error(announce_error)
            time.sleep(int(config['baseinterval']) / 2)
    # Main work loop; probe_stop is never set, so this runs until the
    # process is terminated externally.
    while not probe_stop:
        # creating some objects only needed in loop
        url_task = mini_probe.create_url(config, 'tasks', http)
        task_data = {
            'gid': config['gid'],
            'protocol': config['protocol'],
            'key': key_sha1
        }
        procs = []
        out_queue = multiprocessing.Queue()
        task = False
        # Retry until a task request succeeds.
        while not task:
            json_payload_data = []
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
                    request_task = requests.post(url_task, data=task_data, verify=False, timeout=30)
                logging.debug(request_task.headers)
                logging.debug(request_task.text)
                try:
                    json_response = request_task.json()
                except Exception as ex:
                    logging.info("Error: %s! Server returned: %s" % (ex, request_task.text))
                request_task.close()
                gc.collect()
                task = True
                logging.info("TASK request successfully sent to PRTG Core Server at %s:%s. Status: %s"
                             % (config["server"], config["port"], request_task.status_code))
                logging.debug("task_url: " + url_task + "\ntask_data: " + str(task_data))
            except requests.exceptions.Timeout:
                logging.error("TASK Timeout: " + str(task_data))
                logging.debug("Timeout encountered. Need to write more code to handle timeoutzzzzz: %s"
                              % json_history)
            except Exception as announce_error:
                logging.error(announce_error)
                time.sleep(int(config['baseinterval']) / 2)
        gc.collect()
        if str(json_response) != '[]':
            json_history = json_response
            # Split the task list into chunks so only a bounded number of
            # sensor subprocesses run at once.
            if config['subprocs']:
                json_response_chunks = [json_response[i:i + int(config['subprocs'])]
                                        for i in range(0, len(json_response), int(config['subprocs']))]
            else:
                json_response_chunks = [json_response[i:i + 10]
                                        for i in range(0, len(json_response), 10)]
            for element in json_response_chunks:
                for part in element:
                    logging.debug(part)
                    found = False
                    for sensor in sensor_list:
                        # Workaround: sensorids greater 9999 cuts the first letter of the kind
                        if (part['kind'] == sensor.get_kind()
                                or part['kind'] == sensor.get_kind()[1:len(sensor.get_kind())]):
                            logging.debug("Running sensor %s for id: %s" % (sensor.get_kind(), part['sensorid']))
                            part['kind'] = sensor.get_kind()
                            p = multiprocessing.Process(target=sensor.get_data, args=(part, out_queue),
                                                        name=part['kind'])
                            procs.append(p)
                            p.start()
                            found = True
                            break
                    if not found:
                        logging.debug("No sensor found for id %s of kind %s" % (part['sensorid'], part['kind']))
                    gc.collect()
                # Collect one result per task in this chunk from the queue.
                try:
                    while len(json_payload_data) < len(element):
                        json_payload_data.append(out_queue.get())
                except Exception as ex:
                    logging.error(ex)
                url_data = mini_probe.create_url(config, 'data', http)
                # Try to send data, stops only when it's successfull or a bad mistake happend
                data_sent = True
                while data_sent:
                    try:
                        request_data = requests.post(url_data, data=json.dumps(json_payload_data),
                                                     verify=False, timeout=30)
                        logging.info("DATA request successfully sent to PRTG Core Server at %s:%s. Status: %s"
                                     % (config["server"], config["port"], request_data.status_code))
                        logging.debug("data_url: " + url_data + "\ndata_data: " + str(json_payload_data))
                        request_data.close()
                        json_payload_data = []
                        data_sent = False
                    except requests.exceptions.Timeout:
                        logging.error("DATA send to PRTG Core Server timed out. Try again.")
                    except Exception as announce_error:
                        logging.error(announce_error)
                        data_sent = False
                # Pace the chunks across the base interval.
                if len(json_response) > 10:
                    time.sleep((int(config['baseinterval']) * (9 / len(json_response))))
                else:
                    time.sleep(int(config['baseinterval']) / 2)
        else:
            logging.info("Nothing to do. Waiting for %s seconds." % (int(config['baseinterval']) / 3))
            time.sleep(int(config['baseinterval']) / 3)
        # Delete some stuff used in the loop and run the garbage collector
        for p in procs:
            if not p.is_alive():
                p.join()
                p.terminate()
                del p
        del json_response
        del json_payload_data
        gc.collect()
        if config['cleanmem']:
            # checking if the clean memory option has been chosen during install then call the method to flush mem
            mini_probe.clean_mem()
    sys.exit()
if __name__ == "__main__":
mini_probe = MiniProbe()
main()
| bsd-3-clause |
pombredanne/bcm11351 | tools/perf/tests/attr.py | 58 | 9435 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
    """Raised when a test's expected events do not match the recorded results."""
    def __init__(self, test, msg):
        self.msg = msg
        self.test = test
    def getMsg(self):
        # Include the failing test's file path for context.
        return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
    """Raised when a test command exits with an unexpected status (unsupported)."""
    def __init__(self, test):
        self.test = test
    def getMsg(self):
        return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.debug("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains 'event' word,
# optionaly followed by ':' allowing to load 'parent
# event' first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.warning(" running '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
# we did not any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
    """Initialise the module-wide 'test' logger from the -v count."""
    global log
    # Map the verbosity count to a logging level (higher count = noisier).
    if verbose >= 3:
        level = logging.DEBUG
    elif verbose == 2:
        level = logging.INFO
    elif verbose == 1:
        level = logging.WARNING
    else:
        level = logging.CRITICAL
    log = logging.getLogger('test')
    log.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(handler)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 |
papados/ordersys | Lib/encodings/iso2022_jp_1.py | 816 | 1061 | #
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# The actual conversion logic lives in the _codecs_iso2022 C extension;
# this module only wires it into Python's codec machinery.
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
class Codec(codecs.Codec):
    # Stateless one-shot encode/decode delegating to the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    # CodecInfo record consumed by the codecs registry
    # (see encodings/__init__.py).
    return codecs.CodecInfo(
        name='iso2022_jp_1',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| unlicense |
ohio813/libforensics | code/lf/win/con/dtypes.py | 13 | 1097 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Data types for Microsoft Windows consoles"""
# local imports
from lf.dtypes import LERecord, BERecord
from lf.win.dtypes import SHORT
__docformat__ = "restructuredtext en"
__all__ = [
"COORD_LE", "COORD_BE"
]
class COORD_LE(LERecord):
x = SHORT
y = SHORT
# end class COORD_LE
class COORD_BE(BERecord):
    # Console screen coordinate, big-endian: two SHORT fields (x, y).
    x = SHORT
    y = SHORT
# end class COORD_BE
| gpl-3.0 |
DANCEcollaborative/forum-xblock | XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/core/management/commands/startproject.py | 201 | 1323 | from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.utils.crypto import get_random_string
from django.utils.importlib import import_module
class Command(TemplateCommand):
    """Management command implementing ``startproject``."""

    help = ("Creates a Django project directory structure for the given "
            "project name in the current directory or optionally in the "
            "given directory.")

    def handle(self, project_name=None, target=None, *args, **options):
        """Validate the name, seed a SECRET_KEY, and render the template."""
        if project_name is None:
            raise CommandError("you must provide a project name")

        # The project name must not shadow an importable module, or the
        # generated project itself could never be imported.
        try:
            import_module(project_name)
        except ImportError:
            name_is_free = True
        else:
            name_is_free = False
        if not name_is_free:
            raise CommandError("%r conflicts with the name of an existing "
                               "Python module and cannot be used as a "
                               "project name. Please try another name." %
                               project_name)

        # Seed the generated settings module with a random SECRET_KEY.
        secret_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        options['secret_key'] = get_random_string(50, secret_chars)

        super(Command, self).handle('project', project_name, target, **options)
| mit |
javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py | 111 | 3843 | from __future__ import unicode_literals
from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """PostgreSQL (psycopg2) schema introspection: maps the live catalog
    (tables, columns, foreign keys, indexes) into Django's vocabulary."""
    # Maps type codes to Django Field types.
    # NOTE(review): the numeric keys look like PostgreSQL pg_type OIDs as
    # reported by psycopg2 in cursor.description — confirm against pg_type.
    data_types_reverse = {
        16: 'BooleanField',
        20: 'BigIntegerField',
        21: 'SmallIntegerField',
        23: 'IntegerField',
        25: 'TextField',
        700: 'FloatField',
        701: 'FloatField',
        869: 'GenericIPAddressField',
        1042: 'CharField', # blank-padded
        1043: 'CharField',
        1082: 'DateField',
        1083: 'TimeField',
        1114: 'DateTimeField',
        1184: 'DateTimeField',
        1266: 'TimeField',
        1700: 'DecimalField',
    }
    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        # Tables ('r') and views ('v') visible on the search path, excluding
        # system schemas.
        cursor.execute("""
            SELECT c.relname
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind IN ('r', 'v', '')
                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
                AND pg_catalog.pg_table_is_visible(c.oid)""")
        return [row[0] for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # As cursor.description does not return reliably the nullable property,
        # we have to query the information_schema (#7783)
        cursor.execute("""
            SELECT column_name, is_nullable
            FROM information_schema.columns
            WHERE table_name = %s""", [table_name])
        null_map = dict(cursor.fetchall())
        # LIMIT 1 is enough: only cursor.description is consumed, not rows.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return [line[:6] + (null_map[line[0]]=='YES',)
                for line in cursor.description]
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # contype = 'f' restricts the scan to foreign-key constraints.
        cursor.execute("""
            SELECT con.conkey, con.confkey, c2.relname
            FROM pg_constraint con, pg_class c1, pg_class c2
            WHERE c1.oid = con.conrelid
                AND c2.oid = con.confrelid
                AND c1.relname = %s
                AND con.contype = 'f'""", [table_name])
        relations = {}
        for row in cursor.fetchall():
            # row[0] and row[1] are single-item lists, so grab the single item.
            relations[row[0][0] - 1] = (row[1][0] - 1, row[2])
        return relations
    def get_indexes(self, cursor, table_name):
        """Return {field_name: {'primary_key': bool, 'unique': bool}} for
        single-column indexes on the given table."""
        # This query retrieves each index on the given table, including the
        # first associated field name
        cursor.execute("""
            SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
            FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
                pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
            WHERE c.oid = idx.indrelid
                AND idx.indexrelid = c2.oid
                AND attr.attrelid = c.oid
                AND attr.attnum = idx.indkey[0]
                AND c.relname = %s""", [table_name])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
        return indexes
| gpl-2.0 |
google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_import.py | 56 | 3358 | """Fixer for import statements.
If spam is being imported from the local directory, this import:
from spam import eggs
Becomes:
from .spam import eggs
And this import:
import spam
Becomes:
from . import spam
"""
# Local imports
from .. import fixer_base
from os.path import dirname, join, exists, sep
from ..fixer_util import FromImport, syms, token
def traverse_imports(names):
    """
    Walks over all the names imported in a dotted_as_names node.

    Yields each imported module name as a plain string, flattening dotted
    names and stripping ``as`` aliases.
    """
    pending = [names]
    while pending:
        node = pending.pop()
        if node.type == token.NAME:
            yield node.value
        elif node.type == syms.dotted_name:
            yield "".join([ch.value for ch in node.children])
        elif node.type == syms.dotted_as_name:
            # 'ham as spam': only the real module name (child 0) matters.
            pending.append(node.children[0])
        elif node.type == syms.dotted_as_names:
            # Children alternate name/comma; [::-2] keeps the names and,
            # being reversed, makes pop() yield them left-to-right.
            pending.extend(node.children[::-2])
        else:
            # Fixed typo in the original message ("unkown node type").
            raise AssertionError("unknown node type")
class FixImport(fixer_base.BaseFix):
    """Turns implicit sibling imports into explicit relative imports
    (``import spam`` -> ``from . import spam``), unless the module opted
    into absolute imports via ``from __future__ import absolute_import``.
    """
    BM_compatible = True
    PATTERN = """
    import_from< 'from' imp=any 'import' ['('] any [')'] >
    |
    import_name< 'import' imp=any >
    """
    def start_tree(self, tree, name):
        """Record whether this module opted into absolute imports."""
        super(FixImport, self).start_tree(tree, name)
        self.skip = "absolute_import" in tree.future_features
    def transform(self, node, results):
        """Rewrite the matched import when it targets a sibling module."""
        if self.skip:
            return
        imp = results['imp']
        if node.type == syms.import_from:
            # Some imps are top-level (eg: 'import ham')
            # some are first level (eg: 'import ham.eggs')
            # some are third level (eg: 'import ham.eggs as spam')
            # Hence, the loop
            while not hasattr(imp, 'value'):
                imp = imp.children[0]
            if self.probably_a_local_import(imp.value):
                imp.value = u"." + imp.value
                imp.changed()
        else:
            have_local = False
            have_absolute = False
            for mod_name in traverse_imports(imp):
                if self.probably_a_local_import(mod_name):
                    have_local = True
                else:
                    have_absolute = True
            if have_absolute:
                if have_local:
                    # We won't handle both sibling and absolute imports in the
                    # same statement at the moment.
                    self.warning(node, "absolute and local imports together")
                return
            new = FromImport(u".", [imp])
            new.prefix = node.prefix
            return new
    def probably_a_local_import(self, imp_name):
        """Return True if imp_name resolves to a file or package that sits
        next to the file currently being fixed."""
        if imp_name.startswith(u"."):
            # Relative imports are certainly not local imports.
            return False
        imp_name = imp_name.split(u".", 1)[0]
        base_path = dirname(self.filename)
        base_path = join(base_path, imp_name)
        # If there is no __init__.py next to the file its not in a package
        # so can't be a relative import.
        if not exists(join(dirname(base_path), "__init__.py")):
            return False
        for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]:
            if exists(base_path + ext):
                return True
        return False
| apache-2.0 |
MatthewWilkes/mw4068-packaging | src/melange/src/soc/models/question.py | 1 | 7827 | #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Question Model."""
__authors__ = [
'"Todd Larsen" <tlarsen@google.com>',
]
from google.appengine.ext import db
import soc.models.work
class Question(soc.models.work.Work):
  """Model of a Question, which is a specialized form of Work.
  Specific types of Questions are actually implemented in subclasses.
  The specific way that the properties and relations inherited from
  Work are used with a Question are described below.
  work.title: the title of the Question, used for finding the
    Question in a list of Questions
  work.author: the author of the Work referred to by this relation
    is the original author of the actual Question, regardless of
    which Quizzes might incorporate the Question
  work.reviews: even Questions can be "reviewed" (possibly commented
    on during creation or annotated once put into use).
  work.content: the Question text, asked to the respondent
  linkable.scope: used to scope (and, when combined with
    linkable.link_id, uniquely identify) a Question in the same way the
    property are used with Documents, etc.
  linkable.link_id: used to identify (and, when combined with
    linkable.scope, *uniquely* identify) a Question in the same way
    these properties are used with Documents, etc.
  In addition to any explicit ReferenceProperties in the Question Model
  and those inherited as described above, a Question entity participates
  in these relationships:
  answers) a 1:many relationship, where each Question has many different
    Answers associated with it as parts of Responses to Quizzes. This is
    implemented as the 'answers' back-reference Query of the Answer model
    'question' reference. It is currently unclear how useful this
    back-reference will be, since the same Question could be used in
    multiple different Quizzes. Given this, 'answers' currently only
    exists for completeness.
  quizzes) a many:many relationship between Questions and the Quizzes
    that collect them into a set. This relation is not explicitly
    implemented, but can be obtained via a query something like:
      quizzes_with_a_question = db.GqlQuery(
          "SELECT * FROM Quiz where questions = :1",
          a_question.key())
    Such queries are probably only needed when a Question might be
    altered, in order to find which Quizzes will be affected.
  The properties in this Model do not have verbose_name or help_text,
  because the dynamic nature of the forms required to create, edit, and
  use entities of this Model make them pretty useless.
  ######################################################################
  # TODO(tlarsen): the following verbose comments can be removed later,
  #   when these ideas are implemented in the views and controllers; they
  #   are here now so that the concepts will not be lost before that time.
  The recommended use for the combination of linkable.scope and
  linkable.link_id is to keep the *same* link_id when copying and
  modifying an existing Question for a new Program (or instance of a
  Group that is per-Program), while changing the linkable.scope to
  represent the Program and Group "ownership" of the Question. For
  example, if a Question asking about prior GSoC participation needed
  to have an additional choice (see the choice_ids and choices properties
  below), it is desirable to keep the same linkable.link_id (and also
  simply append new choice_ids and choices to keep the old answer values
  compatible). An existing Question in the above example might be identified
  as something like:
    Question:google/gsoc2009/gsoc_past_participation
    <type>:<Sponsor>/<Program>/<link_id>
  To make it possible to query for gsoc_past_participation answers regardless
  of the Program, the next year, new values are added to choice_ids and
  choices in a new Question copied from the one above, which would then
  be named something (still unique) like:
    Question:google/gsoc2010/gsoc_past_participation
  Care just needs to be taken to keep the existing choice_ids and choices
  compatible.
  Other interesting possibilities also exist, such as asking about GSoC
  participation of the GHOP participants (some GHOP high school students
  have actually previously been GSoC mentors, for example). To produce
  unique statistics for GHOP that could also be aggregated overall in
  combination with GSoC, the gsoc_past_participation Question would be
  duplicated (unaltered) to something like:
    Question:google/ghop2009/gsoc_past_participation
  To get the combined results, query on a link_id of
  gsoc_past_participation. For more targeted results, include the
  scope to make the query more specific.
  Question creation to permit use cases like the one above is going to
  be a bit of an "advanced" skill, possibly. "Doing it wrong" the first
  time a Question is created will make it difficult to implement stuff
  like multiple-choice Questions that "grow" new choices year-over-year.
  A dynamic form is most definitely going to be needed to implement the
  Question creation and editing for multiple-choice questions.
  """
  #: db.ListProperty of short, plain-text, "link_id-like" strings
  #: representing the "encoded" answer choices (must be strings compatible
  #: with being query arguments and being used in HTML controls and POST
  #: responses).
  #:
  #: If empty (None or an empty list), it is assumed that this Question
  #: is *not* a multiple choice question. In that case, the UI should
  #: display the Question as a textarea in forms and accept any plain-text.
  #:
  #: If non-empty, max_answers helps determine how the UI should display
  #: the Question. Also, controller logic needs to validate if the
  #: strings in the 'answers' property of the Answer entity come only
  #: from this list.
  #:
  #: Once Answers to this Question have been stored in the Datastore,
  #: choice_ids and choices should *not* be modified. An existing
  #: Question can be duplicated and then modified (but, it will be a
  #: different question as a result).
  choice_ids = db.ListProperty(item_type=str)
  #: db.ListProperty of human-readable choice strings, in the same order
  #: as, and corresponding to, the "encoded" choices in the choice_ids
  #: db.ListProperty.
  choices = db.ListProperty(item_type=str)
  #: db.IntegerProperty indicating the maximum number of answer values
  #: permitted for this question. If 'choices' does not contain a list of
  #: choice strings, this value is ignored (but should still only be 1).
  #:
  #: If there are 'choices' and this value is 1, the UI should render the
  #: Question in forms as a single-choice control ("radio buttons").
  #:
  #: If there are 'choices' and this value is greater than 1, the UI should
  #: render the question as a list of check-boxes.
  #:
  #: max_answers greater than 1 combined with choices enables Questions
  #: like, for example, "...select the three most important...".
  max_answers = db.IntegerProperty(default=1)
  #: field storing whether the Answer to a Question is optional
  is_optional = db.BooleanProperty(default=False)
| apache-2.0 |
brunogamacatao/portalsaladeaula | djangoappengine/settings_base.py | 1 | 1102 | # Initialize App Engine SDK if necessary
# Importing the SDK api module succeeds only when the SDK is already set up;
# otherwise bootstrap the environment via .boot.setup_env().
try:
    from google.appengine.api import api_proxy_stub_map
except ImportError:
    from .boot import setup_env
    setup_env()
from djangoappengine.utils import on_production_server, have_appserver
# Never run with DEBUG enabled on the production server.
DEBUG = not on_production_server
TEMPLATE_DEBUG = DEBUG
ROOT_URLCONF = 'urls'
# All data goes through the App Engine datastore backend.
DATABASES = {
    'default': {
        'ENGINE': 'djangoappengine.db',
    },
}
# On production use the asynchronous mail backend; locally the plain one.
if on_production_server:
    EMAIL_BACKEND = 'djangoappengine.mail.AsyncEmailBackend'
else:
    EMAIL_BACKEND = 'djangoappengine.mail.EmailBackend'
# File uploads/downloads are backed by the App Engine Blobstore.
PREPARE_UPLOAD_BACKEND = 'djangoappengine.storage.prepare_upload'
SERVE_FILE_BACKEND = 'djangoappengine.storage.serve_file'
DEFAULT_FILE_STORAGE = 'djangoappengine.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
    'djangoappengine.storage.BlobstoreFileUploadHandler',
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
# NOTE(review): timeout=0 presumably means "no expiry" for memcache —
# confirm against the cache backend documentation.
CACHE_BACKEND = 'memcached://?timeout=0'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
if not on_production_server:
    INTERNAL_IPS = ('127.0.0.1',)
| bsd-3-clause |
mikkylok/mikky.lu | venv/lib/python2.7/site-packages/sqlalchemy/orm/dynamic.py | 32 | 13144 | # orm/dynamic.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dynamic collection API.
Dynamic collections act like Query() objects for read operations and support
basic add/delete mutation.
"""
from .. import log, util, exc
from ..sql import operators
from . import (
attributes, object_session, util as orm_util, strategies,
object_mapper, exc as orm_exc, properties
)
from .query import Query
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="dynamic")
class DynaLoader(strategies.AbstractRelationshipLoader):
    """Loader strategy for ``lazy="dynamic"`` relationships.

    Rather than loading a collection, it installs a DynamicAttributeImpl so
    that attribute access produces a query object (AppenderQuery).
    """
    def init_class_attribute(self, mapper):
        # The dynamic attribute is installed at the class level.
        self.is_class_level = True
        # Dynamic loaders only make sense for collections (uselist=True).
        if not self.uselist:
            raise exc.InvalidRequestError(
                "On relationship %s, 'dynamic' loaders cannot be used with "
                "many-to-one/one-to-one relationships and/or "
                "uselist=False." % self.parent_property)
        strategies._register_attribute(
            self.parent_property,
            mapper,
            useobject=True,
            impl_class=DynamicAttributeImpl,
            target_mapper=self.parent_property.mapper,
            order_by=self.parent_property.order_by,
            query_class=self.parent_property.query_class,
        )
class DynamicAttributeImpl(attributes.AttributeImpl):
    """Attribute implementation backing ``lazy="dynamic"`` relationships.

    Reads return an AppenderQuery (or the user-supplied ``query_class``)
    instead of a loaded collection; pending mutations are accumulated in
    per-state CollectionHistory objects.
    """
    # Behavior flags consumed by the attributes machinery.
    uses_objects = True
    accepts_scalar_loader = False
    supports_population = False
    collection = False
    def __init__(self, class_, key, typecallable,
                 dispatch,
                 target_mapper, order_by, query_class=None, **kw):
        super(DynamicAttributeImpl, self).\
            __init__(class_, key, typecallable, dispatch, **kw)
        self.target_mapper = target_mapper
        self.order_by = order_by
        # Normalize query_class: default AppenderQuery, accept classes that
        # already mix in AppenderMixin, otherwise wrap the user's class.
        if not query_class:
            self.query_class = AppenderQuery
        elif AppenderMixin in query_class.mro():
            self.query_class = query_class
        else:
            self.query_class = mixin_user_query(query_class)
    def get(self, state, dict_, passive=attributes.PASSIVE_OFF):
        # Non-SQL access returns only the pending additions; otherwise hand
        # back a query object the caller can iterate/filter.
        if not passive & attributes.SQL_OK:
            return self._get_collection_history(
                state, attributes.PASSIVE_NO_INITIALIZE).added_items
        else:
            return self.query_class(self, state)
    def get_collection(self, state, dict_, user_data=None,
                       passive=attributes.PASSIVE_NO_INITIALIZE):
        if not passive & attributes.SQL_OK:
            return self._get_collection_history(state,
                                                passive).added_items
        else:
            history = self._get_collection_history(state, passive)
            return history.added_plus_unchanged
    @util.memoized_property
    def _append_token(self):
        # Sentinel initiator used when no originating event is supplied.
        return attributes.Event(self, attributes.OP_APPEND)
    @util.memoized_property
    def _remove_token(self):
        # Sentinel initiator used when no originating event is supplied.
        return attributes.Event(self, attributes.OP_REMOVE)
    def fire_append_event(self, state, dict_, value, initiator,
                          collection_history=None):
        if collection_history is None:
            collection_history = self._modified_event(state, dict_)
        collection_history.add_added(value)
        for fn in self.dispatch.append:
            value = fn(state, value, initiator or self._append_token)
        # Maintain the parent backref bookkeeping on the child.
        if self.trackparent and value is not None:
            self.sethasparent(attributes.instance_state(value), state, True)
    def fire_remove_event(self, state, dict_, value, initiator,
                          collection_history=None):
        if collection_history is None:
            collection_history = self._modified_event(state, dict_)
        collection_history.add_removed(value)
        if self.trackparent and value is not None:
            self.sethasparent(attributes.instance_state(value), state, False)
        for fn in self.dispatch.remove:
            fn(state, value, initiator or self._remove_token)
    def _modified_event(self, state, dict_):
        # Lazily create the per-state CollectionHistory and flag the
        # attribute as modified.
        if self.key not in state.committed_state:
            state.committed_state[self.key] = CollectionHistory(self, state)
        state._modified_event(dict_,
                              self,
                              attributes.NEVER_SET)
        # this is a hack to allow the fixtures.ComparableEntity fixture
        # to work
        dict_[self.key] = True
        return state.committed_state[self.key]
    def set(self, state, dict_, value, initiator=None,
            passive=attributes.PASSIVE_OFF,
            check_old=None, pop=False, _adapt=True):
        # Bulk replacement: identity-diff the incoming collection against
        # the old one and fire append/remove events for the delta only.
        if initiator and initiator.parent_token is self.parent_token:
            return
        if pop and value is None:
            return
        iterable = value
        new_values = list(iterable)
        if state.has_identity:
            old_collection = util.IdentitySet(self.get(state, dict_))
        collection_history = self._modified_event(state, dict_)
        if not state.has_identity:
            old_collection = collection_history.added_items
        else:
            old_collection = old_collection.union(
                collection_history.added_items)
        idset = util.IdentitySet
        constants = old_collection.intersection(new_values)
        additions = idset(new_values).difference(constants)
        removals = old_collection.difference(constants)
        for member in new_values:
            if member in additions:
                self.fire_append_event(state, dict_, member, None,
                                       collection_history=collection_history)
        for member in removals:
            self.fire_remove_event(state, dict_, member, None,
                                   collection_history=collection_history)
    def delete(self, *args, **kwargs):
        # Deleting a dynamic collection attribute is unsupported.
        raise NotImplementedError()
    def set_committed_value(self, state, dict_, value):
        raise NotImplementedError("Dynamic attributes don't support "
                                  "collection population.")
    def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
        c = self._get_collection_history(state, passive)
        return c.as_history()
    def get_all_pending(self, state, dict_,
                        passive=attributes.PASSIVE_NO_INITIALIZE):
        # (instance_state, instance) pairs for every known item
        # (added + unchanged + deleted).
        c = self._get_collection_history(
            state, passive)
        return [
            (attributes.instance_state(x), x)
            for x in
            c.all_items
        ]
    def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF):
        if self.key in state.committed_state:
            c = state.committed_state[self.key]
        else:
            c = CollectionHistory(self, state)
        # For a persistent parent with INIT_OK, layer the pending changes
        # over a snapshot of the database collection so they reconcile.
        if state.has_identity and (passive & attributes.INIT_OK):
            return CollectionHistory(self, state, apply_to=c)
        else:
            return c
    def append(self, state, dict_, value, initiator,
               passive=attributes.PASSIVE_OFF):
        # Guard against re-entrant events originating from this impl.
        if initiator is not self:
            self.fire_append_event(state, dict_, value, initiator)
    def remove(self, state, dict_, value, initiator,
               passive=attributes.PASSIVE_OFF):
        if initiator is not self:
            self.fire_remove_event(state, dict_, value, initiator)
    def pop(self, state, dict_, value, initiator,
            passive=attributes.PASSIVE_OFF):
        self.remove(state, dict_, value, initiator, passive=passive)
class AppenderMixin(object):
    """Mixin layering collection-style append/remove/iterate behavior on a
    Query class; instances serve as the value of a "dynamic" relationship
    attribute."""
    # Overridden by mixin_user_query()-generated subclasses.
    query_class = None
    def __init__(self, attr, state):
        super(AppenderMixin, self).__init__(attr.target_mapper, None)
        self.instance = instance = state.obj()
        self.attr = attr
        mapper = object_mapper(instance)
        prop = mapper._props[self.attr.key]
        # Pre-build the WHERE criterion relating target rows to this parent.
        self._criterion = prop._with_parent(
            instance,
            alias_secondary=False)
        if self.attr.order_by:
            self._order_by = self.attr.order_by
    def session(self):
        """Session of the parent instance (autoflushing when applicable),
        or None if the parent has no database identity yet."""
        sess = object_session(self.instance)
        if sess is not None and self.autoflush and sess.autoflush \
                and self.instance in sess:
            sess.flush()
        if not orm_util.has_identity(self.instance):
            return None
        else:
            return sess
    # Read-only property: assignment to .session is deliberately a no-op.
    session = property(session, lambda s, x: None)
    def __iter__(self):
        # Without a usable session, iterate only the pending in-memory
        # additions; otherwise execute a real query.
        sess = self.session
        if sess is None:
            return iter(self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                attributes.PASSIVE_NO_INITIALIZE).added_items)
        else:
            return iter(self._clone(sess))
    def __getitem__(self, index):
        sess = self.session
        if sess is None:
            return self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                attributes.PASSIVE_NO_INITIALIZE).indexed(index)
        else:
            return self._clone(sess).__getitem__(index)
    def count(self):
        sess = self.session
        if sess is None:
            return len(self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                attributes.PASSIVE_NO_INITIALIZE).added_items)
        else:
            return self._clone(sess).count()
    def _clone(self, sess=None):
        # note we're returning an entirely new Query class instance
        # here without any assignment capabilities; the class of this
        # query is determined by the session.
        instance = self.instance
        if sess is None:
            sess = object_session(instance)
        if sess is None:
            raise orm_exc.DetachedInstanceError(
                "Parent instance %s is not bound to a Session, and no "
                "contextual session is established; lazy load operation "
                "of attribute '%s' cannot proceed" % (
                    orm_util.instance_str(instance), self.attr.key))
        if self.query_class:
            query = self.query_class(self.attr.target_mapper, session=sess)
        else:
            query = sess.query(self.attr.target_mapper)
        query._criterion = self._criterion
        query._order_by = self._order_by
        return query
    def extend(self, iterator):
        # Record each item as a pending append on the dynamic attribute.
        for item in iterator:
            self.attr.append(
                attributes.instance_state(self.instance),
                attributes.instance_dict(self.instance), item, None)
    def append(self, item):
        self.attr.append(
            attributes.instance_state(self.instance),
            attributes.instance_dict(self.instance), item, None)
    def remove(self, item):
        self.attr.remove(
            attributes.instance_state(self.instance),
            attributes.instance_dict(self.instance), item, None)
# Concrete dynamic-collection class: AppenderMixin layered over the standard
# ORM Query (see mixin_user_query for user-supplied Query subclasses).
class AppenderQuery(AppenderMixin, Query):
    """A dynamic query that supports basic collection storage operations."""
def mixin_user_query(cls):
    """Layer AppenderQuery behavior over *cls* and return the new subclass."""
    bases = (AppenderMixin, cls)
    return type('Appender' + cls.__name__, bases, {'query_class': cls})
class CollectionHistory(object):
    """Overrides AttributeHistory to receive append/remove events directly."""
    def __init__(self, attr, state, apply_to=None):
        if not apply_to:
            # Fresh, empty history: nothing added, removed, or loaded yet.
            self._reconcile_collection = False
            self.added_items = util.OrderedIdentitySet()
            self.deleted_items = util.OrderedIdentitySet()
            self.unchanged_items = util.OrderedIdentitySet()
        else:
            # Snapshot the persistent collection (autoflush disabled) as the
            # "unchanged" baseline, layering pending changes on top of it.
            loaded = AppenderQuery(attr, state).autoflush(False)
            self._reconcile_collection = True
            self.added_items = apply_to.added_items
            self.deleted_items = apply_to.deleted_items
            self.unchanged_items = util.OrderedIdentitySet(loaded)
    @property
    def added_plus_unchanged(self):
        combined = self.added_items.union(self.unchanged_items)
        return list(combined)
    @property
    def all_items(self):
        everything = self.added_items.union(self.unchanged_items)
        return list(everything.union(self.deleted_items))
    def as_history(self):
        if not self._reconcile_collection:
            added = self.added_items
            unchanged = self.unchanged_items
            deleted = self.deleted_items
        else:
            # Reconcile against the loaded baseline so items present in the
            # database are not double-reported as additions.
            added = self.added_items.difference(self.unchanged_items)
            deleted = self.deleted_items.intersection(self.unchanged_items)
            unchanged = self.unchanged_items.difference(deleted)
        return attributes.History(
            list(added),
            list(unchanged),
            list(deleted),
        )
    def indexed(self, index):
        return list(self.added_items)[index]
    def add_added(self, value):
        self.added_items.add(value)
    def add_removed(self, value):
        # Removing a not-yet-flushed addition simply cancels it out.
        if value not in self.added_items:
            self.deleted_items.add(value)
        else:
            self.added_items.remove(value)
| mit |
ifearcompilererrors/fle_redesign | fle_redesign/settings.py | 1 | 5681 | # Django settings for fle_redesign project.
import os
# Pull in machine-specific overrides when a local_settings module exists;
# fall back to an empty placeholder so attribute lookups below simply fail.
try:
    from local_settings import *
    import local_settings
except ImportError:
    local_settings = {}
def localor(setting_name, default_val):
    """Return the local_settings value when present and truthy, else default_val."""
    override = getattr(local_settings, setting_name, None)
    return override if override else default_val
# Core switches; each honors a local_settings override via localor().
DEBUG = localor("DEBUG", True)
TEMPLATE_DEBUG = localor("TEMPLATE_DEBUG", DEBUG)
GEOIPDAT = localor("GEOIPDAT", '/usr/share/GeoIP/GeoIPCity.dat') or False
ADMINS = (
    # ('Dylan', 'dylan@learningequality.org'),
)
MANAGERS = ADMINS
# Absolute path of the directory containing this settings file.
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(PROJECT_PATH, 'database.sqlite'), # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
MEDIA_URL = getattr(local_settings, "MEDIA_URL", "/media/")
MEDIA_ROOT = os.path.realpath(getattr(local_settings, "MEDIA_ROOT", PROJECT_PATH + "/media/")) + "/"
STATIC_URL = getattr(local_settings, "STATIC_URL", "/static/")
# STATIC_ROOT = os.path.realpath(getattr(local_settings, "STATIC_ROOT", PROJECT_PATH + "/static/")) + "/"
# Additional locations of static files
# NOTE(review): the hard-coded developer-specific path below should probably
# be derived from PROJECT_PATH instead.
STATICFILES_DIRS = (
    '/Users/dylan/Hacking/fle_redesign/fle_redesign/static',
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to version control; move it into
# local_settings for production deployments.
SECRET_KEY = '@$q_9h45p=b3-wk2zv9oy_8d-13p576dk))p*o=ntyh!_3b16%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fle_redesign.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'fle_redesign.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, "templates"),
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django_extensions',
    'south',
    'easy_thumbnails',
    'fle_redesign.apps.radpress',
    'fle_redesign.apps.main',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        # Unhandled request errors are emailed to ADMINS (see handler above).
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| mit |
shabab12/edx-platform | common/lib/xmodule/xmodule/tests/test_validation.py | 83 | 8741 | """
Test xblock/validation.py
"""
import unittest
from xblock.test.tools import assert_raises
from xmodule.validation import StudioValidationMessage, StudioValidation
from xblock.validation import Validation, ValidationMessage
class StudioValidationMessageTest(unittest.TestCase):
    """
    Tests for `ValidationMessage`
    """
    def test_bad_parameters(self):
        """
        Test that `TypeError`s are thrown for bad input parameters.
        """
        # Unrecognized message type string is rejected.
        with assert_raises(TypeError):
            StudioValidationMessage("unknown type", u"Unknown type info")
        # Non-string action_class is rejected.
        with assert_raises(TypeError):
            StudioValidationMessage(StudioValidationMessage.WARNING, u"bad warning", action_class=0)
        # Non-string action_runtime_event is rejected.
        with assert_raises(TypeError):
            StudioValidationMessage(StudioValidationMessage.WARNING, u"bad warning", action_runtime_event=0)
        # Non-unicode action_label is rejected.
        with assert_raises(TypeError):
            StudioValidationMessage(StudioValidationMessage.WARNING, u"bad warning", action_label="Non-unicode string")
    def test_to_json(self):
        """
        Test the `to_json` method.
        """
        # Only the optional attributes actually supplied appear in the JSON.
        self.assertEqual(
            {
                "type": StudioValidationMessage.NOT_CONFIGURED,
                "text": u"Not Configured message",
                "action_label": u"Action label"
            },
            StudioValidationMessage(
                StudioValidationMessage.NOT_CONFIGURED, u"Not Configured message", action_label=u"Action label"
            ).to_json()
        )
        self.assertEqual(
            {
                "type": StudioValidationMessage.WARNING,
                "text": u"Warning message",
                "action_class": "class-for-action"
            },
            StudioValidationMessage(
                StudioValidationMessage.WARNING, u"Warning message", action_class="class-for-action"
            ).to_json()
        )
        self.assertEqual(
            {
                "type": StudioValidationMessage.ERROR,
                "text": u"Error message",
                "action_runtime_event": "do-fix-up"
            },
            StudioValidationMessage(
                StudioValidationMessage.ERROR, u"Error message", action_runtime_event="do-fix-up"
            ).to_json()
        )
class StudioValidationTest(unittest.TestCase):
    """
    Tests for `StudioValidation` class: copying from plain `Validation`,
    emptiness semantics, message merging, and JSON serialization.
    """
    def test_copy(self):
        """Copying a plain Validation yields a StudioValidation with the same messages."""
        validation = Validation("id")
        validation.add(ValidationMessage(ValidationMessage.ERROR, u"Error message"))
        studio_validation = StudioValidation.copy(validation)
        self.assertIsInstance(studio_validation, StudioValidation)
        self.assertFalse(studio_validation)
        self.assertEqual(1, len(studio_validation.messages))
        expected = {
            "type": StudioValidationMessage.ERROR,
            "text": u"Error message"
        }
        self.assertEqual(expected, studio_validation.messages[0].to_json())
        self.assertIsNone(studio_validation.summary)
    def test_copy_studio_validation(self):
        """Copying a StudioValidation preserves Studio-only fields such as action_label."""
        validation = StudioValidation("id")
        validation.add(
            StudioValidationMessage(StudioValidationMessage.WARNING, u"Warning message", action_label=u"Action Label")
        )
        validation_copy = StudioValidation.copy(validation)
        self.assertFalse(validation_copy)
        self.assertEqual(1, len(validation_copy.messages))
        expected = {
            "type": StudioValidationMessage.WARNING,
            "text": u"Warning message",
            "action_label": u"Action Label"
        }
        self.assertEqual(expected, validation_copy.messages[0].to_json())
    def test_copy_errors(self):
        """`copy` rejects arguments that are not Validation instances."""
        with assert_raises(TypeError):
            StudioValidation.copy("foo")
    def test_empty(self):
        """
        Test that `empty` return True iff there are no messages and no summary.
        Also test the "bool" property of `Validation`.
        """
        validation = StudioValidation("id")
        self.assertTrue(validation.empty)
        self.assertTrue(validation)
        validation.add(StudioValidationMessage(StudioValidationMessage.ERROR, u"Error message"))
        self.assertFalse(validation.empty)
        self.assertFalse(validation)
        validation_with_summary = StudioValidation("id")
        validation_with_summary.set_summary(
            StudioValidationMessage(StudioValidationMessage.NOT_CONFIGURED, u"Summary message")
        )
        # Fix: these assertions previously checked `validation` (already
        # non-empty from the earlier add), so a summary-only instance was
        # never actually exercised.
        self.assertFalse(validation_with_summary.empty)
        self.assertFalse(validation_with_summary)
    def test_add_messages(self):
        """
        Test the behavior of calling `add_messages` with combination of `StudioValidation` instances:
        messages are concatenated, but the receiver's summary is kept.
        """
        validation_1 = StudioValidation("id")
        validation_1.set_summary(StudioValidationMessage(StudioValidationMessage.WARNING, u"Summary message"))
        validation_1.add(StudioValidationMessage(StudioValidationMessage.ERROR, u"Error message"))
        validation_2 = StudioValidation("id")
        validation_2.set_summary(StudioValidationMessage(StudioValidationMessage.ERROR, u"Summary 2 message"))
        validation_2.add(StudioValidationMessage(StudioValidationMessage.NOT_CONFIGURED, u"Not configured"))
        validation_1.add_messages(validation_2)
        self.assertEqual(2, len(validation_1.messages))
        self.assertEqual(StudioValidationMessage.ERROR, validation_1.messages[0].type)
        self.assertEqual(u"Error message", validation_1.messages[0].text)
        self.assertEqual(StudioValidationMessage.NOT_CONFIGURED, validation_1.messages[1].type)
        self.assertEqual(u"Not configured", validation_1.messages[1].text)
        # The summary of validation_2 is NOT merged in.
        self.assertEqual(StudioValidationMessage.WARNING, validation_1.summary.type)
        self.assertEqual(u"Summary message", validation_1.summary.text)
    def test_set_summary_accepts_validation_message(self):
        """
        Test that `set_summary` accepts a ValidationMessage.
        """
        validation = StudioValidation("id")
        validation.set_summary(ValidationMessage(ValidationMessage.WARNING, u"Summary message"))
        self.assertEqual(ValidationMessage.WARNING, validation.summary.type)
        self.assertEqual(u"Summary message", validation.summary.text)
    def test_set_summary_errors(self):
        """
        Test that `set_summary` errors if argument is not a ValidationMessage.
        """
        with assert_raises(TypeError):
            StudioValidation("id").set_summary("foo")
    def test_to_json(self):
        """
        Test the ability to serialize a `StudioValidation` instance.
        """
        validation = StudioValidation("id")
        expected = {
            "xblock_id": "id",
            "messages": [],
            "empty": True
        }
        self.assertEqual(expected, validation.to_json())
        validation.add(
            StudioValidationMessage(
                StudioValidationMessage.ERROR,
                u"Error message",
                action_label=u"Action label",
                action_class="edit-button"
            )
        )
        validation.add(
            StudioValidationMessage(
                StudioValidationMessage.NOT_CONFIGURED,
                u"Not configured message",
                action_label=u"Action label",
                action_runtime_event="make groups"
            )
        )
        validation.set_summary(
            StudioValidationMessage(
                StudioValidationMessage.WARNING,
                u"Summary message",
                action_label=u"Summary label",
                action_runtime_event="fix everything"
            )
        )
        # Note: it is important to test all the expected strings here because the client-side model depends on them
        # (for instance, "warning" vs. using the xblock constant ValidationMessageTypes.WARNING).
        expected = {
            "xblock_id": "id",
            "messages": [
                {
                    "type": "error",
                    "text": u"Error message",
                    "action_label": u"Action label",
                    "action_class": "edit-button"
                },
                {
                    "type": "not-configured",
                    "text": u"Not configured message",
                    "action_label": u"Action label",
                    "action_runtime_event": "make groups"
                }
            ],
            "summary": {
                "type": "warning",
                "text": u"Summary message",
                "action_label": u"Summary label",
                "action_runtime_event": "fix everything"
            },
            "empty": False
        }
        self.assertEqual(expected, validation.to_json())
| agpl-3.0 |
gitdealdo/serva | apps/recetario/views/unidad.py | 1 | 3130 | from django.core.urlresolvers import reverse_lazy, reverse
from django.utils.translation import ugettext as _ # , ungettext
from django.utils.text import capfirst # , get_text_list
from django.contrib import messages
from django.views import generic
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.conf import settings
from django.utils.encoding import force_text
from django.contrib.auth.mixins import LoginRequiredMixin
from backend_apps.utils.forms import empty
from backend_apps.utils.security import log_params, get_dep_objects # , SecurityKey, UserToken
from ..models.unidad import Unidad
from ..forms.unidad import UnidadForm
class UnidadListView(LoginRequiredMixin, generic.ListView):
    """
    Lista de unidades con soporte de busqueda/orden por querystring y
    manejo de crear/actualizar/eliminar via POST en la misma vista.
    """
    model = Unidad
    template_name = 'unidad/list.html'
    paginate_by = settings.PER_PAGE

    def get_paginate_by(self, queryset):
        """Desactiva la paginacion cuando viene `?all` en la URL."""
        if 'all' in self.request.GET:
            return None
        return generic.ListView.get_paginate_by(self, queryset)

    def get_queryset(self):
        """Filtra por `?q=` (substring sobre el campo `?f=`) y ordena por `?o=`."""
        self.o = empty(self.request, 'o', '-id')
        self.f = empty(self.request, 'f', 'nombre')
        self.q = empty(self.request, 'q', '')
        column_contains = u'%s__%s' % (self.f, 'contains')
        return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)

    def get_context_data(self, **kwargs):
        """Expone el form, metadatos del modelo y los parametros de busqueda al template."""
        context = super(UnidadListView, self).get_context_data(**kwargs)
        context['opts'] = self.model._meta
        context['form'] = UnidadForm
        context['title'] = _('Select %s to change') % capfirst(self.model._meta.verbose_name)
        context['o'] = self.o
        context['f'] = self.f
        context['q'] = self.q.replace('/', '-')
        return context

    def post(self, request):
        """
        Crea, actualiza o elimina una Unidad segun los campos del POST.

        Correcciones: se eliminan los print() de depuracion; el try/except
        Exception alrededor de la lectura de 'delete' se reemplaza por
        QueryDict.get(); un POST sin 'id_unidad' ya no lanza KeyError.
        """
        delete = request.POST.get('delete')
        if delete:
            # Eliminar
            Unidad.objects.get(id=delete).delete()
            messages.success(request, 'Unidad eliminada con éxito')
        elif request.POST.get('id_unidad'):
            # Actualizar
            u = Unidad.objects.get(id=request.POST['id_unidad'])
            u.nombre = request.POST['nombre']
            u.simbolo = request.POST['simbolo']
            u.save()
            messages.success(request, 'Unidad %s actualizada con éxito' % u)
        else:
            # Crear
            u = Unidad(
                nombre=request.POST['nombre'],
                simbolo=request.POST['simbolo']
            )
            u.save()
            messages.success(request, 'Unidad %s registrada con éxito' % u)
        return HttpResponseRedirect(reverse('recetario:unidad_list'))
def crear_unidad(request):
    """
    Crear unidad por ajax (solo POST).

    Correccion: un request que no fuera POST caia al final de la funcion y
    devolvia None, lo que hace que Django responda con un error 500; ahora
    se devuelve explicitamente un 405 (Method Not Allowed).
    """
    if request.method == 'POST':
        uni = Unidad.objects.create(
            nombre=request.POST.get('nombre'),
            simbolo=request.POST.get('simbolo'))
        respuesta = {'id': uni.id, 'nombre': uni.nombre}
        return JsonResponse(respuesta)
    return JsonResponse({'error': 'POST required'}, status=405)
| gpl-2.0 |
chirilo/kitsune | kitsune/karma/tests/test_helpers.py | 17 | 1320 | from nose.tools import eq_
from kitsune.karma.helpers import karma_titles
from kitsune.karma.models import Title
from kitsune.users.tests import TestCase, user, group
class KarmaTitleHelperTests(TestCase):
    """Tests for the `karma_titles` helper: titles can come from the user
    directly, from the user's groups, or both."""
    def setUp(self):
        super(KarmaTitleHelperTests, self).setUp()
        # One user belonging to one group; each test attaches titles to one or both.
        self.user = user(save=True)
        self.group = group(name='group', save=True)
        self.user.groups.add(self.group)
    def test_user_title(self):
        """A title assigned directly to the user is returned."""
        title = 'User Title'
        t = Title(name=title)
        t.save()
        t.users.add(self.user)
        titles = karma_titles(self.user)
        eq_(1, len(titles))
        eq_(title, titles[0].name)
    def test_group_title(self):
        """A title assigned to one of the user's groups is returned."""
        title = 'Group Title'
        t = Title(name=title)
        t.save()
        t.groups.add(self.group)
        titles = karma_titles(self.user)
        eq_(1, len(titles))
        eq_(title, titles[0].name)
    def test_user_and_group_title(self):
        """User-level and group-level titles are combined (order not guaranteed)."""
        u_title = 'User Title'
        g_title = 'Group Title'
        t = Title(name=u_title)
        t.save()
        t.users.add(self.user)
        t = Title(name=g_title)
        t.save()
        t.groups.add(self.group)
        titles = [k.name for k in karma_titles(self.user)]
        eq_(2, len(titles))
        assert u_title in titles
        assert g_title in titles
| bsd-3-clause |
Unow/edx-platform | lms/djangoapps/psychometrics/psychoanalyze.py | 23 | 11311 | #
# File: psychometrics/psychoanalyze.py
#
# generate pyschometrics plots from PsychometricData
from __future__ import division
import datetime
import logging
import json
import math
import numpy as np
from scipy.optimize import curve_fit
from django.conf import settings
from django.db.models import Sum, Max
from psychometrics.models import PsychometricData
from courseware.models import StudentModule
from pytz import UTC
log = logging.getLogger("edx.psychometrics")
#db = "ocwtutor" # for debugging
#db = "default"
# Database alias used for all PsychometricData queries (`.using(db)` below);
# falls back to the default connection when the setting is absent.
db = getattr(settings, 'DATABASE_FOR_PSYCHOMETRICS', 'default')
#-----------------------------------------------------------------------------
# fit functions
def func_2pl(x, a, b):
    """
    2-parameter logistic (2PL) item response function.

    x: ability/attempt variable (scalar or numpy array)
    a: discrimination parameter
    b: difficulty parameter

    Uses the numerically stable sigmoid form 1/(1 + exp(-z)).  The previous
    exp(z)/(1 + exp(z)) form overflowed for large D*a*(x-b) and returned
    nan (inf/inf) instead of 1.0.
    """
    D = 1.7  # conventional scaling constant for the 2PL model
    z = D * a * (x - b)
    return 1.0 / (1.0 + np.exp(-z))
#-----------------------------------------------------------------------------
# statistics class
class StatVar(object):
    """
    Simple streaming statistics on floating point numbers: avg, sdv, var, min, max.

    Samples are folded in one at a time via add() or the `sv += x` shorthand;
    `unit` rescales avg/var (e.g. to report in different units).
    """
    def __init__(self, unit=1):
        self.sum = 0    # running sum of samples
        self.sum2 = 0   # running sum of squared samples
        self.cnt = 0    # number of samples seen
        self.unit = unit
        self.min = None
        self.max = None

    def add(self, x):
        """Fold one sample into the statistics; None samples are ignored."""
        if x is None:
            return
        if self.min is None or x < self.min:
            self.min = x
        if self.max is None or x > self.max:
            self.max = x
        self.sum += x
        self.sum2 += x ** 2
        self.cnt += 1

    def avg(self):
        """Mean (scaled by unit); 0 when no samples have been added.

        Bug fix: the guard used to be `self.cnt is None`, but cnt starts at 0
        and is never None, so an empty StatVar raised ZeroDivisionError.
        """
        if not self.cnt:
            return 0
        return self.sum / 1.0 / self.cnt / self.unit

    def var(self):
        """Population variance (scaled by unit**2); 0 when empty (same fix as avg)."""
        if not self.cnt:
            return 0
        return (self.sum2 / 1.0 / self.cnt / (self.unit ** 2)) - (self.avg() ** 2)

    def sdv(self):
        """Standard deviation; clamps tiny negative variance (float error) to 0."""
        v = self.var()
        if v > 0:
            return math.sqrt(v)
        else:
            return 0

    def __str__(self):
        return 'cnt=%d, avg=%f, sdv=%f' % (self.cnt, self.avg(), self.sdv())

    def __add__(self, x):
        # `sv += x` folds a sample in-place and returns self (matches original API).
        self.add(x)
        return self
#-----------------------------------------------------------------------------
# histogram generator
def make_histogram(ydata, bins=None):
    '''
    Generate histogram of ydata using bins provided, or by default bins
    from 0 to 100 by 10.  bins must be ordered in increasing order.

    Each value is counted against the highest bin edge it strictly exceeds;
    values <= the lowest edge are dropped.  Returns a dict mapping every
    bin edge to its count.
    '''
    if bins is None:
        bins = range(0, 100, 10)
    hist = dict((edge, 0) for edge in bins)
    for value in ydata:
        # scan edges from highest to lowest, stop at the first one exceeded
        for edge in bins[::-1]:
            if value > edge:
                hist[edge] += 1
                break
    return hist
#-----------------------------------------------------------------------------
def problems_with_psychometric_data(course_id):
    '''
    Return dict of {problems (location urls): count} for which psychometric data is available.
    Does this for a given course_id.
    '''
    # All PsychometricData rows for this course (read from the `db` alias).
    pmdset = PsychometricData.objects.using(db).filter(studentmodule__course_id=course_id)
    # Distinct problem location keys that have at least one row.
    plist = [p['studentmodule__module_state_key'] for p in pmdset.values('studentmodule__module_state_key').distinct()]
    # NOTE(review): one count() query per problem — O(n) round trips; fine for
    # small courses, consider annotate(Count(...)) if this becomes hot.
    problems = dict((p, pmdset.filter(studentmodule__module_state_key=p).count()) for p in plist)
    return problems
#-----------------------------------------------------------------------------
def generate_plots_for_problem(problem):
    """
    Build flot-compatible plot specs for one problem's psychometric data.

    Returns (msg, plots): msg is an HTML status/summary string, plots is a
    list of dicts with keys 'title', 'id', 'info', 'data' (javascript var
    definitions) and 'cmd' (flot plot arguments).  Skips problems with
    fewer than 2 students' worth of data.
    """
    pmdset = PsychometricData.objects.using(db).filter(studentmodule__module_state_key=problem)
    nstudents = pmdset.count()
    msg = ""
    plots = []
    if nstudents < 2:
        msg += "%s nstudents=%d --> skipping, too few" % (problem, nstudents)
        return msg, plots
    max_grade = pmdset[0].studentmodule.max_grade
    agdat = pmdset.aggregate(Sum('attempts'), Max('attempts'))
    max_attempts = agdat['attempts__max']
    total_attempts = agdat['attempts__sum'] # not used yet
    msg += "max attempts = %d" % max_attempts
    # x axis for the IRT curves: attempt number 1..max_attempts
    xdat = range(1, max_attempts + 1)
    dataset = {'xdat': xdat}
    # compute grade statistics
    grades = [pmd.studentmodule.grade for pmd in pmdset]
    gsv = StatVar()
    for g in grades:
        gsv += g
    msg += "<br><p><font color='blue'>Grade distribution: %s</font></p>" % gsv
    # generate grade histogram
    ghist = []
    # flot axis options (javascript object literal passed through verbatim)
    axisopts = """{
        xaxes: [{
            axisLabel: 'Grade'
        }],
        yaxes: [{
            position: 'left',
            axisLabel: 'Count'
        }]
    }"""
    if gsv.max > max_grade:
        msg += "<br/><p><font color='red'>Something is wrong: max_grade=%s, but max(grades)=%s</font></p>" % (max_grade, gsv.max)
        max_grade = gsv.max
    if max_grade > 1:
        # one histogram bin per integer grade 0..max_grade
        ghist = make_histogram(grades, np.linspace(0, max_grade, max_grade + 1))
        # NOTE(review): dict.items() serializes directly only on Python 2;
        # Python 3 would need list(ghist.items()).
        ghist_json = json.dumps(ghist.items())
        plot = {'title': "Grade histogram for %s" % problem,
                'id': 'histogram',
                'info': '',
                'data': "var dhist = %s;\n" % ghist_json,
                'cmd': '[ {data: dhist, bars: { show: true, align: "center" }} ], %s' % axisopts,
                }
        plots.append(plot)
    else:
        msg += "<br/>Not generating histogram: max_grade=%s" % max_grade
    # histogram of time differences between checks
    # Warning: this is inefficient - doesn't scale to large numbers of students
    dtset = []  # time differences in minutes
    dtsv = StatVar()
    for pmd in pmdset:
        try:
            # NOTE(review): eval() on DB-stored checktimes assumes trusted
            # content written by make_psychometrics_data_update_handler.
            checktimes = eval(pmd.checktimes)  # update log of attempt timestamps
        except:
            continue
        if len(checktimes) < 2:
            continue
        ct0 = checktimes[0]
        for ct in checktimes[1:]:
            dt = (ct - ct0).total_seconds() / 60.0
            if dt < 20:  # ignore if dt too long
                dtset.append(dt)
                dtsv += dt
            ct0 = ct
    if dtsv.cnt > 2:
        msg += "<br/><p><font color='brown'>Time differences between checks: %s</font></p>" % dtsv
        bins = np.linspace(0, 1.5 * dtsv.sdv(), 30)
        dbar = bins[1] - bins[0]
        thist = make_histogram(dtset, bins)
        # Python 2 tuple-unpacking lambda (syntax error on Python 3)
        thist_json = json.dumps(sorted(thist.items(), key=lambda(x): x[0]))
        axisopts = """{ xaxes: [{ axisLabel: 'Time (min)'}], yaxes: [{position: 'left',axisLabel: 'Count'}]}"""
        plot = {'title': "Histogram of time differences between checks",
                'id': 'thistogram',
                'info': '',
                'data': "var thist = %s;\n" % thist_json,
                'cmd': '[ {data: thist, bars: { show: true, align: "center", barWidth:%f }} ], %s' % (dbar, axisopts),
                }
        plots.append(plot)
    # one IRT plot curve for each grade received (TODO: this assumes integer grades)
    for grade in range(1, int(max_grade) + 1):
        yset = {}
        gset = pmdset.filter(studentmodule__grade=grade)
        ngset = gset.count()
        if ngset == 0:
            continue
        # cumulative fraction of students (with this grade) done by attempt x
        ydat = []
        ylast = 0
        for x in xdat:
            y = gset.filter(attempts=x).count() / ngset
            ydat.append(y + ylast)
            ylast = y + ylast
        yset['ydat'] = ydat
        if len(ydat) > 3:  # try to fit to logistic function if enough data points
            try:
                cfp = curve_fit(func_2pl, xdat, ydat, [1.0, max_attempts / 2.0])
                yset['fitparam'] = cfp
                yset['fitpts'] = func_2pl(np.array(xdat), *cfp[0])
                yset['fiterr'] = [yd - yf for (yd, yf) in zip(ydat, yset['fitpts'])]
                # smooth curve (100 points) for plotting the fitted logistic
                fitx = np.linspace(xdat[0], xdat[-1], 100)
                yset['fitx'] = fitx
                yset['fity'] = func_2pl(np.array(fitx), *cfp[0])
            except Exception as err:
                log.debug('Error in psychoanalyze curve fitting: %s' % err)
        dataset['grade_%d' % grade] = yset
    axisopts = """{
        xaxes: [{
            axisLabel: 'Number of Attempts'
        }],
        yaxes: [{
            max:1.0,
            position: 'left',
            axisLabel: 'Probability of correctness'
        }]
    }"""
    # generate points for flot plot
    for grade in range(1, int(max_grade) + 1):
        jsdata = ""
        jsplots = []
        gkey = 'grade_%d' % grade
        if gkey in dataset:
            yset = dataset[gkey]
            jsdata += "var d%d = %s;\n" % (grade, json.dumps(zip(xdat, yset['ydat'])))
            jsplots.append('{ data: d%d, lines: { show: false }, points: { show: true}, color: "red" }' % grade)
            if 'fitpts' in yset:
                jsdata += 'var fit = %s;\n' % (json.dumps(zip(yset['fitx'], yset['fity'])))
                jsplots.append('{ data: fit, lines: { show: true }, color: "blue" }')
                (a, b) = yset['fitparam'][0]
                irtinfo = "(2PL: D=1.7, a=%6.3f, b=%6.3f)" % (a, b)
            else:
                irtinfo = ""
            plots.append({'title': 'IRT Plot for grade=%s %s' % (grade, irtinfo),
                          'id': "irt%s" % grade,
                          'info': '',
                          'data': jsdata,
                          'cmd': '[%s], %s' % (','.join(jsplots), axisopts),
                          })
    #log.debug('plots = %s' % plots)
    return msg, plots
#-----------------------------------------------------------------------------
def make_psychometrics_data_update_handler(course_id, user, module_state_key):
    """
    Construct and return a procedure which may be called to update
    the PsychometricData instance for the given StudentModule instance.

    The returned closure captures `sm` (the StudentModule, created if
    missing) and `pmd` (its PsychometricData row, created if missing).
    """
    sm, status = StudentModule.objects.get_or_create(
        course_id=course_id,
        student=user,
        module_state_key=module_state_key,
        defaults={'state': '{}', 'module_type': 'problem'},
    )
    try:
        pmd = PsychometricData.objects.using(db).get(studentmodule=sm)
    except PsychometricData.DoesNotExist:
        pmd = PsychometricData(studentmodule=sm)
    def psychometrics_data_update_handler(state):
        """
        This function may be called each time a problem is successfully checked
        (eg on save_problem_check events in capa_module).
        state = instance state (a nice, uniform way to interface - for more future psychometric feature extraction)

        Note: the `state` argument is currently ignored; the handler re-reads
        sm.state (JSON) instead.
        """
        try:
            state = json.loads(sm.state)
            done = state['done']
        except:
            # NOTE(review): bare except also hides KeyError when 'done' is absent.
            log.exception("Oops, failed to eval state for %s (state=%s)" % (sm, sm.state))
            return
        pmd.done = done
        try:
            pmd.attempts = state.get('attempts', 0)
        except:
            log.exception("no attempts for %s (state=%s)" % (sm, sm.state))
        try:
            # NOTE(review): eval() on DB-stored text; assumed trusted because
            # this module is the only writer of pmd.checktimes.
            checktimes = eval(pmd.checktimes)  # update log of attempt timestamps
        except:
            checktimes = []
        checktimes.append(datetime.datetime.now(UTC))
        pmd.checktimes = checktimes
        try:
            pmd.save()
        except:
            log.exception("Error in updating psychometrics data for %s" % sm)
    return psychometrics_data_update_handler
| agpl-3.0 |
saurabh6790/med_app_rels | accounts/report/budget_variance_report/budget_variance_report.py | 28 | 4654 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _, msgprint
from webnotes.utils import flt
import time
from accounts.utils import get_fiscal_year
from controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
    """
    Report entry point: build (columns, data) for the budget variance report.

    Each data row is [cost_center, account, <target, actual, variance> per
    period..., total_target, total_actual, total_variance], sorted by
    cost center then account.
    """
    if not filters: filters = {}
    columns = get_columns(filters)
    period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
    cam_map = get_costcenter_account_month_map(filters)
    data = []
    for cost_center, cost_center_items in cam_map.items():
        for account, monthwise_data in cost_center_items.items():
            row = [cost_center, account]
            totals = [0, 0, 0]  # running [target, actual, variance] across all periods
            for relevant_months in period_month_ranges:
                period_data = [0, 0, 0]  # [target, actual, variance] for this period
                for month in relevant_months:
                    month_data = monthwise_data.get(month, {})
                    for i, fieldname in enumerate(["target", "actual", "variance"]):
                        value = flt(month_data.get(fieldname))
                        period_data[i] += value
                        totals[i] += value
                # variance is recomputed from the summed target/actual
                period_data[2] = period_data[0] - period_data[1]
                row += period_data
            totals[2] = totals[0] - totals[1]
            row += totals
            data.append(row)
    return columns, sorted(data, key=lambda x: (x[0], x[1]))
def get_columns(filters):
    """
    Build the report column labels: two link columns, then Target/Actual/
    Variance triplets per period (month or grouped month range), then totals.
    Raises via msgprint if a mandatory filter is missing.
    """
    for fieldname in ["fiscal_year", "period", "company"]:
        if not filters.get(fieldname):
            label = (" ".join(fieldname.split("_"))).title()
            msgprint(_("Please specify") + ": " + label,
                raise_exception=True)
    columns = ["Cost Center:Link/Cost Center:120", "Account:Link/Account:120"]
    # non-monthly periods span several months, so label with "Mon - Mon"
    group_months = False if filters["period"] == "Monthly" else True
    for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
        for label in ["Target (%s)", "Actual (%s)", "Variance (%s)"]:
            if group_months:
                label = label % (from_date.strftime("%b") + " - " + to_date.strftime("%b"))
            else:
                label = label % from_date.strftime("%b")
            columns.append(label+":Float:120")
    return columns + ["Total Target:Float:120", "Total Actual:Float:120",
        "Total Variance:Float:120"]
#Get cost center & target details
def get_costcenter_target_details(filters):
    """Return budget rows (cost center + account + allocated budget) for the
    selected fiscal year and company, ordered by cost center name."""
    return webnotes.conn.sql("""select cc.name, cc.distribution_id,
        cc.parent_cost_center, bd.account, bd.budget_allocated
        from `tabCost Center` cc, `tabBudget Detail` bd
        where bd.parent=cc.name and bd.fiscal_year=%s and
        cc.company=%s order by cc.name""" % ('%s', '%s'),
        (filters.get("fiscal_year"), filters.get("company")), as_dict=1)
#Get target distribution details of accounts of cost center
def get_target_distribution_details(filters):
    """Return {distribution_id: {month_name: percentage_allocation}} for the
    fiscal year, used to spread annual budgets across months."""
    target_details = {}
    for d in webnotes.conn.sql("""select bd.name, bdd.month, bdd.percentage_allocation
        from `tabBudget Distribution Detail` bdd, `tabBudget Distribution` bd
        where bdd.parent=bd.name and bd.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
        target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
    return target_details
#Get actual details from gl entry
def get_actual_details(filters):
    """Return GL entries (with month name) grouped as
    {cost_center: {account: [gl_row, ...]}} for budgeted accounts only."""
    ac_details = webnotes.conn.sql("""select gl.account, gl.debit, gl.credit,
        gl.cost_center, MONTHNAME(gl.posting_date) as month_name
        from `tabGL Entry` gl, `tabBudget Detail` bd
        where gl.fiscal_year=%s and company=%s
        and bd.account=gl.account and bd.parent=gl.cost_center""" % ('%s', '%s'),
        (filters.get("fiscal_year"), filters.get("company")), as_dict=1)
    cc_actual_details = {}
    for d in ac_details:
        cc_actual_details.setdefault(d.cost_center, {}).setdefault(d.account, []).append(d)
    return cc_actual_details
def get_costcenter_account_month_map(filters):
    """
    Build {cost_center: {account: {month_name: {'target': x, 'actual': y}}}}.

    Target per month is the allocated budget spread by the cost center's
    distribution (or evenly, 100/12, when no distribution is set); actual
    is the net debit-credit of matching GL entries in that month.
    """
    import datetime
    costcenter_target_details = get_costcenter_target_details(filters)
    tdd = get_target_distribution_details(filters)
    actual_details = get_actual_details(filters)
    cam_map = {}
    for ccd in costcenter_target_details:
        for month_id in range(1, 13):
            # any non-leap-year works here; only the month name is used
            month = datetime.date(2013, month_id, 1).strftime('%B')
            cam_map.setdefault(ccd.name, {}).setdefault(ccd.account, {})\
                .setdefault(month, webnotes._dict({
                    "target": 0.0, "actual": 0.0
                }))
            tav_dict = cam_map[ccd.name][ccd.account][month]
            month_percentage = tdd.get(ccd.distribution_id, {}).get(month, 0) \
                if ccd.distribution_id else 100.0/12
            tav_dict.target = flt(ccd.budget_allocated) * month_percentage / 100
            for ad in actual_details.get(ccd.name, {}).get(ccd.account, []):
                if ad.month_name == month:
                    tav_dict.actual += flt(ad.debit) - flt(ad.credit)
    return cam_map
| agpl-3.0 |
dmirubtsov/k8s-executor | vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py | 31 | 5238 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
# Command-line interface: optional list of files to check (default: walk the
# whole repo), with overridable repo root and boilerplate-template directory.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
# Repo root defaults to two levels above this script (hack/boilerplate/..)
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument("--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
# NOTE: parsed at import time; `args` is used as a module-level global below.
args = parser.parse_args()
def get_refs():
    """Load the reference boilerplate headers, keyed by file extension.

    Reads every boilerplate.<ext>.txt in args.boilerplate_dir and returns
    {ext: list of header lines}.
    """
    refs = {}
    pattern = os.path.join(args.boilerplate_dir, "boilerplate.*.txt")
    for path in glob.glob(pattern):
        extension = os.path.basename(path).split(".")[1]
        with open(path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()
    return refs
def file_passes(filename, refs, regexs):
    """Return True if *filename* begins with the expected boilerplate header.

    refs maps extension (or basename) to the reference header lines;
    regexs holds the precompiled patterns from get_regexs().

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to I/O errors, and the file handle is closed
    via a context manager instead of being leaked on read errors.
    """
    try:
        with open(filename, 'r') as f:
            data = f.read()
    except (IOError, OSError):
        # unreadable/missing files fail the check rather than crash the run
        return False
    basename = os.path.basename(filename)
    extension = file_extension(filename)
    # extensionless files (e.g. Makefile) are looked up by basename
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]
    # remove build tags from the top of Go files
    if extension == "go":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    # remove shebang from the top of shell files
    if extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)
    data = data.splitlines()
    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        return False
    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]
    # the literal placeholder "YEAR" must never appear in a real file
    p = regexs["year"]
    for d in data:
        if p.search(d):
            return False
    # Replace the first occurrence of a concrete year with "YEAR" so the
    # header can be compared against the reference verbatim.
    p = regexs["date"]
    for i, d in enumerate(data):
        (data[i], found) = p.subn('YEAR', d)
        if found != 0:
            break
    # if we don't match the reference at this point, fail
    if ref != data:
        return False
    return True
def file_extension(filename):
    """Return the lowercased final extension of *filename* without the dot,
    or the empty string when there is none."""
    _, ext = os.path.splitext(filename)
    return ext.rpartition(".")[2].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', "vendor", "test/e2e/generated/bindata.go"]
def normalize_files(files):
    """Filter out paths containing any skipped_dirs fragment and make the
    remaining paths absolute relative to args.rootdir."""
    kept = [p for p in files if not any(skip in p for skip in skipped_dirs)]
    return [p if os.path.isabs(p) else os.path.join(args.rootdir, p)
            for p in kept]
def get_files(extensions):
    """
    Return the files to check: the explicit CLI list if given, otherwise a
    recursive walk of args.rootdir, filtered to paths whose extension or
    basename is in *extensions* and not under a skipped directory.
    """
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            # (mutating `dirs` in place is how os.walk prunes subtrees)
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)
            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)
    files = normalize_files(files)
    # keep only files whose extension (or basename, for extensionless files
    # like Makefile) has a boilerplate reference
    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles
def get_regexs():
    """Compile and return the patterns used by file_passes(), keyed by purpose:
    'year' (literal placeholder that must not appear in real files),
    'date' (concrete years to be normalized to YEAR),
    'go_build_constraints' and 'shebang' (prefixes stripped before comparison)."""
    return {
        # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
        "year": re.compile('YEAR'),
        # dates can be 2014, 2015 or 2016, company holder names can be anything
        "date": re.compile('(2014|2015|2016)'),
        # strip the leading "// +build ..." block from Go files
        "go_build_constraints": re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE),
        # strip "#!..." shebang lines from shell scripts
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
    }
def main():
    """Check every candidate file and print the paths that FAIL the
    boilerplate check, one per line."""
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())
    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)
# main() returns None, so the process always exits 0; failures are only
# reported on stdout for the calling script to inspect.
if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 |
mrkipling/maraschino | lib/sqlalchemy/dialects/postgresql/pypostgresql.py | 14 | 2156 | # postgresql/pypostgresql.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the PostgreSQL database via py-postgresql.
Connecting
----------
URLs are of the form ``postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]``.
"""
from sqlalchemy import util
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.postgresql.base import PGDialect, PGExecutionContext
from sqlalchemy import processors
class PGNumeric(sqltypes.Numeric):
    # Bind: py-postgresql expects numeric parameters as strings.
    def bind_processor(self, dialect):
        return processors.to_str
    # Result: pass Decimals through unchanged when asdecimal, else coerce to float.
    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            return None
        else:
            return processors.to_float
class PGExecutionContext_pypostgresql(PGExecutionContext):
    # No driver-specific execution behavior; exists so the dialect can point
    # execution_ctx_cls at a driver-named subclass.
    pass
class PGDialect_pypostgresql(PGDialect):
    """PostgreSQL dialect backed by the py-postgresql DBAPI driver."""
    driver = 'pypostgresql'
    supports_unicode_statements = True
    supports_unicode_binds = True
    description_encoding = None
    default_paramstyle = 'pyformat'
    # requires trunk version to support sane rowcounts
    # TODO: use dbapi version information to set this flag appropariately
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_pypostgresql
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric : PGNumeric,
            sqltypes.Float: sqltypes.Float, # prevents PGNumeric from being used
        }
    )
    @classmethod
    def dbapi(cls):
        # Imported lazily so the dialect module loads without the driver installed.
        from postgresql.driver import dbapi20
        return dbapi20
    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into py-postgresql connect kwargs
        (port coerced to int, defaulting to 5432)."""
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        else:
            opts['port'] = 5432
        opts.update(url.query)
        return ([], opts)
    def is_disconnect(self, e, connection, cursor):
        # py-postgresql reports dropped connections via this message text.
        return "connection is closed" in str(e)
# Module-level alias SQLAlchemy uses to locate the dialect class.
dialect = PGDialect_pypostgresql
| mit |
wangjun/odoo | addons/account/project/wizard/account_analytic_cost_ledger_for_journal_report.py | 378 | 2209 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_cost_ledger_journal_report(osv.osv_memory):
    """Transient wizard collecting a date range and analytic journals, then
    launching the analytic cost ledger (quantity) report."""
    _name = 'account.analytic.cost.ledger.journal.report'
    _description = 'Account Analytic Cost Ledger For Journal Report'
    _columns = {
        'date1': fields.date('Start of period', required=True),
        'date2': fields.date('End of period', required=True),
        'journal': fields.many2many('account.analytic.journal', 'ledger_journal_rel', 'ledger_id', 'journal_id', 'Journals'),
    }
    # Defaults: January 1st of the current year through today.
    _defaults = {
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d')
    }
    def check_report(self, cr, uid, ids, context=None):
        """Assemble the report payload from the wizard values and the active
        analytic accounts, and return the report action."""
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'account.analytic.account',
            'form': data
        }
        datas['form']['active_ids'] = context.get('active_ids', False)
        return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticcostledgerquantity', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EvanzzzZ/mxnet | example/neural-style/end_to_end/basic.py | 15 | 6447 | import sys
sys.path.insert(0, "../../mxnet/python/")
import mxnet as mx
import numpy as np
import model_vgg19 as vgg
class PretrainedInit(mx.init.Initializer):
    """Initializer that restores pretrained weights into a prefixed symbol.

    Parameter names look like ``<prefix>_<name>``; the prefix (plus the
    ``'_'`` separator) is stripped and the remainder is looked up in the
    ``arg:``/``aux:`` entries of a params dict as produced by ``mx.nd.load``.
    Parameters with no pretrained counterpart are zero-initialized.
    """
    def __init__(self, prefix, params, verbose=False):
        self.prefix_len = len(prefix) + 1  # +1 for the '_' separator
        self.verbose = verbose
        self.arg_params = {k : v for k, v in params.items() if k.startswith("arg:")}
        self.aux_params = {k : v for k, v in params.items() if k.startswith("aux:")}
        # Names with the "arg:"/"aux:" prefixes stripped, for membership tests.
        self.arg_names = set([k[4:] for k in self.arg_params.keys()])
        self.aux_names = set([k[4:] for k in self.aux_params.keys()])

    def __call__(self, name, arr):
        """Copy the pretrained tensor for ``name`` into ``arr`` in place."""
        key = name[self.prefix_len:]
        if key in self.arg_names:
            if self.verbose:
                print("Init %s" % name)
            self.arg_params["arg:" + key].copyto(arr)
        elif key in self.aux_names:
            # BUG FIX: the original tested ``key in self.aux_params``, whose
            # keys still carry the "aux:" prefix, so auxiliary states could
            # never match and silently fell through to zero-initialization.
            if self.verbose:
                print("Init %s" % name)
            self.aux_params["aux:" + key].copyto(arr)
        else:
            print("Unknown params: %s, init with 0" % name)
            arr[:] = 0.
def style_gram_symbol(input_shape, style):
    """Build one Gram-matrix symbol per style layer.

    Returns the grouped Gram symbols together with the per-layer scale
    factors used later to normalise the style gradients.
    """
    _, output_shapes, _ = style.infer_shape(**input_shape)
    gram_symbols = []
    gram_scales = []
    for idx in range(len(style.list_outputs())):
        layer_shape = output_shapes[idx]
        # Flatten each feature map to (channels, H*W).
        flat = mx.sym.Reshape(style[idx],
                              shape=(int(layer_shape[1]), int(np.prod(layer_shape[2:]))))
        # dot(flat, flat^T) expressed as a FullyConnected for speed.
        gram_symbols.append(mx.sym.FullyConnected(flat, flat, no_bias=True,
                                                  num_hidden=layer_shape[1]))
        gram_scales.append(np.prod(layer_shape[1:]) * layer_shape[1])
    return mx.sym.Group(gram_symbols), gram_scales
def get_loss(gram, content):
    """Build the style and content loss symbols.

    Style loss is the per-layer sum of squared differences between each Gram
    matrix and a ``target_gram_<i>`` variable; content loss compares the
    content layer against a ``target_content`` variable.
    """
    style_losses = []
    for idx in range(len(gram.list_outputs())):
        target = mx.sym.Variable("target_gram_%d" % idx)
        style_losses.append(mx.sym.sum(mx.sym.square(target - gram[idx])))
    target_content = mx.sym.Variable("target_content")
    content_loss = mx.sym.sum(mx.sym.square(target_content - content))
    return mx.sym.Group(style_losses), content_loss
def get_content_module(prefix, dshape, ctx, params):
    """Inference-only module exposing the VGG content layer, loaded with
    the pretrained weights in ``params``."""
    symbol = vgg.get_vgg_symbol(prefix, True)
    module = mx.mod.Module(symbol=symbol,
                           data_names=("%s_data" % prefix,),
                           label_names=None,
                           context=ctx)
    module.bind(data_shapes=[("%s_data" % prefix, dshape)], for_training=False)
    module.init_params(PretrainedInit(prefix, params))
    return module
def get_style_module(prefix, dshape, ctx, params):
    """Inference-only module computing the Gram matrices of the VGG style
    layers for an input of shape ``dshape``."""
    style, _content = vgg.get_vgg_symbol(prefix)
    gram, _scales = style_gram_symbol({"%s_data" % prefix: dshape}, style)
    module = mx.mod.Module(symbol=gram,
                           data_names=("%s_data" % prefix,),
                           label_names=None,
                           context=ctx)
    module.bind(data_shapes=[("%s_data" % prefix, dshape)], for_training=False)
    module.init_params(PretrainedInit(prefix, params))
    return module
def get_loss_module(prefix, dshape, ctx, params):
    """Module computing the grouped style+content losses.

    Bound with ``inputs_need_grad=True`` so the gradient with respect to the
    input image can be read back for optimisation.  Returns the module and
    the per-layer Gram scale factors.
    """
    style, content = vgg.get_vgg_symbol(prefix)
    gram, gram_scales = style_gram_symbol({"%s_data" % prefix: dshape}, style)
    style_loss, content_loss = get_loss(gram, content)
    loss_group = mx.sym.Group([style_loss, content_loss])
    module = mx.mod.Module(symbol=loss_group,
                           data_names=("%s_data" % prefix,),
                           label_names=None,
                           context=ctx)
    module.bind(data_shapes=[("%s_data" % prefix, dshape)],
                for_training=True, inputs_need_grad=True)
    module.init_params(PretrainedInit(prefix, params))
    return module, gram_scales
if __name__ == "__main__":
    from data_processing import PreprocessContentImage, PreprocessStyleImage
    from data_processing import PostprocessImage, SaveImage
    # Pretrained VGG19 weights; keys are "arg:<name>"/"aux:<name>".
    vgg_params = mx.nd.load("./model/vgg19.params")
    style_weight = 2
    content_weight = 10
    long_edge = 384
    content_np = PreprocessContentImage("./input/IMG_4343.jpg", long_edge)
    style_np = PreprocessStyleImage("./input/starry_night.jpg", shape=content_np.shape)
    dshape = content_np.shape
    ctx = mx.gpu()
    # Style targets: run the style image once, capture the Gram matrices on
    # the CPU, then free the module to release GPU memory.
    style_mod = get_style_module("style", dshape, ctx, vgg_params)
    style_mod.forward(mx.io.DataBatch([mx.nd.array(style_np)], [0]), is_train=False)
    style_array = [arr.copyto(mx.cpu()) for arr in style_mod.get_outputs()]
    del style_mod
    # Content target: single forward pass over the content image.
    content_mod = get_content_module("content", dshape, ctx, vgg_params)
    content_mod.forward(mx.io.DataBatch([mx.nd.array(content_np)], [0]), is_train=False)
    content_array = content_mod.get_outputs()[0].copyto(mx.cpu())
    del content_mod
    # Loss module: the fixed targets are installed as (non-trainable) params.
    mod, gscale = get_loss_module("loss", dshape, ctx, vgg_params)
    extra_args = {"target_gram_%d" % i : style_array[i] for i in range(len(style_array))}
    extra_args["target_content"] = content_array
    mod.set_params(extra_args, {}, True, True)
    # Per-output gradient weights: style terms normalised by their Gram
    # scale, plus one entry for the content loss.
    grad_array = []
    for i in range(len(style_array)):
        grad_array.append(mx.nd.ones((1,), ctx) * (float(style_weight) / gscale[i]))
    grad_array.append(mx.nd.ones((1,), ctx) * (float(content_weight)))
    # Optimise the image itself, starting from small uniform noise.
    img = mx.nd.zeros(content_np.shape, ctx=ctx)
    img[:] = mx.rnd.uniform(-0.1, 0.1, img.shape)
    lr = mx.lr_scheduler.FactorScheduler(step=80, factor=.9)
    optimizer = mx.optimizer.SGD(
        learning_rate = 0.001,
        wd = 0.0005,
        momentum=0.9,
        lr_scheduler = lr)
    optim_state = optimizer.create_state(0, img)
    old_img = img.copyto(ctx)
    # Gradient clipping threshold proportional to the image size.
    clip_norm = 1 * np.prod(img.shape)
    import logging
    for e in range(800):
        mod.forward(mx.io.DataBatch([img], [0]), is_train=True)
        mod.backward(grad_array)
        data_grad = mod.get_input_grads()[0]
        gnorm = mx.nd.norm(data_grad).asscalar()
        if gnorm > clip_norm:
            print("Data Grad: ", gnorm / clip_norm)
            data_grad[:] *= clip_norm / gnorm
        optimizer.update(0, img, data_grad, optim_state)
        new_img = img
        # Relative change of the image; a rough convergence indicator.
        eps = (mx.nd.norm(old_img - new_img) / mx.nd.norm(new_img)).asscalar()
        old_img = new_img.copyto(ctx)
        logging.info('epoch %d, relative change %f', e, eps)
        if (e+1) % 50 == 0:
            SaveImage(new_img.asnumpy(), 'output/tmp_'+str(e+1)+'.jpg')
    SaveImage(new_img.asnumpy(), "./output/out.jpg")
| apache-2.0 |
mcus/SickRage | lib/unidecode/x079.py | 252 | 4602 | data = (
'Tani ', # 0x00
'Jiao ', # 0x01
'[?] ', # 0x02
'Zhang ', # 0x03
'Qiao ', # 0x04
'Dun ', # 0x05
'Xian ', # 0x06
'Yu ', # 0x07
'Zhui ', # 0x08
'He ', # 0x09
'Huo ', # 0x0a
'Zhai ', # 0x0b
'Lei ', # 0x0c
'Ke ', # 0x0d
'Chu ', # 0x0e
'Ji ', # 0x0f
'Que ', # 0x10
'Dang ', # 0x11
'Yi ', # 0x12
'Jiang ', # 0x13
'Pi ', # 0x14
'Pi ', # 0x15
'Yu ', # 0x16
'Pin ', # 0x17
'Qi ', # 0x18
'Ai ', # 0x19
'Kai ', # 0x1a
'Jian ', # 0x1b
'Yu ', # 0x1c
'Ruan ', # 0x1d
'Meng ', # 0x1e
'Pao ', # 0x1f
'Ci ', # 0x20
'[?] ', # 0x21
'[?] ', # 0x22
'Mie ', # 0x23
'Ca ', # 0x24
'Xian ', # 0x25
'Kuang ', # 0x26
'Lei ', # 0x27
'Lei ', # 0x28
'Zhi ', # 0x29
'Li ', # 0x2a
'Li ', # 0x2b
'Fan ', # 0x2c
'Que ', # 0x2d
'Pao ', # 0x2e
'Ying ', # 0x2f
'Li ', # 0x30
'Long ', # 0x31
'Long ', # 0x32
'Mo ', # 0x33
'Bo ', # 0x34
'Shuang ', # 0x35
'Guan ', # 0x36
'Lan ', # 0x37
'Zan ', # 0x38
'Yan ', # 0x39
'Shi ', # 0x3a
'Shi ', # 0x3b
'Li ', # 0x3c
'Reng ', # 0x3d
'She ', # 0x3e
'Yue ', # 0x3f
'Si ', # 0x40
'Qi ', # 0x41
'Ta ', # 0x42
'Ma ', # 0x43
'Xie ', # 0x44
'Xian ', # 0x45
'Xian ', # 0x46
'Zhi ', # 0x47
'Qi ', # 0x48
'Zhi ', # 0x49
'Beng ', # 0x4a
'Dui ', # 0x4b
'Zhong ', # 0x4c
'[?] ', # 0x4d
'Yi ', # 0x4e
'Shi ', # 0x4f
'You ', # 0x50
'Zhi ', # 0x51
'Tiao ', # 0x52
'Fu ', # 0x53
'Fu ', # 0x54
'Mi ', # 0x55
'Zu ', # 0x56
'Zhi ', # 0x57
'Suan ', # 0x58
'Mei ', # 0x59
'Zuo ', # 0x5a
'Qu ', # 0x5b
'Hu ', # 0x5c
'Zhu ', # 0x5d
'Shen ', # 0x5e
'Sui ', # 0x5f
'Ci ', # 0x60
'Chai ', # 0x61
'Mi ', # 0x62
'Lu ', # 0x63
'Yu ', # 0x64
'Xiang ', # 0x65
'Wu ', # 0x66
'Tiao ', # 0x67
'Piao ', # 0x68
'Zhu ', # 0x69
'Gui ', # 0x6a
'Xia ', # 0x6b
'Zhi ', # 0x6c
'Ji ', # 0x6d
'Gao ', # 0x6e
'Zhen ', # 0x6f
'Gao ', # 0x70
'Shui ', # 0x71
'Jin ', # 0x72
'Chen ', # 0x73
'Gai ', # 0x74
'Kun ', # 0x75
'Di ', # 0x76
'Dao ', # 0x77
'Huo ', # 0x78
'Tao ', # 0x79
'Qi ', # 0x7a
'Gu ', # 0x7b
'Guan ', # 0x7c
'Zui ', # 0x7d
'Ling ', # 0x7e
'Lu ', # 0x7f
'Bing ', # 0x80
'Jin ', # 0x81
'Dao ', # 0x82
'Zhi ', # 0x83
'Lu ', # 0x84
'Shan ', # 0x85
'Bei ', # 0x86
'Zhe ', # 0x87
'Hui ', # 0x88
'You ', # 0x89
'Xi ', # 0x8a
'Yin ', # 0x8b
'Zi ', # 0x8c
'Huo ', # 0x8d
'Zhen ', # 0x8e
'Fu ', # 0x8f
'Yuan ', # 0x90
'Wu ', # 0x91
'Xian ', # 0x92
'Yang ', # 0x93
'Ti ', # 0x94
'Yi ', # 0x95
'Mei ', # 0x96
'Si ', # 0x97
'Di ', # 0x98
'[?] ', # 0x99
'Zhuo ', # 0x9a
'Zhen ', # 0x9b
'Yong ', # 0x9c
'Ji ', # 0x9d
'Gao ', # 0x9e
'Tang ', # 0x9f
'Si ', # 0xa0
'Ma ', # 0xa1
'Ta ', # 0xa2
'[?] ', # 0xa3
'Xuan ', # 0xa4
'Qi ', # 0xa5
'Yu ', # 0xa6
'Xi ', # 0xa7
'Ji ', # 0xa8
'Si ', # 0xa9
'Chan ', # 0xaa
'Tan ', # 0xab
'Kuai ', # 0xac
'Sui ', # 0xad
'Li ', # 0xae
'Nong ', # 0xaf
'Ni ', # 0xb0
'Dao ', # 0xb1
'Li ', # 0xb2
'Rang ', # 0xb3
'Yue ', # 0xb4
'Ti ', # 0xb5
'Zan ', # 0xb6
'Lei ', # 0xb7
'Rou ', # 0xb8
'Yu ', # 0xb9
'Yu ', # 0xba
'Chi ', # 0xbb
'Xie ', # 0xbc
'Qin ', # 0xbd
'He ', # 0xbe
'Tu ', # 0xbf
'Xiu ', # 0xc0
'Si ', # 0xc1
'Ren ', # 0xc2
'Tu ', # 0xc3
'Zi ', # 0xc4
'Cha ', # 0xc5
'Gan ', # 0xc6
'Yi ', # 0xc7
'Xian ', # 0xc8
'Bing ', # 0xc9
'Nian ', # 0xca
'Qiu ', # 0xcb
'Qiu ', # 0xcc
'Chong ', # 0xcd
'Fen ', # 0xce
'Hao ', # 0xcf
'Yun ', # 0xd0
'Ke ', # 0xd1
'Miao ', # 0xd2
'Zhi ', # 0xd3
'Geng ', # 0xd4
'Bi ', # 0xd5
'Zhi ', # 0xd6
'Yu ', # 0xd7
'Mi ', # 0xd8
'Ku ', # 0xd9
'Ban ', # 0xda
'Pi ', # 0xdb
'Ni ', # 0xdc
'Li ', # 0xdd
'You ', # 0xde
'Zu ', # 0xdf
'Pi ', # 0xe0
'Ba ', # 0xe1
'Ling ', # 0xe2
'Mo ', # 0xe3
'Cheng ', # 0xe4
'Nian ', # 0xe5
'Qin ', # 0xe6
'Yang ', # 0xe7
'Zuo ', # 0xe8
'Zhi ', # 0xe9
'Zhi ', # 0xea
'Shu ', # 0xeb
'Ju ', # 0xec
'Zi ', # 0xed
'Huo ', # 0xee
'Ji ', # 0xef
'Cheng ', # 0xf0
'Tong ', # 0xf1
'Zhi ', # 0xf2
'Huo ', # 0xf3
'He ', # 0xf4
'Yin ', # 0xf5
'Zi ', # 0xf6
'Zhi ', # 0xf7
'Jie ', # 0xf8
'Ren ', # 0xf9
'Du ', # 0xfa
'Yi ', # 0xfb
'Zhu ', # 0xfc
'Hui ', # 0xfd
'Nong ', # 0xfe
'Fu ', # 0xff
)
| gpl-3.0 |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/test/test_compileall.py | 91 | 2725 | import compileall
import imp
import os
import py_compile
import shutil
import struct
import tempfile
from test import test_support
import unittest
class CompileallTests(unittest.TestCase):
    """Tests for compileall's bytecode (re)generation behaviour (Python 2)."""

    def setUp(self):
        # Two small source files in a scratch directory; bytecode lands in
        # .pyc normally, .pyo when running under -O (__debug__ is False).
        self.directory = tempfile.mkdtemp()
        self.source_path = os.path.join(self.directory, '_test.py')
        self.bc_path = self.source_path + ('c' if __debug__ else 'o')
        with open(self.source_path, 'w') as file:
            file.write('x = 123\n')
        self.source_path2 = os.path.join(self.directory, '_test2.py')
        self.bc_path2 = self.source_path2 + ('c' if __debug__ else 'o')
        shutil.copyfile(self.source_path, self.source_path2)

    def tearDown(self):
        shutil.rmtree(self.directory)

    def data(self):
        """Return (actual, expected) 8-byte bytecode headers: magic + mtime."""
        with open(self.bc_path, 'rb') as file:
            data = file.read(8)
        mtime = int(os.stat(self.source_path).st_mtime)
        compare = struct.pack('<4sl', imp.get_magic(), mtime)
        return data, compare

    def recreation_check(self, metadata):
        """Check that compileall recreates bytecode when the new metadata is
        used."""
        if not hasattr(os, 'stat'):
            return
        py_compile.compile(self.source_path)
        self.assertEqual(*self.data())
        # Corrupt the header in place, keeping the code object intact.
        with open(self.bc_path, 'rb') as file:
            bc = file.read()[len(metadata):]
        with open(self.bc_path, 'wb') as file:
            file.write(metadata)
            file.write(bc)
        self.assertNotEqual(*self.data())
        compileall.compile_dir(self.directory, force=False, quiet=True)
        # BUG FIX: this was ``assertTrue(*self.data())``, which only checked
        # that the actual header was truthy (the expected header became the
        # failure *message*); the intent is that the regenerated header
        # matches the expected one.
        self.assertEqual(*self.data())

    def test_mtime(self):
        # A changed mtime in the header must trigger recompilation.
        self.recreation_check(struct.pack('<4sl', imp.get_magic(), 1))

    def test_magic_number(self):
        # A changed magic number must trigger recompilation.
        # (The original comment wrongly said "mtime" -- copy/paste.)
        self.recreation_check(b'\0\0\0\0')

    def test_compile_files(self):
        # Test compiling a single file, then the complete directory.
        for fn in (self.bc_path, self.bc_path2):
            try:
                os.unlink(fn)
            except OSError:
                # Narrowed from a bare ``except:``; only a missing file is
                # expected here.
                pass
        compileall.compile_file(self.source_path, force=False, quiet=True)
        self.assertTrue(os.path.isfile(self.bc_path) and
                        not os.path.isfile(self.bc_path2))
        os.unlink(self.bc_path)
        compileall.compile_dir(self.directory, force=False, quiet=True)
        self.assertTrue(os.path.isfile(self.bc_path) and
                        os.path.isfile(self.bc_path2))
        os.unlink(self.bc_path)
        os.unlink(self.bc_path2)
def test_main():
    # Entry point used by CPython's regression-test harness.
    test_support.run_unittest(CompileallTests)
if __name__ == "__main__":
    test_main()
| gpl-2.0 |
openpolis/open_municipio | open_municipio/people/search_indexes.py | 1 | 16892 | from django.utils.translation import activate
from haystack import indexes
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
from django.db.models import Q
import logging
from open_municipio.people.models import Institution, InstitutionCharge, Group, GroupCharge, Person, SittingItem, InstitutionResponsability
from open_municipio.acts.models import Act, Agenda,\
CGDeliberation, Deliberation, Interpellation,\
Interrogation, Motion, Amendment, Transition,\
Decision, Decree, Audit, Minute, Speech
class InstitutionChargeIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack index for ``InstitutionCharge``.

    Most numeric facets come in two flavours: a raw counter and a
    ``*_index`` monthly rate (the count normalised over the charge's
    duration, per 30 days).
    """
    text = indexes.CharField(document=True, use_template=True)
    first_name = indexes.CharField(model_attr='person__first_name')
    last_name = indexes.CharField(model_attr='person__last_name')
    person = indexes.CharField(model_attr='person__slug')
    institution = indexes.FacetCharField()
    group = indexes.MultiValueField(indexed=True, stored=False)
    current_group = indexes.CharField(indexed=True, stored=False)
    responsability = indexes.FacetCharField()
    group_responsability = indexes.FacetCharField()
    level = indexes.IntegerField()
    start_date = indexes.FacetDateField(model_attr='start_date')
    end_date = indexes.FacetDateField(model_attr='end_date')
    is_active = indexes.FacetCharField()
    n_presented_acts = indexes.IntegerField(indexed=True, stored=True, model_attr='n_presented_acts')
    n_received_acts = indexes.IntegerField(indexed=True, stored=True, model_attr='n_received_acts')
    n_presented_acts_index = indexes.FloatField()
    n_received_acts_index = indexes.FloatField()
    n_rebel_votations = indexes.IntegerField(indexed=False, stored=True, model_attr='n_rebel_votations')
    n_present_votations = indexes.IntegerField(indexed=False, stored=True, model_attr='n_present_votations')
    n_absent_votations = indexes.IntegerField(indexed=False, stored=True, model_attr='n_absent_votations')
    n_present_attendances = indexes.IntegerField(indexed=False, stored=True, model_attr='n_present_attendances')
    n_absent_attendances = indexes.IntegerField(indexed=False, stored=True, model_attr='n_absent_attendances')
    n_presents = indexes.IntegerField()
    n_present_votations_percent = indexes.FloatField()
    n_present_attendances_percent = indexes.FloatField()
    n_presents_percent = indexes.FloatField()
    n_presents_bin = indexes.FacetCharField()
    n_deliberations = indexes.IntegerField()
    n_cgdeliberations = indexes.IntegerField()
    n_agendas = indexes.IntegerField()
    n_motions = indexes.IntegerField()
    n_motions_agendas = indexes.IntegerField()
    n_amendments = indexes.IntegerField()
    n_interrogations = indexes.IntegerField()
    n_interpellations = indexes.IntegerField()
    n_audits = indexes.IntegerField()
    n_inspection_acts = indexes.IntegerField()
    n_deliberations_index = indexes.FloatField()
    n_cgdeliberations_index = indexes.FloatField()
    n_agendas_index = indexes.FloatField()
    n_motions_index = indexes.FloatField()
    n_motions_agendas_index = indexes.FloatField()
    n_amendments_index = indexes.FloatField()
    n_interrogations_index = indexes.FloatField()
    n_interpellations_index = indexes.FloatField()
    n_audits_index = indexes.FloatField()
    n_inspection_index = indexes.FloatField()
    n_speeches = indexes.IntegerField()
    n_speeches_index = indexes.FloatField()
    speeches_minutes = indexes.IntegerField()
    speeches_minutes_index = indexes.FloatField()
    speeches_minutes_index_bin = indexes.FacetCharField()

    logger = logging.getLogger('import')

    def get_model(self):
        return InstitutionCharge

    def _monthly_rate(self, obj, count):
        """Normalise ``count`` to a per-30-days rate over the charge's
        duration; None when the duration is 0 days."""
        days = obj.duration.days
        return (float(count) / days) * 30 if days else None

    def prepare_institution(self, obj):
        # Faceted only up to the council institution type; others are blank.
        return obj.charge_type if obj.institution.institution_type <= Institution.COUNCIL else ''

    def prepare_group(self, obj):
        """Slugs of every group this charge belonged to."""
        return [p['group__slug'] for p in
                GroupCharge.objects.select_related().filter(charge__id=obj.id).values('group__slug').distinct()]

    def prepare_current_group(self, obj):
        return obj.current_groupcharge.group.slug if obj.current_groupcharge else ''

    def prepare_responsability(self, obj):
        # Implicitly returns None when the charge has no responsabilities.
        if obj.responsabilities.count() >= 1:
            return obj.responsabilities[0].get_charge_type_display()

    def prepare_group_responsability(self, obj):
        try:
            return obj.current_groupcharge.current_responsability.get_charge_type_display()
        except Exception:
            # Best-effort: any missing link in the chain above (no current
            # group, no responsability) yields an empty facet value.
            # (Modernised from the Python-2-only ``except Exception, e``.)
            return ''

    def prepare_level(self, obj):
        """Sorting level: tens encode the institution type, units the rank
        of the first responsability (9 = plain member)."""
        n = 10 * obj.institution.institution_type
        if obj.responsabilities.count() >= 1:
            n += [i[0] for i in list(InstitutionResponsability.CHARGE_TYPES)].index(obj.responsabilities[0].charge_type)
        else:
            n += 9
        return n

    def prepare_is_active(self, obj):
        return _("no") if obj.end_date else _("yes")

    def prepare_n_presented_acts_index(self, obj):
        return self._monthly_rate(obj, obj.n_presented_acts)

    def prepare_n_received_acts_index(self, obj):
        return self._monthly_rate(obj, obj.n_received_acts)

    def prepare_n_presents(self, obj):
        # Below the council level presence is tracked through attendances,
        # at council level and above through votations.
        return (obj.n_present_attendances
                if obj.institution.institution_type < Institution.COUNCIL
                else obj.n_present_votations)

    def prepare_n_present_votations_percent(self, obj):
        n_votations = obj.n_present_votations + obj.n_absent_votations
        return (float(obj.n_present_votations) * 100 / n_votations) if n_votations else 0

    def prepare_n_present_attendances_percent(self, obj):
        n_attendances = obj.n_present_attendances + obj.n_absent_attendances
        return (float(obj.n_present_attendances) * 100 / n_attendances) if n_attendances else 0

    def prepare_n_presents_percent(self, obj):
        n_presents = ((obj.n_present_attendances + obj.n_absent_attendances)
                      if obj.institution.institution_type < Institution.COUNCIL
                      else (obj.n_present_votations + obj.n_absent_votations))
        return (float(self.prepare_n_presents(obj)) * 100 / n_presents) if n_presents else 0

    def prepare_n_presents_bin(self, obj):
        """Facet label binning the presence percentage into 10-point buckets."""
        edges = range(0, 101, 10)
        value = self.prepare_n_presents_percent(obj)
        # NOTE(review): a 0% presence is left unbinned by this guard -- kept
        # as-is, but confirm that is intended.
        if not value:
            return
        for i in range(len(edges) - 1):
            if edges[i] <= value < edges[i + 1]:
                return str(edges[i]) + '%-' + str(edges[i + 1]) + '%'
        # BUG FIX: exactly 100% fell through every half-open interval above
        # and produced no facet value at all; clamp it into the top bucket.
        if value == edges[-1]:
            return str(edges[-2]) + '%-' + str(edges[-1]) + '%'

    def prepare_n_deliberations(self, obj):
        return obj.presented_act_set.filter(deliberation__isnull=False).count()

    def prepare_n_deliberations_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_deliberations(obj))

    def prepare_n_cgdeliberations(self, obj):
        return obj.presented_act_set.filter(cgdeliberation__isnull=False).count()

    def prepare_n_cgdeliberations_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_cgdeliberations(obj))

    def prepare_n_agendas(self, obj):
        return obj.presented_act_set.filter(agenda__isnull=False).count()

    def prepare_n_agendas_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_agendas(obj))

    def prepare_n_motions(self, obj):
        return obj.presented_act_set.filter(motion__isnull=False).count()

    def prepare_n_motions_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_motions(obj))

    def prepare_n_motions_agendas(self, obj):
        return obj.presented_act_set.filter(Q(motion__isnull=False) | Q(agenda__isnull=False)).count()

    def prepare_n_motions_agendas_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_motions_agendas(obj))

    def prepare_n_amendments(self, obj):
        return obj.presented_act_set.filter(amendment__isnull=False).count()

    def prepare_n_amendments_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_amendments(obj))

    def prepare_n_interrogations(self, obj):
        return obj.presented_act_set.filter(interrogation__isnull=False).count()

    def prepare_n_interrogations_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_interrogations(obj))

    def prepare_n_interpellations(self, obj):
        return obj.presented_act_set.filter(interpellation__isnull=False).count()

    def prepare_n_interpellations_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_interpellations(obj))

    def prepare_n_audits(self, obj):
        return obj.presented_act_set.filter(audit__isnull=False).count()

    def prepare_n_audits_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_audits(obj))

    def prepare_n_inspection_acts(self, obj):
        """Inspection acts = interrogations + interpellations + audits."""
        return obj.presented_act_set.filter(Q(interrogation__isnull=False) | Q(interpellation__isnull=False) | Q(audit__isnull=False)).count()

    def prepare_n_inspection_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_inspection_acts(obj))

    def prepare_n_speeches(self, obj):
        return obj.n_speeches

    def prepare_n_speeches_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_speeches(obj))

    def prepare_speeches_minutes(self, obj):
        # Assumes ~750 characters of transcript per minute of speech --
        # TODO confirm the conversion factor.
        return (obj.speeches_size / 750)

    def prepare_speeches_minutes_index(self, obj):
        return self._monthly_rate(obj, self.prepare_speeches_minutes(obj))

    def prepare_speeches_minutes_index_bin(self, obj):
        """Facet label binning monthly speech minutes into coarse buckets."""
        edges = [0, 1, 5, 10, 20, 30, 40, 60]
        value = self.prepare_speeches_minutes_index(obj)
        if not value:
            return
        for i in range(len(edges) - 1):
            if edges[i] <= value < edges[i + 1]:
                return str(edges[i]) + '-' + str(edges[i + 1])
        return str(edges[-1]) + ' e oltre'
class GroupIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack index for ``Group``.

    Presented-act counters come in pairs: a raw count and a ``*_index``
    monthly rate (the count normalised over the aggregate number of days
    served by the group's charges, per 30 days).
    """
    text = indexes.CharField(document=True, use_template=True)
    name = indexes.CharField(indexed=True, model_attr='name')
    acronym = indexes.CharField(indexed=True, model_attr='acronym')
    url = indexes.CharField(indexed=True, stored=True)
    is_active = indexes.FacetCharField()
    n_members = indexes.IntegerField()
    aggregate_charge_duration_days = indexes.IntegerField()
    n_presented_acts = indexes.IntegerField()
    n_presented_acts_index = indexes.FloatField()
    n_presented_deliberations = indexes.IntegerField()
    n_presented_deliberations_index = indexes.FloatField()
    n_presented_agendas = indexes.IntegerField()
    n_presented_agendas_index = indexes.FloatField()
    n_presented_motions = indexes.IntegerField()
    n_presented_motions_index = indexes.FloatField()
    n_presented_motions_agendas = indexes.IntegerField()
    n_presented_motions_agendas_index = indexes.FloatField()
    n_presented_amendments = indexes.IntegerField()
    n_presented_amendments_index = indexes.FloatField()
    n_presented_interrogations = indexes.IntegerField()
    n_presented_interrogations_index = indexes.FloatField()
    n_presented_interpellations = indexes.IntegerField()
    n_presented_interpellations_index = indexes.FloatField()
    n_presented_audits = indexes.IntegerField()
    n_presented_audits_index = indexes.FloatField()
    n_presented_inspection_acts = indexes.IntegerField()
    n_presented_inspection_acts_index = indexes.FloatField()

    logger = logging.getLogger('import')

    def get_model(self):
        return Group

    def _monthly_rate(self, obj, count):
        """Normalise ``count`` to a per-30-days rate over the aggregate
        duration of the group's charges; None when that duration is 0."""
        days = self.prepare_aggregate_charge_duration_days(obj)
        return (float(count) / days) * 30 if days else None

    def prepare_url(self, obj):
        return obj.get_absolute_url()

    def prepare_is_active(self, obj):
        return _("yes") if obj.is_current else _("no")

    def prepare_n_members(self, obj):
        return obj.charge_set.count()

    def prepare_aggregate_charge_duration_days(self, obj):
        """Total number of days served, summed over all group charges.

        Open-ended charges are counted up to today; charges with a missing
        or inverted start date are skipped with a warning.
        """
        days = 0
        now = datetime.now().date()
        for gc in obj.groupcharge_set.all():
            start_date = gc.start_date
            end_date = gc.end_date if gc.end_date else now
            if not start_date or start_date > end_date:
                self.logger.warning("invalid start date")
                continue
            days += (end_date - start_date).days
        return days

    def prepare_n_presented_acts_generic(self, obj, act_types=None):
        """Count distinct acts presented by the group's charges during their
        membership periods, optionally restricted to the given act classes.

        An empty/omitted ``act_types`` counts every act type.
        BUG FIX: ``act_types`` previously defaulted to a mutable ``[]``;
        it was never mutated, but ``None`` is the safe idiom.
        """
        now = datetime.now().date()
        query_act_types = Q()
        query_act_support = Q()
        for act_type in (act_types or []):
            query_act_types |= Q(**{act_type.__name__.lower() + '__isnull': False})
        for gc in obj.groupcharge_set.all():
            start_date = gc.start_date
            end_date = gc.end_date if gc.end_date else now
            query_act_support |= (Q(actsupport__charge=gc.charge) &
                                  Q(presentation_date__gte=start_date) &
                                  Q(presentation_date__lte=end_date))
        # NOTE(review): with no group charges the support filter is empty and
        # every act matches -- confirm groups always have >= 1 charge.
        return Act.objects.filter(query_act_types & query_act_support).distinct().count()

    def prepare_n_presented_acts(self, obj):
        return self.prepare_n_presented_acts_generic(obj)

    def prepare_n_presented_acts_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_acts(obj))

    def prepare_n_presented_deliberations(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Deliberation])

    def prepare_n_presented_deliberations_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_deliberations(obj))

    def prepare_n_presented_agendas(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Agenda])

    def prepare_n_presented_agendas_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_agendas(obj))

    def prepare_n_presented_motions(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Motion])

    def prepare_n_presented_motions_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_motions(obj))

    def prepare_n_presented_motions_agendas(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Agenda, Motion])

    def prepare_n_presented_motions_agendas_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_motions_agendas(obj))

    def prepare_n_presented_amendments(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Amendment])

    def prepare_n_presented_amendments_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_amendments(obj))

    def prepare_n_presented_interrogations(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Interrogation])

    def prepare_n_presented_interrogations_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_interrogations(obj))

    def prepare_n_presented_interpellations(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Interpellation])

    def prepare_n_presented_interpellations_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_interpellations(obj))

    def prepare_n_presented_audits(self, obj):
        return self.prepare_n_presented_acts_generic(obj, [Audit])

    def prepare_n_presented_audits_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_audits(obj))

    def prepare_n_presented_inspection_acts(self, obj):
        """Inspection acts = interrogations + interpellations + audits."""
        return self.prepare_n_presented_acts_generic(obj, [Interrogation, Interpellation, Audit])

    def prepare_n_presented_inspection_acts_index(self, obj):
        return self._monthly_rate(obj, self.prepare_n_presented_inspection_acts(obj))
| agpl-3.0 |
keflavich/scikit-image | skimage/io/setup.py | 37 | 1385 | #!/usr/bin/env python
from skimage._build import cython
import os.path
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils configuration for ``skimage.io``."""
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs

    cfg = Configuration('io', parent_package, top_path)
    cfg.add_data_dir('tests')
    cfg.add_data_files('_plugins/*.ini')

    # Try to regenerate the C sources from the .pyx files; if that fails,
    # the pre-generated .c files checked into the tree are compiled instead.
    cython(['_plugins/_colormixer.pyx', '_plugins/_histograms.pyx'],
           working_path=base_path)

    cfg.add_extension('_plugins._colormixer',
                      sources=['_plugins/_colormixer.c'],
                      include_dirs=[get_numpy_include_dirs()])
    cfg.add_extension('_plugins._histograms',
                      sources=['_plugins/_histograms.c'],
                      include_dirs=[get_numpy_include_dirs()])
    return cfg
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(maintainer='scikit-image Developers',
          maintainer_email='scikit-image@googlegroups.com',
          description='Image I/O Routines',
          url='https://github.com/scikit-image/scikit-image',
          license='Modified BSD',
          **(configuration(top_path='').todict())
          )
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/api/sankey_demo_basics.py | 12 | 3421 | """Demonstrate the Sankey class by producing three basic diagrams.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey
# Example 1 -- Mostly defaults
# This demonstrates how to create a simple diagram by implicitly calling the
# Sankey.add() method and by appending finish() to the call to the class.
Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
       labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
       orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()
plt.title("The default settings produce a diagram like this.")
# Notice:
# 1. Axes weren't provided when Sankey() was instantiated, so they were
#    created automatically.
# 2. The scale argument wasn't necessary since the data was already
#    normalized.
# 3. By default, the lengths of the paths are justified.

# Example 2
# This demonstrates:
# 1. Setting one path longer than the others
# 2. Placing a label in the middle of the diagram
# 3. Using the scale argument to normalize the flows
# 4. Implicitly passing keyword arguments to PathPatch()
# 5. Changing the angle of the arrow heads
# 6. Changing the offset between the tips of the paths and their labels
# 7. Formatting the numbers in the path labels and the associated unit
# 8. Changing the appearance of the patch and the labels after the figure is
#    created
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[],
                     title="Flow Diagram of a Widget")
sankey = Sankey(ax=ax, scale=0.01, offset=0.2, head_angle=180,
                format='%.0f', unit='%')
sankey.add(flows=[25, 0, 60, -10, -20, -5, -15, -10, -40],
           labels = ['', '', '', 'First', 'Second', 'Third', 'Fourth',
                     'Fifth', 'Hurray!'],
           orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0],
           pathlengths = [0.25, 0.25, 0.25, 0.25, 0.25, 0.6, 0.25, 0.25,
                          0.25],
           patchlabel="Widget\nA",
           alpha=0.2, lw=2.0) # Arguments to matplotlib.patches.PathPatch()
diagrams = sankey.finish()
# Restyle the first (and only) diagram after the fact.
diagrams[0].patch.set_facecolor('#37c959')
diagrams[0].texts[-1].set_color('r')
diagrams[0].text.set_fontweight('bold')
# Notice:
# 1. Since the sum of the flows is nonzero, the width of the trunk isn't
#    uniform. If verbose.level is helpful (in matplotlibrc), a message is
#    given in the terminal window.
# 2. The second flow doesn't appear because its value is zero. Again, if
#    verbose.level is helpful, a message is given in the terminal window.

# Example 3
# This demonstrates:
# 1. Connecting two systems
# 2. Turning off the labels of the quantities
# 3. Adding a legend
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Two Systems")
flows = [0.25, 0.15, 0.60, -0.10, -0.05, -0.25, -0.15, -0.10, -0.35]
sankey = Sankey(ax=ax, unit=None)
sankey.add(flows=flows, label='one',
           orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0])
# The second system reuses flow 0 of the first as its input (prior/connect).
sankey.add(flows=[-0.25, 0.15, 0.1], fc='#37c959', label='two',
           orientations=[-1, -1, -1], prior=0, connect=(0, 0))
diagrams = sankey.finish()
diagrams[-1].patch.set_hatch('/')
plt.legend(loc='best')
# Notice that only one connection is specified, but the systems form a
# circuit since: (1) the lengths of the paths are justified and (2) the
# orientation and ordering of the flows is mirrored.

plt.show()
| gpl-2.0 |
drodri/common | test/dev/system_id_test.py | 5 | 1127 | import unittest
from biicode.common.dev.system_id import SystemID
from biicode.common.settings.version import Version
class SystemIDTest(unittest.TestCase):
    """Unit tests for SystemID's version / language-version setters."""

    def setUp(self):
        # Fresh subject-under-test for every test method.
        self.sut = SystemID("open_gl", "CPP")

    def test_set_generic_system(self):
        # Making the system generic resets language_version to a default Version.
        self.sut.set_generic_system()
        self.assertEquals(self.sut.language_version, Version())

    def test_version_id_setter_exception(self):
        # NOTE(review): the attribute value ``self.sut.version_id`` (not the
        # property setter) is passed as the callable -- confirm this really
        # exercises the intended ValueError path.
        self.assertRaises(ValueError, self.sut.version_id, 1)

    def test_version_id_setter_with_version_instance(self):
        # Must not raise.
        self.sut.version_id = Version()

    def test_version_id_setter_with_string(self):
        # Must not raise.
        self.sut.version_id = "1"

    def test_language_version_id_setter_with_version_instance(self):
        self.sut.language_version = Version()
        self.assertEquals(self.sut.language_version, Version())

    def test_language_version_id_setter_with_string(self):
        # A plain string is normalized to the equivalent Version instance.
        self.sut.language_version = "1"
        self.assertEquals(self.sut.language_version, Version("1"))

    def test_equal(self):
        self.assertTrue(self.sut == self.sut)
        # Comparison against a non-SystemID value must be unequal, not an error.
        self.assertFalse(self.sut == 1)
| mit |
balloob/github3.py | github3/exceptions.py | 1 | 4215 | # -*- coding: utf-8 -*-
"""All exceptions for the github3 library."""
class GitHubError(Exception):
    """The base exception class.

    Wraps an HTTP response object, extracting the status code, the error
    message and any per-field error details from its JSON body.
    """

    def __init__(self, resp):
        """Build the error from a response object.

        :param resp: response exposing ``status_code``, ``json()`` and
            ``content``.
        """
        super(GitHubError, self).__init__(resp)
        #: Response code that triggered the error
        self.response = resp
        self.code = resp.status_code
        #: List of errors provided by GitHub (empty when none were given)
        self.errors = []
        try:
            error = resp.json()
            #: Message associated with the error
            self.msg = error.get('message')
            #: List of errors provided by GitHub
            if error.get('errors'):
                self.errors = error.get('errors')
        except Exception:  # Amazon S3 error (non-JSON body)
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; the best-effort
            # fallback to the raw payload is preserved.
            self.msg = resp.content or '[No message]'

    def __repr__(self):
        return '<{0} [{1}]>'.format(self.__class__.__name__,
                                    self.msg or self.code)

    def __str__(self):
        return '{0} {1}'.format(self.code, self.msg)

    @property
    def message(self):
        """The actual message returned by the API."""
        return self.msg
class ResponseError(GitHubError):
"""The base exception for errors stemming from GitHub responses."""
pass
class TransportError(GitHubError):
    """Catch-all exception for errors coming from Requests."""

    #: Template used to build ``msg`` from the wrapped exception.
    msg_format = 'An error occurred while making a request to GitHub: {0}'

    def __init__(self, exception):
        # Deliberately bypasses GitHubError.__init__: there is no HTTP
        # response here, only the transport-level exception.
        Exception.__init__(self, exception)
        #: The original exception raised by the transport layer
        self.exception = exception
        self.msg = self.msg_format.format(str(exception))

    def __str__(self):
        return '{0}: {1}'.format(type(self.exception), self.msg)
class ConnectionError(TransportError):
"""Exception for errors in connecting to or reading data from GitHub."""
msg_format = 'A connection-level exception occurred: {0}'
class UnprocessableResponseBody(ResponseError):
    """Exception class for response objects that cannot be handled."""

    def __init__(self, message, body):
        # Bypasses GitHubError.__init__: there is no parseable response,
        # only the raw body that could not be processed.
        Exception.__init__(self, message)
        #: The unparsable response body
        self.body = body
        self.msg = message

    def __repr__(self):
        return '<{0} [{1}]>'.format('UnprocessableResponseBody', self.body)

    def __str__(self):
        # ``message`` is the read-only property inherited from GitHubError.
        return self.message
class BadRequest(ResponseError):
"""Exception class for 400 responses."""
pass
class AuthenticationFailed(ResponseError):
"""Exception class for 401 responses.
Possible reasons:
- Need one time password (for two-factor authentication)
- You are not authorized to access the resource
"""
pass
class ForbiddenError(ResponseError):
"""Exception class for 403 responses.
Possible reasons:
- Too many requests (you've exceeded the ratelimit)
- Too many login failures
"""
pass
class NotFoundError(ResponseError):
"""Exception class for 404 responses."""
pass
class MethodNotAllowed(ResponseError):
"""Exception class for 405 responses."""
pass
class NotAcceptable(ResponseError):
"""Exception class for 406 responses."""
pass
class Conflict(ResponseError):
"""Exception class for 409 responses.
Possible reasons:
- Head branch was modified (SHA sums do not match)
"""
pass
class UnprocessableEntity(ResponseError):
"""Exception class for 422 responses."""
pass
class ClientError(ResponseError):
"""Catch-all for 400 responses that aren't specific errors."""
pass
class ServerError(ResponseError):
"""Exception class for 5xx responses."""
pass
class UnavailableForLegalReasons(ResponseError):
"""Exception class for 451 responses."""
pass
#: Map of specific HTTP status codes to their dedicated exception classes.
#: Codes absent from this table fall back to ClientError (4xx) or
#: ServerError (5xx) in ``error_for`` below.
error_classes = {
    400: BadRequest,
    401: AuthenticationFailed,
    403: ForbiddenError,
    404: NotFoundError,
    405: MethodNotAllowed,
    406: NotAcceptable,
    409: Conflict,
    422: UnprocessableEntity,
    451: UnavailableForLegalReasons,
}
def error_for(response):
    """Return the appropriate initialized exception class for a response."""
    status = response.status_code
    try:
        # Specific, explicitly-mapped status codes win.
        exc_class = error_classes[status]
    except KeyError:
        # Fall back to the generic class for the status range.
        if 400 <= status < 500:
            exc_class = ClientError
        elif 500 <= status < 600:
            exc_class = ServerError
        else:
            exc_class = None
    return exc_class(response)
| bsd-3-clause |
dhp-denero/LibrERP | core_extended/__openerp__.py | 2 | 3133 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2014 Didotech srl (<http://www.didotech.com>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Core extended",
"version": "2.0.13.0",
"author": "Didotech SRL",
"website": "http://www.didotech.com",
"category": "Base",
"description": """
Module extendes OpenERP core functionality:
ir_sequence - add functions:
current_number(cr, uid, sequence_id)
go_back(cr, uid, sequence_id, steps_back=1)
create() will set code to the value find in context dictionary (if value is present in context)
ir_attachment - add function
get_as_zip(cr, uid, ids, log=False, encode=True, compress=True)
ordereddict - Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
wkf_service - add function
trg_last_action(uid, model, obj_id, cr) - this function is useful when debugging a workflow related problems
orm - Monkey Patched function name_get(). Now it will throw an error if 'name' or name_get() are not defined, but will not break a code execution.
Code will break only if 'debug_mode' defined in config file.
bizdatetime - simple library for performing business day arithmetic:
policy = Policy(weekends=(SAT, SUN), holidays=(date(2011,7,1),))
policy.biz_day_delta(date(2011, 6, 30), date(2011, 7, 4)) # one holiday, one weekend between
odf_to_array - class that permits reading of Open Document spreadsheet
file_manipulation - contains function that recognise Excel, Open Document and CSV documents
and return them as list of rows. Additional python modules are required:
'xls', 'xlsb' - requires module xlrd
'xlsx', 'xlsm', 'xltx', 'xltm' - requires openpyxl
'ods' - requires odf
'csv' - uses module csv included in standard Python distribution
""",
"depends": [
'base',
],
"init_xml": [],
"update_xml": [
],
"active": False,
"installable": True,
'external_dependencies': {
'python': [
'zlib',
]
}
}
| agpl-3.0 |
ldong/vim_youcompleteme | third_party/jedi/test/completion/ordering.py | 13 | 1905 | # -----------------
# normal
# -----------------
a = ""
a = 1
#? int()
a
#? []
a.append
a = list
b = 1; b = ""
#? str()
b
# temp should not be accessible before definition
#? []
temp
a = 1
temp = b;
b = a
a = temp
#? int()
b
#? int()
b
#? str()
a
a = tuple
if 1:
a = list
#? ['append']
a.append
#? ['index']
a.index
# -----------------
# tuples exchanges
# -----------------
a, b = 1, ""
#? int()
a
#? str()
b
b, a = a, b
#? int()
b
#? str()
a
b, a = a, b
#? int()
a
#? str()
b
# -----------------
# function
# -----------------
def a(a=3):
#? int()
a
#? []
a.func
return a
#? int()
a(2)
#? []
a(2).func
a_param = 3
def func(a_param):
# should not be int
#? []
a_param.
from os import path
# should not return a function, because `a` is a function above
def f(b, a): return a
#? []
f(b=3)
# -----------------
# class
# -----------------
class A(object):
a = ""
a = 3
#? int()
a
a = list()
def __init__(self):
self.b = ""
def before(self):
self.b = 3
# TODO should this be so?
#? int() str() list()
self.b
self.b = list
self.a = 1
#? str() int()
self.a
#? ['after']
self.after
self.c = 3
#? int()
self.c
def after(self):
self.a = ''
c = set()
#? list()
A.a
a = A()
#? ['after']
a.after
#? []
a.upper
#? []
a.append
#? []
a.real
#? str() int()
a.a
a = 3
class a():
def __init__(self, a):
self.a = a
#? float()
a(1.0).a
#?
a().a
# -----------------
# imports
# -----------------
math = 3
import math
#? ['cosh']
math.cosh
#? []
math.real
math = 3
#? int()
math
#? []
math.cos
# do the same for star imports
cosh = 3
from math import *
# cosh doesn't work, but that's not a problem, star imports should be at the
# start of EVERY script!
cosh.real
cosh = 3
#? int()
cosh
| gpl-3.0 |
ruuk/script.web.viewer2 | lib/webviewer/cssutils/css/value.py | 5 | 33736 | """Value related classes.
DOM Level 2 CSS CSSValue, CSSPrimitiveValue and CSSValueList are **no longer**
supported and are replaced by these new classes.
"""
__all__ = ['PropertyValue',
'Value',
'ColorValue',
'DimensionValue',
'URIValue',
'CSSFunction',
'CSSVariable',
'MSValue'
]
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from cssutils.prodparser import *
import cssutils
from cssutils.helper import normalize, pushtoken
import colorsys
import math
import re
import xml.dom
import urlparse
class PropertyValue(cssutils.util._NewBase):
"""
An unstructured list like holder for all values defined for a
:class:`~cssutils.css.Property`. Contains :class:`~cssutils.css.Value`
or subclass objects. Currently there is no access to the combinators of
the defined values which might simply be space or comma or slash.
You may:
- iterate over all contained Value objects (not the separators like ``,``,
``/`` or `` `` though!)
- get a Value item by index or use ``PropertyValue[index]``
- find out the number of values defined (unstructured)
"""
def __init__(self, cssText=None, parent=None, readonly=False):
"""
:param cssText:
the parsable cssText of the value
:param readonly:
defaults to False
"""
super(PropertyValue, self).__init__()
self.parent = parent
self.wellformed = False
if cssText is not None: # may be 0
if isinstance(cssText, (int, float)):
cssText = unicode(cssText) # if it is a number
self.cssText = cssText
self._readonly = readonly
def __len__(self):
return len(list(self.__items()))
def __getitem__(self, index):
try:
return list(self.__items())[index]
except IndexError:
return None
def __iter__(self):
"Generator which iterates over values."
for item in self.__items():
yield item
def __repr__(self):
return u"cssutils.css.%s(%r)" % (self.__class__.__name__,
self.cssText)
def __str__(self):
return u"<cssutils.css.%s object length=%r cssText=%r at "\
u"0x%x>" % (self.__class__.__name__,
self.length, self.cssText, id(self))
def __items(self, seq=None):
"a generator of Value obects only, no , / or ' '"
if seq is None:
seq = self.seq
return (x.value for x in seq if isinstance(x.value, Value))
def _setCssText(self, cssText):
if isinstance(cssText, (int, float)):
cssText = unicode(cssText) # if it is a number
"""
Format::
unary_operator
: '-' | '+'
;
operator
: '/' S* | ',' S* | /* empty */
;
expr
: term [ operator term ]*
;
term
: unary_operator?
[ NUMBER S* | PERCENTAGE S* | LENGTH S* | EMS S* | EXS S* |
ANGLE S* | TIME S* | FREQ S* ]
| STRING S* | IDENT S* | URI S* | hexcolor | function
| UNICODE-RANGE S*
;
function
: FUNCTION S* expr ')' S*
;
/*
* There is a constraint on the color that it must
* have either 3 or 6 hex-digits (i.e., [0-9a-fA-F])
* after the "#"; e.g., "#000" is OK, but "#abcd" is not.
*/
hexcolor
: HASH S*
;
:exceptions:
- :exc:`~xml.dom.SyntaxErr`:
Raised if the specified CSS string value has a syntax error
(according to the attached property) or is unparsable.
- :exc:`~xml.dom.InvalidModificationErr`:
TODO: Raised if the specified CSS string value represents a
different type of values than the values allowed by the CSS
property.
- :exc:`~xml.dom.NoModificationAllowedErr`:
Raised if this value is readonly.
"""
self._checkReadonly()
# used as operator is , / or S
nextSor = u',/'
term = Choice(_ColorProd(self, nextSor),
_DimensionProd(self, nextSor),
_URIProd(self, nextSor),
_ValueProd(self, nextSor),
# _CalcValueProd(self, nextSor),
# _Rect(self, nextSor),
# all other functions
_CSSVariableProd(self, nextSor),
_MSValueProd(self, nextSor),
_CSSFunctionProd(self, nextSor)
)
operator = Choice(PreDef.S(toSeq=False),
PreDef.char('comma', ',',
toSeq=lambda t, tokens: ('operator', t[1])),
PreDef.char('slash', '/',
toSeq=lambda t, tokens: ('operator', t[1])),
optional=True)
prods = Sequence(term,
Sequence(# mayEnd this Sequence if whitespace
operator,
# TODO: only when setting via other class
# used by variabledeclaration currently
PreDef.char('END', ';',
stopAndKeep=True,
optional=True),
# TODO: } and !important ends too!
term,
minmax=lambda: (0, None)))
# parse
ok, seq, store, unused = ProdParser().parse(cssText,
u'PropertyValue',
prods)
# must be at least one value!
ok = ok and len(list(self.__items(seq))) > 0
if ok:
self._setSeq(seq)
self.wellformed = True
else:
self._log.error(u'PropertyValue: Unknown syntax or no value: %s' %
self._valuestr(cssText))
cssText = property(lambda self: cssutils.ser.do_css_PropertyValue(self),
_setCssText,
doc="A string representation of the current value.")
def item(self, index):
"""
The value at position `index`. Alternatively simple use
``PropertyValue[index]``.
:param index:
the parsable cssText of the value
:exceptions:
- :exc:`~IndexError`:
Raised if index if out of bounds
"""
return self[index]
length = property(lambda self: len(self),
doc=u"Number of values set.")
value = property(lambda self: cssutils.ser.do_css_PropertyValue(self,
valuesOnly=True),
doc=u"A string representation of the current value "
u"without any comments used for validation.")
class Value(cssutils.util._NewBase):
    """
    Represents a single CSS value. For now simple values of
    IDENT, STRING, or UNICODE-RANGE values are represented directly
    as Value objects. Other values like e.g. FUNCTIONs are represented by
    subclasses with an extended API.
    """
    # Type constants mirroring the tokenizer/production names; ``type``
    # below reports one of these.
    IDENT = u'IDENT'
    STRING = u'STRING'
    UNICODE_RANGE = u'UNICODE-RANGE'
    URI = u'URI'
    DIMENSION = u'DIMENSION'
    NUMBER = u'NUMBER'
    PERCENTAGE = u'PERCENTAGE'
    COLOR_VALUE = u'COLOR_VALUE'
    HASH = u'HASH'
    FUNCTION = u'FUNCTION'
    VARIABLE = u'VARIABLE'

    # Class-level defaults; overwritten per instance by _setCssText.
    _type = None
    _value = u''

    def __init__(self, cssText=None, parent=None, readonly=False):
        # NOTE(review): ``readonly`` is accepted but never stored here
        # (unlike PropertyValue) -- confirm whether that is intentional.
        super(Value, self).__init__()
        self.parent = parent
        if cssText:
            self.cssText = cssText

    def __repr__(self):
        return u"cssutils.css.%s(%r)" % (self.__class__.__name__,
                                         self.cssText)

    def __str__(self):
        return u"<cssutils.css.%s object type=%s value=%r cssText=%r at 0x%x>"\
               % (self.__class__.__name__,
                  self.type, self.value, self.cssText,
                  id(self))

    def _setCssText(self, cssText):
        """Parse *cssText* as exactly one simple token (hexcolor, ident,
        string or unicode-range) and store its type and value."""
        self._checkReadonly()
        prods = Choice(PreDef.hexcolor(stop=True),
                       PreDef.ident(stop=True),
                       PreDef.string(stop=True),
                       PreDef.unicode_range(stop=True),
                       )
        ok, seq, store, unused = ProdParser().parse(cssText, u'Value', prods)
        if ok:
            # only 1 value anyway!
            self._type = seq[0].type
            self._value = seq[0].value
            self._setSeq(seq)
        self.wellformed = ok

    cssText = property(lambda self: cssutils.ser.do_css_Value(self),
                       _setCssText,
                       doc=u'String value of this value.')

    type = property(lambda self: self._type, #_setType,
                    doc=u"Type of this value, for now the production type "
                        u"like e.g. `DIMENSION` or `STRING`. All types are "
                        u"defined as constants in :class:`~cssutils.css.Value`.")

    def _setValue(self, value):
        # TODO: check!
        self._value = value

    value = property(lambda self: self._value, _setValue,
                     doc=u"Actual value if possible: An int or float or else "
                         u" a string")
class ColorValue(Value):
"""
A color value like rgb(), rgba(), hsl(), hsla() or #rgb, #rrggbb
TODO: Color Keywords
"""
from colors import COLORS
type = Value.COLOR_VALUE
# hexcolor, FUNCTION?
_colorType = None
_red = 0
_green = 0
_blue = 0
_alpha = 0
def __str__(self):
return u"<cssutils.css.%s object type=%s value=%r colorType=%r "\
u"red=%s blue=%s green=%s alpha=%s at 0x%x>"\
% (self.__class__.__name__,
self.type, self.value,
self.colorType, self.red, self.green, self.blue, self.alpha,
id(self))
def _setCssText(self, cssText):
self._checkReadonly()
types = self._prods # rename!
component = Choice(PreDef.unary(toSeq=lambda t, tokens: (t[0],
DimensionValue(pushtoken(t, tokens),
parent=self)
)),
PreDef.number(toSeq=lambda t, tokens: (t[0],
DimensionValue(pushtoken(t, tokens),
parent=self)
)),
PreDef.percentage(toSeq=lambda t, tokens: (t[0],
DimensionValue(pushtoken(t, tokens),
parent=self)
))
)
noalp = Sequence(Prod(name='FUNCTION',
match=lambda t, v: t == types.FUNCTION and
v in (u'rgb(', u'hsl('),
toSeq=lambda t, tokens: (t[0], normalize(t[1]))),
component,
Sequence(PreDef.comma(),
component,
minmax=lambda: (2, 2)
),
PreDef.funcEnd(stop=True)
)
witha = Sequence(Prod(name='FUNCTION',
match=lambda t, v: t == types.FUNCTION and
v in (u'rgba(', u'hsla('),
toSeq=lambda t, tokens: (t[0],
normalize(t[1]))
),
component,
Sequence(PreDef.comma(),
component,
minmax=lambda: (3, 3)
),
PreDef.funcEnd(stop=True)
)
namedcolor = Prod(name='Named Color',
match=lambda t, v: t == 'IDENT' and (
normalize(v) in self.COLORS.keys()
),
stop=True)
prods = Choice(PreDef.hexcolor(stop=True),
namedcolor,
noalp,
witha)
ok, seq, store, unused = ProdParser().parse(cssText,
self.type,
prods)
if ok:
t, v = seq[0].type, seq[0].value
if u'IDENT' == t:
rgba = self.COLORS[normalize(v)]
if u'HASH' == t:
if len(v) == 4:
# HASH #rgb
rgba = (int(2*v[1], 16),
int(2*v[2], 16),
int(2*v[3], 16),
1.0)
else:
# HASH #rrggbb
rgba = (int(v[1:3], 16),
int(v[3:5], 16),
int(v[5:7], 16),
1.0)
elif u'FUNCTION' == t:
functiontype, raw, check = None, [], u''
HSL = False
for item in seq:
try:
type_ = item.value.type
except AttributeError, e:
# type of function, e.g. rgb(
if item.type == 'FUNCTION':
functiontype = item.value
HSL = functiontype in (u'hsl(', u'hsla(')
continue
# save components
if type_ == Value.NUMBER:
raw.append(item.value.value)
check += u'N'
elif type_ == Value.PERCENTAGE:
if HSL:
# save as percentage fraction
raw.append(item.value.value / 100.0)
else:
# save as real value of percentage of 255
raw.append(int(255 * item.value.value / 100))
check += u'P'
if HSL:
# convert to rgb
# h is 360 based (circle)
h, s, l = raw[0] / 360.0, raw[1], raw[2]
# ORDER h l s !!!
r, g, b = colorsys.hls_to_rgb(h, l, s)
# back to 255 based
rgba = [int(round(r*255)),
int(round(g*255)),
int(round(b*255))]
if len(raw) > 3:
rgba.append(raw[3])
else:
# rgb, rgba
rgba = raw
if len(rgba) < 4:
rgba.append(1.0)
# validate
checks = {u'rgb(': ('NNN', 'PPP'),
u'rgba(': ('NNNN', 'PPPN'),
u'hsl(': ('NPP',),
u'hsla(': ('NPPN',)
}
if check not in checks[functiontype]:
self._log.error(u'ColorValue has invalid %s) parameters: '
u'%s (N=Number, P=Percentage)' %
(functiontype, check))
self._colorType = t
self._red, self._green, self._blue, self._alpha = tuple(rgba)
self._setSeq(seq)
self.wellformed = ok
cssText = property(lambda self: cssutils.ser.do_css_ColorValue(self),
_setCssText,
doc=u"String value of this value.")
value = property(lambda self: cssutils.ser.do_css_CSSFunction(self, True),
doc=u'Same as cssText but without comments.')
type = property(lambda self: Value.COLOR_VALUE,
doc=u"Type is fixed to Value.COLOR_VALUE.")
def _getName(self):
for n, v in self.COLORS.items():
if v == (self.red, self.green, self.blue, self.alpha):
return n
colorType = property(lambda self: self._colorType,
doc=u"IDENT (red), HASH (#f00) or FUNCTION (rgb(255, 0, 0).")
name = property(_getName,
doc=u'Name of the color if known (in ColorValue.COLORS) '
u'else None')
red = property(lambda self: self._red,
doc=u'red part as integer between 0 and 255')
green = property(lambda self: self._green,
doc=u'green part as integer between 0 and 255')
blue = property(lambda self: self._blue,
doc=u'blue part as integer between 0 and 255')
alpha = property(lambda self: self._alpha,
doc=u'alpha part as float between 0.0 and 1.0')
class DimensionValue(Value):
    """
    A numerical value with an optional dimension like e.g. "px" or "%".

    Covers DIMENSION, PERCENTAGE or NUMBER values.
    """
    # Splits a token like u'10.5em' into its number and dimension parts.
    # Changed from the Python-2-only ``ur''`` literal to a plain raw
    # string: the pattern is ASCII-only, so matching is unchanged, and
    # the module stays importable under Python 3 syntax.
    __reNumDim = re.compile(r'^(\d*\.\d+|\d+)(.*)$', re.I | re.U | re.X)

    # Instance defaults, filled in by _setCssText.
    _dimension = None
    _sign = None

    def __str__(self):
        return u"<cssutils.css.%s object type=%s value=%r dimension=%r cssText=%r at 0x%x>"\
               % (self.__class__.__name__,
                  self.type, self.value, self.dimension, self.cssText,
                  id(self))

    def _setCssText(self, cssText):
        """Parse *cssText* as an optionally signed DIMENSION, NUMBER or
        PERCENTAGE and store sign, numeric value, dimension and type."""
        self._checkReadonly()
        prods = Sequence(PreDef.unary(),
                         Choice(PreDef.dimension(stop=True),
                                PreDef.number(stop=True),
                                PreDef.percentage(stop=True)
                                )
                         )
        ok, seq, store, unused = ProdParser().parse(cssText,
                                                    u'DimensionValue',
                                                    prods)
        if ok:
            sign = val = u''
            dim = type_ = None
            # find sign and number token in the parsed sequence
            for item in seq:
                if item.value in u'+-':
                    sign = item.value
                else:
                    type_ = item.type
                    # number + optional dimension suffix
                    v, d = self.__reNumDim.findall(
                                        normalize(item.value))[0]
                    # keep an int when possible, float otherwise
                    if u'.' in v:
                        val = float(sign + v)
                    else:
                        val = int(sign + v)
                    if d:
                        dim = d
            self._sign = sign
            self._value = val
            self._dimension = dim
            self._type = type_
            self._setSeq(seq)
        self.wellformed = ok

    cssText = property(lambda self: cssutils.ser.do_css_Value(self),
                       _setCssText,
                       doc=u"String value of this value including dimension.")

    dimension = property(lambda self: self._dimension, #_setValue,
                         doc=u"Dimension if a DIMENSION or PERCENTAGE value, "
                             u"else None")
class URIValue(Value):
    """
    An URI value like ``url(example.png)``.
    """
    _type = Value.URI
    _uri = Value._value

    def __str__(self):
        return u"<cssutils.css.%s object type=%s value=%r uri=%r cssText=%r at 0x%x>"\
               % (self.__class__.__name__,
                  self.type, self.value, self.uri, self.cssText,
                  id(self))

    def _setCssText(self, cssText):
        """Parse *cssText* as a single URI token."""
        self._checkReadonly()
        prods = Sequence(PreDef.uri(stop=True))
        ok, seq, store, unused = ProdParser().parse(cssText, u'URIValue', prods)
        if ok:
            # only 1 value only anyway
            self._type = seq[0].type
            self._value = seq[0].value
            self._setSeq(seq)
        self.wellformed = ok

    cssText = property(lambda self: cssutils.ser.do_css_Value(self),
                       _setCssText,
                       doc=u'String value of this value.')

    def _setUri(self, uri):
        # TODO: check?
        self._value = uri

    uri = property(lambda self: self._value, _setUri,
                   doc=u"Actual URL without delimiters or the empty string")

    def absoluteUri(self):
        """Actual URL, made absolute if possible, else same as `uri`."""
        # Ancestry: PropertyValue, Property, CSSStyleDeclaration, CSSStyleRule,
        # CSSStyleSheet
        try:
            # TODO: better way?
            styleSheet = self.parent.parent.parent.parentRule.parentStyleSheet
        except AttributeError:
            # Not attached to a full sheet hierarchy: return as-is.
            # (Was the Python-2-only ``except AttributeError, e`` form with
            # an unused binding; narrowly rewritten, behavior unchanged.)
            return self.uri
        else:
            return urlparse.urljoin(styleSheet.href, self.uri)

    absoluteUri = property(absoluteUri, doc=absoluteUri.__doc__)
class CSSFunction(Value):
    """
    A function value.
    """
    # Name used when this production appears in a parsed sequence.
    _functionName = 'Function'

    def _productions(self):
        """Return definition used for parsing."""
        types = self._prods # rename!
        # A single argument inside the function parentheses.
        itemProd = Choice(_ColorProd(self),
                          _DimensionProd(self),
                          _URIProd(self),
                          _ValueProd(self),
                          #_CalcValueProd(self),
                          _CSSVariableProd(self),
                          _CSSFunctionProd(self)
                          )
        # name( item [, item]* ) -- or an empty argument list: name( )
        funcProds = Sequence(Prod(name='FUNCTION',
                                  match=lambda t, v: t == types.FUNCTION,
                                  toSeq=lambda t, tokens: (t[0],
                                                           normalize(t[1]))),
                             Choice(Sequence(itemProd,
                                             Sequence(PreDef.comma(),
                                                      itemProd,
                                                      minmax=lambda: (0, None)),
                                             PreDef.funcEnd(stop=True)),
                                    PreDef.funcEnd(stop=True))
                             )
        return funcProds

    def _setCssText(self, cssText):
        """Parse *cssText* with this class's productions and store the
        resulting sequence."""
        self._checkReadonly()
        ok, seq, store, unused = ProdParser().parse(cssText,
                                                    self.type,
                                                    self._productions())
        if ok:
            self._setSeq(seq)
        self.wellformed = ok

    cssText = property(lambda self: cssutils.ser.do_css_CSSFunction(self),
                       _setCssText,
                       doc=u"String value of this value.")

    value = property(lambda self: cssutils.ser.do_css_CSSFunction(self, True),
                     doc=u'Same as cssText but without comments.')

    type = property(lambda self: Value.FUNCTION,
                    doc=u"Type is fixed to Value.FUNCTION.")
class MSValue(CSSFunction):
"""An IE specific Microsoft only function value which is much looser
in what is syntactically allowed."""
_functionName = 'MSValue'
def _productions(self):
"""Return definition used for parsing."""
types = self._prods # rename!
func = Prod(name='MSValue-Sub',
match=lambda t, v: t == self._prods.FUNCTION,
toSeq=lambda t, tokens: (MSValue._functionName,
MSValue(pushtoken(t,
tokens
),
parent=self
)
)
)
funcProds = Sequence(Prod(name='FUNCTION',
match=lambda t, v: t == types.FUNCTION,
toSeq=lambda t, tokens: (t[0], t[1])
),
Sequence(Choice(_ColorProd(self),
_DimensionProd(self),
_URIProd(self),
_ValueProd(self),
_MSValueProd(self),
#_CalcValueProd(self),
_CSSVariableProd(self),
func,
#_CSSFunctionProd(self),
Prod(name='MSValuePart',
match=lambda t, v: v != u')',
toSeq=lambda t, tokens: (t[0], t[1])
)
),
minmax=lambda: (0, None)
),
PreDef.funcEnd(stop=True)
)
return funcProds
def _setCssText(self, cssText):
super(MSValue, self)._setCssText(cssText)
cssText = property(lambda self: cssutils.ser.do_css_MSValue(self),
_setCssText,
doc=u"String value of this value.")
class CSSVariable(CSSFunction):
"""The CSSVariable represents a CSS variables like ``var(varname)``.
A variable has a (nonnormalized!) `name` and a `value` which is
tried to be resolved from any available CSSVariablesRule definition.
"""
_functionName = 'CSSVariable'
_name = None
def __str__(self):
return u"<cssutils.css.%s object name=%r value=%r at 0x%x>" % (
self.__class__.__name__, self.name, self.value, id(self))
def _setCssText(self, cssText):
self._checkReadonly()
types = self._prods # rename!
prods = Sequence(Prod(name='var',
match=lambda t, v: t == types.FUNCTION and
normalize(v) == u'var('
),
PreDef.ident(toStore='ident'),
PreDef.funcEnd(stop=True))
# store: name of variable
store = {'ident': None}
ok, seq, store, unused = ProdParser().parse(cssText,
u'CSSVariable',
prods)
if ok:
self._name = store['ident'].value
self._setSeq(seq)
self.wellformed = ok
cssText = property(lambda self: cssutils.ser.do_css_CSSVariable(self),
_setCssText, doc=u"String representation of variable.")
# TODO: writable? check if var (value) available?
name = property(lambda self: self._name,
doc=u"The name identifier of this variable referring to "
u"a value in a "
u":class:`cssutils.css.CSSVariablesDeclaration`.")
type = property(lambda self: Value.VARIABLE,
doc=u"Type is fixed to Value.VARIABLE.")
def _getValue(self):
"Find contained sheet and @variables there"
rel = self
while True:
# find node which has parentRule to get to StyleSheet
if hasattr(rel, 'parent'):
rel = rel.parent
else:
break
try:
variables = rel.parentRule.parentStyleSheet.variables
except AttributeError:
return None
else:
try:
return variables[self.name]
except KeyError:
return None
value = property(_getValue,
doc=u'The resolved actual value or None.')
# helper for productions
def _ValueProd(parent, nextSor=False):
return Prod(name='Value',
match=lambda t, v: t in ('IDENT', 'STRING', 'UNICODE-RANGE'),
nextSor = nextSor,
toSeq=lambda t, tokens: ('Value', Value(
pushtoken(t,
tokens),
parent=parent)
)
)
def _DimensionProd(parent, nextSor=False):
return Prod(name='Dimension',
match=lambda t, v: t in (u'DIMENSION',
u'NUMBER',
u'PERCENTAGE') or v in u'+-',
nextSor = nextSor,
toSeq=lambda t, tokens: (t[0], DimensionValue(
pushtoken(t,
tokens),
parent=parent)
)
)
def _URIProd(parent, nextSor=False):
return Prod(name='URIValue',
match=lambda t, v: t == 'URI',
nextSor = nextSor,
toSeq=lambda t, tokens: ('URIValue', URIValue(
pushtoken(t,
tokens),
parent=parent)
)
)
reHexcolor = re.compile(r'^\#(?:[0-9abcdefABCDEF]{3}|[0-9abcdefABCDEF]{6})$')
def _ColorProd(parent, nextSor=False):
return Prod(name='ColorValue',
match=lambda t, v:
(t == 'HASH' and
reHexcolor.match(v)
) or
(t == 'FUNCTION' and
normalize(v) in (u'rgb(',
u'rgba(',
u'hsl(',
u'hsla(')
) or
(t == 'IDENT' and
normalize(v) in ColorValue.COLORS.keys()
),
nextSor = nextSor,
toSeq=lambda t, tokens: ('ColorValue', ColorValue(
pushtoken(t,
tokens),
parent=parent)
)
)
def _CSSFunctionProd(parent, nextSor=False):
return PreDef.function(nextSor=nextSor,
toSeq=lambda t, tokens: (CSSFunction._functionName,
CSSFunction(
pushtoken(t, tokens),
parent=parent)
)
)
def _CSSVariableProd(parent, nextSor=False):
return PreDef.variable(nextSor=nextSor,
toSeq=lambda t, tokens: (CSSVariable._functionName,
CSSVariable(
pushtoken(t, tokens),
parent=parent)
)
)
def _MSValueProd(parent, nextSor=False):
return Prod(name=MSValue._functionName,
match=lambda t, v: (#t == self._prods.FUNCTION and (
normalize(v) in (u'expression(',
u'alpha(',
u'blur(',
u'chroma(',
u'dropshadow(',
u'fliph(',
u'flipv(',
u'glow(',
u'gray(',
u'invert(',
u'mask(',
u'shadow(',
u'wave(',
u'xray(') or
v.startswith(u'progid:DXImageTransform.Microsoft.')
),
nextSor=nextSor,
toSeq=lambda t, tokens: (MSValue._functionName,
MSValue(pushtoken(t,
tokens
),
parent=parent
)
)
)
| gpl-2.0 |
DataDog/dogapi | src/dogapi/http/dashes.py | 2 | 2573 | __all__ = [
'DashApi',
]
try:
import simplejson as json
except ImportError:
import json
class DashApi(object):
    """Mixin with the dashboard endpoints of the Datadog API.

    Relies on ``self.http_request`` being supplied by the composing
    client class.
    """

    def dashboard(self, dash_id):
        """
        Return the dashboard with the given id.

        See the `dashboard API documentation <http://docs.datadoghq.com/api/#dashboard>`_ for the
        dashboard data format.
        """
        return self.http_request('GET', '/dash/' + str(dash_id),
            response_formatter=lambda x: x['dash'],
        )

    def dashboards(self):
        """
        Return all of your dashboards.

        See the `dashboard API documentation <http://docs.datadoghq.com/api/#dashboard>`_ for the
        dashboard data format.
        """
        return self.http_request('GET', '/dash',
            response_formatter=lambda x: x['dashes'],
        )

    def create_dashboard(self, title, description, graphs, template_variables=None):
        """
        Create a new dashboard with the given *title*, *description* and *graphs*.

        See the `dashboard API documentation <http://docs.datadoghq.com/api/#dashboard>`_ for the
        dashboard data format.
        """
        # Accept a JSON-encoded string for *graphs* as a convenience.
        # NOTE(review): under Python 2 a ``unicode`` JSON string is not
        # matched by ``isinstance(graphs, str)`` and would be sent
        # unparsed -- confirm callers pass ``str`` or parsed lists.
        if isinstance(graphs, str):
            graphs = json.loads(graphs)
        body = {
            'title': title,
            'description': description,
            'graphs': graphs,
            'template_variables': template_variables or [],
        }
        # The API returns the new dashboard; only its id is surfaced.
        return self.http_request('POST', '/dash', body,
            response_formatter=lambda x: x['dash']['id'],
        )

    def update_dashboard(self, dash_id, title, description, graphs, template_variables=None):
        """
        Update the dashboard whose id is *dash_id*, replacing it's *title*, *description* and *graphs*.
        Return the dashboard with the given id.

        See the `dashboard API documentation <http://docs.datadoghq.com/api/#dashboard>`_ for the
        dashboard data format.
        """
        # Same string-coercion convenience (and caveat) as create_dashboard.
        if isinstance(graphs, str):
            graphs = json.loads(graphs)
        body = {
            'title': title,
            'description': description,
            'graphs': graphs,
            'template_variables': template_variables or [],
        }
        return self.http_request('PUT', '/dash/' + str(dash_id), body,
            response_formatter=lambda x: x['dash']['id'],
        )

    def delete_dashboard(self, dash_id):
        """
        Delete the dashboard with the given *dash_id*.

        >>> dog_http_api.delete_dashboard(dash_id)
        """
        return self.http_request('DELETE', '/dash/' + str(dash_id))
| bsd-3-clause |
danielbair/aeneas | aeneas/tools/run_vad.py | 5 | 6852 | #!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Extract a list of speech intervals from the given audio file,
using the MFCC energy-based VAD algorithm.
"""
from __future__ import absolute_import
from __future__ import print_function
import io
import sys
from aeneas.audiofile import AudioFileConverterError
from aeneas.audiofile import AudioFileNotInitializedError
from aeneas.audiofile import AudioFileUnsupportedFormatError
from aeneas.audiofilemfcc import AudioFileMFCC
from aeneas.runtimeconfiguration import RuntimeConfiguration
from aeneas.tools.abstract_cli_program import AbstractCLIProgram
import aeneas.globalfunctions as gf
class RunVADCLI(AbstractCLIProgram):
    """
    Extract a list of speech intervals from the given audio file,
    using the MFCC energy-based VAD algorithm.
    """
    # Paths used only to render the CLI usage examples below.
    INPUT_FILE = gf.relative_path("res/audio.mp3", __file__)
    OUTPUT_BOTH = "output/both.txt"
    OUTPUT_NONSPEECH = "output/nonspeech.txt"
    OUTPUT_SPEECH = "output/speech.txt"
    # Which interval lists the user may request on the command line.
    MODES = [u"both", u"nonspeech", u"speech"]

    NAME = gf.file_name_without_extension(__file__)

    HELP = {
        "description": u"Extract a list of speech intervals using the MFCC energy-based VAD.",
        "synopsis": [
            (u"AUDIO_FILE [%s] [OUTPUT_FILE]" % (u"|".join(MODES)), True)
        ],
        "examples": [
            u"%s both %s" % (INPUT_FILE, OUTPUT_BOTH),
            u"%s nonspeech %s" % (INPUT_FILE, OUTPUT_NONSPEECH),
            u"%s speech %s" % (INPUT_FILE, OUTPUT_SPEECH)
        ],
        "options": [
            u"-i, --index : output intervals as indices instead of seconds",
        ]
    }

    def perform_command(self):
        """
        Perform command and return the appropriate exit code.

        :rtype: int
        """
        # Positional arguments: AUDIO_FILE and a mode from MODES are mandatory,
        # the output file path is optional (defaults to printing to stdout).
        if len(self.actual_arguments) < 2:
            return self.print_help()
        audio_file_path = self.actual_arguments[0]
        mode = self.actual_arguments[1]
        if mode not in [u"speech", u"nonspeech", u"both"]:
            return self.print_help()
        output_file_path = None
        if len(self.actual_arguments) >= 3:
            output_file_path = self.actual_arguments[2]
        # With -i/--index intervals are emitted as frame indices, not seconds.
        output_time = not self.has_option([u"-i", u"--index"])
        self.check_c_extensions("cmfcc")
        if not self.check_input_file(audio_file_path):
            return self.ERROR_EXIT_CODE
        if (output_file_path is not None) and (not self.check_output_file(output_file_path)):
            return self.ERROR_EXIT_CODE

        self.print_info(u"Reading audio...")
        try:
            audio_file_mfcc = AudioFileMFCC(audio_file_path, rconf=self.rconf, logger=self.logger)
        except AudioFileConverterError:
            self.print_error(u"Unable to call the ffmpeg executable '%s'" % (self.rconf[RuntimeConfiguration.FFMPEG_PATH]))
            self.print_error(u"Make sure the path to ffmpeg is correct")
            return self.ERROR_EXIT_CODE
        except (AudioFileUnsupportedFormatError, AudioFileNotInitializedError):
            self.print_error(u"Cannot read file '%s'" % (audio_file_path))
            self.print_error(u"Check that its format is supported by ffmpeg")
            return self.ERROR_EXIT_CODE
        except Exception as exc:
            self.print_error(u"An unexpected error occurred while reading the audio file:")
            self.print_error(u"%s" % exc)
            return self.ERROR_EXIT_CODE
        self.print_info(u"Reading audio... done")

        self.print_info(u"Executing VAD...")
        audio_file_mfcc.run_vad()
        self.print_info(u"Executing VAD... done")

        # Fetch both interval lists once; the selected mode decides which
        # get printed and with which tab-separated template.
        speech = audio_file_mfcc.intervals(speech=True, time=output_time)
        nonspeech = audio_file_mfcc.intervals(speech=False, time=output_time)
        if mode == u"speech":
            if output_time:
                intervals = [(i.begin, i.end) for i in speech]
                template = u"%.3f\t%.3f"
            else:
                intervals = speech
                template = u"%d\t%d"
        elif mode == u"nonspeech":
            if output_time:
                intervals = [(i.begin, i.end) for i in nonspeech]
                template = u"%.3f\t%.3f"
            else:
                intervals = nonspeech
                template = u"%d\t%d"
        elif mode == u"both":
            # In "both" mode each interval is tagged with its kind and the two
            # lists are merged in chronological order.
            if output_time:
                speech = [(i.begin, i.end, u"speech") for i in speech]
                nonspeech = [(i.begin, i.end, u"nonspeech") for i in nonspeech]
                template = u"%.3f\t%.3f\t%s"
            else:
                speech = [(i[0], i[1], u"speech") for i in speech]
                nonspeech = [(i[0], i[1], u"nonspeech") for i in nonspeech]
                template = u"%d\t%d\t%s"
            intervals = sorted(speech + nonspeech)
        self.write_to_file(output_file_path, intervals, template)
        return self.NO_ERROR_EXIT_CODE

    def write_to_file(self, output_file_path, intervals, template):
        """
        Write intervals to file.

        :param output_file_path: path of the output file to be written;
                                 if ``None``, print to stdout
        :type  output_file_path: string (path)
        :param intervals: a list of tuples, each representing an interval
        :type  intervals: list of tuples
        """
        msg = [template % (interval) for interval in intervals]
        if output_file_path is None:
            self.print_info(u"Intervals detected:")
            for line in msg:
                self.print_generic(line)
        else:
            with io.open(output_file_path, "w", encoding="utf-8") as output_file:
                output_file.write(u"\n".join(msg))
            self.print_success(u"Created file '%s'" % output_file_path)
def main():
    """Entry point: build the CLI object and run it on the process arguments."""
    cli = RunVADCLI()
    cli.run(arguments=sys.argv)


if __name__ == '__main__':
    main()
| agpl-3.0 |
jilljenn/voyageavecmoi | backend/twitter_bot.py | 1 | 2004 | #!/usr/bin/env python3
import os
import time
import pprint
import twitter
import traceback
import rethinkdb as r
from secret import CONSUMER_KEY, CONSUMER_SECRET
def respond(text):
    """Placeholder responder: log the incoming text and produce no reply yet."""
    message = 'Got one: %r' % text
    print(message)
    # TODO: build an actual reply from *text*.
    return None
def isAddressedToMe(tweet):
    """Return True when the tweet text starts by mentioning our own screen name."""
    pprint.pprint(tweet)  # debug: dump the raw tweet structure
    mention = '@' + me['screen_name'].lower()
    return tweet['text'].lower().startswith(mention)
def on_tweet(tweet):
    """Dispatch *tweet* to _on_tweet, logging (but not propagating) failures.

    KeyboardInterrupt is re-raised so Ctrl-C still stops the stream loop;
    any other exception is printed and swallowed to keep the stream alive.
    """
    try:
        _on_tweet(tweet)
    except KeyboardInterrupt:
        raise
    except Exception:
        # The bound exception name was unused; print_exc() reads the active
        # exception directly.
        traceback.print_exc()
def _on_tweet(tweet):
    """Reply to a tweet that mentions us; silently ignore everything else."""
    if not isAddressedToMe(tweet):
        return
    # Drop the leading @mention before handing the text to the responder.
    body = tweet['text'].split(' ', 1)[1]
    reply_text = respond(body)
    if not reply_text:
        reply_text = 'No response'
    status = '@{} {}'.format(tweet['user']['screen_name'], reply_text)
    twitter.statuses.update(status=status, in_reply_to_status_id=tweet['id'])
def fetch_tweets(db, stream):
    """Consume the user stream forever, handing each event to on_tweet.

    The first item of the stream is a preamble message and is discarded, see
    https://dev.twitter.com/streaming/overview/messages-types#user_stream_messsages
    """
    events = stream.user(replies='all')
    next(events)  # skip the preamble
    for event in events:
        on_tweet(event)
# Cached OAuth token file; the oauth dance runs once, then credentials are reused.
MY_TWITTER_CREDS = os.path.expanduser('~/.voyageavecmoi_credentials')

if not os.path.exists(MY_TWITTER_CREDS):
    twitter.oauth_dance("Voyage avec moi", CONSUMER_KEY, CONSUMER_SECRET,
                        MY_TWITTER_CREDS)

oauth_token, oauth_secret = twitter.read_token_file(MY_TWITTER_CREDS)
auth = twitter.OAuth(oauth_token, oauth_secret, CONSUMER_KEY, CONSUMER_SECRET)
# Streaming endpoint for real-time mentions; REST client for posting replies.
stream = twitter.TwitterStream(auth=auth, domain='userstream.twitter.com')
# NOTE: this rebinds the module name `twitter` to a client instance, so the
# twitter module itself is no longer reachable below this line.
twitter = twitter.Twitter(auth=auth)
me = twitter.account.verify_credentials()
print('Logged to Twitter as @{}'.format(me['screen_name']))

try:
    # Require the RethinkDB database to exist before entering the stream loop.
    db = r.connect('localhost', 28015)
    list_db = r.db_list().run(db)
    if 'voyageavecmoi' not in list_db:
        raise RuntimeError('Il faut créer la DB voyageavecmoi avec le script create_database.py avant de lancer ce script!')
    fetch_tweets(db, stream)
except Exception as e:
    print ('Une erreur est survenue!')
    print (e)
| agpl-3.0 |
julioeiras/pygame-site | src/app/users/web_views.py | 3 | 1585 | # coding: utf-8
from flask import Blueprint, render_template, redirect, request
from flask_login import current_user, login_user, logout_user

import usecase
# Blueprint grouping the user auth/admin views; templates resolve from ./templates.
web_views = Blueprint('user_web_views', __name__, template_folder='templates')
@web_views.route('/admin')
def admin():
    """Render the admin page; anonymous visitors are bounced to the login form."""
    if not current_user.is_authenticated:
        return redirect('/login')
    values = {"user": current_user.name}
    return render_template("admin.html", values=values)
@web_views.route('/signup', methods=["POST"])
def signup():
    """Create an admin user from the signup form and log them in.

    Redirects back to the login page when any required field is missing.
    """
    form = request.form
    name = form.get("name")
    email = form.get("email")
    password = form.get("password")
    # When both checkboxes are set, "female" wins (matches the original
    # sequential overwrite behavior).
    gender = 'female' if form.get("female") else ('male' if form.get("male") else None)
    if all([name, email, password, gender]):
        user = usecase.add_user(name, email, password, gender, is_admin=True)
        login_user(user)
        return redirect('/admin')
    return redirect('/login')
@web_views.route('/login', methods=["GET", "POST"])
def login():
    """Show the sign-in page (GET) or authenticate submitted credentials (POST)."""
    if request.method != 'POST':
        return render_template('sign-in-up.html')
    password = request.form.get("password")
    login_name = request.form.get("login")
    user = usecase.check_user_credentials(login_name, password)
    if user is None:
        return "usuario inexistente"
    usecase.authentic_user(user)
    login_user(user)
    return redirect('/admin')
@web_views.route("/logout", methods=["GET"])
def logout():
    """End the current flask-login session and return to the home page."""
    # Previously commented out, which left users logged in after visiting
    # /logout; requires `logout_user` imported from flask_login.
    logout_user()
    return redirect('/')
| mit |
capturePointer/avplayer | libtorrent/parse_dht_stats.py | 48 | 1579 | #! /usr/bin/env python
import sys
import os
# Names of generated .gnuplot scripts; rendered by the loop at the bottom.
gnuplot_scripts = []
def gen_stats_gnuplot(name, y, lines):
global gnuplot_scripts
stat = open(sys.argv[1])
line = stat.readline()
while not 'minute:' in line:
line = stat.readline()
names = line.strip().split(':')
counter = 1
for i in names:
print '%d: %s' % (counter, i)
counter += 1
out = open('%s.gnuplot' % name, 'w+')
out.write('''
set term png size 1200,700 small
set output "%s.png"
set title "%s"
set ylabel "%s"
set xlabel "time (minutes)"
plot ''' % (name, name.strip('_'), y))
first = True
for i in lines:
if not first:
out.write(', \\\n')
first = False
out.write('"%s" using 1:%d title "%s" with lines' % (sys.argv[1], names.index(i)+1, i))
out.write('\n')
out.write('''set terminal postscript
set output "%s.ps"
replot
''' % (name))
out.close()
gnuplot_scripts += [name]
# One chart per DHT statistic family; the series names must match the column
# names printed in the stats file header.
gen_stats_gnuplot('dht_routing_table_size', 'nodes', ['active nodes','passive nodes'])
gen_stats_gnuplot('dht_tracker_table_size', '', ['num torrents', 'num peers'])
gen_stats_gnuplot('dht_announces', 'messages per minute', ['announces per min', 'failed announces per min'])
gen_stats_gnuplot('dht_clients', 'messages per minute', ['total msgs per min', 'az msgs per min', 'ut msgs per min', 'lt msgs per min', 'mp msgs per min', 'gr msgs per min'])
gen_stats_gnuplot('dht_rate', 'bytes per second', ['bytes in per sec', 'bytes out per sec'])
gen_stats_gnuplot('dht_errors', 'messages per minute', ['error replies sent', 'error queries recvd'])

# Render every generated script to PNG/PS via gnuplot.
for i in gnuplot_scripts:
    os.system('gnuplot %s.gnuplot' % i);
| gpl-3.0 |
maloi/iadmin | iapp_maillist/views.py | 1 | 3940 | from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.views.generic import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from .models import LdapMaillist
from .forms import LdapMaillistForm
from iapp_user.models import LdapUser
from iapp_user.utils import debug, get_or_none
class MaillistList(ListView):
    """List every LDAP mailing list, sorted by cn."""

    model = LdapMaillist

    def get_context_data(self, **kwargs):
        context = super(MaillistList, self).get_context_data(**kwargs)
        all_lists = context['ldapmaillist_list']
        # Expose a cn-sorted copy under the name the template expects.
        context['maillists'] = sorted(all_lists, key=lambda entry: entry.cn)
        return context
class MaillistCreate(CreateView):
    """Create a new LDAP mailing list; save logic is shared with MaillistUpdate."""

    model = LdapMaillist
    form_class = LdapMaillistForm

    def form_valid(self, form):
        # De-duplication of members/owners lives in the module-level helper.
        return _form_valid(self, form)

    def get_success_url(self):
        # Redirect to the detail page of the list named in the POSTed cn.
        return _get_success_url(self)
class MaillistDelete(DeleteView):
    """Delete a mailing list, then return to the overview page."""
    model = LdapMaillist
    success_url = reverse_lazy('maillist_list')
class MaillistUpdate(UpdateView):
    """Edit a mailing list's members and owners.

    DN values stored in LDAP (e.g. ``uid=jdoe,ou=people,...``) are resolved
    to LdapUser objects for the form's initial data; member DNs that no
    longer resolve are surfaced via ``invalidMembers`` so the template can
    flag them.
    """

    model = LdapMaillist
    form_class = LdapMaillistForm
    template_name_suffix = '_update_form'

    @staticmethod
    def _uid_from_dn(dn):
        # 'uid=jdoe,ou=people,dc=example' -> 'jdoe'
        return dn.split('=')[1].split(',')[0]

    def get_initial(self):
        # Owners are assumed to always resolve; a dangling owner DN raises.
        owners = [LdapUser.objects.get(uid=self._uid_from_dn(dn))
                  for dn in self.object.owner]
        members = []
        for dn in self.object.member:
            user = get_or_none(LdapUser, uid=self._uid_from_dn(dn))
            if user:
                # Reuse the fetched object instead of issuing a second
                # identical lookup (previously queried twice per member).
                members.append(user)
        return {
            'member': sorted(members, key=lambda user: user.cn),
            'owner': sorted(owners, key=lambda user: user.cn),
        }

    def get_context_data(self, **kwargs):
        # Use the explicit class in super(): super(self.__class__, ...)
        # recurses infinitely if this view is ever subclassed.
        context = super(MaillistUpdate, self).get_context_data(**kwargs)
        invalid_members = []
        for dn in self.object.member:
            uid = self._uid_from_dn(dn)
            if get_or_none(LdapUser, uid=uid) is None:
                invalid_members.append(uid)
        context['invalidMembers'] = sorted(invalid_members)
        return context

    def form_valid(self, form):
        return _form_valid(self, form)

    def get_success_url(self):
        return _get_success_url(self)
def _get_success_url(self):
    """Shared success URL: the detail page of the list named in the POSTed cn."""
    cn = self.request.POST['cn']
    return reverse('maillist_detail', kwargs={'pk': cn})
def _form_valid(self, form):
    """Shared save logic: persist the list with de-duplicated member/owner entries."""
    self.object = form.save(commit=False)
    post = self.request.POST
    # set() removes duplicate form entries; ordering is not significant here.
    self.object.member = list(set(post.getlist('member')))
    self.object.owner = list(set(post.getlist('owner')))
    self.object.rfc822MailMember = list(set(post.getlist('rfc822MailMember')))
    self.object.save()
    return redirect(self.get_success_url())
class MaillistDetail(DetailView):
    """Show a mailing list with resolved owners/members and unresolvable member uids."""

    model = LdapMaillist

    def get_context_data(self, **kwargs):
        context = super(MaillistDetail, self).get_context_data(**kwargs)
        context['owners'] = []
        for dn in self.object.owner:
            uid = dn.split('=')[1].split(',')[0]
            context['owners'].append(LdapUser.objects.get(uid=uid))
        context['members'] = []
        invalid_members = []
        # Split member DNs into resolvable users and dangling uids.
        for dn in self.object.member:
            uid = dn.split('=')[1].split(',')[0]
            user = get_or_none(LdapUser, uid=uid)
            if user is None:
                invalid_members.append(uid)
            else:
                # Reuse the fetched object instead of issuing a second
                # identical lookup (previously queried twice per member).
                context['members'].append(user)
        context['invalidMembers'] = sorted(invalid_members)
        return context
| mit |
kumar303/zamboni | mkt/inapp/serializers.py | 14 | 4576 | from cStringIO import StringIO
from django import forms
from django.conf import settings
from django.utils.translation import trans_real as translation
import commonware
import requests
from jinja2.filters import do_dictsort
from PIL import Image
from rest_framework import serializers
from rest_framework.serializers import ValidationError
from tower import ugettext as _
from mkt.prices.models import Price
from mkt.api.fields import TranslationSerializerField
from mkt.api.forms import SchemeURLValidator as URLValidator
from mkt.inapp.models import InAppProduct
# Module-level logger; 'z.inapp' namespaces in-app purchase log output.
log = commonware.log.getLogger('z.inapp')
class NameField(TranslationSerializerField):
    """Translation field that always serializes in the product's default locale."""

    def field_to_native(self, obj, field_name):
        # TODO: maybe remove this when the API response is fixed in
        # bug 1070125
        # Force serialization in the product's own default locale instead of
        # the request locale, so the API returns the canonical name.
        self.requested_language = obj.default_locale
        return super(NameField, self).field_to_native(obj, field_name)
class InAppProductSerializer(serializers.ModelSerializer):
    """API serializer for in-app products.

    Validates that the localized name covers the declared default locale and
    that the (optional) logo URL points to a correctly sized image.
    """
    # Locale choices normalized to lowercase dashed tags (e.g. 'pt-br'),
    # built from settings.LANGUAGES sorted by key.
    _locales = [(translation.to_locale(k).replace('_', '-').lower(), v)
                for k, v in do_dictsort(settings.LANGUAGES)]
    app = serializers.SlugRelatedField(read_only=True, slug_field='app_slug',
                                       source='webapp')
    guid = serializers.CharField(read_only=True)
    include_inactive = serializers.BooleanField(read_only=True)
    logo_url = serializers.CharField(
        validators=[URLValidator(schemes=['http', 'https'])],
        required=False)
    name = NameField()
    default_locale = serializers.ChoiceField(choices=_locales)
    price_id = serializers.PrimaryKeyRelatedField(source='price')

    class Meta:
        model = InAppProduct
        fields = ['active', 'guid', 'app', 'price_id', 'name',
                  'default_locale', 'logo_url']

    def validate(self, attrs):
        # The name dict must contain a non-empty translation for the
        # declared default locale.
        default_name = attrs['name'].get(attrs['default_locale'], None)
        if ((attrs['default_locale'] not in attrs['name']) or
                not default_name):
            raise ValidationError(
                'no localization for default_locale {d} in "name"'
                .format(d=repr(attrs['default_locale'])))
        return attrs

    def validate_logo_url(self, attrs, source):
        """Fetch the logo over HTTP and check its size/format; no-op when absent."""
        logo_url = attrs.get(source)
        if not logo_url:
            return attrs

        # This message is shown for all image errors even though it may
        # not be correct. This is to prevent leaking info that could
        # lead to port scanning, DOS'ing or other vulnerabilities.
        msg = _('Product logo must be a 64x64 image. '
                'Check that the URL is correct.')

        tmp_dest = StringIO()
        try:
            res = requests.get(
                logo_url, timeout=3,
                headers={'User-Agent': settings.MARKETPLACE_USER_AGENT})
            res.raise_for_status()

            # Stream the download in chunks and abort as soon as the payload
            # exceeds the configured maximum, to bound memory use.
            payload = 0
            read_size = 100000
            for chunk in res.iter_content(read_size):
                payload += len(chunk)
                if payload > settings.MAX_INAPP_IMAGE_SIZE:
                    log.info('clean_logo_url: payload exceeded allowed '
                             'size: {url}: '.format(url=logo_url))
                    raise ValidationError(msg)
                tmp_dest.write(chunk)
        except ValidationError:
            raise
        except Exception, exc:
            log.info('clean_logo_url: exception fetching {url}: '
                     '{exc.__class__.__name__}: {exc}'
                     .format(url=logo_url, exc=exc))
            raise ValidationError(msg)

        tmp_dest.seek(0)
        try:
            img = Image.open(tmp_dest)
            img.verify()
        except Exception, exc:
            log.info('clean_logo_url: Error loading/verifying {url}: '
                     '{exc.__class__.__name__}: {exc}'
                     .format(url=logo_url, exc=exc))
            raise ValidationError(msg)

        # Require an exact square of the configured size.
        if img.size != (settings.REQUIRED_INAPP_IMAGE_SIZE,
                        settings.REQUIRED_INAPP_IMAGE_SIZE):
            log.info('clean_logo_url: not a valid size: {url}; '
                     'width={size[0]}; height={size[1]}'
                     .format(url=logo_url, size=img.size))
            raise ValidationError(msg)
        return attrs
class InAppProductForm(forms.ModelForm):
    """Editing form for an in-app product; only the price tier is editable."""

    class Meta:
        model = InAppProduct
        fields = ['price']

    def __init__(self, *args, **kwargs):
        super(InAppProductForm, self).__init__(*args, **kwargs)
        # Restrict the price choices to tiers that are currently active.
        price_field = self.fields['price']
        price_field.queryset = Price.objects.active()
| bsd-3-clause |
towerjoo/DjangoNotes | Django-1.5.1/django/contrib/comments/models.py | 99 | 7729 | from django.conf import settings
from django.contrib.comments.managers import CommentManager
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
# Hard cap on comment body length; sites may override via settings.COMMENT_MAX_LENGTH.
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class BaseCommentAbstractModel(models.Model):
    """
    An abstract base class that any custom comment models probably should
    subclass.
    """

    # Content-object field: a generic FK so comments can attach to any model.
    content_type = models.ForeignKey(ContentType,
            verbose_name=_('content type'),
            related_name="content_type_set_for_%(class)s")
    # TextField (not an integer) so non-integer primary keys are supported.
    object_pk = models.TextField(_('object ID'))
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")

    # Metadata about the comment
    site = models.ForeignKey(Site)

    class Meta:
        abstract = True

    def get_content_object_url(self):
        """
        Get a URL suitable for redirecting to the content object.
        """
        return urlresolvers.reverse(
            "comments-url-redirect",
            args=(self.content_type_id, self.object_pk)
        )
@python_2_unicode_compatible
class Comment(BaseCommentAbstractModel):
    """
    A user comment about some object.
    """

    # Who posted this comment? If ``user`` is set then it was an authenticated
    # user; otherwise at least user_name should have been set and the comment
    # was posted by a non-authenticated user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'),
                             blank=True, null=True, related_name="%(class)s_comments")
    user_name = models.CharField(_("user's name"), max_length=50, blank=True)
    user_email = models.EmailField(_("user's email address"), blank=True)
    user_url = models.URLField(_("user's URL"), blank=True)

    comment = models.TextField(_('comment'), max_length=COMMENT_MAX_LENGTH)

    # Metadata about the comment
    # submit_date defaults to None and is filled in by save() below.
    submit_date = models.DateTimeField(_('date/time submitted'), default=None)
    ip_address = models.IPAddressField(_('IP address'), blank=True, null=True)
    is_public = models.BooleanField(_('is public'), default=True,
                    help_text=_('Uncheck this box to make the comment effectively ' \
                                'disappear from the site.'))
    is_removed = models.BooleanField(_('is removed'), default=False,
                    help_text=_('Check this box if the comment is inappropriate. ' \
                                'A "This comment has been removed" message will ' \
                                'be displayed instead.'))

    # Manager
    objects = CommentManager()

    class Meta:
        db_table = "django_comments"
        ordering = ('submit_date',)
        permissions = [("can_moderate", "Can moderate comments")]
        verbose_name = _('comment')
        verbose_name_plural = _('comments')

    def __str__(self):
        return "%s: %s..." % (self.name, self.comment[:50])

    def save(self, *args, **kwargs):
        # Stamp the submission time on first save if it was not provided.
        if self.submit_date is None:
            self.submit_date = timezone.now()
        super(Comment, self).save(*args, **kwargs)

    def _get_userinfo(self):
        """
        Get a dictionary that pulls together information about the poster
        safely for both authenticated and non-authenticated comments.

        This dict will have ``name``, ``email``, and ``url`` fields.
        """
        # Cached on the instance after the first call.
        if not hasattr(self, "_userinfo"):
            userinfo = {
                "name": self.user_name,
                "email": self.user_email,
                "url": self.user_url
            }
            if self.user_id:
                u = self.user
                if u.email:
                    userinfo["email"] = u.email

                # If the user has a full name, use that for the user name.
                # However, a given user_name overrides the raw user.username,
                # so only use that if this comment has no associated name.
                if u.get_full_name():
                    userinfo["name"] = self.user.get_full_name()
                elif not self.user_name:
                    userinfo["name"] = u.get_username()
            self._userinfo = userinfo
        return self._userinfo
    userinfo = property(_get_userinfo, doc=_get_userinfo.__doc__)

    def _get_name(self):
        return self.userinfo["name"]
    def _set_name(self, val):
        # Authenticated comments derive the name from the user record.
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the name is read-only."))
        self.user_name = val
    name = property(_get_name, _set_name, doc="The name of the user who posted this comment")

    def _get_email(self):
        return self.userinfo["email"]
    def _set_email(self, val):
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the email is read-only."))
        self.user_email = val
    email = property(_get_email, _set_email, doc="The email of the user who posted this comment")

    def _get_url(self):
        return self.userinfo["url"]
    def _set_url(self, val):
        self.user_url = val
    url = property(_get_url, _set_url, doc="The URL given by the user who posted this comment")

    def get_absolute_url(self, anchor_pattern="#c%(id)s"):
        # The anchor pattern is %-formatted against the instance's __dict__,
        # so any model field name may appear in it (by default, the id).
        return self.get_content_object_url() + (anchor_pattern % self.__dict__)

    def get_as_text(self):
        """
        Return this comment as plain text.  Useful for emails.
        """
        d = {
            'user': self.user or self.name,
            'date': self.submit_date,
            'comment': self.comment,
            'domain': self.site.domain,
            'url': self.get_absolute_url()
        }
        return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % d
@python_2_unicode_compatible
class CommentFlag(models.Model):
    """
    Records a flag on a comment. This is intentionally flexible; right now, a
    flag could be:

        * A "removal suggestion" -- where a user suggests a comment for (potential) removal.

        * A "moderator deletion" -- used when a moderator deletes a comment.

    You can (ab)use this model to add other flags, if needed. However, by
    design users are only allowed to flag a comment with a given flag once;
    if you want rating look elsewhere.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), related_name="comment_flags")
    comment = models.ForeignKey(Comment, verbose_name=_('comment'), related_name="flags")
    flag = models.CharField(_('flag'), max_length=30, db_index=True)
    # Defaults to None and is stamped by save() below.
    flag_date = models.DateTimeField(_('date'), default=None)

    # Constants for flag types
    SUGGEST_REMOVAL = "removal suggestion"
    MODERATOR_DELETION = "moderator deletion"
    MODERATOR_APPROVAL = "moderator approval"

    class Meta:
        db_table = 'django_comment_flags'
        # Enforces the "one flag of a kind per user per comment" rule.
        unique_together = [('user', 'comment', 'flag')]
        verbose_name = _('comment flag')
        verbose_name_plural = _('comment flags')

    def __str__(self):
        return "%s flag of comment ID %s by %s" % \
            (self.flag, self.comment_id, self.user.get_username())

    def save(self, *args, **kwargs):
        if self.flag_date is None:
            self.flag_date = timezone.now()
        super(CommentFlag, self).save(*args, **kwargs)
| mit |
lisael/pg-django | tests/regressiontests/multiple_database/tests.py | 33 | 87447 | from __future__ import absolute_import
import datetime
import pickle
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.db import connections, router, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.test import TestCase
from .models import Book, Person, Pet, Review, UserProfile
def copy_content_types_from_default_to_other():
    """Duplicate all ContentType rows from the 'default' DB into 'other'.

    On post_syncdb, content types are created only in the 'default' database,
    but generic-foreign-key tests need them in 'other' too. The problem is
    masked on backends that defer constraint checks: each test ends with a
    rollback, so constraints are never checked. It only shows up on
    MySQL + InnoDB.
    """
    for content_type in ContentType.objects.using('default').all():
        content_type.save(using='other')
class QueryTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets will use the default database by default"
self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.using('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').all().db, 'other')
def test_default_creation(self):
"Objects created on the default database don't leak onto other databases"
# Create a book on the default database using create()
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title="Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save()
# Check that book exists on the default database, but not on other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Dive Into Python" should exist on default database')
self.assertRaises(Book.DoesNotExist,
Book.objects.using('other').get,
title="Pro Django"
)
try:
Book.objects.get(title="Dive into Python")
Book.objects.using('default').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on default database')
self.assertRaises(Book.DoesNotExist,
Book.objects.using('other').get,
title="Dive into Python"
)
def test_other_creation(self):
"Objects created on another database don't leak onto the default database"
# Create a book on the second database
Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title="Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
# Check that book exists on the default database, but not on other database
try:
Book.objects.using('other').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Dive Into Python" should exist on other database')
self.assertRaises(Book.DoesNotExist,
Book.objects.get,
title="Pro Django"
)
self.assertRaises(Book.DoesNotExist,
Book.objects.using('default').get,
title="Pro Django"
)
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
self.assertRaises(Book.DoesNotExist,
Book.objects.get,
title="Dive into Python"
)
self.assertRaises(Book.DoesNotExist,
Book.objects.using('default').get,
title="Dive into Python"
)
def test_basic_queries(self):
"Queries are constrained to a single database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(title__icontains="dive")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__icontains="dive")
dive = Book.objects.using('other').get(title__iexact="dive INTO python")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__iexact="dive INTO python")
dive = Book.objects.using('other').get(published__year=2009)
self.assertEqual(dive.title, "Dive into Python")
self.assertEqual(dive.published, datetime.date(2009, 5, 4))
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published__year=2009)
years = Book.objects.using('other').dates('published', 'year')
self.assertEqual([o.year for o in years], [2009])
years = Book.objects.using('default').dates('published', 'year')
self.assertEqual([o.year for o in years], [])
months = Book.objects.using('other').dates('published', 'month')
self.assertEqual([o.month for o in months], [5])
months = Book.objects.using('default').dates('published', 'month')
self.assertEqual([o.month for o in months], [])
def test_m2m_separation(self):
"M2M fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
pro.authors = [marty]
dive.authors = [mark]
# Inspect the m2m tables directly.
# There should be 1 entry in each database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Check that queries work across m2m joins
self.assertEqual(list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
[u'Pro Django'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[u'Dive into Python'])
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
mark = Person.objects.using('other').get(name="Mark Pilgrim")
# Retrive related object by descriptor. Related objects should be database-baound
self.assertEqual(list(dive.authors.all().values_list('name', flat=True)),
[u'Mark Pilgrim'])
self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)),
[u'Dive into Python'])
def test_m2m_forward_operations(self):
"M2M forward manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors = [mark]
# Add a second author
john = Person.objects.using('other').create(name="John Smith")
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
dive.authors.add(john)
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[u'Dive into Python'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[u'Dive into Python'])
# Remove the second author
dive.authors.remove(john)
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[u'Dive into Python'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
# Clear all authors
dive.authors.clear()
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
# Create an author through the m2m interface
dive.authors.create(name='Jane Brown')
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
[u'Dive into Python'])
    def test_m2m_reverse_operations(self):
        "M2M reverse manipulations are all constrained to a single DB"
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        # Save the author relations
        dive.authors = [mark]
        # Create a second book on the other database
        grease = Book.objects.using('other').create(title="Greasemonkey Hacks",
                                                    published=datetime.date(2005, 11, 1))
        # Add a book via the reverse m2m accessor (book_set)
        mark.book_set.add(grease)
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                          [u'Mark Pilgrim'])
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
                          [u'Mark Pilgrim'])
        # Remove a book from the m2m
        mark.book_set.remove(grease)
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                          [u'Mark Pilgrim'])
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
                          [])
        # Clear the books associated with mark
        mark.book_set.clear()
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                          [])
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
                          [])
        # Create a book through the reverse m2m interface; the new Book and
        # the m2m row must both land on 'other'
        mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                          [])
        self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
                          [u'Mark Pilgrim'])
    def test_m2m_cross_database_protection(self):
        "Operations that involve sharing M2M objects across databases raise an error"
        # Create a book and author on the default database
        pro = Book.objects.create(title="Pro Django",
                                  published=datetime.date(2008, 12, 16))
        marty = Person.objects.create(name="Marty Alchin")
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        # Assign the reverse m2m (book_set) with an object from a different database
        try:
            marty.book_set = [pro, dive]
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # Add to the reverse m2m with an object from a different database
        try:
            marty.book_set.add(dive)
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # Assign the reverse m2m again (repeat of the check above)
        try:
            marty.book_set = [pro, dive]
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # Add to the forward m2m (authors) with an object from a different database
        try:
            dive.authors.add(marty)
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # Assign the forward m2m with an object from a different database
        try:
            dive.authors = [mark, marty]
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
    def test_m2m_deletion(self):
        "Cascaded deletions of m2m relations issue queries on the right database"
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        dive.authors = [mark]
        # Check the initial state: everything lives on 'other', nothing on 'default'
        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
        self.assertEqual(Person.objects.using('other').count(), 1)
        self.assertEqual(Book.objects.using('other').count(), 1)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
        # Delete the object on the other database
        dive.delete(using='other')
        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
        # The person still exists ...
        self.assertEqual(Person.objects.using('other').count(), 1)
        # ... but the book has been deleted
        self.assertEqual(Book.objects.using('other').count(), 0)
        # ... and the relationship object has also been deleted.
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
        # Now try deletion in the reverse direction. Set up the relation again
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        dive.authors = [mark]
        # Check the initial state
        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
        self.assertEqual(Person.objects.using('other').count(), 1)
        self.assertEqual(Book.objects.using('other').count(), 1)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
        # Delete the object on the other database
        mark.delete(using='other')
        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
        # The person has been deleted ...
        self.assertEqual(Person.objects.using('other').count(), 0)
        # ... but the book still exists
        self.assertEqual(Book.objects.using('other').count(), 1)
        # ... and the relationship object has been deleted.
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
    def test_foreign_key_separation(self):
        "FK fields are constrained to a single database"
        # Create a book and author on the default database
        pro = Book.objects.create(title="Pro Django",
                                  published=datetime.date(2008, 12, 16))
        marty = Person.objects.create(name="Marty Alchin")
        george = Person.objects.create(name="George Vilches")
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        chris = Person.objects.using('other').create(name="Chris Mills")
        # Assign an editor to each book (same database on both sides)
        pro.editor = george
        pro.save()
        dive.editor = chris
        dive.save()
        pro = Book.objects.using('default').get(title="Pro Django")
        self.assertEqual(pro.editor.name, "George Vilches")
        dive = Book.objects.using('other').get(title="Dive into Python")
        self.assertEqual(dive.editor.name, "Chris Mills")
        # Check that queries work across foreign key joins
        self.assertEqual(list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
                          [u'George Vilches'])
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
                          [])
        self.assertEqual(list(Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                          [])
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                          [u'Chris Mills'])
        # Reget the objects to clear caches
        chris = Person.objects.using('other').get(name="Chris Mills")
        dive = Book.objects.using('other').get(title="Dive into Python")
        # Retrieve related objects by descriptor. Related objects should be database-bound
        self.assertEqual(list(chris.edited.values_list('title', flat=True)),
                          [u'Dive into Python'])
    def test_foreign_key_reverse_operations(self):
        "FK reverse manipulations are all constrained to a single DB"
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        chris = Person.objects.using('other').create(name="Chris Mills")
        # Save the editor relation
        dive.editor = chris
        dive.save()
        # Add a second book edited by chris
        html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                          [])
        chris.edited.add(html5)
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                          [u'Chris Mills'])
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                          [u'Chris Mills'])
        # Unlink the second book from the reverse FK set
        chris.edited.remove(html5)
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                          [])
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                          [u'Chris Mills'])
        # Clear all edited books
        chris.edited.clear()
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                          [])
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                          [])
        # Create a book through the reverse FK interface; it must land on 'other'
        chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                          [])
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
                          [u'Chris Mills'])
        self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                          [])
    def test_foreign_key_cross_database_protection(self):
        "Operations that involve sharing FK objects across databases raise an error"
        # Create a book and author on the default database
        pro = Book.objects.create(title="Pro Django",
                                  published=datetime.date(2008, 12, 16))
        marty = Person.objects.create(name="Marty Alchin")
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        # Set a foreign key with an object from a different database
        try:
            dive.editor = marty
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # Set a foreign key set with an object from a different database
        try:
            marty.edited = [pro, dive]
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # Add to a foreign key set with an object from a different database
        try:
            marty.edited.add(dive)
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # BUT! if you assign a FK object when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        chris = Person(name="Chris Mills")
        html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
        # initially, no db assigned
        self.assertEqual(chris._state.db, None)
        self.assertEqual(html5._state.db, None)
        # old object comes from 'other', so the new object is set to use 'other'...
        dive.editor = chris
        html5.editor = mark
        self.assertEqual(chris._state.db, 'other')
        self.assertEqual(html5._state.db, 'other')
        # ... but it isn't saved yet, so 'other' still only holds the originals
        self.assertEqual(list(Person.objects.using('other').values_list('name',flat=True)),
                          [u'Mark Pilgrim'])
        self.assertEqual(list(Book.objects.using('other').values_list('title',flat=True)),
                          [u'Dive into Python'])
        # When saved (no using required), new objects go to 'other'
        chris.save()
        html5.save()
        self.assertEqual(list(Person.objects.using('default').values_list('name',flat=True)),
                          [u'Marty Alchin'])
        self.assertEqual(list(Person.objects.using('other').values_list('name',flat=True)),
                          [u'Chris Mills', u'Mark Pilgrim'])
        self.assertEqual(list(Book.objects.using('default').values_list('title',flat=True)),
                          [u'Pro Django'])
        self.assertEqual(list(Book.objects.using('other').values_list('title',flat=True)),
                          [u'Dive into HTML5', u'Dive into Python'])
        # This also works if you assign the FK in the constructor
        water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
        self.assertEqual(water._state.db, 'other')
        # ... but it isn't saved yet
        self.assertEqual(list(Book.objects.using('default').values_list('title',flat=True)),
                          [u'Pro Django'])
        self.assertEqual(list(Book.objects.using('other').values_list('title',flat=True)),
                          [u'Dive into HTML5', u'Dive into Python'])
        # When saved, the new book goes to 'other'
        water.save()
        self.assertEqual(list(Book.objects.using('default').values_list('title',flat=True)),
                          [u'Pro Django'])
        self.assertEqual(list(Book.objects.using('other').values_list('title',flat=True)),
                          [u'Dive into HTML5', u'Dive into Python', u'Dive into Water'])
def test_foreign_key_deletion(self):
"Cascaded deletions of Foreign Key relations issue queries on the right database"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
fido = Pet.objects.using('other').create(name="Fido", owner=mark)
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Pet.objects.using('other').count(), 1)
# Delete the person object, which will cascade onto the pet
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
# Both the pet and the person have been deleted from the right database
self.assertEqual(Person.objects.using('other').count(), 0)
self.assertEqual(Pet.objects.using('other').count(), 0)
def test_foreign_key_validation(self):
"ForeignKey.validate() uses the correct database"
mickey = Person.objects.using('other').create(name="Mickey")
pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey)
self.assertEqual(None, pluto.full_clean())
    def test_o2o_separation(self):
        "OneToOne fields are constrained to a single database"
        # Create a user and profile on the default database
        alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
        alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
        # Create a user and profile on the other database
        bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
        bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
        # Retrieve related objects; queries should be database constrained
        alice = User.objects.using('default').get(username="alice")
        self.assertEqual(alice.userprofile.flavor, "chocolate")
        bob = User.objects.using('other').get(username="bob")
        self.assertEqual(bob.userprofile.flavor, "crunchy frog")
        # Check that queries work across joins
        self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
                          [u'alice'])
        self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
                          [])
        self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
                          [])
        self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
                          [u'bob'])
        # Reget the objects to clear caches
        alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
        bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')
        # Retrieve related object by descriptor. Related objects should be database-bound
        self.assertEqual(alice_profile.user.username, 'alice')
        self.assertEqual(bob_profile.user.username, 'bob')
    def test_o2o_cross_database_protection(self):
        "Operations that involve sharing O2O objects across databases raise an error"
        # Create a user and profile on the default database
        alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
        # Create a user and profile on the other database
        bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
        # Set a one-to-one relation with an object from a different database
        alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
        try:
            bob.userprofile = alice_profile
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # BUT! if you assign a O2O object when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
        new_bob_profile = UserProfile(flavor="spring surprise")
        charlie = User(username='charlie',email='charlie@example.com')
        charlie.set_unusable_password()
        # initially, no db assigned
        self.assertEqual(new_bob_profile._state.db, None)
        self.assertEqual(charlie._state.db, None)
        # old object comes from 'other', so the new object is set to use 'other'...
        new_bob_profile.user = bob
        charlie.userprofile = bob_profile
        self.assertEqual(new_bob_profile._state.db, 'other')
        self.assertEqual(charlie._state.db, 'other')
        # ... but it isn't saved yet, so 'other' still only holds the originals
        self.assertEqual(list(User.objects.using('other').values_list('username',flat=True)),
                          [u'bob'])
        self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor',flat=True)),
                          [u'crunchy frog'])
        # When saved (no using required), new objects go to 'other'
        charlie.save()
        bob_profile.save()
        new_bob_profile.save()
        self.assertEqual(list(User.objects.using('default').values_list('username',flat=True)),
                          [u'alice'])
        self.assertEqual(list(User.objects.using('other').values_list('username',flat=True)),
                          [u'bob', u'charlie'])
        self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor',flat=True)),
                          [u'chocolate'])
        self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor',flat=True)),
                          [u'crunchy frog', u'spring surprise'])
        # This also works if you assign the O2O relation in the constructor
        denise = User.objects.db_manager('other').create_user('denise','denise@example.com')
        denise_profile = UserProfile(flavor="tofu", user=denise)
        self.assertEqual(denise_profile._state.db, 'other')
        # ... but it isn't saved yet
        self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor',flat=True)),
                          [u'chocolate'])
        self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor',flat=True)),
                          [u'crunchy frog', u'spring surprise'])
        # When saved, the new profile goes to 'other'
        denise_profile.save()
        self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor',flat=True)),
                          [u'chocolate'])
        self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor',flat=True)),
                          [u'crunchy frog', u'spring surprise', u'tofu'])
    def test_generic_key_separation(self):
        "Generic fields are constrained to a single database"
        # ContentType rows must exist on 'other' for generic FKs to resolve there
        copy_content_types_from_default_to_other()
        # Create a book and review on the default database
        pro = Book.objects.create(title="Pro Django",
                                  published=datetime.date(2008, 12, 16))
        review1 = Review.objects.create(source="Python Monthly", content_object=pro)
        # Create a book and review on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
        review1 = Review.objects.using('default').get(source="Python Monthly")
        self.assertEqual(review1.content_object.title, "Pro Django")
        review2 = Review.objects.using('other').get(source="Python Weekly")
        self.assertEqual(review2.content_object.title, "Dive into Python")
        # Reget the objects to clear caches
        dive = Book.objects.using('other').get(title="Dive into Python")
        # Retrieve related object by descriptor. Related objects should be database-bound
        self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)),
                          [u'Python Weekly'])
    def test_generic_key_reverse_operations(self):
        "Generic reverse manipulations are all constrained to a single DB"
        copy_content_types_from_default_to_other()
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        temp = Book.objects.using('other').create(title="Temp",
                                                  published=datetime.date(2009, 5, 4))
        review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
        review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [u'Python Weekly'])
        # Add a second review; this repoints review2's generic FK at dive
        dive.reviews.add(review2)
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [u'Python Monthly', u'Python Weekly'])
        # Remove the first review
        dive.reviews.remove(review1)
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [u'Python Monthly'])
        # Clear all reviews
        dive.reviews.clear()
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [])
        # Create a review through the generic relation; it must land on 'other'
        dive.reviews.create(source='Python Daily')
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                          [u'Python Daily'])
    def test_generic_key_cross_database_protection(self):
        "Operations that involve sharing generic key objects across databases raise an error"
        copy_content_types_from_default_to_other()
        # Create a book and review on the default database
        pro = Book.objects.create(title="Pro Django",
                                  published=datetime.date(2008, 12, 16))
        review1 = Review.objects.create(source="Python Monthly", content_object=pro)
        # Create a book and review on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
        # Point a generic foreign key at an object from a different database
        try:
            review1.content_object = dive
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # Add to a generic relation with an object from a different database
        try:
            dive.reviews.add(review1)
            self.fail("Shouldn't be able to assign across databases")
        except ValueError:
            pass
        # BUT! if you assign a GFK target when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        review3 = Review(source="Python Daily")
        # initially, no db assigned
        self.assertEqual(review3._state.db, None)
        # Dive comes from 'other', so review3 is set to use 'other'...
        review3.content_object = dive
        self.assertEqual(review3._state.db, 'other')
        # ... but it isn't saved yet
        self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
                          [u'Python Monthly'])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source',flat=True)),
                          [u'Python Weekly'])
        # When saved, the new review goes to 'other'
        review3.save()
        self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
                          [u'Python Monthly'])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source',flat=True)),
                          [u'Python Daily', u'Python Weekly'])
    def test_generic_key_deletion(self):
        "Cascaded deletions of Generic Key relations issue queries on the right database"
        copy_content_types_from_default_to_other()
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        review = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
        # Check the initial state: one book and one review, both on 'other'
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Review.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('other').count(), 1)
        self.assertEqual(Review.objects.using('other').count(), 1)
        # Delete the Book object, which will cascade onto the review
        dive.delete(using='other')
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Review.objects.using('default').count(), 0)
        # Both the book and the review have been deleted from the right database
        self.assertEqual(Book.objects.using('other').count(), 0)
        self.assertEqual(Review.objects.using('other').count(), 0)
def test_ordering(self):
"get_next_by_XXX commands stick to a single database"
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
learn = Book.objects.using('other').create(title="Learning Python",
published=datetime.date(2008, 7, 16))
self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
self.assertEqual(dive.get_previous_by_published().title, "Learning Python")
def test_raw(self):
"test the raw() method across databases"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
self.assertEqual(map(lambda o: o.pk, val), [dive.pk])
val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
self.assertEqual(map(lambda o: o.pk, val), [dive.pk])
def test_select_related(self):
"Database assignment is retained if an object is retrieved with select_related()"
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
# Retrieve the Person using select_related()
book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")
# The editor instance should have a db state
self.assertEqual(book.editor._state.db, 'other')
    def test_subquery(self):
        """Make sure as_sql works with subqueries and master/slave."""
        # The inner queryset is explicitly pinned to 'other'...
        sub = Person.objects.using('other').filter(name='fff')
        # ...while the outer queryset has no explicit database.
        qs = Book.objects.filter(editor__in=sub)
        # When you call __str__ on the query object, it doesn't know about using
        # so it falls back to the default. If the subquery explicitly uses a
        # different database, an error should be raised.
        self.assertRaises(ValueError, str, qs.query)
        # Evaluating the query shouldn't work, either
        try:
            for obj in qs:
                pass
            self.fail('Iterating over query should raise ValueError')
        except ValueError:
            pass
    def test_related_manager(self):
        "Related managers return managers, not querysets"
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        # extra_arg is removed by the BookManager's implementation of
        # create(); but the BookManager's implementation won't get called
        # unless edited returns a Manager, not a queryset
        # Reverse m2m accessor (book_set) ...
        mark.book_set.create(title="Dive into Python",
                             published=datetime.date(2009, 5, 4),
                             extra_arg=True)
        mark.book_set.get_or_create(title="Dive into Python",
                                    published=datetime.date(2009, 5, 4),
                                    extra_arg=True)
        # ... and reverse FK accessor (edited) must both go through the manager
        mark.edited.create(title="Dive into Water",
                           published=datetime.date(2009, 5, 4),
                           extra_arg=True)
        mark.edited.get_or_create(title="Dive into Water",
                                  published=datetime.date(2009, 5, 4),
                                  extra_arg=True)
class TestRouter(object):
    """Test router with vaguely master/slave behavior.

    Reads prefer the database an instance already lives on (falling back
    to 'other'); writes always go to the default database. The databases
    aren't assumed to propagate changes to each other.
    """
    def db_for_read(self, model, instance=None, **hints):
        # Stick with the hinted instance's database when it has one.
        hinted = instance._state.db if instance else None
        return hinted or 'other'
    def db_for_write(self, model, **hints):
        return DEFAULT_DB_ALIAS
    def allow_relation(self, obj1, obj2, **hints):
        # Relations are allowed as long as both ends live on a known database.
        return all(obj._state.db in ('default', 'other') for obj in (obj1, obj2))
    def allow_syncdb(self, db, model):
        # Universal synchronizer: every model may appear on every database.
        return True
class AuthRouter(object):
    """A router to control all database operations on models in
    the contrib.auth application.

    Reads go to 'default' and writes to 'other' so tests can tell a read
    request apart from a write request for auth objects; for all other
    apps the router expresses no opinion (returns None).
    """
    def db_for_read(self, model, **hints):
        "Point all read operations on auth models to 'default'"
        return 'default' if model._meta.app_label == 'auth' else None
    def db_for_write(self, model, **hints):
        "Point all write operations on auth models to 'other'"
        return 'other' if model._meta.app_label == 'auth' else None
    def allow_relation(self, obj1, obj2, **hints):
        "Allow any relation if a model in Auth is involved"
        if 'auth' in (obj1._meta.app_label, obj2._meta.app_label):
            return True
        return None
    def allow_syncdb(self, db, model):
        "Make sure the auth app only appears on the 'other' db"
        is_auth = model._meta.app_label == 'auth'
        if db == 'other':
            return is_auth
        if is_auth:
            return False
        return None
class WriteRouter(object):
    """A router that only expresses an opinion on writes: everything
    goes to the 'writer' database. All other router methods are left
    undefined so the router chain falls through to the next router."""
    def db_for_write(self, model, **hints):
        return 'writer'
class RouterTestCase(TestCase):
multi_db = True
    def setUp(self):
        # Make the 'other' database appear to be a slave of the 'default'
        # by installing TestRouter for the duration of each test. The
        # previous router chain is stashed so tearDown can restore it.
        self.old_routers = router.routers
        router.routers = [TestRouter()]
    def tearDown(self):
        # Restore the router chain saved in setUp so later tests see the
        # 'other' database as an independent database again.
        router.routers = self.old_routers
    def test_db_selection(self):
        "Check that querysets obey the router for db suggestions"
        # With TestRouter installed, reads are suggested to go to 'other'
        self.assertEqual(Book.objects.db, 'other')
        self.assertEqual(Book.objects.all().db, 'other')
        # ... but an explicit using()/db_manager() always wins over the router
        self.assertEqual(Book.objects.using('default').db, 'default')
        self.assertEqual(Book.objects.db_manager('default').db, 'default')
        self.assertEqual(Book.objects.db_manager('default').all().db, 'default')
    def test_syncdb_selection(self):
        "Synchronization behavior is predictable"
        # TestRouter alone allows every model on every database
        self.assertTrue(router.allow_syncdb('default', User))
        self.assertTrue(router.allow_syncdb('default', Book))
        self.assertTrue(router.allow_syncdb('other', User))
        self.assertTrue(router.allow_syncdb('other', Book))
        # Add the auth router to the chain.
        # TestRouter is a universal synchronizer, so it should have no effect.
        router.routers = [TestRouter(), AuthRouter()]
        self.assertTrue(router.allow_syncdb('default', User))
        self.assertTrue(router.allow_syncdb('default', Book))
        self.assertTrue(router.allow_syncdb('other', User))
        self.assertTrue(router.allow_syncdb('other', Book))
        # Now check what happens if the router order is the other way around:
        # the first router with an opinion wins, so AuthRouter now decides.
        router.routers = [AuthRouter(), TestRouter()]
        self.assertFalse(router.allow_syncdb('default', User))
        self.assertTrue(router.allow_syncdb('default', Book))
        self.assertTrue(router.allow_syncdb('other', User))
        self.assertFalse(router.allow_syncdb('other', Book))
    def test_partial_router(self):
        "A router can choose to implement a subset of methods"
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        # First check the baseline behavior (TestRouter only, from setUp).
        self.assertEqual(router.db_for_read(User), 'other')
        self.assertEqual(router.db_for_read(Book), 'other')
        self.assertEqual(router.db_for_write(User), 'default')
        self.assertEqual(router.db_for_write(Book), 'default')
        self.assertTrue(router.allow_relation(dive, dive))
        self.assertTrue(router.allow_syncdb('default', User))
        self.assertTrue(router.allow_syncdb('default', Book))
        # WriteRouter only defines db_for_write; every other decision must
        # fall through to AuthRouter and then TestRouter.
        router.routers = [WriteRouter(), AuthRouter(), TestRouter()]
        self.assertEqual(router.db_for_read(User), 'default')
        self.assertEqual(router.db_for_read(Book), 'other')
        self.assertEqual(router.db_for_write(User), 'writer')
        self.assertEqual(router.db_for_write(Book), 'writer')
        self.assertTrue(router.allow_relation(dive, dive))
        self.assertFalse(router.allow_syncdb('default', User))
        self.assertTrue(router.allow_syncdb('default', Book))
def test_database_routing(self):
    """Queries follow the installed router (reads to 'other', writes to
    'default'), and an explicit using() always overrides the routing."""
    marty = Person.objects.using('default').create(name="Marty Alchin")
    pro = Book.objects.using('default').create(title="Pro Django",
                                               published=datetime.date(2008, 12, 16),
                                               editor=marty)
    pro.authors = [marty]
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    # An update query will be routed to the default database
    Book.objects.filter(title='Pro Django').update(pages=200)
    try:
        # By default, the get query will be directed to 'other'
        Book.objects.get(title='Pro Django')
        self.fail("Shouldn't be able to find the book")
    except Book.DoesNotExist:
        pass
    # But the same query issued explicitly at a database will work.
    pro = Book.objects.using('default').get(title='Pro Django')
    # Check that the update worked.
    self.assertEqual(pro.pages, 200)
    # An update query with an explicit using clause will be routed
    # to the requested database.
    Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
    self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)
    # Related object queries stick to the same database
    # as the original object, regardless of the router
    self.assertEqual(list(pro.authors.values_list('name', flat=True)), [u'Marty Alchin'])
    self.assertEqual(pro.editor.name, u'Marty Alchin')
    # get_or_create is a special case. The get needs to be targeted at
    # the write database in order to avoid potential transaction
    # consistency problems
    book, created = Book.objects.get_or_create(title="Pro Django")
    self.assertFalse(created)
    book, created = Book.objects.get_or_create(title="Dive Into Python",
                                               defaults={'published': datetime.date(2009, 5, 4)})
    self.assertTrue(created)
    # Check the head count of objects
    self.assertEqual(Book.objects.using('default').count(), 2)
    self.assertEqual(Book.objects.using('other').count(), 1)
    # If a database isn't specified, the read database is used
    self.assertEqual(Book.objects.count(), 1)
    # A delete query will also be routed to the default database
    Book.objects.filter(pages__gt=150).delete()
    # The default database has lost the book.
    self.assertEqual(Book.objects.using('default').count(), 1)
    self.assertEqual(Book.objects.using('other').count(), 1)
def test_foreign_key_cross_database_protection(self):
    "Foreign keys can cross databases if they two databases have a common source"
    # Create a book and author on the default database
    pro = Book.objects.using('default').create(title="Pro Django",
                                               published=datetime.date(2008, 12, 16))
    marty = Person.objects.using('default').create(name="Marty Alchin")
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    # Set a foreign key with an object from a different database
    try:
        dive.editor = marty
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments of original objects haven't changed...
    self.assertEqual(marty._state.db, 'default')
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(mark._state.db, 'other')
    # ... but they will when the affected object is saved.
    dive.save()
    self.assertEqual(dive._state.db, 'default')
    # ...and the source database now has a copy of any object saved
    try:
        Book.objects.using('default').get(title='Dive into Python').delete()
    except Book.DoesNotExist:
        self.fail('Source database should have a copy of saved object')
    # This isn't a real master-slave database, so restore the original from other
    dive = Book.objects.using('other').get(title='Dive into Python')
    self.assertEqual(dive._state.db, 'other')
    # Set a foreign key set with an object from a different database
    try:
        marty.edited = [pro, dive]
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Assignment implies a save, so database assignments of original objects have changed...
    self.assertEqual(marty._state.db, 'default')
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(dive._state.db, 'default')
    self.assertEqual(mark._state.db, 'other')
    # ...and the source database now has a copy of any object saved
    try:
        Book.objects.using('default').get(title='Dive into Python').delete()
    except Book.DoesNotExist:
        self.fail('Source database should have a copy of saved object')
    # This isn't a real master-slave database, so restore the original from other
    dive = Book.objects.using('other').get(title='Dive into Python')
    self.assertEqual(dive._state.db, 'other')
    # Add to a foreign key set with an object from a different database
    try:
        marty.edited.add(dive)
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Add implies a save, so database assignments of original objects have changed...
    self.assertEqual(marty._state.db, 'default')
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(dive._state.db, 'default')
    self.assertEqual(mark._state.db, 'other')
    # ...and the source database now has a copy of any object saved
    try:
        Book.objects.using('default').get(title='Dive into Python').delete()
    except Book.DoesNotExist:
        self.fail('Source database should have a copy of saved object')
    # This isn't a real master-slave database, so restore the original from other
    dive = Book.objects.using('other').get(title='Dive into Python')
    # If you assign a FK object when the base object hasn't
    # been saved yet, you implicitly assign the database for the
    # base object.
    chris = Person(name="Chris Mills")
    html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
    # initially, no db assigned
    self.assertEqual(chris._state.db, None)
    self.assertEqual(html5._state.db, None)
    # old object comes from 'other', so the new object is set to use the
    # source of 'other'...
    self.assertEqual(dive._state.db, 'other')
    dive.editor = chris
    html5.editor = mark
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(mark._state.db, 'other')
    self.assertEqual(chris._state.db, 'default')
    self.assertEqual(html5._state.db, 'default')
    # This also works if you assign the FK in the constructor
    water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
    self.assertEqual(water._state.db, 'default')
    # For the remainder of this test, create a copy of 'mark' in the
    # 'default' database to prevent integrity errors on backends that
    # don't defer constraints checks until the end of the transaction
    mark.save(using='default')
    # This moved 'mark' in the 'default' database, move it back in 'other'
    mark.save(using='other')
    self.assertEqual(mark._state.db, 'other')
    # If you create an object through a FK relation, it will be
    # written to the write database, even if the original object
    # was on the read database
    cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
    self.assertEqual(cheesecake._state.db, 'default')
    # Same goes for get_or_create, regardless of whether getting or creating
    cheesecake, created = mark.edited.get_or_create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
    self.assertEqual(cheesecake._state.db, 'default')
    puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15))
    self.assertEqual(puddles._state.db, 'default')
def test_m2m_cross_database_protection(self):
    "M2M relations can cross databases if the database share a source"
    # Create books and authors on the inverse to the usual database
    pro = Book.objects.using('other').create(pk=1, title="Pro Django",
                                             published=datetime.date(2008, 12, 16))
    marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
    dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
                                                published=datetime.date(2009, 5, 4))
    mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")
    # Now save back onto the usual database.
    # This simulates master/slave - the objects exist on both database,
    # but the _state.db is as it is for all other tests.
    pro.save(using='default')
    marty.save(using='default')
    dive.save(using='other')
    mark.save(using='other')
    # Check that we have 2 of both types of object on both databases
    self.assertEqual(Book.objects.using('default').count(), 2)
    self.assertEqual(Book.objects.using('other').count(), 2)
    self.assertEqual(Person.objects.using('default').count(), 2)
    self.assertEqual(Person.objects.using('other').count(), 2)
    # Set a m2m set with an object from a different database
    try:
        marty.book_set = [pro, dive]
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments don't change
    self.assertEqual(marty._state.db, 'default')
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(mark._state.db, 'other')
    # All m2m relations should be saved on the default database
    self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
    # Reset relations
    Book.authors.through.objects.using('default').delete()
    # Add to an m2m with an object from a different database
    try:
        marty.book_set.add(dive)
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments don't change
    self.assertEqual(marty._state.db, 'default')
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(mark._state.db, 'other')
    # All m2m relations should be saved on the default database
    self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
    # Reset relations
    Book.authors.through.objects.using('default').delete()
    # Set a reverse m2m with an object from a different database
    try:
        dive.authors = [mark, marty]
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments don't change
    self.assertEqual(marty._state.db, 'default')
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(mark._state.db, 'other')
    # All m2m relations should be saved on the default database
    self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
    # Reset relations
    Book.authors.through.objects.using('default').delete()
    self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
    # Add to a reverse m2m with an object from a different database
    try:
        dive.authors.add(marty)
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments don't change
    self.assertEqual(marty._state.db, 'default')
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(mark._state.db, 'other')
    # All m2m relations should be saved on the default database
    self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
    # If you create an object through a M2M relation, it will be
    # written to the write database, even if the original object
    # was on the read database
    alice = dive.authors.create(name='Alice')
    self.assertEqual(alice._state.db, 'default')
    # Same goes for get_or_create, regardless of whether getting or creating
    alice, created = dive.authors.get_or_create(name='Alice')
    self.assertEqual(alice._state.db, 'default')
    bob, created = dive.authors.get_or_create(name='Bob')
    self.assertEqual(bob._state.db, 'default')
def test_o2o_cross_database_protection(self):
    # NOTE: the original docstring claimed this "raises an error", but the
    # body asserts the opposite -- assignment succeeds when the databases
    # share a common source.
    "One-to-one relations can cross databases if the databases share a source"
    # Create a user and profile on the default database
    alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
    # Create a user and profile on the other database
    bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
    # Set a one-to-one relation with an object from a different database
    alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
    try:
        bob.userprofile = alice_profile
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments of original objects haven't changed...
    self.assertEqual(alice._state.db, 'default')
    self.assertEqual(alice_profile._state.db, 'default')
    self.assertEqual(bob._state.db, 'other')
    # ... but they will when the affected object is saved.
    bob.save()
    self.assertEqual(bob._state.db, 'default')
def test_generic_key_cross_database_protection(self):
    "Generic Key operations can span databases if they share a source"
    # Content types must exist on both databases for GFKs to resolve.
    copy_content_types_from_default_to_other()
    # Create a book and author on the default database
    pro = Book.objects.using('default'
                             ).create(title="Pro Django", published=datetime.date(2008, 12, 16))
    review1 = Review.objects.using('default'
                                   ).create(source="Python Monthly", content_object=pro)
    # Create a book and author on the other database
    dive = Book.objects.using('other'
                              ).create(title="Dive into Python", published=datetime.date(2009, 5, 4))
    review2 = Review.objects.using('other'
                                   ).create(source="Python Weekly", content_object=dive)
    # Set a generic foreign key with an object from a different database
    try:
        review1.content_object = dive
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments of original objects haven't changed...
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(review1._state.db, 'default')
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(review2._state.db, 'other')
    # ... but they will when the affected object is saved.
    dive.save()
    self.assertEqual(review1._state.db, 'default')
    self.assertEqual(dive._state.db, 'default')
    # ...and the source database now has a copy of any object saved
    try:
        Book.objects.using('default').get(title='Dive into Python').delete()
    except Book.DoesNotExist:
        self.fail('Source database should have a copy of saved object')
    # This isn't a real master-slave database, so restore the original from other
    dive = Book.objects.using('other').get(title='Dive into Python')
    self.assertEqual(dive._state.db, 'other')
    # Add to a generic foreign key set with an object from a different database
    try:
        dive.reviews.add(review1)
    except ValueError:
        self.fail("Assignment across master/slave databases with a common source should be ok")
    # Database assignments of original objects haven't changed...
    self.assertEqual(pro._state.db, 'default')
    self.assertEqual(review1._state.db, 'default')
    self.assertEqual(dive._state.db, 'other')
    self.assertEqual(review2._state.db, 'other')
    # ... but they will when the affected object is saved.
    dive.save()
    self.assertEqual(dive._state.db, 'default')
    # ...and the source database now has a copy of any object saved
    try:
        Book.objects.using('default').get(title='Dive into Python').delete()
    except Book.DoesNotExist:
        self.fail('Source database should have a copy of saved object')
    # BUT! if you assign a FK object when the base object hasn't
    # been saved yet, you implicitly assign the database for the
    # base object.
    review3 = Review(source="Python Daily")
    # initially, no db assigned
    self.assertEqual(review3._state.db, None)
    # Dive comes from 'other', so review3 is set to use the source of 'other'...
    review3.content_object = dive
    self.assertEqual(review3._state.db, 'default')
    # If you create an object through a M2M relation, it will be
    # written to the write database, even if the original object
    # was on the read database
    dive = Book.objects.using('other').get(title='Dive into Python')
    nyt = dive.reviews.create(source="New York Times", content_object=dive)
    self.assertEqual(nyt._state.db, 'default')
def test_m2m_managers(self):
    "M2M relations are represented by managers, and can be controlled like managers"
    # Removed two unused local bindings (pro_authors, authors) that never
    # contributed to any assertion.
    pro = Book.objects.using('other').create(pk=1, title="Pro Django",
                                             published=datetime.date(2008, 12, 16))
    marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
    # Forward M2M manager follows the originating object's database, and
    # db_manager() can redirect it (including chained querysets).
    self.assertEqual(pro.authors.db, 'other')
    self.assertEqual(pro.authors.db_manager('default').db, 'default')
    self.assertEqual(pro.authors.db_manager('default').all().db, 'default')
    # The reverse M2M manager behaves the same way.
    self.assertEqual(marty.book_set.db, 'other')
    self.assertEqual(marty.book_set.db_manager('default').db, 'default')
    self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')
def test_foreign_key_managers(self):
    "FK reverse relations are represented by managers, and can be controlled like managers"
    marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
    pro = Book.objects.using('other').create(pk=1, title="Pro Django",
                                             published=datetime.date(2008, 12, 16),
                                             editor=marty)
    # The reverse FK manager inherits the database of its instance;
    # db_manager() redirects it, and the redirection survives chaining.
    self.assertEqual(marty.edited.db, 'other')
    self.assertEqual(marty.edited.db_manager('default').db, 'default')
    self.assertEqual(marty.edited.db_manager('default').all().db, 'default')
def test_generic_key_managers(self):
    "Generic key relations are represented by managers, and can be controlled like managers"
    copy_content_types_from_default_to_other()
    pro = Book.objects.using('other').create(title="Pro Django",
                                             published=datetime.date(2008, 12, 16))
    # The Review only needs to exist for the reverse relation to have
    # content; the binding was unused, so it is dropped.
    Review.objects.using('other').create(source="Python Monthly",
                                         content_object=pro)
    # The reverse generic-key manager inherits the instance's database,
    # and db_manager() redirects it (including chained querysets).
    self.assertEqual(pro.reviews.db, 'other')
    self.assertEqual(pro.reviews.db_manager('default').db, 'default')
    self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')
def test_subquery(self):
    """Make sure as_sql works with subqueries and master/slave."""
    # Create a book and author on the other database
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    # The book exists only so the subquery has a row to match.
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4),
                                              editor=mark)
    sub = Person.objects.filter(name='Mark Pilgrim')
    qs = Book.objects.filter(editor__in=sub)
    # When you call __str__ on the query object, it doesn't know about using
    # so it falls back to the default. Don't let routing instructions
    # force the subquery to an incompatible database.
    str(qs.query)
    # If you evaluate the query, it should work, running on 'other'
    self.assertEqual(list(qs.values_list('title', flat=True)), [u'Dive into Python'])
class AuthTestCase(TestCase):
    """Tests that contrib.auth managers honor database routing hints."""
    multi_db = True

    def setUp(self):
        # Make the 'other' database appear to be a slave of the 'default'
        self.old_routers = router.routers
        router.routers = [AuthRouter()]

    def tearDown(self):
        # Restore the 'other' database as an independent database
        router.routers = self.old_routers

    def test_auth_manager(self):
        "The methods on the auth manager obey database hints"
        # Create one user using default allocation policy
        User.objects.create_user('alice', 'alice@example.com')
        # Create another user, explicitly specifying the database
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')
        # The first user only exists on the other database (AuthRouter
        # sends auth writes there).
        alice = User.objects.using('other').get(username='alice')
        self.assertEqual(alice.username, 'alice')
        self.assertEqual(alice._state.db, 'other')
        self.assertRaises(User.DoesNotExist, User.objects.using('default').get, username='alice')
        # The second user only exists on the default database
        bob = User.objects.using('default').get(username='bob')
        self.assertEqual(bob.username, 'bob')
        self.assertEqual(bob._state.db, 'default')
        self.assertRaises(User.DoesNotExist, User.objects.using('other').get, username='bob')
        # That is... there is one user on each database
        self.assertEqual(User.objects.using('default').count(), 1)
        self.assertEqual(User.objects.using('other').count(), 1)

    def test_dumpdata(self):
        "Check that dumpdata honors allow_syncdb restrictions on the router"
        User.objects.create_user('alice', 'alice@example.com')
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')
        # Check that dumping the default database doesn't try to include auth
        # because allow_syncdb prohibits auth on default
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '[]')
        # Check that dumping the other database does include auth
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertTrue('"email": "alice@example.com",' in command_output)
# Sentinel distinguishing "setting absent" from "setting set to None".
_missing = object()


class UserProfileTestCase(TestCase):
    """Profiles attach to users on whichever database the user lives on."""

    def setUp(self):
        # Stash the current AUTH_PROFILE_MODULE (or the sentinel if unset)
        # so tearDown can restore the exact prior state.
        self.old_auth_profile_module = getattr(settings, 'AUTH_PROFILE_MODULE', _missing)
        settings.AUTH_PROFILE_MODULE = 'multiple_database.UserProfile'

    def tearDown(self):
        if self.old_auth_profile_module is _missing:
            del settings.AUTH_PROFILE_MODULE
        else:
            settings.AUTH_PROFILE_MODULE = self.old_auth_profile_module

    def test_user_profiles(self):
        # Users (and therefore their profiles) live on different databases;
        # get_profile() must find each profile on its user's database.
        alice = User.objects.create_user('alice', 'alice@example.com')
        bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
        alice_profile = UserProfile(user=alice, flavor='chocolate')
        alice_profile.save()
        bob_profile = UserProfile(user=bob, flavor='crunchy frog')
        bob_profile.save()
        self.assertEqual(alice.get_profile().flavor, 'chocolate')
        self.assertEqual(bob.get_profile().flavor, 'crunchy frog')
class AntiPetRouter(object):
    """Syncdb-only router that quarantines the Pet model on 'other'.

    It expresses no opinion on reads, writes, or relations; its single
    hook confines Pet to the 'other' database and every other model to
    every database except 'other'.
    """

    def allow_syncdb(self, db, model):
        "Make sure the auth app only appears on the 'other' db"
        is_pet = model._meta.object_name == 'Pet'
        # Pet syncs only on 'other'; everything else syncs anywhere else.
        return is_pet if db == 'other' else not is_pet
class FixtureTestCase(TestCase):
    """Fixture loading respects router allow_syncdb decisions."""
    multi_db = True
    fixtures = ['multidb-common', 'multidb']

    def setUp(self):
        # Install the anti-pet router
        self.old_routers = router.routers
        router.routers = [AntiPetRouter()]

    def tearDown(self):
        # Restore the 'other' database as an independent database
        router.routers = self.old_routers

    def test_fixture_loading(self):
        "Multi-db fixtures are loaded correctly"
        # Check that "Pro Django" exists on the default database, but not on other database
        try:
            Book.objects.get(title="Pro Django")
            Book.objects.using('default').get(title="Pro Django")
        except Book.DoesNotExist:
            self.fail('"Pro Django" should exist on default database')
        self.assertRaises(Book.DoesNotExist,
                          Book.objects.using('other').get,
                          title="Pro Django"
                          )
        # Check that "Dive into Python" exists on the other database, but
        # not on the default database (original comment had this reversed).
        try:
            Book.objects.using('other').get(title="Dive into Python")
        except Book.DoesNotExist:
            self.fail('"Dive into Python" should exist on other database')
        self.assertRaises(Book.DoesNotExist,
                          Book.objects.get,
                          title="Dive into Python"
                          )
        self.assertRaises(Book.DoesNotExist,
                          Book.objects.using('default').get,
                          title="Dive into Python"
                          )
        # Check that "Definitive Guide" exists on the both databases
        try:
            Book.objects.get(title="The Definitive Guide to Django")
            Book.objects.using('default').get(title="The Definitive Guide to Django")
            Book.objects.using('other').get(title="The Definitive Guide to Django")
        except Book.DoesNotExist:
            self.fail('"The Definitive Guide to Django" should exist on both databases')

    def test_pseudo_empty_fixtures(self):
        "A fixture can contain entries, but lead to nothing in the database; this shouldn't raise an error (ref #14068)"
        new_io = StringIO()
        management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
        command_output = new_io.getvalue().strip()
        # No objects will actually be loaded
        self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")
class PickleQuerySetTestCase(TestCase):
    """QuerySets keep their database alias across a pickle round-trip."""
    multi_db = True

    def test_pickling(self):
        # Seed each configured database so the queryset is non-trivial.
        for db in connections:
            Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4))
        qs = Book.objects.all()
        # Unpickling must preserve the queryset's database alias.
        self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)
class DatabaseReceiver(object):
    """Signal receiver that records the ``using`` database alias.

    Each call stores the alias passed via the signal's ``using`` keyword
    argument on ``self._database`` for later inspection by the tests for
    the database argument in signals (#13552).
    """

    def __call__(self, signal, sender, **kwargs):
        # Remember which database the signal reported firing against.
        alias = kwargs['using']
        self._database = alias
class WriteToOtherRouter(object):
    """Database router that directs every write to the 'other' alias.

    Reads, relations, and syncdb are left to other routers in the chain.
    """

    def db_for_write(self, model, **hints):
        # Unconditional: model and hints are irrelevant to the decision.
        target = "other"
        return target
class SignalTests(TestCase):
    """Model signals report the database an operation ran against (#13552)."""
    multi_db = True

    def setUp(self):
        self.old_routers = router.routers

    def tearDown(self):
        router.routers = self.old_routers

    def _write_to_other(self):
        "Sends all writes to 'other'."
        router.routers = [WriteToOtherRouter()]

    def _write_to_default(self):
        "Sends all writes to the default DB"
        router.routers = self.old_routers

    def test_database_arg_save_and_delete(self):
        """
        Tests that the pre/post_save signal contains the correct database.
        (#13552)
        """
        # Make some signal receivers
        pre_save_receiver = DatabaseReceiver()
        post_save_receiver = DatabaseReceiver()
        pre_delete_receiver = DatabaseReceiver()
        post_delete_receiver = DatabaseReceiver()
        # Make model and connect receivers
        signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
        signals.post_save.connect(sender=Person, receiver=post_save_receiver)
        signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
        signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
        p = Person.objects.create(name='Darth Vader')
        # Save and test receivers got calls
        p.save()
        self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
        # Delete, and test
        p.delete()
        self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
        # Save again to a different database
        p.save(using="other")
        self.assertEqual(pre_save_receiver._database, "other")
        self.assertEqual(post_save_receiver._database, "other")
        # Delete, and test
        p.delete(using="other")
        self.assertEqual(pre_delete_receiver._database, "other")
        self.assertEqual(post_delete_receiver._database, "other")

    def test_database_arg_m2m(self):
        """
        Test that the m2m_changed signal has a correct database arg (#13552)
        """
        # Make a receiver
        receiver = DatabaseReceiver()
        # Connect it
        signals.m2m_changed.connect(receiver=receiver)
        # Create the models that will be used for the tests
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        # Create a copy of the models on the 'other' database to prevent
        # integrity errors on backends that don't defer constraints checks
        Book.objects.using('other').create(pk=b.pk, title=b.title,
                                           published=b.published)
        Person.objects.using('other').create(pk=p.pk, name=p.name)
        # Test addition: each mutation is checked once with default routing
        # and once with all writes redirected to 'other'.
        b.authors.add(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        self._write_to_other()
        b.authors.add(p)
        self._write_to_default()
        self.assertEqual(receiver._database, "other")
        # Test removal
        b.authors.remove(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        self._write_to_other()
        b.authors.remove(p)
        self._write_to_default()
        self.assertEqual(receiver._database, "other")
        # Test addition in reverse
        p.book_set.add(b)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        self._write_to_other()
        p.book_set.add(b)
        self._write_to_default()
        self.assertEqual(receiver._database, "other")
        # Test clearing
        b.authors.clear()
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        self._write_to_other()
        b.authors.clear()
        self._write_to_default()
        self.assertEqual(receiver._database, "other")
class AttributeErrorRouter(object):
    """Router whose read and write hooks always raise AttributeError.

    Used to verify that ConnectionRouter lets a router's AttributeError
    propagate rather than swallowing it as "no opinion".
    """

    def _boom(self):
        # Shared failure path for both hooks.
        raise AttributeError

    def db_for_read(self, model, **hints):
        self._boom()

    def db_for_write(self, model, **hints):
        self._boom()
class RouterAttributeErrorTestCase(TestCase):
    """AttributeError raised by a router must bubble up to the caller."""
    multi_db = True

    def setUp(self):
        self.old_routers = router.routers
        router.routers = [AttributeErrorRouter()]

    def tearDown(self):
        router.routers = self.old_routers

    def test_attribute_error_read(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        router.routers = []  # Reset routers so we can save a Book instance
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        router.routers = [AttributeErrorRouter()]  # Install our router
        self.assertRaises(AttributeError, Book.objects.get, pk=b.pk)

    def test_attribute_error_save(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        dive = Book()
        dive.title = "Dive into Python"
        dive.published = datetime.date(2009, 5, 4)
        self.assertRaises(AttributeError, dive.save)

    def test_attribute_error_delete(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        router.routers = []  # Reset routers so we can save our Book, Person instances
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        b.authors = [p]
        b.editor = p
        router.routers = [AttributeErrorRouter()]  # Install our router
        self.assertRaises(AttributeError, b.delete)

    def test_attribute_error_m2m(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        router.routers = []  # Reset routers so we can save our Book, Person instances
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        router.routers = [AttributeErrorRouter()]  # Install our router
        self.assertRaises(AttributeError, setattr, b, 'authors', [p])
class ModelMetaRouter(object):
    """Router asserting it is always handed real model classes.

    db_for_write raises ValueError when given an object without ``_meta``
    and otherwise expresses no opinion (returns None implicitly).
    """

    def db_for_write(self, model, **hints):
        if hasattr(model, '_meta'):
            return None
        raise ValueError
class RouterModelArgumentTestCase(TestCase):
    """Every ORM operation must hand the router a real model class.

    ModelMetaRouter raises ValueError on anything without _meta, so these
    tests pass as long as no operation raises.
    """
    multi_db = True

    def setUp(self):
        self.old_routers = router.routers
        router.routers = [ModelMetaRouter()]

    def tearDown(self):
        router.routers = self.old_routers

    def test_m2m_collection(self):
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        # test add
        b.authors.add(p)
        # test remove
        b.authors.remove(p)
        # test clear
        b.authors.clear()
        # test setattr
        b.authors = [p]
        # test M2M collection
        b.delete()

    def test_foreignkey_collection(self):
        person = Person.objects.create(name='Bob')
        pet = Pet.objects.create(owner=person, name='Wart')
        # test related FK collection
        person.delete()
| bsd-3-clause |
rimbalinux/MSISDNArea | dbindexer/filter.py | 3 | 5061 | from django.db import models
import re
from copy import deepcopy

from djangotoolbox.fields import ListField
class ExtraFieldLookup():
    """Base class for dbindexer index definitions.

    Each instance backs one lookup type by materializing a denormalized
    "index" column (``field_to_add``) on the model, against which the
    otherwise-unsupported lookup can be rewritten.
    """

    def __init__(self, model=None, field_name=None, lookup_type=None,
                 field_to_add=models.CharField(max_length=500, editable=False, null=True)):
        # NOTE(review): the default field instance is a shared mutable
        # default argument; create_index() deepcopies it before attaching,
        # which avoids reusing one field object across models -- confirm
        # no other code path attaches field_to_add directly.
        self.model = model
        self.field_name = field_name
        self.column_name = None
        # Column name can only be resolved once both model and field are known.
        if model and field_name:
            self.column_name = model._meta.get_field(self.field_name).column
        self.field_to_add = field_to_add
        self.lookup_type = lookup_type

    def contribute(self, model, field_name):
        # Late binding: fill in model/field after construction (mirrors
        # the optional __init__ arguments).
        self.model = model
        self.field_name = field_name
        if model and field_name:
            self.column_name = model._meta.get_field(self.field_name).column

    @property
    def index_name(self):
        # Name of the generated index column, unique per (column, lookup).
        return 'idxf_%s_l_%s' % (self.column_name, self.lookup_type)

    def create_index(self, model):
        # Deepcopy so each model gets its own field instance (Django
        # fields are stateful once contributed to a class).
        index_field = deepcopy(self.field_to_add)
        model.add_to_class(self.index_name, index_field)

    def convert_lookup(self, value, annotation):
        # Subclass hook: rewrite (lookup_type, value) for the index column.
        pass

    def convert_value(self, value):
        # Subclass hook: compute the denormalized value to store.
        pass
class DateLookup(ExtraFieldLookup):
    """Base for lookups that index one date component as an integer."""

    def __init__(self, model, field_name, lookup_type):
        # Bug fix: super() must be anchored at this class, not its parent.
        # The old super(ExtraFieldLookup, self) resolved past
        # ExtraFieldLookup to object, so object.__init__ received four
        # extra arguments and the base initializer never ran.
        super(DateLookup, self).__init__(model, field_name,
            lookup_type, models.IntegerField(editable=False, null=True))

    def convert_lookup(self, value, annotation):
        # The extracted component is stored verbatim, so the rewritten
        # lookup is a plain equality test against the index column.
        return 'exact', value
class Day(DateLookup):
    """Indexes the day-of-month component of a date field (__day)."""

    def __init__(self, model, field_name):
        # Bug fix: was super(DateLookup, self), which skipped
        # DateLookup.__init__ entirely and lost the IntegerField index
        # column (falling back to the base CharField default).
        super(Day, self).__init__(model, field_name, 'day')

    def convert_value(self, value):
        """Return the day component (1-31) of *value*."""
        return value.day
class Month(DateLookup):
    """Indexes the month component of a date field (__month)."""

    def __init__(self, model, field_name):
        # Bug fix: was super(DateLookup, self), which skipped
        # DateLookup.__init__ and lost the IntegerField index column.
        super(Month, self).__init__(model, field_name, 'month')

    def convert_value(self, value):
        """Return the month component (1-12) of *value*."""
        return value.month
class Year(DateLookup):
def __init__(self, model, field_name):
super(DateLookup, self).__init__(model, field_name, 'year')
def convert_value(self, value):
return value.year
class Weekday(DateLookup):
def __init__(self, model, field_name):
super(DateLookup, self).__init__(model, field_name, 'week_day')
def convert_value(self, value):
return value.isoweekday()
class RegexFilter(ExtraFieldLookup):
    """Indexes a field so a fixed regex (given at index time) can be matched.

    The lookup type is 'iregex' when the supplied pattern is case-insensitive,
    otherwise 'regex'.
    """
    def __init__(self, model, field_name, regex):
        # Recompile with DOTALL and UNICODE, preserving only the
        # case-insensitivity flag of the caller's pattern.
        self.regex = re.compile(regex.pattern, re.S | re.U | (regex.flags & re.I))
        lookup_type = 'iregex' if self.regex.flags & re.I else 'regex'
        # Bug fix: the original called super(ExtraFieldLookup, self).__init__,
        # which skips the base class and reaches object.__init__ with
        # arguments (TypeError). Route through this class's own MRO instead.
        super(RegexFilter, self).__init__(model, field_name, lookup_type,
            ListField(models.CharField(max_length=256), editable=False, null=True))

    def create_index(self, model):
        # TODO: only create one list field for all regexes for a given model
        # Bug fix: super(ExtraFieldLookup, self).create_index resolved to
        # object, which has no create_index (AttributeError).
        super(RegexFilter, self).create_index(model)

    def convert_lookup(self, value, annotation):
        # Prefix encodes sensitivity: 'i:' for case-insensitive matches.
        return (('exact', ':' + value) if self.lookup_type == 'regex'
                else ('exact', 'i:' + value))

    def convert_value(self, value):
        # NOTE(review): intentionally returns None in the original — regex
        # values appear to be indexed by a separate mechanism; confirm.
        return
class Contains(ExtraFieldLookup):
    """Supports __contains by indexing every suffix of the value.

    A substring match then becomes a 'startswith' against the suffix list.
    """
    def __init__(self, model, field_name):
        # Bug fixes: (1) super(ExtraFieldLookup, self).__init__ skipped the
        # base class and hit object.__init__ with arguments (TypeError);
        # (2) CharField(500) passed 500 positionally as verbose_name — the
        # intended option is max_length.
        super(Contains, self).__init__(model, field_name, 'contains',
            ListField(models.CharField(max_length=500), editable=False, null=True))

    def convert_lookup(self, value, annotation):
        # Any suffix that starts with `value` proves `value` is a substring.
        return 'startswith', value

    def convert_value(self, value):
        return self.contains_indexer(value)

    @classmethod
    def contains_indexer(cls, value):
        # In indexing mode we add all postfixes ('o', 'lo', ..., 'hello')
        result = []
        if value:
            result.extend([value[count:] for count in range(len(value))])
        return result


class Icontains(Contains):
    """Case-insensitive variant of Contains (lower-cases both sides)."""
    def __init__(self, model, field_name):
        # Bug fix: super(Contains, self).__init__ skipped Contains.__init__,
        # so the ListField index column was never configured.
        super(Icontains, self).__init__(model, field_name)
        self.lookup_type = 'icontains'

    def convert_lookup(self, value, annotation):
        return 'startswith', value.lower()

    def convert_value(self, value):
        # Bug fix: super(Contains, self) resolved to ExtraFieldLookup, whose
        # convert_value returns None — iterating it raised TypeError. Use
        # Contains.convert_value (the suffix indexer) and lower-case it.
        return [val.lower() for val in super(Icontains, self).convert_value(value)]
class Iexact(ExtraFieldLookup):
    """Case-insensitive exact match backed by a lower-cased index column."""
    def convert_lookup(self, value, annotation):
        # Compare against the canonical lower-case form stored in the index.
        lowered = value.lower()
        return ('exact', lowered)

    def convert_value(self, value):
        # Index the canonical lower-case form of the value.
        return value.lower()
class Istartswith(ExtraFieldLookup):
    """Case-insensitive prefix match backed by a lower-cased index column."""
    def convert_lookup(self, value, annotation):
        # The index stores lower-cased values, so lower-case the probe too.
        lowered = value.lower()
        return ('startswith', lowered)

    def convert_value(self, value):
        return value.lower()
class Endswith(ExtraFieldLookup):
    """Suffix match implemented as a prefix match on the reversed string."""
    def convert_lookup(self, value, annotation):
        # Reversing both sides turns endswith into startswith.
        return ('startswith', value[::-1])

    def convert_value(self, value):
        # Store the reversed value so the reversed probe can prefix-match it.
        reversed_value = value[::-1]
        return reversed_value
class Iendswith(ExtraFieldLookup):
    """Case-insensitive suffix match: reversed + lower-cased index column."""
    def convert_lookup(self, value, annotation):
        # Reverse for the startswith trick, then lower-case for insensitivity.
        probe = value[::-1].lower()
        return ('startswith', probe)

    def convert_value(self, value):
        return value[::-1].lower()
| bsd-3-clause |
sirkubax/ansible-modules-extras | cloud/misc/virt_net.py | 97 | 16577 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation rendered by ansible-doc (YAML in a string).
DOCUMENTATION = '''
---
module: virt_net
author: "Maciej Delmanowski (@drybjed)"
version_added: "2.0"
short_description: Manage libvirt network configuration
description:
     - Manage I(libvirt) networks.
options:
    name:
        required: true
        aliases: ['network']
        description:
            - name of the network being managed. Note that network must be previously
              defined with xml.
    state:
        required: false
        choices: [ "active", "inactive", "present", "absent" ]
        description:
            - specify which state you want a network to be in.
              If 'active', network will be started.
              If 'present', ensure that network is present but do not change its
              state; if it's missing, you need to specify xml argument.
              If 'inactive', network will be stopped.
              If 'undefined' or 'absent', network will be removed from I(libvirt) configuration.
    command:
        required: false
        choices: [ "define", "create", "start", "stop", "destroy",
                   "undefine", "get_xml", "list_nets", "facts",
                   "info", "status"]
        description:
            - in addition to state management, various non-idempotent commands are available.
              See examples.
    autostart:
        required: false
        choices: ["yes", "no"]
        description:
            - Specify if a given network should be started automatically on system boot.
    uri:
        required: false
        default: "qemu:///system"
        description:
            - libvirt connection uri.
    xml:
        required: false
        description:
            - XML document used with the define command.
requirements:
    - "python >= 2.6"
    - "python-libvirt"
    - "python-lxml"
'''

# Usage examples shown by ansible-doc.
EXAMPLES = '''
# Define a new network
- virt_net: command=define name=br_nat xml='{{ lookup("template", "network/bridge.xml.j2") }}'

# Start a network
- virt_net: command=create name=br_nat

# List available networks
- virt_net: command=list_nets

# Get XML data of a specified network
- virt_net: command=get_xml name=br_nat

# Stop a network
- virt_net: command=destroy name=br_nat

# Undefine a network
- virt_net: command=undefine name=br_nat

# Gather facts about networks
# Facts will be available as 'ansible_libvirt_networks'
- virt_net: command=facts

# Gather information about network managed by 'libvirt' remotely using uri
- virt_net: command=info uri='{{ item }}'
  with_items: libvirt_uris
  register: networks

# Ensure that a network is active (needs to be defined and built first)
- virt_net: state=active name=br_nat

# Ensure that a network is inactive
- virt_net: state=inactive name=br_nat

# Ensure that a given network will be started at boot
- virt_net: autostart=yes name=br_nat

# Disable autostart for a given network
- virt_net: autostart=no name=br_nat
'''
# Return codes produced by core() and interpreted by main().
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
import sys
# Feature detection: record availability instead of failing at import time;
# main() reports a clean error when a dependency is missing.
try:
    import libvirt
except ImportError:
    HAS_VIRT = False
else:
    HAS_VIRT = True
try:
    from lxml import etree
except ImportError:
    HAS_XML = False
else:
    HAS_XML = True
ALL_COMMANDS = []
# Commands that operate on a single named network (require 'name').
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
                  'undefine', 'destroy', 'get_xml', 'define']
# Commands that operate on the hypervisor as a whole (no 'name' needed).
HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
# Translate libvirt's integer flags into the strings reported to users.
ENTRY_STATE_ACTIVE_MAP = {
   0 : "inactive",
   1 : "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
   0 : "no",
   1 : "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
   0 : "no",
   1 : "yes"
}
class EntryNotFound(Exception):
    """Raised when a libvirt network with the requested name does not exist."""
class LibvirtConnection(object):
    """Thin wrapper around a libvirt connection for network operations.

    Mutating methods honour Ansible's check mode: when
    ``module.check_mode`` is set they only report whether a change *would*
    occur (via ``module.exit_json(changed=True)``) instead of performing it.
    """
    def __init__(self, uri, module):
        self.module = module
        conn = libvirt.open(uri)
        if not conn:
            raise Exception("hypervisor connection failure")
        self.conn = conn
    def find_entry(self, entryid):
        # entryid = -1 returns a list of everything
        results = []
        # Get active entries
        for name in self.conn.listNetworks():
            entry = self.conn.networkLookupByName(name)
            results.append(entry)
        # Get inactive entries
        for name in self.conn.listDefinedNetworks():
            entry = self.conn.networkLookupByName(name)
            results.append(entry)
        if entryid == -1:
            return results
        for entry in results:
            if entry.name() == entryid:
                return entry
        raise EntryNotFound("network %s not found" % entryid)
    def create(self, entryid):
        # Start (activate) the named network; in check mode, report a
        # pending change when the network is missing or inactive.
        if not self.module.check_mode:
            return self.find_entry(entryid).create()
        else:
            try:
                state = self.find_entry(entryid).isActive()
            except:
                # Network not found (or lookup failed): creating it would
                # be a change.
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)
    def destroy(self, entryid):
        # Stop (deactivate) the named network.
        if not self.module.check_mode:
            return self.find_entry(entryid).destroy()
        else:
            if self.find_entry(entryid).isActive():
                return self.module.exit_json(changed=True)
    def undefine(self, entryid):
        # Remove the network definition from libvirt.
        if not self.module.check_mode:
            return self.find_entry(entryid).undefine()
        else:
            if not self.find_entry(entryid):
                return self.module.exit_json(changed=True)
    def get_status2(self, entry):
        # Variant taking an already-resolved libvirt network object.
        state = entry.isActive()
        return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
    def get_status(self, entryid):
        # Return "active"/"inactive" for the named network; in check mode a
        # missing network is reported as "inactive".
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
            return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
                return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
            except:
                return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
    def get_uuid(self, entryid):
        return self.find_entry(entryid).UUIDString()
    def get_xml(self, entryid):
        return self.find_entry(entryid).XMLDesc(0)
    def get_forward(self, entryid):
        # Extract <forward mode="..."> from the network XML; raises
        # ValueError when the element is absent.
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/forward')[0].get('mode')
        except:
            raise ValueError('Forward mode not specified')
        return result
    def get_domain(self, entryid):
        # Extract <domain name="..."> from the network XML.
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/domain')[0].get('name')
        except:
            raise ValueError('Domain not specified')
        return result
    def get_macaddress(self, entryid):
        # Extract <mac address="..."> from the network XML.
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/mac')[0].get('address')
        except:
            raise ValueError('MAC address not specified')
        return result
    def get_autostart(self, entryid):
        # Return "yes"/"no" for the autostart flag.
        state = self.find_entry(entryid).autostart()
        return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
    def get_autostart2(self, entryid):
        # Raw autostart flag; in check mode a failed lookup is treated as a
        # pending change.
        if not self.module.check_mode:
            return self.find_entry(entryid).autostart()
        else:
            try:
                return self.find_entry(entryid).autostart()
            except:
                return self.module.exit_json(changed=True)
    def set_autostart(self, entryid, val):
        if not self.module.check_mode:
            return self.find_entry(entryid).setAutostart(val)
        else:
            try:
                state = self.find_entry(entryid).autostart()
            except:
                return self.module.exit_json(changed=True)
            if bool(state) != val:
                return self.module.exit_json(changed=True)
    def get_bridge(self, entryid):
        return self.find_entry(entryid).bridgeName()
    def get_persistent(self, entryid):
        state = self.find_entry(entryid).isPersistent()
        return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
    def define_from_xml(self, entryid, xml):
        # Define a new network from an XML document; in check mode only a
        # missing network counts as a pending change.
        if not self.module.check_mode:
            return self.conn.networkDefineXML(xml)
        else:
            try:
                state = self.find_entry(entryid)
            except:
                return self.module.exit_json(changed=True)
class VirtNetwork(object):
    """High-level facade used by core(); delegates to LibvirtConnection.

    Method names deliberately mirror the user-facing 'command' values so
    core() can dispatch with getattr().
    """
    def __init__(self, uri, module):
        self.module = module
        self.uri = uri
        self.conn = LibvirtConnection(self.uri, self.module)
    def get_net(self, entryid):
        # Return the libvirt network object; raises EntryNotFound.
        return self.conn.find_entry(entryid)
    def list_nets(self, state=None):
        # Names of all networks, optionally filtered by "active"/"inactive".
        results = []
        for entry in self.conn.find_entry(-1):
            if state:
                if state == self.conn.get_status2(entry):
                    results.append(entry.name())
            else:
                results.append(entry.name())
        return results
    def state(self):
        # "name state" strings for every known network.
        results = []
        for entry in self.list_nets():
            state_blurb = self.conn.get_status(entry)
            results.append("%s %s" % (entry,state_blurb))
        return results
    def autostart(self, entryid):
        # Convenience: unconditionally enable autostart.
        return self.conn.set_autostart(entryid, True)
    def get_autostart(self, entryid):
        return self.conn.get_autostart2(entryid)
    def set_autostart(self, entryid, state):
        return self.conn.set_autostart(entryid, state)
    def create(self, entryid):
        return self.conn.create(entryid)
    def start(self, entryid):
        # 'start' is an alias for 'create' in libvirt network terms.
        return self.conn.create(entryid)
    def stop(self, entryid):
        # 'stop' is an alias for 'destroy' (deactivate, keep definition).
        return self.conn.destroy(entryid)
    def destroy(self, entryid):
        return self.conn.destroy(entryid)
    def undefine(self, entryid):
        return self.conn.undefine(entryid)
    def status(self, entryid):
        return self.conn.get_status(entryid)
    def get_xml(self, entryid):
        return self.conn.get_xml(entryid)
    def define(self, entryid, xml):
        return self.conn.define_from_xml(entryid, xml)
    def info(self):
        return self.facts(facts_mode='info')
    def facts(self, facts_mode='facts'):
        # Collect per-network details; optional XML elements (forward mode,
        # domain, MAC) are simply omitted when absent.
        results = dict()
        for entry in self.list_nets():
            results[entry] = dict()
            results[entry]["autostart"] = self.conn.get_autostart(entry)
            results[entry]["persistent"] = self.conn.get_persistent(entry)
            results[entry]["state"] = self.conn.get_status(entry)
            results[entry]["bridge"] = self.conn.get_bridge(entry)
            results[entry]["uuid"] = self.conn.get_uuid(entry)
            try:
                results[entry]["forward_mode"] = self.conn.get_forward(entry)
            except ValueError as e:
                pass
            try:
                results[entry]["domain"] = self.conn.get_domain(entry)
            except ValueError as e:
                pass
            try:
                results[entry]["macaddress"] = self.conn.get_macaddress(entry)
            except ValueError as e:
                pass
        facts = dict()
        if facts_mode == 'facts':
            # 'facts' shape feeds Ansible's fact cache...
            facts["ansible_facts"] = dict()
            facts["ansible_facts"]["ansible_libvirt_networks"] = results
        elif facts_mode == 'info':
            # ...while 'info' is returned as a plain result payload.
            facts['networks'] = results
        return facts
def core(module):
    """Dispatch on the module parameters and perform the requested action.

    Returns a (rc, result) tuple with rc == VIRT_SUCCESS on success; may
    also terminate early via module.fail_json()/exit_json().
    """
    state = module.params.get('state', None)
    name = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)
    autostart = module.params.get('autostart', None)
    v = VirtNetwork(uri, module)
    res = {}
    # Special case: list_nets accepts a state filter.
    if state and command == 'list_nets':
        res = v.list_nets(state=state)
        if not isinstance(res, dict):
            res = { command: res }
        return VIRT_SUCCESS, res
    if state:
        if not name:
            module.fail_json(msg="state change requires a specified name")
        res['changed'] = False
        # Bug fix: the original compared strings with "is not", an identity
        # test that only worked through CPython's interning of short string
        # literals; use value equality instead.
        if state in [ 'active' ]:
            if v.status(name) != 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
        elif state in [ 'present' ]:
            try:
                v.get_net(name)
            except EntryNotFound:
                if not xml:
                    module.fail_json(msg="network '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
        elif state in [ 'inactive' ]:
            entries = v.list_nets()
            if name in entries:
                if v.status(name) != 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
        elif state in [ 'undefined', 'absent' ]:
            entries = v.list_nets()
            if name in entries:
                # An active network must be stopped before it can be undefined.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        else:
            module.fail_json(msg="unexpected state")
        return VIRT_SUCCESS, res
    if command:
        if command in ENTRY_COMMANDS:
            if not name:
                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg="define requires xml argument")
                # Only define when the network does not already exist
                # (idempotent behaviour).
                try:
                    v.get_net(name)
                except EntryNotFound:
                    v.define(name, xml)
                    res = {'changed': True, 'created': name}
                return VIRT_SUCCESS, res
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
                res = { command: res }
            return VIRT_SUCCESS, res
        elif hasattr(v, command):
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = { command: res }
            return VIRT_SUCCESS, res
        else:
            # Bug fix: the original referenced an undefined name 'basecmd'
            # here, raising NameError instead of reporting the bad command.
            module.fail_json(msg="Command %s not recognized" % command)
    if autostart:
        if not name:
            module.fail_json(msg="state change requires a specified name")
        res['changed'] = False
        if autostart == 'yes':
            if not v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, True)
        elif autostart == 'no':
            if v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, False)
        return VIRT_SUCCESS, res
    module.fail_json(msg="expected state or command parameter to be specified")
def main():
    # Module entry point: declare the argument spec, verify optional
    # dependencies detected at import time, then delegate to core().
    module = AnsibleModule (
        argument_spec = dict(
            name = dict(aliases=['network']),
            state = dict(choices=['active', 'inactive', 'present', 'absent']),
            command = dict(choices=ALL_COMMANDS),
            uri = dict(default='qemu:///system'),
            xml = dict(),
            autostart = dict(choices=['yes', 'no'])
        ),
        supports_check_mode = True
    )
    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )
    if not HAS_XML:
        module.fail_json(
            msg='The `lxml` module is not importable. Check the requirements.'
        )
    rc = VIRT_SUCCESS
    # Python 2 except syntax: any unexpected exception becomes a module
    # failure with its message.
    try:
        rc, result = core(module)
    except Exception, e:
        module.fail_json(msg=str(e))
    if rc != 0: # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
sycy600/rslib | vendor/gtest-1.7.0/test/gtest_xml_outfiles_test.py | 2526 | 5340 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single file
    # for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    """Best-effort removal of both output files and their directory."""
    # Collapsed the original's three copy-pasted try/except blocks into a
    # loop; failures are ignored because the artifacts may not exist yet.
    for output_file_name in [GTEST_OUTPUT_1_TEST + ".xml",
                             GTEST_OUTPUT_2_TEST + ".xml"]:
      try:
        os.remove(os.path.join(self.output_dir_, output_file_name))
      except os.error:
        pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    """Runs the named gtest binary and compares its XML output file."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)

    # TODO(wan@google.com): libtool causes the built test binary to be
    #   named lt-gtest_xml_outfiles_test_ instead of
    #   gtest_xml_outfiles_test_.  To account for this possibility, we
    #   allow both names in the following code.  We should remove this
    #   hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)

    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| bsd-2-clause |
espadrine/opera | chromium/src/tools/gyp/test/mac/gyptest-copies.py | 258 | 1437 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that 'copies' with app bundles are handled correctly.
"""
import TestGyp
import os
import sys
import time
# Mac-only scenario: app-bundle 'copies' are meaningless elsewhere, so the
# whole test body is gated on darwin.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  test.run_gyp('framework.gyp', chdir='framework')
  test.build('framework.gyp', 'copy_target', chdir='framework')

  # Check that the copy succeeded.
  test.built_file_must_exist(
      'Test Framework.framework/foo/Dependency Bundle.framework',
      chdir='framework')
  test.built_file_must_exist(
      'Test Framework.framework/foo/Dependency Bundle.framework/Versions/A',
      chdir='framework')
  test.built_file_must_exist(
      'Test Framework.framework/Versions/A/Libraries/empty.c',
      chdir='framework')

  # Check that rebuilding the target a few times works.
  dep_bundle = test.built_file_path('Dependency Bundle.framework',
                                    chdir='framework')
  mtime = os.path.getmtime(dep_bundle)
  atime = os.path.getatime(dep_bundle)
  # Bump the bundle's timestamps each round to force the copy step to be
  # re-evaluated, then rebuild; the build must stay clean and re-runnable.
  for i in range(3):
    os.utime(dep_bundle, (atime + i * 1000, mtime + i * 1000))
    test.build('framework.gyp', 'copy_target', chdir='framework')

  # Check that actions ran.
  test.built_file_must_exist('action_file', chdir='framework')

  test.pass_test()
| bsd-3-clause |
consbio/clover | trefoil/render/renderers/tests/test_renderers.py | 1 | 4012 | import numpy
from trefoil.utilities.color import Color
from trefoil.render.renderers.stretched import StretchedRenderer
from trefoil.render.renderers.classified import ClassifiedRenderer
from trefoil.render.renderers.unique import UniqueValuesRenderer
from trefoil.render.renderers.utilities import get_renderer_by_name
def test_stretched_renderer(tmpdir):
    # Gradient fixture: row i holds the value i, spanning 0..99.
    data = numpy.zeros((100, 100))
    for i in range(0, 100):
        data[i] = i
    # Two-stop ramp from red at the minimum to blue at the maximum.
    colors = (
        (data.min(), Color(255, 0, 0, 255)),
        (data.max(), Color(0, 0, 255, 255))
    )
    renderer = StretchedRenderer(colors)
    assert renderer.name == 'stretched'
    img = renderer.render_image(data)
    # A stretched render interpolates across a full 256-entry palette.
    assert len(img.getpalette()) / 3 == 256
    assert img.size == (100, 100)
    img.save(str(tmpdir.join("stretched.png")))
    # Continuous legend: a single gradient swatch.
    legend = renderer.get_legend(20, 20)
    assert len(legend) == 1
    assert legend[0].image.size == (20, 20)
    legend[0].image.save(str(tmpdir.join("stretched_legend.png")))
    # Discrete legend mode: one swatch per color stop.
    legend = renderer.get_legend(20, 20, discrete_images=True)
    assert len(legend) == 2
    assert legend[0].image.size == (20, 20)
    # Round-trip: serialization keeps the stops and the default color space.
    expected = {
        'colors': [(0.0, '#F00'), (99.0, '#00F')],
        'type': 'stretched',
        'options': {'color_space': 'hsv'}
    }
    assert renderer.serialize() == expected
def test_classified_rendererer(tmpdir):
    """Render a gradient through three class breaks and verify palette,
    legend size, and serialization round-trip.

    (Note: the trailing 'rendererer' in the function name is a long-standing
    typo, kept because renaming would change the discovered test id.)
    """
    # Gradient fixture: each row holds its own row index, 0..99.
    grid = numpy.repeat(numpy.arange(100, dtype=float)[:, numpy.newaxis],
                        100, axis=1)
    breaks = (
        (10, Color(255, 0, 0, 255)),
        (50, Color(0, 255, 0, 255)),
        (grid.max(), Color(0, 0, 255, 255))
    )
    clf = ClassifiedRenderer(breaks)
    assert clf.name == 'classified'
    rendered = clf.render_image(grid)
    rendered.save(str(tmpdir.join("classified.png")))
    # Exactly three palette entries: pure red, green, blue.
    assert rendered.palette.palette == b'\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00'
    assert rendered.size == (100, 100)
    swatches = clf.get_legend(20, 20)
    assert len(swatches) == 3
    for idx, swatch in enumerate(swatches):
        swatch.image.save(str(tmpdir.join("classified_legend_%i.png" % idx)))
    assert clf.serialize() == {
        'colors': [(10, '#F00'), (50, '#0F0'), (99.0, '#00F')],
        'type': 'classified'
    }
def test_uniquevalues_renderer(tmpdir):
    # Fixture with four distinct non-zero bands (plus zero background).
    data = numpy.zeros((100, 100))
    data[10:25] = 10
    data[35:50] = 25
    data[50:75] = 50
    data[85:100] = 100
    # One color per unique value, in ascending value order.
    colors = (
        (10, Color(255, 0, 0, 255)),
        (25, Color(255, 255, 255, 255)),
        (50, Color(0, 255, 0, 255)),
        (100, Color(0, 0, 255, 255))
    )
    labels = ('A', 'B', 'C', 'D')
    renderer = UniqueValuesRenderer(colors, labels=labels)
    assert renderer.name == 'unique'
    img = renderer.render_image(data)
    img.save(str(tmpdir.join("unique.png")))
    # Palette carries the four colors plus a trailing black entry.
    assert img.palette.palette == b'\xff\x00\x00\xff\xff\xff\x00\xff\x00\x00\x00\xff\x00\x00\x00'
    assert img.size == (100, 100)
    # One legend swatch per unique value.
    legend = renderer.get_legend(20, 20)
    assert len(legend) == 4
    for index, element in enumerate(legend):
        element.image.save(
            str(tmpdir.join("uniquevalues_legend_%i.png" % index)))
    # Serialization keeps colors and the label list under 'options'.
    expected = {
        'colors': [
            (10, '#F00'),
            (25, '#FFF'),
            (50, '#0F0'),
            (100, '#00F')],
        'type': 'unique',
        'options': {
            'labels': ('A', 'B', 'C', 'D')
        }
    }
    assert renderer.serialize() == expected
def test_get_renderers_by_name():
    """A renderer looked up by its registered name must render exactly like
    one constructed directly."""
    # Gradient fixture: each row filled with its row index, 0..99.
    grid = numpy.repeat(numpy.arange(100, dtype=float)[:, numpy.newaxis],
                        100, axis=1)
    breaks = (
        (10, Color(255, 0, 0, 255)),
        (50, Color(0, 255, 0, 255)),
        (grid.max(), Color(0, 0, 255, 255))
    )
    renderer_cls = get_renderer_by_name("classified")
    rendered = renderer_cls(breaks).render_image(grid)
    # Three-entry palette: pure red, green, blue.
    assert rendered.palette.palette == b'\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00'
    assert rendered.size == (100, 100)
| bsd-3-clause |
Hurence/log-island | logisland-components/logisland-processors/logisland-processor-scripting/src/main/resources/nltk/corpus/reader/opinion_lexicon.py | 7 | 3977 | # Natural Language Toolkit: Opinion Lexicon Corpus Reader
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
CorpusReader for the Opinion Lexicon.
- Opinion Lexicon information -
Authors: Minqing Hu and Bing Liu, 2004.
Department of Computer Sicence
University of Illinois at Chicago
Contact: Bing Liu, liub@cs.uic.edu
http://www.cs.uic.edu/~liub
Distributed with permission.
Related papers:
- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery
& Data Mining (KDD-04), Aug 22-25, 2004, Seattle, Washington, USA.
- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and
Comparing Opinions on the Web". Proceedings of the 14th International World
Wide Web conference (WWW-2005), May 10-14, 2005, Chiba, Japan.
"""
from nltk.compat import string_types
from nltk.corpus.reader import WordListCorpusReader
from nltk.corpus.reader.api import *
class IgnoreReadmeCorpusView(StreamBackedCorpusView):
    """
    This CorpusView is used to skip the initial readme block of the corpus.
    """
    def __init__(self, *args, **kwargs):
        StreamBackedCorpusView.__init__(self, *args, **kwargs)
        # open self._stream
        self._open()
        # skip the readme block (everything up to the first blank line)
        read_blankline_block(self._stream)
        # Set the initial position to the current stream position, so later
        # block reads start after the readme rather than at byte 0.
        self._filepos = [self._stream.tell()]
class OpinionLexiconCorpusReader(WordListCorpusReader):
    """
    Reader for Liu and Hu opinion lexicon.  Blank lines and readme are ignored.

        >>> from nltk.corpus import opinion_lexicon
        >>> opinion_lexicon.words()
        ['2-faced', '2-faces', 'abnormal', 'abolish', ...]

    The OpinionLexiconCorpusReader provides shortcuts to retrieve positive/negative
    words:

        >>> opinion_lexicon.negative()
        ['2-faced', '2-faces', 'abnormal', 'abolish', ...]

    Note that words from `words()` method are sorted by file id, not alphabetically:

        >>> opinion_lexicon.words()[0:10]
        ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably',
        'abominate', 'abomination', 'abort', 'aborted']
        >>> sorted(opinion_lexicon.words())[0:10]
        ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably',
        'abominate', 'abomination', 'abort']
    """

    # Use the readme-skipping view so the license header is never yielded.
    CorpusView = IgnoreReadmeCorpusView

    def words(self, fileids=None):
        """
        Return all words in the opinion lexicon. Note that these words are not
        sorted in alphabetical order.

        :param fileids: a list or regexp specifying the ids of the files whose
            words have to be returned.
        :return: the given file(s) as a list of words and punctuation symbols.
        :rtype: list(str)
        """
        if fileids is None: fileids = self._fileids
        # Bug fix: the original tested isinstance(fileids, compat.string_types),
        # relying on `compat` leaking in via a star import; use the
        # string_types name imported explicitly at the top of this module.
        elif isinstance(fileids, string_types): fileids = [fileids]
        return concat([self.CorpusView(path, self._read_word_block, encoding=enc)
                       for (path, enc, fileid) in self.abspaths(fileids, True, True)])

    def positive(self):
        """
        Return all positive words in alphabetical order.

        :return: a list of positive words.
        :rtype: list(str)
        """
        return self.words('positive-words.txt')

    def negative(self):
        """
        Return all negative words in alphabetical order.

        :return: a list of negative words.
        :rtype: list(str)
        """
        return self.words('negative-words.txt')

    def _read_word_block(self, stream):
        # Stream-view callback: deliver the corpus in small batches.
        words = []
        for i in range(20): # Read 20 lines at a time.
            line = stream.readline()
            if not line:
                continue
            words.append(line.strip())
        return words
| apache-2.0 |
callowayproject/django-concepts | example/settings.py | 1 | 4239 | # Django settings for example project.
# NOTE(review): DEBUG must be False in any real deployment; this is a demo
# project so both flags stay on.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

import os, sys
# Put the repository root (one level up) on sys.path so the 'concepts'
# package is importable when running straight from a checkout.
APP = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJ_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.append(APP)

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'dev.db',                       # Or path to database file if using sqlite3.
        'USER': '',                             # Not used with sqlite3.
        'PASSWORD': '',                         # Not used with sqlite3.
        'HOST': '',                             # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                             # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'uploads'))

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/uploads/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'static'))

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Make this unique, and don't share it with anybody.
# NOTE(review): a committed SECRET_KEY is acceptable only for an example
# project like this one.
SECRET_KEY = 'g2_39yupn*6j4p*cg2%w643jiq-1n_annua*%i8+rq0dx9p=$n'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'example.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJ_ROOT, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'concepts',
    'simpleapp',
)
| apache-2.0 |
henrywoo/googletest | test/gtest_output_test.py | 496 | 12051 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

import gtest_test_utils


# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# Each command below is an (extra_env, argv) pair.
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Returns *s* with Windows (CRLF) and old-Mac (CR) line endings
  rewritten as UNIX newlines."""
  # CRLF is collapsed first so the remaining lone CRs are genuine
  # old-Mac endings.
  unix = s.replace('\r\n', '\n')
  return unix.replace('\r', '\n')
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE_NAME:#: '.
  """
  # Group 1 captures the bare file name; group 2 eats either the
  # ':LINE' (POSIX) or '(LINE)' (MSVC) form of the line number.
  location_re = r'.*[/\\](.+)(\:\d+|\(\d+\))\: '
  return re.sub(location_re, r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces each stack trace body with 'Stack trace: (omitted)'."""
  # *? means "find the shortest string that matches", so every trace is
  # replaced individually rather than everything up to the last one.
  trace_re = r'Stack trace:(.|\n)*?\n\n'
  return re.sub(trace_re, 'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Deletes stack traces (header included) from the output entirely."""
  # *? means "find the shortest string that matches".
  trace_re = r'Stack trace:(.|\n)*?\n\n'
  return re.sub(trace_re, '', output)
def RemoveTime(output):
  """Replaces elapsed-time figures such as '(123 ms' with '(? ms'."""
  time_re = r'\(\d+ ms'
  return re.sub(time_re, '(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  # Some compilers spell the type 'unsigned int' as plain 'unsigned'; the
  # pattern is a literal, so a plain string replace is equivalent.
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    return test_output
  # Removes the color information that is not present on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Changes failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Changes file(line_number) to file:line_number.
  return re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
def RemoveTestCounts(output):
  """Replaces every concrete test count in the output with '?'."""
  # Order matters: the more specific patterns must run before the
  # catch-all '\d+ tests?\.' at the end.
  replacements = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in replacements:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop whole RUN..FAILED/OK sections for matching tests, then drop
  # any remaining stray lines that mention the pattern.
  section_re = (r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
                % (pattern, pattern))
  test_output = re.sub(section_re, '', test_output)
  line_re = r'.*%s.*\n' % pattern
  return re.sub(line_re, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # Applied in the same order as before: line endings, locations,
  # stack traces, timing.
  for transform in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = transform(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, cmdline = env_cmd
  # Run with the current environment augmented by the requested variables.
  environ = os.environ.copy()
  environ.update(extra_env)
  return gtest_test_utils.Subprocess(cmdline, env=environ).output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its normalized output.

  All file location info is stripped off via NormalizeOutput.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Work on a copy so the caller's dict is untouched; the extra variable
  # disables exception pop-ups on Windows.
  environ = dict(extra_env)
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  commands = (COMMAND_WITH_COLOR,
              COMMAND_WITH_TIME,
              COMMAND_WITH_DISABLED,
              COMMAND_WITH_SHARDING)
  return ''.join(GetCommandOutput(command) for command in commands)
# Probe the test binary once to discover which optional features were
# compiled in, by looking for their test names in --gtest_list_tests output.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False

# The golden file is only reproducible on a fully featured, non-Windows
# build; otherwise unsupported sections are removed before comparison.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS and
                            not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares normalized gtest_output_test_ output against the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    """Strips golden-file sections for features this build cannot run."""
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()

    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    # 'with' guarantees the handle is closed even if reading fails (the old
    # code only closed it on the success path).
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = ToUnixLineEnding(golden_file.read())

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        # Use 'with' so the debug dumps are flushed and closed instead of
        # leaking open file objects (the old code never closed them).
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb') as f:
          f.write(normalized_actual)
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb') as f:
          f.write(normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # With --gengolden, (re)write the golden file instead of running the test.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| bsd-3-clause |
spaceof7/QGIS | python/PyQt/PyQt5/QtCore.py | 7 | 1031 | from PyQt5.QtCore import *
from types import MethodType
# Keep references to the stock QVariant special methods so the NULL-aware
# overrides defined below can delegate to them for non-NULL values.
_QVariant__repr__ = QVariant.__repr__
_QVariant__eq__ = QVariant.__eq__
_QVariant__ne__ = QVariant.__ne__
_QVariant__hash__ = QVariant.__hash__
def __bool__(self):
    """A QVariant is truthy exactly when it carries a value."""
    if self.isNull():
        return False
    return True


def __repr__(self):
    """Render NULL variants as 'NULL'; defer to Qt's repr otherwise."""
    return 'NULL' if self.isNull() else _QVariant__repr__(self)


def __eq__(self, other):
    """NULL variants compare equal to other NULL variants and to None."""
    if not self.isNull():
        return _QVariant__eq__(self, other)
    return other is None or (isinstance(other, QVariant) and other.isNull())


def __ne__(self, other):
    """Negation of __eq__, including the NULL/None special cases."""
    if not self.isNull():
        return _QVariant__ne__(self, other)
    return other is not None and not (isinstance(other, QVariant) and other.isNull())


def __hash__(self):
    """All NULL variants share one fixed hash bucket."""
    return 2178309 if self.isNull() else _QVariant__hash__(self)
# Install the NULL-aware overrides on QVariant itself.
QVariant.__bool__ = __bool__
QVariant.__repr__ = __repr__
QVariant.__eq__ = __eq__
QVariant.__ne__ = __ne__
QVariant.__hash__ = __hash__

# Canonical NULL value (typed as int, but isNull() is true).
NULL = QVariant(QVariant.Int)
| gpl-2.0 |
jhseu/tensorflow | tensorflow/python/ops/ragged/ragged_concat_op_test.py | 6 | 13299 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.concat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConcatOpTest(test_util.TensorFlowTestCase,
                         parameterized.TestCase):
  """Parameterized tests for ragged_concat_ops.concat."""

  def _rt_inputs_to_tensors(self, rt_inputs, ragged_ranks=None):
    # ragged_rank=0 marks an input that should become a dense Tensor;
    # any other (or None) builds a RaggedTensor.
    if ragged_ranks is None:
      ragged_ranks = [None] * len(rt_inputs)
    return [  # pylint: disable=g-long-ternary
        ragged_factory_ops.constant(rt_input, ragged_rank=rrank)
        if rrank != 0 else constant_op.constant(rt_input)
        for (rt_input, rrank) in zip(rt_inputs, ragged_ranks)
    ]

  @parameterized.parameters(
      dict(
          descr='Two rank-2 inputs with empty value axis=1',
          rt_inputs=([[]], [[]]),
          axis=1,
          expected=[[]]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=0',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21']],   # shape=(3, None)
              [['b00'], ['b10']]),                    # shape=(2, None)
          axis=0,
          expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'],
                    [b'b10']]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=1',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],   # shape=(3, None)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']]),    # shape=(3, None)
          axis=1,
          expected=[
              [b'a00', b'a01', b'b00'],
              [b'b10', b'b11', b'b12'],
              [b'a20', b'a21', b'a22', b'b20']]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=-2',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21']],   # shape=(3, None)
              [['b00'], ['b10']]),                    # shape=(2, None)
          axis=-2,
          expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'],
                    [b'b10']]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=-1',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],   # shape=(3, None)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']]),    # shape=(3, None)
          axis=-1,
          expected=[
              [b'a00', b'a01', b'b00'],
              [b'b10', b'b11', b'b12'],
              [b'a20', b'a21', b'a22', b'b20']],
          expected_shape=[3, None]),
      dict(
          descr='Three rank-2 inputs (ragged_rank=1), axis=0',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],   # shape=(3, None)
              [['b00'], ['b10']],                            # shape=(2, None)
              [['c00'], ['c10', 'c11'], ['c21']]),           # shape=(3, None)
          axis=0,
          expected=[[b'a00', b'a01'], [], [b'a20', b'a21', b'a22'], [b'b00'],
                    [b'b10'], [b'c00'], [b'c10', b'c11'], [b'c21']]),
      dict(
          descr='Three rank-2 inputs (ragged_rank=1), axis=1',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],   # shape=(3, None)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']],     # shape=(3, None)
              [[], ['c10', 'c11'], ['c20', 'c21']]),         # shape=(3, None)
          axis=1,
          expected=[
              [b'a00', b'a01', b'b00'],
              [b'b10', b'b11', b'b12', b'c10', b'c11'],
              [b'a20', b'a21', b'a22', b'b20', b'c20', b'c21']]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=0',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[['b000']], [['b100', 'b101'], ['b110']]],
              [[], [['c100', 'c101', 'c102', 'c103']], [[], ['c210', 'c211']]]),
          axis=0,
          expected=[
              [[b'a000', b'a001'], [b'a010']],
              [[b'a100', b'a101', b'a102'], [b'a110', b'a111']],
              [[b'b000']],
              [[b'b100', b'b101'], [b'b110']],
              [],
              [[b'c100', b'c101', b'c102', b'c103']],
              [[], [b'c210', b'c211']]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=1',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[['b000']], [['b100', 'b101'], ['b110']]],
              [[], [[], ['c110', 'c111']]]),
          axis=1,
          expected=[
              [[b'a000', b'a001'], [b'a010'], [b'b000']],
              [[b'a100', b'a101', b'a102'], [b'a110', b'a111'],
               [b'b100', b'b101'], [b'b110'], [], [b'c110', b'c111']]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=2',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
              [[['c000'], ['c010']], [[], ['c110', 'c111']]]),
          axis=2,
          expected=[
              [[b'a000', b'a001', b'c000'],
               [b'a010', b'b010', b'b011', b'c010']],
              [[b'a100', b'a101', b'a102', b'b100', b'b101'],
               [b'a110', b'a111', b'b110', b'c110', b'c111']]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=-1',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
              [[['c000'], ['c010']], [[], ['c110', 'c111']]]),
          axis=-1,
          expected=[
              [[b'a000', b'a001', b'c000'],
               [b'a010', b'b010', b'b011', b'c010']],
              [[b'a100', b'a101', b'a102', b'b100', b'b101'],
               [b'a110', b'a111', b'b110', b'c110', b'c111']]]),
      dict(
          descr='ragged_concat([uniform, ragged, uniform], axis=1)',
          ragged_ranks=[0, 1, 0],
          rt_inputs=(
              [['0('], ['1('], ['2(']],                      # shape=(3, 1)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']],     # shape=(3, None)
              [[')0'], [')1'], [')2']]),                     # shape=(3, 1)
          axis=1,
          expected=[
              [b'0(', b'b00', b')0'],
              [b'1(', b'b10', b'b11', b'b12', b')1'],
              [b'2(', b'b20', b')2']]),
      dict(
          descr='ragged_concat([uniform, uniform], axis=0)',
          ragged_ranks=[0, 0],
          rt_inputs=(
              [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']],   # shape=(3, 2)
              [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]),    # shape=(2, 3)
          axis=0,
          expected=[
              [b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'],
              [b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']],
          expected_ragged_rank=1),
      dict(
          descr='ragged_concat([uniform, ragged], axis=0)',
          ragged_ranks=[0, 1],
          rt_inputs=(
              [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']],   # shape=(3, 2)
              [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]),    # shape=(2, 3)
          axis=0,
          expected=[
              [b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'],
              [b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]),
      dict(
          descr='ragged_concat([uniform, ragged], axis=0) with rank-3 inputs',
          ragged_ranks=[0, 2],
          rt_inputs=(
              [[[0, 1], [2, 3]], [[4, 5], [6, 7]]],   # shape = (2, 2, 2)
              [[[8], [8, 8]]]),                       # shape = (2, None, None)
          axis=0,
          expected=[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], [8, 8]]]),
      dict(
          descr='Two rank-3 inputs with ragged_rank=1, axis=-1',
          ragged_ranks=[1, 1],
          rt_inputs=(
              [[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]],
              [[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]),
          axis=-1,
          expected=[
              [[0, 1, 9, 8], [2, 3, 7, 6], [4, 5, 5, 4]], [],
              [[6, 7, 3, 2], [8, 9, 1, 0]]],
          expected_ragged_rank=1),
      dict(
          descr='ragged_concat([vector, vector], axis=0)',
          ragged_ranks=[0, 0],
          rt_inputs=([1, 2, 3], [4, 5, 6]),
          axis=0,
          expected=[1, 2, 3, 4, 5, 6]),
      dict(
          descr='One input (so ragged_conat is a noop)',
          rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],),
          axis=0,
          expected=[[b'a00', b'a01'], [], [b'a20', b'a21']]),
  )  # pyformat: disable
  def testRaggedConcat(self,
                       descr,
                       rt_inputs,
                       axis,
                       expected,
                       ragged_ranks=None,
                       expected_ragged_rank=None,
                       expected_shape=None):
    # Build tensors, concat along the requested axis, and compare both the
    # values and (when given) the resulting ragged_rank / static shape.
    rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)
    concatenated = ragged_concat_ops.concat(rt_inputs, axis)
    if expected_ragged_rank is not None:
      self.assertEqual(concatenated.ragged_rank, expected_ragged_rank)
    if expected_shape is not None:
      self.assertEqual(concatenated.shape.as_list(), expected_shape)
    self.assertAllEqual(concatenated, expected)

  @parameterized.parameters(
      dict(
          rt_inputs=(),
          axis=0,
          error=ValueError,
          message=r'rt_inputs may not be empty\.'),
      dict(
          rt_inputs=([[1, 2]], [[3, 4]]),
          axis=r'foo',
          error=TypeError,
          message='axis must be an int'),
      dict(
          rt_inputs=([[1, 2]], [[3, 4]]),
          axis=-3,
          error=ValueError,
          message='axis=-3 out of bounds: expected -2<=axis<2'),
      dict(
          rt_inputs=([[1, 2]], [[3, 4]]),
          axis=2,
          error=ValueError,
          message='axis=2 out of bounds: expected -2<=axis<2'),
      dict(
          ragged_ranks=(0, 0),
          rt_inputs=([[1, 2]], [[3, 4], [5, 6]]),
          axis=1,
          error=(ValueError, errors.InvalidArgumentError)),
  )
  def testStaticError(self,
                      rt_inputs,
                      axis,
                      error,
                      message=None,
                      ragged_ranks=None):
    # These invalid combinations must be rejected at graph-build time.
    rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)
    self.assertRaisesRegexp(error, message, ragged_concat_ops.concat, rt_inputs,
                            axis)

  @parameterized.parameters([
      dict(
          ragged_ranks=(1, 1),
          rt_inputs=([[1, 2]], [[3, 4], [5, 6]]),
          axis=1,
          error=errors.InvalidArgumentError,
          message='Input tensors have incompatible shapes'),
  ])
  def testRuntimeError(self, rt_inputs, axis, error, message,
                       ragged_ranks=None):
    # Shape mismatches hidden behind unknown static shapes only surface when
    # the graph runs; eager mode raises at build time, so skip it here.
    if context.executing_eagerly():
      return
    rt_inputs = [
        array_ops.placeholder_with_default(rt, shape=None) for rt in rt_inputs
    ]
    concatenated = ragged_concat_ops.concat(rt_inputs, axis)
    with self.assertRaisesRegexp(error, message):
      self.evaluate(concatenated)

  def testNegativeAxisWithUnknownRankError(self):
    # Negative axes cannot be resolved without a statically known rank.
    if context.executing_eagerly():
      return
    rt_inputs = [
        array_ops.placeholder(dtypes.int64),
        array_ops.placeholder(dtypes.int64)
    ]
    self.assertRaisesRegexp(
        ValueError, r'axis may only be negative if ndims is statically known.',
        ragged_concat_ops.concat, rt_inputs, -1)

  def testSingleTensorInput(self):
    """Tests ragged_concat with a single tensor input.

    Usually, we pass a list of values in for rt_inputs.  However, you can
    also pass in a single value (as with tf.concat), in which case it simply
    returns that tensor.  This test exercises that path.
    """
    rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]])
    concatenated = ragged_concat_ops.concat(rt_inputs, 0)
    self.assertAllEqual(concatenated, [[1, 2], [3, 4]])
# Allows running this test file directly.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
amenonsen/ansible | lib/ansible/modules/cloud/vultr/_vultr_server_facts.py | 7 | 6016 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_server_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vultr_server_info) instead.
short_description: Gather facts about the Vultr servers available.
description:
- Gather facts about servers available.
version_added: "2.7"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr servers facts
local_action:
module: vultr_server_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_server_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_server_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
"vultr_server_facts": [
{
"allowed_bandwidth_gb": 1000,
"auto_backup_enabled": false,
"application": null,
"cost_per_month": 5.00,
"current_bandwidth_gb": 0,
"date_created": "2018-07-19 08:23:03",
"default_password": "p4ssw0rd!",
"disk": "Virtual 25 GB",
"firewallgroup": null,
"id": 17241096,
"internal_ip": "",
"kvm_url": "https://my.vultr.com/subs/vps/novnc/api.php?data=OFB...",
"name": "ansibletest",
"os": "CentOS 7 x64",
"pending_charges": 0.01,
"plan": "1024 MB RAM,25 GB SSD,1.00 TB BW",
"power_status": "running",
"ram": "1024 MB",
"region": "Amsterdam",
"server_state": "ok",
"status": "active",
"tag": "",
"v4_gateway": "105.178.158.1",
"v4_main_ip": "105.178.158.181",
"v4_netmask": "255.255.254.0",
"v6_main_ip": "",
"v6_network": "",
"v6_network_size": "",
"v6_networks": [],
"vcpu_count": 1
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrServerFacts(Vultr):
    """Fetches the Vultr server list and maps API fields to fact names."""

    def __init__(self, module):
        super(AnsibleVultrServerFacts, self).__init__(module, "vultr_server_facts")

        # Mapping of API field -> result field.  'key' renames the field,
        # 'convert_to' coerces the value's type, and 'transform' resolves
        # numeric IDs into human-readable names via extra API lookups.
        self.returns = {
            "APPID": dict(key='application', convert_to='int', transform=self._get_application_name),
            "FIREWALLGROUPID": dict(key='firewallgroup', transform=self._get_firewallgroup_name),
            "SUBID": dict(key='id', convert_to='int'),
            "VPSPLANID": dict(key='plan', convert_to='int', transform=self._get_plan_name),
            "allowed_bandwidth_gb": dict(convert_to='int'),
            'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'),
            "cost_per_month": dict(convert_to='float'),
            "current_bandwidth_gb": dict(convert_to='float'),
            "date_created": dict(),
            "default_password": dict(),
            "disk": dict(),
            "gateway_v4": dict(key='v4_gateway'),
            "internal_ip": dict(),
            "kvm_url": dict(),
            "label": dict(key='name'),
            "location": dict(key='region'),
            "main_ip": dict(key='v4_main_ip'),
            "netmask_v4": dict(key='v4_netmask'),
            "os": dict(),
            "pending_charges": dict(convert_to='float'),
            "power_status": dict(),
            "ram": dict(),
            "server_state": dict(),
            "status": dict(),
            "tag": dict(),
            "v6_main_ip": dict(),
            "v6_network": dict(),
            "v6_network_size": dict(),
            "v6_networks": dict(),
            "vcpu_count": dict(convert_to='int'),
        }

    def _get_application_name(self, application):
        # The API uses APPID 0 to mean "no application installed".
        if application == 0:
            return None

        return self.get_application(application, 'APPID').get('name')

    def _get_firewallgroup_name(self, firewallgroup):
        # FIREWALLGROUPID 0 means "no firewall group assigned".
        if firewallgroup == 0:
            return None

        return self.get_firewallgroup(firewallgroup, 'FIREWALLGROUPID').get('description')

    def _get_plan_name(self, plan):
        # Resolve the numeric plan ID to its display name.
        return self.get_plan(plan, 'VPSPLANID').get('name')

    def get_servers(self):
        # Returns the raw API mapping of SUBID -> server dict.
        return self.api_query(path="/v1/server/list")
def parse_servers_list(servers_list):
    """Flatten the API's {SUBID: server} mapping into a list of servers.

    The previous comprehension iterated ``items()`` and shadowed the
    ``id`` builtin; ``values()`` yields the same servers in the same
    order without the unused key.
    """
    return list(servers_list.values())
def main():
    """Module entry point: query the server list and return it as facts."""
    argument_spec = vultr_argument_spec()

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    server_facts = AnsibleVultrServerFacts(module)
    result = server_facts.get_result(parse_servers_list(server_facts.get_servers()))
    # Facts modules must also expose their result under ansible_facts.
    ansible_facts = {
        'vultr_server_facts': result['vultr_server_facts']
    }
    module.exit_json(ansible_facts=ansible_facts, **result)
| gpl-3.0 |
Chitrank-Dixit/werkzeug | examples/shorty/utils.py | 44 | 2500 | from os import path
from urlparse import urlparse
from random import sample, randrange
from jinja2 import Environment, FileSystemLoader
from werkzeug.local import Local, LocalManager
from werkzeug.utils import cached_property
from werkzeug.wrappers import Response
from werkzeug.routing import Map, Rule
from sqlalchemy import MetaData
from sqlalchemy.orm import create_session, scoped_session
TEMPLATE_PATH = path.join(path.dirname(__file__), 'templates')
STATIC_PATH = path.join(path.dirname(__file__), 'static')
# URL schemes accepted by validate_url().
ALLOWED_SCHEMES = frozenset(['http', 'https', 'ftp', 'ftps'])
# Alphabet used for random short IDs.
URL_CHARS = 'abcdefghijkmpqrstuvwxyzABCDEFGHIJKLMNPQRST23456789'

# Request-local storage; 'application' resolves to the current app object.
local = Local()
local_manager = LocalManager([local])
application = local('application')

metadata = MetaData()
url_map = Map([Rule('/static/<file>', endpoint='static', build_only=True)])

# Session factory bound lazily to the current application's engine.
session = scoped_session(lambda: create_session(application.database_engine,
                                                autocommit=False,
                                                autoflush=False))
jinja_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH))
def expose(rule, **kw):
    """Decorator: register the wrapped view on ``url_map`` under *rule*.

    The view function's name becomes the endpoint.
    """
    def register(view):
        kw['endpoint'] = view.__name__
        url_map.add(Rule(rule, **kw))
        return view
    return register
def url_for(endpoint, _external=False, **values):
    """Build the URL for *endpoint* via the adapter of the current request."""
    adapter = local.url_adapter
    return adapter.build(endpoint, values, force_external=_external)

# Make url_for callable from inside all templates.
jinja_env.globals['url_for'] = url_for
def render_template(template, **context):
    """Render *template* with *context* and wrap the result in an HTML response."""
    body = jinja_env.get_template(template).render(**context)
    return Response(body, mimetype='text/html')
def validate_url(url):
    """Return True if *url* uses one of the whitelisted schemes."""
    scheme = urlparse(url)[0]
    return scheme in ALLOWED_SCHEMES
def get_random_uid():
    """Return a random short id of 3-8 distinct characters from URL_CHARS."""
    length = randrange(3, 9)
    return ''.join(sample(URL_CHARS, length))
class Pagination(object):
    """Slice a SQLAlchemy query into pages and expose navigation helpers."""

    def __init__(self, query, per_page, page, endpoint):
        self.query = query
        self.per_page = per_page
        self.page = page
        self.endpoint = endpoint

    @cached_property
    def count(self):
        """Total number of rows (evaluated once, then cached)."""
        return self.query.count()

    @cached_property
    def entries(self):
        """The rows belonging to the current page (evaluated once)."""
        offset = (self.page - 1) * self.per_page
        return self.query.offset(offset).limit(self.per_page).all()

    @property
    def has_previous(self):
        """True when a page precedes the current one."""
        return self.page > 1

    @property
    def has_next(self):
        """True when a page follows the current one."""
        return self.page < self.pages

    @property
    def previous(self):
        """URL of the previous page."""
        return url_for(self.endpoint, page=self.page - 1)

    @property
    def next(self):
        """URL of the next page."""
        return url_for(self.endpoint, page=self.page + 1)

    @property
    def pages(self):
        """Total number of pages (always at least 1)."""
        return max(0, self.count - 1) // self.per_page + 1
| bsd-3-clause |
ichuang/sympy | sympy/mpmath/tests/test_calculus.py | 40 | 1825 | from sympy.mpmath import *
def test_approximation():
    """chebyfit should approximate cos(2-2x)/x on [2, 4] within its error bound."""
    mp.dps = 15
    f = lambda x: cos(2 - 2*x)/x
    poly, err = chebyfit(f, [2, 4], 8, error=True)
    assert err < 1e-5
    # Spot-check the fit at ten points across the interval.
    for k in range(10):
        x = 2 + k/5.
        assert abs(polyval(poly, x) - f(x)) < err
def test_limits():
    """Numeric limits at a finite point and at infinity."""
    mp.dps = 15
    at_zero = limit(lambda x: (x - sin(x))/x**3, 0)
    assert at_zero.ae(mpf(1)/6)
    assert limit(lambda n: (1 + 1/n)**n, inf).ae(e)
def test_polyval():
    """polyval on degenerate inputs and a cubic, with and without derivative."""
    assert polyval([], 3) == 0
    assert polyval([0], 3) == 0
    assert polyval([5], 3) == 5
    cubic = [4, 0, -2, 5]  # 4x^3 - 2x + 5
    assert polyval(cubic, 4) == 253
    # derivative=True returns (value, derivative) as a pair.
    assert polyval(cubic, 4, derivative=True) == (253, 190)
def test_polyroots():
    """Roots of linear and quadratic polynomials, plus degenerate inputs."""
    (root,) = polyroots([1, -4])
    assert root.ae(4)
    r1, r2 = polyroots([1, 2, 3])
    assert r1.ae(-1 - sqrt(2)*j)
    assert r2.ae(-1 + sqrt(2)*j)
    # Not a thorough test: only checks that a constant poly has no roots.
    assert polyroots([1]) == []
    # The zero polynomial must be rejected.
    try:
        polyroots([0])
    except ValueError:
        pass
    else:
        assert False
def test_pade():
    """Pade approximant of exp(x) built from its Taylor coefficients."""
    one = mpf(1)
    mp.dps = 20
    N = 10
    # Taylor coefficients of exp: 1/k!
    coeffs = [one]
    factorial = 1
    for k in range(1, N + 1):
        factorial *= k
        coeffs.append(one/factorial)
    p, q = pade(coeffs, N//2, N//2)
    for x in arange(0, 1, 0.1):
        approx = polyval(p[::-1], x)/polyval(q[::-1], x)
        assert approx.ae(exp(x), 1.0e-10)
    mp.dps = 15
def test_fourier():
    """Fourier coefficients of f(x) = x + 1 on the interval [-1, 2]."""
    mp.dps = 15
    c, s = fourier(lambda x: x + 1, [-1, 2], 2)
    # Cosine coefficients.
    assert c[0].ae(1.5)
    assert c[1].ae(-3*sqrt(3)/(2*pi))
    assert c[2].ae(3*sqrt(3)/(4*pi))
    # Sine coefficients.
    assert s[0] == 0
    assert s[1].ae(3/(2*pi))
    assert s[2].ae(3/(4*pi))
    # Evaluate the truncated series at x = 1.
    assert fourierval((c, s), [-1, 2], 1).ae(1.9134966715663442)
def test_differint():
    """Differintegral of the identity function of order -0.5 at x = 2."""
    mp.dps = 15
    expected = 8*sqrt(2/pi)/3
    assert differint(lambda t: t, 2, -0.5).ae(expected)
| bsd-3-clause |
rogerscristo/BotFWD | env/lib/python3.6/site-packages/chardet/escsm.py | 289 | 10510 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
# State machine for detecting HZ-GB-2312.
# *_CLS maps every input byte 0x00-0xFF (eight entries per row, row comments
# give the byte range) to a character class; *_ST is the transition table
# indexed by current_state * class_factor + class.
HZ_CLS = (
    1,0,0,0,0,0,0,0,  # 00 - 07
    0,0,0,0,0,0,0,0,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,1,0,0,0,0,  # 18 - 1f
    0,0,0,0,0,0,0,0,  # 20 - 27
    0,0,0,0,0,0,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    0,0,0,0,0,0,0,0,  # 40 - 47
    0,0,0,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,4,0,5,2,0,  # 78 - 7f
    1,1,1,1,1,1,1,1,  # 80 - 87
    1,1,1,1,1,1,1,1,  # 88 - 8f
    1,1,1,1,1,1,1,1,  # 90 - 97
    1,1,1,1,1,1,1,1,  # 98 - 9f
    1,1,1,1,1,1,1,1,  # a0 - a7
    1,1,1,1,1,1,1,1,  # a8 - af
    1,1,1,1,1,1,1,1,  # b0 - b7
    1,1,1,1,1,1,1,1,  # b8 - bf
    1,1,1,1,1,1,1,1,  # c0 - c7
    1,1,1,1,1,1,1,1,  # c8 - cf
    1,1,1,1,1,1,1,1,  # d0 - d7
    1,1,1,1,1,1,1,1,  # d8 - df
    1,1,1,1,1,1,1,1,  # e0 - e7
    1,1,1,1,1,1,1,1,  # e8 - ef
    1,1,1,1,1,1,1,1,  # f0 - f7
    1,1,1,1,1,1,1,1,  # f8 - ff
)

# Transition table: row comments give the state*factor+class index range.
HZ_ST = (
MachineState.START,MachineState.ERROR,     3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,     4,MachineState.ERROR,# 10-17
     5,MachineState.ERROR,     6,MachineState.ERROR,     5,     5,     4,MachineState.ERROR,# 18-1f
     4,MachineState.ERROR,     4,     4,     4,MachineState.ERROR,     4,MachineState.ERROR,# 20-27
     4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
)

# All HZ character classes are single-byte (no multi-byte length info).
HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)

HZ_SM_MODEL = {'class_table': HZ_CLS,
               'class_factor': 6,
               'state_table': HZ_ST,
               'char_len_table': HZ_CHAR_LEN_TABLE,
               'name': "HZ-GB-2312",
               'language': 'Chinese'}
# State machine for detecting ISO-2022-CN escape sequences.
# Byte -> character-class table (eight entries per row; see row comments).
ISO2022CN_CLS = (
    2,0,0,0,0,0,0,0,  # 00 - 07
    0,0,0,0,0,0,0,0,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,1,0,0,0,0,  # 18 - 1f
    0,0,0,0,0,0,0,0,  # 20 - 27
    0,3,0,0,0,0,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    0,0,0,4,0,0,0,0,  # 40 - 47
    0,0,0,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,0,0,0,0,0,  # 78 - 7f
    2,2,2,2,2,2,2,2,  # 80 - 87
    2,2,2,2,2,2,2,2,  # 88 - 8f
    2,2,2,2,2,2,2,2,  # 90 - 97
    2,2,2,2,2,2,2,2,  # 98 - 9f
    2,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,2,2,2,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,2,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    2,2,2,2,2,2,2,2,  # e0 - e7
    2,2,2,2,2,2,2,2,  # e8 - ef
    2,2,2,2,2,2,2,2,  # f0 - f7
    2,2,2,2,2,2,2,2,  # f8 - ff
)

# Transition table indexed by current_state * class_factor + class.
ISO2022CN_ST = (
MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
     5,     6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
)

# Escape-sequence detection only; no multi-byte character lengths needed.
ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)

ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
                      'class_factor': 9,
                      'state_table': ISO2022CN_ST,
                      'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
                      'name': "ISO-2022-CN",
                      'language': 'Chinese'}
# State machine for detecting ISO-2022-JP escape sequences.
# Byte -> character-class table (eight entries per row; see row comments).
ISO2022JP_CLS = (
    2,0,0,0,0,0,0,0,  # 00 - 07
    0,0,0,0,0,0,2,2,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,1,0,0,0,0,  # 18 - 1f
    0,0,0,0,7,0,0,0,  # 20 - 27
    3,0,0,0,0,0,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    6,0,4,0,8,0,0,0,  # 40 - 47
    0,9,5,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,0,0,0,0,0,  # 78 - 7f
    2,2,2,2,2,2,2,2,  # 80 - 87
    2,2,2,2,2,2,2,2,  # 88 - 8f
    2,2,2,2,2,2,2,2,  # 90 - 97
    2,2,2,2,2,2,2,2,  # 98 - 9f
    2,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,2,2,2,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,2,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    2,2,2,2,2,2,2,2,  # e0 - e7
    2,2,2,2,2,2,2,2,  # e8 - ef
    2,2,2,2,2,2,2,2,  # f0 - f7
    2,2,2,2,2,2,2,2,  # f8 - ff
)

# Transition table indexed by current_state * class_factor + class.
ISO2022JP_ST = (
MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 20-27
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
)

# Escape-sequence detection only; no multi-byte character lengths needed.
ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
                      'class_factor': 10,
                      'state_table': ISO2022JP_ST,
                      'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
                      'name': "ISO-2022-JP",
                      'language': 'Japanese'}
# State machine for detecting ISO-2022-KR escape sequences.
# Byte -> character-class table (eight entries per row; see row comments).
ISO2022KR_CLS = (
    2,0,0,0,0,0,0,0,  # 00 - 07
    0,0,0,0,0,0,0,0,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,1,0,0,0,0,  # 18 - 1f
    0,0,0,0,3,0,0,0,  # 20 - 27
    0,4,0,0,0,0,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    0,0,0,5,0,0,0,0,  # 40 - 47
    0,0,0,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,0,0,0,0,0,  # 78 - 7f
    2,2,2,2,2,2,2,2,  # 80 - 87
    2,2,2,2,2,2,2,2,  # 88 - 8f
    2,2,2,2,2,2,2,2,  # 90 - 97
    2,2,2,2,2,2,2,2,  # 98 - 9f
    2,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,2,2,2,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,2,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    2,2,2,2,2,2,2,2,  # e0 - e7
    2,2,2,2,2,2,2,2,  # e8 - ef
    2,2,2,2,2,2,2,2,  # f0 - f7
    2,2,2,2,2,2,2,2,  # f8 - ff
)

# Transition table indexed by current_state * class_factor + class.
ISO2022KR_ST = (
MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
)

# Escape-sequence detection only; no multi-byte character lengths needed.
ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)

ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
                      'class_factor': 6,
                      'state_table': ISO2022KR_ST,
                      'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
                      'name': "ISO-2022-KR",
                      'language': 'Korean'}
| mit |
CompPhysics/ComputationalPhysics2 | doc/src/NeuralNet/figures/plotEnergies.py | 10 | 1487 | import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# Usage: python plotEnergies.py <datafile>
# The data file name must embed the particle count as ...N<num>E... so the
# matching Hartree-Fock reference energy can be selected below.
try:
    dataFileName = sys.argv[1]
except IndexError:
    print("USAGE: python plotEnergies.py 'filename'")
    sys.exit(0)

# Hartree-Fock reference energies (atomic units) for 2 and 6 particles.
# NOTE(review): these two constants are re-hardcoded in the if/elif below
# rather than reused.
HFEnergy3 = 3.161921401722216
HFEnergy6 = 20.71924844033019

# Extract the particle count from the file name (between 'N' and 'E').
numParticles = \
    int(dataFileName[dataFileName.find('N')+1:dataFileName.find('E')-1])

hfenergyFound = False
if (numParticles == 2):
    HFEnergy = 3.161921401722216
    hfenergyFound = True
elif (numParticles == 6):
    HFEnergy = 20.71924844033019
    hfenergyFound = True
else:
    hfenergyFound = False

# Column 0: energy per iteration; column 1: variance, converted in place to a
# standard deviation for the error bars.
data = np.loadtxt(dataFileName, dtype=np.float64)
data[:,1] = np.sqrt(data[:,1])
n = len(data[:,0])
x = np.arange(0,n)

fig = plt.figure()
if (hfenergyFound):
    # Horizontal dashed reference line at the Hartree-Fock energy.
    yline = np.zeros(n)
    yline.fill(HFEnergy)
    plt.plot(x, yline, 'r--', label="HF Energy")
msize = 1.0
ax = fig.add_subplot(111)
plt.errorbar(x, data[:,0], yerr=data[:,1], fmt='bo', markersize=msize, label="VMC Energy")
plt.fill_between(x, data[:,0]-data[:,1], data[:,0]+data[:,1])
plt.xlim(0,n)
plt.xlabel('Iteration')
plt.ylabel('$E_0[a.u]$')
plt.legend(loc='best')

# Inset zooming in on iterations [minSub, maxSub).
# NOTE(review): 'yline' below is only defined when hfenergyFound is True, so a
# particle count other than 2 or 6 raises NameError here -- confirm intended.
minSub = 80
maxSub = 120
inset_axes(ax, width="50%", height=1.0, loc='right')
plt.errorbar(x[minSub:maxSub], data[minSub:maxSub,0],
             yerr=data[minSub:maxSub,1], fmt='bo', markersize=msize, label="VMC "
             "Energy")
plt.plot(x[minSub:maxSub], yline[minSub:maxSub], 'r--', label="HF Energy")
plt.show()
| cc0-1.0 |
roubert/python-phonenumbers | python/phonenumbers/data/region_TM.py | 9 | 2114 | """Auto-generated file, do not edit by hand. TM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# NOTE: generated metadata (see module docstring) -- regenerate from the
# upstream data rather than editing these patterns by hand.
PHONE_METADATA_TM = PhoneMetadata(id='TM', country_code=993, international_prefix='810',
    general_desc=PhoneNumberDesc(national_number_pattern='[1-6]\\d{7}', possible_number_pattern='\\d{8}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:1(?:2\\d|3[1-9])|2(?:22|4[0-35-8])|3(?:22|4[03-9])|4(?:22|3[128]|4\\d|6[15])|5(?:22|5[7-9]|6[014-689]))\\d{5}', possible_number_pattern='\\d{8}', example_number='12345678'),
    mobile=PhoneNumberDesc(national_number_pattern='6[2-9]\\d{6}', possible_number_pattern='\\d{8}', example_number='66123456'),
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    preferred_international_prefix='8~10',
    national_prefix='8',
    national_prefix_for_parsing='8',
    number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2-\\3-\\4', leading_digits_pattern=['12'], national_prefix_formatting_rule='(8 \\1)'),
        NumberFormat(pattern='(\\d{2})(\\d{6})', format='\\1 \\2', leading_digits_pattern=['6'], national_prefix_formatting_rule='8 \\1'),
        NumberFormat(pattern='(\\d{3})(\\d)(\\d{2})(\\d{2})', format='\\1 \\2-\\3-\\4', leading_digits_pattern=['13|[2-5]'], national_prefix_formatting_rule='(8 \\1)')])
| apache-2.0 |
bikong2/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------

# Try to override the matplotlib configuration as early as possible
# NOTE(review): the bare except silences *any* error raised while importing
# gen_rst, not just ImportError.
try:
    import gen_rst
except:
    pass

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
              'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
              'sphinx.ext.linkcode',
              ]

# NOTE(review): autosummary_generate is assigned again a few lines below;
# the duplicate is harmless but redundant.
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# generate autosummary even if no references
autosummary_generate = True

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# Generate the plots for the gallery
plot_gallery = True

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
                      'google_analytics': True, 'surveybanner': False,
                      'sprintbanner': True}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_domain_indices = False

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'

# -- Options for LaTeX output ------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
                    u('scikit-learn developers'), 'manual'), ]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
latex_domain_indices = False

# Strip the ">>> " prompt markers from doctest blocks in rendered output.
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
    """autodoc-process-docstring hook: ensure a "<name>.examples" file exists.

    An empty placeholder is created for every documented object so that
    including the examples file never raises an inclusion error when an
    object has no examples.
    """
    examples_path = os.path.join(app.srcdir, "modules", "generated",
                                 "%s.examples" % name)
    if os.path.exists(examples_path):
        return
    # "touch" an empty file.
    with open(examples_path, 'w'):
        pass
def setup(app):
    """Sphinx extension entry point: register hooks and assets."""
    # Ensure empty .examples placeholders exist for documented objects.
    app.connect('autodoc-process-docstring', generate_example_rst)
    # JS that hides/shows the ">>>" prompts in code examples.
    app.add_javascript('js/copybutton.js')
# The following is used by sphinx.ext.linkcode to provide links to github
# ({revision}, {package}, {path} and {lineno} are filled in per documented
# object by the resolver returned from make_linkcode_resolve).
linkcode_resolve = make_linkcode_resolve('sklearn',
                                         u'https://github.com/scikit-learn/'
                                         'scikit-learn/blob/{revision}/'
                                         '{package}/{path}#L{lineno}')
| bsd-3-clause |
michath/MetaMonkey | js/src/builtin/make_intl_data.py | 10 | 8536 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" Usage: make_intl_data.py [language-subtag-registry.txt]
This script extracts information about mappings between deprecated and
current BCP 47 language tags from the IANA Language Subtag Registry and
converts it to JavaScript object definitions in IntlData.js. The definitions
are used in Intl.js.
The IANA Language Subtag Registry is imported from
http://www.iana.org/assignments/language-subtag-registry
and uses the syntax specified in
http://tools.ietf.org/html/rfc5646#section-3
"""
def readRegistryRecord(registry):
    """Yield each record of the IANA Language Subtag Registry as a dict.

    Records are separated by lines containing only '%%'.  A line without a
    colon continues the value of the most recently seen field; blank lines
    are ignored.  A trailing record without a closing '%%' is also yielded.
    """
    record = {}
    for raw in registry:
        line = raw.strip()
        if not line:
            continue
        if line == "%%":
            yield record
            record = {}
            continue
        if ":" in line:
            key, value = line.split(":", 1)
            key = key.strip()
            record[key] = value.strip()
        else:
            # Continuation line: append to the previous field's value.
            record[key] += " " + line
    if record:
        yield record
def readRegistry(registry):
    """ Reads IANA Language Subtag Registry and extracts information for Intl.js.

        Information extracted:
        - langTagMappings: mappings from complete language tags to preferred
          complete language tags
        - langSubtagMappings: mappings from subtags to preferred subtags
        - extlangMappings: mappings from extlang subtags to preferred subtags,
          with prefix to be removed
        Returns these three mappings as dictionaries, along with the registry's
        file date.

        We also check that mappings for language subtags don't affect extlang
        subtags and vice versa, so that CanonicalizeLanguageTag doesn't have
        to separate them for processing. Region codes are separated by case,
        and script codes by length, so they're unproblematic.
    """
    langTagMappings = {}
    langSubtagMappings = {}
    extlangMappings = {}
    # Track which subtags were seen as each type, for the conflict check below.
    languageSubtags = set()
    extlangSubtags = set()

    for record in readRegistryRecord(registry):
        if "File-Date" in record:
            # Header record: remember the registry date, nothing else.
            fileDate = record["File-Date"]
            continue
        if record["Type"] == "grandfathered":
            # Grandfathered tags don't use standard syntax, so
            # CanonicalizeLanguageTag expects the mapping table to provide
            # the final form for all.
            # For langTagMappings, keys must be in lower case; values in
            # the case used in the registry.
            tag = record["Tag"]
            if "Preferred-Value" in record:
                langTagMappings[tag.lower()] = record["Preferred-Value"]
            else:
                langTagMappings[tag.lower()] = tag
        elif record["Type"] == "redundant":
            # For langTagMappings, keys must be in lower case; values in
            # the case used in the registry.
            if "Preferred-Value" in record:
                langTagMappings[record["Tag"].lower()] = record["Preferred-Value"]
        elif record["Type"] in ("language", "script", "region", "variant"):
            # For langSubtagMappings, keys and values must be in the case used
            # in the registry.
            subtag = record["Subtag"]
            if record["Type"] == "language":
                languageSubtags.add(subtag)
            if "Preferred-Value" in record:
                if subtag == "heploc":
                    # The entry for heploc is unique in its complexity; handle
                    # it as special case below.
                    continue
                if "Prefix" in record:
                    # This might indicate another heploc-like complex case.
                    raise Exception("Please evaluate: subtag mapping with prefix value.")
                langSubtagMappings[subtag] = record["Preferred-Value"]
        elif record["Type"] == "extlang":
            # For extlangMappings, keys must be in the case used in the
            # registry; values are records with the preferred value and the
            # prefix to be removed.
            subtag = record["Subtag"]
            extlangSubtags.add(subtag)
            if "Preferred-Value" in record:
                preferred = record["Preferred-Value"]
                prefix = record["Prefix"]
                extlangMappings[subtag] = {"preferred": preferred, "prefix": prefix}
        else:
            # No other types are allowed by
            # http://tools.ietf.org/html/rfc5646#section-3.1.3
            assert False, "Unrecognized Type: {0}".format(record["Type"])

    # Check that mappings for language subtags and extlang subtags don't affect
    # each other.
    for lang in languageSubtags:
        if lang in extlangMappings and extlangMappings[lang]["preferred"] != lang:
            raise Exception("Conflict: lang with extlang mapping: " + lang)
    for extlang in extlangSubtags:
        if extlang in langSubtagMappings:
            raise Exception("Conflict: extlang with lang mapping: " + extlang)

    # Special case for heploc.
    langTagMappings["ja-latn-hepburn-heploc"] = "ja-Latn-alalc97"

    return {"fileDate": fileDate,
            "langTagMappings": langTagMappings,
            "langSubtagMappings": langSubtagMappings,
            "extlangMappings": extlangMappings}
def writeMappingsVar(intlData, dict, name, description, fileDate, url):
    """Write one mapping table to *intlData* as a JS variable definition.

    Emits a comment block (description, registry file date, source URL)
    followed by ``var <name> = {...};``.  String values become quoted JS
    strings; dict values become ``{preferred: ..., prefix: ...}`` records.
    """
    header = ["\n",
              "// {0}.\n".format(description),
              "// Derived from IANA Language Subtag Registry, file date {0}.\n".format(fileDate),
              "// {0}\n".format(url),
              "var {0} = {{\n".format(name)]
    for chunk in header:
        intlData.write(chunk)
    for key in sorted(dict):
        entry = dict[key]
        if isinstance(entry, basestring):
            value = '"{0}"'.format(entry)
        else:
            value = '{{preferred: "{0}", prefix: "{1}"}}'.format(entry["preferred"],
                                                                 entry["prefix"])
        intlData.write('    "{0}": {1},\n'.format(key, value))
    intlData.write("};\n")
def writeLanguageTagData(intlData, fileDate, url, langTagMappings, langSubtagMappings, extlangMappings):
    """Write all three language-tag mapping tables to the Intl data file."""
    tables = (
        (langTagMappings, "langTagMappings",
         "Mappings from complete tags to preferred values"),
        (langSubtagMappings, "langSubtagMappings",
         "Mappings from non-extlang subtags to preferred values"),
        (extlangMappings, "extlangMappings",
         "Mappings from extlang subtags to preferred values"),
    )
    for mapping, varName, description in tables:
        writeMappingsVar(intlData, mapping, varName, description, fileDate, url)
if __name__ == '__main__':
    import codecs
    import sys
    import urllib2
    url = "http://www.iana.org/assignments/language-subtag-registry"
    # With an argument, use a local registry file; otherwise download a fresh
    # copy of the registry and keep it on disk.
    if len(sys.argv) > 1:
        print("Always make sure you have the newest language-subtag-registry.txt!")
        registry = codecs.open(sys.argv[1], "r", encoding="utf-8")
    else:
        print("Downloading IANA Language Subtag Registry...")
        reader = urllib2.urlopen(url)
        text = reader.read().decode("utf-8")
        reader.close()
        # w+ so the freshly written file can be rewound and re-read below.
        registry = codecs.open("language-subtag-registry.txt", "w+", encoding="utf-8")
        registry.write(text)
        registry.seek(0)
    print("Processing IANA Language Subtag Registry...")
    data = readRegistry(registry)
    fileDate = data["fileDate"]
    langTagMappings = data["langTagMappings"]
    langSubtagMappings = data["langSubtagMappings"]
    extlangMappings = data["extlangMappings"]
    registry.close()
    print("Writing Intl data...")
    intlData = codecs.open("IntlData.js", "w", encoding="utf-8")
    intlData.write("// Generated by make_intl_data.py. DO NOT EDIT.\n")
    writeLanguageTagData(intlData, fileDate, url, langTagMappings, langSubtagMappings, extlangMappings)
    intlData.close()
| mpl-2.0 |
cliburn/flow | src/MainFrame.py | 2 | 29580 | import wx
import os
from my_io import Io
from VizFrame import VizFrame
from EditTable import EditFrame, Table
from dialogs import ParameterDialog, ChoiceDialog, RemoteProcessDialog
from numpy import array, where, greater, log10, log, clip, take, argsort, arcsinh, min, max, savetxt, loadtxt
from numpy.random import shuffle, randint, get_state
import transforms
from OboFrame import OboTreeFrame
from AnnotateFrame import annotateFrame
import sys
from net_wrapper import connect
class MainFrame(VizFrame):
    """Main user interface frame includes the control panel.

    Hosts the HDF5 group tree (left pane), a read-only log pane (right),
    and the menu/popup machinery for transforms, filters, ontology loading
    and remote job submission.
    """
    def __init__(self, parent=None, id=-1,
                 pos=wx.DefaultPosition,
                 title="Flow Control Window"):
        """creates main frame for user interface"""
        VizFrame.__init__(self, parent, id, pos, title)
        # Splitter: p1 (left) holds the tree, p2 (right) holds the log.
        self.sp = wx.SplitterWindow(self, -1)
        self.p1 = wx.Panel(self.sp, -1)
        self.p2 = wx.Panel(self.sp, -1)
        self.tree = wx.TreeCtrl(self.p1, -1, wx.DefaultPosition, wx.DefaultSize,
                                wx.TR_HAS_BUTTONS|wx.TR_EDIT_LABELS)
        self.tree.SetDimensions(0, 0, 100, 100)
        self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnTreeEdit, self.tree)
        self.root = self.tree.AddRoot('root')
        self.log = wx.TextCtrl(self.p2, -1, "",
                               style=wx.TE_RICH|wx.TE_MULTILINE|wx.TE_READONLY, size=(200,100))
        # current server URL
        self.server = 'http://localhost/'
        # NOTE(review): this menubar is created and attached below but never
        # has any menus appended to it — confirm whether that is intentional.
        menubar = wx.MenuBar()
        # NOTE(review): this local dict shadows the module-level 'transforms'
        # import for the remainder of __init__.
        transforms = {}
        transforms['clip'] = (self.OnClip, "Clip transform")
        transforms['scale'] = (self.OnScale, "Scale transform")
        transforms['normal_scale'] = (self.OnNormalScale, "Normal scale transform")
        transforms['linear'] = (self.OnLinear, "Linear transform")
        transforms['quadratic'] = (self.OnQuadratic, "Quadtratic transform")
        transforms['log'] = (self.OnLog, "Log10 transform")
        transforms['logn'] = (self.OnLogn, "LogN transform")
        transforms['biexponential'] = (self.OnBiexponential, "Biexponential transform")
        transforms['logicle'] = (self.OnLogicle, "Logicle transform")
        transforms['heyperlog'] = (self.OnHyperlog, "Hyperlog transform")
        transforms['arcsinh'] = (self.OnArcsinh, "Arcsinh transform")
        # transform menu: one entry per registered transform handler
        self.transformMenu = wx.Menu()
        for i in transforms.keys():
            menuitem = self.transformMenu.Append(-1, transforms[i][1])
            self.Bind(wx.EVT_MENU, transforms[i][0], menuitem)
        # filter menu
        self.filterMenu = wx.Menu()
        self.channels = self.filterMenu.Append(-1, "Sub-Sample by channel")
        self.filterMenu.AppendSeparator()
        self.events_index = self.filterMenu.Append(-1, "Sub-Sample events: index")
        self.events_random = self.filterMenu.Append(-1, "Sub-Sample events: random choice without replacement")
        self.events_replace = self.filterMenu.Append(-1, "Sub-Sample events: random choice with replacement")
        # bind filter menuitems
        self.Bind(wx.EVT_MENU, self.OnChannels, self.channels)
        self.Bind(wx.EVT_MENU, self.OnEventsIndex, self.events_index)
        self.Bind(wx.EVT_MENU, self.OnEventsRandom, self.events_random)
        self.Bind(wx.EVT_MENU, self.OnEventsReplace, self.events_replace)
        # ontology menu
        self.ontologyMenu = wx.Menu()
        loadOntology = self.ontologyMenu.Append(-1, "Load OBO file")
        self.Bind(wx.EVT_MENU, self.OnLoadOntology, loadOntology)
        # remote process menu
        self.remoteProcessMenu = wx.Menu()
        submit_job = self.remoteProcessMenu.Append(-1, "Submit job to remote process")
        submit_batch = self.remoteProcessMenu.Append(-1, "Submit batch jobs to remote process")
        self.Bind(wx.EVT_MENU, self.OnSubmitJob, submit_job)
        self.Bind(wx.EVT_MENU, self.OnSubmitBatch, submit_batch)
        self.SetMenuBar(menubar)
        #statusbar = self.CreateStatusBar()
        # createEdit needs to be called before CreatePopup
        # to ensure menu's wx.IDs are consistent
        self.edit = self.CreateEdit()
        # CreatePopup() also creates self.io, relied on a few lines below.
        self.popup = self.CreatePopup()
        self.zPopMenuItem = None
        self.Bind(wx.EVT_CONTEXT_MENU, self.OnShowPopup)
        self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnShowPopup, self.tree)
        self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnDisplayItem, self.tree)
        # default dir for OBO files
        self.defaultOBOdir = "."
        self.exportDir = self.io.defaultDir.replace('results','data')
        # flag for saved
    def DoLayout(self):
        """Lay out the splitter, tree and log panes with box sizers."""
        # layout code
        self.box = wx.BoxSizer(wx.VERTICAL)
        panelsizer = wx.BoxSizer(wx.VERTICAL)
        box1 = wx.BoxSizer(wx.VERTICAL)
        box2 = wx.BoxSizer(wx.VERTICAL)
        panelsizer.Add(self.sp, 1, wx.EXPAND, 0)
        box1.Add(self.tree, 1, wx.EXPAND, 0)
        box2.Add(self.log,1, wx.EXPAND)
        self.p1.SetSizer(box1)
        self.p2.SetSizer(box2)
        self.SetSizer(panelsizer)
        self.box.Layout()
        self.Layout()
        self.sp.SplitVertically(self.p1, self.p2)
        self.sp.SetMinimumPaneSize(20)
    # Ontology
    def OnLoadOntology(self, event):
        """Load an ontology in OBO format"""
        dlg = wx.FileDialog(self,
                            wildcard="OBO files (*.obo)|*.obo|All files (*.*)|*.*",
                            defaultDir=self.defaultOBOdir,
                            style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            file = dlg.GetPath()
            # Remember the directory so the next dialog opens in the same place.
            self.defaultOBOdir = os.path.split(file)[0]
            self.model.obofile = file
        dlg.Destroy()
    # Remote process
    def OnSubmitJob(self, event):
        """Job submission dialog."""
        if self.model.ready:
            _data = self.model.GetCurrentData()
            data = _data[:]
            data_path = _data._v_pathname
            filename = self.model.savedAs
        else:
            wx.MessageBox("No data found")
            return
        dlg = RemoteProcessDialog(self.server, filename, data_path, data.shape)
        if dlg.ShowModal() == wx.ID_OK:
            self.server = dlg.server_ctrl.GetValue()
            self.user = dlg.user_ctrl.GetValue()
            self.password = dlg.password_ctrl.GetValue()
            self.filename = dlg.file_ctrl.GetValue()
            self.data = dlg.data_ctrl.GetValue()
            self.job = dlg.job_ctrl.GetValue()
            print self.server, self.user, self.password, self.filename, self.data, self.job
            session = connect(self.server, self.user, self.password)
            # NOTE(review): self.data here is the dialog text value, not the
            # array loaded above — confirm which payload the server expects.
            session.send_data(self.data)
            #TODO add sending source file name for use later
            #session.send_filename(self.filename) #NOT IMPLEMENTED
            job_def = parsejob(self.job)
            print job_def
            session.send_job(job_def)
        dlg.Destroy()
    def OnSubmitBatch(self, event):
        """Batch job submission dialog."""
        pass
    #        if self.model.ready:
    #            data = self.model.GetCurrentData()[:]
    #        else:
    #            wx.MessageBox("No data found")
    #            return
    #        dlg = RemoteProcessDialog(self.model.server)
    #        if dlg.ShowModal() == wx.ID_OK:
    #            job = dlg.job
    #            self.model.server = dlg.server_ctrl.GetValue()
    #        print data.shape, job, self.model.server
    #        dlg.Destroy()
    # Filters
    def OnChannels(self, event):
        """Filter the current data down to a user-chosen subset of channels."""
        if self.model.ready:
            cs = self.model.GetCurrentData().getAttr('fields')
        else:
            cs = []
        dlg = ChoiceDialog(cs)
        if dlg.ShowModal() == wx.ID_OK:
            indices = dlg.GetSelections()
            name = self.model.GetCurrentGroup()._v_pathname
            self.model.FilterOnCols('FilterOnCols', indices)
            self.model.AddHistory(('FilterOnCols', [name, indices]))
        dlg.Destroy()
    def OnEventsIndex(self, event):
        """Sub-sample events by a start:stop:stride index range."""
        data = array(self.model.GetCurrentData()[:])
        n = data.shape[0]
        inputs = {}
        dlg = ParameterDialog([('start', 'IntValidator', str(0)),
                               ('stop', 'IntValidator', str(n)),
                               ('stride', 'IntValidator', str(1))],
                              inputs,
                              'Returns data[start:stop:stride, :]')
        if dlg.ShowModal() == wx.ID_OK:
            indices = range(inputs['start'], inputs['stop'], inputs['stride'])
            name = self.model.GetCurrentGroup()._v_pathname
            self.model.FilterOnRows('SampleOnEventsIndex', indices)
            self.model.AddHistory(('SampleOnRows', [name, inputs]))
        dlg.Destroy()
    def OnEventsRandom(self, event):
        """Sub-sample n events at random without replacement."""
        data = array(self.model.GetCurrentData()[:])
        n = data.shape[0]
        inputs = {}
        dlg = ParameterDialog([('n', 'IntValidator', str(n))],
                              inputs,
                              'Random choice of n events without replacement')
        if dlg.ShowModal() == wx.ID_OK:
            indices = range(n)
            shuffle(indices)
            indices = indices[:inputs['n']]
            name = self.model.GetCurrentGroup()._v_pathname
            self.model.FilterOnRows('SampleOnEventsRandom', indices)
            # RNG state is recorded in the history for reproducibility.
            self.model.AddHistory(('SampleOnRows', [name, inputs, ('state', get_state())]))
        dlg.Destroy()
    def OnEventsReplace(self, event):
        """Sub-sample n events at random with replacement."""
        data = array(self.model.GetCurrentData()[:])
        n = data.shape[0]
        inputs = {}
        dlg = ParameterDialog([('n', 'IntValidator', str(n))],
                              inputs,
                              'Random choice of n events with replacement')
        if dlg.ShowModal() == wx.ID_OK:
            indices = randint(0, n, inputs['n'])
            name = self.model.GetCurrentGroup()._v_pathname
            self.model.FilterOnRows('SampleOnEventsReplace', indices)
            self.model.AddHistory(('SampleOnRows', [name, inputs, ('state', get_state())]))
        dlg.Destroy()
    # Transforms
    def GetIndices(self, dlg2):
        """Show the channel chooser, then the transform-parameter dialog dlg2.

        Returns the selected channel indices.
        NOTE(review): if the channel dialog is cancelled, 'indices' is never
        assigned and the final return raises NameError — confirm whether
        cancellation is expected to be impossible here.
        """
        inputs = {}
        if self.model.ready:
            cs = self.model.GetCurrentData().getAttr('fields')
        else:
            cs = []
        dlg = ChoiceDialog(cs)
        if dlg.ShowModal() == wx.ID_OK:
            indices = dlg.GetSelections()
            if dlg2.ShowModal() == wx.ID_OK:
                pass
            dlg2.Destroy()
        dlg.Destroy()
        return indices
    def OnClip(self, event):
        """Apply a clip transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('lower', 'FloatValidator', str(0.0)),
                                ('upper', 'FloatValidator', str(1024.0))],
                               inputs,
                               'f(x) = clip(x, lower, upper)')
        indices = self.GetIndices(dlg2)
        self.model.ClipTransform(indices, inputs)
    def OnScale(self, event):
        """Apply a min/max rescaling transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('lower', 'FloatValidator', str(0.0)),
                                ('upper', 'FloatValidator', str(1024.0))],
                               inputs,
                               'f(x) = lower + (upper-lower)*(x - min(x))/(max(x)-min(x))')
        indices = self.GetIndices(dlg2)
        self.model.ScaleTransform(indices, inputs)
    def OnNormalScale(self, event):
        """Apply a z-score (mean/std) transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([],
                               inputs,
                               'f(x) = (x - mean(x))/std(x)')
        indices = self.GetIndices(dlg2)
        self.model.NormalScaleTransform(indices, inputs)
    def OnLinear(self, event):
        """Apply a linear transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('a', 'FloatValidator', str(0)),
                                ('b', 'FloatValidator', str(1))],
                               inputs,
                               'f(x) = a + b*x')
        indices = self.GetIndices(dlg2)
        self.model.LinearTransform(indices, inputs)
    def OnQuadratic(self, event):
        """Apply a quadratic transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('a', 'FloatValidator', str(0)),
                                ('b', 'FloatValidator', str(1)),
                                ('c', 'FloatValidator', str(0))],
                               inputs,
                               'f(x) = a*x^2 + b*x + c')
        indices = self.GetIndices(dlg2)
        self.model.QuadraticTransform(indices, inputs)
    def OnLog(self, event):
        """Apply a log10 transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('l', 'FloatValidator', str(1)),
                                ('r', 'FloatValidator', str(1)),
                                ('d', 'FloatValidator', str(1))],
                               inputs,
                               'f(x) = r/d * log10(x) for x>l, 0 otherwise')
        indices = self.GetIndices(dlg2)
        self.model.LogTransform(indices, inputs)
    def OnLogn(self, event):
        """Apply a natural-log transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('l', 'FloatValidator', str(1)),
                                ('r', 'FloatValidator', str(1)),
                                ('d', 'FloatValidator', str(1))],
                               inputs,
                               'f(x) = r/d * log(x) for x>l, 0 otherwise')
        indices = self.GetIndices(dlg2)
        self.model.LognTransform(indices, inputs)
    def OnBiexponential(self, event):
        """Apply a biexponential transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('a', 'FloatValidator', str(0.5)),
                                ('b', 'FloatValidator', str(1.0)),
                                ('c', 'FloatValidator', str(0.5)),
                                ('d', 'FloatValidator', str(1.0)),
                                ('f', 'FloatValidator', str(0))],
                               inputs,
                               'finv(x) = a*exp(b*x) - c*exp(d*x) + f')
        indices = self.GetIndices(dlg2)
        self.model.BiexponentialTransform(indices, inputs)
    def OnLogicle(self, event):
        """Apply a logicle transform to the chosen channels."""
        inputs = {}
        data = array(self.model.GetCurrentData()[:])
        T = 262144
        # find r as 5th percentile of negative values for each column
        r = 0.05
        dlg2 = ParameterDialog([('T', 'FloatValidator', str(T)),
                                ('M', 'FloatValidator', str(4.5)),
                                ('r', 'FloatValidator', str(r))],
                               inputs,
                               'finv(x) = T*exp(-(m-w))*(exp(x-w)-p^2*exp(-(x-w)/p)+p^2-1')
        indices = self.GetIndices(dlg2)
        self.model.LogicleTransform(indices, inputs)
    def OnHyperlog(self, event):
        """Apply a hyperlog transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('b', 'FloatValidator', str(100.0)),
                                ('d', 'FloatValidator', str(5.0)),
                                ('r', 'FloatValidator', str(1024.0))],
                               inputs,
                               'finv(x) = sgn(x)*10^(x*sgn(x)*d/r) + b*(d/r)*y - sgn(x)')
        indices = self.GetIndices(dlg2)
        self.model.HyperlogTransform(indices, inputs)
    def OnArcsinh(self, event):
        """Apply an arcsinh transform to the chosen channels."""
        inputs = {}
        dlg2 = ParameterDialog([('a', 'FloatValidator', str(1.0)),
                                ('b', 'FloatValidator', str(1.0)),
                                ('c', 'FloatValidator', str(0.0))],
                               inputs,
                               'x = arcsinh(a+b*x) + c')
        indices = self.GetIndices(dlg2)
        self.model.ArcsinhTransform(indices, inputs)
    def ModelUpdate(self, model):
        """Rebuild the tree view from the model's HDF5 hierarchy."""
        VizFrame.ModelUpdate(self, model)
        self.io.ModelUpdate(self.model)
        self.tree.DeleteAllItems()
        self.root = self.tree.AddRoot('root')
        self.treeItems = []
        self.treeItems.append(self.root)
        self.tree.SetItemPyData(self.root,self.model.hdf5.root)
        # Top-level leaves first, then recurse into each sub-group.
        for leaf in self.model.hdf5.root._v_leaves.keys():
            item = self.tree.AppendItem(self.root,leaf)
            self.tree.SetItemPyData(item,self.model.hdf5.root._v_leaves[leaf])
        for subGroup in self.model.hdf5.root._v_groups:
            self.UpdateTree(self.model.hdf5.root._v_groups[subGroup],self.root)
        for item in self.treeItems:
            #    self.tree.Expand(item)
            if self.tree.GetItemPyData(item) == self.model.GetCurrentGroup():
                self.tree.SelectItem(item)
        # sort tree
        self.tree.SortChildren(self.root)
        self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnTreeActivated, self.tree)
        self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnShowPopup, self.tree)
    def TreeItemsDefaultColor(self, parent=None):
        """Recursively reset all tree item labels to black."""
        if parent is None:
            parent = self.root
        item, cookie = self.tree.GetFirstChild(parent)
        while item:
            self.tree.SetItemTextColour(item, 'black')
            self.TreeItemsDefaultColor(item)
            item, cookie = self.tree.GetNextChild(parent, cookie)
    def UpdateTree(self, newH5Group, curTreeGroup, data = None):
        """Recursively mirror an HDF5 group (and its leaves) into the tree."""
        self.curData = data
        newTreeGroup = self.tree.AppendItem(curTreeGroup, newH5Group._v_name)
        self.treeItems.append(newTreeGroup)
        self.tree.SetItemPyData(newTreeGroup,newH5Group)
        #        if self.curData is not None and 'data' not in newH5Group._v_leaves.keys():
        #            item = self.tree.AppendItem(newTreeGroup, 'data')
        #            self.tree.SetItemPyData(item,self.curData)
        for leaf in newH5Group._v_leaves.keys():
            # Remember the most recent 'data' leaf so children can inherit it.
            if leaf is 'data':
                self.curData = newH5Group._v_leaves[leaf]
            item = self.tree.AppendItem(newTreeGroup,leaf)
            self.tree.SetItemPyData(item,newH5Group._v_leaves[leaf])
        for subGroup in newH5Group._v_groups.keys():
            self.UpdateTree(newH5Group._v_groups[subGroup],newTreeGroup, self.curData)
    def OnTreeActivated(self,event):
        """Select the activated node in the model and highlight it in red."""
        self.log.Clear()
        obj = self.tree.GetItemPyData(event.GetItem())
        self.log.WriteText( self.model.TextInfo(obj) + "\n")
        self.model.SelectGroup(obj)
        self.TreeItemsDefaultColor()
        self.tree.SetItemTextColour(event.GetItem(),'red')
        # Toggle the "Edit Z Labels" popup entry depending on whether the
        # current selection carries Z labels.
        if self.model.IsZ():
            if self.zPopMenuItem is None:
                self.zPopMenuItem = self.popup.Append(-1, "Edit Z Labels")
                self.Bind(wx.EVT_MENU, self.OnZEdit, self.zPopMenuItem)
        else:
            if self.zPopMenuItem is not None:
                self.popup.Remove(self.zPopMenuItem.GetId() )
                self.Unbind(wx.EVT_MENU, self.zPopMenuItem)
                self.zPopMenuItem.Destroy()
                self.zPopMenuItem = None
    def OnTreeRightClick(self,event):
        """Placeholder right-click handler (no-op for both node kinds)."""
        if self.tree.GetItemPyData(event.GetItem())._c_classId == "GROUP":
            pass
        elif self.tree.GetItemPyData(event.GetItem())._c_classId == "ARRAY":
            pass
    def OnTreeEdit(self,event):
        """Rename the underlying HDF5 node when a tree label edit finishes."""
        if event.GetItem() == self.root:
            # The synthetic root node must keep its name.
            event.Veto()
        else:
            item = self.tree.GetItemPyData(event.GetItem())
            label = event.GetLabel()
            if label:
                item._f_rename(label)
    def OnZEdit(self, event):
        """Open the OBO tree frame for editing Z labels."""
        self.obo = OboTreeFrame(self.model, self)
        self.obo.Show()
    def CreateEdit(self):
        """Create (once) and return the edit menu shared by the popup."""
        try:
            # Already built on a previous call; reuse so menu item IDs stay
            # stable between the edit menu and the popup.
            return self.edit
        except:
            menu = wx.Menu()
            self.popupItems = {}
            self.pasteItem = None
            for str in ['Edit','Cut','Copy','Paste', 'New Group', 'Rename', 'Delete', 'Export', 'Annotate', 'Batch', 'Remote Process']:
                self.popupItems[str] = menu.Append(-1, str)
                self.Bind(wx.EVT_MENU, self.OnPopupSelected, self.popupItems[str])
            # Paste is disabled until something has been cut/copied.
            self.popupItems['Paste'].Enable(False)
            return menu
    def OnDisplayItem(self, event):
        """Open a table editor for the double-clicked (non-group) node."""
        if self.tree.GetItemPyData(event.GetItem())._c_classId != 'GROUP':
            table = EditFrame(self.tree.GetItemPyData(self.tree.GetSelection()))
            table.Show()
    def CreatePopup(self):
        """Build the context menu, attaching the io plugin import submenu."""
        menu = self.CreateEdit()
        self.io = Io(self.model, self)
        self.io.LoadPlugins()
        self.openMenu = menu.AppendMenu(-1,
                                        "Import...",
                                        self.io.BuildOpenMenu())
        return menu
    def OnShowPopup(self, event):
        """Enable/disable popup entries based on the clicked node, then show it."""
        if self.tree.GetItemPyData(event.GetItem())._c_classId == "GROUP":
            self.popupItems['Edit'].Enable(False)
            self.popupItems['Annotate'].Enable(True)
        else:
            self.popupItems['Edit'].Enable(True)
            self.popupItems['Annotate'].Enable(False)
        # 'Batch' is only meaningful for nodes that recorded a batch operation.
        if 'batch' in self.tree.GetItemPyData(event.GetItem())._v_attrs:
            self.popupItems['Batch'].Enable(True)
        else:
            self.popupItems['Batch'].Enable(False)
        self.tree.PopupMenu(self.popup)
    def OnPopupSelected(self, event):
        """Route a popup menu click to OnMenuSelect."""
        item = self.popup.FindItemById(event.GetId())
        self.OnMenuSelect(item)
    def OnMenuSelect(self, item):
        """Dispatch an edit/popup menu item by its label text."""
        text = item.GetText()
        if text == 'Edit':
            table = EditFrame(self.tree.GetItemPyData(self.tree.GetSelection()))
            table.Show()
        elif text == 'New Group':
            self.OnNewGroup()
        elif text == 'Copy':
            self.OnCopy()
        elif text == 'Cut':
            self.OnCut()
        elif text == 'Paste':
            self.OnPaste()
        elif text == 'Rename':
            self.OnRename()
        elif text == 'Delete':
            self.OnDelete()
        elif text == 'Export':
            self.OnExport()
        elif text == 'Annotate':
            self.OnAnnotate()
        elif text == 'Batch':
            self.OnBatch()
        elif text == 'Remote Process':
            self.OnSubmitJob(None)
        else:
            wx.MessageBox(text)
    def OnBatch(self):
        """Re-apply the current node's recorded batch operation to other groups."""
        source = self.model.GetCurrentData()
        choices = self.model.GetDataGroups()
        #print source._v_pathname
        try:
            # NOTE(review): looks like this should be
            # self.model.GetCurrentGroup(); the bare except hides any error.
            choices.remove(self.GetCurrentGroup()._v_pathname)
        except:
            pass
        dialog = wx.MultiChoiceDialog(None, "Chose groups to apply " +source.getAttr('batch')[0] + " to",
                                      "choices", choices)
        batchOp = source.getAttr('batch')
        # Map a recorded batch-op name to the model method that replays it.
        transforms = { 'scale' : self.model.ScaleTransform,
                       'normal scale' : self.model.NormalScaleTransform,
                       'clip' : self.model.ClipTransform,
                       'linear' : self.model.LinearTransform,
                       'quadradic' : self.model.QuadraticTransform,
                       'log' : self.model.LogTransform,
                       'logn': self.model.LognTransform,
                       'biexponential':self.model.BiexponentialTransform,
                       'logicle': self.model.LogicleTransform,
                       'heyerlog': self.model.HyperlogTransform,
                       'arcsin': self.model.ArcsinhTransform }
        if dialog.ShowModal() == wx.ID_OK:
            print [choices[i] for i in dialog.GetSelections()]
            if batchOp[0] == 'gate':
                self.OnBatchGate(source, [choices[i] for i in dialog.GetSelections()])
            elif batchOp[0] == 'qgate':
                self.OnBatchGate(source, [choices[i] for i in dialog.GetSelections()], quad=True)
            elif batchOp[0] in transforms.keys():
                for i in dialog.GetSelections():
                    self.model.SelectGroupByPath(choices[i])
                    transforms[batchOp[0]](batchOp[1], batchOp[2])
            else:
                print source.getAttr('batch')[0]
    def OnBatchGate(self, source, dest, quad=False):
        """Replay a gate (quadrant or polygon) from source onto each dest group.

        NOTE(review): the check 'if x and y in ...fields' parses as
        'x and (y in fields)' — any truthy x passes regardless of whether x
        is a valid field; confirm the intent was '(x in fields) and (y in
        fields)'.
        """
        x,y = source.getAttr('batch')[1]
        if quad:
            for group in dest:
                self.model.SelectGroupByPath(group)
                if x and y in self.model.GetCurrentData().getAttr('fields'):
                    xline, yline = source.getAttr('batch')[2]
                    # Drive a hidden 2D density window to reproduce the gate.
                    window = self.Visuals['2D Density'](self, show=False)
                    window.AttachModel(self.model)
                    window.radioX.SetStringSelection(x)
                    window.radioY.SetStringSelection(y)
                    window.OnControlSwitch(-1)
                    window.OnAddQuadGate(-1)
                    window.widget.vline._x = [xline,xline]
                    window.widget.hline._y = [yline,yline]
                    window.Gate(-1)
                    window.Destroy()
                else:
                    dialog = wx.MessageDialog(self, "Unable to find matching fields for " + group, style=wx.OK)
                    if dialog.ShowModal() == wx.ID_OK:
                        pass
        else:
            for group in dest:
                self.model.SelectGroupByPath(group)
                if x and y in self.model.GetCurrentData().getAttr('fields'):
                    window = self.Visuals['2D Density'](self, show=False)
                    window.AttachModel(self.model)
                    window.radioX.SetStringSelection(x)
                    window.radioY.SetStringSelection(y)
                    window.OnControlSwitch(-1)
                    window.OnAddPolyGate(-1)
                    window.widget.p.poly.verts = list(source.getAttr('batch')[2])
                    window.widget.p.poly_changed(window.widget.p.poly)
                    window.Gate(-1)
                    window.Destroy()
                else:
                    dialog = wx.MessageDialog(self, "Unable to find matching fields for " + group, style=wx.OK)
                    if dialog.ShowModal() == wx.ID_OK:
                        pass
    def OnAnnotate(self):
        """Open the annotation frame for the selected node."""
        selection = self.tree.GetSelection()
        item = self.tree.GetItemPyData(selection)
        txt = self.tree.GetItemText(selection)
        current = self.model.GetCurrentAnnotation()
        if current is not None:
            # Convert the stored 2-column array into (key, value) pairs.
            notes = map(lambda x, y: (x,y), current[:,0],current[:,1])
        else:
            notes = None
        annotate = annotateFrame(item, self.model, txt, notes)
        annotate.Show()
    def OnRename(self):
        """Begin an in-place label edit for the selected (non-root) node."""
        selection = self.tree.GetSelection()
        if selection != self.root:
            self.tree.EditLabel(selection)
            item = self.tree.GetItemPyData(selection)
            label = self.tree.GetItemText(selection)
            if label:
                item._f_rename(label)
    def OnDelete(self):
        """Delete the selected node from the model."""
        self.model.deleteNode(self.tree.GetItemPyData(self.tree.GetSelection()))
        self.model.update()
    def OnCut(self):
        """Cut = copy with a deferred delete on paste."""
        self.OnCopy(cut=True)
    def OnCopy(self, cut=False):
        """Remember the selected node as the copy/cut source and enable Paste."""
        self._cut = cut
        self._source = self.tree.GetItemPyData(self.tree.GetSelection())
        self.popupItems['Paste'].Enable(True)
    def OnPaste(self):
        """Copy the remembered node into the current group; delete it if cut."""
        self.model.copyNode(self._source, self.model.GetCurrentGroup())
        if self._cut:
            self.model.deleteNode(self._source)
            self._cut = False
        self.popupItems['Paste'].Enable(False)
        self.model.update()
    def OnNewGroup(self):
        """Prompt for a name and create a new group in the model."""
        name = wx.GetTextFromUser('Name for new group', caption='Create New Group', default_value='',
                                  parent = None)
        if name is not '':
            self.model.NewGroup(name)
            self.model.update()
    def OnExport(self):
        #        window = DBDialog(self.model)
        #        window.Show()
        """Export data associated with current group."""
        if self.model.ready:
            x = self.model.GetCurrentData()[:]
            cs = self.model.GetCurrentData().getAttr('fields')
            print cs
        else:
            wx.MessageBox("No data found")
            return
        dlg = ChoiceDialog(cs)
        if dlg.ShowModal() == wx.ID_OK:
            indices = dlg.GetSelections()
        dlg.Destroy()
        wildcard = "Tab-delimited (*.out)|*.out"
        dialog = wx.FileDialog(parent=self,
                               wildcard=wildcard,
                               message="Export Data",
                               defaultDir=self.exportDir,
                               style=wx.SAVE|wx.OVERWRITE_PROMPT)
        if dialog.ShowModal() == wx.ID_OK:
            path = dialog.GetPath()
            self.exportDir = os.path.split(path)[0]
            if path.split('.')[-1] in ['out']:
                ext = ''
            else:
                ext = '.out'
            datafile = path + ext
            # Data goes to .out; the matching channel names go to a .txt
            # file alongside it.
            savetxt(datafile, x[:,indices], delimiter='\t')
            savetxt(datafile.replace('out', 'txt'),
                    array(cs)[indices], fmt='%s')
            #            fo = open(path + ext, 'w')
            #            fo.write('\n'.join(['\t'.join(map(str, item))
            #                                for item in x[:,indices]]))
            #            fo.close()
            #            # store associated headers
            #            fo = open(path + '.txt', 'w')
            #            fo.write('\n'.join([header for header in array(cs)[indices]]))
            #            fo.close()
        dialog.Destroy()
def parsejob(jobfile):
    """Parse a tab-delimited job description file.

    Each non-empty line has the form ``type<TAB>args[<TAB>extra ...]``.
    Surrounding single- and double-quote characters are stripped from every
    field.

    Args:
        jobfile: path to the job description file.

    Returns:
        A list of ``(type, args, more)`` tuples where ``more`` is the
        (possibly empty) list of remaining fields on the line.
    """
    results = []
    # 'with' guarantees the handle is closed even if parsing raises
    # (the original leaked the handle on error, and crashed with an
    # IndexError on blank lines / a trailing newline at EOF).
    with open(jobfile, 'r') as handle:
        for line in handle:
            line = line.replace('\n', '')
            if not line:
                # Skip blank lines instead of crashing on them.
                continue
            # Strip quotes from every field in one pass.
            fields = [part.replace("'", '').replace('"', '')
                      for part in line.split('\t')]
            results.append((fields[0], fields[1], fields[2:]))
    print(results)
    return results
def strip_quotes(str):
    """Return *str* with every single- and double-quote character removed."""
    # Filter character by character rather than chaining replace() calls.
    return ''.join(ch for ch in str if ch not in '\'"')
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a developer-local job file and print the result.
    print parsejob('/home/jolly/foo.txt')
| gpl-3.0 |
SivagnanamCiena/coding-skills-sample-code | coding102-REST-python/apic-em-helloworld.py | 2 | 2395 | # Getting started with APIC-EM APIs
# First Call to APIC-EM REST API - "Hello World"
# * THIS SAMPLE APPLICATION AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY
# * OF ANY KIND BY CISCO, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
# * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
# * PURPOSE, NONINFRINGEMENT, SATISFACTORY QUALITY OR ARISING FROM A COURSE OF
# * DEALING, LAW, USAGE, OR TRADE PRACTICE. CISCO TAKES NO RESPONSIBILITY
# * REGARDING ITS USAGE IN AN APPLICATION, AND IT IS PRESENTED ONLY AS AN
# * EXAMPLE. THE SAMPLE CODE HAS NOT BEEN THOROUGHLY TESTED AND IS PROVIDED AS AN
# * EXAMPLE ONLY, THEREFORE CISCO DOES NOT GUARANTEE OR MAKE ANY REPRESENTATIONS
# * REGARDING ITS RELIABILITY, SERVICEABILITY, OR FUNCTION. IN NO EVENT DOES
# * CISCO WARRANT THAT THE SOFTWARE IS ERROR FREE OR THAT CUSTOMER WILL BE ABLE
# * TO OPERATE THE SOFTWARE WITHOUT PROBLEMS OR INTERRUPTIONS. NOR DOES CISCO
# * WARRANT THAT THE SOFTWARE OR ANY EQUIPMENT ON WHICH THE SOFTWARE IS USED WILL
# * BE FREE OF VULNERABILITY TO INTRUSION OR ATTACK. THIS SAMPLE APPLICATION IS
# * NOT SUPPORTED BY CISCO IN ANY MANNER. CISCO DOES NOT ASSUME ANY LIABILITY
# * ARISING FROM THE USE OF THE APPLICATION. FURTHERMORE, IN NO EVENT SHALL CISCO
# * OR ITS SUPPLIERS BE LIABLE FOR ANY INCIDENTAL OR CONSEQUENTIAL DAMAGES, LOST
# * PROFITS, OR LOST DATA, OR ANY OTHER INDIRECT DAMAGES EVEN IF CISCO OR ITS
# * SUPPLIERS HAVE BEEN INFORMED OF THE POSSIBILITY THEREOF.-->
# The requests library is used for all REST calls
# (http://docs.python-requests.org/en/latest/index.html).
import requests
# All REST calls use the APIC-EM controller URL as the base, so define it
# once here (Cisco's public DevNet sandbox controller).
controller = "https://sandboxapic.cisco.com"
# Get Devices
# This API returns a page of the devices in the network (routers and
# switches) — here items 1 through 3.
get_devices_url = controller + '/api/v0/network-device/1/3'
# Send the request with the GET method; a Response object carrying the
# status code and JSON body is returned.
# NOTE(review): verify=False disables TLS certificate validation — acceptable
# only against the sandbox, never in production.
get_devices_response = requests.get(get_devices_url, verify=False)
# Print the raw response body so we can inspect it.
print ("Devices = ")
print (get_devices_response.text)
| apache-2.0 |
WillisXChen/django-oscar | tests/unit/offer/availability_tests.py | 50 | 3444 | from decimal import Decimal as D
import datetime
from django.test import TestCase
from oscar.apps.offer import models
from oscar.core.compat import get_user_model
User = get_user_model()
class TestADateBasedConditionalOffer(TestCase):
    """Availability of an offer bounded by a start/end datetime window."""

    def setUp(self):
        self.start = datetime.date(2011, 1, 1)
        self.end = datetime.date(2011, 2, 1)
        self.offer = models.ConditionalOffer(
            start_datetime=self.start, end_datetime=self.end)

    def test_is_available_during_date_range(self):
        probe = datetime.date(2011, 1, 10)
        self.assertTrue(self.offer.is_available(test_date=probe))

    def test_is_inactive_before_date_range(self):
        probe = datetime.date(2010, 3, 10)
        self.assertFalse(self.offer.is_available(test_date=probe))

    def test_is_inactive_after_date_range(self):
        probe = datetime.date(2011, 3, 10)
        self.assertFalse(self.offer.is_available(test_date=probe))

    def test_is_active_on_end_datetime(self):
        # The end datetime itself is inclusive.
        self.assertTrue(self.offer.is_available(test_date=self.end))
class TestAConsumptionFrequencyBasedConditionalOffer(TestCase):
    """Availability of an offer capped at a global number of applications."""

    def setUp(self):
        # The offer may be applied at most 4 times in total.
        self.offer = models.ConditionalOffer(max_global_applications=4)

    def test_is_available_with_no_applications(self):
        self.assertTrue(self.offer.is_available())

    def test_is_available_with_fewer_applications_than_max(self):
        self.offer.num_applications = 3
        self.assertTrue(self.offer.is_available())

    def test_is_inactive_with_equal_applications_to_max(self):
        self.offer.num_applications = 4
        self.assertFalse(self.offer.is_available())

    def test_is_inactive_with_more_applications_than_max(self):
        # Bug fix: this previously set num_applications = 4, which merely
        # duplicated the "equal to max" case instead of exercising
        # "more than max".
        self.offer.num_applications = 5
        self.assertFalse(self.offer.is_available())

    def test_restricts_number_of_applications_correctly_with_no_applications(self):
        self.assertEqual(4, self.offer.get_max_applications())

    def test_restricts_number_of_applications_correctly_with_fewer_applications_than_max(self):
        self.offer.num_applications = 3
        self.assertEqual(1, self.offer.get_max_applications())

    def test_restricts_number_of_applications_correctly_with_more_applications_than_max(self):
        self.offer.num_applications = 5
        self.assertEqual(0, self.offer.get_max_applications())
class TestCappedDiscountConditionalOffer(TestCase):
    """Availability of an offer with a cap on the total discount given."""

    def setUp(self):
        self.offer = models.ConditionalOffer(
            max_discount=D('100.00'), total_discount=D('0.00'))

    def test_is_available_when_below_threshold(self):
        self.assertTrue(self.offer.is_available())

    def test_is_inactive_when_on_threshold(self):
        # Reaching the cap exactly already disables the offer.
        self.offer.total_discount = self.offer.max_discount
        self.assertFalse(self.offer.is_available())

    def test_is_inactive_when_above_threshold(self):
        over_cap = self.offer.max_discount + D('10.00')
        self.offer.total_discount = over_cap
        self.assertFalse(self.offer.is_available())
class TestASuspendedOffer(TestCase):
    """Behaviour of an offer whose status is SUSPENDED."""
    def setUp(self):
        self.offer = models.ConditionalOffer(
            status=models.ConditionalOffer.SUSPENDED)
    def test_is_unavailable(self):
        self.assertFalse(self.offer.is_available())
    def test_lists_suspension_as_an_availability_restriction(self):
        # Suspension should surface as exactly one availability restriction.
        restrictions = self.offer.availability_restrictions()
        self.assertEqual(1, len(restrictions))
lpatmo/actionify_the_news | open_connect/media/tests/test_models.py | 1 | 16113 | """Tests for media.models."""
# pylint: disable=maybe-no-member, too-many-instance-attributes
from base64 import urlsafe_b64encode
from unittest import skipIf
import hashlib
import os
import re
from django.core.files import File
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from mock import patch, Mock
from model_mommy import mommy
from PIL import Image as PILImage
from open_connect.media import models
from open_connect.media.models import Image
from open_connect.media.tests.test_utils import gifsicle_not_installed
from open_connect.connect_core.utils.basetests import ConnectTestMixin
class ImageTest(ConnectTestMixin, TestCase):
"""Tests for Image model"""
def setUp(self):
super(ImageTest, self).setUp()
self.path = os.path.dirname(os.path.abspath(__file__))
def get_image(self, filename):
"""Returns the specified image."""
path = os.path.join(self.path, filename)
image = Image()
image.image = File(open(path))
image.user = self.create_user()
image.save(process=False)
return image
def get_large_image(self):
"""Returns the large image."""
return self.get_image('1000x500.png')
def get_small_image(self):
"""Returns the small image."""
return self.get_image('200x200.png')
def get_animated_image(self):
"""Returns the animated GIF"""
return self.get_image('animation.gif')
def get_exif_image(self):
"""Returns the exif image"""
return self.get_image('exif.jpg')
def test_create_display_size(self):
"""Test creating a display_image."""
largeimage = self.get_large_image()
largeimage.create_display_size()
smallimage = self.get_small_image()
smallimage.create_display_size()
# Confirm that the large image was resized, the small was not
self.assertEqual(smallimage.image, smallimage.display_image)
self.assertNotEqual(largeimage.image, largeimage.display_image)
# Confirm that the large image is at or below 600x600
largeimage.display_image.open()
large_image_display = PILImage.open(largeimage.display_image)
self.assertLessEqual(large_image_display.size, (600, 600))
def test_create_thumbnail(self):
"""Test creating a thumbnail."""
largeimage = self.get_large_image()
largeimage.create_thumbnail()
largeimage.thumbnail.open()
thumbnail = PILImage.open(largeimage.thumbnail)
self.assertLessEqual(thumbnail.size, (200, 200))
@skipIf(gifsicle_not_installed(), 'Gifsicle not installed')
def test_create_thumbnail_animation(self):
"""Test creating a thumbnail of an animated GIF."""
# pylint: disable=expression-not-assigned
animatedimage = self.get_animated_image()
animatedimage.image.open()
image = PILImage.open(animatedimage.image)
# Confirm there are 4 frames by ensuring that the 5th frame raises an
# error
[image.seek(frame) for frame in range(0, 4)]
with self.assertRaises(ValueError):
image.seek(5)
animatedimage.create_thumbnail()
animatedimage.thumbnail.open()
thumbnail = PILImage.open(animatedimage.thumbnail)
# Confirm that there are the same number of frames (4) as the original
[thumbnail.seek(frame) for frame in range(0, 4)]
with self.assertRaises(ValueError):
thumbnail.seek(5)
self.assertLessEqual(thumbnail.size, (200, 200))
@patch('open_connect.media.models.resize_gif')
def test_create_thumbnail_animation_no_gifsicle(self, mock_resize):
"""Test resizing an image when gifsicle is not installed"""
mock_resize.return_value = ('', True)
animatedimage = self.get_animated_image()
mock_resize.assert_called_once()
animatedimage.create_thumbnail()
# Open both, confirm that the thumbnail is identical to the image
animatedimage.image.open()
animatedimage.thumbnail.open()
# Hash both files to confirm they are the same
image_hash = hashlib.md5(animatedimage.image.read()).hexdigest()
thumbnail_hash = hashlib.md5(animatedimage.thumbnail.read()).hexdigest()
self.assertEqual(image_hash, thumbnail_hash)
def test_process_exif_data(self):
"""Test grabbing exif data from an image"""
image = self.get_exif_image()
image.image = File(open(os.path.join(self.path, 'exif.jpg')))
image.save()
self.assertFalse(image.exif)
image.process_exif_data()
self.assertTrue(image.exif)
exif_data = image.exif
self.assertEqual(exif_data['ExifImageWidth'], 375)
self.assertEqual(exif_data['ExifImageHeight'], 500)
self.assertEqual(
exif_data['LensModel'], u'iPhone 5s back camera 4.15mm f/2.2')
self.assertEqual(exif_data['Model'], 'iPhone 5s')
def test_process_exif_data_when_no_data(self):
"""Test process_exif_data when there is no exif data"""
image = self.get_small_image()
self.assertFalse(image.exif)
image.process_exif_data()
self.assertFalse(image.exif)
    @patch.object(models, 'PILImage')
    def test_process_exif_getexif_returns_none(self, mock_pilimage):
        """If _getexif returns None, don't fail."""
        # pylint: disable=protected-access
        # PIL returns None from _getexif() for images with no EXIF at all;
        # process_exif_data() must treat that as a no-op.
        mock_original = Mock()
        mock_original._getexif.return_value = None
        mock_pilimage.open.return_value = mock_original
        image = self.get_small_image()
        image.process_exif_data()
        self.assertEqual(mock_original._getexif.call_count, 1)
    def test_process_exif_raises_unicode_decode_error(self):
        """Handle UnicodeDecodeError gracefully when saving exif."""
        image = self.get_exif_image()
        with patch.object(image, 'save') as mock_save:
            # Force save() to fail with undecodable bytes; the method should
            # swallow the error and return None rather than propagate.
            mock_save.side_effect = UnicodeDecodeError(b'utf-8', b'', 0, 1, 'a')
            self.assertIsNone(image.process_exif_data())
    def test_get_thumbnail(self):
        """Test getting the thumbnail."""
        image = self.get_large_image()
        # Before a thumbnail exists, get_thumbnail falls back to the original.
        self.assertEqual(image.image, image.get_thumbnail)
        image.create_thumbnail()
        # After creation, the stored thumbnail differs from the original.
        self.assertNotEqual(image.image, image.thumbnail)
    def test_get_display_size(self):
        """Test getting the display size."""
        image = self.get_large_image()
        # Before a display-size copy exists, fall back to the original.
        self.assertEqual(image.image, image.get_display_image)
        image.create_display_size()
        self.assertNotEqual(image.image, image.get_display_image)
    @patch('open_connect.media.models.process_image')
    def test_image_process_called(self, mock):
        """process_image is called when save is called with process=True."""
        image = self.get_small_image()
        image.save(process=True)
        # `.delay` implies the processing is dispatched asynchronously
        # (presumably a celery task -- confirm in media.models).
        self.assertTrue(mock.delay.called)
    def test_serializable(self):
        """Test that serializable() exposes pk and the three image URLs."""
        image = self.get_small_image()
        serialized = image.serializable()
        self.assertEqual(serialized['pk'], image.pk)
        self.assertEqual(
            serialized['image_url'], image.get_absolute_url())
        self.assertEqual(
            serialized['display_image_url'],
            image.get_display_image.url
        )
        self.assertEqual(
            serialized['thumbnail_url'], image.get_thumbnail.url)
    def test_file_name(self):
        """Should return just the name of the file without any path."""
        image = self.get_small_image()
        # Stored names look like '<6-digit prefix>.<32-char hex digest>.png'.
        # NOTE(review): the [1-4][0-9]{5} prefix presumably encodes a
        # date-based component -- confirm against the upload_to handler.
        self.assertTrue(
            re.search(
                r'^[1-4][0-9]{5}\.[0-9a-f]{32}\.png$',
                image.file_name()
            )
        )
class ImagePopularityManagerTest(ConnectTestMixin, TestCase):
    """Tests for the Image Popularity manager"""
    def setUp(self):
        """Setup for image popularity manager tests"""
        # Three member types exercised below -- a banned superuser, a plain
        # superuser and a regular member -- all joined to the same group.
        self.banned_user = self.create_user(is_banned=True, is_superuser=True)
        self.super_user = self.create_superuser()
        self.normal_user = self.create_user()
        self.group = mommy.make('groups.Group')
        self.banned_user.add_to_group(self.group.pk)
        self.super_user.add_to_group(self.group.pk)
        self.normal_user.add_to_group(self.group.pk)
        # Unless a test logs in differently, requests run as the superuser.
        self.client.login(username=self.super_user.email, password='moo')
        path = os.path.dirname(os.path.abspath(__file__))
        self.largefile = path + '/1000x500.png'
        self.smallfile = path + '/200x200.png'
        self.largeimage = Image()
        self.largeimage.image = File(open(self.largefile))
        self.largeimage.user = self.super_user
        self.largeimage.save()
        self.smallimage = Image()
        self.smallimage.image = File(open(self.smallfile))
        self.smallimage.user = self.super_user
        self.smallimage.save()
    def test_with_user(self):
        """Should return images attached to approved messages."""
        # Create a new thread and message
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.normal_user,
            status='approved'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertIn(image1, result.context['images'])
    def test_with_user_non_group_message(self):
        """Images posted in direct messages shouldn't be returned."""
        # Create a new thread and message (direct message -- no group)
        thread = mommy.make(
            'connectmessages.Thread', group=None, thread_type='direct')
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.normal_user,
            status='spam'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertNotIn(image1, result.context['images'])
    def test_with_user_message_not_approved(self):
        """Images that are not approved should not be returned."""
        # Create a new thread and message flagged as spam
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.normal_user,
            status='spam'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        # NOTE(review): the docstring says the image should NOT be returned,
        # yet the assertion below is assertIn. Either superusers (the
        # logged-in requester) are meant to see unapproved images -- in which
        # case the docstring is wrong -- or this should be assertNotIn.
        # Confirm intent before changing either.
        self.assertIn(image1, result.context['images'])
    def test_with_user_message_not_approved_user_is_sender(self):
        """Images that are not approved should be returned to the sender."""
        # Create a new thread and a spam message sent by the requester
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.super_user,
            status='spam'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertIn(image1, result.context['images'])
    def test_with_user_no_images_from_banned_users(self):
        """Images from banned users shouldn't be present."""
        # Create a new thread and message authored by the banned user
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message', thread=thread, sender=self.banned_user)
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        # Request is made as the (non-banned) superuser from setUp.
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertNotIn(image1, result.context['images'])
    def test_with_user_current_user_is_banned(self):
        """Images from banned users should be visible to the banned user."""
        # Create a new thread and message authored by the banned user
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message', thread=thread, sender=self.banned_user)
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        # Log in as the banned user on a fresh client.
        client = Client()
        client.post(
            reverse('login'),
            {'username': self.banned_user.email, 'password': 'moo'}
        )
        result = client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertIn(image1, result.context['images'])
class Base64URLShortenerTest(TestCase):
    """Exercise the Base64URLShortener encode/decode round-trip."""

    def setUp(self):
        # A fresh shortener per test keeps the cases independent.
        self.url_shortener = models.Base64URLShortener()

    def test_shorten(self):
        """shorten() base64-encodes the stringified value."""
        self.assertEqual(self.url_shortener.shorten(123), 'MTIz')

    def test_expand(self):
        """expand() reverses shorten(), yielding the original digits."""
        self.assertEqual(self.url_shortener.expand('MTIz'), '123')
class ShortenedURLTest(TestCase):
    """Tests for ShortenedURL model."""
    def setUp(self):
        """ShortenedURL test setup: one persisted URL to work with."""
        self.url = models.ShortenedURL.objects.create(
            url='http://www.google.com')
    def test_save_without_short_code(self):
        """Test that saving ShortenedURL sets the short code."""
        # Default short_code is the urlsafe base64 of the pk with the '='
        # padding stripped.
        self.assertEqual(
            self.url.short_code, urlsafe_b64encode(str(self.url.pk)).strip('='))
    def test_save_with_short_code(self):
        """Test that saving ShortenedURL doesn't override a preset short_code"""
        result = models.ShortenedURL.objects.create(
            url='http://www.thisisanewurl.com',
            short_code='something crazy'
        )
        self.assertEqual(result.short_code, 'something crazy')
    def test_get_absolute_url(self):
        """Test that get_absolute_url returns redirect view."""
        self.assertEqual(
            self.url.get_absolute_url(),
            reverse('shortened_url_redirect',
                    kwargs={'code': self.url.short_code})
        )
    def test_click_increases_click_count(self):
        """Test that click method increments click_count."""
        click_count = self.url.click_count
        self.url.click()
        # Re-fetch to verify the increment was persisted.
        url = models.ShortenedURL.objects.get(pk=self.url.pk)
        self.assertEqual(url.click_count, click_count + 1)
    def test_click_creates_shortened_url_click(self):
        """Test that click method creates new ShortenedURLClick instance."""
        clicks = self.url.shortenedurlclick_set.count()
        self.url.click()
        url = models.ShortenedURL.objects.get(pk=self.url.pk)
        self.assertEqual(url.shortenedurlclick_set.count(), clicks + 1)
    def test_unicode(self):
        """Test the Python 2 __unicode__ representation."""
        self.assertEqual(
            unicode(self.url),
            u'ShortenedURL %s: %s' % (self.url.pk, self.url.url)
        )
| mit |
dmvo/ecgpatch | ios-new/coreplot-framework/Source/CPTNumericData+TypeConversions_Generation.py | 6 | 6503 | dataTypes = ["CPTUndefinedDataType", "CPTIntegerDataType", "CPTUnsignedIntegerDataType", "CPTFloatingPointDataType", "CPTComplexFloatingPointDataType", "CPTDecimalDataType"]
types = { "CPTUndefinedDataType" : [],
"CPTIntegerDataType" : ["int8_t", "int16_t", "int32_t", "int64_t"],
"CPTUnsignedIntegerDataType" : ["uint8_t", "uint16_t", "uint32_t", "uint64_t"],
"CPTFloatingPointDataType" : ["float", "double"],
"CPTComplexFloatingPointDataType" : ["float complex", "double complex"],
"CPTDecimalDataType" : ["NSDecimal"] }
nsnumber_factory = { "int8_t" : "Char",
"int16_t" : "Short",
"int32_t" : "Long",
"int64_t" : "LongLong",
"uint8_t" : "UnsignedChar",
"uint16_t" : "UnsignedShort",
"uint32_t" : "UnsignedLong",
"uint64_t" : "UnsignedLongLong",
"float" : "Float",
"double" : "Double",
"float complex" : "Float",
"double complex" : "Double",
"NSDecimal" : "Decimal"
}
nsnumber_methods = { "int8_t" : "char",
"int16_t" : "short",
"int32_t" : "long",
"int64_t" : "longLong",
"uint8_t" : "unsignedChar",
"uint16_t" : "unsignedShort",
"uint32_t" : "unsignedLong",
"uint64_t" : "unsignedLongLong",
"float" : "float",
"double" : "double",
"float complex" : "float",
"double complex" : "double",
"NSDecimal" : "decimal"
}
null_values = { "int8_t" : "0",
"int16_t" : "0",
"int32_t" : "0",
"int64_t" : "0",
"uint8_t" : "0",
"uint16_t" : "0",
"uint32_t" : "0",
"uint64_t" : "0",
"float" : "NAN",
"double" : "NAN",
"float complex" : "NAN",
"double complex" : "NAN",
"NSDecimal" : "CPTDecimalNaN()"
}
print "[CPTNumericData sampleValue:]"
print ""
print "switch ( self.dataTypeFormat ) {"
for dt in dataTypes:
print "\tcase %s:" % dt
if ( len(types[dt]) == 0 ):
print '\t\t[NSException raise:NSInvalidArgumentException format:@"Unsupported data type (%s)"];' % (dt)
else:
print "\t\tswitch ( self.sampleBytes ) {"
for t in types[dt]:
print "\t\t\tcase sizeof(%s):" % t
if ( t == "float complex" ):
print "\t\t\t\tresult = @(*( crealf(%s *)[self samplePointer:sample]) );" % (t)
elif ( t == "double complex" ):
print "\t\t\t\tresult = @(*( creal(%s *)[self samplePointer:sample]) );" % (t)
elif ( t == "NSDecimal" ):
print "\t\t\t\tresult = [NSDecimalNumber decimalNumberWithDecimal:*(%s *)[self samplePointer:sample]];" % (t)
else:
print "\t\t\t\tresult = @(*(%s *)[self samplePointer:sample]);" % (t)
print "\t\t\t\tbreak;"
print "\t\t}"
print "\t\tbreak;"
print "}"
print "\n\n"
print "---------------"
print "\n\n"
# Emit the body of -[CPTNumericData dataFromArray:dataType:]: converts an
# NSArray of boxed numbers into raw sample bytes of the requested type,
# writing a type-appropriate null value for members that cannot respond to
# the numeric accessor.
print "[CPTNumericData dataFromArray:dataType:]"
print ""
print "switch ( newDataType.dataTypeFormat ) {"
for dt in dataTypes:
    print "\tcase %s:" % dt
    if ( len(types[dt]) == 0 ):
        print "\t\t// Unsupported"
    else:
        print "\t\tswitch ( newDataType.sampleBytes ) {"
        for t in types[dt]:
            print "\t\t\tcase sizeof(%s): {" % t
            print "\t\t\t\t%s *toBytes = (%s *)sampleData.mutableBytes;" % (t, t)
            print "\t\t\t\tfor ( id sample in newData ) {"
            print "\t\t\t\t\tif ( [sample respondsToSelector:@selector(%sValue)] ) {" % nsnumber_methods[t]
            print "\t\t\t\t\t\t*toBytes++ = (%s)[sample %sValue];" % (t, nsnumber_methods[t])
            print "\t\t\t\t\t}"
            print "\t\t\t\t\telse {"
            print "\t\t\t\t\t\t*toBytes++ = %s;" % null_values[t]
            print "\t\t\t\t\t}"
            print "\t\t\t\t}"
            print "\t\t\t}"
            print "\t\t\t\tbreak;"
        print "\t\t}"
    print "\t\tbreak;"
print "}"
print "\n\n"
print "---------------"
print "\n\n"
# Emit the body of -[CPTNumericData convertData:dataType:toData:dataType:]:
# the full source-type x dest-type conversion matrix. Identical types use a
# single memcpy; otherwise each sample is converted in a tight pointer loop,
# going through the CPTDecimal helpers when NSDecimal is involved.
print "[CPTNumericData convertData:dataType:toData:dataType:]"
print ""
print "switch ( sourceDataType->dataTypeFormat ) {"
for dt in dataTypes:
    print "\tcase %s:" % dt
    if ( len(types[dt]) > 0 ):
        print "\t\tswitch ( sourceDataType->sampleBytes ) {"
        for t in types[dt]:
            print "\t\t\tcase sizeof(%s):" % t
            print "\t\t\t\tswitch ( destDataType->dataTypeFormat ) {"
            for ndt in dataTypes:
                print "\t\t\t\t\tcase %s:" % ndt
                if ( len(types[ndt]) > 0 ):
                    print "\t\t\t\t\t\tswitch ( destDataType->sampleBytes ) {"
                    for nt in types[ndt]:
                        print "\t\t\t\t\t\t\tcase sizeof(%s): { // %s -> %s" % (nt, t, nt)
                        if ( t == nt ):
                            # Same representation: bulk copy beats a loop.
                            print "\t\t\t\t\t\t\t\t\tmemcpy(destData.mutableBytes, sourceData.bytes, sampleCount * sizeof(%s));" % t
                        else:
                            print "\t\t\t\t\t\t\t\t\tconst %s *fromBytes = (%s *)sourceData.bytes;" % (t, t)
                            print "\t\t\t\t\t\t\t\t\tconst %s *lastSample = fromBytes + sampleCount;" % t
                            print "\t\t\t\t\t\t\t\t\t%s *toBytes = (%s *)destData.mutableBytes;" % (nt, nt)
                            if ( t == "NSDecimal" ):
                                print "\t\t\t\t\t\t\t\t\twhile ( fromBytes < lastSample ) *toBytes++ = CPTDecimal%sValue(*fromBytes++);" % nsnumber_factory[nt]
                            elif ( nt == "NSDecimal" ):
                                print "\t\t\t\t\t\t\t\t\twhile ( fromBytes < lastSample ) *toBytes++ = CPTDecimalFrom%s(*fromBytes++);" % nsnumber_factory[t]
                            else:
                                print "\t\t\t\t\t\t\t\t\twhile ( fromBytes < lastSample ) *toBytes++ = (%s)*fromBytes++;" % nt
                        print "\t\t\t\t\t\t\t\t}"
                        print "\t\t\t\t\t\t\t\tbreak;"
                    print "\t\t\t\t\t\t}"
                    print "\t\t\t\t\t\tbreak;"
            print "\t\t\t\t}"
            print "\t\t\t\tbreak;"
        print "\t\t}"
    print "\t\tbreak;"
print "}"
| mit |
gxx/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/gis/admin/options.py | 45 | 5095 | from django.conf import settings
from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.db import models
class GeoModelAdmin(ModelAdmin):
    """
    The administration options class for Geographic models. Map settings
    may be overloaded from their defaults to create custom maps.
    """
    # The default map settings that may be overloaded -- still subject
    # to API changes. Subclasses (e.g. OSMGeoAdmin) override these as
    # class attributes.
    default_lon = 0
    default_lat = 0
    default_zoom = 4
    display_wkt = False
    display_srid = False
    extra_js = []
    num_zoom = 18
    max_zoom = False
    min_zoom = False
    units = False
    max_resolution = False
    max_extent = False
    modifiable = True
    mouse_position = True
    scale_text = True
    layerswitcher = True
    scrollable = True
    map_width = 600
    map_height = 400
    map_srid = 4326
    map_template = 'gis/admin/openlayers.html'
    # NOTE(review): hard-coded external CDN URLs (OpenLayers 2.8, metacarta
    # WMS) -- these hosts may no longer serve the assets.
    openlayers_url = 'http://openlayers.org/api/2.8/OpenLayers.js'
    point_zoom = num_zoom - 6
    wms_url = 'http://labs.metacarta.com/wms/vmap0'
    wms_layer = 'basic'
    wms_name = 'OpenLayers WMS'
    debug = False
    widget = OpenLayersWidget
    def _media(self):
        "Injects OpenLayers JavaScript into the admin."
        media = super(GeoModelAdmin, self)._media()
        media.add_js([self.openlayers_url])
        media.add_js(self.extra_js)
        return media
    media = property(_media)
    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Overloaded from ModelAdmin so that an OpenLayersWidget is used
        for viewing/editing GeometryFields.
        """
        if isinstance(db_field, models.GeometryField):
            # `request` is unused here but must be popped so it is not
            # forwarded to db_field.formfield(), which doesn't accept it.
            request = kwargs.pop('request', None)
            # Setting the widget with the newly defined widget.
            kwargs['widget'] = self.get_map_widget(db_field)
            return db_field.formfield(**kwargs)
        else:
            return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
    def get_map_widget(self, db_field):
        """
        Returns a subclass of the OpenLayersWidget (or whatever was specified
        in the `widget` attribute) using the settings from the attributes set
        in this class.
        """
        # Multi-geometry and collection fields get collection editing
        # controls in the JS widget.
        is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
        if is_collection:
            if db_field.geom_type == 'GEOMETRYCOLLECTION': collection_type = 'Any'
            else: collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
        else:
            collection_type = 'None'
        # Build a per-field widget subclass whose `params` bake this admin's
        # map settings into the rendered template context.
        class OLMap(self.widget):
            template = self.map_template
            geom_type = db_field.geom_type
            params = {'default_lon' : self.default_lon,
                      'default_lat' : self.default_lat,
                      'default_zoom' : self.default_zoom,
                      'display_wkt' : self.debug or self.display_wkt,
                      'geom_type' : OGRGeomType(db_field.geom_type),
                      'field_name' : db_field.name,
                      'is_collection' : is_collection,
                      'scrollable' : self.scrollable,
                      'layerswitcher' : self.layerswitcher,
                      'collection_type' : collection_type,
                      'is_linestring' : db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
                      'is_polygon' : db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
                      'is_point' : db_field.geom_type in ('POINT', 'MULTIPOINT'),
                      'num_zoom' : self.num_zoom,
                      'max_zoom' : self.max_zoom,
                      'min_zoom' : self.min_zoom,
                      'units' : self.units, #likely shoud get from object
                      'max_resolution' : self.max_resolution,
                      'max_extent' : self.max_extent,
                      'modifiable' : self.modifiable,
                      'mouse_position' : self.mouse_position,
                      'scale_text' : self.scale_text,
                      'map_width' : self.map_width,
                      'map_height' : self.map_height,
                      'point_zoom' : self.point_zoom,
                      'srid' : self.map_srid,
                      'display_srid' : self.display_srid,
                      'wms_url' : self.wms_url,
                      'wms_layer' : self.wms_layer,
                      'wms_name' : self.wms_name,
                      'debug' : self.debug,
                      }
        return OLMap
from django.contrib.gis import gdal
# OSMGeoAdmin is only defined when GDAL is available, since the OSM layers
# require the coordinate transforms GDAL provides.
if gdal.HAS_GDAL:
    class OSMGeoAdmin(GeoModelAdmin):
        """GeoModelAdmin preconfigured for OpenStreetMap base layers."""
        map_template = 'gis/admin/osm.html'
        extra_js = ['http://openstreetmap.org/openlayers/OpenStreetMap.js']
        num_zoom = 20
        # 900913 is the legacy spherical Mercator SRID used by OSM tiles;
        # max_extent/max_resolution are the matching world bounds.
        map_srid = 900913
        max_extent = '-20037508,-20037508,20037508,20037508'
        max_resolution = '156543.0339'
        point_zoom = num_zoom - 6
        units = 'm'
| gpl-3.0 |
yuxiang-zhou/menpo | menpo/transform/homogeneous/scale.py | 4 | 10188 | import numpy as np
from .base import HomogFamilyAlignment
from .affine import DiscreteAffine, Affine
from .similarity import Similarity
def Scale(scale_factor, n_dims=None):
    r"""
    Factory function for producing Scale transforms. Zero scale factors are
    not permitted.

    A :class:`UniformScale` will be produced if:

        - A `float` ``scale_factor`` and a ``n_dims`` `kwarg` are provided
        - A `ndarray` ``scale_factor`` with shape ``(n_dims,)`` is provided
          with all elements being the same

    A :class:`NonUniformScale` will be provided if:

        - A `ndarray` ``scale_factor`` with shape ``(n_dims,)`` is provided
          with at least two differing scale factors.

    Parameters
    ----------
    scale_factor : `float` or ``(n_dims,)`` `ndarray`
        Scale for each axis.
    n_dims : `int`, optional
        The dimensionality of the output transform.

    Returns
    -------
    scale : :class:`UniformScale` or :class:`NonUniformScale`
        The correct type of scale

    Raises
    ------
    ValueError
        If any of the scale factors is zero, or if a scalar ``scale_factor``
        is given without ``n_dims``.
    """
    from numbers import Number
    if isinstance(scale_factor, Number):
        if n_dims is None:
            # Previously this fell through to the array branch and died with
            # an opaque TypeError on scale_factor[0]; fail fast instead.
            raise ValueError('n_dims must be provided when scale_factor is '
                             'a scalar')
        if scale_factor == 0:
            raise ValueError('Having a zero in one of the scales is invalid')
        return UniformScale(scale_factor, n_dims)
    # Some array-like thing - make it a numpy array for sure.
    scale_factor = np.asarray(scale_factor)
    if not np.all(scale_factor):
        raise ValueError('Having a zero in one of the scales is invalid')
    if n_dims is not None:
        return UniformScale(scale_factor, n_dims)
    if np.allclose(scale_factor, scale_factor[0]):
        # All factors (approximately) equal - a uniform scale suffices.
        return UniformScale(scale_factor[0], scale_factor.shape[0])
    return NonUniformScale(scale_factor)
class NonUniformScale(DiscreteAffine, Affine):
    r"""
    An ``n_dims`` scale transform, with a scale component for each dimension.
    Parameters
    ----------
    scale : ``(n_dims,)`` `ndarray`
        A scale for each axis.
    skip_checks : `bool`, optional
        If ``True`` avoid sanity checks on ``h_matrix`` for performance.
    """
    def __init__(self, scale, skip_checks=False):
        scale = np.asarray(scale)
        if not skip_checks:
            if scale.size > 3 or scale.size < 2:
                raise ValueError("NonUniformScale can only be 2D or 3D"
                                 ", not {}".format(scale.size))
        h_matrix = np.eye(scale.size + 1)
        # np.fill_diagonal cycles `scale` along the (n_dims + 1)-long
        # diagonal, clobbering the homogeneous corner -- reset it to 1.
        np.fill_diagonal(h_matrix, scale)
        h_matrix[-1, -1] = 1
        Affine.__init__(self, h_matrix, skip_checks=True, copy=False)
    @classmethod
    def init_identity(cls, n_dims):
        r"""
        Creates an identity transform.
        Parameters
        ----------
        n_dims : `int`
            The number of dimensions.
        Returns
        -------
        identity : :class:`NonUniformScale`
            The identity matrix transform.
        """
        return NonUniformScale(np.ones(n_dims))
    @property
    def scale(self):
        r"""
        The scale vector.
        :type: ``(n_dims,)`` `ndarray`
        """
        # Copy the vector as Numpy 1.10 will return a writeable view
        return self.h_matrix.diagonal()[:-1].copy()
    def _transform_str(self):
        # Human-readable description used by DiscreteAffine's __str__.
        message = 'NonUniformScale by {}'.format(self.scale)
        return message
    @property
    def n_parameters(self):
        """
        The number of parameters: ``n_dims``. They have the form
        ``[scale_x, scale_y, ....]`` representing the scale across each axis.
        :type: `list` of `int`
        """
        return self.scale.size
    def _as_vector(self):
        r"""
        Return the parameters of the transform as a 1D array. These parameters
        are parametrised as deltas from the identity warp. The parameters
        are output in the order ``[s0, s1, ...]``.
        +----------+--------------------------------------------+
        |parameter | definition                                 |
        +==========+============================================+
        |s0        | The scale across the first axis            |
        +----------+--------------------------------------------+
        |s1        | The scale across the second axis           |
        +----------+--------------------------------------------+
        |...       | ...                                        |
        +----------+--------------------------------------------+
        |sn        | The scale across the nth axis              |
        +----------+--------------------------------------------+
        Returns
        -------
        s : ``(n_dims,)`` `ndarray`
            The scale across each axis.
        """
        return self.scale
    def _from_vector_inplace(self, vector):
        r"""
        Updates the :class:`NonUniformScale` inplace.
        Parameters
        ----------
        vector : ``(n_dims,)`` `ndarray`
            The array of parameters.
        """
        # Same fill_diagonal trick as __init__: refill then restore the
        # homogeneous corner.
        np.fill_diagonal(self.h_matrix, vector)
        self.h_matrix[-1, -1] = 1
    @property
    def composes_inplace_with(self):
        r"""
        :class:`NonUniformScale` can swallow composition with any other
        :class:`NonUniformScale` and :class:`UniformScale`.
        """
        return NonUniformScale, UniformScale
    def pseudoinverse(self):
        """
        The inverse scale matrix (element-wise reciprocal of the scales).
        :type: :class:`NonUniformScale`
        """
        return NonUniformScale(1.0 / self.scale, skip_checks=True)
class UniformScale(DiscreteAffine, Similarity):
    r"""
    An abstract similarity scale transform, with a single scale component
    applied to all dimensions. This is abstracted out to remove unnecessary
    code duplication.
    Parameters
    ----------
    scale : ``(n_dims,)`` `ndarray`
        A scale for each axis.
    n_dims : `int`
        The number of dimensions
    skip_checks : `bool`, optional
        If ``True`` avoid sanity checks on ``h_matrix`` for performance.
    """
    def __init__(self, scale, n_dims, skip_checks=False):
        if not skip_checks:
            if n_dims > 3 or n_dims < 2:
                raise ValueError("UniformScale can only be 2D or 3D"
                                 ", not {}".format(n_dims))
        h_matrix = np.eye(n_dims + 1)
        # The scalar fills the whole (n_dims + 1)-long diagonal, including
        # the homogeneous corner -- reset the corner to 1 afterwards.
        np.fill_diagonal(h_matrix, scale)
        h_matrix[-1, -1] = 1
        Similarity.__init__(self, h_matrix, copy=False,
                            skip_checks=True)
    @classmethod
    def init_identity(cls, n_dims):
        r"""
        Creates an identity transform.
        Parameters
        ----------
        n_dims : `int`
            The number of dimensions.
        Returns
        -------
        identity : :class:`UniformScale`
            The identity matrix transform.
        """
        return UniformScale(1, n_dims)
    @property
    def scale(self):
        r"""
        The single scale value.
        :type: `float`
        """
        # All diagonal entries (bar the corner) are equal; read the first.
        return self.h_matrix[0, 0]
    def _transform_str(self):
        # Human-readable description used by DiscreteAffine's __str__.
        message = 'UniformScale by {}'.format(self.scale)
        return message
    @property
    def n_parameters(self):
        r"""
        The number of parameters: 1
        :type: `int`
        """
        return 1
    def _as_vector(self):
        r"""
        Return the parameters of the transform as a 1D array. These parameters
        are parametrised as deltas from the identity warp. The parameters
        are output in the order ``[s]``.
        +----------+--------------------------------+
        |parameter | definition                     |
        +==========+================================+
        |s         | The scale across each axis     |
        +----------+--------------------------------+
        Returns
        -------
        s : `float`
            The scale across each axis.
        """
        return np.asarray(self.scale)
    def _from_vector_inplace(self, p):
        r"""
        Returns an instance of the transform from the given parameters,
        expected to be in Fortran ordering.
        Parameters
        ----------
        p : `float`
            The parameter
        """
        # Refill the diagonal with the new scale, restoring the corner.
        np.fill_diagonal(self.h_matrix, p)
        self.h_matrix[-1, -1] = 1
    @property
    def composes_inplace_with(self):
        r"""
        :class:`UniformScale` can swallow composition with any other
        :class:`UniformScale`.
        """
        return UniformScale
    def pseudoinverse(self):
        r"""
        The inverse scale (reciprocal of the scale factor).
        :type: :class:`UniformScale`
        """
        return UniformScale(1.0 / self.scale, self.n_dims, skip_checks=True)
class AlignmentUniformScale(HomogFamilyAlignment, UniformScale):
    r"""
    Constructs a :class:`UniformScale` by finding the optimal scale transform to
    align `source` to `target`.
    Parameters
    ----------
    source : :map:`PointCloud`
        The source pointcloud instance used in the alignment
    target : :map:`PointCloud`
        The target pointcloud instance used in the alignment
    """
    def __init__(self, source, target):
        HomogFamilyAlignment.__init__(self, source, target)
        # Optimal uniform scale is simply the ratio of the pointcloud norms.
        UniformScale.__init__(self, target.norm() / source.norm(),
                              source.n_dims)
    def _from_vector_inplace(self, p):
        r"""
        Returns an instance of the transform from the given parameters,
        expected to be in Fortran ordering.
        Parameters
        ----------
        p : `float`
            The parameter
        """
        UniformScale._from_vector_inplace(self, p)
        # Keep the alignment target in sync with the new scale.
        self._sync_target_from_state()
    def _sync_state_from_target(self):
        # Recompute the scale from the (possibly updated) target and write
        # it into the homogeneous matrix, restoring the corner element.
        new_scale = self.target.norm() / self.source.norm()
        np.fill_diagonal(self.h_matrix, new_scale)
        self.h_matrix[-1, -1] = 1
    def as_non_alignment(self):
        r"""Returns a copy of this uniform scale without it's alignment nature.
        Returns
        -------
        transform : :map:`UniformScale`
            A version of this scale with the same transform behavior but
            without the alignment logic.
        """
        return UniformScale(self.scale, self.n_dims)
| bsd-3-clause |
trdean/grEME | gr-digital/examples/narrowband/rx_voice.py | 58 | 5692 | #!/usr/bin/env python
#
# Copyright 2005,2006,2009,2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, audio, uhd
from gnuradio import blocks
from gnuradio import filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
from gnuradio import blocks
from gnuradio import digital
from gnuradio import vocoder
import random
import struct
import sys
# from current dir
from receive_path import receive_path
from uhd_interface import uhd_receiver
#import os
#print os.getpid()
#raw_input('Attach and press enter')
class audio_tx(gr.hier_block2):
    """Playback chain: packet message queue -> GSM full-rate decoder ->
    short-to-float -> normalisation -> sound card sink."""
    def __init__(self, audio_output_dev):
        gr.hier_block2.__init__(self, "audio_tx",
                                gr.io_signature(0, 0, 0), # Input signature
                                gr.io_signature(0, 0, 0)) # Output signature
        self.sample_rate = sample_rate = 8000
        # 33-byte messages -- presumably one GSM 06.10 full-rate frame each,
        # matching gsm_fr_decode_ps below (confirm against vocoder docs).
        self.packet_src = blocks.message_source(33)
        voice_decoder = vocoder.gsm_fr_decode_ps()
        s2f = blocks.short_to_float()
        # Scale 16-bit PCM into the [-1.0, 1.0] range the audio sink expects.
        sink_scale = blocks.multiply_const_ff(1.0/32767.)
        audio_sink = audio.sink(sample_rate, audio_output_dev)
        self.connect(self.packet_src, voice_decoder, s2f, sink_scale, audio_sink)
    def msgq(self):
        """Return the message queue that received payloads are inserted into."""
        return self.packet_src.msgq()
class my_top_block(gr.top_block):
    """Top-level flow graph: a sample source (UHD device, capture file, or
    null source) feeding the demodulating receive path, plus the audio
    playback chain."""
    def __init__(self, demod_class, rx_callback, options):
        gr.top_block.__init__(self)
        self.rxpath = receive_path(demod_class, rx_callback, options)
        self.audio_tx = audio_tx(options.audio_output)
        if(options.rx_freq is not None):
            # Live capture from a UHD device.
            self.source = uhd_receiver(options.args, options.bitrate,
                                       options.samples_per_symbol,
                                       options.rx_freq, options.rx_gain,
                                       options.antenna, options.verbose)
            options.samples_per_symbol = self.source._sps
            audio_rate = self.audio_tx.sample_rate
            usrp_rate = self.source.get_sample_rate()
            # Resample the device stream by the audio/device rate ratio.
            rrate = audio_rate / usrp_rate
            self.resampler = filter.pfb.arb_resampler_ccf(rrate)
            self.connect(self.source, self.resampler, self.rxpath)
        elif(options.from_file is not None):
            # Replay a recorded complex-sample file, throttled to bitrate.
            self.thr = blocks.throttle(gr.sizeof_gr_complex, options.bitrate)
            self.source = blocks.file_source(gr.sizeof_gr_complex, options.from_file)
            self.connect(self.source, self.thr, self.rxpath)
        else:
            # No input specified: feed zeros so the graph still runs.
            self.thr = blocks.throttle(gr.sizeof_gr_complex, 1e6)
            self.source = blocks.null_source(gr.sizeof_gr_complex)
            self.connect(self.source, self.thr, self.rxpath)
        self.connect(self.audio_tx)
# /////////////////////////////////////////////////////////////////////////////
#                                    main
# /////////////////////////////////////////////////////////////////////////////
# Packet statistics shared with the rx_callback closure. A module-level
# `global` statement is a no-op; kept only to signal intent.
global n_rcvd, n_right
def main():
    """Parse options, build the receive flow graph, and run it."""
    global n_rcvd, n_right
    n_rcvd = 0
    n_right = 0
    def rx_callback(ok, payload):
        # Per-packet callback: update stats and, on a good CRC, queue the
        # payload for audio playback.
        global n_rcvd, n_right
        n_rcvd += 1
        if ok:
            n_right += 1
            tb.audio_tx.msgq().insert_tail(gr.message_from_string(payload))
        print "ok = %r  n_rcvd = %4d  n_right = %4d" % (
            ok, n_rcvd, n_right)
    demods = digital.modulation_utils.type_1_demods()
    # Create Options Parser:
    parser = OptionParser (option_class=eng_option, conflict_handler="resolve")
    expert_grp = parser.add_option_group("Expert")
    parser.add_option("-m", "--modulation", type="choice", choices=demods.keys(),
                      default='gmsk',
                      help="Select modulation from: %s [default=%%default]"
                            % (', '.join(demods.keys()),))
    parser.add_option("-O", "--audio-output", type="string", default="",
                      help="pcm output device name.  E.g., hw:0,0 or /dev/dsp")
    parser.add_option("","--from-file", default=None,
                      help="input file of samples to demod")
    receive_path.add_options(parser, expert_grp)
    uhd_receiver.add_options(parser)
    for mod in demods.values():
        mod.add_options(expert_grp)
    parser.set_defaults(bitrate=50e3)  # override default bitrate default
    (options, args) = parser.parse_args ()
    if len(args) != 0:
        parser.print_help(sys.stderr)
        sys.exit(1)
    # A live frequency is required unless replaying from a capture file.
    if options.from_file is None:
        if options.rx_freq is None:
            sys.stderr.write("You must specify -f FREQ or --freq FREQ\n")
            parser.print_help(sys.stderr)
            sys.exit(1)
    # build the graph
    tb = my_top_block(demods[options.modulation], rx_callback, options)
    # Realtime scheduling reduces audio underruns; failure is non-fatal.
    r = gr.enable_realtime_scheduling()
    if r != gr.RT_OK:
        print "Warning: Failed to enable realtime scheduling."
    tb.run()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
| gpl-3.0 |
UweBonnes/blackmagic | scripts/get_openocd_nrf51_ids.py | 9 | 1914 | #!/usr/bin/python
"""Pulls nRF51 IDs from openocd's nrf51.c in a form suitable for
pasting into blackmagic's nrf51.c
"""
import subprocess,re
# Stream openocd's nrf51.c straight out of the upstream git repository.
cmd = 'git archive --remote=git://git.code.sf.net/p/openocd/code HEAD src/flash/nor/nrf51.c | tar -xO'
class Spec():
    """One nRF51 HWID entry scraped from openocd's nrf51.c struct table."""
    def __repr__(self):
        # Render as a C case label plus identifying comment, ready to paste.
        return "0x%04X: /* %s %s %s */"%(self.hwid,self.comment, self.variant,self.build_code)
fd = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout
specdict={}
specs=[]
spec=Spec()
for line in fd.read().split('\n'):
    # Remember the most recent C comment; it names the devices for the
    # hwid entries that follow.
    m=re.search('/\*(.*)\*/',line)
    if m:
        lastcomment=m.group(1)
    m=re.search('.hwid.*=\s*(0x[0-9A-F]*),',line)
    if m:
        spec.hwid=int(m.group(1),base=0)
    m=re.search('.variant.*=\s*"(.*)",',line)
    if m:
        spec.variant=m.group(1)
    m=re.search('.build_code.*=\s*"(.*)",',line)
    if m:
        spec.build_code=m.group(1)
    m=re.search('.flash_size_kb.*=\s*([0-9]*),',line)
    if m:
        # flash_size_kb is the last field of each struct entry, so seeing
        # it means the current spec is complete.
        spec.flash_size_kb=int(m.group(1),base=0)
        # RAM/flash sizes implied by the two-letter variant suffix; the
        # assert cross-checks openocd's own flash figure.
        ram,flash = {'AA':(16,256),
                     'AB':(16,128),
                     'AC':(32,256)}[spec.variant[-2:]]
        assert flash==spec.flash_size_kb
        spec.ram_size_kb = ram
        nicecomment =lastcomment.strip().replace('IC ','').replace('Devices ','').replace('.','')
        spec.comment=nicecomment
        specdict.setdefault((ram,flash),[]).append(spec)
        specs.append(spec)
        spec=Spec()
# Emit one blackmagic case group per distinct (ram, flash) combination,
# with the case labels sorted by hwid.
for (ram,flash),specs in specdict.iteritems():
    specs.sort(key=lambda x:x.hwid)
    for spec in specs:
        print "\tcase",spec
    print '\t\tt->driver = "Nordic nRF51";'
    print '\t\ttarget_add_ram(t, 0x20000000, 0x%X);'%(1024*ram)
    print '\t\tnrf51_add_flash(t, 0x00000000, 0x%X, NRF51_PAGE_SIZE);'%(1024*flash)
    print '\t\tnrf51_add_flash(t, NRF51_UICR, 0x100, 0x100);'
    print '\t\ttarget_add_commands(t, nrf51_cmd_list, "nRF51");'
    print '\t\treturn true;'
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.