code
stringlengths 1
199k
|
|---|
__author__ = 'Stephanie'
# Re-export the sensor CRUD service classes so callers can import them
# directly from this package.
# NOTE(review): these are Python-2 style implicit relative imports; on
# Python 3 they would need to be ``from .readSensors import readSensors``
# etc. — confirm the supported interpreter version.
from ODMconnection import dbconnection
from readSensors import readSensors
from updateSensors import updateSensors
from createSensors import createSensors
from deleteSensors import deleteSensors
# Public API of this package (what ``from package import *`` exposes).
__all__ = [
'readSensors',
'updateSensors',
'createSensors',
'deleteSensors',
]
|
"""In the case that the Setup.py file fails to execute, please manually install the following packages,
or execute the requirements.sh script."""
from distutils.core import setup
setup(name='DPS East Hackathon Rule booklet.',
version='1.0',
description='DPS East Hackathon Rule booklet.',
author='thel3l',
author_email='i.rithwik@protonmail.com',
url='https://www.github.com/thel3l/hackathon-dpse',
packages=['distutils', 'distutils.command', 'pyfiglet', 'colorama', 'termcolor', 'blessings'],
)
|
import logging
from typing import cast
from rebasehelper.logger import CustomLogger
# Module-level logger; the cast tells type checkers to treat it as the
# project's CustomLogger subclass configured elsewhere.
logger: CustomLogger = cast(CustomLogger, logging.getLogger(__name__))
class InputHelper:
    """Class for command line interaction with the user."""

    @staticmethod
    def strtobool(message):
        """Convert a user message to a corresponding truth value.

        This method is a replacement for deprecated strtobool from distutils,
        its behaviour remains the same.

        Args:
            message (str): Message to evaluate.

        Returns:
            bool: True on 'y', 'yes', 't', 'true', 'on' and '1'.
            False on 'n', 'no', 'f', 'false', 'off' and '0'.

        Raises:
            ValueError: On any other value.
        """
        message = message.lower()
        if message in ('y', 'yes', 't', 'true', 'on', '1'):
            return True
        if message in ('n', 'no', 'f', 'false', 'off', '0'):
            return False
        raise ValueError('No conversion to truth value for "{}"'.format(message))

    @classmethod
    def get_message(cls, message, default_yes=True, any_input=False):
        """Prompt a user with yes/no message and get the response.

        Args:
            message (str): Prompt string.
            default_yes (bool): If the default value should be YES.
            any_input (bool): Whether to return default value regardless of input.

        Returns:
            bool: True or False, based on user's input.
        """
        choice = '[Y/n]' if default_yes else '[y/N]'
        # In any_input mode the yes/no hint is omitted because the concrete
        # answer is ignored and the default is returned.
        if any_input:
            msg = '{0} '.format(message)
        else:
            msg = '{0} {1}? '.format(message, choice)
        while True:
            user_input = input(msg).lower()
            if not user_input or any_input:
                # Empty answer, or "accept anything" mode: use the default.
                return default_yes
            # strtobool already returns a bool, so it can be returned
            # directly. (The original trailing ``if any_input: return True``
            # branch was unreachable — any_input is handled above — and has
            # been removed.)
            try:
                return cls.strtobool(user_input)
            except ValueError:
                logger.error('You have to type y(es) or n(o).')
|
# Ad-hoc smoke tests for the jmap_geoparser coordinate parser: parse strings
# in various coordinate formats and check the decimal-degree conversion.
# NOTE(review): ``import StringIO`` marks this as Python 2 code; os, re and
# StringIO appear unused in this excerpt.
import os, sys, re, StringIO
sys.path.append('/Users/Jason/Dropbox/JournalMap/scripts/GeoParsers')
from jmap_geoparser import *
# Free-text scan: every coordinate found must convert without error.
test = "blah blah blah 45º 23' 12'', 123º 23' 56'' and blah blah blah 32º21'59''N, 115º 23' 14''W blah blah blah"
coords = coordinateParser.searchString(test)
for coord in coords:
    assert coordinate(coord).calcDD(), "Coordinate Transform Error for "+str(coord)
test = "45.234º, 123.43º"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 45.234, 'longitude': 123.43}
test = "-45º 23' 12'', -123º 23' 56''"
# NOTE(review): the expected longitude here is positive for a negative
# input, unlike the latitude on the same line — confirm the parser's sign
# convention before relying on this case.
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.38667, 'longitude': 123.39889}
test = "32º21'59''N, 115º 23' 14''W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 32.36639, 'longitude': -115.38722}
test = "12 43 56 North, 23 56 12 East"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 12.73222, 'longitude': 23.93667}
test = "52 15 10N, 0 01 54W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.25278, 'longitude': -0.03167}
test = "52 35 31N, 1 28 05E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.59194, 'longitude': 1.46806}
test = "30° 47' N, 34° 46' E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 30.78333, 'longitude': 34.76667}
# The cases below are disabled (kept inside a string literal); presumably
# known-failing or unverified formats — confirm before re-enabling.
'''
test = "AT; 1 spm, CN 3-41, 21°00′ N, 112°30′ E"
for result, start, end in coordinateParser.scanString(test):
    assert coordinate(result).calcDD() == {'latitude': 21.0, 'longitude': 112.5}
test = '27°43.886, 34°15.663'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 27.73143, 'longitude': 34.26105}
test = '49°17’13”N, 13°40’18”E'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 49.28694, 'longitude': 13.67167}
test = '45.9215º; -76.6219º'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.9215, 'longitude': 76.6219}
test = "latitude 32°47′47″ S and longitude 26°50′56″ E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -32.79639, 'longitude': 26.84889}
test = "N15°46′ W87°00'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 15.76667, 'longitude': -87.0}
test = "latitude of 35°13', longitude of 4°11'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 35.21667, 'longitude': 4.18333}
test = "expects to find coordinates: 52 degrees, 42 minutes north, 124 degrees, 50 minutes west"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.7, 'longitude': -124.83333}
'''
|
"""
Created on Mon Nov 21 15:53:15 2016
@author: agiovann
"""
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import map
from builtins import range
from past.utils import old_div
import cv2
# Force OpenCV to run single-threaded; very old builds lack setNumThreads,
# in which case the call is simply skipped.
try:
    cv2.setNumThreads(1)
except:
    print('Open CV is naturally single threaded')
# When running under IPython, enable autoreload so edited modules are
# picked up without restarting the interpreter (debugging convenience only;
# outside IPython the __IPYTHON__ lookup raises NameError).
try:
    if __IPYTHON__:
        print((1))
        # this is used for debugging purposes only. allows to reload classes when changed
        get_ipython().magic('load_ext autoreload')
        get_ipython().magic('autoreload 2')
except NameError:
    print('Not launched under iPython')
# Standard library
import glob
import os
import sys
import time

# Third-party
import numpy as np
import psutil
import pylab as pl
import scipy.io
from ipyparallel import Client
from skimage.external.tifffile import TiffFile

# Project
import caiman as cm
from caiman.motion_correction import tile_and_correct
def tile_and_correct_wrapper(params):
    """Motion-correct one chunk of frames (runs inside a worker engine).

    Args:
        params: sequence unpacked as (img_name, out_fname, idxs, shape_mov,
            template, strides, overlaps, max_shifts, add_to_movie,
            max_deviation_rigid, upsample_factor_grid, newoverlaps,
            newstrides, shifts_opencv); everything is packed into a single
            argument so the function can be used with map/map_sync.

    Returns:
        tuple: (shift_info, idxs, mean of the corrected chunk over time).
    """
    # Imports are repeated here because this function may execute in a
    # freshly spawned ipyparallel engine without the module's globals.
    from skimage.external.tifffile import imread
    import numpy as np
    import cv2
    try:
        cv2.setNumThreads(1)
    except:
        1  # 'Open CV is naturally single threaded'
    from caiman.motion_correction import tile_and_correct
    img_name, out_fname, idxs, shape_mov, template, strides, overlaps, max_shifts,\
        add_to_movie, max_deviation_rigid, upsample_factor_grid, newoverlaps, newstrides, shifts_opencv = params
    imgs = imread(img_name, key=idxs)
    mc = np.zeros(imgs.shape, dtype=np.float32)
    shift_info = []
    for count, img in enumerate(imgs):
        if count % 10 == 0:
            print(count)
        mc[count], total_shift, start_step, xy_grid = tile_and_correct(img, template, strides, overlaps, max_shifts, add_to_movie=add_to_movie, newoverlaps=newoverlaps, newstrides=newstrides,
                                                                       upsample_factor_grid=upsample_factor_grid, upsample_factor_fft=10, show_movie=False, max_deviation_rigid=max_deviation_rigid, shifts_opencv=shifts_opencv)
        shift_info.append([total_shift, start_step, xy_grid])
    if out_fname is not None:
        # Write the corrected frames into the shared memmap (Fortran order,
        # pixels x frames) at the columns belonging to this chunk.
        outv = np.memmap(out_fname, mode='r+', dtype=np.float32,
                         shape=shape_mov, order='F')
        outv[:, idxs] = np.reshape(
            mc.astype(np.float32), (len(imgs), -1), order='F').T
    return shift_info, idxs, np.nanmean(mc, 0)
def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0, template=None, max_shifts=(12, 12), max_deviation_rigid=3, newoverlaps=None, newstrides=None,
                                upsample_factor_grid=4, order='F', dview=None, save_movie=True, base_name='none', num_splits=None, shifts_opencv=False):
    '''
    Split a TIFF movie into time chunks and motion-correct each chunk,
    optionally in parallel through `dview`; corrected frames are written
    into one memmap file when `save_movie` is True.

    Returns (fname_tot, res): the saved memmap path (or None) and the list
    of per-chunk results from tile_and_correct_wrapper.
    '''
    with TiffFile(fname) as tf:
        d1, d2 = tf[0].shape
        T = len(tf)
    if type(splits) is int:
        idxs = np.array_split(list(range(T)), splits)
    else:
        # Caller supplied explicit frame-index groups.
        # NOTE(review): save_movie is forced off in this branch — confirm
        # this is intentional and not a stray line.
        idxs = splits
        save_movie = False
    if template is None:
        raise Exception('Not implemented')
    shape_mov = (d1 * d2, T)
    dims = d1, d2
    if num_splits is not None:
        # Randomly subsample chunks (used for template-refinement passes),
        # in which case the corrected movie cannot be assembled.
        idxs = np.array(idxs)[np.random.randint(0, len(idxs), num_splits)]
        save_movie = False
        print('**** MOVIE NOT SAVED BECAUSE num_splits is not None ****')
    if save_movie:
        if base_name is None:
            base_name = fname[:-4]
        # File name encodes dimensions/order/frame count, as expected by
        # the caiman memmap loaders.
        fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(
            1 if len(dims) == 2 else dims[2]) + '_order_' + str(order) + '_frames_' + str(T) + '_.mmap'
        fname_tot = os.path.join(os.path.split(fname)[0], fname_tot)
        # Pre-allocate the output memmap that the workers write into.
        np.memmap(fname_tot, mode='w+', dtype=np.float32,
                  shape=shape_mov, order=order)
    else:
        fname_tot = None
    pars = []
    for idx in idxs:
        pars.append([fname, fname_tot, idx, shape_mov, template, strides, overlaps, max_shifts, np.array(
            add_to_movie, dtype=np.float32), max_deviation_rigid, upsample_factor_grid, newoverlaps, newstrides, shifts_opencv])
    t1 = time.time()
    if dview is not None:
        res = dview.map_sync(tile_and_correct_wrapper, pars)
    else:
        res = list(map(tile_and_correct_wrapper, pars))
    print((time.time() - t1))
    return fname_tot, res
# ---------------------------------------------------------------------------
# Set up the parallel-processing cluster (ipyparallel, locally or via SLURM).
backend = 'local'
if backend == 'SLURM':
    # Worker count is dictated by the SLURM allocation.
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    n_processes = int(os.environ.get('SLURM_NPROCS'))
else:
    # roughly number of cores on your machine minus 1
    # NOTE(review): despite the comment above, nothing is subtracted here —
    # confirm which behavior is intended.
    n_processes = np.maximum(int(psutil.cpu_count()), 1)
print(('using ' + str(n_processes) + ' processes'))
single_thread = False
if single_thread:
    dview = None
else:
    try:
        # Close a client left over from a previous run, if any.
        c.close()
    except:
        print('C was not existing, creating one')
    print("Stopping cluster to avoid unnencessary use of memory....")
    sys.stdout.flush()
    if backend == 'SLURM':
        try:
            cm.stop_server(is_slurm=True)
        except:
            print('Nothing to stop')
        slurm_script = '/mnt/xfs1/home/agiovann/SOFTWARE/Constrained_NMF/SLURM/slurmStart.sh'
        cm.start_server(slurm_script=slurm_script)
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        c = Client(ipython_dir=pdir, profile=profile)
    else:
        cm.stop_server()
        cm.start_server()
        c = Client()
    print(('Using ' + str(len(c)) + ' processes'))
    # Direct view over all engines; used as `dview` by the correction calls.
    dview = c[:len(c)]
# ---------------------------------------------------------------------------
# Rigid motion correction: build an initial template from the first 500
# frames, then run piecewise correction with patching disabled
# (strides/overlaps None, max_deviation_rigid 0).
t1 = time.time()
fname = 'Sue_2000.tif'
max_shifts = (12, 12)
splits = 56  # for parallelization split the movies in num_splits chuncks across time
num_splits_to_process = 28
m = cm.load(fname, subindices=slice(0, 500, None))
template = cm.motion_correction.bin_median(m[100:400].copy().motion_correct(
    max_shifts[0], max_shifts[1], template=None)[0])
print(time.time() - t1)
shifts_opencv = False
new_templ = template
add_to_movie = -np.min(template)
save_movie = False
num_iter = 1
for iter_ in range(num_iter):
    print(iter_)
    old_templ = new_templ.copy()
    if iter_ == num_iter - 1:
        # Only the final iteration processes all chunks and writes the
        # corrected movie to disk.
        save_movie = True
        print('saving!')
        num_splits_to_process = None
    fname_tot, res = motion_correction_piecewise(fname, splits, None, None,
                                                 add_to_movie=add_to_movie, template=old_templ, max_shifts=max_shifts, max_deviation_rigid=0,
                                                 newoverlaps=None, newstrides=None,
                                                 upsample_factor_grid=4, order='F', dview=dview, save_movie=save_movie, base_name=fname[:-4] + '_rig_', num_splits=num_splits_to_process, shifts_opencv=shifts_opencv)
    # Refine the template as the median of the per-chunk mean images.
    new_templ = np.nanmedian(np.dstack([r[-1] for r in res]), -1)
    # Relative change of the template between iterations.
    print((old_div(np.linalg.norm(new_templ - old_templ), np.linalg.norm(old_templ))))
t2 = time.time() - t1
print(t2)
pl.imshow(new_templ, cmap='gray', vmax=np.percentile(new_templ, 95))
import scipy
np.save(fname[:-4] + '_templ_rigid.npy', new_templ)
template = new_templ
mr = cm.load(fname_tot)
# ---------------------------------------------------------------------------
# Piecewise-rigid ("elastic") correction: patch sizes depend on the FOV.
t1 = time.time()
if template.shape == (512, 512):
    strides = (128, 128)  # 512 512
    overlaps = (32, 32)
    newoverlaps = None
    newstrides = None
    # strides = (48,48)# 128 64
elif template.shape == (64, 128):
    strides = (32, 32)
    overlaps = (16, 16)
    newoverlaps = None
    newstrides = None
else:
    raise Exception('Unknown size, set manually')
splits = 56
num_splits_to_process = 28
upsample_factor_grid = 4
max_deviation_rigid = 3
new_templ = template
add_to_movie = -np.min(m)
num_iter = 2
save_movie = False
for iter_ in range(num_iter):
    print(iter_)
    old_templ = new_templ.copy()
    if iter_ == num_iter - 1:
        save_movie = True
        num_splits_to_process = None
        print('saving!')
    fname_tot, res = motion_correction_piecewise(fname, splits, strides, overlaps,
                                                 add_to_movie=add_to_movie, template=old_templ, max_shifts=max_shifts, max_deviation_rigid=max_deviation_rigid,
                                                 newoverlaps=newoverlaps, newstrides=newstrides,
                                                 upsample_factor_grid=upsample_factor_grid, order='F', dview=dview, save_movie=save_movie, base_name=fname[:-4] + '_els_opencv_', num_splits=num_splits_to_process, shifts_opencv=shifts_opencv)
    new_templ = np.nanmedian(np.dstack([r[-1] for r in res]), -1)
t2 = time.time() - t1
print(t2)
mc = cm.load(fname_tot)
pl.imshow(new_templ, cmap='gray', vmax=np.percentile(new_templ, 95))
np.save(fname[:-4] + '_templ_pw_rigid.npy', new_templ)
def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_dim=False, pyr_scale=.5, levels=3, winsize=100, iterations=15, poly_n=5, poly_sigma=1.2 / 5, flags=0,
                                      play_flow=False, resize_fact_flow=.2, template=None):
    """Quantify motion-correction quality for a movie file.

    Computes (a) the local-correlation image, (b) gradient-based smoothness
    of the mean image and of the correlation image, (c) per-frame Pearson
    correlation with the template, and (d) Farneback optical flow of each
    (resized) frame against the template. All results are saved to
    ``fname[:-4] + '_metrics.npz'``.

    Args:
        fname: movie file loadable by caiman.
        final_size_x, final_size_y: target size after cropping; half the
            difference to the movie size is stripped from each border.
        swap_dim (bool): forwarded to local_correlations. Now defaults to
            False (backward compatible) — several call sites in this script
            omitted the argument, which previously raised TypeError.
        pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags:
            forwarded to cv2.calcOpticalFlowFarneback.
        play_flow (bool): if True, visualize each frame's flow with pylab.
        resize_fact_flow: temporal resize factor applied before the
            optical-flow pass.
        template: optional template image; defaults to the movie bin-median.

    Returns:
        tuple: (tmpl, correlations, flows, norms, smoothness).
    """
    # cv2.OPTFLOW_FARNEBACK_GAUSSIAN
    # Import the submodule explicitly: a bare ``import scipy`` does not make
    # ``scipy.stats`` available.
    import scipy.stats
    vmin, vmax = -1, 1
    m = cm.load(fname)
    # np.int was removed in NumPy 1.24; the builtin int behaves the same.
    max_shft_x = int(np.ceil((np.shape(m)[1] - final_size_x) / 2))
    max_shft_y = int(np.ceil((np.shape(m)[2] - final_size_y) / 2))
    max_shft_x_1 = - ((np.shape(m)[1] - max_shft_x) - (final_size_x))
    max_shft_y_1 = - ((np.shape(m)[2] - max_shft_y) - (final_size_y))
    # A 0 upper bound would produce an empty slice; None means "to the end".
    if max_shft_x_1 == 0:
        max_shft_x_1 = None
    if max_shft_y_1 == 0:
        max_shft_y_1 = None
    m = m[:, max_shft_x:max_shft_x_1, max_shft_y:max_shft_y_1]
    print('Local correlations..')
    img_corr = m.local_correlations(eight_neighbours=True, swap_dim=swap_dim)
    print(m.shape)
    if template is None:
        tmpl = cm.motion_correction.bin_median(m)
    else:
        tmpl = template
    print('Compute Smoothness.. ')
    smoothness = np.sqrt(
        np.sum(np.sum(np.array(np.gradient(np.mean(m, 0)))**2, 0)))
    smoothness_corr = np.sqrt(
        np.sum(np.sum(np.array(np.gradient(img_corr))**2, 0)))
    print('Compute correlations.. ')
    correlations = []
    count = 0
    for fr in m:
        if count % 100 == 0:
            print(count)
        count += 1
        correlations.append(scipy.stats.pearsonr(
            fr.flatten(), tmpl.flatten())[0])
    print('Compute optical flow .. ')
    m = m.resize(1, 1, resize_fact_flow)
    norms = []
    flows = []
    count = 0
    for fr in m:
        if count % 100 == 0:
            print(count)
        count += 1
        flow = cv2.calcOpticalFlowFarneback(
            tmpl, fr, None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
        if play_flow:
            # Interactive visualization: the frame and its x/y flow fields.
            pl.subplot(1, 3, 1)
            pl.cla()
            pl.imshow(fr, vmin=0, vmax=300, cmap='gray')
            pl.title('movie')
            pl.subplot(1, 3, 3)
            pl.cla()
            pl.imshow(flow[:, :, 1], vmin=vmin, vmax=vmax)
            pl.title('y_flow')
            pl.subplot(1, 3, 2)
            pl.cla()
            pl.imshow(flow[:, :, 0], vmin=vmin, vmax=vmax)
            pl.title('x_flow')
            pl.pause(.05)
        n = np.linalg.norm(flow)
        flows.append(flow)
        norms.append(n)
    np.savez(fname[:-4] + '_metrics', flows=flows, norms=norms, correlations=correlations,
             smoothness=smoothness, tmpl=tmpl, smoothness_corr=smoothness_corr, img_corr=img_corr)
    return tmpl, correlations, flows, norms, smoothness
# ---------------------------------------------------------------------------
# Batch evaluation of correction quality on several groups of result files.
# NOTE(review): `glob` (and `scipy.io` below) are used here but not imported
# at the top of this file — confirm the imports exist in the full script.
m_res = glob.glob('MKL*hdf5')
final_size = (512 - 24, 512 - 24)
winsize = 100
swap_dim = False
resize_fact_flow = .2
for mv in m_res:
    tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
        mv, final_size[0], final_size[1], swap_dim, winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
m_fluos = glob.glob('M_FLUO*.mmap') + glob.glob('M_FLUO*.tif')
final_size = (64 - 20, 128 - 20)
winsize = 32
resize_fact_flow = 1
# NOTE(review): several calls below omit the positional `swap_dim` argument
# of compute_metrics_motion_correction — a TypeError at runtime unless that
# parameter has a default; confirm against the function definition.
for mv in m_fluos:
    tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
        mv, final_size[0], final_size[1], winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
m_res = glob.glob('Sue*mmap') + glob.glob('Sue*.tif')
final_size = (512 - 24, 512 - 24)
winsize = 100
swap_dim = False
resize_fact_flow = .2
for mv in m_res:
    tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
        mv, final_size[0], final_size[1], swap_dim, winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
m_fluos = glob.glob('plane*.tif') + glob.glob('row*.tif')
final_size = (64 - 20, 128 - 20)
winsize = 32
resize_fact_flow = 1
for mv in m_fluos:
    tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
        mv, final_size[0], final_size[1], winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
m_res = glob.glob('Sue*.tif')
final_size = (512 - 24, 512 - 24)
winsize = 100
resize_fact_flow = .2
for mv in m_res:
    tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
        mv, final_size[0], final_size[1], winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
# Convert MATLAB result files to hdf5 for comparison.
for mvs in glob.glob('Sue*2000*16*.mat'):
    print(mvs)
    cm.movie(scipy.io.loadmat(mvs)['data'].transpose(
        [2, 0, 1])).save(mvs[:-3] + '.hdf5')
m_fluos = glob.glob('M_FLUO*.hdf5')
final_size = (64 - 20, 128 - 20)
winsize = 32
resize_fact_flow = 1
for mv in m_fluos:
    tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
        mv, final_size[0], final_size[1], winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
m_res = glob.glob('Sue_2000*16*.hdf5')
final_size = (512 - 24, 512 - 24)
winsize = 100
resize_fact_flow = .2
for mv in m_res:
    tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
        mv, final_size[0], final_size[1], winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
# Compare saved metrics files across correction methods (commented entries
# kept for reference).
files_img = [u'/mnt/xfs1/home/agiovann/DataForPublications/Piecewise-Rigid-Analysis-paper/NORM_CORRE_OPENCV/Sue_2000_els_opencv__d1_512_d2_512_d3_1_order_F_frames_2000_._metrics.npz',
             u'/mnt/xfs1/home/agiovann/DataForPublications/Piecewise-Rigid-Analysis-paper/NORMCORRE_EFF/Sue_2000_els__d1_512_d2_512_d3_1_order_F_frames_2000_._metrics.npz',
             # u'/mnt/xfs1/home/agiovann/DataForPublications/Piecewise-Rigid-Analysis-paper/MLK/Sue_2000_MLK_metrics.npz',
             # u'/mnt/xfs1/home/agiovann/DataForPublications/Piecewise-Rigid-Analysis-paper/SIMA_RESULTS/Sue_1000_T.tifrow1_example_sima_Trow1_example_sima_metrics.npz',
             # u'/mnt/xfs1/home/agiovann/DataForPublications/Piecewise-Rigid-Analysis-paper/SUITE_2P_RES/Sue_2000_t_NB_16.._metrics.npz',
             u'/mnt/xfs1/home/agiovann/DataForPublications/Piecewise-Rigid-Analysis-paper/MLK/MKL16T._metrics.npz']
for fl in files_img:
    with np.load(fl) as ld:
        print(ld.keys())
        pl.figure()
        # Summary line: mean/std of flow norms and correlations plus the two
        # smoothness scores.
        print(fl + ':' + str(np.mean(ld['norms'])) + '+/-' + str(np.std(ld['norms'])) + ' ; ' + str(np.mean(ld['correlations'])
                                                                                                    ) + '+/-' + str(np.std(ld['correlations'])) + ' ; ' + str(ld['smoothness']) + ' ; ' + str(ld['smoothness_corr']))
        pl.subplot(1, 2, 1)
        # Try the possible extensions of the corrected movie file in turn.
        try:
            mean_img = np.mean(cm.load(fl[:-12] + 'mmap'), 0)[12:-12, 12:-12]
        except:
            try:
                mean_img = np.mean(
                    cm.load(fl[:-12] + '.tif'), 0)[12:-12, 12:-12]
            except:
                mean_img = np.mean(
                    cm.load(fl[:-12] + 'hdf5'), 0)[12:-12, 12:-12]
        lq, hq = 13.3, 318.01
        pl.imshow(mean_img, vmin=lq, vmax=hq)
        pl.colorbar()
        pl.subplot(1, 2, 2)
        pl.imshow(ld['img_corr'], vmin=0, vmax=.5)
        pl.colorbar()
# Same textual summary for the Mf* metrics files.
for fl in glob.glob('Mf*.npz'):
    with np.load(fl) as ld:
        print(ld.keys())
        pl.figure()
        print(fl + ':' + str(np.mean(ld['norms'])) + '+/-' + str(np.std(ld['norms'])) + ' ; ' + str(np.mean(ld['correlations'])
                                                                                                    ) + '+/-' + str(np.std(ld['correlations'])) + ' ; ' + str(ld['smoothness']) + ' ; ' + str(ld['smoothness_corr']))
# Visual comparison: play corrected movies and their differences.
mc.resize(1, 1, .1).play(gain=10., fr=30, offset=100, magnification=1.)
m.resize(1, 1, .2).play(gain=10, fr=30, offset=0, magnification=1.)
cm.concatenate([mr.resize(1, 1, .5), mc.resize(1, 1, .5)], axis=1).play(
    gain=10, fr=100, offset=300, magnification=1.)
import h5py
# Load the MATLAB piecewise-rigid result for comparison.
with h5py.File('sueann_pw_rigid_movie.mat') as f:
    mef = np.array(f['M2'])
mef = cm.movie(mef.transpose([0, 2, 1]))
cm.concatenate([mef.resize(1, 1, .15), mc.resize(1, 1, .15)], axis=1).play(
    gain=30, fr=40, offset=300, magnification=1.)
(mef - mc).resize(1, 1, .1).play(gain=50, fr=20, offset=0, magnification=1.)
(mc - mef).resize(1, 1, .1).play(gain=50, fr=20, offset=0, magnification=1.)
# Reload the corrected movie from its Fortran-ordered memmap.
T, d1, d2 = np.shape(m)
shape_mov = (d1 * d2, m.shape[0])
Y = np.memmap('M_FLUO_4_d1_64_d2_128_d3_1_order_F_frames_4620_.mmap',
              mode='r', dtype=np.float32, shape=shape_mov, order='F')
mc = cm.movie(np.reshape(Y, (d2, d1, T), order='F').transpose([2, 1, 0]))
mc.resize(1, 1, .25).play(gain=10., fr=50)
total_shifts = [r[0][0][0] for r in res]
pl.plot(np.reshape(np.array(total_shifts), (len(total_shifts), -1)))
# Per-frame Pearson correlation of each movie (raw / rigid / elastic /
# MATLAB) against its own mean image, inside the shift-safe crop.
m_raw = np.nanmean(m, 0)
m_rig = np.nanmean(mr, 0)
m_el = np.nanmean(mc, 0)
m_ef = np.nanmean(mef, 0)
import scipy
r_raw = []
r_rig = []
r_el = []
r_ef = []
max_shft_x, max_shft_y = max_shifts
for fr_id in range(m.shape[0]):
    fr = m[fr_id].copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
    templ_ = m_raw.copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
    r_raw.append(scipy.stats.pearsonr(fr.flatten(), templ_.flatten())[0])
    fr = mr[fr_id].copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
    templ_ = m_rig.copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
    r_rig.append(scipy.stats.pearsonr(fr.flatten(), templ_.flatten())[0])
    fr = mc[fr_id].copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
    templ_ = m_el.copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
    r_el.append(scipy.stats.pearsonr(fr.flatten(), templ_.flatten())[0])
    if 1:
        fr = mef[fr_id].copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
        templ_ = m_ef.copy()[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y]
        r_ef.append(scipy.stats.pearsonr(fr.flatten(), templ_.flatten())[0])
r_raw = np.array(r_raw)
r_rig = np.array(r_rig)
r_el = np.array(r_el)
r_ef = np.array(r_ef)
pl.plot(r_raw)
pl.plot(r_rig)
pl.plot(r_el)
pl.scatter(r_el, r_ef)
pl.plot([0, 1], [0, 1], 'r--')
# Relative improvement of MATLAB (ef) over elastic (el).
pl.plot(old_div((r_ef - r_el), np.abs(r_el)))
import pylab as pl
# 3x3 summary figure: mean images (raw / rigid / pw-rigid) and correlation
# scatter plots.
# NOTE(review): vmax = -100 as an intensity ceiling looks odd — confirm the
# data are negative-valued at this point.
vmax = -100
max_shft = 3
pl.subplot(3, 3, 1)
pl.imshow(np.nanmean(m, 0)[max_shft:-max_shft, max_shft:-
                           max_shft], cmap='gray', vmax=vmax, interpolation='none')
pl.title('raw')
pl.axis('off')
pl.xlim([0, 100])
pl.ylim([220, 320])
pl.axis('off')
pl.subplot(3, 3, 2)
pl.title('rigid mean')
pl.imshow(np.nanmean(mr, 0)[max_shft:-max_shft, max_shft:-
                            max_shft], cmap='gray', vmax=vmax, interpolation='none')
pl.xlim([0, 100])
pl.ylim([220, 320])
pl.axis('off')
pl.subplot(3, 3, 3)
pl.imshow(np.nanmean(mc, 0)[max_shft:-max_shft, max_shft:-
                            max_shft], cmap='gray', vmax=vmax, interpolation='none')
pl.title('pw-rigid mean')
pl.axis('off')
pl.xlim([0, 100])
pl.ylim([220, 320])
pl.axis('off')
pl.subplot(3, 3, 5)
pl.scatter(r_raw, r_rig)
pl.plot([0, 1], [0, 1], 'r--')
pl.xlabel('raw')
pl.ylabel('rigid')
pl.xlim([0, 1])
pl.ylim([0, 1])
pl.subplot(3, 3, 6)
pl.scatter(r_rig, r_el)
pl.plot([0, 1], [0, 1], 'r--')
pl.ylabel('pw-rigid')
pl.xlabel('rigid')
pl.xlim([0, 1])
pl.ylim([0, 1])
if 0:
    # Disabled: comparison against the MATLAB ("eft") result.
    pl.subplot(2, 3, 3)
    pl.scatter(r_el, r_ef)
    pl.plot([0, 1], [0, 1], 'r--')
    pl.ylabel('pw-rigid')
    pl.xlabel('pw-rigid eft')
    pl.xlim([0, 1])
    pl.ylim([0, 1])
    pl.subplot(2, 3, 6)
    pl.imshow(np.nanmean(mef, 0)[max_shft:-max_shft, max_shft:-
                                 max_shft], cmap='gray', vmax=vmax, interpolation='none')
    pl.title('pw-rigid eft mean')
    pl.axis('off')
pl.plot(r_ef)
mc = cm.movie(mc)
mc[np.isnan(mc)] = 0
(mc + add_to_movie).resize(1, 1, .25).play(gain=10., fr=50)
# Local correlation images for each correction method.
ccimage = m.local_correlations(eight_neighbours=True, swap_dim=False)
ccimage_rig = mr.local_correlations(eight_neighbours=True, swap_dim=False)
ccimage_els = mc.local_correlations(eight_neighbours=True, swap_dim=False)
ccimage_ef = mef.local_correlations(eight_neighbours=True, swap_dim=False)
pl.subplot(2, 2, 1)
pl.imshow(ccimage, vmin=0, vmax=0.4, interpolation='none')
pl.subplot(2, 2, 2)
pl.imshow(ccimage_rig, vmin=0, vmax=0.4, interpolation='none')
pl.subplot(2, 2, 3)
pl.imshow(ccimage_els, vmin=0, vmax=0.4, interpolation='none')
pl.subplot(2, 2, 4)
pl.imshow(ccimage_ef, vmin=0, vmax=0.4, interpolation='none')
# Inline computation of shift-field Jacobian magnitude maps (see also the
# compute_jacobians function later in this file).
all_mags = []
all_mags_eig = []
for chunk in res:
    for frame in chunk[0]:
        shifts, pos, init = frame
        # Scatter the per-patch shifts onto the patch grid.
        x_sh = np.zeros(np.add(init[-1], 1))
        y_sh = np.zeros(np.add(init[-1], 1))
        for nt, sh in zip(init, shifts):
            x_sh[nt] = sh[0]
            y_sh[nt] = sh[1]
        # Finite differences of the shift fields along the two grid axes.
        jac_xx = x_sh[1:, :] - x_sh[:-1, :]
        jac_yx = y_sh[1:, :] - y_sh[:-1, :]
        jac_xy = x_sh[:, 1:] - x_sh[:, :-1]
        jac_yy = y_sh[:, 1:] - y_sh[:, :-1]
        mag_norm = np.sqrt(jac_xx[:, :-1]**2 + jac_yx[:, :-1]
                           ** 2 + jac_xy[:-1, :]**2 + jac_yy[:-1, :]**2)
        all_mags.append(mag_norm)
mam = cm.movie(np.dstack(all_mags)).transpose([2, 0, 1])
pl.imshow(np.max(mam, 0), interpolation='none')
# ---------------------------------------------------------------------------
# Optical-flow comparison of rigid (m) vs pw-rigid (m1) vs raw (m0).
m = cm.load('rig_sue__d1_512_d2_512_d3_1_order_F_frames_3000_.mmap')
m1 = cm.load('els_sue__d1_512_d2_512_d3_1_order_F_frames_3000_.mmap')
m0 = cm.load('k56_20160608_RSM_125um_41mW_zoom2p2_00001_00034.tif')
tmpl = cm.motion_correction.bin_median(m)
tmpl1 = cm.motion_correction.bin_median(m1)
tmpl0 = cm.motion_correction.bin_median(m0)
vmin, vmax = -1, 1
count = 0
# Farneback optical-flow parameters.
pyr_scale = .5
levels = 3
winsize = 100
iterations = 15
poly_n = 5
poly_sigma = old_div(1.2, 5)
flags = 0  # cv2.OPTFLOW_FARNEBACK_GAUSSIAN
norms = []
flows = []
for fr, fr1, fr0 in zip(m.resize(1, 1, .2), m1.resize(1, 1, .2), m0.resize(1, 1, .2)):
    count += 1
    print(count)
    # Residual flow of each frame against its template, inside the crop.
    flow1 = cv2.calcOpticalFlowFarneback(tmpl1[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y], fr1[max_shft_x:-
                                                                                                    max_shft_x, max_shft_y:-max_shft_y], None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
    flow = cv2.calcOpticalFlowFarneback(tmpl[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y], fr[max_shft_x:-
                                                                                                 max_shft_x, max_shft_y:-max_shft_y], None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
    flow0 = cv2.calcOpticalFlowFarneback(tmpl0[max_shft_x:-max_shft_x, max_shft_y:-max_shft_y], fr0[max_shft_x:-
                                                                                                    max_shft_x, max_shft_y:-max_shft_y], None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
    n1, n, n0 = np.linalg.norm(flow1), np.linalg.norm(
        flow), np.linalg.norm(flow0)
    flows.append([flow1, flow, flow0])
    norms.append([n1, n, n0])
# Split flows into per-method x / y component movies.
flm1_x = cm.movie(np.dstack([fl[0][:, :, 0]
                             for fl in flows])).transpose([2, 0, 1])
flm_x = cm.movie(np.dstack([fl[1][:, :, 0]
                            for fl in flows])).transpose([2, 0, 1])
flm0_x = cm.movie(np.dstack([fl[2][:, :, 0]
                             for fl in flows])).transpose([2, 0, 1])
flm1_y = cm.movie(np.dstack([fl[0][:, :, 1]
                             for fl in flows])).transpose([2, 0, 1])
flm_y = cm.movie(np.dstack([fl[1][:, :, 1]
                            for fl in flows])).transpose([2, 0, 1])
flm0_y = cm.movie(np.dstack([fl[2][:, :, 1]
                             for fl in flows])).transpose([2, 0, 1])
pl.figure()
pl.subplot(2, 1, 1)
pl.plot(norms)
pl.subplot(2, 1, 2)
pl.plot(np.arange(0, 3000 * .2, 0.2), r_el)
pl.plot(np.arange(0, 3000 * .2, 0.2), r_rig)
pl.plot(np.arange(0, 3000 * .2, 0.2), r_raw)
# Mean absolute flow maps per method and component.
pl.figure()
vmin = -.5
vmax = .5
cmap = 'hot'
pl.subplot(2, 3, 1)
pl.imshow(np.mean(np.abs(flm1_x), 0), vmin=vmin, vmax=vmax, cmap=cmap)
pl.title('PW-RIGID')
pl.ylabel('optical flow x')
pl.colorbar()
pl.subplot(2, 3, 2)
pl.title('RIGID')
pl.imshow(np.mean(np.abs(flm_x), 0), vmin=vmin, vmax=vmax, cmap=cmap)
pl.colorbar()
pl.subplot(2, 3, 3)
pl.imshow(np.mean(np.abs(flm0_x), 0), vmin=vmin * 4, vmax=vmax * 4, cmap=cmap)
pl.title('RAW')
pl.colorbar()
pl.subplot(2, 3, 4)
pl.imshow(np.mean(np.abs(flm1_y), 0), vmin=vmin, vmax=vmax, cmap=cmap)
pl.ylabel('optical flow y')
pl.colorbar()
pl.subplot(2, 3, 5)
pl.imshow(np.mean(np.abs(flm_y), 0), vmin=vmin, vmax=vmax, cmap=cmap)
pl.colorbar()
pl.subplot(2, 3, 6)
pl.imshow(np.mean(np.abs(flm0_y), 0), vmin=vmin * 4, vmax=vmax * 4, cmap=cmap)
pl.colorbar()
# Flow norms rescaled for the publication figure below.
fl_rig = [n[1] / 1000 for n in norms]
fl_raw = [n[2] / 1000 for n in norms]
fl_el = [n[0] / 1000 for n in norms]
font = {'family': 'Myriad Pro',
        'weight': 'regular',
        'size': 15}
pl.rc('font', **font)
# ---------------------------------------------------------------------------
# Final 4x3 publication figure: mean images, correlation scatters, optical
# flow norms, and flow-magnitude maps.
vmax = -100
max_shft = 3
pl.subplot(4, 3, 1)
pl.imshow(np.nanmean(m, 0)[max_shft:-max_shft, max_shft:-
                           max_shft], cmap='gray', vmax=vmax, interpolation='none')
pl.title('raw')
pl.axis('off')
pl.xlim([0, 100])
pl.ylim([220, 320])
pl.axis('off')
pl.subplot(4, 3, 2)
pl.title('rigid mean')
pl.imshow(np.nanmean(mr, 0)[max_shft:-max_shft, max_shft:-
                            max_shft], cmap='gray', vmax=vmax, interpolation='none')
pl.xlim([0, 100])
pl.ylim([220, 320])
pl.axis('off')
pl.subplot(4, 3, 3)
pl.imshow(np.nanmean(mc, 0)[max_shft:-max_shft, max_shft:-
                            max_shft], cmap='gray', vmax=vmax, interpolation='none')
pl.title('pw-rigid mean')
pl.axis('off')
pl.xlim([0, 100])
pl.ylim([220, 320])
pl.axis('off')
pl.subplot(4, 3, 5)
pl.scatter(r_raw, r_rig, s=50, c='red')
pl.axis('tight')
pl.plot([0, 1], [0, 1], 'k--')
pl.xlabel('raw')
pl.ylabel('rigid')
pl.xlim([0.2, .45])
pl.ylim([.2, .45])
pl.locator_params(nbins=4)
pl.subplot(4, 3, 6)
pl.scatter(r_rig, r_el, s=50, c='red')
pl.plot([0, 1], [0, 1], 'k--')
pl.ylabel('pw-rigid')
pl.xlabel('rigid')
pl.xlim([0.3, .45])
pl.ylim([.3, .45])
pl.locator_params(nbins=4)
pl.subplot(4, 3, 4)
pl.plot(np.arange(0, 3000 * .2, 0.2), r_el)
pl.plot(np.arange(0, 3000 * .2, 0.2), r_rig)
pl.plot(np.arange(0, 3000 * .2, 0.2), r_raw)
pl.xlim([220, 320])
pl.ylabel('correlation')
pl.locator_params(nbins=4)
pl.subplot(4, 3, 7)
pl.plot(norms)
pl.xlim([220, 320])
pl.ylabel('norm of optical flow')
pl.xlabel('frames')
pl.locator_params(nbins=4)
pl.subplot(4, 3, 8)
pl.scatter(fl_raw, fl_rig, s=50, c='red')
pl.axis('tight')
pl.plot([0, 3000], [0, 3000], 'k--')
pl.xlabel('raw')
pl.ylabel('rigid')
pl.xlim([0, 3])
pl.ylim([0, 3])
pl.locator_params(nbins=4)
pl.subplot(4, 3, 9)
pl.scatter(fl_rig, fl_el, s=50, c='red')
pl.plot([0, 1000], [0, 1000], 'k--')
pl.ylabel('pw-rigid')
pl.xlabel('rigid')
pl.xlim([0, 1])
pl.ylim([0, 1])
pl.locator_params(nbins=4)
# Mean optical-flow magnitude maps for rigid and pw-rigid.
ofl_mod_rig = np.mean(np.sqrt(flm_x**2 + flm_y**2), 0)
ofl_mod_el = np.mean(np.sqrt(flm1_x**2 + flm1_y**2), 0)
pl.subplot(4, 3, 10)
pl.imshow(ofl_mod_el, cmap='hot', vmin=0, vmax=1, interpolation='none')
pl.axis('off')
pl.colorbar()
pl.subplot(4, 3, 11)
pl.imshow(ofl_mod_rig, cmap='hot', vmin=0, vmax=1, interpolation='none')
pl.axis('off')
pl.axis('off')
pl.subplot(4, 3, 12)
pl.imshow(ofl_mod_el, cmap='hot', vmin=0, vmax=1, interpolation='none')
pl.axis('off')
pl.axis('off')
# Embed fonts as TrueType in exported PDFs.
pl.rcParams['pdf.fonttype'] = 42
# ---------------------------------------------------------------------------
# SIMA (HiddenMarkov2D) comparison run on the same data.
import sima
import sima.motion
from sima.motion import HiddenMarkov2D
fname_gr = 'Sue_1000_T.tif'
# NOTE(review): the line below immediately overwrites the file name above —
# presumably a leftover from switching inputs; confirm which is intended.
fname_gr = 'Sue_1000_T.tifrow1_example_sima_T.tif'
sequences = [sima.Sequence.create('TIFF', fname_gr)]
dataset = sima.ImagingDataset(sequences, fname_gr)
import time
t1 = time.time()
granularity = 'row'
gran_n = 1
mc_approach = sima.motion.HiddenMarkov2D(granularity=(
    granularity, gran_n), max_displacement=max_shifts, verbose=True, n_processes=14)
new_dataset = mc_approach.correct(dataset, None)
t2 = time.time() - t1
print(t2)
new_dataset.export_frames(
    [[[fname_gr[:-4] + granularity + str(gran_n) + '_example_sima.tif']]], fmt='TIFF16')
m_s = cm.load(granularity + str(gran_n) + '_example_sima.tif')
m_s_row = cm.load('example_sima.tif')
def compute_jacobians(res):
    """Compute shift-field Jacobian magnitude maps for each corrected frame.

    For every frame in every chunk of `res` (the per-chunk output of
    motion_correction_piecewise), the per-patch x/y shifts are laid out on
    their patch grid, differentiated with finite differences, and summarized
    per grid cell as (a) the Frobenius norm of the 2x2 Jacobian and (b) the
    largest eigenvalue magnitude of that Jacobian.

    Fixes over the original draft: the stray `lsl` token (a NameError at
    runtime) is removed, `jac_yx` is used where `jac_yy` was passed twice,
    `mag_eig` is actually computed, and the results are returned.

    Args:
        res: iterable of chunks; chunk[0] iterates (shifts, pos, init) per
            frame, where init holds patch-grid index tuples and shifts the
            corresponding (x, y) shift of each patch.

    Returns:
        tuple: (all_mags, all_mags_eig) — one 2-D array per frame in each
        list.
    """
    all_mags = []
    all_mags_eig = []
    for chunk in res:
        for frame in chunk[0]:
            shifts, pos, init = frame
            # Scatter the per-patch shifts onto the patch grid.
            x_sh = np.zeros(np.add(init[-1], 1))
            y_sh = np.zeros(np.add(init[-1], 1))
            for nt, sh in zip(init, shifts):
                x_sh[nt] = sh[0]
                y_sh[nt] = sh[1]
            # Finite-difference Jacobian components of the shift field.
            jac_xx = x_sh[1:, :] - x_sh[:-1, :]
            jac_yx = y_sh[1:, :] - y_sh[:-1, :]
            jac_xy = x_sh[:, 1:] - x_sh[:, :-1]
            jac_yy = y_sh[:, 1:] - y_sh[:, :-1]
            # Crop to the common grid so all four components align.
            a = jac_xx[:, :-1]
            b = jac_xy[:-1, :]
            c = jac_yx[:, :-1]
            d = jac_yy[:-1, :]
            mag_norm = np.sqrt(a**2 + c**2 + b**2 + d**2)
            # Eigenvalues of [[a, b], [c, d]] per grid cell in closed form;
            # the complex sqrt keeps conjugate pairs well defined.
            half_tr = (a + d) / 2.0
            det = a * d - b * c
            disc = np.sqrt(half_tr**2 - det + 0j)
            mag_eig = np.maximum(np.abs(half_tr + disc), np.abs(half_tr - disc))
            all_mags.append(mag_norm)
            all_mags_eig.append(mag_eig)
    return all_mags, all_mags_eig
# ---------------------------------------------------------------------------
# Sanity check of the optical-flow metric: apply known random rigid shifts
# to a corrected movie and verify the recovered mean flow tracks the shifts.
nmf = 'M_FLUO_t_shifted_flow.tif'
m = cm.load('M_FLUO_t_1000_els__d1_64_d2_128_d3_1_order_F_frames_1000_.mmap')
shfts = [(a, b) for a, b in zip(np.random.randn(
    m.shape[0]), np.random.randn(m.shape[0]))]
msh = m.copy().apply_shifts(shfts)
msh[:, 10:-10, 10:-10].save(nmf)
template = np.nanmean(m[:, 10:-10, 10:-10], 0)
# NOTE(review): this call omits the positional swap_dim argument of
# compute_metrics_motion_correction — confirm the function gives it a
# default value.
tmpl, correlations, flows_orig, norms, smoothness = compute_metrics_motion_correction(
    'M_FLUO_t_shifted_flow.tif', template.shape[0], template.shape[1], winsize=32, play_flow=False, resize_fact_flow=1, template=template)
with np.load('M_FLUO_t_shifted_flow_metrics.npz') as ld:
    flows = ld['flows']
# Mean flow component per frame: index 0 is x, index 1 is y.
ff_1 = [np.nanmean(f[:, :, 1]) for f in flows]
ff_0 = [np.nanmean(f[:, :, 0]) for f in flows]
pl.subplot(2, 1, 1)
pl.plot(np.array(shfts)[:, 1])
pl.plot(np.array(ff_0))
pl.legend(['shifts', 'optical flow'])
pl.xlim([400, 600])
pl.ylabel('x shifts')
pl.subplot(2, 1, 2)
pl.plot(np.array(shfts)[:, 0])
pl.plot(np.array(ff_1))
pl.xlim([400, 600])
pl.xlabel('frames (15 Hz)')
pl.ylabel('y shifts')
|
from turtlelsystem.TurtleSVGMachine import TurtleSVGMachine
from nose.tools import assert_almost_equal
def test_forward():
    """After 'FORWARD 10' on a 20x20 canvas the x coordinate reaches 20."""
    machine = TurtleSVGMachine(width=20, height=20)
    machine.do_command("FORWARD 10")
    assert_almost_equal(machine.x, 20.0)
def test_backward():
    """After 'BACKWARD 10' on a 20x20 canvas the x coordinate reaches 0."""
    machine = TurtleSVGMachine(width=20, height=20)
    machine.do_command("BACKWARD 10")
    assert_almost_equal(machine.x, 0.0)
def test_left():
    """'LEFT 30' turns the heading counter-clockwise to 30 degrees."""
    machine = TurtleSVGMachine()
    machine.do_command("LEFT 30")
    assert_almost_equal(machine.theta, 30.0)
def test_right():
    """'RIGHT 30' turns clockwise, wrapping the heading to 330 degrees."""
    machine = TurtleSVGMachine()
    machine.do_command("RIGHT 30")
    assert_almost_equal(machine.theta, 330.0)
|
"""Module for the unix socket protocol
This module implements the local unix socket protocol. You only need
this module and the opcodes module in the client program in order to
communicate with the master.
The module is also used by the master daemon.
"""
import socket
import collections
import time
import errno
import logging
from ganeti import serializer
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import objects
from ganeti import pathutils
# Wire-format dictionary keys used in LUXI request/response messages.
KEY_METHOD = constants.LUXI_KEY_METHOD
KEY_ARGS = constants.LUXI_KEY_ARGS
KEY_SUCCESS = constants.LUXI_KEY_SUCCESS
KEY_RESULT = constants.LUXI_KEY_RESULT
KEY_VERSION = constants.LUXI_KEY_VERSION
# Names of the LUXI request methods understood by the master daemon.
REQ_SUBMIT_JOB = constants.LUXI_REQ_SUBMIT_JOB
REQ_SUBMIT_JOB_TO_DRAINED_QUEUE = constants.LUXI_REQ_SUBMIT_JOB_TO_DRAINED_QUEUE
REQ_SUBMIT_MANY_JOBS = constants.LUXI_REQ_SUBMIT_MANY_JOBS
REQ_WAIT_FOR_JOB_CHANGE = constants.LUXI_REQ_WAIT_FOR_JOB_CHANGE
REQ_CANCEL_JOB = constants.LUXI_REQ_CANCEL_JOB
REQ_ARCHIVE_JOB = constants.LUXI_REQ_ARCHIVE_JOB
REQ_CHANGE_JOB_PRIORITY = constants.LUXI_REQ_CHANGE_JOB_PRIORITY
REQ_AUTO_ARCHIVE_JOBS = constants.LUXI_REQ_AUTO_ARCHIVE_JOBS
REQ_QUERY = constants.LUXI_REQ_QUERY
REQ_QUERY_FIELDS = constants.LUXI_REQ_QUERY_FIELDS
REQ_QUERY_JOBS = constants.LUXI_REQ_QUERY_JOBS
REQ_QUERY_INSTANCES = constants.LUXI_REQ_QUERY_INSTANCES
REQ_QUERY_NODES = constants.LUXI_REQ_QUERY_NODES
REQ_QUERY_GROUPS = constants.LUXI_REQ_QUERY_GROUPS
REQ_QUERY_NETWORKS = constants.LUXI_REQ_QUERY_NETWORKS
REQ_QUERY_EXPORTS = constants.LUXI_REQ_QUERY_EXPORTS
REQ_QUERY_CONFIG_VALUES = constants.LUXI_REQ_QUERY_CONFIG_VALUES
REQ_QUERY_CLUSTER_INFO = constants.LUXI_REQ_QUERY_CLUSTER_INFO
REQ_QUERY_TAGS = constants.LUXI_REQ_QUERY_TAGS
REQ_SET_DRAIN_FLAG = constants.LUXI_REQ_SET_DRAIN_FLAG
REQ_SET_WATCHER_PAUSE = constants.LUXI_REQ_SET_WATCHER_PAUSE
REQ_ALL = constants.LUXI_REQ_ALL
# Default connect and read/write timeouts, and the wait-for-job-change cap.
DEF_CTMO = constants.LUXI_DEF_CTMO
DEF_RWTO = constants.LUXI_DEF_RWTO
WFJC_TIMEOUT = constants.LUXI_WFJC_TIMEOUT
# LUXI error hierarchy: every protocol failure derives from ProtocolError
# (itself a LuxiError) so callers can catch them uniformly.
class ProtocolError(errors.LuxiError):
  """Denotes an error in the LUXI protocol."""
class ConnectionClosedError(ProtocolError):
  """Connection closed error."""
class TimeoutError(ProtocolError):
  """Operation timeout error."""
class RequestError(ProtocolError):
  """Error on request.
  This signifies an error in the request format or request handling,
  but not (e.g.) an error in starting up an instance.
  Some common conditions that can trigger this exception:
    - job submission failed because the job data was wrong
    - query failed because required fields were missing
  """
class NoMasterError(ProtocolError):
  """The master cannot be reached.
  This means that the master daemon is not running or the socket has
  been removed.
  """
# NOTE: shadows the Python 3 builtin PermissionError; harmless under the
# Python 2 this module targets.
class PermissionError(ProtocolError):
  """Permission denied while connecting to the master socket.
  This means the user doesn't have the proper rights.
  """
class Transport:
  """Low-level transport class.
  This is used on the client side.
  This could be replace by any other class that provides the same
  semantics to the Client. This means:
    - can send messages and receive messages
    - safe for multithreading
  """
  def __init__(self, address, timeouts=None):
    """Constructor for the Client class.
    Arguments:
      - address: a valid address the the used transport class
      - timeout: a list of timeouts, to be used on connect and read/write
    There are two timeouts used since we might want to wait for a long
    time for a response, but the connect timeout should be lower.
    If not passed, we use a default of 10 and respectively 60 seconds.
    Note that on reading data, since the timeout applies to an
    invidual receive, it might be that the total duration is longer
    than timeout value passed (we make a hard limit at twice the read
    timeout).
    """
    self.address = address
    if timeouts is None:
      self._ctimeout, self._rwtimeout = DEF_CTMO, DEF_RWTO
    else:
      self._ctimeout, self._rwtimeout = timeouts
    self.socket = None
    # Bytes received but not yet terminated by LUXI_EOM.
    self._buffer = ""
    # Complete messages already split off the stream, pending Recv().
    self._msgs = collections.deque()
    try:
      self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      # Try to connect
      try:
        # Retried because _Connect raises RetryAgain while the server's
        # backlog is full (EAGAIN).
        utils.Retry(self._Connect, 1.0, self._ctimeout,
                    args=(self.socket, address, self._ctimeout))
      except utils.RetryTimeout:
        raise TimeoutError("Connect timed out")
      self.socket.settimeout(self._rwtimeout)
    except (socket.error, NoMasterError):
      # Never leak a half-open socket on a failed connect.
      if self.socket is not None:
        self.socket.close()
      self.socket = None
      raise
  @staticmethod
  def _Connect(sock, address, timeout):
    # Maps low-level socket errors onto this module's protocol exceptions.
    sock.settimeout(timeout)
    try:
      sock.connect(address)
    except socket.timeout, err:
      raise TimeoutError("Connect timed out: %s" % str(err))
    except socket.error, err:
      error_code = err.args[0]
      if error_code in (errno.ENOENT, errno.ECONNREFUSED):
        raise NoMasterError(address)
      elif error_code in (errno.EPERM, errno.EACCES):
        raise PermissionError(address)
      elif error_code == errno.EAGAIN:
        # Server's socket backlog is full at the moment
        raise utils.RetryAgain()
      raise
  def _CheckSocket(self):
    """Make sure we are connected.
    """
    if self.socket is None:
      raise ProtocolError("Connection is closed")
  def Send(self, msg):
    """Send a message.
    This just sends a message and doesn't wait for the response.
    """
    # The terminator is the framing delimiter, so it must not appear
    # inside a payload.
    if constants.LUXI_EOM in msg:
      raise ProtocolError("Message terminator found in payload")
    self._CheckSocket()
    try:
      # TODO: sendall is not guaranteed to send everything
      self.socket.sendall(msg + constants.LUXI_EOM)
    except socket.timeout, err:
      raise TimeoutError("Sending timeout: %s" % str(err))
  def Recv(self):
    """Try to receive a message from the socket.
    In case we already have messages queued, we just return from the
    queue. Otherwise, we try to read data with a _rwtimeout network
    timeout, and making sure we don't go over 2x_rwtimeout as a global
    limit.
    """
    self._CheckSocket()
    etime = time.time() + self._rwtimeout
    while not self._msgs:
      if time.time() > etime:
        raise TimeoutError("Extended receive timeout")
      while True:
        try:
          data = self.socket.recv(4096)
        except socket.timeout, err:
          raise TimeoutError("Receive timeout: %s" % str(err))
        except socket.error, err:
          if err.args and err.args[0] == errno.EAGAIN:
            # Transient; retry the recv.
            continue
          raise
        break
      if not data:
        raise ConnectionClosedError("Connection closed while reading")
      # Split on the terminator; the final element is the (possibly
      # empty) unfinished tail kept for the next read.
      new_msgs = (self._buffer + data).split(constants.LUXI_EOM)
      self._buffer = new_msgs.pop()
      self._msgs.extend(new_msgs)
    return self._msgs.popleft()
  def Call(self, msg):
    """Send a message and wait for the response.
    This is just a wrapper over Send and Recv.
    """
    self.Send(msg)
    return self.Recv()
  def Close(self):
    """Close the socket"""
    if self.socket is not None:
      self.socket.close()
      self.socket = None
def ParseRequest(msg):
  """Parses a LUXI request message.
  @param msg: serialized JSON request
  @return: tuple of (method, args, version)
  @raise ProtocolError: if the message is not valid JSON, not a dict,
    or lacks the method/arguments keys
  """
  try:
    request = serializer.LoadJson(msg)
  # "as" syntax (valid since Python 2.6) instead of the legacy
  # "except E, err" form, which is a syntax error under Python 3.
  except ValueError as err:
    raise ProtocolError("Invalid LUXI request (parsing error): %s" % err)
  logging.debug("LUXI request: %s", request)
  if not isinstance(request, dict):
    logging.error("LUXI request not a dict: %r", msg)
    raise ProtocolError("Invalid LUXI request (not a dict)")
  method = request.get(KEY_METHOD, None) # pylint: disable=E1103
  args = request.get(KEY_ARGS, None) # pylint: disable=E1103
  version = request.get(KEY_VERSION, None) # pylint: disable=E1103
  if method is None or args is None:
    logging.error("LUXI request missing method or arguments: %r", msg)
    raise ProtocolError(("Invalid LUXI request (no method or arguments"
                         " in request): %r") % msg)
  return (method, args, version)
def ParseResponse(msg):
  """Parses a LUXI response message.
  @param msg: serialized JSON response
  @return: tuple of (success, result, version)
  @raise ProtocolError: if the message cannot be deserialized or lacks
    the success/result keys
  """
  # Parse the result
  try:
    data = serializer.LoadJson(msg)
  except KeyboardInterrupt:
    # Never swallow a user interruption.
    raise
  # "as" syntax (valid since Python 2.6) instead of the legacy
  # "except E, err" form, which is a syntax error under Python 3.
  except Exception as err:
    raise ProtocolError("Error while deserializing response: %s" % str(err))
  # Validate response
  if not (isinstance(data, dict) and
          KEY_SUCCESS in data and
          KEY_RESULT in data):
    raise ProtocolError("Invalid response from server: %r" % data)
  return (data[KEY_SUCCESS], data[KEY_RESULT],
          data.get(KEY_VERSION, None)) # pylint: disable=E1103
def FormatResponse(success, result, version=None):
  """Formats a LUXI response message.
  @param success: whether the request succeeded
  @param result: the payload (or error description)
  @param version: optional protocol version to embed
  @return: the serialized JSON response
  """
  payload = {
    KEY_SUCCESS: success,
    KEY_RESULT: result,
    }
  # The version key is optional on the wire.
  if version is not None:
    payload[KEY_VERSION] = version
  logging.debug("LUXI response: %s", payload)
  return serializer.DumpJson(payload)
def FormatRequest(method, args, version=None):
  """Formats a LUXI request message.
  @param method: the request method name
  @param args: the method arguments
  @param version: optional protocol version to embed
  @return: the serialized JSON request
  """
  payload = {
    KEY_METHOD: method,
    KEY_ARGS: args,
    }
  # The version key is optional on the wire.
  if version is not None:
    payload[KEY_VERSION] = version
  return serializer.DumpJson(payload)
def CallLuxiMethod(transport_cb, method, args, version=None):
  """Send a LUXI request via a transport and return the response.
  @param transport_cb: callable taking a serialized request and
    returning the serialized response
  @raise errors.LuxiError: on a version mismatch with the server
  @raise RequestError: when the server reports a failure
  """
  assert callable(transport_cb)
  # Serialize, send and parse in one pass.
  reply = transport_cb(FormatRequest(method, args, version=version))
  (success, result, resp_version) = ParseResponse(reply)
  # Verify version if there was one in the response
  if resp_version is not None and resp_version != version:
    raise errors.LuxiError("LUXI version mismatch, client %s, response %s" %
                           (version, resp_version))
  if success:
    return result
  # On failure the result encodes the error; re-raise a typed error if
  # possible, otherwise wrap it in a RequestError.
  errors.MaybeRaise(result)
  raise RequestError(result)
class Client(object):
  """High-level client implementation.
  This uses a backing Transport-like class on top of which it
  implements data serialization/deserialization.
  """
  def __init__(self, address=None, timeouts=None, transport=Transport):
    """Constructor for the Client class.
    Arguments:
      - address: a valid address the the used transport class
      - timeout: a list of timeouts, to be used on connect and read/write
      - transport: a Transport-like class
    If timeout is not passed, the default timeouts of the transport
    class are used.
    """
    if address is None:
      address = pathutils.MASTER_SOCKET
    self.address = address
    self.timeouts = timeouts
    self.transport_class = transport
    self.transport = None
    # Connect eagerly; the transport is re-created on demand after errors.
    self._InitTransport()
  def _InitTransport(self):
    """(Re)initialize the transport if needed.
    """
    if self.transport is None:
      self.transport = self.transport_class(self.address,
                                            timeouts=self.timeouts)
  def _CloseTransport(self):
    """Close the transport, ignoring errors.
    """
    if self.transport is None:
      return
    try:
      old_transp = self.transport
      # Clear the reference first so a failing Close() still leaves us
      # in the "disconnected" state.
      self.transport = None
      old_transp.Close()
    except Exception: # pylint: disable=W0703
      pass
  def _SendMethodCall(self, data):
    # Send request and wait for response
    try:
      self._InitTransport()
      return self.transport.Call(data)
    except Exception:
      # Drop the (possibly broken) connection; the next call reconnects.
      self._CloseTransport()
      raise
  def Close(self):
    """Close the underlying connection.
    """
    self._CloseTransport()
  def CallMethod(self, method, args):
    """Send a generic request and return the response.
    """
    if not isinstance(args, (list, tuple)):
      raise errors.ProgrammerError("Invalid parameter passed to CallMethod:"
                                   " expected list, got %s" % type(args))
    return CallLuxiMethod(self._SendMethodCall, method, args,
                          version=constants.LUXI_VERSION)
  def SetQueueDrainFlag(self, drain_flag):
    return self.CallMethod(REQ_SET_DRAIN_FLAG, (drain_flag, ))
  def SetWatcherPause(self, until):
    return self.CallMethod(REQ_SET_WATCHER_PAUSE, (until, ))
  def SubmitJob(self, ops):
    # __getstate__ converts each opcode to a serializable form.
    ops_state = map(lambda op: op.__getstate__(), ops)
    return self.CallMethod(REQ_SUBMIT_JOB, (ops_state, ))
  def SubmitJobToDrainedQueue(self, ops):
    ops_state = map(lambda op: op.__getstate__(), ops)
    return self.CallMethod(REQ_SUBMIT_JOB_TO_DRAINED_QUEUE, (ops_state, ))
  def SubmitManyJobs(self, jobs):
    jobs_state = []
    for ops in jobs:
      jobs_state.append([op.__getstate__() for op in ops])
    return self.CallMethod(REQ_SUBMIT_MANY_JOBS, (jobs_state, ))
  @staticmethod
  def _PrepareJobId(request_name, job_id):
    # Normalizes a job id to int, turning bad values into a RequestError
    # that names the calling request.
    try:
      return int(job_id)
    except ValueError:
      raise RequestError("Invalid parameter passed to %s as job id: "
                         " expected integer, got value %s" %
                         (request_name, job_id))
  def CancelJob(self, job_id):
    job_id = Client._PrepareJobId(REQ_CANCEL_JOB, job_id)
    return self.CallMethod(REQ_CANCEL_JOB, (job_id, ))
  def ArchiveJob(self, job_id):
    job_id = Client._PrepareJobId(REQ_ARCHIVE_JOB, job_id)
    return self.CallMethod(REQ_ARCHIVE_JOB, (job_id, ))
  def ChangeJobPriority(self, job_id, priority):
    job_id = Client._PrepareJobId(REQ_CHANGE_JOB_PRIORITY, job_id)
    return self.CallMethod(REQ_CHANGE_JOB_PRIORITY, (job_id, priority))
  def AutoArchiveJobs(self, age):
    # Presumably sized to let the master answer within the read/write
    # timeout — TODO confirm.
    timeout = (DEF_RWTO - 1) / 2
    return self.CallMethod(REQ_AUTO_ARCHIVE_JOBS, (age, timeout))
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial,
                           timeout=WFJC_TIMEOUT):
    """Waits for changes on a job.
    @param job_id: Job ID
    @type fields: list
    @param fields: List of field names to be observed
    @type prev_job_info: None or list
    @param prev_job_info: Previously received job information
    @type prev_log_serial: None or int/long
    @param prev_log_serial: Highest log serial number previously received
    @type timeout: int/float
    @param timeout: Timeout in seconds (values larger than L{WFJC_TIMEOUT} will
      be capped to that value)
    """
    assert timeout >= 0, "Timeout can not be negative"
    return self.CallMethod(REQ_WAIT_FOR_JOB_CHANGE,
                           (job_id, fields, prev_job_info,
                            prev_log_serial,
                            min(WFJC_TIMEOUT, timeout)))
  def WaitForJobChange(self, job_id, fields, prev_job_info, prev_log_serial):
    # Loops until the server reports something other than JOB_NOTCHANGED.
    job_id = Client._PrepareJobId(REQ_WAIT_FOR_JOB_CHANGE, job_id)
    while True:
      result = self.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial)
      if result != constants.JOB_NOTCHANGED:
        break
    return result
  def Query(self, what, fields, qfilter):
    """Query for resources/items.
    @param what: One of L{constants.QR_VIA_LUXI}
    @type fields: List of strings
    @param fields: List of requested fields
    @type qfilter: None or list
    @param qfilter: Query filter
    @rtype: L{objects.QueryResponse}
    """
    result = self.CallMethod(REQ_QUERY, (what, fields, qfilter))
    return objects.QueryResponse.FromDict(result)
  def QueryFields(self, what, fields):
    """Query for available fields.
    @param what: One of L{constants.QR_VIA_LUXI}
    @type fields: None or list of strings
    @param fields: List of requested fields
    @rtype: L{objects.QueryFieldsResponse}
    """
    result = self.CallMethod(REQ_QUERY_FIELDS, (what, fields))
    return objects.QueryFieldsResponse.FromDict(result)
  def QueryJobs(self, job_ids, fields):
    return self.CallMethod(REQ_QUERY_JOBS, (job_ids, fields))
  def QueryInstances(self, names, fields, use_locking):
    return self.CallMethod(REQ_QUERY_INSTANCES, (names, fields, use_locking))
  def QueryNodes(self, names, fields, use_locking):
    return self.CallMethod(REQ_QUERY_NODES, (names, fields, use_locking))
  def QueryGroups(self, names, fields, use_locking):
    return self.CallMethod(REQ_QUERY_GROUPS, (names, fields, use_locking))
  def QueryNetworks(self, names, fields, use_locking):
    return self.CallMethod(REQ_QUERY_NETWORKS, (names, fields, use_locking))
  def QueryExports(self, nodes, use_locking):
    return self.CallMethod(REQ_QUERY_EXPORTS, (nodes, use_locking))
  def QueryClusterInfo(self):
    return self.CallMethod(REQ_QUERY_CLUSTER_INFO, ())
  def QueryConfigValues(self, fields):
    return self.CallMethod(REQ_QUERY_CONFIG_VALUES, (fields, ))
  def QueryTags(self, kind, name):
    return self.CallMethod(REQ_QUERY_TAGS, (kind, name))
|
__author__ = 'Marco Maio'
import time
class Handler():
    """Aggregate queries over a stock portfolio.

    Wraps three pre-built containers: today's stock quotes
    (``stocks_today``), investments grouped by stock name, and
    investments grouped by availability date (``dd/mm/yyyy`` strings).
    """
    def __init__(self, stocks_today=None, investments_by_name=None, investments_by_availability=None):
        # Fail fast when any of the three mandatory containers is missing.
        if stocks_today is None:
            raise ValueError('Stocks_today container not specified!')
        elif investments_by_name is None:
            raise ValueError('Investments_by_name container not specified!')
        elif investments_by_availability is None:
            raise ValueError('Investments_by_availability container not specified!')
        self.__stocks_today = stocks_today
        self.__investments_by_name = investments_by_name
        self.__investments_by_availability = investments_by_availability

    def get_amount_by_stock_name(self, stock_name):
        """Current worth of one stock: unit price times number of parts."""
        if stock_name is None or len(stock_name) == 0:
            raise ValueError('Stock name not specified!')
        quote = self.__stocks_today[stock_name]
        return quote["EUR"] * quote["Numbers of parts"]

    def get_amount_total_investment(self):
        """Current worth of the whole portfolio."""
        return sum(self.get_amount_by_stock_name(name)
                   for name in self.__stocks_today)

    def get_total_amount_by_date(self, date=None, stock_name="", closest_availability_only=False):
        """Worth and names of the stocks available by *date* (dd/mm/yyyy).

        With *closest_availability_only* set, only availability dates in
        the same year as *date* are considered. Raises ValueError when no
        fund is available by the given date.
        """
        if date is None or len(date) == 0:
            raise ValueError('Date not specified!')
        candidates = [d for d in self.__investments_by_availability.keys() if len(d) > 0]
        eligible_dates = []
        for d in candidates:
            if time.strptime(date, "%d/%m/%Y") >= time.strptime(d, "%d/%m/%Y"):
                if not closest_availability_only or date.split('/')[2] == d.split('/')[2]:
                    eligible_dates.append(d)
        if len(eligible_dates) == 0:
            raise ValueError('No fund available by the ' + date)
        total = 0
        matching_stocks = set()
        for ed in eligible_dates:
            # stock_name acts as a substring filter ("" matches everything).
            for name, parts in self.__investments_by_availability[ed].items():
                if stock_name in name:
                    matching_stocks.add(name)
                    total += self.__stocks_today[name]["EUR"] * parts
        return total, matching_stocks

    def get_paid_by_stock_name(self, stock_name=None):
        """Total amount originally paid for *stock_name*."""
        if stock_name is None or len(stock_name) == 0:
            raise ValueError('Stock name not specified!')
        if stock_name not in self.__stocks_today:
            raise ValueError('Please provide a valid stock name!')
        return sum((entry['Number of actions bought'] * entry['Purchase value']
                    for entry in self.__investments_by_name[stock_name].values()), 0.0)

    def get_total_gain(self):
        """Absolute and percentage gain over the whole portfolio."""
        tot_paid = sum((self.get_paid_by_stock_name(name)
                        for name in self.__investments_by_name), 0.0)
        tot = self.get_amount_total_investment()
        return tot - tot_paid, (tot / tot_paid - 1) * 100

    def get_gain_by_stock_name(self, stock_name):
        """Absolute and percentage gain of a single stock."""
        if stock_name is None or len(stock_name) == 0:
            raise ValueError('Stock name not specified!')
        if stock_name not in self.__stocks_today:
            raise ValueError('Please provide a valid stock name!')
        paid = self.get_paid_by_stock_name(stock_name)
        worth = self.get_amount_by_stock_name(stock_name)
        return worth - paid, (worth / paid - 1) * 100

    def get_next_available_amount(self):
        """Earliest availability date plus the amount unlocked by then."""
        min_date = None
        min_date_str = ""
        for d in self.__investments_by_availability.keys():
            if len(d) == 0:
                continue
            parsed = time.strptime(d, "%d/%m/%Y")
            if min_date is None or min_date > parsed:
                min_date = parsed
                min_date_str = d
        return min_date_str, self.get_total_amount_by_date(min_date_str)
|
import renpy
import codecs
import os
import os.path
import time
# Lint state shared across the check functions; (re)initialized in lint().
image_prefixes = None  # tags of declared/shown images (filled by image_exists/lint)
filenames = None       # lowercased filename -> actual on-disk filename
report_node = None     # AST node currently being checked (for file:line prefixes)
def report(msg, *args):
    """Print a lint warning, prefixed with file:line of the current node."""
    if report_node:
        out = u"%s:%d " % (renpy.parser.unicode_filename(report_node.filename), report_node.linenumber)
    else:
        out = ""
    out += msg % args
    # Python 2 print statements; the bare print separates reports.
    print
    print out.encode('utf-8')
# Hints already printed by add(); each advisory message appears only once.
added = { }
def add(msg):
    """Print an advisory message the first time it is seen."""
    if not msg in added:
        added[msg] = True
        print unicode(msg).encode('utf-8')
def try_eval(where, expr, additional=None):
    """Report when *expr* cannot be evaluated; optionally print a hint."""
    try:
        renpy.python.py_eval(expr)
    except:
        # Deliberately broad: lint must keep going whatever eval raises.
        report( "Could not evaluate '%s', in %s.", expr, where)
        if additional:
            add(additional)
def try_compile(where, expr):
    """Report when *expr* does not compile as a Python expression."""
    try:
        renpy.python.py_compile_eval_bytecode(expr)
    except:
        # Deliberately broad: any compilation failure is just reported.
        report("'%s' could not be compiled as a python expression, %s.", expr, where)
def image_exists(name, expression, tag):
    """Record the image tag, then verify that some prefix of *name* names a
    declared image; report when none does."""
    # Add the tag (defaulting to the first name component) to the known set.
    image_prefixes[tag or name[0]] = True
    # Dynamic (expression-based) images cannot be checked statically.
    if expression:
        return
    components = list(name)
    full_name = " ".join(components)
    while components:
        if tuple(components) in renpy.exports.images:
            return
        components = components[:-1]
    report("The image named '%s' was not declared.", full_name)
# Per-file cache: True = known loadable, False = already known missing.
check_file_cache = { }
def check_file(what, fn):
    """Check that *fn* is loadable and that its case matches the disk name."""
    present = check_file_cache.get(fn, None)
    if present is True:
        return
    if present is False:
        # Known-missing files are re-reported so every use site is flagged.
        report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
        return
    if not renpy.loader.loadable(fn):
        report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
        check_file_cache[fn] = False
        return
    check_file_cache[fn] = True
    try:
        renpy.loader.transfn(fn)
    except:
        # transfn may fail (presumably for non-filesystem files, e.g. in
        # archives — TODO confirm); skip the case check then.
        return
    if renpy.loader.transfn(fn) and \
       fn.lower() in filenames and \
       fn != filenames[fn.lower()]:
        report("Filename case mismatch for %s. '%s' was used in the script, but '%s' was found on disk.", what, fn, filenames[fn.lower()])
        add("Case mismatches can lead to problems on Mac, Linux/Unix, and when archiving images. To fix them, either rename the file on disk, or the filename use in the script.")
def check_displayable(what, d):
    """Run check_file over every file displayable *d* predicts it may load."""
    predicted = [ ]
    d.predict(lambda img: predicted.extend(img.predict_files()))
    for fn in predicted:
        check_file(what, fn)
def check_image(node):
    """Check the files used by a declared image statement."""
    label = " ".join(node.imgname)
    check_displayable('image %s' % label, renpy.exports.images[node.imgname])
def imspec(t):
    """Normalizes an image specifier tuple to the 7-element form.

    Older scripts yield 3-tuples (name, at_list, layer) and 6-tuples
    (name, expression, tag, at_list, layer, zorder); both are padded to
    (name, expression, tag, at_list, layer, zorder, behind) so callers can
    always unpack seven values.
    """
    if len(t) == 3:
        # Bug fix: this branch used to return only six elements, breaking
        # the seven-value unpacking in check_show/check_hide.
        return t[0], None, None, t[1], t[2], 0, None
    if len(t) == 6:
        return t[0], t[1], t[2], t[3], t[4], t[5], None
    else:
        return t
def check_show(node):
    """Lint a scene/show statement: layer validity, image existence and the
    at-list expressions."""
    # A Scene may have an empty imspec.
    if not node.imspec:
        return
    name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)
    if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
        report("Uses layer '%s', which is not in config.layers.", layer)
    image_exists(name, expression, tag)
    for position_expr in at_list:
        try_eval("the at list of a scene or show statment", position_expr, "Perhaps you forgot to declare, or misspelled, a position?")
def check_hide(node):
    """Lint a hide statement: layer validity and that the tag is known."""
    name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)
    tag = tag or name[0]
    if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
        report("Uses layer '%s', which is not in config.layers.", layer)
    # image_prefixes holds tags from declared images plus earlier shows.
    if tag not in image_prefixes:
        report("The image tag '%s' is not the prefix of a declared image, nor was it used in a show statement before this hide statement.", tag)
    # for i in at_list:
    #     try_eval(node, "at list of hide statment", i)
def check_with(node):
    """Check that a with statement's expression evaluates."""
    try_eval("a with statement or clause", node.expr, "Perhaps you forgot to declare, or misspelled, a transition?")
def check_user(node):
    """Run a user statement's own lint, then verify it reports a next node."""
    renpy.exports.push_error_handler(lambda msg: report("%s", msg))
    try:
        node.call("lint")
    finally:
        # Always restore the previous error handler.
        renpy.exports.pop_error_handler()
    try:
        node.get_next()
    except:
        report("Didn't properly report what the next statement should be.")
# Re-exported convenience alias.
check_text_tags = renpy.display.text.check_text_tags

def text_checks(s):
    """Check a dialogue/menu string for bad text tags and bad %-formats.

    Runs renpy's text-tag checker, then scans the string with a small
    state machine that validates printf-style interpolation sequences.
    """
    msg = renpy.display.text.check_text_tags(s)
    if msg:
        report("%s (in %s)", msg, repr(s)[1:])
    if "%" in s:
        state = 0  # 0: outside a format, 1: inside one, 2: in a mapping key
        pos = 0
        fmt = ""
        while pos < len(s):
            c = s[pos]
            pos += 1
            # Not in a format.
            if state == 0:
                if c == "%":
                    state = 1
                    fmt = "%"
            # In a format.
            elif state == 1:
                fmt += c
                if c == "(":
                    state = 2
                elif c in "#0123456789- +hlL":
                    # Bug fix: the flag/width class previously read
                    # "#0123456780" — '9' was missing (and '0' doubled), so
                    # valid widths such as "%9d" were reported as unknown
                    # format codes.
                    state = 1
                elif c in "diouxXeEfFgGcrs%":
                    state = 0
                else:
                    report("Unknown string format code '%s' (in %s)", fmt, repr(s)[1:])
                    state = 0
            # In a mapping key.
            elif state == 2:
                fmt += c
                if c == ")":
                    state = 1
        if state != 0:
            report("Unterminated string format code '%s' (in %s)", fmt, repr(s)[1:])
def check_say(node):
    """Lint a say statement: speaker, with-clause and the dialogue text."""
    if node.who:
        try_eval("the who part of a say statement", node.who, "Perhaps you forgot to declare a character?")
    if node.with_:
        try_eval("the with clause of a say statement", node.with_, "Perhaps you forgot to declare, or misspelled, a transition?")
    text_checks(node.what)
def check_menu(node):
    """Lint a menu: with-clause, selectable choices, item conditions and
    captions."""
    if node.with_:
        try_eval("the with clause of a menu statement", node.with_, "Perhaps you forgot to declare, or misspelled, a transition?")
    # Every item is (caption, condition, block); a menu with no block at
    # all has nothing the player can pick.
    if not any(block for _, _, block in node.items):
        report("The menu does not contain any selectable choices.")
    for caption, condition, block in node.items:
        if condition:
            try_compile("in the if clause of a menuitem", condition)
        text_checks(caption)
def check_jump(node):
    """Report a jump whose static target label does not exist.

    Expression (dynamic) targets cannot be checked at lint time.
    """
    if not node.expression and not renpy.game.script.has_label(node.target):
        report("The jump is to nonexistent label '%s'.", node.target)
def check_call(node):
    """Report a call whose static target label does not exist.

    Expression (dynamic) targets cannot be checked at lint time.
    """
    if not node.expression and not renpy.game.script.has_label(node.label):
        report("The call is to nonexistent label '%s'.", node.label)
def check_while(node):
    """Check that a while statement's condition compiles."""
    try_compile("in the condition of the while statement", node.condition)
def check_if(node):
    """Check that every branch condition of an if statement compiles."""
    for condition, block in node.entries:
        try_compile("in a condition of the if statement", condition)
def check_style(name, s):
    """Lint a single style: font files and displayable-valued properties.

    *name* is the human-readable description used in reports, *s* the
    style object. Indexed sub-styles are checked recursively.
    """
    if s.indexed:
        for i in s.indexed:
            # Bug fix: the description used to interpolate 'name' instead
            # of the index 'i', and so reported "style.foo[<name>]" for
            # every indexed sub-style.
            check_style(name + "[%r]" % (i,), s.indexed[i])
    for p in s.properties:
        for k, v in p.iteritems():
            kname = name + "." + k
            # Treat font specially.
            if k.endswith("font"):
                check_file(name, v)
            e = renpy.style.expansions[k]
            # We only need to check the first function.
            for prio, propn, func in e:
                if func:
                    v = func(v)
                    break
            if isinstance(v, renpy.display.core.Displayable):
                check_displayable(kname, v)
def check_styles():
    """Run check_style over every style in the global style map."""
    for style_name, style in renpy.style.style_map.iteritems():
        check_style("Style property style." + style_name, style)
def lint():
    """
    The master lint function, that's responsible for staging all of the
    other checks.
    """
    renpy.game.lint = True
    # Start the report with a UTF-8 BOM so editors detect the encoding.
    print codecs.BOM_UTF8
    print unicode(renpy.version + " lint report, generated at: " + time.ctime()).encode("utf-8")
    # This is used to support the check_image.
    global filenames
    filenames = { }
    for d in renpy.config.searchpath:
        for fn in os.listdir(os.path.join(renpy.config.basedir, d)):
            filenames[fn.lower()] = fn
    # This supports check_hide.
    global image_prefixes
    image_prefixes = { }
    for k in renpy.exports.images:
        image_prefixes[k[0]] = True
    # Iterate through every statement in the program, processing
    # them. We sort them in filename, linenumber order.
    all_stmts = [ (i.filename, i.linenumber, i) for i in renpy.game.script.all_stmts ]
    all_stmts.sort()
    say_words = 0
    say_count = 0
    menu_count = 0
    global report_node
    # Dispatch each AST node to its type-specific checker, gathering the
    # dialogue/menu statistics along the way.
    for fn, ln, node in all_stmts:
        report_node = node
        if isinstance(node, renpy.ast.Image):
            check_image(node)
        elif isinstance(node, renpy.ast.Show):
            check_show(node)
        elif isinstance(node, renpy.ast.Scene):
            check_show(node)
        elif isinstance(node, renpy.ast.Hide):
            check_hide(node)
        elif isinstance(node, renpy.ast.With):
            check_with(node)
        elif isinstance(node, renpy.ast.Say):
            check_say(node)
            say_count += 1
            say_words += len(node.what.split())
        elif isinstance(node, renpy.ast.Menu):
            check_menu(node)
            menu_count += 1
        elif isinstance(node, renpy.ast.Jump):
            check_jump(node)
        elif isinstance(node, renpy.ast.Call):
            check_call(node)
        elif isinstance(node, renpy.ast.While):
            check_while(node)
        elif isinstance(node, renpy.ast.If):
            check_if(node)
        elif isinstance(node, renpy.ast.UserStatement):
            check_user(node)
    report_node = None
    check_styles()
    # Give the game a chance to run its own lint checks.
    for f in renpy.config.lint_hooks:
        f()
    print
    print
    print "Statistics:"
    print
    print "The game contains", say_count, "screens of dialogue."
    print "These screens contain a total of", say_words, "words,"
    if say_count > 0:
        print "for an average of %.1f words per screen." % (1.0 * say_words / say_count)
    print "The game contains", menu_count, "menus."
    print
    if renpy.config.developer:
        print "Remember to set config.developer to False before releasing."
        print
    print "Lint is not a substitute for thorough testing. Remember to update Ren'Py"
    print "before releasing. New releases fix bugs and improve compatibility."
|
from datetime import datetime
from euphorie.client import model
from euphorie.client.tests.utils import addAccount
from euphorie.client.tests.utils import addSurvey
from euphorie.content.tests.utils import BASIC_SURVEY
from euphorie.testing import EuphorieIntegrationTestCase
from lxml import html
from plone import api
from Products.Five.browser.metaconfigure import ViewNotCallableError
from time import sleep
from zope.event import notify
from zope.lifecycleevent import ObjectModifiedEvent
class TestSurveyViews(EuphorieIntegrationTestCase):
    """Integration tests for the survey-session publication views
    (publication_date / publication_menu) and modification tracking."""
    def test_survey_publication_date_views(self):
        """We have some views to display and set the published column
        for a survey session
        """
        with api.env.adopt_user("admin"):
            survey = addSurvey(self.portal, BASIC_SURVEY)
        account = addAccount(password="secret")
        survey_session = model.SurveySession(
            id=123,
            title=u"Dummy session",
            created=datetime(2012, 4, 22, 23, 5, 12),
            modified=datetime(2012, 4, 23, 11, 50, 30),
            zodb_path="nl/ict/software-development",
            account=account,
            company=model.Company(country="nl", employees="1-9", referer="other"),
        )
        model.Session.add(survey_session)
        survey = self.portal.client.nl.ict["software-development"]
        session_id = "++session++%d" % survey_session.id
        traversed_survey_session = survey.restrictedTraverse(session_id)
        with api.env.adopt_user(user=survey_session.account):
            with self._get_view(
                "publication_date", traversed_survey_session, survey_session
            ) as view:
                # The view is not callable but
                # has traversable allowed attributes
                self.assertRaises(ViewNotCallableError, view)
                # We have some default values that will be changed
                # when publishing/unpublishing the session
                self.assertEqual(survey_session.last_publisher, None)
                self.assertEqual(survey_session.published, None)
                self.assertEqual(survey_session.last_modifier, None)
                self.assertEqual(survey_session.review_state, "private")
                # Calling set_date will result in having this session published
                # and the publication time and the publisher will be recorded
                # If no referer is set,
                # the methods will redirect to the context url
                self.assertEqual(
                    view.set_date(),
                    "{url}/{session_id}".format(
                        url=survey.absolute_url(), session_id=session_id
                    ),
                )
                self.assertEqual(survey_session.last_publisher, survey_session.account)
                self.assertIsInstance(survey_session.published, datetime)
                self.assertEqual(survey_session.review_state, "published")
                old_modified = survey_session.modified
                old_published = survey_session.published
                old_modifier = survey_session.last_modifier
                # Changing the HTTP_REFERER will redirect there
                # and calling reset_date will update the published date
                view.request.set("HTTP_REFERER", "foo")
                # We need to wait at least one second because the datetime
                # is stored with that accuracy
                sleep(1)
                self.assertEqual(view.reset_date(), "foo")
                self.assertEqual(survey_session.last_publisher, survey_session.account)
                # The publisher and publication dates are set. The modification date
                # is not touched.
                self.assertEqual(survey_session.modified, old_modified)
                self.assertEqual(survey_session.last_modifier, old_modifier)
                self.assertTrue(survey_session.published > old_published)
                # Calling unset_date will restore the publication info
                self.assertEqual(view.unset_date(), "foo")
                self.assertEqual(survey_session.last_publisher, None)
                self.assertEqual(survey_session.published, None)
                self.assertEqual(survey_session.review_state, "private")
            # We also have a menu view
            with self._get_view(
                "publication_menu", traversed_survey_session, survey_session
            ) as view:
                # Unpublished session: only the "set date" action is offered.
                soup = html.fromstring(view())
                self.assertListEqual(
                    ["publication_date/set_date#content"],
                    [
                        el.attrib["action"].rpartition("@@")[-1]
                        for el in soup.cssselect("form")
                    ],
                )
                # Mark the session as published so that the unset/reset
                # actions are offered instead.
                survey_session.published = "foo"
                soup = html.fromstring(view())
                self.assertListEqual(
                    [
                        "publication_date/unset_date#content",
                        "publication_date/reset_date#content",
                    ],
                    [
                        el.attrib["action"].rpartition("@@")[-1]
                        for el in soup.cssselect("form")
                    ],
                )
    def test_modify_updates_last_modifier(self):
        """Notifying ObjectModifiedEvent stamps modified and last_modifier."""
        account = addAccount(password="secret")
        survey_session = model.SurveySession(
            title=u"Dummy session", account=account, zodb_path=""
        )
        self.assertEqual(survey_session.modified, None)
        self.assertEqual(survey_session.last_modifier, None)
        with api.env.adopt_user(user=account):
            notify(ObjectModifiedEvent(survey_session))
        self.assertIsInstance(survey_session.modified, datetime)
        self.assertEqual(survey_session.last_modifier, account)
|
from flask import Response
from flask.views import View
from bson import json_util
from mcp import mongo
class Map(View):
    """Flask pluggable view returning, as JSON, procurement aggregates per
    company headquarters for one municipality and year, together with the
    overall min/max bounds (presumably used to scale map markers —
    TODO confirm against the client code)."""
    def dispatch_request(self, komuna, viti):
        """Build the JSON response for municipality *komuna* and year *viti*."""
        # Aggregate contracts for the given municipality/year grouped by
        # company headquarters, skipping entries with an empty HQ slug.
        json = mongo.db.procurements.aggregate([
            {
                "$match": {
                    "komuna.slug": komuna,
                    "viti": viti,
                    "kompania.selia.slug": {'$ne': ''}
                }
            },
            {
                "$group": {
                    "_id": {
                        "selia": "$kompania.selia.slug",
                        "emri": "$kompania.selia.emri",
                        "gjeresi": "$kompania.selia.kordinatat.gjeresi",
                        "gjatesi": "$kompania.selia.kordinatat.gjatesi",
                    },
                    "cmimi": {
                        "$sum": "$kontrata.qmimi"
                    },
                    "vlera": {
                        "$sum": "$kontrata.vlera"
                    },
                    "numriKontratave": {
                        "$sum": 1
                    }
                }
            },
            {
                "$sort": {
                    "_id.selia": 1
                }
            },
            {
                "$project": {
                    "selia": "$_id.selia",
                    "emri": "$_id.emri",
                    "gjeresia": "$_id.gjeresi",
                    "gjatesia": "$_id.gjatesi",
                    "cmimi": "$cmimi",
                    "vlera": "$vlera",
                    "numriKontratave": "$numriKontratave",
                    "_id": 0
                }
            }
        ])
        # Second pass: overall min/max of the per-headquarters sums of
        # price, value and contract count.
        json_min_max = mongo.db.procurements.aggregate([
            {
                "$match": {
                    "komuna.slug": komuna,
                    "viti": viti,
                    "kompania.selia.slug": {'$ne': ''}
                }
            },
            {
                "$group": {
                    "_id": {
                        "selia": "$kompania.selia.slug",
                        "gjeresi": "$kompania.selia.kordinatat.gjeresi",
                        "gjatesi": "$kompania.selia.kordinatat.gjatesi",
                    },
                    "sumCmimi": {
                        "$sum": "$kontrata.qmimi"
                    },
                    "sumVlera": {
                        "$sum": "$kontrata.vlera"
                    },
                    "sumNumriKontratave": {
                        "$sum": 1
                    }
                }
            },
            {
                "$group": {
                    "_id": {},
                    "maxCmimi": {
                        "$max": "$sumCmimi"
                    },
                    "maxVlera": {
                        "$max": "$sumVlera"
                    },
                    "maxNumriKontratave": {
                        "$max": "$sumNumriKontratave"
                    },
                    "minCmimi": {
                        "$min": "$sumCmimi"
                    },
                    "minVlera": {
                        "$min": "$sumVlera"
                    },
                    "minNumriKontratave": {
                        "$min": "$sumNumriKontratave"
                    },
                }
            },
            {
                "$project": {
                    "_id": 0,
                    "vlera": {
                        "min": "$minVlera",
                        "max": "$maxVlera",
                    },
                    "cmimi": {
                        "min": "$minCmimi",
                        "max": "$maxCmimi",
                    },
                    "numriKontratave": {
                        "min": "$minNumriKontratave",
                        "max": "$maxNumriKontratave",
                    }
                }
            }
        ])
        # Store the combined result, serialized via json_util.dumps(), in resp.
        # NOTE(review): indexing aggregate() output with ['result'] relies on
        # the legacy pymongo (<3.0) return format — confirm driver version.
        result_json = {};
        result_json['bounds'] = json_min_max['result'][0]
        result_json['result'] = json['result']
        resp = Response(
            response=json_util.dumps(result_json),
            mimetype='application/json')
        return resp
|
"""
Module implementing MainWindow.
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from Ui_mainWindow import Ui_MainWindow
from baseclasses import *
from splitDialog import splitDialog
from aboutDialog import aboutDialog
# Column indices of the audiobook tree model; TITLE (0) also carries the
# per-item UserRole data dictionary elsewhere in this module.
TITLE, CHAPTER, TRACK, DURATION, STARTTIME, FILENAME, ENDTIME = range(7)
def makeClickable(widget):
    """Give an arbitrary widget a ``clicked`` signal.

    Installs an event filter on *widget* that emits ``clicked`` (and
    consumes the event) on every mouse-button release over the widget.
    Returns the bound signal, so callers can write
    ``makeClickable(label).connect(slot)``.
    """
    class ClickFilter(QObject):
        clicked = pyqtSignal()

        def eventFilter(self, obj, event):
            if obj == widget and event.type() == QEvent.MouseButtonRelease:
                self.clicked.emit()
                return True
            return False

    # Parent the filter to the widget so it stays alive as long as the
    # widget does (avoids the filter being garbage collected).
    click_filter = ClickFilter(widget)
    widget.installEventFilter(click_filter)
    return click_filter.clicked
class MainWindow(QMainWindow, Ui_MainWindow):
    """Main application window.

    Hosts the audiobook tree (``dataTreeView`` backed by
    ``audiobookTreeModel``) and the stacked property editors for
    audiobooks and chapters.
    """
    def __init__(self, parent = None):
        """Build the UI, wire up the model, signals and key filters."""
        class delkeyFilter(QObject):
            # Emits when Delete is pressed on the watched widget.
            delkeyPressed = pyqtSignal()
            def eventFilter(self, obj, event):
                if event.type() == QEvent.KeyPress:
                    if event.key() == Qt.Key_Delete:
                        self.delkeyPressed.emit()
                        return True
                return False
        class returnkeyFilter(QObject):
            # Return moves the tree selection one row down.
            def eventFilter(self, obj, event):
                if event.type() == QEvent.KeyPress:
                    if event.key() == Qt.Key_Return:
                        current = obj.currentIndex()
                        current = obj.indexBelow(current)
                        obj.setCurrentIndex(current)
                return False
        self.audiobookList = audiobookContainer()
        self.currentDir = os.getcwd()
        QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.stackedWidget.setCurrentWidget(self.infoPage)
        makeClickable(self.coverLabel).connect(self.on_coverLabel_clicked)
        self.model = audiobookTreeModel()
        self.dataTreeView.setModel(self.model)
        self.progessDelegate = progressBarDelegate()
        self.dataTreeView.setItemDelegateForColumn(1, self.progessDelegate)
        self.connect(self.dataTreeView.selectionModel(),
                SIGNAL('currentChanged(QModelIndex, QModelIndex)'),
                self.on_dataTreeView_currentItemChanged)
        self.connect(self.model, SIGNAL('dataChanged(QModelIndex,QModelIndex)'), self.dataChanged)
        self.connect(self.model, SIGNAL('expand(QModelIndex)'), self.dataTreeView.expand)
        #trying the new style of connecting signals
        self.model.processingDone.connect(self.on_processingDone)
        self.delfilter = delkeyFilter()
        self.dataTreeView.installEventFilter(self.delfilter)
        self.connect(self.delfilter, SIGNAL('delkeyPressed()'),
                self.on_actionRemove_triggered)
        self.returnFilter = returnkeyFilter()
        self.dataTreeView.installEventFilter(self.returnFilter)
        #allow only numbers in yearEdit
        self.yearEdit.setValidator(QRegExpValidator(QRegExp(r'\d*'), self))
        #set icons
        self.actionMoveDown.setIcon(QIcon.fromTheme('go-down'))
        self.actionMoveUp_2.setIcon(QIcon.fromTheme('go-up'))
        #TODO: clean the name of this action
        self.actionRemove.setIcon(QIcon.fromTheme('edit-delete'))
        self.actionAddAudiobook.setIcon(QIcon.fromTheme('address-book-new'))
        self.actionAddChapter.setIcon(QIcon.fromTheme('document-new'))
        self.action_About.setIcon(QIcon.fromTheme('help-about'))
        self.action_help.setIcon(QIcon.fromTheme('help-browser'))
        self.actionExit.setIcon(QIcon.fromTheme('application-exit'))
        self.actionProcess.setIcon(QIcon.fromTheme('system-run'))
        self.chapterFileButton.setIcon(QIcon.fromTheme('document-open'))
        self.outfileButton.setIcon(QIcon.fromTheme('document-open'))
        self.updateTree()
    def _selectedColumn0Indexes(self):
        """Return selected indexes restricted to column 0.

        The selection model yields one index per (row, column) cell; the
        model operations want exactly one index per selected row.
        """
        return [index for index in
                self.dataTreeView.selectionModel().selectedIndexes()
                if index.column() == 0]
    def okToQuit(self):
        """Ask for confirmation; return True when the user agrees to quit."""
        reply = QMessageBox.question(self, "M4Baker - really quit?",
                "Really quit?", QMessageBox.Yes|QMessageBox.Cancel)
        return reply == QMessageBox.Yes
    def closeEvent(self, event):
        """Veto window close unless the user confirms."""
        if not self.okToQuit():
            event.ignore()
    @pyqtSignature("")
    def on_actionAddAudiobook_triggered(self):
        """Create a new audiobook from user-chosen audio files."""
        current = self.dataTreeView.currentIndex()
        formats = ["*%s" % format for format in supportedInputFiles]
        fnames = QFileDialog.getOpenFileNames(
            self,
            "Choose audio files to create audiobook from",
            self.currentDir,
            'audio files (%s)' % " ".join(formats))
        if fnames:
            # Remember the directory of the last chosen file.
            self.currentDir = fnames[-1].section(os.sep, 0, -2)
            newbook = audiobook([chapter(element) for element in fnames])
            self.model.addAudiobooks(newbook, current)
            self.updateTree()
    @pyqtSignature("")
    def on_actionMoveDown_triggered(self):
        """Move the selected rows one position down."""
        self.model.move(self._selectedColumn0Indexes(), 'down')
    @pyqtSignature("")
    def on_actionRemove_triggered(self):
        """Remove the selected audiobooks/chapters from the model."""
        self.model.remove(self._selectedColumn0Indexes())
        self.updateTree()
    @pyqtSignature("")
    def on_actionAddChapter_triggered(self):
        """Append chapters (audio files) to the current audiobook."""
        formats = ["*%s" % format for format in supportedInputFiles]
        fnames = QFileDialog.getOpenFileNames(
            self,
            "Choose audio files to append to audiobook",
            self.currentDir,
            'audio files (%s)' % " ".join(formats))
        if fnames:
            self.currentDir = fnames[-1].section(os.sep, 0, -2)
            chaplist = [chapter(element) for element in fnames]
            current = self.dataTreeView.currentIndex()
            self.model.addChapters(chaplist, current)
            self.updateTree()
            #TODO: maybe it is smarter to add the chapter after current item?
    @pyqtSignature("")
    def on_actionSortByFilename_triggered(self):
        """Sort the current audiobook's chapters by file name."""
        current = self.dataTreeView.currentIndex()
        self.model.sort(current, 'filename')
        self.updateTree()
    @pyqtSignature("")
    def on_actionSortByTracknumber_triggered(self):
        """Sort the current audiobook's chapters by track number."""
        current = self.dataTreeView.currentIndex()
        self.model.sort(current, 'trackNumber')
        self.updateTree()
    @pyqtSignature("")
    def on_actionProcess_triggered(self):
        """Start encoding; lock the UI while the model is processing."""
        uiElements = (self.actionAddChapter, self.actionMoveDown,
            self.actionMoveUp_2, self.actionProcess, self.actionRemove, self.actionSortByFilename,
            self.actionSortByTracknumber, self.actionSplit, self.actionAddAudiobook)
        for element in uiElements:
            element.setEnabled(False)
        #switch to about docker to prevent data from being changed
        self.stackedWidget.setCurrentWidget(self.infoPage)
        #disable treeview
        self.dataTreeView.setEnabled(False)
        self.model.process()
    @pyqtSignature("")
    def on_actionMoveUp_2_triggered(self):
        """Move the selected rows one position up."""
        self.model.move(self._selectedColumn0Indexes(), 'up')
    def populateChapterProperties(self):
        """Fill the chapter editor widgets from the current model row.

        Only called when the current index is a chapter.
        """
        current = self.dataTreeView.currentIndex()
        title = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.DisplayRole).toString()
        startTime = self.model.data(self.model.index(current.row(), STARTTIME, current.parent()),
            Qt.DisplayRole).toString()
        duration = self.model.data(self.model.index(current.row(), DURATION, current.parent()),
            Qt.DisplayRole).toString()
        filename = self.model.data(self.model.index(current.row(), FILENAME, current.parent()),
            Qt.DisplayRole).toString()
        # endTime lives in the UserRole dict stored on the TITLE column.
        endTime = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.UserRole)['endTime']
        endTime = u'%.2d:%.2d:%#06.3f' % secConverter(endTime)
        self.chapterTitleEdit.setText(title)
        self.startTimeEdit.setText(startTime)
        self.durationEdit.setText(duration)
        self.chapterFileEdit.setText(filename)
        self.endTimeEdit.setText(endTime)
    def populateAudiobookProperties(self):
        """Fill the audiobook editor widgets from the current model row."""
        current = self.dataTreeView.currentIndex()
        title = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.UserRole)['title']
        booknum = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.UserRole)['booknum']
        author = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.UserRole)['author']
        encodeString = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.UserRole)['encodeString']
        outfileName = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.UserRole)['outfileName']
        year = self.model.data(self.model.index(current.row(), TITLE, current.parent()),
            Qt.UserRole)['year']
        self.authorEdit.setText(author)
        self.titleEdit.setText(title)
        self.yearEdit.setText(year)
        self.faacEdit.setText(encodeString)
        self.outfileEdit.setText(outfileName)
        pixmap = self.model.data(self.model.index(current.row(), 0, current.parent()), Qt.UserRole).get('cover')
        if pixmap:
            pixmap = self.model.data(self.model.index(current.row(), 0, current.parent()), Qt.UserRole)['cover']
            width = self.coverLabel.size().width()
            pixmap = pixmap.scaledToWidth(width)
            self.coverLabel.setPixmap(pixmap)
        else:
            self.coverLabel.setText('(click to change)')
    @pyqtSignature("QModelIndex*, QModelIndex*")
    def on_dataTreeView_currentItemChanged(self, current, previous):
        """Switch the property page and adjust enabled actions on selection."""
        uiElements = (self.actionAddChapter, self.actionMoveDown,
            self.actionMoveUp_2, self.actionProcess, self.actionRemove, self.actionSortByFilename,
            self.actionSortByTracknumber, self.actionSplit)
        if not current.isValid():
            #current is rootItem
            for element in uiElements:
                element.setDisabled(True)
            return
        else:
            for element in uiElements:
                element.setEnabled(True)
        if not current.parent().isValid():
            #current is audiobook
            self.stackedWidget.setCurrentWidget(self.audiobookPropertiesPage)
            self.populateAudiobookProperties()
            if current.row() == 0:
                #current is first audiobook
                self.actionMoveUp_2.setEnabled(False)
            if current.row() == self.model.rowCount(current.parent()) -1:
                #current is last audiobook
                self.actionMoveDown.setEnabled(False)
        else:
            #current is chapter
            self.stackedWidget.setCurrentWidget(self.chapterPropertiesPage)
            self.populateChapterProperties()
            if current.row() == 0:
                #current is the first chapter of its book
                if current.parent().row() == 0:
                    #current is the first chapter of the first book
                    self.actionMoveUp_2.setEnabled(False)
            if current.row() == self.model.rowCount(current.parent()) -1:
                #current is the last chapter of its book
                if current.parent().row() == self.model.rowCount(current.parent().parent()) -1:
                    #current is the last chapter of the last book
                    self.actionMoveDown.setEnabled(False)
    @pyqtSignature("")
    def on_chapterFileButton_clicked(self):
        """Let the user pick a new source file for the current chapter."""
        current = self.dataTreeView.currentIndex()
        formats = ["*%s" % format for format in supportedInputFiles]
        fname = QFileDialog.getOpenFileName(
            self,
            "change chapter source file",
            self.currentDir,
            'audio files (%s)' % " ".join(formats))
        if not fname.isEmpty():
            self.currentDir = fname.section(os.sep, 0, -2)
            self.model.setData(self.model.index(current.row(), FILENAME, current.parent()), QVariant(fname))
            self.populateChapterProperties()
    @pyqtSignature("")
    def on_outfileButton_clicked(self):
        """Let the user pick the output file for the current audiobook."""
        current = self.dataTreeView.currentIndex()
        fname = QFileDialog.getSaveFileName(
            self,
            'choose audiobook output file',
            self.currentDir,
            "Audiobook files (*.m4b)")
        if not fname.isEmpty():
            self.currentDir = fname.section(os.sep, 0, -2)
            # Enforce the .m4b container extension.
            if not fname.endsWith('.m4b'):
                fname += ".m4b"
            self.model.setData(self.model.index(current.row(), FILENAME, current.parent()), QVariant(fname))
            self.populateAudiobookProperties()
    @pyqtSignature("")
    def on_action_About_triggered(self):
        """Show the modal about dialog."""
        dialog = aboutDialog()
        if dialog.exec_():
            pass
    @pyqtSignature("")
    def on_actionSplit_triggered(self):
        """Split the current audiobook into parts of a user-chosen length."""
        current = self.dataTreeView.currentIndex()
        if not current.parent().isValid():
            #audiobook
            pass
        else:
            #chapter: operate on its parent audiobook
            current = current.parent()
        minSplitDuration = self.model.data(current, Qt.UserRole)['minSplitDuration']
        hours, minutes, seconds = secConverter(minSplitDuration)
        minSplitDuration = QTime(hours, minutes, seconds+1)
        dialog = splitDialog(minSplitDuration)
        if dialog.exec_():
            maxSplitDuration = dialog.getMaxSplitDuration()
            self.model.split(current, maxSplitDuration)
            self.updateTree()
    @pyqtSignature("")
    def on_coverLabel_clicked(self):
        """Let the user pick a cover image for the current audiobook."""
        current = self.dataTreeView.currentIndex()
        fname = QFileDialog.getOpenFileName(
            self,
            "Choose a cover file",
            self.currentDir,
            "image files (*.png *.jpg *.jpeg *.bmp *.gif *.pbm *.pgm *ppm *xpm *xpm)",
            "cover.png"
        )
        if not fname.isEmpty():
            self.currentDir = fname.section(os.sep, 0, -2)
            self.model.setData(self.model.index(current.row(), 0, current.parent()),
                {'cover':QPixmap(fname)}, Qt.UserRole)
            self.populateAudiobookProperties()
    def updateTree(self):
        """Resize the tree columns to their contents."""
        for i in range(6):
            self.dataTreeView.resizeColumnToContents(i)
    def dataChanged(self, topLeft, bottomRight):
        """Refresh the visible property page after model changes."""
        current = self.dataTreeView.currentIndex()
        if not current.parent().isValid():
            #audiobook
            self.populateAudiobookProperties()
        else:
            #chapter
            self.populateChapterProperties()
    def on_processingDone(self):
        """Re-enable the UI once the model has finished encoding."""
        self.actionProcess.setEnabled(True)
        self.actionAddAudiobook.setEnabled(True)
        self.dataTreeView.setEnabled(True)
        self.dataTreeView.reset()
    @pyqtSignature("")
    def on_chapterTitleEdit_editingFinished(self):
        """Store the edited chapter title in the model."""
        current = self.dataTreeView.currentIndex()
        text = self.chapterTitleEdit.text()
        self.model.setData(self.model.index(current.row(), TITLE, current.parent()), QVariant(text))
    @pyqtSignature("")
    def on_faacEdit_editingFinished(self):
        """Store the edited encoder command string in the model."""
        text = self.faacEdit.text()
        current = self.dataTreeView.currentIndex()
        value = {'encodeString':QVariant(text)}
        self.model.setData(self.model.index(current.row(), 0, QModelIndex()), value, Qt.UserRole)
    @pyqtSignature("")
    def on_titleEdit_editingFinished(self):
        """Store the edited audiobook title in the model."""
        text = self.titleEdit.text()
        current = self.dataTreeView.currentIndex()
        self.model.setData(self.model.index(current.row(), TITLE, QModelIndex()), QVariant(text))
    @pyqtSignature("")
    def on_yearEdit_editingFinished(self):
        """Store the edited year in the model.

        BUGFIX: this slot previously read ``self.titleEdit`` and wrote the
        TITLE column (copy-paste from on_titleEdit_editingFinished), so the
        year could never be changed.  It now reads ``self.yearEdit`` and
        stores 'year' via UserRole, mirroring on_authorEdit/on_faacEdit and
        matching how populateAudiobookProperties reads it back.
        """
        text = self.yearEdit.text()
        current = self.dataTreeView.currentIndex()
        value = {'year':QVariant(text)}
        self.model.setData(self.model.index(current.row(), 0, QModelIndex()), value, Qt.UserRole)
    @pyqtSignature("")
    def on_authorEdit_editingFinished(self):
        """Store the edited author in the model."""
        text = self.authorEdit.text()
        current = self.dataTreeView.currentIndex()
        value = {'author':QVariant(text)}
        self.model.setData(self.model.index(current.row(), 0, QModelIndex()), value, Qt.UserRole)
    @pyqtSignature("")
    def on_action_help_triggered(self):
        """Show the info/help page."""
        self.stackedWidget.setCurrentWidget(self.infoPage)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth import views
admin.autodiscover()

# NOTE(review): most patterns below have no trailing '$' anchor, so e.g.
# r'^test' also matches '/testthing'.  Left unchanged to avoid breaking
# anything that relies on prefix matching -- confirm before tightening.
# NOTE(review): 'regsiter' is the actual (misspelled) app package name;
# the dotted-path strings must keep that spelling.
urlpatterns = patterns('',
    url(r'^$', 'lite_note.views.home', name='home'),
    url(r'^test', 'lite_note.views.new_home', name='new_home'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^login/', views.login, name='login'),
    url(r'^logout/', views.logout, {'next_page': 'home'}, name='logout'),
    url(r'^register/', 'regsiter.views.registration', name='registration_register'),
    url(r'^create/', 'lite_note.views.create_note', name='create_note'),
    url(r'^unknown/', 'lite_note.views.enter_anonymous_user', name='enter_anonymous'),
    url(r'^note/(?P<id>[0-9]+)/', 'lite_note.views.note', name='note'),
    url(r'^delete/(?P<id>[0-9]+)', 'lite_note.tools.delet_note'),
    url(r'^private/(?P<id>[0-9]+)', 'lite_note.tools.make_private_note'),
    url(r'^public/(?P<id>[0-9]+)', 'lite_note.tools.make_public_note'),
    url(r'^favorite/(?P<id>[0-9]+)', 'lite_note.tools.make_favorite_note'),
    url(r'^unfavorite/(?P<id>[0-9]+)', 'lite_note.tools.make_usual_note'),
    url(r'^get_login', 'regsiter.views.request_login'),
    # Removed a second, identical r'^test' entry (same pattern, view and
    # name as above): it was unreachable dead configuration.
    url(r'^get_notes', 'lite_note.views.new_note', name='new_note')
)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Period model: a named date range owned by a Professor.
    dependencies = [
        ('usermodule', '0002_auto_20151108_2019'),
    ]
    operations = [
        migrations.CreateModel(
            name='Period',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=10)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                # FK to the Professor owning this period.
                ('professor', models.ForeignKey(to='usermodule.Professor')),
            ],
        ),
    ]
|
from __future__ import division
from deskbar.handlers.actions.CopyToClipboardAction import CopyToClipboardAction
from deskbar.defs import VERSION
from gettext import gettext as _
import deskbar.core.Utils
import deskbar.interfaces.Match
import deskbar.interfaces.Module
import logging
import math
import re
# Module-level logger; HANDLERS names the deskbar module classes exported here.
LOGGER = logging.getLogger(__name__)
HANDLERS = ["CalculatorModule"]
def bin (n):
    """A local binary equivalent of the hex and oct builtins.

    Non-negative values are rendered as "0b<digits>".  Negative values are
    two's complement with an infinite run of sign bits, which is marked by
    the "...111" prefix once the remaining value collapses to -1.
    """
    if n == 0:
        return "0b0"
    digits = []
    if n < 0:
        while n != -1:
            digits.append(str(n & 1))
            n >>= 1
        return "0b" + "...111" + "".join(reversed(digits))
    while n:
        digits.append(str(n & 1))
        n >>= 1
    return "0b" + "".join(reversed(digits))
def lenient_hex (c):
    """hex() that tolerates non-integer input by truncating to int first."""
    try:
        return hex (c)
    except TypeError:
        # e.g. a float: truncate and retry.
        return hex (int (c))
def lenient_oct (c):
    """oct() that tolerates non-integer input by truncating to int first."""
    try:
        return oct (c)
    except TypeError:
        # e.g. a float: truncate and retry.
        return oct (int (c))
def lenient_bin (c):
    """bin() that tolerates non-integer input by truncating to int first."""
    try:
        return bin (c)
    except TypeError:
        # e.g. a float: truncate and retry.
        return bin (int (c))
class CalculatorAction (CopyToClipboardAction):
    # Action that copies the computed answer to the clipboard; its verb
    # shows "original expression = answer".
    def __init__ (self, text, answer):
        # Pass the answer as both name and clipboard payload; keep the
        # original expression separately for display (see get_name).
        CopyToClipboardAction.__init__ (self, answer, answer)
        self.text = text
    def get_verb(self):
        # %(origtext)s / %(name)s are filled from get_name()'s dict.
        return _("Copy <b>%(origtext)s = %(name)s</b> to clipboard")
    def get_name(self, text = None):
        """Because the text variable for history entries contains the text
        typed for the history search (and not the text of the original action),
        we store the original text separately."""
        result = CopyToClipboardAction.get_name (self, text)
        result["origtext"] = self.text
        return result
    def get_tooltip(self, text=None):
        # NOTE(review): relies on self._name being set by the base class.
        return self._name
class CalculatorMatch (deskbar.interfaces.Match):
    # Deskbar match for an evaluated expression; offers a single
    # copy-to-clipboard action.
    def __init__ (self, text, answer, **kwargs):
        deskbar.interfaces.Match.__init__ (self, name = text,
            icon = "gtk-add", category = "calculator", **kwargs)
        self.answer = str (answer)
        self.add_action (CalculatorAction (text, self.answer))
    def get_hash (self):
        # Deduplicate matches by their answer string.
        return self.answer
class CalculatorModule (deskbar.interfaces.Module):
    # Deskbar handler that evaluates simple arithmetic typed into the bar.
    INFOS = {"icon": deskbar.core.Utils.load_icon ("gtk-add"),
             "name": _("Calculator"),
             "description": _("Calculate simple equations"),
             "version" : VERSION,
             "categories" : { "calculator" : { "name" : _("Calculator") }}}
    def __init__ (self):
        # Patterns for hex (0x...) and binary (0b...) literals; '_' is
        # allowed as a digit separator inside both.
        deskbar.interfaces.Module.__init__ (self)
        self.hexre = re.compile ("0[Xx][0-9a-fA-F_]*[0-9a-fA-F]")
        self.binre = re.compile ("0[bB][01_]*[01]")
    def _number_parser (self, match, base):
        """A generic number parser, regardless of base. It also ignores the
        '_' character so it can be used as a separator. Note how we skip
        the first two characters since we assume it is something like '0x'
        or '0b' and identifies the base."""
        table = { '0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4,
                  '5' : 5, '6' : 6, '7' : 7, '8' : 8, '9' : 9,
                  'a' : 10, 'b' : 11, 'c' : 12, 'd' : 13,
                  'e' : 14, 'f' : 15 }
        d = 0
        for c in match.group()[2:]:
            if c != "_":
                d = d * base + table[c]
        return str (d)
    def _binsub (self, match):
        """Because python doesn't handle binary literals, we parse it
        ourselves and replace it with a decimal representation."""
        return self._number_parser (match, 2)
    def _hexsub (self, match):
        """Parse the hex literal ourselves. We could let python do it, but
        since we have a generic parser we use that instead."""
        return self._number_parser (match, 16)
    def run_query (self, query):
        """We evaluate the equation by first replacing hex and binary literals
        with their decimal representation. (We need to check hex, so we can
        distinguish 0x10b1 as a hex number, not 0x1 followed by 0b1.) We
        severely restrict the eval environment. Any errors are ignored."""
        # __builtins__ is disabled; only these math helpers are callable.
        restricted_dictionary = { "__builtins__" : None, "abs" : abs,
                                  "acos" : math.acos, "asin" : math.asin,
                                  "atan" : math.atan, "atan2" : math.atan2,
                                  "bin" : lenient_bin,"ceil" : math.ceil,
                                  "cos" : math.cos, "cosh" : math.cosh,
                                  "degrees" : math.degrees,
                                  "exp" : math.exp, "floor" : math.floor,
                                  "hex" : lenient_hex, "int" : int,
                                  "log" : math.log, "pow" : math.pow,
                                  "log10" : math.log10, "oct" : lenient_oct,
                                  "pi" : math.pi, "radians" : math.radians,
                                  "round": round, "sin" : math.sin,
                                  "sinh" : math.sinh, "sqrt" : math.sqrt,
                                  "tan" : math.tan, "tanh" : math.tanh}
        try:
            # Lowercase so hex digits and function names match; rewrite
            # hex first (see docstring), then binary; treat all brackets
            # as parentheses.
            scrubbedquery = query.lower()
            scrubbedquery = self.hexre.sub (self._hexsub, scrubbedquery)
            scrubbedquery = self.binre.sub (self._binsub, scrubbedquery)
            for (c1, c2) in (("[", "("), ("{", "("), ("]", ")"), ("}", ")")):
                scrubbedquery = scrubbedquery.replace (c1, c2)
            # SECURITY NOTE: eval on user input, mitigated by the
            # restricted dictionary above (no builtins available).
            answer = eval (scrubbedquery, restricted_dictionary)
            # Try and avoid echoing back simple numbers. Note that this
            # doesn't work well for floating point, e.g. '3.' behaves badly.
            if str (answer) == query:
                return None
            # We need this check because the eval can return function objects
            # when we are halfway through typing the expression.
            if isinstance (answer, (float, int, long, str)):
                return answer
            else:
                return None
        except Exception, e:
            # Incomplete/invalid expressions are expected while typing.
            LOGGER.debug (str(e))
            return None
    def query (self, query):
        # Deskbar entry point: emit a match when the expression evaluates.
        answer = self.run_query(query)
        if answer != None:
            result = [CalculatorMatch (query, answer)]
            self._emit_query_ready (query, result)
            return answer
        else:
            return []
|
from PyQt4 import QtCore, QtGui
# Compatibility shims (standard pyuic4 preamble): fall back gracefully when
# QString.fromUtf8 / QApplication.UnicodeUTF8 are unavailable.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString in this PyQt4 API: pass the plain string through.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer translate() signature without the encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    # Auto-generated (pyuic4-style) UI definition: two line edits stacked
    # over a row of three buttons in a form layout.
    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(457, 95)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.formLayout = QtGui.QFormLayout(self.centralwidget)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        # Row 0: editable input path.
        self.pathInLineEdit = QtGui.QLineEdit(self.centralwidget)
        self.pathInLineEdit.setObjectName(_fromUtf8("pathInLineEdit"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.SpanningRole, self.pathInLineEdit)
        # Row 1: read-only output path.
        self.pathOutLineEdit = QtGui.QLineEdit(self.centralwidget)
        self.pathOutLineEdit.setReadOnly(True)
        self.pathOutLineEdit.setObjectName(_fromUtf8("pathOutLineEdit"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.SpanningRole, self.pathOutLineEdit)
        # Row 2: explorer / convert / close buttons separated by spacers.
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
        self.explorerButton = QtGui.QPushButton(self.centralwidget)
        self.explorerButton.setObjectName(_fromUtf8("explorerButton"))
        self.buttonLayout.addWidget(self.explorerButton)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.buttonLayout.addItem(spacerItem)
        self.convertButton = QtGui.QPushButton(self.centralwidget)
        self.convertButton.setObjectName(_fromUtf8("convertButton"))
        self.buttonLayout.addWidget(self.convertButton)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.buttonLayout.addItem(spacerItem1)
        self.closeButton = QtGui.QPushButton(self.centralwidget)
        self.closeButton.setObjectName(_fromUtf8("closeButton"))
        self.buttonLayout.addWidget(self.closeButton)
        self.formLayout.setLayout(2, QtGui.QFormLayout.SpanningRole, self.buttonLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all translatable texts (titles, placeholders, labels)."""
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.pathInLineEdit.setPlaceholderText(_translate("MainWindow", "Input Path", None))
        self.pathOutLineEdit.setPlaceholderText(_translate("MainWindow", "Output Path", None))
        self.explorerButton.setText(_translate("MainWindow", "Open In Explorer", None))
        self.convertButton.setText(_translate("MainWindow", "Convert", None))
        self.closeButton.setText(_translate("MainWindow", "Close", None))
# Standalone preview: show the bare UI in an empty QMainWindow.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
"""Bio.SeqIO support for the binary Standard Flowgram Format (SFF) file format.
SFF was designed by 454 Life Sciences (Roche), the Whitehead Institute for
Biomedical Research and the Wellcome Trust Sanger Institute. You are expected
to use this module via the Bio.SeqIO functions under the format name "sff" (or
"sff-trim" as described below).
For example, to iterate over the records in an SFF file,
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JWQ7T 265 tcagGGTCTACATGTTGGTT...
E3MFGYR02JA6IL 271 tcagTTTTTTTTGGAAAGGA...
E3MFGYR02JHD4H 310 tcagAAAGACAAGTGGTATC...
E3MFGYR02GFKUC 299 tcagCGGCCGGGCCTCTCAT...
E3MFGYR02FTGED 281 tcagTGGTAATGGGGGGAAA...
E3MFGYR02FR9G7 261 tcagCTCCGTAAGAAGGTGC...
E3MFGYR02GAZMS 278 tcagAAAGAAGTAAGGTAAA...
E3MFGYR02HHZ8O 221 tcagACTTTCTTCTTTACCG...
E3MFGYR02GPGB1 269 tcagAAGCAGTGGTATCAAC...
E3MFGYR02F7Z7G 219 tcagAATCATCCACTTTTTA...
Each SeqRecord object will contain all the annotation from the SFF file,
including the PHRED quality scores.
>>> print record.id, len(record)
E3MFGYR02F7Z7G 219
>>> print record.seq[:10], "..."
tcagAATCAT ...
>>> print record.letter_annotations["phred_quality"][:10], "..."
[22, 21, 23, 28, 26, 15, 12, 21, 28, 21] ...
Notice that the sequence is given in mixed case, the central upper case region
corresponds to the trimmed sequence. This matches the output of the Roche
tools (and the 3rd party tool sff_extract) for SFF to FASTA.
>>> print record.annotations["clip_qual_left"]
4
>>> print record.annotations["clip_qual_right"]
134
>>> print record.seq[:4]
tcag
>>> print record.seq[4:20], "...", record.seq[120:134]
AATCATCCACTTTTTA ... CAAAACACAAACAG
>>> print record.seq[134:]
atcttatcaacaaaactcaaagttcctaactgagacacgcaacaggggataagacaaggcacacaggggataggnnnnnnnnnnn
The annotations dictionary also contains any adapter clip positions
(usually zero), and information about the flows. e.g.
>>> print record.annotations["flow_key"]
TCAG
>>> print record.annotations["flow_values"][:10], "..."
(83, 1, 128, 7, 4, 84, 6, 106, 3, 172) ...
>>> print len(record.annotations["flow_values"])
400
>>> print record.annotations["flow_index"][:10], "..."
(1, 2, 3, 2, 2, 0, 3, 2, 3, 3) ...
>>> print len(record.annotations["flow_index"])
219
As a convenience method, you can read the file with SeqIO format name "sff-trim"
instead of "sff" to get just the trimmed sequences (without any annotation
except for the PHRED quality scores):
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JWQ7T 260 GGTCTACATGTTGGTTAACC...
E3MFGYR02JA6IL 265 TTTTTTTTGGAAAGGAAAAC...
E3MFGYR02JHD4H 292 AAAGACAAGTGGTATCAACG...
E3MFGYR02GFKUC 295 CGGCCGGGCCTCTCATCGGT...
E3MFGYR02FTGED 277 TGGTAATGGGGGGAAATTTA...
E3MFGYR02FR9G7 256 CTCCGTAAGAAGGTGCTGCC...
E3MFGYR02GAZMS 271 AAAGAAGTAAGGTAAATAAC...
E3MFGYR02HHZ8O 150 ACTTTCTTCTTTACCGTAAC...
E3MFGYR02GPGB1 221 AAGCAGTGGTATCAACGCAG...
E3MFGYR02F7Z7G 130 AATCATCCACTTTTTAACGT...
Looking at the final record in more detail, note how this differs to the
example above:
>>> print record.id, len(record)
E3MFGYR02F7Z7G 130
>>> print record.seq[:10], "..."
AATCATCCAC ...
>>> print record.letter_annotations["phred_quality"][:10], "..."
[26, 15, 12, 21, 28, 21, 36, 28, 27, 27] ...
>>> print record.annotations
{}
You might use the Bio.SeqIO.convert() function to convert the (trimmed) SFF
reads into a FASTQ file (or a FASTA file and a QUAL file), e.g.
>>> from Bio import SeqIO
>>> from StringIO import StringIO
>>> out_handle = StringIO()
>>> count = SeqIO.convert("Roche/E3MFGYR02_random_10_reads.sff", "sff",
... out_handle, "fastq")
>>> print "Converted %i records" % count
Converted 10 records
The output FASTQ file would start like this:
>>> print "%s..." % out_handle.getvalue()[:50]
@E3MFGYR02JWQ7T
tcagGGTCTACATGTTGGTTAACCCGTACTGATT...
Bio.SeqIO.index() provides memory efficient random access to the reads in an
SFF file by name. SFF files can include an index within the file, which can
be read in making this very fast. If the index is missing (or in a format not
yet supported in Biopython) the file is indexed by scanning all the reads -
which is a little slower. For example,
>>> from Bio import SeqIO
>>> reads = SeqIO.index("Roche/E3MFGYR02_random_10_reads.sff", "sff")
>>> record = reads["E3MFGYR02JHD4H"]
>>> print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 310 tcagAAAGACAAGTGGTATC...
Or, using the trimmed reads:
>>> from Bio import SeqIO
>>> reads = SeqIO.index("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim")
>>> record = reads["E3MFGYR02JHD4H"]
>>> print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 292 AAAGACAAGTGGTATCAACG...
You can also use the Bio.SeqIO.write() function with the "sff" format. Note
that this requires all the flow information etc, and thus is probably only
useful for SeqRecord objects originally from reading another SFF file (and
not the trimmed SeqRecord objects from parsing an SFF file as "sff-trim").
As an example, let's pretend this example SFF file represents some DNA which
was pre-amplified with a PCR primers AAAGANNNNN. The following script would
produce a sub-file containing all those reads whose post-quality clipping
region (i.e. the sequence after trimming) starts with AAAGA exactly (the non-
degenerate bit of this pretend primer):
>>> from Bio import SeqIO
>>> records = (record for record in
... SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff","sff")
... if record.seq[record.annotations["clip_qual_left"]:].startswith("AAAGA"))
>>> count = SeqIO.write(records, "temp_filtered.sff", "sff")
>>> print "Selected %i records" % count
Selected 2 records
Of course, for an assembly you would probably want to remove these primers.
If you want FASTA or FASTQ output, you could just slice the SeqRecord. However,
if you want SFF output we have to preserve all the flow information - the trick
is just to adjust the left clip position!
>>> from Bio import SeqIO
>>> def filter_and_trim(records, primer):
... for record in records:
... if record.seq[record.annotations["clip_qual_left"]:].startswith(primer):
... record.annotations["clip_qual_left"] += len(primer)
... yield record
>>> records = SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff")
>>> count = SeqIO.write(filter_and_trim(records,"AAAGA"),
... "temp_filtered.sff", "sff")
>>> print "Selected %i records" % count
Selected 2 records
We can check the results, note the lower case clipped region now includes the "AAAGA"
sequence:
>>> for record in SeqIO.parse("temp_filtered.sff", "sff"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 310 tcagaaagaCAAGTGGTATC...
E3MFGYR02GAZMS 278 tcagaaagaAGTAAGGTAAA...
>>> for record in SeqIO.parse("temp_filtered.sff", "sff-trim"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 287 CAAGTGGTATCAACGCAGAG...
E3MFGYR02GAZMS 266 AGTAAGGTAAATAACAAACG...
>>> import os
>>> os.remove("temp_filtered.sff")
For a description of the file format, please see the Roche manuals and:
http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=formats
"""
from Bio.SeqIO.Interfaces import SequenceWriter
from Bio import Alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import struct
import sys
from Bio._py3k import _bytes_to_string, _as_bytes
# Byte constants used when parsing the binary SFF format; these are
# compared against raw bytes read from the handle.
_null = _as_bytes("\0")
_sff = _as_bytes(".sff")   # magic number of the main SFF file header
_hsh = _as_bytes(".hsh")   # magic number of a hash table index block
_srt = _as_bytes(".srt")   # magic number of a Roche sorted index block
_mft = _as_bytes(".mft")   # magic number of a Roche manifest index block
try:
    # This works on Python 2.6+ or Python 3.0. Byte 0xFF is used by Roche
    # as the record separator within SFF index blocks.
    _flag = eval(r'b"\xff"')
except SyntaxError:
    # Must be on Python 2.4 or 2.5, where plain strings are byte strings
    _flag = "\xff"  # Char 255
def _sff_file_header(handle):
    """Read in an SFF file header (PRIVATE).

    Assumes the handle is at the start of the file, will read forwards
    through the header and leave the handle pointing at the first record.
    Returns a tuple of values from the header (header_length, index_offset,
    index_length, number_of_reads, flows_per_read, flow_chars, key_sequence)

    >>> handle = open("Roche/greek.sff", "rb")
    >>> values = _sff_file_header(handle)
    >>> print values[0]
    840
    >>> print values[1]
    65040
    >>> print values[2]
    256
    >>> print values[3]
    24
    >>> print values[4]
    800
    >>> values[-1]
    'TCAG'
    """
    # Universal newline mode would corrupt the binary data:
    if hasattr(handle, "mode") and "U" in handle.mode.upper():
        raise ValueError("SFF files must NOT be opened in universal new "
                         "lines mode. Binary mode is recommended (although "
                         "on Unix the default mode is also fine).")
    elif hasattr(handle, "mode") and "B" not in handle.mode.upper() \
    and sys.platform == "win32":
        raise ValueError("SFF files must be opened in binary mode on Windows")
    # file header (part one)
    # use big endian encoding >
    # magic_number               I
    # version                    4B
    # index_offset               Q
    # index_length               I
    # number_of_reads            I
    # header_length              H
    # key_length                 H
    # number_of_flows_per_read   H
    # flowgram_format_code       B
    # [rest of file header depends on the number of flows and how many keys]
    fmt = '>4s4BQIIHHHB'
    assert 31 == struct.calcsize(fmt)
    data = handle.read(31)
    if not data:
        raise ValueError("Empty file.")
    elif len(data) < 13:
        raise ValueError("File too small to hold a valid SFF header.")
    magic_number, ver0, ver1, ver2, ver3, index_offset, index_length, \
        number_of_reads, header_length, key_length, number_of_flows_per_read, \
        flowgram_format = struct.unpack(fmt, data)
    if magic_number in [_hsh, _srt, _mft]:
        # Probably user error, calling Bio.SeqIO.parse() twice!
        raise ValueError("Handle seems to be at SFF index block, not start")
    if magic_number != _sff:  # 779314790
        raise ValueError("SFF file did not start '.sff', but %s" \
                         % repr(magic_number))
    if (ver0, ver1, ver2, ver3) != (0, 0, 0, 1):
        raise ValueError("Unsupported SFF version in header, %i.%i.%i.%i" \
                         % (ver0, ver1, ver2, ver3))
    if flowgram_format != 1:
        raise ValueError("Flowgram format code %i not supported" \
                         % flowgram_format)
    # The spec requires the offset and length to be both zero or both set:
    if (index_offset != 0) ^ (index_length != 0):
        raise ValueError("Index offset %i but index length %i" \
                         % (index_offset, index_length))
    flow_chars = _bytes_to_string(handle.read(number_of_flows_per_read))
    key_sequence = _bytes_to_string(handle.read(key_length))
    # According to the spec, the header_length field should be the total number
    # of bytes required by this set of header fields, and should be equal to
    # "31 + number_of_flows_per_read + key_length" rounded up to the next value
    # divisible by 8.
    assert header_length % 8 == 0
    padding = header_length - number_of_flows_per_read - key_length - 31
    assert 0 <= padding < 8, padding
    if handle.read(padding).count(_null) != padding:
        raise ValueError("Post header %i byte padding region contained data" \
                         % padding)
    return header_length, index_offset, index_length, \
        number_of_reads, number_of_flows_per_read, \
        flow_chars, key_sequence
def _sff_do_slow_index(handle):
    """Generates an index by scanning through all the reads in an SFF file (PRIVATE).

    This is a slow but generic approach if we can't parse the provided index
    (if present).

    Will use the handle seek/tell functions.

    Yields (read name as string, record offset in bytes) tuples, one per read.
    """
    handle.seek(0)
    header_length, index_offset, index_length, number_of_reads, \
        number_of_flows_per_read, flow_chars, key_sequence \
        = _sff_file_header(handle)
    # Now on to the reads...
    read_header_fmt = '>2HI4H'
    read_header_size = struct.calcsize(read_header_fmt)
    # NOTE - assuming flowgram_format==1, which means struct type H
    read_flow_fmt = ">%iH" % number_of_flows_per_read
    read_flow_size = struct.calcsize(read_flow_fmt)
    assert 1 == struct.calcsize(">B")
    assert 1 == struct.calcsize(">s")
    assert 1 == struct.calcsize(">c")
    assert read_header_size % 8 == 0  # Important for padding calc later!
    for read in range(number_of_reads):
        record_offset = handle.tell()
        if record_offset == index_offset:
            # Found index block within reads, ignore it (skip to the next
            # 8-byte boundary after it):
            offset = index_offset + index_length
            if offset % 8:
                offset += 8 - (offset % 8)
            assert offset % 8 == 0
            handle.seek(offset)
            record_offset = offset
        # assert record_offset%8 == 0 #Worth checking, but slow
        # First the fixed header
        data = handle.read(read_header_size)
        read_header_length, name_length, seq_len, clip_qual_left, \
            clip_qual_right, clip_adapter_left, clip_adapter_right \
            = struct.unpack(read_header_fmt, data)
        if read_header_length < 10 or read_header_length % 8 != 0:
            raise ValueError("Malformed read header, says length is %i:\n%s" \
                             % (read_header_length, repr(data)))
        # now the name and any padding (remainder of header)
        name = _bytes_to_string(handle.read(name_length))
        padding = read_header_length - read_header_size - name_length
        if handle.read(padding).count(_null) != padding:
            raise ValueError("Post name %i byte padding region contained data" \
                             % padding)
        assert record_offset + read_header_length == handle.tell()
        # now skip the flowgram values, flowgram index, bases and qualities
        # (we only need their total size, not their contents, for an index)
        size = read_flow_size + 3*seq_len
        handle.seek(size, 1)
        # now any padding...
        padding = size % 8
        if padding:
            padding = 8 - padding
            if handle.read(padding).count(_null) != padding:
                raise ValueError("Post quality %i byte padding region contained data" \
                                 % padding)
        yield name, record_offset
    if handle.tell() % 8 != 0:
        raise ValueError("After scanning reads, did not end on a multiple of 8")
def _sff_find_roche_index(handle):
    """Locate any existing Roche style XML meta data and read index (PRIVATE).

    Makes a number of hard coded assumptions based on reverse engineered SFF
    files from Roche 454 machines.

    Returns a tuple of read count, SFF "index" offset and size, XML offset
    and size, and the actual read index offset and size.

    Raises a ValueError for unsupported or non-Roche index blocks.
    """
    handle.seek(0)
    header_length, index_offset, index_length, number_of_reads, \
        number_of_flows_per_read, flow_chars, key_sequence \
        = _sff_file_header(handle)
    assert handle.tell() == header_length
    # Bug fix: the original test checked index_offset twice, so a header
    # with a non-zero offset but zero length slipped through; both fields
    # must be non-zero for an index block to exist.
    if not index_offset or not index_length:
        raise ValueError("No index present in this SFF file")
    # Now jump to the header...
    handle.seek(index_offset)
    fmt = ">4s4B"
    fmt_size = struct.calcsize(fmt)
    data = handle.read(fmt_size)
    if not data:
        raise ValueError("Premature end of file? Expected index of size %i at offest %i, found nothing" \
                         % (index_length, index_offset))
    if len(data) < fmt_size:
        raise ValueError("Premature end of file? Expected index of size %i at offest %i, found %s" \
                         % (index_length, index_offset, repr(data)))
    magic_number, ver0, ver1, ver2, ver3 = struct.unpack(fmt, data)
    if magic_number == _mft:  # 778921588
        # Roche 454 manifest index
        # This is typical from raw Roche 454 SFF files (2009), and includes
        # both an XML manifest and the sorted index.
        if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
            # This is "1.00" as a string
            raise ValueError("Unsupported version in .mft index header, %i.%i.%i.%i" \
                             % (ver0, ver1, ver2, ver3))
        fmt2 = ">LL"
        fmt2_size = struct.calcsize(fmt2)
        xml_size, data_size = struct.unpack(fmt2, handle.read(fmt2_size))
        if index_length != fmt_size + fmt2_size + xml_size + data_size:
            raise ValueError("Problem understanding .mft index header, %i != %i + %i + %i + %i" \
                             % (index_length, fmt_size, fmt2_size, xml_size, data_size))
        return number_of_reads, header_length, \
            index_offset, index_length, \
            index_offset + fmt_size + fmt2_size, xml_size, \
            index_offset + fmt_size + fmt2_size + xml_size, data_size
    elif magic_number == _srt:  # 779317876
        # Roche 454 sorted index
        # I've had this from Roche tool sfffile when the read identifiers
        # had nonstandard lengths and there was no XML manifest.
        if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
            # This is "1.00" as a string
            raise ValueError("Unsupported version in .srt index header, %i.%i.%i.%i" \
                             % (ver0, ver1, ver2, ver3))
        data = handle.read(4)
        if data != _null*4:
            raise ValueError("Did not find expected null four bytes in .srt index")
        # No XML manifest in this index style (zeros for XML offset/size):
        return number_of_reads, header_length, \
            index_offset, index_length, \
            0, 0, \
            index_offset + fmt_size + 4, index_length - fmt_size - 4
    elif magic_number == _hsh:
        raise ValueError("Hash table style indexes (.hsh) in SFF files are "
                         "not (yet) supported")
    else:
        raise ValueError("Unknown magic number %s in SFF index header:\n%s" \
                         % (repr(magic_number), repr(data)))
def _sff_read_roche_index_xml(handle):
    """Return any Roche style XML manifest from the SFF "index" (PRIVATE, DEPRECATED).

    Will use the handle seek/tell functions. Returns a string.

    This has been replaced by ReadRocheXmlManifest. We would normally just
    delete an old private function without warning, but I believe some people
    are using this so we'll handle this with a deprecation warning.
    """
    import warnings
    message = ("Private function _sff_read_roche_index_xml is deprecated. "
               "Use new public function ReadRocheXmlManifest instead")
    warnings.warn(message, DeprecationWarning)
    return ReadRocheXmlManifest(handle)
def ReadRocheXmlManifest(handle):
    """Reads any Roche style XML manifest data in the SFF "index".

    The SFF file format allows for multiple different index blocks, and Roche
    took advantage of this to define their own index block which also embeds
    an XML manifest string. This is not a publicly documented extension to
    the SFF file format, this was reverse engineered.

    The handle should be to an SFF file opened in binary mode. This function
    will use the handle seek/tell functions and leave the handle in an
    arbitrary location.

    Any XML manifest found is returned as a Python string, which you can then
    parse as appropriate, or reuse when writing out SFF files with the
    SffWriter class.

    Returns a string, or raises a ValueError if a Roche manifest could not be
    found.
    """
    index_info = _sff_find_roche_index(handle)
    xml_offset = index_info[4]
    xml_size = index_info[5]
    if xml_offset and xml_size:
        handle.seek(xml_offset)
        return _bytes_to_string(handle.read(xml_size))
    raise ValueError("No XML manifest found")
def _sff_read_roche_index(handle):
    """Reads any existing Roche style read index provided in the SFF file (PRIVATE).

    Will use the handle seek/tell functions.

    This works on ".srt1.00" and ".mft1.00" style Roche SFF index blocks.

    Roche SFF indices use base 255 not 256, meaning we see bytes in range the
    range 0 to 254 only. This appears to be so that byte 0xFF (character 255)
    can be used as a marker character to separate entries (required if the
    read name lengths vary).

    Note that since only four bytes are used for the read offset, this is
    limited to 255^4 bytes (nearly 4GB). If you try to use the Roche sfffile
    tool to combine SFF files beyond this limit, they issue a warning and
    omit the index (and manifest).

    Yields (read name as string, record offset in bytes) tuples.
    """
    number_of_reads, header_length, index_offset, index_length, xml_offset, \
        xml_size, read_index_offset, read_index_size = _sff_find_roche_index(handle)
    # Now parse the read index...
    handle.seek(read_index_offset)
    fmt = ">5B"
    for read in range(number_of_reads):
        # TODO - Be more aware of when the index should end?
        # Each entry is the name, then five offset bytes, then the 0xFF
        # flag byte; names are at least one character so read 6 bytes
        # before scanning for the flag:
        data = handle.read(6)
        while True:
            more = handle.read(1)
            if not more:
                raise ValueError("Premature end of file!")
            data += more
            if more == _flag: break
        assert data[-1:] == _flag, data[-1:]
        name = _bytes_to_string(data[:-6])
        off4, off3, off2, off1, off0 = struct.unpack(fmt, data[-6:-1])
        # Decode the base 255 offset (255**2 == 65025, 255**3 == 16581375):
        offset = off0 + 255*off1 + 65025*off2 + 16581375*off3
        if off4:
            # Could in theory be used as a fifth piece of offset information,
            # i.e. offset += 4228250625L*off4, but testing the Roche tools this
            # is not the case. They simply don't support such large indexes.
            raise ValueError("Expected a null terminator to the read name.")
        yield name, offset
    if handle.tell() != read_index_offset + read_index_size:
        raise ValueError("Problem with index length? %i vs %i" \
                         % (handle.tell(), read_index_offset + read_index_size))
def _sff_read_seq_record(handle, number_of_flows_per_read, flow_chars,
                         key_sequence, alphabet, trim=False):
    """Parse the next read in the file, return data as a SeqRecord (PRIVATE).

    Assumes the handle is positioned at the start of a read record; the
    flow arguments are the values parsed from the file header (see the
    _sff_file_header function). If trim is True, only the quality clipped
    region of the read is returned (and the flow/clipping annotations,
    which would no longer make sense, are omitted).
    """
    # Now on to the reads...
    # the read header format (fixed part):
    # read_header_length     H
    # name_length            H
    # seq_len                I
    # clip_qual_left         H
    # clip_qual_right        H
    # clip_adapter_left      H
    # clip_adapter_right     H
    # [rest of read header depends on the name length etc]
    read_header_fmt = '>2HI4H'
    read_header_size = struct.calcsize(read_header_fmt)
    read_flow_fmt = ">%iH" % number_of_flows_per_read
    read_flow_size = struct.calcsize(read_flow_fmt)
    read_header_length, name_length, seq_len, clip_qual_left, \
        clip_qual_right, clip_adapter_left, clip_adapter_right \
        = struct.unpack(read_header_fmt, handle.read(read_header_size))
    # The SFF left clip values are one based, convert to Python slicing:
    if clip_qual_left:
        clip_qual_left -= 1  # python counting
    if clip_adapter_left:
        clip_adapter_left -= 1  # python counting
    if read_header_length < 10 or read_header_length % 8 != 0:
        raise ValueError("Malformed read header, says length is %i" \
                         % read_header_length)
    # now the name and any padding (remainder of header)
    name = _bytes_to_string(handle.read(name_length))
    padding = read_header_length - read_header_size - name_length
    if handle.read(padding).count(_null) != padding:
        raise ValueError("Post name %i byte padding region contained data" \
                         % padding)
    # now the flowgram values, flowgram index, bases and qualities
    # NOTE - assuming flowgram_format==1, which means struct type H
    flow_values = handle.read(read_flow_size)  # unpack later if needed
    temp_fmt = ">%iB" % seq_len  # used for flow index and quals
    flow_index = handle.read(seq_len)  # unpack later if needed
    seq = _bytes_to_string(handle.read(seq_len))  # TODO - Use bytes in Seq?
    quals = list(struct.unpack(temp_fmt, handle.read(seq_len)))
    # now any padding...
    padding = (read_flow_size + seq_len*3) % 8
    if padding:
        padding = 8 - padding
        if handle.read(padding).count(_null) != padding:
            raise ValueError("Post quality %i byte padding region contained data" \
                             % padding)
    # Now build a SeqRecord
    if trim:
        seq = seq[clip_qual_left:clip_qual_right].upper()
        quals = quals[clip_qual_left:clip_qual_right]
        # Don't record the clipping values, flow etc, they make no sense now:
        annotations = {}
    else:
        # This use of mixed case mimics the Roche SFF tool's FASTA output
        seq = seq[:clip_qual_left].lower() + \
            seq[clip_qual_left:clip_qual_right].upper() + \
            seq[clip_qual_right:].lower()
        annotations = {"flow_values": struct.unpack(read_flow_fmt, flow_values),
                       "flow_index": struct.unpack(temp_fmt, flow_index),
                       "flow_chars": flow_chars,
                       "flow_key": key_sequence,
                       "clip_qual_left": clip_qual_left,
                       "clip_qual_right": clip_qual_right,
                       "clip_adapter_left": clip_adapter_left,
                       "clip_adapter_right": clip_adapter_right}
    record = SeqRecord(Seq(seq, alphabet),
                       id=name,
                       name=name,
                       description="",
                       annotations=annotations)
    # Dirty trick to speed up this line (bypasses the length validation
    # which we know will pass since we read seq_len quality values):
    # record.letter_annotations["phred_quality"] = quals
    dict.__setitem__(record._per_letter_annotations,
                     "phred_quality", quals)
    # TODO - adaptor clipping
    # Return the record and then continue...
    return record
def SffIterator(handle, alphabet=Alphabet.generic_dna, trim=False):
    """Iterate over Standard Flowgram Format (SFF) reads (as SeqRecord objects).

    handle - input file, an SFF file, e.g. from Roche 454 sequencing.
             This must NOT be opened in universal read lines mode!
    alphabet - optional alphabet, defaults to generic DNA.
    trim - should the sequences be trimmed?

    The resulting SeqRecord objects should match those from a paired FASTA
    and QUAL file converted from the SFF file using the Roche 454 tool
    sffinfo. i.e. The sequence will be mixed case, with the trim regions
    shown in lower case.

    This function is used internally via the Bio.SeqIO functions:

    >>> from Bio import SeqIO
    >>> handle = open("Roche/E3MFGYR02_random_10_reads.sff", "rb")
    >>> for record in SeqIO.parse(handle, "sff"):
    ...     print record.id, len(record)
    E3MFGYR02JWQ7T 265
    E3MFGYR02JA6IL 271
    E3MFGYR02JHD4H 310
    E3MFGYR02GFKUC 299
    E3MFGYR02FTGED 281
    E3MFGYR02FR9G7 261
    E3MFGYR02GAZMS 278
    E3MFGYR02HHZ8O 221
    E3MFGYR02GPGB1 269
    E3MFGYR02F7Z7G 219
    >>> handle.close()

    You can also call it directly:

    >>> handle = open("Roche/E3MFGYR02_random_10_reads.sff", "rb")
    >>> for record in SffIterator(handle):
    ...     print record.id, len(record)
    E3MFGYR02JWQ7T 265
    E3MFGYR02JA6IL 271
    E3MFGYR02JHD4H 310
    E3MFGYR02GFKUC 299
    E3MFGYR02FTGED 281
    E3MFGYR02FR9G7 261
    E3MFGYR02GAZMS 278
    E3MFGYR02HHZ8O 221
    E3MFGYR02GPGB1 269
    E3MFGYR02F7Z7G 219
    >>> handle.close()

    Or, with the trim option:

    >>> handle = open("Roche/E3MFGYR02_random_10_reads.sff", "rb")
    >>> for record in SffIterator(handle, trim=True):
    ...     print record.id, len(record)
    E3MFGYR02JWQ7T 260
    E3MFGYR02JA6IL 265
    E3MFGYR02JHD4H 292
    E3MFGYR02GFKUC 295
    E3MFGYR02FTGED 277
    E3MFGYR02FR9G7 256
    E3MFGYR02GAZMS 271
    E3MFGYR02HHZ8O 150
    E3MFGYR02GPGB1 221
    E3MFGYR02F7Z7G 130
    >>> handle.close()

    """
    # SFF files hold DNA flowgram data, reject alphabets which cannot apply:
    if isinstance(Alphabet._get_base_alphabet(alphabet),
                  Alphabet.ProteinAlphabet):
        raise ValueError("Invalid alphabet, SFF files do not hold proteins.")
    if isinstance(Alphabet._get_base_alphabet(alphabet),
                  Alphabet.RNAAlphabet):
        raise ValueError("Invalid alphabet, SFF files do not hold RNA.")
    header_length, index_offset, index_length, number_of_reads, \
        number_of_flows_per_read, flow_chars, key_sequence \
        = _sff_file_header(handle)
    # Now on to the reads...
    # the read header format (fixed part):
    # read_header_length     H
    # name_length            H
    # seq_len                I
    # clip_qual_left         H
    # clip_qual_right        H
    # clip_adapter_left      H
    # clip_adapter_right     H
    # [rest of read header depends on the name length etc]
    read_header_fmt = '>2HI4H'
    read_header_size = struct.calcsize(read_header_fmt)
    read_flow_fmt = ">%iH" % number_of_flows_per_read
    read_flow_size = struct.calcsize(read_flow_fmt)
    assert 1 == struct.calcsize(">B")
    assert 1 == struct.calcsize(">s")
    assert 1 == struct.calcsize(">c")
    assert read_header_size % 8 == 0  # Important for padding calc later!
    # The spec allows for the index block to be before or even in the middle
    # of the reads. We can check that if we keep track of our position
    # in the file...
    for read in range(number_of_reads):
        if index_offset and handle.tell() == index_offset:
            # Skip over the index block (padded to an 8 byte boundary):
            offset = index_offset + index_length
            if offset % 8:
                offset += 8 - (offset % 8)
            assert offset % 8 == 0
            handle.seek(offset)
            # Now that we've done this, we don't need to do it again. Clear
            # the index_offset so we can skip extra handle.tell() calls:
            index_offset = 0
        yield _sff_read_seq_record(handle,
                                   number_of_flows_per_read,
                                   flow_chars,
                                   key_sequence,
                                   alphabet,
                                   trim)
    # The following is not essential, but avoids confusing error messages
    # for the user if they try and re-parse the same handle.
    if index_offset and handle.tell() == index_offset:
        offset = index_offset + index_length
        if offset % 8:
            offset += 8 - (offset % 8)
        assert offset % 8 == 0
        handle.seek(offset)
    # Should now be at the end of the file...
    if handle.read(1):
        raise ValueError("Additional data at end of SFF file")
def _SffTrimIterator(handle, alphabet=Alphabet.generic_dna):
    """Iterate over SFF reads (as SeqRecord objects) with trimming (PRIVATE).

    Thin wrapper around SffIterator with the trim argument pre-set to True.
    """
    return SffIterator(handle, alphabet=alphabet, trim=True)
class SffWriter(SequenceWriter):
    """SFF file writer.

    Writes Standard Flowgram Format (SFF) files, optionally including a
    Roche style sorted read index and XML manifest block at the end.
    """

    def __init__(self, handle, index=True, xml=None):
        """Creates the writer object.

        handle - Output handle, ideally in binary write mode.
        index - Boolean argument, should we try and write an index?
        xml - Optional string argument, xml manifest to be recorded in the index
              block (see function ReadRocheXmlManifest for reading this data).
        """
        if hasattr(handle, "mode") and "U" in handle.mode.upper():
            raise ValueError("SFF files must NOT be opened in universal new "
                             "lines mode. Binary mode is required")
        elif hasattr(handle, "mode") and "B" not in handle.mode.upper():
            raise ValueError("SFF files must be opened in binary mode")
        self.handle = handle
        self._xml = xml
        # _index collects (name, offset) pairs as records are written;
        # None means no index block will be recorded.
        if index:
            self._index = []
        else:
            self._index = None

    def write_file(self, records):
        """Use this to write an entire file containing the given records.

        Returns the number of records written. Requires at least one record
        (the flow information for the file header is taken from the first
        record's annotations).
        """
        try:
            self._number_of_reads = len(records)
        except TypeError:
            # records is an iterator/generator; the count is unknown until
            # the end, so we will have to seek back and patch the header.
            self._number_of_reads = 0  # dummy value
            if not hasattr(self.handle, "seek") \
            or not hasattr(self.handle, "tell"):
                raise ValueError("A handle with a seek/tell methods is "
                                 "required in order to record the total "
                                 "record count in the file header (once it "
                                 "is known at the end).")
        if self._index is not None and \
        not (hasattr(self.handle, "seek") and hasattr(self.handle, "tell")):
            import warnings
            warnings.warn("A handle with a seek/tell methods is required in "
                          "order to record an SFF index.")
            self._index = None
        self._index_start = 0
        self._index_length = 0
        if not hasattr(records, "next"):
            records = iter(records)
        # Get the first record in order to find the flow information
        # we will need for the header.
        try:
            record = records.next()
        except StopIteration:
            record = None
        if record is None:
            # No records -> empty SFF file (or an error)?
            # We can't write a header without the flow information.
            # return 0
            raise ValueError("Need at least one record for SFF output")
        try:
            self._key_sequence = _as_bytes(record.annotations["flow_key"])
            self._flow_chars = _as_bytes(record.annotations["flow_chars"])
            self._number_of_flows_per_read = len(self._flow_chars)
        except KeyError:
            raise ValueError("Missing SFF flow information")
        self.write_header()
        self.write_record(record)
        count = 1
        for record in records:
            self.write_record(record)
            count += 1
        if self._number_of_reads == 0:
            # Must go back and record the record count...
            offset = self.handle.tell()
            self.handle.seek(0)
            self._number_of_reads = count
            self.write_header()
            self.handle.seek(offset)  # not essential?
        else:
            assert count == self._number_of_reads
        if self._index is not None:
            self._write_index()
        return count

    def _write_index(self):
        """Write a Roche style sorted index block plus XML manifest (PRIVATE).

        Appends the index at the current handle position, then seeks back to
        patch both the index block header and the main file header.
        """
        assert len(self._index) == self._number_of_reads
        handle = self.handle
        self._index.sort()
        self._index_start = handle.tell()  # need for header
        # XML...
        if self._xml is not None:
            xml = _as_bytes(self._xml)
        else:
            from Bio import __version__
            xml = "<!-- This file was output with Biopython %s -->\n" % __version__
            xml += "<!-- This XML and index block attempts to mimic Roche SFF files -->\n"
            xml += "<!-- This file may be a combination of multiple SFF files etc -->\n"
            xml = _as_bytes(xml)
        xml_len = len(xml)
        # Write to the file...
        fmt = ">I4BLL"
        fmt_size = struct.calcsize(fmt)
        handle.write(_null*fmt_size + xml)  # will come back later to fill this
        fmt2 = ">6B"
        assert 6 == struct.calcsize(fmt2)
        self._index.sort()
        index_len = 0  # don't know yet!
        for name, offset in self._index:
            # Roche files record the offsets using base 255 not 256.
            # See comments for parsing the index block. There may be a faster
            # way to code this, but we can't easily use shifts due to odd base
            off3 = offset
            off0 = off3 % 255
            off3 -= off0
            off1 = off3 % 65025
            off3 -= off1
            off2 = off3 % 16581375
            off3 -= off2
            assert offset == off0 + off1 + off2 + off3, \
                "%i -> %i %i %i %i" % (offset, off0, off1, off2, off3)
            off3, off2, off1, off0 = off3//16581375, off2//65025, \
                off1//255, off0
            assert off0 < 255 and off1 < 255 and off2 < 255 and off3 < 255, \
                "%i -> %i %i %i %i" % (offset, off0, off1, off2, off3)
            handle.write(name + struct.pack(fmt2, 0, \
                                            off3, off2, off1, off0, 255))
            index_len += len(name) + 6
        # Note any padding is not included:
        self._index_length = fmt_size + xml_len + index_len  # need for header
        # Pad out to an 8 byte boundary (although I have noticed some
        # real Roche SFF files neglect to do this despite their manual
        # suggesting this padding should be there):
        if self._index_length % 8:
            padding = 8 - (self._index_length % 8)
            handle.write(_null*padding)
        else:
            padding = 0
        offset = handle.tell()
        assert offset == self._index_start + self._index_length + padding, \
            "%i vs %i + %i + %i" % (offset, self._index_start, \
                                    self._index_length, padding)
        # Must now go back and update the index header with index size...
        handle.seek(self._index_start)
        handle.write(struct.pack(fmt, 778921588,  # magic number
                                 49, 46, 48, 48,  # Roche index version, "1.00"
                                 xml_len, index_len) + xml)
        # Must now go back and update the header...
        handle.seek(0)
        self.write_header()
        handle.seek(offset)  # not essential?

    def write_header(self):
        """Write (or re-write) the fixed SFF file header.

        Uses the flow information and counts cached on the writer; called
        again at the end of write_file when the record count or index
        offset/length were not known up front.
        """
        # Do header...
        key_length = len(self._key_sequence)
        # file header (part one)
        # use big endian encoding >
        # magic_number               I
        # version                    4B
        # index_offset               Q
        # index_length               I
        # number_of_reads            I
        # header_length              H
        # key_length                 H
        # number_of_flows_per_read   H
        # flowgram_format_code       B
        # [rest of file header depends on the number of flows and how many keys]
        fmt = '>I4BQIIHHHB%is%is' % (self._number_of_flows_per_read, key_length)
        # According to the spec, the header_length field should be the total
        # number of bytes required by this set of header fields, and should be
        # equal to "31 + number_of_flows_per_read + key_length" rounded up to
        # the next value divisible by 8.
        if struct.calcsize(fmt) % 8 == 0:
            padding = 0
        else:
            padding = 8 - (struct.calcsize(fmt) % 8)
        header_length = struct.calcsize(fmt) + padding
        assert header_length % 8 == 0
        header = struct.pack(fmt, 779314790,  # magic number 0x2E736666
                             0, 0, 0, 1,  # version
                             self._index_start, self._index_length,
                             self._number_of_reads,
                             header_length, key_length,
                             self._number_of_flows_per_read,
                             1,  # the only flowgram format code we support
                             self._flow_chars, self._key_sequence)
        self.handle.write(header + _null*padding)

    def write_record(self, record):
        """Write a single additional record to the output file.

        This assumes the header has been done.

        The record must carry PHRED qualities in letter_annotations and the
        SFF flow and clipping values in annotations (i.e. it should come from
        parsing an SFF file with the "sff" format, not "sff-trim").
        """
        # Basics
        name = _as_bytes(record.id)
        name_len = len(name)
        seq = _as_bytes(str(record.seq).upper())
        seq_len = len(seq)
        # Qualities
        try:
            quals = record.letter_annotations["phred_quality"]
        except KeyError:
            raise ValueError("Missing PHRED qualities information")
        # Flow
        try:
            flow_values = record.annotations["flow_values"]
            flow_index = record.annotations["flow_index"]
            if self._key_sequence != _as_bytes(record.annotations["flow_key"]) \
            or self._flow_chars != _as_bytes(record.annotations["flow_chars"]):
                raise ValueError("Records have inconsistent SFF flow data")
        except KeyError:
            raise ValueError("Missing SFF flow information")
        except AttributeError:
            raise ValueError("Header not written yet?")
        # Clipping - convert back from Python to SFF one based counting:
        try:
            clip_qual_left = record.annotations["clip_qual_left"]
            if clip_qual_left:
                clip_qual_left += 1
            clip_qual_right = record.annotations["clip_qual_right"]
            clip_adapter_left = record.annotations["clip_adapter_left"]
            if clip_adapter_left:
                clip_adapter_left += 1
            clip_adapter_right = record.annotations["clip_adapter_right"]
        except KeyError:
            raise ValueError("Missing SFF clipping information")
        # Capture information for index
        if self._index is not None:
            offset = self.handle.tell()
            # Check the position of the final record (before sort by name)
            # See comments earlier about how base 255 seems to be used.
            # This means the limit is 255**4 + 255**3 +255**2 + 255**1
            if offset > 4244897280:
                import warnings
                warnings.warn("Read %s has file offset %i, which is too large "
                              "to store in the Roche SFF index structure. No "
                              "index block will be recorded." % (name, offset))
                # No point recording the offsets now
                self._index = None
            else:
                self._index.append((name, self.handle.tell()))
        # the read header format (fixed part):
        # read_header_length     H
        # name_length            H
        # seq_len                I
        # clip_qual_left         H
        # clip_qual_right        H
        # clip_adapter_left      H
        # clip_adapter_right     H
        # [rest of read header depends on the name length etc]
        # name
        # flow values
        # flow index
        # sequence
        # padding
        read_header_fmt = '>2HI4H%is' % name_len
        if struct.calcsize(read_header_fmt) % 8 == 0:
            padding = 0
        else:
            padding = 8 - (struct.calcsize(read_header_fmt) % 8)
        read_header_length = struct.calcsize(read_header_fmt) + padding
        assert read_header_length % 8 == 0
        data = struct.pack(read_header_fmt,
                           read_header_length,
                           name_len, seq_len,
                           clip_qual_left, clip_qual_right,
                           clip_adapter_left, clip_adapter_right,
                           name) + _null*padding
        assert len(data) == read_header_length
        # now the flowgram values, flowgram index, bases and qualities
        # NOTE - assuming flowgram_format==1, which means struct type H
        read_flow_fmt = ">%iH" % self._number_of_flows_per_read
        read_flow_size = struct.calcsize(read_flow_fmt)
        temp_fmt = ">%iB" % seq_len  # used for flow index and quals
        data += struct.pack(read_flow_fmt, *flow_values) \
            + struct.pack(temp_fmt, *flow_index) \
            + seq \
            + struct.pack(temp_fmt, *quals)
        # now any final padding...
        padding = (read_flow_size + seq_len*3) % 8
        if padding:
            padding = 8 - padding
        self.handle.write(data + _null*padding)
if __name__ == "__main__":
    # Quick self-test exercising SffIterator, the two index readers and
    # SffWriter against the Roche sample files from the Biopython test suite.
    # Fix: use print() call syntax and "except ... as err" instead of the
    # Python-2-only print statement / "except ValueError, err" forms, so the
    # module at least parses under Python 3 (behavior is unchanged on 2.6+).
    print("Running quick self test")
    filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.sff"
    metadata = ReadRocheXmlManifest(open(filename, "rb"))
    index1 = sorted(_sff_read_roche_index(open(filename, "rb")))
    index2 = sorted(_sff_do_slow_index(open(filename, "rb")))
    assert index1 == index2
    assert len(index1) == len(list(SffIterator(open(filename, "rb"))))
    from StringIO import StringIO
    try:
        #This is in Python 2.6+, and is essential on Python 3
        from io import BytesIO
    except ImportError:
        BytesIO = StringIO
    assert len(index1) == len(list(SffIterator(BytesIO(open(filename,"rb").read()))))
    if sys.platform != "win32":
        assert len(index1) == len(list(SffIterator(open(filename, "r"))))
        index2 = sorted(_sff_read_roche_index(open(filename)))
        assert index1 == index2
        index2 = sorted(_sff_do_slow_index(open(filename)))
        assert index1 == index2
        assert len(index1) == len(list(SffIterator(open(filename))))
        assert len(index1) == len(list(SffIterator(BytesIO(open(filename,"r").read()))))
        assert len(index1) == len(list(SffIterator(BytesIO(open(filename).read()))))
    # The same reads with the index block placed at different positions must
    # parse identically.
    sff = list(SffIterator(open(filename, "rb")))
    sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
    assert len(sff) == len(sff2)
    for old, new in zip(sff, sff2):
        assert old.id == new.id
        assert str(old.seq) == str(new.seq)
    sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
    assert len(sff) == len(sff2)
    for old, new in zip(sff, sff2):
        assert old.id == new.id
        assert str(old.seq) == str(new.seq)
    sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
    assert len(sff) == len(sff2)
    for old, new in zip(sff, sff2):
        assert old.id == new.id
        assert str(old.seq) == str(new.seq)
    sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_index_at_start.sff", "rb")))
    assert len(sff) == len(sff2)
    for old, new in zip(sff, sff2):
        assert old.id == new.id
        assert str(old.seq) == str(new.seq)
    sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_index_in_middle.sff", "rb")))
    assert len(sff) == len(sff2)
    for old, new in zip(sff, sff2):
        assert old.id == new.id
        assert str(old.seq) == str(new.seq)
    sff_trim = list(SffIterator(open(filename, "rb"), trim=True))
    print(ReadRocheXmlManifest(open(filename, "rb")))
    # Cross-check the SFF records against FASTA/QUAL exports of the same reads.
    from Bio import SeqIO
    filename = "../../Tests/Roche/E3MFGYR02_random_10_reads_no_trim.fasta"
    fasta_no_trim = list(SeqIO.parse(open(filename,"rU"), "fasta"))
    filename = "../../Tests/Roche/E3MFGYR02_random_10_reads_no_trim.qual"
    qual_no_trim = list(SeqIO.parse(open(filename,"rU"), "qual"))
    filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.fasta"
    fasta_trim = list(SeqIO.parse(open(filename,"rU"), "fasta"))
    filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.qual"
    qual_trim = list(SeqIO.parse(open(filename,"rU"), "qual"))
    for s, sT, f, q, fT, qT in zip(sff, sff_trim, fasta_no_trim,
                                   qual_no_trim, fasta_trim, qual_trim):
        #print
        print(s.id)
        #print s.seq
        #print s.letter_annotations["phred_quality"]
        assert s.id == f.id == q.id
        assert str(s.seq) == str(f.seq)
        assert s.letter_annotations["phred_quality"] == q.letter_annotations["phred_quality"]
        assert s.id == sT.id == fT.id == qT.id
        assert str(sT.seq) == str(fT.seq)
        assert sT.letter_annotations["phred_quality"] == qT.letter_annotations["phred_quality"]
    # Round-trip: writing the records back out must reproduce the input file.
    print("Writing with a list of SeqRecords...")
    handle = StringIO()
    w = SffWriter(handle, xml=metadata)
    w.write_file(sff) #list
    data = handle.getvalue()
    print("And again with an iterator...")
    handle = StringIO()
    w = SffWriter(handle, xml=metadata)
    w.write_file(iter(sff))
    assert data == handle.getvalue()
    #Check 100% identical to the original:
    filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.sff"
    original = open(filename,"rb").read()
    assert len(data) == len(original)
    assert data == original
    del data
    handle.close()
    print("-"*50)
    filename = "../../Tests/Roche/greek.sff"
    for record in SffIterator(open(filename,"rb")):
        print(record.id)
    index1 = sorted(_sff_read_roche_index(open(filename, "rb")))
    index2 = sorted(_sff_do_slow_index(open(filename, "rb")))
    assert index1 == index2
    try:
        print(ReadRocheXmlManifest(open(filename, "rb")))
        assert False, "Should fail!"
    except ValueError:
        pass
    # Re-reading an already-consumed handle must raise ValueError.
    handle = open(filename, "rb")
    for record in SffIterator(handle):
        pass
    try:
        for record in SffIterator(handle):
            print(record.id)
        assert False, "Should have failed"
    except ValueError as err:
        print("Checking what happens on re-reading a handle:")
        print(err)
    """
    #Ugly code to make test files...
    index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
    padding = len(index)%8
    if padding:
        padding = 8 - padding
    index += chr(0)*padding
    assert len(index)%8 == 0
    #Ugly bit of code to make a fake index at start
    records = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
    out_handle = open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "w")
    index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
    padding = len(index)%8
    if padding:
        padding = 8 - padding
    index += chr(0)*padding
    w = SffWriter(out_handle, index=False, xml=None)
    #Fake the header...
    w._number_of_reads = len(records)
    w._index_start = 0
    w._index_length = 0
    w._key_sequence = records[0].annotations["flow_key"]
    w._flow_chars = records[0].annotations["flow_chars"]
    w._number_of_flows_per_read = len(w._flow_chars)
    w.write_header()
    w._index_start = out_handle.tell()
    w._index_length = len(index)
    out_handle.seek(0)
    w.write_header() #this time with index info
    w.handle.write(index)
    for record in records:
        w.write_record(record)
    out_handle.close()
    records2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
    for old, new in zip(records, records2):
        assert str(old.seq)==str(new.seq)
    i = list(_sff_do_slow_index(open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
    #Ugly bit of code to make a fake index in middle
    records = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
    out_handle = open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "w")
    index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
    padding = len(index)%8
    if padding:
        padding = 8 - padding
    index += chr(0)*padding
    w = SffWriter(out_handle, index=False, xml=None)
    #Fake the header...
    w._number_of_reads = len(records)
    w._index_start = 0
    w._index_length = 0
    w._key_sequence = records[0].annotations["flow_key"]
    w._flow_chars = records[0].annotations["flow_chars"]
    w._number_of_flows_per_read = len(w._flow_chars)
    w.write_header()
    for record in records[:5]:
        w.write_record(record)
    w._index_start = out_handle.tell()
    w._index_length = len(index)
    w.handle.write(index)
    for record in records[5:]:
        w.write_record(record)
    out_handle.seek(0)
    w.write_header() #this time with index info
    out_handle.close()
    records2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
    for old, new in zip(records, records2):
        assert str(old.seq)==str(new.seq)
    j = list(_sff_do_slow_index(open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
    #Ugly bit of code to make a fake index at end
    records = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
    out_handle = open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "w")
    w = SffWriter(out_handle, index=False, xml=None)
    #Fake the header...
    w._number_of_reads = len(records)
    w._index_start = 0
    w._index_length = 0
    w._key_sequence = records[0].annotations["flow_key"]
    w._flow_chars = records[0].annotations["flow_chars"]
    w._number_of_flows_per_read = len(w._flow_chars)
    w.write_header()
    for record in records:
        w.write_record(record)
    w._index_start = out_handle.tell()
    w._index_length = len(index)
    out_handle.write(index)
    out_handle.seek(0)
    w.write_header() #this time with index info
    out_handle.close()
    records2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
    for old, new in zip(records, records2):
        assert str(old.seq)==str(new.seq)
    try:
        print ReadRocheXmlManifest(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb"))
        assert False, "Should fail!"
    except ValueError:
        pass
    k = list(_sff_do_slow_index(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
    """
    print("Done")
|
import subprocess
import os
class MakeException(Exception):
    """Raised when a build step cannot proceed (e.g. unexpected file extension)."""
def swapExt(path, current, replacement):
    """Replace the extension of *path*.

    :param path: file name whose extension should be swapped
    :param current: the extension *path* is expected to end with (e.g. '.c')
    :param replacement: the extension to substitute (e.g. '.o')
    :return: *path* with *current* replaced by *replacement*
    :raise MakeException: if *path* does not end in *current*

    Bug fix: the error message used to interpolate *replacement* instead of
    the file's actual extension, so the report was misleading.
    """
    root, ext = os.path.splitext(path)
    if ext == current:
        return root + replacement
    raise MakeException(
        "swapExt: expected file name ending in %s, got file name ending in %s" %
        (current, ext))
# Public headers of the bencode library (not referenced by the visible
# build steps below; presumably kept for installation/reference).
headerFiles = [
    'benc.h',
    'bencode.h',
]
# C translation units compiled and linked into the final program.
codeFiles = [
    'benc_int.c',
    'benc_bstr.c',
    'benc_list.c',
    'benc_dict.c',
    'bencode.c',
    'bcopy.c',
]
# Compiler flags: build with debugging symbols.
cflags = ['-g']
# Name of the executable produced by the link step.
programFile = 'bcopy'
def gcc(*packedArgs):
    """Invoke gcc with the given arguments.

    Each positional argument may be a plain value or a list/tuple of values;
    sequences are flattened one level before the call.

    :raise subprocess.CalledProcessError: if gcc exits with a non-zero status.
    """
    flattened = []
    for item in packedArgs:
        if isinstance(item, (list, tuple)):
            flattened.extend(item)
        else:
            flattened.append(item)
    subprocess.check_call(['gcc'] + flattened)
def compile(codeFile, cflags=None):
    """Compile one .c source file into its corresponding .o object file.

    NOTE: deliberately keeps the name ``compile`` (shadowing the builtin)
    for backward compatibility with existing callers.

    :param codeFile: path to a .c source file
    :param cflags: optional list of extra compiler flags; a ``None`` sentinel
        replaces the old mutable ``[]`` default (best practice — avoids a
        shared default list)
    :return: path of the generated object file
    """
    if cflags is None:
        cflags = []
    objectFile = swapExt(codeFile, '.c', '.o')
    gcc(cflags, '-c', ('-o', objectFile), codeFile)
    return objectFile
def link(programFile, objectFiles, cflags=None):
    """Link the given object files into the final executable.

    :param programFile: name of the output executable
    :param objectFiles: list of .o files to link
    :param cflags: optional list of extra flags; ``None`` sentinel replaces
        the old mutable ``[]`` default (best practice — avoids a shared list)
    """
    if cflags is None:
        cflags = []
    gcc(cflags, ('-o', programFile), objectFiles)
if __name__ == '__main__':
    # Compile every source file to an object file, then link the objects
    # into the final executable.
    objectFiles = [compile(codeFile, cflags) for codeFile in codeFiles]
    link(programFile, objectFiles, cflags)
|
import os
import socket
import subprocess
import time
import unittest
import simplejson
import func.utils
from func import yaml
from func import jobthing
def structToYaml(data):
    """Serialize a Python data structure to a YAML string."""
    return yaml.dump(data)
def structToJSON(data):
    """Serialize a Python data structure to a JSON string.

    Bug fix: the original serialized the *builtin* ``input`` function
    instead of the ``data`` argument (a copy/paste slip), which made the
    helper useless.
    """
    return simplejson.dumps(data)
class BaseTest(object):
    """Common driver for func-transmit round-trip tests.

    Subclasses provide a serialization format by overriding _serialize /
    _deserialize and ft_cmd; the mixin test classes below then exercise
    func-transmit by piping serialized requests through the CLI tool.
    """
    # assume we are talking to localhost
    # th = socket.gethostname()
    th = socket.getfqdn()
    nforks=1
    async=False  # NOTE: Python 2 only -- 'async' is a reserved keyword in Python 3
    ft_cmd = "func-transmit"
    # just so we can change it easy later
    def _serialize(self, data):
        # Format-specific; implemented by YamlBaseTest / JSONBaseTest.
        raise NotImplementedError
    def _deserialize(self, buf):
        # Format-specific; implemented by YamlBaseTest / JSONBaseTest.
        raise NotImplementedError
    def _call_async(self, data):
        # Submit the call as an async job, then poll job_status every 250 ms
        # until the job reports finished, and return the job's result payload.
        data['async'] = True
        data['nforks'] = 4
        job_id = self._call(data)
        no_answer = True
        while (no_answer):
            out = self._call({'clients': '*',
                              'method':'job_status',
                              'parameters': job_id})
            if out[0] == jobthing.JOB_ID_FINISHED:
                no_answer = False
            else:
                time.sleep(.25)
        result = out[1]
        return result
    def _call(self, data):
        # Serialize the request, pipe it through the func-transmit CLI via
        # stdin, and deserialize whatever comes back on stdout.
        f = self._serialize(data)
        p = subprocess.Popen(self.ft_cmd, shell=True,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        output = p.communicate(input=f)
        return self._deserialize(output[0])
    def call(self, data):
        # Dispatch to the sync or async path depending on the class attr.
        if self.async:
            return self._call_async(data)
        return self._call(data)
    def __init__(self):
        pass
    # we do this all over the place...
    def assert_on_fault(self, result):
        # Fail the test if the reply for our host is a func fault object.
        assert func.utils.is_error(result[self.th]) == False
class YamlBaseTest(BaseTest):
    """BaseTest flavour that talks to func-transmit in YAML."""
    # i'd add the "yaml" attr here for nosetest to find, but it doesnt
    # seem to find it unless the class is a test class directly
    ft_cmd = "func-transmit --yaml"

    def _serialize(self, data):
        return yaml.dump(data)

    def _deserialize(self, buf):
        # yaml.load yields a document iterator; take the first document
        # (Python 2 style .next()).
        return yaml.load(buf).next()
class JSONBaseTest(BaseTest):
    """BaseTest flavour that talks to func-transmit in JSON."""
    ft_cmd = "func-transmit --json"

    def _serialize(self, data):
        return simplejson.dumps(data)

    def _deserialize(self, buf):
        return simplejson.loads(buf)
class ListMinion(object):
    """Mixin exercising list_minions with various client specifications."""

    def test_list_minions(self):
        self.call({'clients': '*', 'method': 'list_minions'})

    def test_list_minions_no_match(self):
        reply = self.call({'clients': 'somerandom-name-that-shouldnt-be-a_real_host_name',
                           'method': 'list_minions'})
        assert reply == []

    def test_list_minions_group_name(self):
        self.call({'clients': '@test', 'method': 'list_minions'})

    def test_list_minions_no_clients(self):
        self.call({'method': 'list_minions'})
class ListMinionAsync(ListMinion):
    """ListMinion tests issued through the async job interface."""
    async = True
class TestListMinionYaml(YamlBaseTest, ListMinion):
    """Concrete: ListMinion tests over the YAML serialization."""
    yaml = True
    def __init__(self):
        super(TestListMinionYaml, self).__init__()
class TestListMinionJSON(JSONBaseTest, ListMinion):
    """Concrete: ListMinion tests over the JSON serialization."""
    json = True
    def __init__(self):
        super(TestListMinionJSON, self).__init__()
class ClientGlob(object):
    """Mixin exercising the various client-targeting (glob) syntaxes."""

    def _test_add(self, client):
        """Invoke test.add(1, 2) against *client* and fail on any fault."""
        request = {'clients': client,
                   'method': 'add',
                   'module': 'test',
                   'parameters': [1, 2]}
        result = self.call(request)
        self.assert_on_fault(result)
        return result

    def test_single_client(self):
        self._test_add(self.th)

    def test_glob_client(self):
        self._test_add("*")

    def test_glob_list(self):
        self._test_add([self.th, self.th])

    def test_glob_string_list(self):
        self._test_add("%s;*" % self.th)

    # note, needs a /etc/func/group setup with the proper groups defined
    # need to figure out a good way to test this... -akl
    def test_group(self):
        self._test_add("@test")
class ClientGlobAsync(ClientGlob):
    """ClientGlob tests issued through the async job interface."""
    async = True
class TestClientGlobYaml(YamlBaseTest, ClientGlob):
    """Concrete: ClientGlob tests over the YAML serialization."""
    yaml = True
    def __init__(self):
        super(TestClientGlobYaml, self).__init__()
class TestClientGlobJSON(JSONBaseTest, ClientGlob):
    """Concrete: ClientGlob tests over the JSON serialization."""
    json = True
    def __init__(self):
        super(TestClientGlobJSON, self).__init__()
class TestClientGlobYamlAsync(YamlBaseTest, ClientGlobAsync):
    """Concrete: async ClientGlob tests over the YAML serialization."""
    yaml = True
    async = True
    def __init__(self):
        super(TestClientGlobYamlAsync, self).__init__()
class TestClientGlobJSONAsync(JSONBaseTest, ClientGlobAsync):
    """Concrete: async ClientGlob tests over the JSON serialization."""
    json = True
    async = True
    def __init__(self):
        super(TestClientGlobJSONAsync, self).__init__()
class T_estTest(object):
    """Mixin covering the 'test' module: add and echo round-trips.

    Named T_est* and marked __test__ = False, presumably so the test
    collector only runs it via the concrete subclasses below.
    """
    __test__ = False

    def _echo_test(self, data):
        """Echo *data* through func-transmit and verify it survives intact."""
        reply = self.call({'clients': '*',
                           'method': 'echo',
                           'module': 'test',
                           'parameters': [data]})
        self.assert_on_fault(reply)
        assert reply[self.th] == data

    def test_add(self):
        reply = self.call({'clients': '*',
                           'method': 'add',
                           'module': 'test',
                           'parameters': [1, 2]})
        assert reply[self.th] == 3

    def test_echo_int(self):
        self._echo_test(37)

    def test_echo_array(self):
        self._echo_test([1, 2, "three", "fore", "V"])

    def test_echo_hash(self):
        self._echo_test({'one': 1, 'two': 2, 'three': 3, 'four': "IV"})

    def test_echo_float(self):
        self._echo_test(1.0)

    # NOTE/FIXME: the big float tests fail for yaml and json
    def test_echo_big_float(self):
        self._echo_test(123121232.23)

    def test_echo_bigger_float(self):
        self._echo_test(234234234234234234234.234234234234234)

    def test_echo_little_float(self):
        self._echo_test(0.0000000000000000000000000000000000037)

    # Note/FIXME: these test currently fail for YAML
    def test_echo_boolean_true(self):
        self._echo_test(True)

    def test_echo_boolean_false(self):
        self._echo_test(False)
class T_estTestAsync(T_estTest):
    """Async variant of T_estTest; still excluded from direct collection."""
    __test__ = False
    async = True
class TestTestYaml(YamlBaseTest, T_estTest):
    """Concrete: T_estTest suite over the YAML serialization."""
    yaml = True

    def __init__(self):
        # Consistency fix: call super() with this class (was YamlBaseTest),
        # matching every other concrete test class in this file. With the
        # current MRO both forms resolve to BaseTest.__init__, so behavior
        # is unchanged.
        super(TestTestYaml, self).__init__()
class TestTestJSON(JSONBaseTest, T_estTest):
    """Concrete: T_estTest suite over the JSON serialization."""
    json = True

    def __init__(self):
        # Consistency fix: call super() with this class (was JSONBaseTest),
        # matching every other concrete test class in this file. With the
        # current MRO both forms resolve to BaseTest.__init__, so behavior
        # is unchanged.
        super(TestTestJSON, self).__init__()
class TestTestAsyncJSON(JSONBaseTest, T_estTestAsync):
json = True
async = True
def __init__(self):
super(JSONBaseTest,self).__init__()
class TestTestAsyncYaml(YamlBaseTest, T_estTestAsync):
yaml = True
async = True
def __init__(self):
super(YamlBaseTest,self).__init__()
|
__author__ = 'ryanplyler'
def sayhi(config):
    """Handle the 'sayhi' action.

    :param config: action configuration (currently unused).
    :return: tuple ``(server_output, response, error)`` — *error* is ``None``
             on success, ``1`` on failure (in which case the other two fields
             are ``None``).
    """
    # Bug fix: initialize the outputs up front.  Previously, if the try body
    # raised, 'server_output'/'response' were never bound and the return
    # statement itself raised NameError, defeating the error flag.
    server_output = None
    response = None
    error = None
    try:
        server_output = "Executing action 'sayhi()'"
        response = "HI THERE!"
    except Exception:
        # Keep the original contract: flag failure with error=1 rather than
        # propagating the exception (narrowed from a bare 'except:').
        error = 1
    return server_output, response, error
|
"""
EasyBuild support for building and installing MRtrix, implemented as an easyblock
"""
import glob
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.filetools import copy, symlink
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_MRtrix(EasyBlock):
    """Support for building/installing MRtrix."""
    def __init__(self, *args, **kwargs):
        """Initialize easyblock, enable build-in-installdir based on version."""
        super(EB_MRtrix, self).__init__(*args, **kwargs)
        # Versions in [0.3, 0.3.14) are built directly in the installation
        # directory (see install_step: there is no separate copy for them).
        if LooseVersion(self.version) >= LooseVersion('0.3') and LooseVersion(self.version) < LooseVersion('0.3.14'):
            self.build_in_installdir = True
            self.log.debug("Enabled build-in-installdir for version %s", self.version)
    def extract_step(self):
        """Extract MRtrix sources."""
        # strip off 'mrtrix*' part to avoid having everything in a 'mrtrix*' subdirectory
        if LooseVersion(self.version) >= LooseVersion('0.3'):
            self.cfg.update('unpack_options', '--strip-components=1')
        super(EB_MRtrix, self).extract_step()
    def configure_step(self):
        """Custom configuration step for MRtrix: set env vars, run its Python configure script."""
        if LooseVersion(self.version) >= LooseVersion('0.3'):
            if LooseVersion(self.version) < LooseVersion('0.3.13'):
                # older 0.3.x releases take link commands via $LD/$LDLIB templates
                env.setvar('LD', "%s LDFLAGS OBJECTS -o EXECUTABLE" % os.getenv('CXX'))
                env.setvar('LDLIB', "%s -shared LDLIB_FLAGS OBJECTS -o LIB" % os.getenv('CXX'))
            env.setvar('QMAKE_CXX', os.getenv('CXX'))
        # note: the configure script runs for *all* versions, only the
        # environment variables above are version-gated
        cmd = "python configure -verbose"
        run_cmd(cmd, log_all=True, simple=True, log_ok=True)
    def build_step(self):
        """Custom build procedure for MRtrix."""
        cmd = "python build -verbose"
        run_cmd(cmd, log_all=True, simple=True, log_ok=True)
    def install_step(self):
        """Custom install procedure for MRtrix."""
        if LooseVersion(self.version) < LooseVersion('0.3'):
            cmd = "python build -verbose install=%s linkto=" % self.installdir
            run_cmd(cmd, log_all=True, simple=True, log_ok=True)
        elif LooseVersion(self.version) >= LooseVersion('3.0'):
            # NOTE: this branch must precede the 0.3.14 one below, since any
            # 3.x version also satisfies >= 0.3.14
            copy(os.path.join(self.builddir, 'bin'), self.installdir)
            copy(os.path.join(self.builddir, 'lib'), self.installdir)
        elif LooseVersion(self.version) >= LooseVersion('0.3.14'):
            copy(glob.glob(os.path.join(self.builddir, 'release', '*')), self.installdir)
            copy(os.path.join(self.builddir, 'scripts'), self.installdir)
            # some scripts expect 'release/bin' to be there, so we put a symlink in place
            symlink(self.installdir, os.path.join(self.installdir, 'release'))
        # versions in [0.3, 0.3.14) were built in the installdir (see
        # __init__), so no install action is needed for them
    def make_module_req_guess(self):
        """
        Return list of subdirectories to consider to update environment variables;
        also consider 'scripts' subdirectory for $PATH
        """
        guesses = super(EB_MRtrix, self).make_module_req_guess()
        guesses['PATH'].append('scripts')
        if LooseVersion(self.version) >= LooseVersion('3.0'):
            # MRtrix 3.x ships a Python package under lib/
            guesses.setdefault('PYTHONPATH', []).append('lib')
        return guesses
    def sanity_check_step(self):
        """Custom sanity check for MRtrix."""
        shlib_ext = get_shared_lib_ext()
        # the shared library naming scheme changed with 0.3
        if LooseVersion(self.version) >= LooseVersion('0.3'):
            libso = 'libmrtrix.%s' % shlib_ext
        else:
            libso = 'libmrtrix-%s.%s' % ('_'.join(self.version.split('.')), shlib_ext)
        custom_paths = {
            'files': [os.path.join('lib', libso)],
            'dirs': ['bin'],
        }
        custom_commands = []
        if LooseVersion(self.version) >= LooseVersion('3.0'):
            # verify the mrtrix3 Python package is importable
            custom_commands.append("python -c 'import mrtrix3'")
        super(EB_MRtrix, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
|
"""
Virtualization test - Virtual disk related utility functions
:copyright: Red Hat Inc.
"""
import os
import glob
import shutil
import stat
import tempfile
import logging
import re
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from avocado.core import exceptions
from avocado.utils import process
from avocado.utils.service import SpecificServiceManager
from virttest import error_context
from virttest.compat_52lts import decode_to_text
DEBUG = False  # verbosity flag forwarded to process.run calls in this module
def copytree(src, dst, overwrite=True, ignore=''):
    """
    Copy dirs from source to target.

    :param src: source directory
    :param dst: destination directory
    :param overwrite: overwrite file if exist or not
    :param ignore: glob pattern (relative to the top of src) of files to skip
    """
    # Bug fix: glob returns full paths, but the old code compared them
    # against bare file names ('if _ in ignore'), so the ignore pattern
    # never matched anything.  Compare full source paths instead.
    ignored_paths = set(glob.glob(os.path.join(src, ignore)))
    for root, dirs, files in os.walk(src):
        # Map the source subdir onto the destination tree; replace only the
        # first occurrence so a path that contains 'src' twice stays intact.
        dst_dir = root.replace(src, dst, 1)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        for filename in files:
            src_file = os.path.join(root, filename)
            if src_file in ignored_paths:
                continue
            dst_file = os.path.join(dst_dir, filename)
            if os.path.exists(dst_file):
                if overwrite:
                    os.remove(dst_file)
                else:
                    continue
            shutil.copy(src_file, dst_dir)
def is_mount(src, dst=None, fstype=None, options=None, verbose=False,
             session=None):
    """
    Check whether src (and optionally dst/fstype) is listed in /proc/mounts.

    :param src: source device or directory
    :param dst: mountpoint, if None will skip to check
    :param fstype: file system type, if None will skip to check
    :param options: mount options separated by ","
    :param verbose: log what was (not) found
    :param session: check within the session if given
    :return: True if mounted, else return False
    """
    # Build the "<src> <dst> <fstype>" needle, dropping unspecified fields.
    needle = ("%s %s %s" % (src, dst, fstype)).replace('None', '').strip()
    list_cmd = 'cat /proc/mounts'
    if session:
        mounts_output = session.cmd_output_safe(list_cmd)
    else:
        mounts_output = decode_to_text(process.system_output(list_cmd, shell=True))
    if verbose:
        logging.debug("/proc/mounts contents:\n%s", mounts_output)
    for line in mounts_output.splitlines():
        if needle not in line:
            continue
        if options:
            # Field 4 of /proc/mounts holds the active option list.
            active_options = line.split()[3].split(",")
            for opt in options.split(","):
                if opt not in active_options:
                    if verbose:
                        logging.info("%s is not mounted with given"
                                     " option %s", src, opt)
                    return False
        if verbose:
            logging.info("%s is mounted", src)
        return True
    if verbose:
        logging.info("%s is not mounted", src)
    return False
def mount(src, dst, fstype=None, options=None, verbose=False, session=None):
    """
    Mount src under dst; if src is already mounted there, remount with options.

    :param src: source device or directory
    :param dst: mountpoint
    :param fstype: filesystem type need to mount
    :param options: mount options
    :param verbose: pass verbosity through to the command runner
    :param session: mount within the session if given
    :return: if mounted return True else return False
    """
    options = options or ''
    if is_mount(src, dst, fstype, options, verbose, session):
        # Already mounted: turn this into a remount with the given options.
        if 'remount' not in options:
            options = 'remount,%s' % options
    cmd_parts = ['mount']
    if fstype:
        cmd_parts += ['-t', fstype]
    if options:
        cmd_parts += ['-o', options]
    cmd_parts += [src, dst]
    cmd = ' '.join(cmd_parts)
    if session:
        return session.cmd_status(cmd, safe=True) == 0
    return process.system(cmd, verbose=verbose) == 0
def umount(src, dst, fstype=None, verbose=False, session=None):
    """
    Umount src from dst, but only if it is really mounted there.

    :param src: source device or directory
    :param dst: mountpoint
    :param fstype: fstype used to check if mounted as expected
    :param verbose: pass verbosity through to is_mount
    :param session: umount within the session if given
    :return: if unmounted return True else return False
    """
    if not is_mount(src, dst, fstype, verbose=verbose, session=session):
        # Nothing mounted there: report success.
        return True
    from . import utils_package
    package = "psmisc"
    # fuser lives in psmisc; try installing it if it is missing
    if not utils_package.package_install(package):
        logging.error("%s is not available/installed for fuser", package)
    fuser_cmd = "fuser -km %s" % dst
    umount_cmd = "umount %s" % dst
    if session:
        session.cmd_output_safe(fuser_cmd)
        return session.cmd_status(umount_cmd, safe=True) == 0
    process.system(fuser_cmd, ignore_status=True, verbose=True, shell=True)
    return process.system(umount_cmd, ignore_status=True, verbose=True) == 0
@error_context.context_aware
def cleanup(folder):
    """
    Best-effort unmount of *folder* (if it is a mountpoint), then remove it.

    :param folder: Directory to be cleaned up.
    """
    error_context.context(
        "cleaning up unattended install directory %s" % folder)
    umount(None, folder)
    if os.path.isdir(folder):
        shutil.rmtree(folder)
@error_context.context_aware
def clean_old_image(image):
    """
    Remove a leftover image file from a previous run, unmounting any file
    system it may still hold first.

    :param image: Path to image to be cleaned up.
    """
    error_context.context("cleaning up old leftover image %s" % image)
    if not os.path.exists(image):
        return
    umount(image, None)
    os.remove(image)
class Disk(object):
    """
    Abstract base for disk images: path bookkeeping plus the copy/close
    helpers shared by the concrete disk types.
    """

    def __init__(self):
        self.path = None

    def get_answer_file_path(self, filename):
        """Return the path of *filename* inside the mounted image."""
        return os.path.join(self.mount, filename)

    def copy_to(self, src):
        """Copy a file or a whole directory tree into the mounted image."""
        logging.debug("Copying %s to disk image mount", src)
        dst = os.path.join(self.mount, os.path.basename(src))
        if os.path.isdir(src):
            shutil.copytree(src, dst)
        elif os.path.isfile(src):
            shutil.copyfile(src, dst)

    def close(self):
        """Make the image world-readable/executable and clean the mountpoint."""
        mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
        os.chmod(self.path, mode)
        cleanup(self.mount)
        logging.debug("Disk %s successfully set", self.path)
class FloppyDisk(Disk):
    """
    Represents a floppy disk. We can copy files to it, and setup it in
    convenient ways.
    """
    @error_context.context_aware
    def __init__(self, path, qemu_img_binary, tmpdir, vfd_size):
        """
        Create a raw floppy image at *path* with an MS-DOS filesystem, plus a
        scratch mountpoint used to stage files before they are mcopy'd in.

        :param path: floppy image file to create (any old image is removed)
        :param qemu_img_binary: qemu-img executable used to create the image
        :param tmpdir: base directory for the scratch mountpoint
        :param vfd_size: image size passed to 'qemu-img create'
        """
        error_context.context(
            "Creating unattended install floppy image %s" % path)
        self.mount = tempfile.mkdtemp(prefix='floppy_virttest_', dir=tmpdir)
        self.path = path
        self.vfd_size = vfd_size
        clean_old_image(path)
        try:
            c_cmd = '%s create -f raw %s %s' % (qemu_img_binary, path,
                                                self.vfd_size)
            process.run(c_cmd, verbose=DEBUG)
            # format the raw image with a FAT filesystem (1 sector/cluster)
            f_cmd = 'mkfs.msdos -s 1 %s' % path
            process.run(f_cmd, verbose=DEBUG)
        except process.CmdError as e:
            # creation failed: remove the scratch dir before re-raising
            logging.error("Error during floppy initialization: %s" % e)
            cleanup(self.mount)
            raise
    def close(self):
        """
        Copy everything that is in the mountpoint to the floppy.
        """
        pwd = os.getcwd()
        try:
            os.chdir(self.mount)
            path_list = glob.glob('*')
            for path in path_list:
                self.copy_to(path)
        finally:
            os.chdir(pwd)
        cleanup(self.mount)
    def copy_to(self, src):
        """Copy *src* (file or directory) onto the floppy image via mcopy."""
        logging.debug("Copying %s to floppy image", src)
        mcopy_cmd = "mcopy -s -o -n -i %s %s ::/" % (self.path, src)
        process.run(mcopy_cmd, verbose=DEBUG)
    def _copy_virtio_drivers(self, virtio_floppy):
        """
        Copy the virtio drivers on the virtio floppy to the install floppy.
        1) Mount the floppy containing the viostor drivers
        2) Copy its contents to the root of the install floppy
        """
        pwd = os.getcwd()
        try:
            # mcopy extracts the virtio floppy's contents into our mountpoint
            m_cmd = 'mcopy -s -o -n -i %s ::/* %s' % (
                virtio_floppy, self.mount)
            process.run(m_cmd, verbose=DEBUG)
        finally:
            os.chdir(pwd)
    def setup_virtio_win2003(self, virtio_floppy, virtio_oemsetup_id):
        """
        Setup the install floppy with the virtio storage drivers, win2003 style.
        Win2003 and WinXP depend on the file txtsetup.oem file to install
        the virtio drivers from the floppy, which is a .ini file.
        Process:
        1) Copy the virtio drivers on the virtio floppy to the install floppy
        2) Parse the ini file with config parser
        3) Modify the identifier of the default session that is going to be
           executed on the config parser object
        4) Re-write the config file to the disk
        """
        self._copy_virtio_drivers(virtio_floppy)
        txtsetup_oem = os.path.join(self.mount, 'txtsetup.oem')
        if not os.path.isfile(txtsetup_oem):
            raise IOError('File txtsetup.oem not found on the install '
                          'floppy. Please verify if your floppy virtio '
                          'driver image has this file')
        parser = ConfigParser.ConfigParser()
        parser.read(txtsetup_oem)
        if not parser.has_section('Defaults'):
            raise ValueError('File txtsetup.oem does not have the session '
                             '"Defaults". Please check txtsetup.oem')
        default_driver = parser.get('Defaults', 'SCSI')
        if default_driver != virtio_oemsetup_id:
            # point the default SCSI driver at the requested virtio id
            parser.set('Defaults', 'SCSI', virtio_oemsetup_id)
            fp = open(txtsetup_oem, 'w')
            parser.write(fp)
            fp.close()
    def setup_virtio_win2008(self, virtio_floppy):
        """
        Setup the install floppy with the virtio storage drivers, win2008 style.
        Win2008, Vista and 7 require people to point out the path to the drivers
        on the unattended file, so we just need to copy the drivers to the
        driver floppy disk. Important to note that it's possible to specify
        drivers from a CDROM, so the floppy driver copy is optional.
        Process:
        1) Copy the virtio drivers on the virtio floppy to the install floppy,
           if there is one available
        """
        if os.path.isfile(virtio_floppy):
            self._copy_virtio_drivers(virtio_floppy)
        else:
            logging.debug(
                "No virtio floppy present, not needed for this OS anyway")
class CdromDisk(Disk):
    """
    Represents a CDROM disk that we can master according to our needs.
    """
    def __init__(self, path, tmpdir):
        # Scratch directory whose contents are mastered into the ISO on close().
        self.mount = tempfile.mkdtemp(prefix='cdrom_virttest_', dir=tmpdir)
        self.tmpdir = tmpdir
        self.path = path
        clean_old_image(path)
        if not os.path.isdir(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
    def _copy_virtio_drivers(self, virtio_floppy, cdrom_virtio):
        """
        Copy the virtio drivers from floppy and cdrom to install cdrom.
        1) Mount the floppy and cdrom containing the virtio drivers
        2) Copy its contents to the root of the install cdrom
        """
        pwd = os.getcwd()
        mnt_pnt = tempfile.mkdtemp(prefix='cdrom_virtio_', dir=self.tmpdir)
        mount(cdrom_virtio, mnt_pnt, options='loop,ro', verbose=DEBUG)
        try:
            # copy the cdrom contents, skipping floppy images (*.vfd)
            copytree(mnt_pnt, self.mount, ignore='*.vfd')
            cmd = 'mcopy -s -o -n -i %s ::/* %s' % (virtio_floppy, self.mount)
            process.run(cmd, verbose=DEBUG)
        finally:
            os.chdir(pwd)
            umount(None, mnt_pnt, verbose=DEBUG)
            os.rmdir(mnt_pnt)
    def setup_virtio_win2008(self, virtio_floppy, cdrom_virtio):
        """
        Setup the install cdrom with the virtio storage drivers, win2008 style.
        Win2008, Vista and 7 require people to point out the path to the drivers
        on the unattended file, so we just need to copy the drivers to the
        extra cdrom disk. Important to note that it's possible to specify
        drivers from a CDROM, so the floppy driver copy is optional.
        Process:
        1) Copy the virtio drivers on the virtio floppy to the install cdrom,
           if there is one available
        """
        if os.path.isfile(virtio_floppy):
            self._copy_virtio_drivers(virtio_floppy, cdrom_virtio)
        else:
            logging.debug(
                "No virtio floppy present, not needed for this OS anyway")
    @error_context.context_aware
    def close(self):
        """Master the staged files into the ISO image and clean up."""
        error_context.context(
            "Creating unattended install CD image %s" % self.path)
        g_cmd = ('mkisofs -o %s -max-iso9660-filenames '
                 '-relaxed-filenames -D --input-charset iso8859-1 '
                 '%s' % (self.path, self.mount))
        process.run(g_cmd, verbose=DEBUG)
        os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                 stat.S_IROTH | stat.S_IXOTH)
        cleanup(self.mount)
        logging.debug("unattended install CD image %s successfully created",
                      self.path)
class CdromInstallDisk(Disk):
    """
    Represents a install CDROM disk that we can master according to our needs.
    """
    def __init__(self, path, tmpdir, source_cdrom, extra_params):
        # Stage the source cdrom into a scratch dir: isolinux/ is copied for
        # real (it gets rewritten in close()), everything else is symlinked.
        self.mount = tempfile.mkdtemp(prefix='cdrom_unattended_', dir=tmpdir)
        self.path = path
        self.extra_params = extra_params
        self.source_cdrom = source_cdrom
        cleanup(path)
        if not os.path.isdir(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        cp_cmd = ('cp -r %s/isolinux/ %s/' % (source_cdrom, self.mount))
        listdir = os.listdir(self.source_cdrom)
        for i in listdir:
            if i == 'isolinux':
                continue
            os.symlink(os.path.join(self.source_cdrom, i),
                       os.path.join(self.mount, i))
        process.run(cp_cmd)
    def get_answer_file_path(self, filename):
        # Answer files live next to the bootloader config under isolinux/.
        return os.path.join(self.mount, 'isolinux', filename)
    @error_context.context_aware
    def close(self):
        """Write the isolinux boot config (if bootable) and master the ISO."""
        error_context.context(
            "Creating unattended install CD image %s" % self.path)
        if os.path.exists(os.path.join(self.mount, 'isolinux')):
            # bootable cdrom
            f = open(os.path.join(self.mount, 'isolinux', 'isolinux.cfg'), 'w')
            f.write('default /isolinux/vmlinuz append initrd=/isolinux/'
                    'initrd.img %s\n' % self.extra_params)
            f.close()
            boot = '-b isolinux/isolinux.bin'
        else:
            # Not a bootable CDROM, using -kernel instead (eg.: arm64)
            boot = ''
        m_cmd = ('mkisofs -o %s %s -c isolinux/boot.cat -no-emul-boot '
                 '-boot-load-size 4 -boot-info-table -f -R -J -V -T %s'
                 % (self.path, boot, self.mount))
        process.run(m_cmd)
        os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                 stat.S_IROTH | stat.S_IXOTH)
        cleanup(self.mount)
        cleanup(self.source_cdrom)
        logging.debug("unattended install CD image %s successfully created",
                      self.path)
class GuestFSModiDisk(object):
"""
class of guest disk using guestfs lib to do some operation(like read/write)
on guest disk:
"""
    def __init__(self, disk, backend='direct'):
        """
        :params disk: target disk image.
        :params backend: let libguestfs creates/connects to backend daemon
                         by starting qemu directly, or using libvirt to manage
                         an appliance, running User-Mode Linux, or connecting
                         to an already running daemon.
                         'direct', 'appliance', 'libvirt', 'libvirt:null',
                         'libvirt:URI', 'uml', 'unix:path'.
        """
        try:
            import guestfs
        except ImportError:
            # guestfs bindings missing: try a best-effort yum install, and
            # skip the test if that fails too
            install_cmd = "yum -y install python-libguestfs"
            try:
                process.run(install_cmd)
                import guestfs
            except Exception:
                raise exceptions.TestSkipError('We need python-libguestfs (or '
                                               'the equivalent for your '
                                               'distro) for this particular '
                                               'feature (modifying guest '
                                               'files with libguestfs)')
        self.g = guestfs.GuestFS()
        self.disk = disk
        self.g.add_drive(disk)
        self.g.set_backend(backend)
        # libguestfs may use libvirt under the hood; make sure libvirtd runs
        libvirtd = SpecificServiceManager("libvirtd")
        libvirtd_status = libvirtd.status()
        if libvirtd_status is None:
            raise exceptions.TestError('libvirtd: service not found')
        if (not libvirtd_status) and (not libvirtd.start()):
            raise exceptions.TestError('libvirtd: failed to start')
        logging.debug("Launch the disk %s, wait..." % self.disk)
        self.g.launch()
def os_inspects(self):
self.roots = self.g.inspect_os()
if self.roots:
return self.roots
else:
return None
def mounts(self):
return self.g.mounts()
def mount_all(self):
def compare(a, b):
if len(a[0]) > len(b[0]):
return 1
elif len(a[0]) == len(b[0]):
return 0
else:
return -1
roots = self.os_inspects()
if roots:
for root in roots:
mps = self.g.inspect_get_mountpoints(root)
mps.sort(compare)
for mp_dev in mps:
try:
msg = "Mount dev '%s' partitions '%s' to '%s'"
logging.info(msg % (root, mp_dev[1], mp_dev[0]))
self.g.mount(mp_dev[1], mp_dev[0])
except RuntimeError as err_msg:
logging.info("%s (ignored)" % err_msg)
else:
raise exceptions.TestError(
"inspect_vm: no operating systems found")
def umount_all(self):
logging.debug("Umount all device partitions")
if self.mounts():
self.g.umount_all()
def read_file(self, file_name):
"""
read file from the guest disk, return the content of the file
:param file_name: the file you want to read.
"""
try:
self.mount_all()
o = self.g.cat(file_name)
if o:
return o
else:
err_msg = "Can't read file '%s', check is it exist?"
raise exceptions.TestError(err_msg % file_name)
finally:
self.umount_all()
def write_to_image_file(self, file_name, content, w_append=False):
"""
Write content to the file on the guest disk.
When using this method all the original content will be overriding.
if you don't hope your original data be override set ``w_append=True``.
:param file_name: the file you want to write
:param content: the content you want to write.
:param w_append: append the content or override
"""
try:
try:
self.mount_all()
if w_append:
self.g.write_append(file_name, content)
else:
self.g.write(file_name, content)
except Exception:
raise exceptions.TestError("write '%s' to file '%s' error!"
% (content, file_name))
finally:
self.umount_all()
def replace_image_file_content(self, file_name, find_con, rep_con):
"""
replace file content matches in the file with rep_con.
support using Regular expression
:param file_name: the file you want to replace
:param find_con: the original content you want to replace.
:param rep_con: the replace content you want.
"""
try:
self.mount_all()
file_content = self.g.cat(file_name)
if file_content:
file_content_after_replace = re.sub(find_con, rep_con,
file_content)
if file_content != file_content_after_replace:
self.g.write(file_name, file_content_after_replace)
else:
err_msg = "Can't read file '%s', check is it exist?"
raise exceptions.TestError(err_msg % file_name)
finally:
self.umount_all()
def close(self):
"""
Explicitly close the guestfs handle.
"""
if self.g:
self.g.close()
|
from unittest import TestCase
from cStringIO import StringIO
from sys import exc_info
from enkel.wansgli.apprunner import run_app, AppError, Response
from enkel.wansgli.testhelpers import unit_case_suite, run_suite
# Expected response prefixes (status line + content-type header) that the
# test apps below should produce.
HEAD = "HTTP/1.1 200 OK\r\ncontent-type: text/plain\r\n"
ERRHEAD = "HTTP/1.1 500 ERROR\r\ncontent-type: text/plain\r\n"
def only_header_app(env, start_response):
    """WSGI app that sends headers only; the body iterable is empty."""
    status = "200 OK"
    start_response(status, [("Content-type", "text/plain")])
    return []
def simple_app(env, start_response):
    """Minimal WSGI app returning a single plain-text chunk."""
    status = "200 OK"
    start_response(status, [("Content-type", "text/plain")])
    return ["Simple app"]
def using_write_app(env, start_response):
    """WSGI app exercising the legacy write callable only."""
    status = "200 OK"
    write = start_response(status, [("Content-type", "text/plain")])
    write("Using write")
    return []
def mixing_write_app(env, start_response):
    """WSGI app mixing the legacy write callable with the iterable body."""
    write = start_response("200 OK", [("Content-type", "text/plain")])
    write("Mixing write... ")
    return [" ...and iterator."]
def double_response_error_app(env, start_response):
    """WSGI app that calls start_response twice WITHOUT exc_info.

    Calling start_response again before headers are sent is only legal
    when a traceback is supplied, so this must produce an error; the same
    applies after the headers have gone out.
    """
    for status in ("200 OK", "500 ERROR"):
        start_response(status, [("Content-type", "text/plain")])
    return []
def double_response_ok_app(env, start_response):
    """WSGI app re-issuing the status WITH exc_info after an error.

    The second start_response call happens before headers hit the wire
    and carries a live traceback, so it must be accepted.
    """
    start_response("200 OK", [("Content-type", "text/plain")])
    try:
        int("jeje")
    except ValueError:
        # Supplying exc_info makes the second call legal.
        start_response("500 ERROR",
                       [("Content-type", "text/plain")],
                       exc_info())
    return []
class DoubleResponseErrInResponse(object):
    """ WSGI app for testing the situation when an error occurs AFTER
    HTTP headers are sent to the browser and a traceback is supplied.
    Should re-raise the ValueError raised when "four" is sent to the
    int function.
    """
    def __init__(self, env, start_response):
        start_response("200 OK", [("Content-type", "text/plain")])
        # Mixed data on purpose: int("four") below raises ValueError
        # part-way through iteration, i.e. after headers are sent.
        self.it = [1, "2", 3, "four", 5, "6"].__iter__()
        self.start_response = start_response
    def __iter__(self):
        for d in self.it:
            try:
                yield str(int(d)) # will fail on "four"
            except ValueError:
                # Headers are already out; per WSGI, calling start_response
                # with exc_info now must make the server re-raise the error.
                self.start_response("500 ERROR",
                                    [("Content-type", "text/plain")],
                                    exc_info())
def noiter_app(env, start_response):
    """Deliberately broken WSGI app: returns an int instead of an
    iterable, which should make the runner raise AppError."""
    start_response("200 OK", [("Content-type", "text/plain")])
    return 10  # not an iterator
def override_defaultheader(env, start_response):
    """WSGI app overriding the default "server" HTTP header.

    Exactly one "server" header, carrying the value below, should reach
    the client.
    """
    headers = [
        ("Content-type", "text/plain"),
        ("Server", "xxx"),
    ]
    start_response("200 OK", headers)
    return []
class TestApprunner(TestCase):
    """ Tests the entire apprunner module. """
    def setUp(self):
        # Fresh in-memory output buffer and Response wrapper per test.
        self.buf = StringIO()
        self.env = dict(SERVER_PROTOCOL="HTTP/1.1")
        self.sr = Response(self.buf, self.env)
    def test_only_header(self):
        # Headers only, no body.
        run_app(only_header_app, self.sr)
        b = self.buf.getvalue()
        self.assert_(b.startswith(HEAD))
    def test_simple(self):
        # Body supplied via the returned iterable.
        run_app(simple_app, self.sr)
        b = self.buf.getvalue()
        self.assert_(b.startswith(HEAD))
        self.assert_(b.endswith("Simple app"))
    def test_using_write(self):
        # Body supplied via the write callable.
        run_app(using_write_app, self.sr)
        b = self.buf.getvalue()
        self.assert_(b.startswith(HEAD))
        self.assert_(b.endswith("Using write"))
    def test_mixing_write(self):
        # write() output must precede the iterable's output.
        run_app(mixing_write_app, self.sr)
        b = self.buf.getvalue()
        self.assert_(b.startswith(HEAD))
        self.assert_(b.endswith("Mixing write... ...and iterator."))
    def test_double_response_error(self):
        # Second start_response without exc_info -> AppError.
        self.assertRaises(AppError, run_app,
                          double_response_error_app, self.sr)
    def test_double_response_ok(self):
        # Second start_response with exc_info before headers sent -> OK.
        run_app(double_response_ok_app, self.sr)
        b = self.buf.getvalue()
        self.assert_(b.startswith(ERRHEAD))
    def testDoubleResponseErrInResponse(self):
        # exc_info after headers sent -> original error is re-raised.
        self.assertRaises(ValueError, run_app,
                          DoubleResponseErrInResponse, self.sr)
    def test_noiter(self):
        # Non-iterable app return value -> AppError.
        self.assertRaises(AppError, run_app,
                          noiter_app, self.sr)
def suite():
    """Return the unittest suite covering TestApprunner."""
    return unit_case_suite(TestApprunner)
if __name__ == '__main__':
    # Allow running this test module directly.
    run_suite(suite())
|
from gimpfu import *
import time
import re
def preview (image, delay, loops, force_delay, ignore_hidden, restore_hide):
    """Play the layers of *image* in sequence to preview a GIF animation.

    :param image: GIMP image whose layers are the animation frames.
    :param delay: default frame length in milliseconds, used when a layer
        name carries no "(NNNms)" marker or when force_delay is set.
    :param loops: number of passes through the frames.
    :param force_delay: ignore per-layer "(NNNms)" markers entirely.
    :param ignore_hidden: skip layers that are currently hidden.
    :param restore_hide: re-show originally visible layers afterwards.
    :raises ValueError: if no image is given.
    """
    if not image:
        # Was `raise "No image given."` -- string exceptions are invalid
        # (TypeError at raise time); raise a real exception instead.
        raise ValueError("No image given.")
    layers = image.layers
    nlayers = len(layers)
    visible = []
    length = []
    for i in range(nlayers):
        # Remember current visibility, then hide everything so frames can
        # be revealed one at a time.
        visible.append(pdb.gimp_item_get_visible(layers[i]))
        if visible[i]:
            pdb.gimp_item_set_visible(layers[i], False)
        name = pdb.gimp_item_get_name(layers[i])
        frame_ms = None
        if not force_delay:
            # GIMP convention: a layer named e.g. "frame (250ms)" carries
            # its own duration.
            m = re.search(r"\([0-9]+ms\)", name)
            if m:
                # Strip the surrounding "(" and "ms)" to keep the digits.
                frame_ms = name[m.start() + 1:m.end() - 3]
        if not frame_ms:
            frame_ms = delay
        length.append(float(frame_ms) / 1000.0)
    for loop_no in range(loops):
        # Reveal layers bottom-to-top, pausing for each frame's duration.
        for i in reversed(range(nlayers)):
            if (not ignore_hidden) or visible[i]:
                pdb.gimp_item_set_visible(layers[i], True)
                pdb.gimp_displays_flush()
                time.sleep(length[i])
        if loop_no + 1 < loops:
            # Hide everything again before the next pass (optimized GIFs
            # would otherwise keep stacking combined frames).
            for i in range(nlayers):
                if (not ignore_hidden) or visible[i]:
                    pdb.gimp_item_set_visible(layers[i], False)
    if restore_hide:
        # Restore visibility for the layers that were visible on entry.
        for i in reversed(range(nlayers)):
            if visible[i]:
                pdb.gimp_item_set_visible(layers[i], True)
# Register the preview function with GIMP as
# "Filters > Animation > Preview...".
register(
    "preview",
    "preview",
    "Preview the animation of a gif",
    "Roger Bongers",
    "Roger Bongers",
    "2016",
    "Preview...",
    "*",
    [
        # (type, name, description, default)
        (PF_IMAGE, "image", "The image to modify", None),
        (PF_INT32, "delay", "The default length in ms of each frame", 100),
        (PF_INT32, "loops", "The number of times to loop the animation", 1),
        (PF_BOOL, "force-delay", "Force the default length on every frame", 0),
        (PF_BOOL, "ignore-hidden", "Ignore currently hidden items", 0),
        (PF_BOOL, "restore-hide", "Restore the hidden status after preview", 0),
    ],
    [],
    preview,
    menu = "<Image>/Filters/Animation")
# Hand control to the GIMP plug-in main loop.
main()
|
from __future__ import print_function
from Components.Task import PythonTask, Task, Job, job_manager as JobManager
from Tools.Directories import fileExists
from enigma import eTimer
from os import path
from shutil import rmtree, copy2, move
class DeleteFolderTask(PythonTask):
    """Background task that recursively deletes a directory tree."""
    def openFiles(self, fileList):
        # Despite the name, fileList is the single path handed to rmtree().
        self.fileList = fileList
    def work(self):
        print("[DeleteFolderTask] files ", self.fileList)
        errors = []
        try:
            rmtree(self.fileList)
        except Exception as e:
            # Collect the failure and report it after the attempt.
            errors.append(e)
        if errors:
            raise errors[0]
class CopyFileJob(Job):
    """Job wrapping a recursive shell copy of srcfile to destfile."""
    def __init__(self, srcfile, destfile, name):
        Job.__init__(self, _("Copying files"))
        command = 'cp -Rf "%s" "%s"' % (srcfile, destfile)
        AddFileProcessTask(self, command, srcfile, destfile, name)
class MoveFileJob(Job):
    """Job wrapping a forced shell move of srcfile to destfile."""
    def __init__(self, srcfile, destfile, name):
        Job.__init__(self, _("Moving files"))
        command = 'mv -f "%s" "%s"' % (srcfile, destfile)
        AddFileProcessTask(self, command, srcfile, destfile, name)
class AddFileProcessTask(Task):
    """Task that runs a shell copy/move command while polling the size of
    the destination file every 5 seconds to report progress."""
    def __init__(self, job, cmdline, srcfile, destfile, name):
        Task.__init__(self, job, name)
        self.setCmdline(cmdline)
        self.srcfile = srcfile
        self.destfile = destfile
        # Source size is filled in by prepare(); start at 0 so that
        # ProgressUpdate() never touches an undefined attribute if the
        # timer fires before (or without) a successful prepare().
        self.srcsize = 0
        self.ProgressTimer = eTimer()
        self.ProgressTimer.callback.append(self.ProgressUpdate)
    def ProgressUpdate(self):
        # Nothing to report until the source size is known and the
        # destination file exists.
        if self.srcsize <= 0 or not fileExists(self.destfile, 'r'):
            return
        self.setProgress(int((path.getsize(self.destfile)/float(self.srcsize))*100))
        # Re-arm as a single-shot timer for the next poll.
        self.ProgressTimer.start(5000, True)
    def prepare(self):
        if fileExists(self.srcfile, 'r'):
            self.srcsize = path.getsize(self.srcfile)
            self.ProgressTimer.start(5000, True)
    def afterRun(self):
        self.setProgress(100)
        self.ProgressTimer.stop()
def copyFiles(fileList, name):
    """Copy each (src, dst) pair; directories and files above ~100 MB are
    handed to the JobManager so they copy in the background."""
    for src, dst in fileList:
        use_job = path.isdir(src) or int(path.getsize(src))/1000/1000 > 100
        if use_job:
            JobManager.AddJob(CopyFileJob(src, dst, name))
        else:
            copy2(src, dst)
def moveFiles(fileList, name):
    """Move each (src, dst) pair; directories and files above ~100 MB are
    handed to the JobManager so they move in the background."""
    for src, dst in fileList:
        use_job = path.isdir(src) or int(path.getsize(src))/1000/1000 > 100
        if use_job:
            JobManager.AddJob(MoveFileJob(src, dst, name))
        else:
            move(src, dst)
def deleteFiles(fileList, name):
    """Queue an asynchronous deletion job for fileList."""
    delete_job = Job(_("Deleting files"))
    folder_task = DeleteFolderTask(delete_job, name)
    folder_task.openFiles(fileList)
    JobManager.AddJob(delete_job)
|
import sys
from cx_Freeze import setup, Executable
# Extra files bundled next to the frozen executables (DLLs, icons, docs).
includefiles = ['windows/libusb-1.0.dll',
                ('icons/buzzer.png', 'icons/buzzer.png'),
                'README.md',
                'LICENSE',
                'C:\\Windows\\SysWOW64\\msvcr110.dll']
excludes = []
packages = []
# Options for the cx_Freeze "build_exe" command.
buildOptions = {'packages': packages,
                'excludes': excludes,
                'include_files':includefiles
                }
# On Windows use the GUI base so no console window pops up.
base = None
if sys.platform == "win32":
    base = "Win32GUI"
executables = [
    Executable('pyPardy.py', base=base),
    Executable('pyPardyEdit.py', base=base)
]
setup(
    name='pyPardy',
    #long_description='',
    keywords='game jeopardy',
    version='0.2',
    author='Christian Wichmann',
    author_email='christian@freenono.org',
    packages=['data', 'gui'],
    url='',
    license='LICENSE',
    description='Jeopardy(tm) game system',
    platforms=['any'],
    classifiers=[
        'Intended Audience :: End Users/Desktop',
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
        'Operating System :: OS Independent',
        'Natural Language :: English',
        'Natural Language :: German',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Games/Entertainment',
    ],
    options=dict(build_exe=buildOptions),
    executables=executables, requires=['PyQt4', 'libusb1'],
    #data_files=[('libs', 'windows/libusb-1.0.dll'),
    #            ('icons', 'icons/buzzer.png')],
)
|
from PyQt4.QtCore import (QDate, QString, Qt, SIGNAL, pyqtSignature)
from PyQt4.QtGui import (QApplication, QDialog, QDialogButtonBox)
import moviedata_ans as moviedata
import ui_addeditmoviedlg_ans as ui_addeditmoviedlg
class AddEditMovieDlg(QDialog,
        ui_addeditmoviedlg.Ui_AddEditMovieDlg):
    """Dialog for adding a new movie or editing an existing one.

    When *movie* is None the dialog runs in "add" mode; otherwise it is
    pre-filled from *movie* and runs in "edit" mode.
    """
    def __init__(self, movies, movie=None, parent=None):
        super(AddEditMovieDlg, self).__init__(parent)
        self.setupUi(self)
        self.movies = movies
        self.movie = movie
        self.acquiredDateEdit.setDisplayFormat(moviedata.DATEFORMAT)
        if movie is not None:
            # Edit mode: populate widgets and freeze the acquired date.
            self.titleLineEdit.setText(movie.title)
            self.yearSpinBox.setValue(movie.year)
            self.minutesSpinBox.setValue(movie.minutes)
            self.acquiredDateEdit.setDate(movie.acquired)
            self.acquiredDateEdit.setEnabled(False)
            self.locationLineEdit.setText(movie.location)
            self.notesTextEdit.setPlainText(movie.notes)
            self.notesTextEdit.setFocus()
            self.buttonBox.button(QDialogButtonBox.Ok).setText(
                "&Accept")
            self.setWindowTitle("My Movies - Edit Movie")
        else:
            # Add mode: acquisition date restricted to the last five days.
            today = QDate.currentDate()
            self.acquiredDateEdit.setDateRange(today.addDays(-5),
                                               today)
            self.acquiredDateEdit.setDate(today)
            self.titleLineEdit.setFocus()
        # Sync the OK button's enabled state with the (possibly empty) title.
        self.on_titleLineEdit_textEdited(QString())
    @pyqtSignature("QString")
    def on_titleLineEdit_textEdited(self, text):
        # OK is available only while the title field is non-empty.
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(
            not self.titleLineEdit.text().isEmpty())
    def accept(self):
        """Persist the dialog contents into the movie collection."""
        title = self.titleLineEdit.text()
        year = self.yearSpinBox.value()
        minutes = self.minutesSpinBox.value()
        location = self.locationLineEdit.text()
        notes = self.notesTextEdit.toPlainText()
        if self.movie is None:
            acquired = self.acquiredDateEdit.date()
            self.movie = moviedata.Movie(title, year, minutes,
                                         acquired, location, notes)
            self.movies.add(self.movie)
        else:
            self.movies.updateMovie(self.movie, title, year,
                                    minutes, location, notes)
        QDialog.accept(self)
if __name__ == "__main__":
    # Manual smoke test: 0 stands in for the movie collection, so only the
    # dialog UI itself (not saving) can be exercised this way.
    import sys
    app = QApplication(sys.argv)
    form = AddEditMovieDlg(0)
    form.show()
    app.exec_()
|
"""
Copyright (C) 2014 smokdpi
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
from lib import jsunpack
from urlresolver import common
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
class UsersCloudResolver(Plugin, UrlResolver, PluginSettings):
    """urlresolver plugin for media hosted on userscloud.com."""
    implements = [UrlResolver, PluginSettings]
    name = "userscloud"
    domains = ["userscloud.com"]
    # Raw string so the regex backslashes are explicit and do not trigger
    # invalid-escape warnings on newer Python; the value is unchanged.
    pattern = r'(?://|\.)(userscloud\.com)/(?:embed-)?([0-9a-zA-Z/]+)'

    def __init__(self):
        # Priority is user-configurable; fall back to 100 when unset.
        p = self.get_setting('priority') or 100
        self.priority = int(p)
        self.net = Net()
        # Masquerade as IE; the host may serve different markup otherwise.
        self.user_agent = common.IE_USER_AGENT
        self.net.set_user_agent(self.user_agent)
        self.headers = {'User-Agent': self.user_agent}

    def get_media_url(self, host, media_id):
        """Resolve and return the playable stream URL.

        Raises UrlResolver.ResolverError when no stream can be found.
        """
        web_url = self.get_url(host, media_id)
        stream_url = None
        self.headers['Referer'] = web_url
        html = self.net.http_GET(web_url, headers=self.headers).content
        # The stream location is hidden inside packed p,a,c,k,e,d JS.
        r = re.search(r'>(eval\(function\(p,a,c,k,e,d\).+?)</script>', html, re.DOTALL)
        if r:
            js_data = jsunpack.unpack(r.group(1))
            stream_url = re.findall(r'<param\s+name="src"\s*value="([^"]+)', js_data)
            stream_url += re.findall(r'file\s*:\s*[\'|\"](.+?)[\'|\"]', js_data)
            # Drop subtitle files; we only want the media stream itself.
            stream_url = [i for i in stream_url if not i.endswith('.srt')]
            if stream_url:
                return stream_url[0]
        raise UrlResolver.ResolverError('File not found')

    def get_url(self, host, media_id):
        """Build the canonical page URL for a media id."""
        return 'https://%s/%s' % (host, media_id)

    def get_host_and_id(self, url):
        """Return (host, media_id) parsed from url, or False on no match."""
        r = re.search(self.pattern, url)
        if r:
            return r.groups()
        else:
            return False

    def valid_url(self, url, host):
        return re.search(self.pattern, url) or self.name in host
|
'''
test_qgsatlascomposition.py
--------------------------------------
Date : Oct 2012
Copyright : (C) 2012 by Dr. Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis # NOQA
import os
import glob
import shutil
import tempfile
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
from qgis.PyQt.QtCore import QFileInfo, QRectF, qWarning
from qgis.core import (
QgsCategorizedSymbolRenderer,
QgsComposerLabel,
QgsComposerLegend,
QgsComposerMap,
QgsComposition,
QgsCoordinateReferenceSystem,
QgsFeature,
QgsFillSymbol,
QgsFontUtils,
QgsGeometry,
QgsMarkerSymbol,
QgsPointXY,
QgsProject,
QgsRectangle,
QgsRendererCategory,
QgsSingleSymbolRenderer,
QgsVectorLayer,
)
from qgscompositionchecker import QgsCompositionChecker
# Spin up the QGIS application environment once for the whole module.
start_app()
class TestQgsAtlasComposition(unittest.TestCase):
    """Exercises atlas composition rendering through one big scenario.

    testCase() builds the composition (coverage layer, atlas map, overview,
    labels) and then invokes the sub-scenarios in sequence.
    NOTE(review): sorting_render_test and filtering_render_test are defined
    but never called from testCase() -- confirm whether that is intended.
    """
    def testCase(self):
        """Build the full atlas composition, then run each sub-scenario."""
        self.TEST_DATA_DIR = unitTestDataPath()
        # Work on a throwaway copy of the france_parts fixture.
        tmppath = tempfile.mkdtemp()
        for file in glob.glob(os.path.join(self.TEST_DATA_DIR, 'france_parts.*')):
            shutil.copy(os.path.join(self.TEST_DATA_DIR, file), tmppath)
        vectorFileInfo = QFileInfo(tmppath + "/france_parts.shp")
        mVectorLayer = QgsVectorLayer(vectorFileInfo.filePath(), vectorFileInfo.completeBaseName(), "ogr")
        QgsProject.instance().addMapLayers([mVectorLayer])
        self.layers = [mVectorLayer]
        # create composition with composer map
        # select epsg:2154
        crs = QgsCoordinateReferenceSystem()
        crs.createFromSrid(2154)
        QgsProject.instance().setCrs(crs)
        self.mComposition = QgsComposition(QgsProject.instance())
        self.mComposition.setPaperSize(297, 210)
        # fix the renderer, fill with green
        props = {"color": "0,127,0"}
        fillSymbol = QgsFillSymbol.createSimple(props)
        renderer = QgsSingleSymbolRenderer(fillSymbol)
        mVectorLayer.setRenderer(renderer)
        # the atlas map
        self.mAtlasMap = QgsComposerMap(self.mComposition, 20, 20, 130, 130)
        self.mAtlasMap.setFrameEnabled(True)
        self.mAtlasMap.setLayers([mVectorLayer])
        self.mComposition.addComposerMap(self.mAtlasMap)
        # the atlas
        self.mAtlas = self.mComposition.atlasComposition()
        self.mAtlas.setCoverageLayer(mVectorLayer)
        self.mAtlas.setEnabled(True)
        self.mComposition.setAtlasMode(QgsComposition.ExportAtlas)
        # an overview
        self.mOverview = QgsComposerMap(self.mComposition, 180, 20, 50, 50)
        self.mOverview.setFrameEnabled(True)
        self.mOverview.overview().setFrameMap(self.mAtlasMap.id())
        self.mOverview.setLayers([mVectorLayer])
        self.mComposition.addComposerMap(self.mOverview)
        nextent = QgsRectangle(49670.718, 6415139.086, 699672.519, 7065140.887)
        self.mOverview.setNewExtent(nextent)
        # set the fill symbol of the overview map
        props2 = {"color": "127,0,0,127"}
        fillSymbol2 = QgsFillSymbol.createSimple(props2)
        self.mOverview.overview().setFrameSymbol(fillSymbol2)
        # header label
        self.mLabel1 = QgsComposerLabel(self.mComposition)
        self.mComposition.addComposerLabel(self.mLabel1)
        self.mLabel1.setText("[% \"NAME_1\" %] area")
        self.mLabel1.setFont(QgsFontUtils.getStandardTestFont())
        self.mLabel1.adjustSizeToText()
        self.mLabel1.setSceneRect(QRectF(150, 5, 60, 15))
        qWarning(
            "header label font: %s exactMatch:%s" % (self.mLabel1.font().toString(), self.mLabel1.font().exactMatch()))
        # feature number label
        self.mLabel2 = QgsComposerLabel(self.mComposition)
        self.mComposition.addComposerLabel(self.mLabel2)
        self.mLabel2.setText("# [%@atlas_featurenumber || ' / ' || @atlas_totalfeatures%]")
        self.mLabel2.setFont(QgsFontUtils.getStandardTestFont())
        self.mLabel2.adjustSizeToText()
        self.mLabel2.setSceneRect(QRectF(150, 200, 60, 15))
        qWarning("feature number label font: %s exactMatch:%s" % (
            self.mLabel2.font().toString(), self.mLabel2.font().exactMatch()))
        # Run the sub-scenarios against the composition built above.
        self.filename_test()
        self.autoscale_render_test()
        self.fixedscale_render_test()
        self.predefinedscales_render_test()
        self.hidden_render_test()
        self.legend_test()
        self.rotation_test()
        shutil.rmtree(tmppath, True)
    def filename_test(self):
        """Output filenames follow the filename expression per feature."""
        self.mAtlas.setFilenamePattern("'output_' || @atlas_featurenumber")
        self.mAtlas.beginRender()
        for i in range(0, self.mAtlas.numFeatures()):
            self.mAtlas.prepareForFeature(i)
            expected = "output_%d" % (i + 1)
            self.assertEqual(self.mAtlas.currentFilename(), expected)
        self.mAtlas.endRender()
    def autoscale_render_test(self):
        """Render with auto scaling and a 10% margin, then restore."""
        self.mAtlasMap.setAtlasDriven(True)
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Auto)
        self.mAtlasMap.setAtlasMargin(0.10)
        self.mAtlas.beginRender()
        for i in range(0, 2):
            self.mAtlas.prepareForFeature(i)
            self.mLabel1.adjustSizeToText()
            checker = QgsCompositionChecker('atlas_autoscale%d' % (i + 1), self.mComposition)
            checker.setControlPathPrefix("atlas")
            myTestResult, myMessage = checker.testComposition(0, 200)
            assert myTestResult
        self.mAtlas.endRender()
        self.mAtlasMap.setAtlasDriven(False)
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed)
        self.mAtlasMap.setAtlasMargin(0)
    def fixedscale_render_test(self):
        """Render with a fixed extent/scale."""
        self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620))
        self.mAtlasMap.setAtlasDriven(True)
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed)
        self.mAtlas.beginRender()
        for i in range(0, 2):
            self.mAtlas.prepareForFeature(i)
            self.mLabel1.adjustSizeToText()
            checker = QgsCompositionChecker('atlas_fixedscale%d' % (i + 1), self.mComposition)
            checker.setControlPathPrefix("atlas")
            myTestResult, myMessage = checker.testComposition(0, 200)
            assert myTestResult
        self.mAtlas.endRender()
    def predefinedscales_render_test(self):
        """Render using a list of predefined scales."""
        self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620))
        self.mAtlasMap.setAtlasDriven(True)
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Predefined)
        scales = [1800000, 5000000]
        self.mAtlas.setPredefinedScales(scales)
        for i, s in enumerate(self.mAtlas.predefinedScales()):
            assert s == scales[i]
        self.mAtlas.beginRender()
        for i in range(0, 2):
            self.mAtlas.prepareForFeature(i)
            self.mLabel1.adjustSizeToText()
            checker = QgsCompositionChecker('atlas_predefinedscales%d' % (i + 1), self.mComposition)
            checker.setControlPathPrefix("atlas")
            myTestResult, myMessage = checker.testComposition(0, 200)
            assert myTestResult
        self.mAtlas.endRender()
    def hidden_render_test(self):
        """Render with the coverage layer hidden, then restore it."""
        self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620))
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed)
        self.mAtlas.setHideCoverage(True)
        self.mAtlas.beginRender()
        for i in range(0, 2):
            self.mAtlas.prepareForFeature(i)
            self.mLabel1.adjustSizeToText()
            checker = QgsCompositionChecker('atlas_hiding%d' % (i + 1), self.mComposition)
            checker.setControlPathPrefix("atlas")
            myTestResult, myMessage = checker.testComposition(0, 200)
            assert myTestResult
        self.mAtlas.endRender()
        self.mAtlas.setHideCoverage(False)
    def sorting_render_test(self):
        """Render with descending sort on an attribute (not run by testCase)."""
        self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620))
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed)
        self.mAtlas.setHideCoverage(False)
        self.mAtlas.setSortFeatures(True)
        self.mAtlas.setSortKeyAttributeIndex(4) # departement name
        self.mAtlas.setSortAscending(False)
        self.mAtlas.beginRender()
        for i in range(0, 2):
            self.mAtlas.prepareForFeature(i)
            self.mLabel1.adjustSizeToText()
            checker = QgsCompositionChecker('atlas_sorting%d' % (i + 1), self.mComposition)
            checker.setControlPathPrefix("atlas")
            myTestResult, myMessage = checker.testComposition(0, 200)
            assert myTestResult
        self.mAtlas.endRender()
    def filtering_render_test(self):
        """Render with a feature filter expression (not run by testCase)."""
        self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620))
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed)
        self.mAtlas.setHideCoverage(False)
        self.mAtlas.setSortFeatures(False)
        self.mAtlas.setFilterFeatures(True)
        self.mAtlas.setFeatureFilter("substr(NAME_1,1,1)='P'") # select only 'Pays de la loire'
        self.mAtlas.beginRender()
        for i in range(0, 1):
            self.mAtlas.prepareForFeature(i)
            self.mLabel1.adjustSizeToText()
            checker = QgsCompositionChecker('atlas_filtering%d' % (i + 1), self.mComposition)
            checker.setControlPathPrefix("atlas")
            myTestResult, myMessage = checker.testComposition(0, 200)
            assert myTestResult
        self.mAtlas.endRender()
    def legend_test(self):
        """Render with a legend filtered by the atlas feature."""
        self.mAtlasMap.setAtlasDriven(True)
        self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Auto)
        self.mAtlasMap.setAtlasMargin(0.10)
        # add a point layer
        ptLayer = QgsVectorLayer("Point?crs=epsg:4326&field=attr:int(1)&field=label:string(20)", "points", "memory")
        pr = ptLayer.dataProvider()
        f1 = QgsFeature(1)
        f1.initAttributes(2)
        f1.setAttribute(0, 1)
        f1.setAttribute(1, "Test label 1")
        f1.setGeometry(QgsGeometry.fromPoint(QgsPointXY(-0.638, 48.954)))
        f2 = QgsFeature(2)
        f2.initAttributes(2)
        f2.setAttribute(0, 2)
        f2.setAttribute(1, "Test label 2")
        f2.setGeometry(QgsGeometry.fromPoint(QgsPointXY(-1.682, 48.550)))
        pr.addFeatures([f1, f2])
        # categorized symbology
        r = QgsCategorizedSymbolRenderer("attr", [QgsRendererCategory(1, QgsMarkerSymbol.createSimple({"color": "255,0,0"}), "red"),
                                                  QgsRendererCategory(2, QgsMarkerSymbol.createSimple({"color": "0,0,255"}), "blue")])
        ptLayer.setRenderer(r)
        QgsProject.instance().addMapLayer(ptLayer)
        # add the point layer to the map settings
        layers = self.layers
        layers = [ptLayer] + layers
        self.mAtlasMap.setLayers(layers)
        self.mOverview.setLayers(layers)
        # add a legend
        legend = QgsComposerLegend(self.mComposition)
        legend.moveBy(200, 100)
        # sets the legend filter parameter
        legend.setComposerMap(self.mAtlasMap)
        legend.setLegendFilterOutAtlas(True)
        self.mComposition.addComposerLegend(legend)
        self.mAtlas.beginRender()
        self.mAtlas.prepareForFeature(0)
        self.mLabel1.adjustSizeToText()
        checker = QgsCompositionChecker('atlas_legend', self.mComposition)
        myTestResult, myMessage = checker.testComposition()
        assert myTestResult
        self.mAtlas.endRender()
        # restore state
        self.mAtlasMap.setLayers([layers[1]])
        self.mComposition.removeComposerItem(legend)
        QgsProject.instance().removeMapLayer(ptLayer.id())
    def rotation_test(self):
        # We will create a polygon layer with a rotated rectangle.
        # Then we will make it the object layer for the atlas,
        # rotate the map and test that the bounding rectangle
        # is smaller than the bounds without rotation.
        polygonLayer = QgsVectorLayer('Polygon', 'test_polygon', 'memory')
        poly = QgsFeature(polygonLayer.pendingFields())
        points = [(10, 15), (15, 10), (45, 40), (40, 45)]
        poly.setGeometry(QgsGeometry.fromPolygon([[QgsPointXY(x[0], x[1]) for x in points]]))
        polygonLayer.dataProvider().addFeatures([poly])
        QgsProject.instance().addMapLayer(polygonLayer)
        # Recreating the composer locally
        composition = QgsComposition(QgsProject.instance())
        composition.setPaperSize(297, 210)
        # the atlas map
        atlasMap = QgsComposerMap(composition, 20, 20, 130, 130)
        atlasMap.setFrameEnabled(True)
        atlasMap.setLayers([polygonLayer])
        atlasMap.setNewExtent(QgsRectangle(0, 0, 100, 50))
        composition.addComposerMap(atlasMap)
        # the atlas
        atlas = composition.atlasComposition()
        atlas.setCoverageLayer(polygonLayer)
        atlas.setEnabled(True)
        composition.setAtlasMode(QgsComposition.ExportAtlas)
        atlasMap.setAtlasDriven(True)
        atlasMap.setAtlasScalingMode(QgsComposerMap.Auto)
        atlasMap.setAtlasMargin(0.0)
        # Testing
        atlasMap.setMapRotation(0.0)
        atlas.firstFeature()
        nonRotatedExtent = QgsRectangle(atlasMap.currentMapExtent())
        atlasMap.setMapRotation(45.0)
        atlas.firstFeature()
        rotatedExtent = QgsRectangle(atlasMap.currentMapExtent())
        # A 45-degree rotation should shrink the auto-fitted extent.
        assert rotatedExtent.width() < nonRotatedExtent.width() * 0.9
        assert rotatedExtent.height() < nonRotatedExtent.height() * 0.9
        QgsProject.instance().removeMapLayer(polygonLayer)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# Print an a-row, b-column "snake" pattern (Python 2: raw_input):
# even rows are solid '#', odd rows carry a single '#' alternating
# between the right and the left edge.
a, b = map(int, raw_input().split())
for row in range(a):
    if row % 2 == 0:
        line = '#' * b
    elif (row / 2) % 2 == 0:
        # First, third, ... odd row: hook on the right edge.
        line = '.' * (b - 1) + '#'
    else:
        # Second, fourth, ... odd row: hook on the left edge.
        line = '#' + '.' * (b - 1)
    print(line)
|
import simplegui
import math
import random
# Canvas dimensions.
WIDTH = 800
HEIGHT = 600
# Mutable game state.
score = 0
lives = 3
time = 0
game_mode = 0 # 0 = splash screen, 1 = game mode, 2 = game over
# Tuning constants for ship handling and object spawning.
ANGULAR_ACCEL_SCALAR = math.pi / 800.0
ANGULAR_FRICTION = 0.95
LINEAR_ACCEL_SCALAR = 0.25
LINEAR_FRICTION = 0.99
RANDOM_VEL_MAX = 4.0
RANDOM_VEL_MIN = 0.5
RANDOM_ANG_MAX = math.pi / 100.0
BULLET_VEL = 10
SMALL_ROCK_SPEED = 3
class ImageInfo:
    """Describes one sprite-sheet image: center, size, collision radius,
    lifespan (in frames) and whether it is an animated sheet."""
    def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
        self.center = center
        self.size = size
        self.radius = radius
        # A missing/zero lifespan means the sprite never expires.
        self.lifespan = lifespan if lifespan else float('inf')
        self.animated = animated
    def get_center(self):
        return self.center
    def get_size(self):
        return self.size
    def get_radius(self):
        return self.radius
    def get_lifespan(self):
        return self.lifespan
    def get_animated(self):
        return self.animated
# Art and sound assets, loaded over HTTP through the CodeSkulptor
# simplegui API. Each ImageInfo mirrors the corresponding image's layout.
debris_info = ImageInfo([320, 240], [640, 480])
debris_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
nebula_info = ImageInfo([400, 300], [800, 600])
nebula_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png")
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
ship_info = ImageInfo([45, 45], [90, 90], 35)
ship_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
missile_info = ImageInfo([5,5], [10, 10], 3, 75)
missile_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
asteroid_info = ImageInfo([45, 45], [90, 90], 40)
asteroid_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
explosion_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
soundtrack = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
missile_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
missile_sound.set_volume(.5)
ship_thrust_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
explosion_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
extra_life_sound = simplegui.load_sound("http://mwales.net/junk/SFX_Pickup_44.mp3")
def angle_to_vector(ang):
    """Return the unit direction vector for angle *ang* in radians."""
    x_part = math.cos(ang)
    y_part = math.sin(ang)
    return [x_part, y_part]
def vector_to_angle(v):
    """Return atan2 of the vector's components.

    NOTE(review): arguments are (v[0], v[1]), i.e. atan2(x, y) — the
    reverse of the usual atan2(y, x) convention; preserved because the
    callers in this file rely on it.
    """
    first, second = v[0], v[1]
    return math.atan2(first, second)
def vector_scale(vec, scale):
    """Return a new 2-vector: *vec* scaled component-wise by *scale*."""
    sx = vec[0] * scale
    sy = vec[1] * scale
    return [sx, sy]
def vector_add(vec1, vec2):
    """Return the component-wise sum of two 2-vectors."""
    ax, ay = vec1[0], vec1[1]
    bx, by = vec2[0], vec2[1]
    return [ax + bx, ay + by]
def dist(p, q):
    """Return the Euclidean distance between points *p* and *q*."""
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def smallRockExplode(rockInstance):
    """Return a small (0.3x) explosion sprite centered on the given rock."""
    spin = random.random() * 2 * math.pi
    return Sprite(rockInstance.get_position(),
                  (0, 0),
                  spin,
                  0,
                  explosion_image,
                  explosion_info,
                  explosion_sound,
                  relSize = 0.3)
def rockExplode(rockInstance, deathBullet):
    """Explode a large rock: spawn four half-size child rocks into the
    global smallRockList and return the explosion sprite to draw.

    The children fly out at 90-degree intervals, offset 45 degrees from
    the killing bullet's heading, and inherit the parent's velocity.
    """
    explodeObj = Sprite(rockInstance.get_position(),
                        (0, 0),
                        random.random() * 2 * math.pi,
                        0,
                        explosion_image,
                        explosion_info,
                        explosion_sound)
    # Heading of the bullet that destroyed the rock, rotated 45 degrees.
    bulletAngle = vector_to_angle(deathBullet.get_velocity())
    smallRockAngle = bulletAngle + 45.0 / 360.0 * math.pi * 2.0
    for i in range(0, 4):
        smallRockAngle += math.pi / 2.0
        smallRockVel = angle_to_vector(smallRockAngle)
        smallRockVel = vector_scale(smallRockVel, SMALL_ROCK_SPEED)
        smallRockVel = vector_add(smallRockVel, rockInstance.get_velocity())
        # Bug fix: the original used "* 4.0 - RANDOM_ANG_MAX", giving the
        # asymmetric spin range [-MAX, 3*MAX]; use "* 2.0" for the
        # symmetric [-MAX, MAX] range, matching rock_spawner.
        randomAngVel = random.random() * RANDOM_ANG_MAX * 2.0 - RANDOM_ANG_MAX
        smallRock = Sprite(rockInstance.get_position(),
                           smallRockVel,
                           random.random() * 2 * math.pi,
                           randomAngVel,
                           asteroid_image,
                           asteroid_info,
                           relSize = 0.5)
        smallRockList.append(smallRock)
    return explodeObj
class Ship:
def __init__(self, pos, vel, angle, image, info, bulletTimer):
self.pos = [pos[0],pos[1]]
self.vel = [vel[0],vel[1]]
self.thrust = False
self.angle = angle
self.angle_vel = 0
self.angle_acc = 0
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
self.bullet_timer = bulletTimer
self.spawn_bullets = False
self.bullets = []
self.bullet_type = 0
self.weapon_name = {}
self.weapon_name[0] = "Speed Shot"
self.weapon_name[1] = "Spread Shot"
self.weapon_name[2] = "Power Shot"
def get_weapon_name(self):
return self.weapon_name[self.bullet_type]
def draw(self,canvas):
if self.thrust:
canvas.draw_image(self.image,
(self.image_center[0] + self.image_size[0], self.image_center[1]),
self.image_size,
self.pos,
self.image_size,
self.angle)
else:
canvas.draw_image(self.image,
self.image_center,
self.image_size,
self.pos,
self.image_size,
self.angle)
for singleBullets in self.bullets:
singleBullets.draw(canvas)
def update(self):
self.pos = vector_add(self.pos, self.vel)
# Position should wrap around the screen
self.pos = [self.pos[0] % WIDTH, self.pos[1] % HEIGHT]
# Handle ship thrust
if self.thrust:
accel = angle_to_vector(self.angle)
accel = vector_scale(accel, LINEAR_ACCEL_SCALAR)
self.vel = vector_add(self.vel, accel)
# Friction against motion
self.vel = vector_scale(self.vel, LINEAR_FRICTION)
self.angle = self.angle + self.angle_vel
self.angle_vel = self.angle_vel + self.angle_acc
self.angle_vel = self.angle_vel * ANGULAR_FRICTION
oldBullets = []
for singleBullets in self.bullets:
if singleBullets.update():
oldBullets.append(singleBullets)
for bulletToDelete in oldBullets:
self.bullets.remove(bulletToDelete)
def process_collisions(self, rockList, smallRockList, explosionList):
global score, lives, extra_life_sound
# Don't change containers while looping through them
shipExplodes = False
rockListCopy = rockList
bulletListCopy = self.bullets
for singleRock in rockListCopy:
for singleBullet in bulletListCopy:
# Collisions of bullets and rocks
if singleBullet.collide(singleRock):
# delete the bullet
self.bullets.remove(singleBullet)
# delete and explode the rock
if singleRock in rockList:
rockList.remove(singleRock)
explosionList.append(rockExplode(singleRock, singleBullet))
print "Rock goes boom"
# increase score , 1-up consideration
self.scorePoint()
# Collisions of rock and ship
if singleRock.collide(self):
#print "Ship goes boom"
shipExplodes = True
smallRockListCopy = smallRockList
bulletListCopy = self.bullets
for singleSmallRock in smallRockListCopy:
for singleBullet in bulletListCopy:
if singleBullet.collide(singleSmallRock):
# delete the bullet
self.bullets.remove(singleBullet)
# delete and explode the rock
if singleSmallRock in smallRockList:
smallRockList.remove(singleSmallRock)
explosionList.append(smallRockExplode(singleSmallRock))
print "Small Rock goes boom"
# increase score , 1-up consideration
self.scorePoint()
# Collisions of rock and ship
if singleSmallRock.collide(self):
#print "Ship goes boom"
shipExplodes = True
if shipExplodes:
self.attemptRespawn(rockList, explosionList)
def scorePoint(self):
global lives, score
score += 1
if ((score % 100) == 0):
print "1-up"
lives += 1
extra_life_sound.rewind()
extra_life_sound.play()
def attemptRespawn(self, rockList, explosionList):
global lives
lives -= 1
if (lives == 0):
game_over()
return
# Find a safe spot to respawn
bestLocation = []
bestLocationClosestRock = 0
for respawnX in range( int(WIDTH / 10), int(WIDTH * .9), 10):
for respawnY in range( int(HEIGHT / 10), int(HEIGHT * .9), 10):
closestRock = WIDTH * HEIGHT
potentialLocation = [respawnX, respawnY]
# Determine at this location how close closest rock is
for singleRock in rockList:
distFromRock = dist(potentialLocation, singleRock.get_position())
if (distFromRock < closestRock):
closestRock = distFromRock
for singleRock in smallRockList:
distFromRock = dist(potentialLocation, singleRock.get_position())
if (distFromRock < closestRock):
closestRock = distFromRock
# If the closest rock is farther away than other locations, use this location
if (closestRock > bestLocationClosestRock):
bestLocationClosestRock = closestRock
bestLocation = potentialLocation
# Move ship to new location
shipExplosion = Sprite(self.pos,
(0,0),
random.random() * 2 * math.pi,
0,
explosion_image,
explosion_info,
explosion_sound,
relSize = 3.0)
explosionList.append(shipExplosion)
self.pos = bestLocation
self.vel = [0,0]
self.angle_vel = 0
# Just pass in -1 to rotate right, +1 to rotate left
def rotate(self, angularAcceleration):
self.angle_acc = angularAcceleration * ANGULAR_ACCEL_SCALAR
#print "Alpha =" + str(self.angle_acc)
# Just pass in True to thrust, False to not thrust
def setThrust(self, thrustBool):
global ship_thrust_sound
self.thrust = thrustBool
if thrustBool:
ship_thrust_sound.rewind()
ship_thrust_sound.play()
else:
ship_thrust_sound.pause()
def startShooting(self):
self.spawn_bullets = True;
self.bullet_timer.start()
self.spawn_bullet()
def stopShooting(self):
self.spawn_bullets = False
self.bullet_timer.stop()
def change_bullet_type(self):
self.bullet_type = (self.bullet_type + 1) % 3
def set_bullet_type(self, bulletType):
self.bullet_type = bulletType % 3
def get_bullet_type(self):
return self.bullet_type
def spawn_bullet(self):
if (self.bullet_type == 0):
# speed shot
self.make_bullet()
elif (self.bullet_type == 1):
# spread
self.make_bullet(relSpeed=0.5)
self.make_bullet(relAngle=-math.pi * 2 * 30.0 / 360.0,
relSpeed=0.5)
self.make_bullet(relAngle=math.pi * 2 * 30.0 / 360.0,
relSpeed=0.5)
else:
# big bullet
self.make_bullet(relSpeed=0.25,
relSize=3.0,
relLifetime=5.0)
curDirection = angle_to_vector(self.angle)
recoil = vector_scale(curDirection, -1.0)
self.vel = vector_add(self.vel, recoil)
def make_bullet(self, relAngle=0, relSpeed=1.0, relSize=1.0, relLifetime=1.0):
global missle_sound
bulletPos = angle_to_vector(self.angle)
bulletPos = vector_scale(bulletPos, self.image_size[0] / 2)
bulletPos = vector_add(self.pos, bulletPos)
bulletVel = angle_to_vector(self.angle + relAngle)
bulletVel = vector_scale(bulletVel, BULLET_VEL * relSpeed)
bulletVel = vector_add(bulletVel, self.vel)
bulletObj = Sprite(bulletPos,
bulletVel,
self.angle,
0,
missile_image,
missile_info,
missile_sound,
relSize,
relLifetime)
self.bullets.append(bulletObj)
def get_position(self):
return self.pos
def reset(self):
self.pos = [WIDTH / 2, HEIGHT / 2]
self.vel = [0,0]
self.angle = 0
self.bullets = []
def get_radius(self):
return self.radius
def get_velocity(self):
return self.vel
class Sprite:
    """A drifting game object (rock, bullet or explosion).

    Plays *sound* once on creation when given.  relSize scales both the
    drawn size and the collision radius; relLifetime scales how many
    ticks the sprite lives before update() reports it as expired.
    """

    def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None, relSize=1.0, relLifetime=1.0):
        # Copy position/velocity so the caller's sequences are not aliased.
        self.pos = [pos[0], pos[1]]
        self.vel = [vel[0], vel[1]]
        self.angle = ang
        self.angle_vel = ang_vel
        self.image = image
        self.image_center = info.get_center()
        self.image_size = info.get_size()
        self.draw_size = vector_scale(self.image_size, relSize)
        self.radius = info.get_radius() * relSize
        self.lifespan = info.get_lifespan() * relLifetime
        self.animated = info.get_animated()
        self.age = 0
        if sound:
            sound.rewind()
            sound.play()

    def draw(self, canvas):
        """Draw the sprite; animated sheets advance one frame per tick of age."""
        if self.animated:
            frameCenter = vector_add(self.image_center, [self.image_size[0] * self.age, 0])
            canvas.draw_image(self.image,
                              frameCenter,
                              self.image_size,
                              self.pos,
                              self.draw_size,
                              self.angle)
        else:
            canvas.draw_image(self.image,
                              self.image_center,
                              self.image_size,
                              self.pos,
                              self.draw_size,
                              self.angle)

    def update(self):
        """Move one tick (wrapping at screen edges); return True when expired."""
        # Bug fix: removed a stray 'pass' statement that headed this method.
        self.pos = vector_add(self.pos, self.vel)
        self.pos = [self.pos[0] % WIDTH, self.pos[1] % HEIGHT]
        self.angle = self.angle + self.angle_vel
        self.age += 1
        return (self.age > self.lifespan)

    def collide(self, otherObject):
        """Return True if this sprite overlaps *otherObject* (circle test).

        The 0.9 factor shrinks the combined radii slightly so collisions
        feel fair with the roughly circular artwork.
        """
        currentDistOfCenters = dist(otherObject.get_position(), self.pos)
        minSafeDistance = (otherObject.get_radius() + self.radius) * 0.9
        return (currentDistOfCenters < minSafeDistance)

    def get_position(self):
        """Return the sprite's [x, y] position."""
        return self.pos

    def get_radius(self):
        """Return the scaled collision radius."""
        return self.radius

    def get_velocity(self):
        """Return the sprite's [vx, vy] velocity."""
        return self.vel
def process_sprites(canvas):
    """Draw and update the ship, rocks and explosions for one frame."""
    global explodeList
    # draw ship and sprites
    my_ship.draw(canvas)
    for singleRock in rockList:
        singleRock.draw(canvas)
    for smallRock in smallRockList:
        smallRock.draw(canvas)
    # update ship and sprites
    my_ship.update()
    for singleRock in rockList:
        singleRock.update()
    for smallRock in smallRockList:
        smallRock.update()
    # Bug fix: the original iterated an alias (splodeCopy = explodeList)
    # while removing from explodeList, which skips the element following
    # each removal.  Iterate a real shallow copy instead.
    for singleSplosion in list(explodeList):
        singleSplosion.draw(canvas)
        if singleSplosion.update():
            explodeList.remove(singleSplosion)
    my_ship.process_collisions(rockList, smallRockList, explodeList)
def draw(canvas):
    """Main draw handler: animated background, game sprites, then HUD text."""
    global time
    # animate the scrolling debris background
    time += 1
    wtime = (time / 4) % WIDTH
    center = debris_info.get_center()
    size = debris_info.get_size()
    canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])
    # two copies of the debris layer give a seamless horizontal wrap
    canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
    canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
    # game_mode: 0 = splash screen, 1 = playing, 2 = game over
    if game_mode == 1:
        process_sprites(canvas)
    # Score/lives/weapon HUD stays visible both in game and on game over.
    if ( (game_mode == 1) or (game_mode == 2) ):
        canvas.draw_text("Score: " + str(score),
                         (WIDTH - 250,60),
                         30,
                         'White')
        canvas.draw_text("Lives: " + str(lives),
                         (150,60),
                         30,
                         'White')
        canvas.draw_text("Weapon: " + my_ship.get_weapon_name(),
                         (WIDTH-400, HEIGHT - 50),
                         25,
                         'White',
                         'monospace')
    if game_mode == 0:
        canvas.draw_image(splash_image,
                          splash_info.get_center(),
                          splash_info.get_size(),
                          [WIDTH / 2, HEIGHT / 2],
                          splash_info.get_size())
def rock_spawner(recurseDepth = 10):
global rockList
if (len(rockList) > 12):
print "Too many rocks"
return
randomX = random.choice(range(0, WIDTH))
randomY = random.choice(range(0, HEIGHT))
#print "Rock + " + str(recurseDepth) + " dist = " + str(dist(my_ship.get_position(), [randomX, randomY]))
if (dist(my_ship.get_position(), [randomX, randomY]) < 150):
print "too close for a rock"
if recurseDepth == 0:
return
else:
rock_spawner(recurseDepth - 1)
return
randomVel = angle_to_vector(random.random() * math.pi * 2.0)
randomVel = vector_scale(randomVel, random.random() * (RANDOM_VEL_MAX - RANDOM_VEL_MIN) + RANDOM_VEL_MIN)
randomAngVel = random.random() * RANDOM_ANG_MAX * 2.0 - RANDOM_ANG_MAX
#print "Spawn rock: [" + str(randomX) + "," + str(randomY) + "] v=" + \
# str(randomVel) + " Alpha=" + str(randomAngVel)
spawnRock = Sprite([randomX, randomY],
randomVel,
random.random() * math.pi * 2.0,
randomAngVel,
asteroid_image,
asteroid_info)
rockList.append(spawnRock)
def bullet_spawner():
    """Timer handler: fire the ship's current weapon once.

    (Dropped the original's 'global my_ship' declaration -- it is
    unnecessary for a read-only reference to a module-level name.)
    """
    my_ship.spawn_bullet()
def key_down_handler(key):
    """Keydown handler: steer/fire while playing, start or leave menus otherwise."""
    global my_ship, game_mode
    key_map = simplegui.KEY_MAP
    if game_mode == 1:
        if key in (key_map['left'], key_map['a']):
            my_ship.rotate(-1)
        elif key in (key_map['right'], key_map['d']):
            my_ship.rotate(1)
        elif key in (key_map['up'], key_map['w']):
            my_ship.setThrust(True)
        elif key in (key_map['down'], key_map['s']):
            pass
        elif key == key_map['space']:
            my_ship.startShooting()
        elif key in (key_map['1'], key_map['2'], key_map['3']):
            # Weapon hot keys act on key-up, not key-down.
            pass
    elif game_mode == 0:
        if key == key_map['space']:
            start_game()
    elif key == key_map['space']:
        # Game-over screen: space returns to the splash screen.
        game_mode = 0
def key_up_handler(key):
    """Keyup handler: stop rotation/thrust/fire, switch weapons."""
    global my_ship
    key_map = simplegui.KEY_MAP
    if key in (key_map['left'], key_map['a'], key_map['right'], key_map['d']):
        my_ship.rotate(0)
    elif key in (key_map['up'], key_map['w']):
        my_ship.setThrust(False)
    elif key in (key_map['down'], key_map['s']):
        my_ship.change_bullet_type()
    elif key == key_map['space']:
        my_ship.stopShooting()
    elif key == key_map['1']:
        my_ship.set_bullet_type(0)
    elif key == key_map['2']:
        my_ship.set_bullet_type(1)
    elif key == key_map['3']:
        my_ship.set_bullet_type(2)
def game_over():
    """End the game: clear the field, stop rock spawning, pause the music."""
    global my_ship, rockList, smallRockList, timer, game_mode, soundtrack
    rockList = []
    smallRockList = []
    timer.stop()
    # game_mode 2 keeps the score/lives HUD visible on the game-over screen.
    game_mode = 2
    soundtrack.pause()
def start_game():
    """Begin a new game: reset the ship, restart spawning, music and counters."""
    global timer, game_mode, lives, score, soundtrack
    my_ship.reset()
    timer.start()
    game_mode = 1
    lives = 3
    score = 0
    soundtrack.rewind()
    soundtrack.play()
def mouse_handler(position):
    """Mouse-click handler: a click on the splash screen starts the game."""
    if (game_mode == 0):
        start_game()
# --- Frame, handlers and game-state initialization ---
frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
frame.set_keydown_handler(key_down_handler)
frame.set_keyup_handler(key_up_handler)
frame.set_mouseclick_handler(mouse_handler)
frame.add_label("A/D or Left/Right to rotate")
frame.add_label("W or Up to thrust")
frame.add_label("S or Down to change weapon")
frame.add_label("1,2,3 are weapon hot key")
# Repeat-fire timer: runs only while the fire key is held (see Ship).
bulletSpawnerTimer = simplegui.create_timer(200, bullet_spawner)
my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], math.pi, ship_image, ship_info, bulletSpawnerTimer)
rockList = []
smallRockList = []
explodeList = []
frame.set_draw_handler(draw)
# Spawn one rock per second while a game is running.
timer = simplegui.create_timer(1000.0, rock_spawner)
frame.start()
|
"""
AUTHOR: Peter Collins, 2005.
This software is Copyright (C) 2004-2008 Bristol University
and is released under the GNU General Public License version 2.
MODULE: RunHill
PURPOSE:
A sample setup and configuration for the normalization algorithms.
NOTES:
See RunConfig.py for configuration options
"""
import sys
import RunConfig
# Normalization degree; may be overridden by the first CLI argument.
degree = 6
if len(sys.argv)>1:
    degree = int(sys.argv[1])
config = { "tolerance" : 5.0e-14 , "degree" : degree , "system" : "Hill" ,
           "do_stream" : False ,
           "compute_diagonalisation" : True ,
           "run_normal_form_python" : False ,
           "run_normal_form_cpp" : True }
# First pass: compute the diagonalisation and run the C++ normal form.
RunConfig.NfConfig(config).run_examp()
# Second pass: reuse the diagonalisation and run the Python implementation
# (skipped for degree >= 7 -- presumably too slow; TODO confirm).
config["compute_diagonalisation"] = False
config["run_normal_form_python"] = True
config["run_normal_form_cpp"] = False
if degree < 7:
    RunConfig.NfConfig(config).run_examp()
|
from Plugins.Plugin import PluginDescriptor
from Screens.PluginBrowser import *
from Screens.Ipkg import Ipkg
from Screens.HarddiskSetup import HarddiskSetup
from Components.ProgressBar import ProgressBar
from Components.SelectionList import SelectionList
from Screens.NetworkSetup import *
from enigma import *
from Screens.Standby import *
from Screens.LogManager import *
from Screens.MessageBox import MessageBox
from Plugins.SystemPlugins.SoftwareManager.Flash_online import FlashOnline
from Components.ActionMap import ActionMap, NumberActionMap, HelpableActionMap
from Screens.Screen import Screen
from Screens.TaskView import JobView
from Components.Task import Task, Job, job_manager, Condition
from GlobalActions import globalActionMap
from Screens.ChoiceBox import ChoiceBox
from Tools.BoundFunction import boundFunction
from Tools.LoadPixmap import LoadPixmap
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN, SCOPE_PLUGINS
from Components.MenuList import MenuList
from Components.FileList import FileList
from Components.Label import Label
from Components.ScrollLabel import ScrollLabel
from Components.Pixmap import Pixmap
from Components.config import ConfigSubsection, ConfigInteger, ConfigText, getConfigListEntry, ConfigSelection, ConfigIP, ConfigYesNo, ConfigSequence, ConfigNumber, NoSave, ConfigEnableDisable, configfile
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Button import Button
from Components.ActionMap import ActionMap
from Components.SystemInfo import SystemInfo
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from OPENDROID.OscamSmartcard import *
from enigma import eConsoleAppContainer
from Tools.Directories import fileExists
from Tools.Downloader import downloadWithProgress
from boxbranding import getBoxType, getMachineName, getMachineBrand, getBrandOEM
from enigma import getDesktop
from Screens.InputBox import PinInput
import string
from random import Random
import os
import sys
import re, string
font = 'Regular;16'
import ServiceReference
import time
import datetime
# Handle to the currently shown panel instance (None while hidden).
inOPD_panel = None
# Softcam settings persisted through enigma2's config mechanism.
config.softcam = ConfigSubsection()
config.softcam.actCam = ConfigText(visible_width=200)
config.softcam.actCam2 = ConfigText(visible_width=200)
# Delay before starting the cam at boot.
config.softcam.waittime = ConfigSelection([('0',_("dont wait")),('1',_("1 second")), ('5',_("5 seconds")),('10',_("10 seconds")),('15',_("15 seconds")),('20',_("20 seconds")),('30',_("30 seconds"))], default='15')
if os.path.isfile('/usr/lib/enigma2/python/Plugins/Extensions/MultiQuickButton/plugin.pyo') is True:
try:
from Plugins.Extensions.MultiQuickButton.plugin import *
except:
pass
from OPENDROID.BluePanel import *
from OPENDROID.CronManager import *
from OPENDROID.ScriptRunner import *
from OPENDROID.MountManager import *
from OPENDROID.SwapManager import Swap, SwapAutostart
from OPENDROID.SoftwarePanel import SoftwarePanel
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import BackupScreen, RestoreScreen, BackupSelection, getBackupPath, getBackupFilename
import gettext
def _(txt):
t = gettext.dgettext("OPD_panel", txt)
if t == txt:
print "[OPD_panel] fallback to default translation for", txt
t = gettext.gettext(txt)
return t
def command(comandline, strip=1):
comandline = comandline + " >/tmp/command.txt"
os.system(comandline)
text = ""
if os.path.exists("/tmp/command.txt") is True:
file = open("/tmp/command.txt", "r")
if strip == 1:
for line in file:
text = text + line.strip() + '\n'
else:
for line in file:
text = text + line
if text[-1:] != '\n': text = text + "\n"
file.close()
# if one or last line then remove linefeed
if text[-1:] == '\n': text = text[:-1]
comandline = text
os.system("rm /tmp/command.txt")
return comandline
# --- Box identification: logged and written to /tmp/OPD_panel.ver ---
boxversion = getBoxType()
machinename = getMachineName()
machinebrand = getMachineBrand()
OEMname = getBrandOEM()
OPD_panel_Version = 'OPD PANEL V1.4 (By OPD-Team)'
print "[OPD_panel] machinebrand: %s" % (machinebrand)
print "[OPD_panel] machinename: %s" % (machinename)
print "[OPD_panel] oem name: %s" % (OEMname)
print "[OPD_panel] boxtype: %s" % (boxversion)
panel = open("/tmp/OPD_panel.ver", "w")
panel.write(OPD_panel_Version + '\n')
panel.write("Machinebrand: %s " % (machinebrand)+ '\n')
panel.write("Machinename: %s " % (machinename)+ '\n')
panel.write("oem name: %s " % (OEMname)+ '\n')
panel.write("Boxtype: %s " % (boxversion)+ '\n')
panel.close()
# Footer hint shown on config screens.
ExitSave = "[Exit] = " +_("Cancel") +" [Ok] =" +_("Save")
class ConfigPORT(ConfigSequence):
    """A single port-number config entry limited to 1-65535."""
    def __init__(self, default):
        # 'seperator' (sic) is the keyword name the ConfigSequence API expects.
        ConfigSequence.__init__(self, seperator = ".", limits = [(1,65535)], default = default)
def main(session, **kwargs):
    """Plugin entry point: open the OPD_panel main screen."""
    session.open(OPD_panel)
def Apanel(menuid, **kwargs):
    """Return the OPD_panel entry for the main menu; empty list elsewhere."""
    if menuid != "mainmenu":
        return []
    return [(_("OPD_panel"), main, "OPD_panel", 3)]
def Plugins(**kwargs):
    """Enigma2 descriptor list: main-menu entry, autostart hooks, extensions entry.

    NOTE(review): 'camstart' is not defined in this module; it is expected to
    come from one of the star imports above -- verify before shipping.
    """
    return [
        PluginDescriptor(name='OPD_panel', description='OPD_panel GUI 16/5/2016', where=PluginDescriptor.WHERE_MENU, fnc=Apanel),
        PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART, PluginDescriptor.WHERE_AUTOSTART], fnc=camstart),
        PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART, PluginDescriptor.WHERE_AUTOSTART], fnc=SwapAutostart),
        PluginDescriptor(name='OPD_panel', description='OPD_panel GUI 16/5/2016', where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=main)]
# Inline enigma2 skin XML for the panel screens.
MENU_SKIN = '<screen position="center,center" size="950,470" title="OPD Panel - Main Menu" >\n\t<ePixmap pixmap="/usr/lib/enigma2/python/OPENDROID/icons/redlogo.png" position="0,380" size="950,84" alphatest="on" zPosition="1"/>\n\t<ePixmap pixmap="/usr/lib/enigma2/python/OPENDROID/icons/opendroid_info.png" position="510,11" size="550,354" alphatest="on" zPosition="1"/>\n\t\t<widget source="global.CurrentTime" render="Label" position="450, 340" size="500,24" font="Regular;20" foregroundColor="#FFFFFF" halign="right" transparent="1" zPosition="5">\n\t\t<convert type="ClockToText">>Format%H:%M:%S</convert>\n\t</widget>\n\t<eLabel backgroundColor="#56C856" position="0,330" size="950,1" zPosition="0" />\n <widget name="Mlist" position="70,110" size="705,260" itemHeight="50" scrollbarMode="showOnDemand" transparent="1" zPosition="0" />\n\t<widget name="label1" position="10,340" size="490,25" font="Regular;20" transparent="1" foregroundColor="#f2e000" halign="left" />\n</screen>'
CONFIG_SKIN = '<screen position="center,center" size="600,440" title="PANEL Config" >\n\t<widget name="config" position="10,10" size="580,377" enableWrapAround="1" scrollbarMode="showOnDemand" />\n\t<widget name="labelExitsave" position="90,410" size="420,25" halign="center" font="Regular;20" transparent="1" foregroundColor="#f2e000" />\n</screen>'
INFO_SKIN = '<screen name="OPD_panel" position="center,center" size="730,400" title="OPD_panel" >\n\t<widget name="label2" position="0,10" size="730,25" font="Regular;20" transparent="1" halign="center" foregroundColor="#f2e000" />\n\t<widget name="label1" position="10,45" size="710,350" font="Console;20" zPosition="1" backgroundColor="#251e1f20" transparent="1" />\n</screen>'
INFO_SKIN2 = '<screen name="OPD_panel" position="center,center" size="530,400" title="OPD_panel" backgroundColor="#251e1f20">\n\t<widget name="label1" position="10,50" size="510,340" font="Regular;15" zPosition="1" backgroundColor="#251e1f20" transparent="1" />\n</screen>'
class PanelList(MenuList):
    """MenuList whose fonts and row height match the detected desktop size.

    NOTE(review): the conditional runs at class-definition time, so the
    resolution is sampled once at import and fixed thereafter.
    """
    if (getDesktop(0).size().width() == 1920):
        # 1080p skin: larger fonts and taller rows.
        def __init__(self, list, font0 = 38, font1 = 28, itemHeight = 60, enableWrapAround = True):
            MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
            self.l.setFont(0, gFont("Regular", font0))
            self.l.setFont(1, gFont("Regular", font1))
            self.l.setItemHeight(itemHeight)
    else:
        # SD/720p skin.
        def __init__(self, list, font0 = 24, font1 = 16, itemHeight = 50, enableWrapAround = True):
            MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
            self.l.setFont(0, gFont("Regular", font0))
            self.l.setFont(1, gFont("Regular", font1))
            self.l.setItemHeight(itemHeight)
def MenuEntryItem(entry):
    """Build a MultiContent row (icon + caption) for *entry* = (png, text, id)."""
    if (getDesktop(0).size().width() == 1920):
        # 1080p layout.
        icon_pos, icon_size = (0, 10), (60, 60)
        text_pos, text_size = (110, 5), (690, 50)
    else:
        # SD/720p layout.
        icon_pos, icon_size = (0, 5), (100, 40)
        text_pos, text_size = (110, 10), (440, 40)
    row = [entry]
    row.append(MultiContentEntryPixmapAlphaTest(pos=icon_pos, size=icon_size, png=entry[0]))
    row.append(MultiContentEntryText(pos=text_pos, size=text_size, font=0, text=entry[1]))
    return row
from Screens.PiPSetup import PiPSetup
from Screens.InfoBarGenerics import InfoBarPiP
def InfoEntryComponent(file):
    """Load the icon pixmap for *file*, trying the active skin first, then
    the plugin's own icon directory, then the same chain for 'default'.

    Idiom fix: 'png == None' replaced with 'png is None' (identity test
    for the None singleton); the redundant 'res' alias was dropped.
    """
    png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'icons/' + file + '.png'))
    if png is None:
        png = LoadPixmap('/usr/lib/enigma2/python/OPENDROID/icons/' + file + '.png')
    if png is None:
        png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'icons/default.png'))
    if png is None:
        png = LoadPixmap('/usr/lib/enigma2/python/OPENDROID/icons/default.png')
    return png
class OPD_panel(Screen, InfoBarPiP):
servicelist = None
def __init__(self, session, services = None):
global menu
global inOPD_panel
global pluginlist
global INFOCONF
Screen.__init__(self, session)
self.session = session
self.skin = MENU_SKIN
self.onShown.append(self.setWindowTitle)
self.service = None
INFOCONF = 0
pluginlist = 'False'
try:
print '[OPD_panel] SHOW'
OPD_panel = self
except:
print '[OPD_Panel] Error Hide'
if services is not None:
self.servicelist = services
else:
self.servicelist = None
self.list = []
self['actions'] = ActionMap(['OkCancelActions', 'DirectionActions', 'ColorActions'], {'cancel': self.Exit,
'upUp': self.up,
'downUp': self.down,
'ok': self.ok}, 1)
self['label1'] = Label(OPD_panel_Version)
self.Mlist = []
self.Mlist.append(MenuEntryItem((InfoEntryComponent('ImageFlash'), _('Image-Flasher'), 'ImageFlash')))
self.Mlist.append(MenuEntryItem((InfoEntryComponent('LogManager'), _('Log-Manager'), 'LogManager')))
self.Mlist.append(MenuEntryItem((InfoEntryComponent('SoftwareManager'), _('Software-Manager'), 'software-manager')))
self.Mlist.append(MenuEntryItem((InfoEntryComponent('services'), _('services'), 'services')))
self.Mlist.append(MenuEntryItem((InfoEntryComponent('Infos'), _('Infos'), 'Infos')))
self.Mlist.append(MenuEntryItem((InfoEntryComponent('Infobar_Setup'), _('Infobar_Setup'), 'Infobar_Setup')))
self.onChangedEntry = []
self["Mlist"] = PanelList([])
self["Mlist"].l.setList(self.Mlist)
menu = 0
self['Mlist'].onSelectionChanged.append(self.selectionChanged)
def getCurrentEntry(self):
if self['Mlist'].l.getCurrentSelection():
selection = self['Mlist'].l.getCurrentSelection()[0]
if selection[0] is not None:
return selection[0]
return
def selectionChanged(self):
item = self.getCurrentEntry()
def setWindowTitle(self):
self.setTitle(_('OPD-Main Menu'))
def up(self):
pass
def down(self):
pass
def left(self):
pass
def right(self):
pass
def Red(self):
self.showExtensionSelection1(Parameter='run')
def Green(self):
pass
def yellow(self):
pass
def blue(self):
pass
def Exit(self):
global menu
global inOPD_panel
if menu == 0:
try:
self.service = self.session.nav.getCurrentlyPlayingServiceReference()
service = self.service.toCompareString()
servicename = ServiceReference.ServiceReference(service).getServiceName().replace('\xc2\x87', '').replace('\xc2\x86', '').ljust(16)
print '[OPD_panel] HIDE'
inOPD_panel = None
except:
print '[OPD_panel] Error Hide'
self.close()
elif menu == 1:
self['Mlist'].moveToIndex(0)
self['Mlist'].l.setList(self.oldmlist)
menu = 0
self['label1'].setText(OPD_panel_Version)
elif menu == 2:
self['Mlist'].moveToIndex(0)
self['Mlist'].l.setList(self.oldmlist1)
menu = 1
self['label1'].setText('Infos')
return
def ok(self):
menu = self['Mlist'].l.getCurrentSelection()[0][2]
print '[OPD_panel] MenuItem: ' + menu
if menu == 'services':
self.services()
elif menu == 'Pluginbrowser':
self.session.open(PluginBrowser)
elif menu == 'Infos':
self.Infos()
elif menu == 'Service_Team':
self.session.open(Info, 'Service_Team')
elif menu == 'Info':
self.session.open(Info, 'SystemInfo')
elif menu == 'ImageVersion':
self.session.open(Info, 'ImageVersion')
elif menu == 'FreeSpace':
self.session.open(Info, 'FreeSpace')
elif menu == 'Network':
self.session.open(Info, 'Network')
elif menu == 'Mounts':
self.session.open(Info, 'Mounts')
elif menu == 'Kernel':
self.session.open(Info, 'Kernel')
elif menu == 'Ram':
self.session.open(Info, 'Free')
elif menu == 'Cpu':
self.session.open(Info, 'Cpu')
elif menu == 'Top':
self.session.open(Info, 'Top')
elif menu == 'MemInfo':
self.session.open(Info, 'MemInfo')
elif menu == 'Module':
self.session.open(Info, 'Module')
elif menu == 'Mtd':
self.session.open(Info, 'Mtd')
elif menu == 'Partitions':
self.session.open(Info, 'Partitions')
elif menu == 'Swap':
self.session.open(Info, 'Swap')
elif menu == 'SystemInfo':
self.System()
elif menu == 'CronManager':
self.session.open(CronManager)
elif menu == 'Infobar_Setup':
from OPENDROID.GreenPanel import InfoBarSetup
self.session.open(InfoBarSetup)
elif menu == 'Decoding_Setup':
from OPENDROID.GreenPanel import DecodingSetup
self.session.open(DecodingSetup)
elif menu == 'JobManager':
self.session.open(ScriptRunner)
elif menu == 'software-manager':
self.Software_Manager()
elif menu == 'software-update':
self.session.open(SoftwarePanel)
elif menu == 'backup-settings':
self.session.openWithCallback(self.backupDone, BackupScreen, runBackup=True)
elif menu == 'restore-settings':
self.backuppath = getBackupPath()
self.backupfile = getBackupFilename()
self.fullbackupfilename = self.backuppath + '/' + self.backupfile
if os_path.exists(self.fullbackupfilename):
self.session.openWithCallback(self.startRestore, MessageBox, _('Are you sure you want to restore your STB backup?\nSTB will restart after the restore'))
else:
self.session.open(MessageBox, _('Sorry no backups found!'), MessageBox.TYPE_INFO, timeout=10)
elif menu == 'backup-files':
self.session.openWithCallback(self.backupfiles_choosen, BackupSelection)
elif menu == 'MultiQuickButton':
self.session.open(MultiQuickButton)
elif menu == 'MountManager':
self.session.open(DeviceManager)
elif menu == 'OscamSmartcard':
self.session.open(OscamSmartcard)
elif menu == 'SwapManager':
self.session.open(Swap)
elif menu == 'RedPanel':
self.session.open(RedPanel)
elif menu == 'Yellow-Key-Action':
self.session.open(YellowPanel)
elif menu == 'LogManager':
self.session.open(LogManager)
elif menu == 'ImageFlash':
self.session.open(FlashOnline)
elif menu == 'Samba':
self.session.open(NetworkSamba)
def services(self):
global menu
menu = 1
self['label1'].setText(_('services'))
self.tlist = []
self.oldmlist = []
self.oldmlist = self.Mlist
self.tlist.append(MenuEntryItem((InfoEntryComponent('MountManager'), _('MountManager'), 'MountManager')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('CronManager'), _('CronManager'), 'CronManager')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('JobManager'), _('JobManager'), 'JobManager')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('SwapManager'), _('SwapManager'), 'SwapManager')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('OscamSmartcard'), _('OscamSmartcard'), 'OscamSmartcard')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Samba'), _('Samba'), 'Samba')))
if os.path.isfile('/usr/lib/enigma2/python/Plugins/Extensions/MultiQuickButton/plugin.pyo') is True:
self.tlist.append(MenuEntryItem((InfoEntryComponent('MultiQuickButton'), _('MultiQuickButton'), 'MultiQuickButton')))
self['Mlist'].moveToIndex(0)
self['Mlist'].l.setList(self.tlist)
def Infos(self):
global menu
menu = 1
self['label1'].setText(_('Infos'))
self.tlist = []
self.oldmlist = []
self.oldmlist1 = []
self.oldmlist = self.Mlist
self.tlist.append(MenuEntryItem((InfoEntryComponent('Service_Team'), _('Service_Team'), 'Service_Team')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('ImageVersion'), _('Image-Version'), 'ImageVersion')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('FreeSpace'), _('FreeSpace'), 'FreeSpace')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Kernel'), _('Kernel'), 'Kernel')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Mounts'), _('Mounts'), 'Mounts')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Network'), _('Network'), 'Network')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Ram'), _('Ram'), 'Ram')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('SystemInfo'), _('SystemInfo'), 'SystemInfo')))
self['Mlist'].moveToIndex(0)
self['Mlist'].l.setList(self.tlist)
self.oldmlist1 = self.tlist
def System(self):
global menu
menu = 2
self['label1'].setText(_('System Info'))
self.tlist = []
self.tlist.append(MenuEntryItem((InfoEntryComponent('Cpu'), _('Cpu'), 'Cpu')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('MemInfo'), _('MemInfo'), 'MemInfo')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Mtd'), _('Mtd'), 'Mtd')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Module'), _('Module'), 'Module')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Partitions'), _('Partitions'), 'Partitions')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Swap'), _('Swap'), 'Swap')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('Top'), _('Top'), 'Top')))
self['Mlist'].moveToIndex(0)
self['Mlist'].l.setList(self.tlist)
    # NOTE(review): dead code -- a second `System_main` defined a few lines
    # below silently shadows this one, so this "Image/Remote Setup" menu can
    # never be reached. Confirm which variant is intended and remove the other.
    def System_main(self):
        """Show the Red/Blue panel setup menu (currently shadowed by a later redefinition)."""
        global menu
        menu = 1
        self["label1"].setText(_("Image/Remote Setup"))
        self.tlist = []
        self.oldmlist = []
        self.oldmlist = self.Mlist
        self.tlist.append(MenuEntryItem((InfoEntryComponent('Red-Key-Action'), _("Red Panel"), 'Red-Key-Action')))
        self.tlist.append(MenuEntryItem((InfoEntryComponent('Blue-Key-Action'), _("Blue Panel"), 'Blue-Key-Action')))
        self["Mlist"].moveToIndex(0)
        self["Mlist"].l.setList(self.tlist)
    def System_main(self):
        """Show the 'System' submenu.

        NOTE(review): this redefinition replaces the "Image/Remote Setup"
        variant of `System_main` declared earlier in the class.
        """
        global menu
        menu = 1
        self['label1'].setText(_('System'))
        self.tlist = []
        self.oldmlist = []
        self.oldmlist = self.Mlist
        self.tlist.append(MenuEntryItem((InfoEntryComponent('Info'), _('Info'), 'Info')))
        self['Mlist'].moveToIndex(0)
        self['Mlist'].l.setList(self.tlist)
def Software_Manager(self):
global menu
menu = 1
self['label1'].setText(_('Software Manager'))
self.tlist = []
self.oldmlist = []
self.oldmlist = self.Mlist
self.tlist.append(MenuEntryItem((InfoEntryComponent('SoftwareManager'), _('Software update'), 'software-update')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('BackupSettings'), _('Backup Settings'), 'backup-settings')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('RestoreSettings'), _('Restore Settings'), 'restore-settings')))
self.tlist.append(MenuEntryItem((InfoEntryComponent('BackupFiles'), _('Choose backup files'), 'backup-files')))
self['Mlist'].moveToIndex(0)
self['Mlist'].l.setList(self.tlist)
    def backupfiles_choosen(self, ret):
        # Persist the backup-directory selection at every configuration level
        # so the choice survives a GUI restart. `ret` (the dialog result) is
        # intentionally unused: the config objects were already updated in place.
        config.plugins.configurationbackup.backupdirs.save()
        config.plugins.configurationbackup.save()
        config.save()
def backupDone(self, retval = None):
if retval is True:
self.session.open(MessageBox, _('Backup done.'), MessageBox.TYPE_INFO, timeout=10)
else:
self.session.open(MessageBox, _('Backup failed.'), MessageBox.TYPE_INFO, timeout=10)
def startRestore(self, ret = False):
if ret == True:
self.exe = True
self.session.open(RestoreScreen, runRestore=True)
class RedPanel(ConfigListScreen, Screen):
    """Setup screen configuring what the red key (short/long press) opens."""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.session = session
        self.skinName = 'Setup'
        Screen.setTitle(self, _('RedPanel') + '...')
        self.setup_title = _('RedPanel') + '...'
        self['HelpWindow'] = Pixmap()
        self['HelpWindow'].hide()
        self['status'] = StaticText()
        self['footnote'] = Label('')
        # BUGFIX: was Label(_('')) -- gettext called on the empty msgid returns
        # the PO catalog metadata header, not an empty string. Use a plain ''
        # like the sibling YellowPanel screen does.
        self['description'] = Label('')
        self['labelExitsave'] = Label('[Exit] = ' + _('Cancel') + ' [Ok] =' + _('Save'))
        self.onChangedEntry = []
        self.list = []
        ConfigListScreen.__init__(self, self.list, session=self.session, on_change=self.changedEntry)
        self.createSetup()
        self['actions'] = ActionMap(['SetupActions', 'ColorActions'], {'ok': self.keySave,
         'cancel': self.keyCancel,
         'red': self.keyCancel,
         'green': self.keySave,
         'menu': self.keyCancel}, -2)
        self['key_red'] = StaticText(_('Cancel'))
        self['key_green'] = StaticText(_('OK'))
        if self.selectionChanged not in self['config'].onSelectionChanged:
            self['config'].onSelectionChanged.append(self.selectionChanged)
        self.selectionChanged()

    def createSetup(self):
        """(Re)build the two config entries shown on this screen."""
        self.editListEntry = None
        self.list = []
        self.list.append(getConfigListEntry(_('Show OPD_panel Red-key'), config.plugins.OPD_panel_redpanel.enabled))
        self.list.append(getConfigListEntry(_('Show Softcam-Panel Red-key long'), config.plugins.OPD_panel_redpanel.enabledlong))
        self['config'].list = self.list
        self['config'].setList(self.list)
        if config.usage.sort_settings.value:
            self['config'].list.sort()

    def selectionChanged(self):
        # Mirror the highlighted entry's label into the status line.
        self['status'].setText(self['config'].getCurrent()[0])

    def changedEntry(self):
        for callback in self.onChangedEntry:
            callback()
        self.selectionChanged()

    def getCurrentEntry(self):
        return self['config'].getCurrent()[0]

    def getCurrentValue(self):
        return str(self['config'].getCurrent()[1].getText())

    def getCurrentDescription(self):
        # Optional third tuple element carries a description.
        return self['config'].getCurrent() and len(self['config'].getCurrent()) > 2 and self['config'].getCurrent()[2] or ''

    def createSummary(self):
        from Screens.Setup import SetupSummary
        return SetupSummary

    def saveAll(self):
        """Persist every config entry, then flush the config file."""
        for x in self['config'].list:
            x[1].save()
        configfile.save()

    def keySave(self):
        self.saveAll()
        self.close()

    def cancelConfirm(self, result):
        if not result:
            return
        # Roll back every entry before leaving.
        for x in self['config'].list:
            x[1].cancel()
        self.close()

    def keyCancel(self):
        if self['config'].isChanged():
            self.session.openWithCallback(self.cancelConfirm, MessageBox, _('Really close without saving settings?'))
        else:
            self.close()
class YellowPanel(ConfigListScreen, Screen):
    """Setup screen choosing which action the yellow key triggers."""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.session = session
        self.skinName = 'Setup'
        Screen.setTitle(self, _('Yellow Key Action') + '...')
        self.setup_title = _('Yellow Key Action') + '...'
        self['HelpWindow'] = Pixmap()
        self['HelpWindow'].hide()
        self['status'] = StaticText()
        self['footnote'] = Label('')
        self['description'] = Label('')
        self['labelExitsave'] = Label('[Exit] = ' + _('Cancel') + ' [Ok] =' + _('Save'))
        self.onChangedEntry = []
        self.list = []
        # ConfigListScreen must be initialised before createSetup() fills the list.
        ConfigListScreen.__init__(self, self.list, session=self.session, on_change=self.changedEntry)
        self.createSetup()
        self['actions'] = ActionMap(['SetupActions', 'ColorActions'], {'ok': self.keySave,
         'cancel': self.keyCancel,
         'red': self.keyCancel,
         'green': self.keySave,
         'menu': self.keyCancel}, -2)
        self['key_red'] = StaticText(_('Cancel'))
        self['key_green'] = StaticText(_('OK'))
        if self.selectionChanged not in self['config'].onSelectionChanged:
            self['config'].onSelectionChanged.append(self.selectionChanged)
        self.selectionChanged()

    def createSetup(self):
        # Single entry: the yellow-key action selection list.
        self.editListEntry = None
        self.list = []
        self.list.append(getConfigListEntry(_('Yellow Key Action'), config.plugins.OPD_panel_yellowkey.list))
        self['config'].list = self.list
        self['config'].setList(self.list)
        if config.usage.sort_settings.value:
            self['config'].list.sort()
        return

    def selectionChanged(self):
        # Mirror the highlighted entry's label into the status line.
        self['status'].setText(self['config'].getCurrent()[0])

    def changedEntry(self):
        for x in self.onChangedEntry:
            x()
        self.selectionChanged()

    def getCurrentEntry(self):
        return self['config'].getCurrent()[0]

    def getCurrentValue(self):
        return str(self['config'].getCurrent()[1].getText())

    def getCurrentDescription(self):
        # Optional third tuple element carries a description.
        return self['config'].getCurrent() and len(self['config'].getCurrent()) > 2 and self['config'].getCurrent()[2] or ''

    def createSummary(self):
        from Screens.Setup import SetupSummary
        return SetupSummary

    def saveAll(self):
        # Persist every config entry, then flush the config file.
        for x in self['config'].list:
            x[1].save()
        configfile.save()

    def keySave(self):
        self.saveAll()
        self.close()

    def cancelConfirm(self, result):
        if not result:
            return
        # Roll back every entry before leaving.
        for x in self['config'].list:
            x[1].cancel()
        self.close()

    def keyCancel(self):
        if self['config'].isChanged():
            self.session.openWithCallback(self.cancelConfirm, MessageBox, _('Really close without saving settings?'))
        else:
            self.close()
class Info(Screen):
    """Read-only viewer that renders one system-information page.

    The `info` argument selects which probe method fills the scrollable
    label. Every probe shells out through the module-level `command()`
    helper via Do_cmd() and shows plain text; any failure is caught and
    replaced by a generic error message.
    """

    def __init__(self, session, info):
        self.service = None
        Screen.__init__(self, session)
        self.skin = INFO_SKIN
        self['label2'] = Label('INFO')
        self['label1'] = ScrollLabel()
        # NOTE(review): the Infos menu above offers a 'Ram' entry, but no
        # branch here handles 'Ram' -- only 'Free' (which renders the Ram
        # page). Confirm the caller maps 'Ram' to 'Free' before dispatching.
        if info == 'Service_Team':
            self.Service_Team()
        if info == 'SystemInfo':
            self.SystemInfo()
        elif info == 'ImageVersion':
            self.ImageVersion()
        elif info == 'FreeSpace':
            self.FreeSpace()
        elif info == 'Mounts':
            self.Mounts()
        elif info == 'Network':
            self.Network()
        elif info == 'Kernel':
            self.Kernel()
        elif info == 'Free':
            self.Free()
        elif info == 'Cpu':
            self.Cpu()
        elif info == 'Top':
            self.Top()
        elif info == 'MemInfo':
            self.MemInfo()
        elif info == 'Module':
            self.Module()
        elif info == 'Mtd':
            self.Mtd()
        elif info == 'Partitions':
            self.Partitions()
        elif info == 'Swap':
            self.Swap()
        self['actions'] = ActionMap(['OkCancelActions', 'DirectionActions'], {'cancel': self.Exit,
         'ok': self.ok,
         'up': self.Up,
         'down': self.Down}, -1)
        return

    def Exit(self):
        self.close()

    def ok(self):
        self.close()

    def Down(self):
        self['label1'].pageDown()

    def Up(self):
        self['label1'].pageUp()

    def Service_Team(self):
        # Show /etc/motd (from its 'wElc0me' or 'INFO' marker onward) plus
        # /etc/image-version.
        try:
            self['label2'].setText('INFO')
            info1 = self.Do_cmd('cat', '/etc/motd', None)
            if info1.find('wElc0me') > -1:
                info1 = info1[info1.find('wElc0me'):len(info1)] + '\n'
                info1 = info1.replace('|', '')
            else:
                info1 = info1[info1.find('INFO'):len(info1)] + '\n'
            info2 = self.Do_cmd('cat', '/etc/image-version', None)
            info3 = self.Do_cut(info1 + info2)
            self['label1'].setText(info3)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def SystemInfo(self):
        # Show the contents of /etc/version.
        try:
            self['label2'].setText(_('Image Info'))
            info1 = self.Do_cmd('cat', '/etc/version', None)
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def ImageVersion(self):
        # Date, time, uptime, image type (first line of /etc/image-version)
        # and load average.
        try:
            self['label2'].setText(_('Image Version'))
            now = datetime.now()
            info1 = 'Date = ' + now.strftime('%d-%B-%Y') + '\n'
            info2 = 'Time = ' + now.strftime('%H:%M:%S') + '\n'
            info3 = self.Do_cmd('uptime', None, None)
            tmp = info3.split(',')
            info3 = 'Uptime = ' + tmp[0].lstrip() + '\n'
            info4 = self.Do_cmd('cat', '/etc/image-version', ' | head -n 1')
            # Strip the key prefix of the first line, keep only the value.
            info4 = info4[9:]
            info4 = 'Imagetype = ' + info4 + '\n'
            info5 = 'Load = ' + self.Do_cmd('cat', '/proc/loadavg', None)
            info6 = self.Do_cut(info1 + info2 + info3 + info4 + info5)
            self['label1'].setText(info6)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def FreeSpace(self):
        # `df -h` output.
        try:
            self['label2'].setText(_('FreeSpace'))
            info1 = self.Do_cmd('df', None, '-h')
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Mounts(self):
        # `mount` output.
        try:
            self['label2'].setText(_('Mounts'))
            info1 = self.Do_cmd('mount', None, None)
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Network(self):
        # Interfaces plus the kernel routing table.
        try:
            self['label2'].setText(_('Network'))
            info1 = self.Do_cmd('ifconfig', None, None) + '\n'
            info2 = self.Do_cmd('route', None, '-n')
            info3 = self.Do_cut(info1 + info2)
            self['label1'].setText(info3)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Kernel(self):
        # Parse /proc/version into name/owner/mainimage/date lines by
        # splitting on '(' -- assumes the usual Linux banner layout.
        try:
            self['label2'].setText(_('Kernel'))
            info0 = self.Do_cmd('cat', '/proc/version', None)
            info = info0.split('(')
            info1 = 'Name = ' + info[0] + '\n'
            info2 = 'Owner = ' + info[1].replace(')', '') + '\n'
            info3 = 'Mainimage = ' + info[2][0:info[2].find(')')] + '\n'
            info4 = 'Date = ' + info[3][info[3].find('SMP') + 4:len(info[3])]
            info5 = self.Do_cut(info1 + info2 + info3 + info4)
            self['label1'].setText(info5)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Free(self):
        # `free` output (labelled 'Ram' in the UI).
        try:
            self['label2'].setText(_('Ram'))
            info1 = self.Do_cmd('free', None, None)
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Cpu(self):
        # /proc/cpuinfo with double tabs collapsed for narrower display.
        try:
            self['label2'].setText(_('Cpu'))
            info1 = self.Do_cmd('cat', '/proc/cpuinfo', None, " | sed 's/\t\t/\t/'")
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Top(self):
        # One non-interactive `top` snapshot.
        try:
            self['label2'].setText(_('Top'))
            info1 = self.Do_cmd('top', None, '-b -n1')
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def MemInfo(self):
        try:
            self['label2'].setText(_('MemInfo'))
            info1 = self.Do_cmd('cat', '/proc/meminfo', None)
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Module(self):
        try:
            self['label2'].setText(_('Module'))
            info1 = self.Do_cmd('cat', '/proc/modules', None)
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Mtd(self):
        try:
            self['label2'].setText(_('Mtd'))
            info1 = self.Do_cmd('cat', '/proc/mtd', None)
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Partitions(self):
        try:
            self['label2'].setText(_('Partitions'))
            info1 = self.Do_cmd('cat', '/proc/partitions', None)
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Swap(self):
        # Reformat /proc/swaps (whitespace-normalised by sed) into one
        # Name/Type/Size/Used/Prio paragraph per swap device.
        try:
            self['label2'].setText(_('Swap'))
            info0 = self.Do_cmd('cat', '/proc/swaps', None, " | sed 's/\t/ /g; s/[ ]* / /g'")
            info0 = info0.split('\n')
            info1 = ''
            for l in info0[1:]:
                l1 = l.split(' ')
                info1 = info1 + 'Name: ' + l1[0] + '\n'
                info1 = info1 + 'Type: ' + l1[1] + '\n'
                info1 = info1 + 'Size: ' + l1[2] + '\n'
                info1 = info1 + 'Used: ' + l1[3] + '\n'
                info1 = info1 + 'Prio: ' + l1[4] + '\n\n'
            # Trim the two trailing blank lines left by the loop.
            if info1[-1:] == '\n':
                info1 = info1[:-1]
            if info1[-1:] == '\n':
                info1 = info1[:-1]
            info1 = self.Do_cut(info1)
            self['label1'].setText(info1)
        except:
            self['label1'].setText(_('an internal error has occur'))
        return

    def Do_find(self, text, search):
        # Return the space-delimited token that immediately follows `search`
        # in `text`, or '' when not found.
        text = text + ' '
        ret = ''
        pos = text.find(search)
        pos1 = text.find(' ', pos)
        if pos > -1:
            ret = text[pos + len(search):pos1]
        return ret

    def Do_cut(self, text):
        # Clip every line to 95 characters so it fits the skin's label width.
        text1 = text.split('\n')
        text = ''
        for line in text1:
            text = text + line[:95] + '\n'
        if text[-1:] == '\n':
            text = text[:-1]
        return text

    def Do_cmd(self, cmd, file, arg, pipe = ''):
        # Run `cmd` (optionally on `file` plus shell `pipe` suffix, or with
        # `arg`) via the module-level command() helper and return its output.
        # NOTE(review): `pipe` is only honoured on the `file` path; when `arg`
        # is used the pipe suffix is silently ignored -- confirm intended.
        try:
            if file != None:
                if os.path.exists(file) is True:
                    o = command(cmd + ' ' + file + pipe, 0)
                else:
                    o = 'File not found: \n' + file
            elif arg == None:
                o = command(cmd, 0)
            else:
                o = command(cmd + ' ' + arg, 0)
            return o
        except:
            # Any shell failure degrades to an empty result.
            o = ''
            return o
        return
class FileDownloadJob(Job):
    """Job wrapper that runs a single FileDownloadTask for `url` -> `filename`."""

    def __init__(self, url, filename, file):
        # BUGFIX: translate the template, then substitute. The old code was
        # _('Downloading %s' % file), which formats first and then looks up a
        # string that can never exist in the translation catalog.
        Job.__init__(self, _('Downloading %s') % file)
        FileDownloadTask(self, url, filename)
class DownloaderPostcondition(Condition):
    """Task postcondition: a download succeeded iff its return code is 0."""

    def check(self, task):
        return task.returncode == 0

    def getErrorMessage(self, task):
        # BUGFIX: the error text lives on the *task* (set by
        # FileDownloadTask.download_failed); this Condition instance never
        # defines error_message, so the old `self.error_message` raised
        # AttributeError whenever the message was requested.
        return task.error_message
class FileDownloadTask(Task):
def __init__(self, job, url, path):
Task.__init__(self, job, _('Downloading'))
self.postconditions.append(DownloaderPostcondition())
self.job = job
self.url = url
self.path = path
self.error_message = ''
self.last_recvbytes = 0
self.error_message = None
self.download = None
self.aborted = False
return
def run(self, callback):
self.callback = callback
self.download = downloadWithProgress(self.url, self.path)
self.download.addProgress(self.download_progress)
self.download.start().addCallback(self.download_finished).addErrback(self.download_failed)
print '[FileDownloadTask] downloading', self.url, 'to', self.path
def abort(self):
print '[FileDownloadTask] aborting', self.url
if self.download:
self.download.stop()
self.aborted = True
def download_progress(self, recvbytes, totalbytes):
if recvbytes - self.last_recvbytes > 10000:
self.progress = int(100 * (float(recvbytes) / float(totalbytes)))
self.name = _('Downloading') + ' ' + '%d of %d kBytes' % (recvbytes / 1024, totalbytes / 1024)
self.last_recvbytes = recvbytes
def download_failed(self, failure_instance = None, error_message = ''):
self.error_message = error_message
if error_message == '' and failure_instance is not None:
self.error_message = failure_instance.getErrorMessage()
Task.processFinished(self, 1)
return
def download_finished(self, string = ''):
if self.aborted:
self.finish(aborted=True)
else:
Task.processFinished(self, 0)
|
import os
import sys
import tnetstring
def read_packets (filename):
try:
os.stat (filename)
except OSError:
print "No such file : %s"%filename
sys.exit (1)
pkts = open (filename).read ()
pkts = tnetstring.loads (pkts, 'iso-8859-15')
for data in pkts:
yield data
if '__main__' == __name__:
    # CLI entry point: print the length of every packet in the given capture.
    if not sys.argv [1:]:
        print "Usage: %s 'file'"%sys.argv [0]
        sys.exit (0)
    filename = sys.argv [1]
    for pkt in read_packets (filename):
        print "found %d's len packet"%len (pkt)
|
from polybori import BooleSet, interpolate_smallest_lex
class PartialFunction(object):
    """A Boolean function known only on two point sets.

    `zeros` holds the points mapped to 0, `ones` the points mapped to 1;
    everywhere else the function is undefined. Operators combine two
    partial functions on the intersection of their domains.
    """

    def __init__(self, zeros, ones):
        super(PartialFunction, self).__init__()
        self.zeros = zeros.set()
        self.ones = ones.set()

    def interpolate_smallest_lex(self):
        """Return the lex-smallest polynomial interpolating this function."""
        return interpolate_smallest_lex(self.zeros, self.ones)

    def __str__(self):
        return "PartialFunction(zeros=%s, ones=%s)" % (self.zeros, self.ones)

    def __repr__(self):
        return str(self)

    def definedOn(self):
        """Points where the function value is known."""
        return self.zeros.union(self.ones)

    def __add__(self, other):
        # XOR: 0 where the operands agree, 1 where they differ.
        domain = self.definedOn().intersect(other.definedOn())
        agree = self.zeros.intersect(other.zeros).union(self.ones.intersect(other.ones))
        differ = self.zeros.intersect(other.ones).union(self.ones.intersect(other.zeros))
        assert agree.diff(domain).empty()
        assert differ.diff(domain).empty()
        return PartialFunction(agree, differ)

    def __mul__(self, other):
        # AND: zero wherever either operand is zero; one only where both are one.
        return PartialFunction(self.zeros.union(other.zeros), self.ones.intersect(other.ones))

    def __or__(self, other):
        # OR: one wherever either operand is one; zero only where both are zero.
        return PartialFunction(self.zeros.intersect(other.zeros), self.ones.union(other.ones))

    def __xor__(self, other):
        return self + other

    def __and__(self, other):
        return self * other
|
"""
This is the Create_Modify_Interface function (along with its helpers).
It is used by WebSubmit for the "Modify Bibliographic Information" action.
"""
__revision__ = "$Id$"
import os
import re
import time
import pprint
import cgi
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionError
from invenio.websubmit_functions.Retrieve_Data import Get_Field
from invenio.errorlib import register_exception
from invenio.htmlutils import escape_javascript_string
from invenio.messages import gettext_set_language, wash_language
def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""):
    """Read field `fld`'s value from its text file in `cur_dir`, then delete the file.

    Called on reload of the modify-record page so the field can be
    repopulated with the value the user last entered (before reload)
    instead of the value still stored in the DB.

    @param cur_dir: (string) the submission working directory
    @param fld: (string) name of the field / file; empty means no lookup
    @return: (string) the stripped field value ("" when missing/unreadable)
    """
    fld_val = ""
    if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK):
        # BUGFIX: use a context manager so the handle is closed even when
        # read() raises (the original leaked the handle on error).
        with open("%s/%s" % (cur_dir, fld), "r") as fp:
            fld_val = fp.read()
        try:
            os.unlink("%s/%s"%(cur_dir, fld))
        except OSError:
            # Cannot unlink file - ignore, let WebSubmit main handle this
            pass
    fld_val = fld_val.strip()
    return fld_val
def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid):
    """Read a field's value for record `recid` from the database.

    Used the first time the modification page is rendered, when values must
    come from the stored record. `fieldcode` may list several comma-separated
    MARC codes; their values are joined with newlines.
    """
    if fieldcode == "":
        return ""
    values = ["%s" % Get_Field(code.strip(), recid) for code in fieldcode.split(",")]
    return "\n".join(values).rstrip('\n')
def Create_Modify_Interface_transform_date(fld_val):
    """Normalise a date string to "DD/MM/YYYY".

    Recognised input formats:
        DD Mon YYYY (e.g. 23 Apr 2005)
        YYYY-MM-DD (e.g. 2005-04-23)
    Anything else -- including a string that matches a pattern but is not a
    parseable date -- is returned unchanged.
    """
    recognised = (
        ("^[0-9]{2} [a-z]{3} [0-9]{4}$", "%d %b %Y"),
        ("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", "%Y-%m-%d"),
    )
    for pattern, input_format in recognised:
        if re.search(pattern, fld_val, re.IGNORECASE) is not None:
            try:
                return time.strftime("%d/%m/%Y", time.strptime(fld_val, input_format))
            except (ValueError, TypeError):
                # bad date format: leave the value as-is
                pass
            break
    return fld_val
def Create_Modify_Interface(parameters, curdir, form, user_info=None):
    """
    Create an interface for the modification of a document, based on
    the fields that the user has chosen to modify. This avoids having
    to redefine a submission page for the modifications, but rely on
    the elements already defined for the initial submission i.e. SBI
    action (The only page that needs to be built for the modification
    is the page letting the user specify a document to modify).
    This function should be added at step 1 of your modification
    workflow, after the functions that retrieves report number and
    record id (Get_Report_Number, Get_Recid). Functions at step 2 are
    the one executed upon successful submission of the form.
    Create_Modify_Interface expects the following parameters:
    * "fieldnameMBI" - the name of a text file in the submission
    working directory that contains a list of the names of the
    WebSubmit fields to include in the Modification interface.
    These field names are separated by"\n" or "+".
    * "prefix" - some content displayed before the main
    modification interface. Can contain HTML (i.e. needs to be
    pre-escaped). The prefix can make use of Python string
    replacement for common values (such as 'rn'). Percent signs
    (%) must consequently be escaped (with %%).
    * "suffix" - some content displayed after the main modification
    interface. Can contain HTML (i.e. needs to be
    pre-escaped). The suffix can make use of Python string
    replacement for common values (such as 'rn'). Percent signs
    (%) must consequently be escaped (with %%).
    * "button_label" - the label for the "END" button.
    * "button_prefix" - some content displayed before the button to
    submit the form. Can contain HTML (i.e. needs to be
    pre-escaped). The prefix can make use of Python string
    replacement for common values (such as 'rn'). Percent signs
    (%) must consequently be escaped (with %%).
    * "dates_conversion" - by default, values interpreted as dates
    are converted to their 'DD/MM/YYYY' format, whenever
    possible. Set another value for a different behaviour
    (eg. 'none' for no conversion)
    Given the list of WebSubmit fields to be included in the
    modification interface, the values for each field are retrieved
    for the given record (by way of each WebSubmit field being
    configured with a MARC Code in the WebSubmit database). An HTML
    FORM is then created. This form allows a user to modify certain
    field values for a record.
    The file referenced by 'fieldnameMBI' is usually generated from a
    multiple select form field): users can then select one or several
    fields to modify
    Note that the function will display WebSubmit Response elements,
    but will not be able to set an initial value: this must be done by
    the Response element iteself.
    Additionally the function creates an internal field named
    'Create_Modify_Interface_DONE' on the interface, that can be
    retrieved in curdir after the form has been submitted.
    This flag is an indicator for the function that displayed values
    should not be retrieved from the database, but from the submitted
    values (in case the page is reloaded). You can also rely on this
    value when building your WebSubmit Response element in order to
    retrieve value either from the record, or from the submission
    directory.
    """
    ln = wash_language(form['ln'])
    _ = gettext_set_language(ln)
    # NOTE(review): sysno/rn (and doctype, action, step, access, last_step,
    # action_score, Request_Print below) are module-level names injected by
    # the WebSubmit engine before this function runs -- they are not defined
    # in this file.
    global sysno,rn
    t = ""
    # variables declaration
    fieldname = parameters['fieldnameMBI']
    prefix = ''
    suffix = ''
    end_button_label = 'END'
    end_button_prefix = ''
    date_conversion_setting = ''
    if parameters.has_key('prefix'):
        prefix = parameters['prefix']
    if parameters.has_key('suffix'):
        suffix = parameters['suffix']
    if parameters.has_key('button_label') and parameters['button_label']:
        end_button_label = parameters['button_label']
    if parameters.has_key('button_prefix'):
        end_button_prefix = parameters['button_prefix']
    if parameters.has_key('dates_conversion'):
        date_conversion_setting = parameters['dates_conversion']
    # Path of file containing fields to modify
    # Restricted globals handed to "R" (response) elements executed below.
    the_globals = {
        'doctype' : doctype,
        'action' : action,
        'act' : action, ## for backward compatibility
        'step' : step,
        'access' : access,
        'ln' : ln,
        'curdir' : curdir,
        'uid' : user_info['uid'],
        'uid_email' : user_info['email'],
        'rn' : rn,
        'last_step' : last_step,
        'action_score' : action_score,
        '__websubmit_in_jail__' : True,
        'form': form,
        'sysno': sysno,
        'user_info' : user_info,
        '__builtins__' : globals()['__builtins__'],
        'Request_Print': Request_Print
    }
    # Determine the list of fields to display: either from the fieldnameMBI
    # file in the working directory, or parsed out of the field's own
    # <option value=...> list stored in the DB.
    if os.path.exists("%s/%s" % (curdir, fieldname)):
        fp = open( "%s/%s" % (curdir, fieldname), "r" )
        fieldstext = fp.read()
        fp.close()
        fieldstext = re.sub("\+","\n", fieldstext)
        fields = fieldstext.split("\n")
    else:
        res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,))
        if len(res) == 1:
            fields = res[0][0].replace(" ", "")
            fields = re.findall("<optionvalue=.*>", fields)
            regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""")
            fields = [regexp.search(x) for x in fields]
            fields = [x.group("value") for x in fields if x is not None]
            fields = [x for x in fields if x not in ("Select", "select")]
        else:
            raise InvenioWebSubmitFunctionError("cannot find fields to modify")
    #output some text
    if not prefix:
        t += "<center bgcolor=\"white\">The document <b>%s</b> has been found in the database.</center><br />Please modify the following fields:<br />Then press the '%s' button at the bottom of the page<br />\n" % \
             (rn, cgi.escape(_(end_button_label)))
    else:
        t += prefix % the_globals
    # Render one widget per selected field, pre-filled with its current value.
    for field in fields:
        subfield = ""
        value = ""
        marccode = ""
        text = ""
        # retrieve and display the modification text
        t = t + "<FONT color=\"darkblue\">\n"
        res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,))
        if len(res)>0:
            t = t + "<small>%s</small> </FONT>\n" % res[0][0]
        # retrieve the marc code associated with the field
        res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,))
        if len(res) > 0:
            marccode = res[0][0]
        # then retrieve the previous value of the field
        if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")):
            # Page has been reloaded - get field value from text file on server, not from DB record
            value = Create_Modify_Interface_getfieldval_fromfile(curdir, field)
        else:
            # First call to page - get field value from DB record
            value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno)
        if date_conversion_setting != 'none':
            # If field is a date value, transform date into format DD/MM/YYYY:
            value = Create_Modify_Interface_transform_date(value)
        res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,)) # kwalitee: disable=sql
        if len(res) > 0:
            # Column layout of sbmFIELDDESC: [3]=element type, [4]=size,
            # [5]=rows, [6]=cols, [7]=maxlength, [8]=default value, [9]=fidesc.
            element_type = res[0][3]
            numcols = res[0][6]
            numrows = res[0][5]
            size = res[0][4]
            maxlength = res[0][7]
            val = res[0][8]
            fidesc = res[0][9]
            if element_type == "T":
                # Textarea: current value injected server-side, HTML-escaped.
                text = "<textarea name=\"%s\" rows=%s cols=%s wrap>%s</textarea>" % (field, numrows, numcols, cgi.escape(value))
            elif element_type == "F":
                # File upload widget (no pre-filled value possible).
                text = "<input type=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength)
            elif element_type == "I":
                # Text input: default value in HTML, real value set via JS so
                # special characters survive.
                text = "<input name=\"%s\" size=%s value=\"%s\"> " % (field, size, val and escape_javascript_string(val, escape_quote_for_html=True) or '')
                text = text + '''<script type="text/javascript">/*<![CDATA[*/
                document.forms[0].%s.value="%s";
                /*]]>*/</script>''' % (field, escape_javascript_string(value, escape_for_html=False))
            elif element_type == "H":
                # Hidden input, same JS value-injection trick as "I".
                text = "<input type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val and escape_javascript_string(val, escape_quote_for_html=True) or '')
                text = text + '''<script type="text/javascript">/*<![CDATA[*/
                document.forms[0].%s.value="%s";
                /*]]>*/</script>''' % (field, escape_javascript_string(value, escape_for_html=False))
            elif element_type == "S":
                # Select list: emit JS that walks the options and re-selects
                # every previously chosen value.
                values = re.split("[\n\r]+", value)
                text = fidesc
                if re.search("%s\[\]" % field, fidesc):
                    multipletext = "[]"
                else:
                    multipletext = ""
                if len(values) > 0 and not(len(values) == 1 and values[0] == ""):
                    text += '<script type="text/javascript">/*<![CDATA[*/\n'
                    text += "var i = 0;\n"
                    text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext)
                    text += "max = el.length;\n"
                    for val in values:
                        text += "var found = 0;\n"
                        text += "var i=0;\n"
                        text += "while (i != max) {\n"
                        text += "  if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % \
                                (escape_javascript_string(val, escape_for_html=False), escape_javascript_string(val, escape_for_html=False))
                        text += "    el.options[i].selected = true;\n"
                        text += "    found = 1;\n"
                        text += "  }\n"
                        text += "  i=i+1;\n"
                        text += "}\n"
                        #text += "if (found == 0) {\n"
                        #text += "  el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n"
                        #text += "}\n"
                    text += "/*]]>*/</script>\n"
            elif element_type == "D":
                # Static description element: emitted verbatim.
                text = fidesc
            elif element_type == "R":
                # Response element: execute its Python code in the jailed globals.
                try:
                    co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec")
                    ## Note this exec is safe WRT global variable because the
                    ## Create_Modify_Interface has already been parsed by
                    ## execfile within a protected environment.
                    the_globals['text'] = ''
                    exec co in the_globals
                    text = the_globals['text']
                except:
                    msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals()))
                    register_exception(req=None, alert_admin=True, prefix=msg)
                    raise InvenioWebSubmitFunctionError(msg)
            else:
                text = "%s: unknown field type" % field
            t = t + "<small>%s</small>" % text
    # output our flag field
    t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />'
    t += '<br />'
    if end_button_prefix:
        t += end_button_prefix % the_globals
    # output some more text
    t += "<br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"%(end_button_label)s\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>" % {'end_button_label': escape_javascript_string(_(end_button_label), escape_quote_for_html=True)}
    if suffix:
        t += suffix % the_globals
    return t
|
import glob
import os
import site
from cx_Freeze import setup, Executable
import meld.build_helpers
import meld.conf
# Second site-packages entry holds the bundled PyGObject/GNOME runtime on the
# Windows build host; its "gnome" subfolder contains the DLLs and data below.
site_dir = site.getsitepackages()[1]
include_dll_path = os.path.join(site_dir, "gnome")

# DLLs that cx_Freeze's dependency scanner misses and must be shipped explicitly.
missing_dll = [
    'libgtk-3-0.dll',
    'libgdk-3-0.dll',
    'libatk-1.0-0.dll',
    'libintl-8.dll',
    'libzzz.dll',
    'libwinpthread-1.dll',
    'libcairo-gobject-2.dll',
    'libgdk_pixbuf-2.0-0.dll',
    'libpango-1.0-0.dll',
    'libpangocairo-1.0-0.dll',
    'libpangoft2-1.0-0.dll',
    'libpangowin32-1.0-0.dll',
    'libffi-6.dll',
    'libfontconfig-1.dll',
    'libfreetype-6.dll',
    'libgio-2.0-0.dll',
    'libglib-2.0-0.dll',
    'libgmodule-2.0-0.dll',
    'libgobject-2.0-0.dll',
    'libgirepository-1.0-1.dll',
    'libgtksourceview-3.0-1.dll',
    'libjasper-1.dll',
    'libjpeg-8.dll',
    'libpng16-16.dll',
    'libgnutls-26.dll',
    'libxmlxpat.dll',
    'librsvg-2-2.dll',
    'libharfbuzz-gobject-0.dll',
    'libwebp-5.dll',
]

# GTK runtime data directories copied wholesale into the frozen tree.
gtk_libs = [
    'etc/fonts',
    'etc/gtk-3.0/settings.ini',
    'etc/pango',
    'lib/gdk-pixbuf-2.0',
    'lib/girepository-1.0',
    'share/fontconfig',
    'share/fonts',
    'share/glib-2.0',
    'share/gtksourceview-3.0',
    'share/icons',
]

include_files = [(os.path.join(include_dll_path, path), path) for path in
                 missing_dll + gtk_libs]

build_exe_options = {
    "compressed": False,
    "icon": "data/icons/meld.ico",
    "includes": ["gi"],
    "packages": ["gi", "weakref"],
    "include_files": include_files,
}

# MSI Registry table rows: (key, root, registry path, name, value, component).
# FIX: use raw strings for the registry paths -- 'SOFTWARE\Meld' only worked
# because '\M' is an invalid escape that Python keeps literally, which now
# triggers a SyntaxWarning/DeprecationWarning; the values are unchanged.
registry_table = [
    ('MeldKLM', 2, r'SOFTWARE\Meld', '*', None, 'TARGETDIR'),
    ('MeldInstallDir', 2, r'SOFTWARE\Meld', 'InstallDir', '[TARGETDIR]', 'TARGETDIR'),
    ('MeldExecutable', 2, r'SOFTWARE\Meld', 'Executable', '[TARGETDIR]Meld.exe', 'TARGETDIR'),
]
reg_locator_table = [
    ('MeldInstallDirLocate', 2, r'SOFTWARE\Meld', 'InstallDir', 0)
]
# Let the installer find a previous install dir via the RegLocator above.
app_search_table = [('TARGETDIR', 'MeldInstallDirLocate')]
msi_data = {
    'Registry': registry_table,
    'RegLocator': reg_locator_table,
    'AppSearch': app_search_table
}
bdist_msi_options = {
    "upgrade_code": "{1d303789-b4e2-4d6e-9515-c301e155cd50}",
    "data": msi_data,
}
# Build/installation metadata for Meld; the cx_Freeze-specific pieces
# (executables, build_exe/bdist_msi options) are defined above.
setup(
    name="Meld",
    version=meld.conf.__version__,
    description='Visual diff and merge tool',
    author='The Meld project',
    author_email='meld-list@gnome.org',
    maintainer='Kai Willadsen',
    url='http://meldmerge.org',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: X11 Applications :: GTK',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
        'Programming Language :: Python',
        'Topic :: Desktop Environment :: Gnome',
        'Topic :: Software Development',
        'Topic :: Software Development :: Version Control',
    ],
    options = {
        "build_exe": build_exe_options,
        "bdist_msi": bdist_msi_options,
    },
    executables = [
        # Win32GUI base -> no console window for the frozen executable.
        Executable(
            "bin/meld",
            base="Win32GUI",
            targetName="Meld.exe",
            shortcutName="Meld",
            shortcutDir="ProgramMenuFolder",
        ),
    ],
    packages=[
        'meld',
        'meld.ui',
        'meld.util',
        'meld.vc',
    ],
    package_data={
        'meld': ['README', 'COPYING', 'NEWS']
    },
    scripts=['bin/meld'],
    data_files=[
        ('share/man/man1',
         ['meld.1']
         ),
        ('share/doc/meld-' + meld.conf.__version__,
         ['COPYING', 'NEWS']
         ),
        ('share/meld',
         ['data/meld.css', 'data/meld-dark.css']
         ),
        ('share/meld/icons',
         glob.glob("data/icons/*.png") +
         glob.glob("data/icons/COPYING*")
         ),
        ('share/meld/ui',
         glob.glob("data/ui/*.ui") + glob.glob("data/ui/*.xml")
         ),
    ],
    # Project-specific build steps (translations, help, icons, data).
    cmdclass={
        "build_i18n": meld.build_helpers.build_i18n,
        "build_help": meld.build_helpers.build_help,
        "build_icons": meld.build_helpers.build_icons,
        "build_data": meld.build_helpers.build_data,
    }
)
|
from Sensor import Sensor
import nxt
class ColorSensor(Sensor):
    """NXT color sensor wrapper using the Color20 driver.

    Registered under the sensor name 'color'; the robot framework is
    expected to call Initialize() before Scan().
    """
    name = 'color'

    def Initialize(self):
        """Create the underlying nxt Color20 sensor on this sensor's port."""
        # Removed long-dead commented-out Light-sensor code.
        self.sensor = nxt.Color20(self.robot.GetBrick(), self.port)

    def Scan(self):
        """Return one raw sample from the color sensor."""
        return self.sensor.get_sample()
|
"""
Implements compartmental model of a passive cable. See Neuronal Dynamics
`Chapter 3 Section 2 <http://neuronaldynamics.epfl.ch/online/Ch3.S2.html>`_
"""
import brian2 as b2
from neurodynex3.tools import input_factory
import matplotlib.pyplot as plt
import numpy as np
# Simulation parameters shared by all runs in this module.
b2.defaultclock.dt = 0.01 * b2.ms  # integration time step
CABLE_LENGTH = 500. * b2.um  # length of dendrite
CABLE_DIAMETER = 2. * b2.um  # diameter of dendrite
R_LONGITUDINAL = 0.5 * b2.kohm * b2.mm  # Intracellular medium resistance
R_TRANSVERSAL = 1.25 * b2.Mohm * b2.mm ** 2  # cell membrane resistance (->leak current)
E_LEAK = -70. * b2.mV  # reversal potential of the leak current (-> resting potential)
CAPACITANCE = 0.8 * b2.uF / b2.cm ** 2  # membrane capacitance
# default stimulus: 0.2 nA step between t=2 ms and t=3 ms (unit_time is us)
DEFAULT_INPUT_CURRENT = input_factory.get_step_current(2000, 3000, unit_time=b2.us, amplitude=0.2 * b2.namp)
DEFAULT_INPUT_LOCATION = [CABLE_LENGTH / 3]  # provide an array of locations
def simulate_passive_cable(current_injection_location=DEFAULT_INPUT_LOCATION, input_current=DEFAULT_INPUT_CURRENT,
                           length=CABLE_LENGTH, diameter=CABLE_DIAMETER,
                           r_longitudinal=R_LONGITUDINAL,
                           r_transversal=R_TRANSVERSAL, e_leak=E_LEAK, initial_voltage=E_LEAK,
                           capacitance=CAPACITANCE, nr_compartments=200, simulation_time=5 * b2.ms):
    """Builds a multicompartment cable and numerically approximates the cable equation.

    Args:
        current_injection_location (list): List [] of input locations (Quantity, Length): [123.*b2.um]
        input_current (TimedArray): TimedArray of current amplitudes. One column per current_injection_location.
        length (Quantity): Length of the cable: 0.8*b2.mm
        diameter (Quantity): Diameter of the cable: 0.2*b2.um
        r_longitudinal (Quantity): The longitudinal (axial) resistance of the cable: 0.5*b2.kohm*b2.mm
        r_transversal (Quantity): The transversal resistance (=membrane resistance): 1.25*b2.Mohm*b2.mm**2
        e_leak (Quantity): The reversal potential of the leak current (=resting potential): -70.*b2.mV
        initial_voltage (Quantity): Value of the potential at t=0: -70.*b2.mV
        capacitance (Quantity): Membrane capacitance: 0.8*b2.uF/b2.cm**2
        nr_compartments (int): Number of compartments. Spatial discretization: 200
        simulation_time (Quantity): Time for which the dynamics are simulated: 5*b2.ms

    Returns:
        (StateMonitor, SpatialNeuron): The state monitor contains the membrane voltage in a
        Time x Location matrix. The SpatialNeuron object specifies the simulated neuron model
        and gives access to the morphology. You may want to use those objects for
        spatial indexing: myVoltageStateMonitor[mySpatialNeuron.morphology[0.123*b2.um]].v
    """
    assert isinstance(input_current, b2.TimedArray), "input_current is not of type TimedArray"
    assert input_current.values.shape[1] == len(current_injection_location),\
        "number of injection_locations does not match nr of input currents"
    # morphology: one straight cylinder split into nr_compartments segments
    cable_morphology = b2.Cylinder(diameter=diameter, length=length, n=nr_compartments)
    # Im is transmembrane current
    # Iext is injected current at a specific position on dendrite
    # NOTE: EL and RT are deliberately plain local names — Brian2 resolves
    # them from this scope when compiling the model equations below.
    EL = e_leak
    RT = r_transversal
    eqs = """
    Iext = current(t, location_index): amp (point current)
    location_index : integer (constant)
    Im = (EL-v)/RT : amp/meter**2
    """
    cable_model = b2.SpatialNeuron(morphology=cable_morphology, model=eqs, Cm=capacitance, Ri=r_longitudinal)
    monitor_v = b2.StateMonitor(cable_model, "v", record=True)
    # inject all input currents at the specified location:
    nr_input_locations = len(current_injection_location)
    input_current_0 = np.insert(input_current.values, 0, 0., axis=1) * b2.amp  # insert default current: 0. [amp]
    current = b2.TimedArray(input_current_0, dt=input_current.dt * b2.second)
    for current_index in range(nr_input_locations):
        insert_location = current_injection_location[current_index]
        # map the physical location onto a compartment index (floor)
        compartment_index = int(np.floor(insert_location / (length / nr_compartments)))
        # next line: current_index+1 because 0 is the default current 0Amp
        cable_model.location_index[compartment_index] = current_index + 1
    # set initial values and run for 1 ms
    cable_model.v = initial_voltage
    b2.run(simulation_time)
    return monitor_v, cable_model
def getting_started():
    """A simple code example to get started.
    """
    step_current = input_factory.get_step_current(500, 510, unit_time=b2.us, amplitude=3. * b2.namp)
    voltage_monitor, cable_model = simulate_passive_cable(
        length=0.5 * b2.mm, current_injection_location=[0.1 * b2.mm],
        input_current=step_current, nr_compartments=100, simulation_time=2 * b2.ms)
    # quick visualisation of the raw voltage matrix (time x location)
    plt.figure()
    plt.imshow(voltage_monitor.v / b2.volt)
    plt.colorbar(label="voltage")
    plt.title("vm at (t,x), raw data voltage_monitor.v")
    plt.xlabel("time index")
    plt.ylabel("location index")
    plt.show()


if __name__ == "__main__":
    getting_started()
|
__author__ = "V.A. Sole - ESRF Data Analysis"
import os
import numpy
import time
try:
from PyMca import EdfFile
from PyMca import TiffIO
except ImportError:
print("ArraySave.py is importing EdfFile and TiffIO from local directory")
import EdfFile
import TiffIO
# HDF5 support is optional: the flag gates every function that needs h5py.
HDF5 = True
try:
    import h5py
except ImportError:
    HDF5 = False
# set to a non-zero value for verbose progress output
DEBUG = 0
def getDate():
    """Return the current local time as an ISO 8601 string with UTC offset.

    Example: ``2024-05-01T12:34:56+02:00``.

    The offset is derived from time.timezone/time.altzone instead of the
    old ``localtime.hour - gmtime.hour`` difference, which was wrong when
    local and UTC dates fell on different days, and is now zero-padded to
    two digits as ISO 8601 requires.
    """
    localtime = time.localtime()
    # altzone applies while DST is in effect; both are seconds WEST of UTC,
    # hence the sign flip. Truncate toward zero to whole hours (":00" below).
    is_dst = time.daylight and localtime.tm_isdst > 0
    utc_offset_seconds = -(time.altzone if is_dst else time.timezone)
    delta = int(utc_offset_seconds / 3600)
    return "%4d-%02d-%02dT%02d:%02d:%02d%+03d:00" % (localtime[0], localtime[1],
                                                     localtime[2], localtime[3],
                                                     localtime[4], localtime[5],
                                                     delta)
def save2DArrayListAsASCII(datalist, filename,
                           labels=None, csv=False, csvseparator=";"):
    """Write a list of equally-shaped 2D arrays as columnar ASCII (or CSV).

    Each output row holds the row and column indices followed by the value
    of every array at that position, formatted with ``%g``.

    Args:
        datalist: a 2D array or a list of 2D arrays with identical shape
        filename: output path; an existing file is removed first (best effort)
        labels: one column header per array (defaults to ``Array_<i>``)
        csv (bool): write CSV with quoted headers instead of space-separated text
        csvseparator (str): separator used in CSV mode

    Raises:
        ValueError: if len(labels) does not match the number of arrays
    """
    if not isinstance(datalist, list):
        datalist = [datalist]
    r, c = datalist[0].shape
    ndata = len(datalist)
    if os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            # best effort: open(..., 'w+') below truncates anyway
            pass
    if labels is None:
        labels = ["Array_%d" % i for i in range(ndata)]
    if len(labels) != ndata:
        raise ValueError("Incorrect number of labels")
    if csv:
        header = '"row"%s"column"' % csvseparator
        for label in labels:
            header += '%s"%s"' % (csvseparator, label)
    else:
        header = "row column"
        for label in labels:
            header += " %s" % label
    # context manager guarantees the handle is closed even on write errors
    with open(filename, 'w+') as filehandle:
        filehandle.write('%s\n' % header)
        if csv:
            for row in range(r):
                fileline = ""
                for col in range(c):
                    fileline += "%d" % row
                    fileline += "%s%d" % (csvseparator, col)
                    for i in range(ndata):
                        fileline += "%s%g" % (csvseparator, datalist[i][row, col])
                    fileline += "\n"
                filehandle.write(fileline)
        else:
            for row in range(r):
                fileline = ""
                for col in range(c):
                    fileline += "%d" % row
                    fileline += " %d" % col
                    for i in range(ndata):
                        fileline += " %g" % datalist[i][row, col]
                    fileline += "\n"
                filehandle.write(fileline)
        filehandle.write("\n")
def save2DArrayListAsEDF(datalist, filename, labels=None, dtype=None):
    """Save a 2D array (or list of 2D arrays) as a multi-image EDF file.

    Args:
        datalist: a 2D array or a list of 2D arrays
        filename: output path; an existing file is removed first (best effort)
        labels: one 'Title' header per image (defaults to ``Array_<i>``)
        dtype: optional numpy dtype to cast each image to before writing

    Raises:
        ValueError: if len(labels) does not match the number of arrays
    """
    if not isinstance(datalist, list):
        datalist = [datalist]
    ndata = len(datalist)
    if os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            pass
    if labels is None:
        labels = ["Array_%d" % i for i in range(ndata)]
    if len(labels) != ndata:
        raise ValueError("Incorrect number of labels")
    edfout = EdfFile.EdfFile(filename, access="ab")
    for i in range(ndata):
        image = datalist[i] if dtype is None else datalist[i].astype(dtype)
        edfout.WriteImage({'Title': labels[i]}, image, Append=1)
    del edfout  # force file close
def save2DArrayListAsMonochromaticTiff(datalist, filename,
                                       labels=None, dtype=None):
    """Save a 2D array (or list of 2D arrays) as a multi-page monochrome TIFF.

    When *dtype* is not given, it defaults to the first array's dtype but is
    promoted to float32 as soon as any array is floating point or the dtypes
    disagree. Every page is cast to the resulting dtype (the old
    ``if dtype is None`` branch inside the write loop was dead code: dtype
    can no longer be None at that point).

    Args:
        datalist: a 2D array or a list of 2D arrays
        filename: output path; an existing file is removed first (best effort)
        labels: one 'Title' per page (defaults to ``Array_<i>``)
        dtype: optional numpy dtype for the stored pages

    Raises:
        ValueError: if len(labels) does not match the number of arrays
    """
    if not isinstance(datalist, list):
        datalist = [datalist]
    ndata = len(datalist)
    if dtype is None:
        dtype = datalist[0].dtype
        for array in datalist:
            dtypeI = array.dtype
            if dtypeI in [numpy.float32, numpy.float64] or \
               dtypeI.str[-2] == 'f':
                dtype = numpy.float32
                break
            elif dtypeI != dtype:
                dtype = numpy.float32
                break
    if os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            pass
    if labels is None:
        labels = ["Array_%d" % i for i in range(ndata)]
    if len(labels) != ndata:
        raise ValueError("Incorrect number of labels")
    outfileInstance = TiffIO.TiffIO(filename, mode="wb+")
    for i in range(ndata):
        if i == 1:
            # reopen in append mode so page 0 is not overwritten
            outfileInstance = TiffIO.TiffIO(filename, mode="rb+")
        outfileInstance.writeImage(datalist[i].astype(dtype),
                                   info={'Title': labels[i]})
    outfileInstance.close()  # force file close
def openHDF5File(name, mode='a', **kwargs):
    """
    Open an HDF5 file.

    Valid modes (like Python's file() modes) are:
    - r   Readonly, file must exist
    - r+  Read/write, file must exist
    - w   Create file, truncate if exists
    - w-  Create file, fail if exists
    - a   Read/write if exists, create otherwise (default)

    On a writable, freshly created (empty) file the standard identification
    attributes (file name, creation time, library versions, creator) are
    stamped on the root group when not already present.
    """
    h5file = h5py.File(name, mode, **kwargs)
    if h5file.mode != 'r' and len(h5file) == 0:
        # default root attributes, stored as fixed-length byte strings
        for attr, txt in (('file_name', "%s" % name),
                          ('file_time', "%s" % getDate()),
                          ('HDF5_version', "%s" % h5py.version.hdf5_version),
                          ('HDF5_API_version', "%s" % h5py.version.api_version),
                          ('h5py_version', "%s" % h5py.version.version),
                          ('creator', "%s" % 'PyMca')):
            if attr not in h5file.attrs:
                dtype = '<S%d' % len(txt)
                h5file.attrs.create(attr, txt, dtype=dtype)
    #if 'format_version' not in self.attrs and len(h5file) == 0:
    #    h5file.attrs['format_version'] = __format_version__
    return h5file
def _niceChunkSize(n):
    """Chunk length for a dimension of size n: an exact divisor of n close
    to n/10 when one exists, otherwise n/10 (or n itself for tiny n)."""
    chunk = int(n / 10)
    if chunk == 0:
        chunk = n
    for i in [11, 10, 8, 7, 5, 4]:
        if (n % i) == 0:
            chunk = int(n / i)
            break
    return chunk


def getHDF5FileInstanceAndBuffer(filename, shape,
                                 buffername="data",
                                 dtype=numpy.float32,
                                 interpretation=None,
                                 compression=None):
    """Create a fresh HDF5 file laid out as /data/NXdata/<buffername> and
    return ``(h5py.File, dataset)`` so the caller can stream data in.

    Args:
        filename: output path; an existing file is deleted first
        shape: 3-tuple of the buffer dataset dimensions
        buffername: dataset name inside /data/NXdata
        dtype: dataset dtype
        interpretation: optional NeXus 'interpretation' attribute value
        compression: h5py compression filter; enables chunking when set

    Raises:
        IOError: when h5py is unavailable or the existing file cannot be removed
    """
    if not HDF5:
        raise IOError('h5py does not seem to be installed in your system')
    if os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            # was a bare except: narrowed to the error os.remove can raise
            raise IOError("Cannot overwrite existing file!")
    hdf = openHDF5File(filename, 'a')
    entryName = "data"
    # NXentry group
    nxEntry = hdf.require_group(entryName)
    if 'NX_class' not in nxEntry.attrs:
        nxEntry.attrs['NX_class'] = 'NXentry'.encode('utf-8')
    elif nxEntry.attrs['NX_class'] != 'NXentry'.encode('utf-8'):
        # pre-existing group of another class; kept as-is
        pass
    nxEntry['title'] = "PyMca saved 3D Array".encode('utf-8')
    nxEntry['start_time'] = getDate().encode('utf-8')
    nxData = nxEntry.require_group('NXdata')
    if 'NX_class' not in nxData.attrs:
        nxData.attrs['NX_class'] = 'NXdata'.encode('utf-8')
    elif nxData.attrs['NX_class'] != 'NXdata'.encode('utf-8'):
        # pre-existing group of another class; kept as-is
        pass
    if compression:
        if DEBUG:
            print("Saving compressed and chunked dataset")
        chunk1 = _niceChunkSize(shape[1])
        chunk2 = _niceChunkSize(shape[2])
        data = nxData.require_dataset(buffername,
                                      shape=shape,
                                      dtype=dtype,
                                      chunks=(1, chunk1, chunk2),
                                      compression=compression)
    else:
        #no chunking
        if DEBUG:
            print("Saving not compressed and not chunked dataset")
        data = nxData.require_dataset(buffername,
                                      shape=shape,
                                      dtype=dtype,
                                      compression=None)
    data.attrs['signal'] = numpy.int32(1)
    if interpretation is not None:
        data.attrs['interpretation'] = interpretation.encode('utf-8')
    # one axis dataset per dimension (plain sample indices)
    for i in range(len(shape)):
        dim = numpy.arange(shape[i]).astype(numpy.float32)
        dset = nxData.require_dataset('dim_%d' % i,
                                      dim.shape,
                                      dim.dtype,
                                      dim,
                                      chunks=dim.shape)
        dset.attrs['axis'] = numpy.int32(i + 1)
    nxEntry['end_time'] = getDate().encode('utf-8')
    return hdf, data
def save3DArrayAsMonochromaticTiff(data, filename,
                                   labels=None, dtype=None, mcaindex=-1):
    """Save one TIFF page per slice of a 3D array along axis *mcaindex*.

    Args:
        data: 3D numpy array
        filename: output path; an existing file is removed first (best effort)
        labels: one 'Title' per page (defaults to ``Array_<i>``)
        dtype: dtype of the stored pages (defaults to float32)
        mcaindex: axis that enumerates the pages (-1/2, 1 or 0)

    Raises:
        ValueError: if len(labels) does not match the number of pages
    """
    ndata = data.shape[mcaindex]
    if dtype is None:
        dtype = numpy.float32
    if os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            pass
    if labels is None:
        labels = ["Array_%d" % i for i in range(ndata)]
    if len(labels) != ndata:
        raise ValueError("Incorrect number of labels")
    # one slicer per axis; dtype can no longer be None here, so the old
    # per-branch "if dtype is None" checks were dead code and are gone
    if mcaindex in [2, -1]:
        slicer = lambda i: data[:, :, i]
    elif mcaindex == 1:
        slicer = lambda i: data[:, i, :]
    else:
        slicer = lambda i: data[i]
    outfileInstance = TiffIO.TiffIO(filename, mode="wb+")
    for i in range(ndata):
        if i == 1:
            # reopen in append mode so page 0 is not overwritten
            outfileInstance = TiffIO.TiffIO(filename, mode="rb+")
        outfileInstance.writeImage(slicer(i).astype(dtype),
                                   info={'Title': labels[i]})
        if (ndata > 10):
            print("Saved image %d of %d" % (i + 1, ndata))
    outfileInstance.close()  # force file close
def save3DArrayAsHDF5(data, filename, axes=None, labels=None, dtype=None, mode='nexus',
                      mcaindex=-1, interpretation=None, compression=None):
    """Save a 3D array to *filename* as HDF5.

    mode selects the layout: 'nexus'/'nexus+' writes a NeXus-like
    /data/NXdata/data tree ('nexus+' additionally hard-links /data/data),
    'simplest' writes one chunked dataset at the file root, and any other
    value writes /data/data without NeXus attributes.

    Depending on mcaindex and interpretation the array may be transposed
    on disk (stack of images <-> stack of spectra); see the branches below.

    Args:
        data: 3D numpy array to store
        filename: output path; an existing file is deleted first
        axes: optional list of axis arrays (one per dimension, or None entries)
        labels: optional axis dataset names (used with axes)
        dtype: stored dtype (defaults to data.dtype)
        mode: 'nexus', 'nexus+', 'simplest' or anything else (plain layout)
        mcaindex: axis holding the spectral dimension
        interpretation: NeXus 'interpretation' attribute ('image', 'spectrum', ...)
        compression: h5py compression filter; enables chunking when set

    Raises:
        IOError: when h5py is unavailable or the file cannot be overwritten
    """
    if not HDF5:
        raise IOError('h5py does not seem to be installed in your system')
    if (mcaindex == 0) and (interpretation in ["spectrum", None]):
        #stack of images to be saved as stack of spectra
        modify = True
        shape = [data.shape[1], data.shape[2], data.shape[0]]
    elif (mcaindex != 0) and (interpretation in ["image"]):
        #stack of spectra to be saved as stack of images
        modify = True
        shape = [data.shape[2], data.shape[0], data.shape[1]]
    else:
        modify = False
        shape = data.shape
    if dtype is None:
        dtype = data.dtype
    if mode.lower() in ['nexus', 'nexus+']:
        #raise IOError, 'NeXus data saving not implemented yet'
        if os.path.exists(filename):
            try:
                os.remove(filename)
            except:
                raise IOError("Cannot overwrite existing file!")
        hdf = openHDF5File(filename, 'a')
        entryName = "data"
        #entry
        nxEntry = hdf.require_group(entryName)
        if 'NX_class' not in nxEntry.attrs:
            nxEntry.attrs['NX_class'] = 'NXentry'.encode('utf-8')
        elif nxEntry.attrs['NX_class'] != 'NXentry'.encode('utf-8'):
            #should I raise an error?
            pass
        nxEntry['title'] = "PyMca saved 3D Array".encode('utf-8')
        nxEntry['start_time'] = getDate().encode('utf-8')
        nxData = nxEntry.require_group('NXdata')
        if ('NX_class' not in nxData.attrs):
            nxData.attrs['NX_class'] = 'NXdata'.encode('utf-8')
        elif nxData.attrs['NX_class'] != 'NXdata'.encode('utf-8'):
            #should I raise an error?
            pass
        if modify:
            # transposed write path: slow element shuffling, chunk layout
            # chosen to make the *stored* orientation efficient to read
            if interpretation in ["image", "image".encode('utf-8')]:
                if compression:
                    if DEBUG:
                        print("Saving compressed and chunked dataset")
                    #risk of taking a 10 % more space in disk
                    chunk1 = int(shape[1] / 10)
                    if chunk1 == 0:
                        chunk1 = shape[1]
                    for i in [11, 10, 8, 7, 5, 4]:
                        if (shape[1] % i) == 0:
                            chunk1 = int(shape[1] / i)
                            break
                    chunk2 = int(shape[2] / 10)
                    for i in [11, 10, 8, 7, 5, 4]:
                        if (shape[2] % i) == 0:
                            chunk2 = int(shape[2] / i)
                            break
                    dset = nxData.require_dataset('data',
                                                  shape=shape,
                                                  dtype=dtype,
                                                  chunks=(1, chunk1, chunk2),
                                                  compression=compression)
                else:
                    if DEBUG:
                        print("Saving not compressed and not chunked dataset")
                    #print not compressed -> Not chunked
                    dset = nxData.require_dataset('data',
                                                  shape=shape,
                                                  dtype=dtype,
                                                  compression=None)
                for i in range(data.shape[-1]):
                    tmp = data[:, :, i:i + 1]
                    tmp.shape = 1, shape[1], shape[2]
                    dset[i, 0:shape[1], :] = tmp
                    print("Saved item %d of %d" % (i + 1, data.shape[-1]))
            elif 0:
                # deliberately disabled alternative implementation, kept for
                # reference (see the comments below for the trade-off)
                #if I do not match the input and output shapes it takes ages
                #to save the images as spectra. However, it is much faster
                #when performing spectra operations.
                dset = nxData.require_dataset('data',
                                              shape=shape,
                                              dtype=dtype,
                                              chunks=(1, shape[1], shape[2]))
                for i in range(data.shape[1]):  # shape[0]
                    chunk = numpy.zeros((1, data.shape[2], data.shape[0]),
                                        dtype)
                    for k in range(data.shape[0]):  # shape[2]
                        if 0:
                            tmpData = data[k:k + 1]
                            for j in range(data.shape[2]):  # shape[1]
                                tmpData.shape = data.shape[1], data.shape[2]
                                chunk[0, j, k] = tmpData[i, j]
                        else:
                            tmpData = data[k:k + 1, i, :]
                            tmpData.shape = -1
                            chunk[0, :, k] = tmpData
                    print("Saving item %d of %d" % (i, data.shape[1]))
                    dset[i, :, :] = chunk
            else:
                #if I do not match the input and output shapes it takes ages
                #to save the images as spectra. This is a very fast saving, but
                #the performance is awful when reading.
                if compression:
                    if DEBUG:
                        print("Saving compressed and chunked dataset")
                    dset = nxData.require_dataset('data',
                                                  shape=shape,
                                                  dtype=dtype,
                                                  chunks=(shape[0], shape[1], 1),
                                                  compression=compression)
                else:
                    if DEBUG:
                        print("Saving not compressed and not chunked dataset")
                    dset = nxData.require_dataset('data',
                                                  shape=shape,
                                                  dtype=dtype,
                                                  compression=None)
                for i in range(data.shape[0]):
                    tmp = data[i:i + 1, :, :]
                    tmp.shape = shape[0], shape[1], 1
                    dset[:, :, i:i + 1] = tmp
        else:
            # straight (non-transposed) write path, one input plane at a time
            if compression:
                if DEBUG:
                    print("Saving compressed and chunked dataset")
                chunk1 = int(shape[1] / 10)
                if chunk1 == 0:
                    chunk1 = shape[1]
                for i in [11, 10, 8, 7, 5, 4]:
                    if (shape[1] % i) == 0:
                        chunk1 = int(shape[1] / i)
                        break
                chunk2 = int(shape[2] / 10)
                if chunk2 == 0:
                    chunk2 = shape[2]
                for i in [11, 10, 8, 7, 5, 4]:
                    if (shape[2] % i) == 0:
                        chunk2 = int(shape[2] / i)
                        break
                if DEBUG:
                    print("Used chunk size = (1, %d, %d)" % (chunk1, chunk2))
                dset = nxData.require_dataset('data',
                                              shape=shape,
                                              dtype=dtype,
                                              chunks=(1, chunk1, chunk2),
                                              compression=compression)
            else:
                if DEBUG:
                    print("Saving not compressed and notchunked dataset")
                dset = nxData.require_dataset('data',
                                              shape=shape,
                                              dtype=dtype,
                                              compression=None)
            tmpData = numpy.zeros((1, data.shape[1], data.shape[2]),
                                  data.dtype)
            for i in range(data.shape[0]):
                tmpData[0:1] = data[i:i + 1]
                dset[i:i + 1] = tmpData[0:1]
                print("Saved item %d of %d" % (i + 1, data.shape[0]))
        dset.attrs['signal'] = "1".encode('utf-8')
        if interpretation is not None:
            dset.attrs['interpretation'] = interpretation.encode('utf-8')
        axesAttribute = []
        for i in range(len(shape)):
            if axes is None:
                dim = numpy.arange(shape[i]).astype(numpy.float32)
                dimlabel = 'dim_%d' % i
            elif axes[i] is not None:
                dim = axes[i]
                try:
                    dimlabel = "%s" % labels[i]
                except:
                    dimlabel = 'dim_%d' % i
            else:
                dim = numpy.arange(shape[i]).astype(numpy.float32)
                dimlabel = 'dim_%d' % i
            axesAttribute.append(dimlabel)
            adset = nxData.require_dataset(dimlabel,
                                           dim.shape,
                                           dim.dtype,
                                           compression=None)
            adset[:] = dim[:]
            adset.attrs['axis'] = i + 1
        dset.attrs['axes'] = (":".join(axesAttribute)).encode('utf-8')
        nxEntry['end_time'] = getDate().encode('utf-8')
        if mode.lower() == 'nexus+':
            #create link
            g = h5py.h5g.open(hdf.fid, '/'.encode('utf-8'))
            g.link('/data/NXdata/data'.encode('utf-8'),
                   '/data/data'.encode('utf-8'),
                   h5py.h5g.LINK_HARD)
    elif mode.lower() == 'simplest':
        # single dataset at the file root, always chunked plane-by-plane
        if os.path.exists(filename):
            try:
                os.remove(filename)
            except:
                raise IOError("Cannot overwrite existing file!")
        hdf = h5py.File(filename, 'a')
        if compression:
            hdf.require_dataset('data',
                                shape=shape,
                                dtype=dtype,
                                data=data,
                                chunks=(1, shape[1], shape[2]),
                                compression=compression)
        else:
            hdf.require_dataset('data',
                                shape=shape,
                                data=data,
                                dtype=dtype,
                                compression=None)
    else:
        # fallback layout: /data/data with the input's own shape and dtype
        if os.path.exists(filename):
            try:
                os.remove(filename)
            except:
                raise IOError("Cannot overwrite existing file!")
        shape = data.shape
        dtype = data.dtype
        hdf = h5py.File(filename, 'a')
        dataGroup = hdf.require_group('data')
        dataGroup.require_dataset('data',
                                  shape=shape,
                                  dtype=dtype,
                                  data=data,
                                  chunks=(1, shape[1], shape[2]))
    hdf.flush()
    hdf.close()
def main():
    """Smoke test: write one NeXus HDF5 file and one streaming buffer file.

    Output goes to the system temp directory instead of the filesystem
    root ('/test.h5'), which is normally not writable.
    """
    import tempfile
    a = numpy.arange(1000000.)
    a.shape = 20, 50, 1000
    tmpDir = tempfile.gettempdir()
    save3DArrayAsHDF5(a, os.path.join(tmpDir, 'test.h5'),
                      mode='nexus+', interpretation='image')
    getHDF5FileInstanceAndBuffer(os.path.join(tmpDir, 'test2.h5'),
                                 (100, 100, 100))
    print("Date String = ", getDate())


if __name__ == "__main__":
    main()
|
from Screen import Screen
from Screens.ChoiceBox import ChoiceBox
class ResolutionSelection(Screen):
    """Let the user pick a video output resolution via a ChoiceBox.

    Reads the current mode from /proc/stb and writes the chosen mode back
    to /proc/stb/video/videomode.
    """

    def __init__(self, session, infobar=None):
        Screen.__init__(self, session)
        self.session = session
        # /proc/stb/vmpeg values are hex-encoded; use context managers so the
        # handles are closed promptly (the old code leaked them)
        with open("/proc/stb/vmpeg/0/xres", "r") as f:
            xres = int(f.read(), 16)
        with open("/proc/stb/vmpeg/0/yres", "r") as f:
            yres = int(f.read(), 16)
        with open("/proc/stb/vmpeg/0/framerate", "r") as f:
            fps = int(f.read(), 16)
        fpsFloat = float(fps) / 1000  # framerate node holds mHz
        selection = 0
        tlist = []
        tlist.append((_("Exit"), "exit"))
        tlist.append((_("Auto(not available)"), "auto"))
        tlist.append(("Video: " + str(xres) + "x" + str(yres) + "@" + str(fpsFloat) + "hz", ""))
        tlist.append(("--", ""))
        tlist.append(("576i", "576i50"))
        tlist.append(("576p", "576p50"))
        tlist.append(("720p", "720p50"))
        tlist.append(("1080i", "1080i50"))
        tlist.append(("1080p@23.976hz", "1080p23"))
        tlist.append(("1080p@24hz", "1080p24"))
        tlist.append(("1080p@25hz", "1080p25"))
        keys = ["green", "yellow", "blue", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
        # current mode, without the trailing newline
        with open("/proc/stb/video/videomode") as f:
            mode = f.read()[:-1]
        print(mode)
        # preselect the list entry matching the current mode
        for x in range(len(tlist)):
            if tlist[x][1] == mode:
                selection = x
        self.session.openWithCallback(self.ResolutionSelected, ChoiceBox, title=_("Please select a resolution..."), list = tlist, selection = selection, keys = keys)

    def ResolutionSelected(self, Resolution):
        """ChoiceBox callback: apply the chosen mode or simply close."""
        if Resolution is not None:
            if isinstance(Resolution[1], str):
                if Resolution[1] == "exit":
                    # NOTE(review): ExGreen_toggleGreen is not defined in this
                    # class — presumably provided by an infobar mixin; confirm.
                    self.ExGreen_toggleGreen()
                elif Resolution[1] != "auto":
                    with open("/proc/stb/video/videomode", "w") as f:
                        f.write(Resolution[1])
                    from enigma import gFBDC
                    gFBDC.getInstance().setResolution(-1, -1)
                    self.ExGreen_toggleGreen()
        return
|
from time import sleep
import os
import RPi.GPIO as GPIO
import subprocess
import datetime
GPIO.setmode(GPIO.BCM)  # Broadcom pin numbering
GPIO.setup(24, GPIO.IN)  # trigger input on GPIO24
count = 0  # number of loop iterations so far
up = False  # last sampled GPIO24 level
down = False  # unused
command = ""  # unused at module level (takepic uses a local)
filename = ""  # last captured image file name
index = 0  # unused
camera_pause = "500"  # raspistill timeout in ms (kept as str: spliced into the command line)
def takepic(imageName):
    """Capture a still image to *imageName* using raspistill."""
    print("picture")
    cmd = "sudo raspistill -o {0} -q 100 -t {1}".format(imageName, camera_pause)
    print(cmd)
    os.system(cmd)
# Main polling loop: take a picture on each falling edge of GPIO24
# (previous sample was high, current sample is low).
while(True):
    if(up==True):
        if(GPIO.input(24)==False):
            now = datetime.datetime.now()
            timeString = now.strftime("%Y-%m-%d_%H%M%S")
            filename = "photo-"+timeString+".jpg"
            takepic(filename)
            # NOTE(review): '&' is passed to the script as a literal argument,
            # not shell backgrounding — confirm processImage.sh ignores it.
            subprocess.call(['./processImage.sh', filename, '&'])
    up = GPIO.input(24)
    count = count+1
    sleep(.1)
# unreachable: the loop above never exits (Python 2 print statement)
print "done"
|
class City(object):
    """A named city."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.name)

    def Name(self):
        """Return the city's name."""
        return self.name
class Home(object):
    """A named home located in a city."""

    def __init__(self, name, city):
        self.name = name
        self.city = city

    def __repr__(self):
        return "%s(%r, %r)" % (type(self).__name__, self.name, self.city)

    def Name(self):
        """Return the home's name."""
        return self.name

    def City(self):
        """Return the city object this home belongs to."""
        return self.city
class Person(object):
    """A named person with a home."""

    def __init__(self, name, home):
        self.name = name
        self.home = home

    def __repr__(self):
        return "%s(%r, %r)" % (type(self).__name__, self.name, self.home)

    def Name(self):
        """Return the person's name."""
        return self.name

    def Home(self):
        """Return the home object this person lives in."""
        return self.home
# Demo: build a City -> Home -> Person chain and describe it (Swedish output).
city = City('Karlstad')
home = Home('Nilssons hemmet', city)
person = Person('Nils', home)
# Python 2 print statement; the message string is runtime output, kept as-is.
print '%s bor i %s som ligger i staden %s.' % (person.Name(), person.Home().Name(), person.Home().City().Name())
|
import P1011
import unittest
class test_phylab(unittest.TestCase):
    """Regression tests for the P1011 physics-lab computations.

    The numeric fixtures are recorded measurement data and the expected
    values are pre-computed results, so all literals must stay exactly as
    written.
    """
    def testSteelWire_1(self):
        # m: load masses; C_plus/C_sub: readings while loading/unloading;
        # D: wire diameter readings; L, H, b: geometry of the setup.
        m = [10.000,12.000,14.000,16.000,18.000,20.000,22.000,24.000,26.00]
        C_plus = [3.50, 3.81, 4.10, 4.40, 4.69, 4.98, 5.28, 5.59, 5.89]
        C_sub = [3.52, 3.80, 4.08, 4.38, 4.70, 4.99, 5.30, 5.59, 5.89]
        D = [0.789, 0.788, 0.788, 0.787, 0.788]
        L = 38.9
        H = 77.0
        b = 8.50
        res = P1011.SteelWire(m, C_plus, C_sub, D, L, H, b)
        # expected: LaTeX-formatted modulus string with uncertainty
        self.assertEqual(res,'(1.90\\pm0.04){\\times}10^{11}',"test SteelWire fail")
    def testInertia_1(self):
        m = [711.77, 711.82, 1242.30, 131.76, 241.56,238.38]
        d = [99.95, 99.95, 93.85, 114.60, 610.00]
        T = [[4.06, 4.06, 4.07, 4.06, 4.06], [6.57, 6.57, 6.57, 6.56, 6.57],
             [8.16, 8.16, 8.17, 8.17, 8.17], [7.35, 7.35, 7.33, 7.35, 7.37],
             [11.40, 11.40, 11.41, 11.41, 11.41]]
        l = [34.92, 6.02, 33.05]
        T2 = [[13.07,13.07,13.07,13.07,13.06],[16.86,16.86,16.88,16.87,16.88],
              [21.79,21.82,21.83,21.84,21.84],[27.28,27.28,27.29,27.27,27.27],
              [32.96,32.96,32.96,32.97,32.96]]
        res = P1011.Inertia(m, d, T, l, T2)
        # x stays 1 only when both results fall inside their tolerances
        x = 1
        if(abs(res[0] - 0.9999989) > pow(10,-7)):
            x = 0
        if(abs(res[1] - 610.9)/610.9 > 0.001):
            x = 0
        self.assertEqual(x,1,"test Inertia fail")
if __name__ =='__main__':
    unittest.main()
|
from .extractor_crossplatform import CrossPlatformFileSystemExtractor
from .extractor_epub import EpubMetadataExtractor
from .extractor_exiftool import ExiftoolMetadataExtractor
from .extractor_filetags import FiletagsMetadataExtractor
from .extractor_guessit import GuessitMetadataExtractor
from .extractor_jpeginfo import JpeginfoMetadataExtractor
from .extractor_pandoc import PandocMetadataExtractor
|
import unittest
class FooTest(unittest.TestCase):
    '''Sample test case -- FooTest()'''
    # Demonstrates per-test dispatch in setUp/tearDown based on
    # shortDescription(), i.e. the first line of each test's docstring.
    def setUp(self):
        '''Set up for testing...'''
        print 'FooTest:setUp_:begin'
        # shortDescription() returns the running test method's docstring line
        testName = self.shortDescription()
        if (testName == 'Test routine A'):
            print 'setting up for test A'
        elif (testName == 'Test routine B'):
            print 'setting up for test B'
        else:
            print 'UNKNOWN TEST ROUTINE'
        print 'FooTest:setUp_:end'
    def testA(self):
        '''Test routine A'''
        print 'FooTest: running testA...'
    def testB(self):
        '''Test routine B'''
        print 'FooTest: running testB...'
    def tearDown(self):
        '''Tear down from testing...'''
        print 'FooTest:tearDown_:begin'
        testName = self.shortDescription()
        if (testName == 'Test routine A'):
            print 'cleaning up after test A'
        elif (testName == 'Test routine B'):
            print 'cleaning up after test B'
        else:
            print 'UNKNOWN TEST ROUTINE'
        print 'FooTest:tearDown_:end'
class BarTest(unittest.TestCase):
    '''Sample test case -- BarTest()'''
    # Same shortDescription()-based dispatch pattern in setUp/tearDown.
    def setUp(self):
        '''Set up for testing...'''
        print 'BarTest:setUp_:begin'
        testName = self.shortDescription()
        if (testName == 'Test routine A'):
            print 'setting up for test A'
        elif (testName == 'Test routine B'):
            print 'setting up for test B'
        else:
            print 'UNKNOWN TEST ROUTINE'
        print 'BarTest:setUp_:end'
    def testA(self):
        '''Test routine A'''
        print 'BarTest: running testA...'
    def testB(self):
        '''Test routine B'''
        print 'BarTest: running testB...'
    def tearDown(self):
        '''Tear down from testing...'''
        print 'BarTest:tearDown_:begin'
        testName = self.shortDescription()
        if (testName == 'Test routine A'):
            print 'cleaning up after test A'
        elif (testName == 'Test routine B'):
            print 'cleaning up after test B'
        else:
            print 'UNKNOWN TEST ROUTINE'
        print 'BarTest:tearDown_:end'
if __name__ == '__main__':
    unittest.main()
|
import webapp2
import time
import dbcontroller as dc
import speak
import User
import logging
class MainHandler(webapp2.RequestHandler):
    """Refresh the data source, render it to lines and tweet each line."""

    def get(self):
        entries = dc.refresh()  # renamed from 'list': don't shadow the builtin
        lines = speak.speak(entries)
        import twitter
        for user in User.users:
            # NOTE(review): 'user' is never used — the same lines are re-sent
            # once per registered user; confirm this fan-out is intended.
            for line in lines:
                logging.log(logging.INFO, u"twitter length is " +
                            str(len(line)))
                try:
                    twitter.sendMessage(line)
                except Exception:
                    # narrowed from a bare except: still best-effort, but no
                    # longer swallows SystemExit/KeyboardInterrupt
                    logging.log(logging.WARNING, u"twitter send fail:" + line)
        return self.response.out.write('ok')
# WSGI entry point: GET /whyisme triggers the refresh-and-tweet job.
app = webapp2.WSGIApplication([
    ('/whyisme', MainHandler)
], debug=True)
|
__author__ = 'bruno'
import unittest
import algorithms.math.abacus as Abacus
class TestAbacus(unittest.TestCase):
    """Tests for Abacus.generate_abacus.

    Each expected fixture is the 10-row rendering of a number, one decimal
    digit per row (least significant digit on the last row); the position
    of the gap in the '0'/'*' runs encodes the digit, so the string
    literals below are exact-whitespace fixtures and must not be reflowed.
    """
    def setUp(self):
        pass
    def test_abacus1(self):
        abacus = Abacus.generate_abacus(0)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |'], abacus)
    def test_abacus2(self):
        abacus = Abacus.generate_abacus(8)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00 000*****|'], abacus)
    def test_abacus3(self):
        abacus = Abacus.generate_abacus(32)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000** ***|',
                          '|00000*** **|'], abacus)
    def test_abacus4(self):
        abacus = Abacus.generate_abacus(147)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000**** *|',
                          '|00000* ****|',
                          '|000 00*****|'], abacus)
    def test_abacus5(self):
        abacus = Abacus.generate_abacus(986)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|0 0000*****|',
                          '|00 000*****|',
                          '|0000 0*****|'], abacus)
    def test_abacus6(self):
        abacus = Abacus.generate_abacus(5821)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000 *****|',
                          '|00 000*****|',
                          '|00000*** **|',
                          '|00000**** *|'], abacus)
    def test_abacus7(self):
        abacus = Abacus.generate_abacus(1234)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000**** *|',
                          '|00000*** **|',
                          '|00000** ***|',
                          '|00000* ****|'], abacus)
    def test_abacus8(self):
        abacus = Abacus.generate_abacus(999)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|0 0000*****|',
                          '|0 0000*****|',
                          '|0 0000*****|'], abacus)
    def test_abacus9(self):
        abacus = Abacus.generate_abacus(13)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000**** *|',
                          '|00000** ***|'], abacus)
    def test_abacus10(self):
        abacus = Abacus.generate_abacus(49)
        self.assertEqual(['|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
                          '|00000* ****|',
                          '|0 0000*****|'], abacus)
|
"""
Package represents the collection of resources the user is editing
i.e. the "package".
"""
import datetime
import shutil
import logging
import time
import zipfile
import uuid
import re
from xml.dom import minidom
from exe.engine.path import Path, TempDirPath, toUnicode
from exe.engine.node import Node
from exe.engine.genericidevice import GenericIdevice
from exe.engine.multichoiceidevice import MultichoiceIdevice
from exe.engine.quiztestidevice import QuizTestIdevice
from exe.engine.truefalseidevice import TrueFalseIdevice
from exe.engine.wikipediaidevice import WikipediaIdevice
from exe.engine.casestudyidevice import CasestudyIdevice
from exe.engine.casopracticofpdidevice import CasopracticofpdIdevice
from exe.engine.citasparapensarfpdidevice import CitasparapensarfpdIdevice
from exe.engine.clozefpdidevice import ClozefpdIdevice
from exe.engine.clozeidevice import ClozeIdevice
from exe.engine.clozelangfpdidevice import ClozelangfpdIdevice
from exe.engine.debesconocerfpdidevice import DebesconocerfpdIdevice
from exe.engine.destacadofpdidevice import DestacadofpdIdevice
from exe.engine.ejercicioresueltofpdidevice import EjercicioresueltofpdIdevice
from exe.engine.eleccionmultiplefpdidevice import EleccionmultiplefpdIdevice
from exe.engine.freetextfpdidevice import FreeTextfpdIdevice
from exe.engine.galleryidevice import GalleryIdevice
from exe.engine.imagemagnifieridevice import ImageMagnifierIdevice
from exe.engine.listaidevice import ListaIdevice
from exe.engine.multiselectidevice import MultiSelectIdevice
from exe.engine.orientacionesalumnadofpdidevice import OrientacionesalumnadofpdIdevice
from exe.engine.orientacionestutoriafpdidevice import OrientacionestutoriafpdIdevice
from exe.engine.parasabermasfpdidevice import ParasabermasfpdIdevice
from exe.engine.recomendacionfpdidevice import RecomendacionfpdIdevice
from exe.engine.reflectionfpdidevice import ReflectionfpdIdevice
from exe.engine.reflectionfpdmodifidevice import ReflectionfpdmodifIdevice
from exe.engine.reflectionidevice import ReflectionIdevice
from exe.engine.seleccionmultiplefpdidevice import SeleccionmultiplefpdIdevice
from exe.engine.verdaderofalsofpdidevice import VerdaderofalsofpdIdevice
from exe.engine.persist import Persistable, encodeObject, decodeObjectRaw
from exe import globals as G
from exe.engine.resource import Resource
from twisted.persisted.styles import doUpgrade
from twisted.spread.jelly import Jellyable, Unjellyable
from exe.engine.beautifulsoup import BeautifulSoup
from exe.engine.field import Field, TextAreaField
from exe.engine.persistxml import encodeObjectToXML, decodeObjectFromXML
from exe.engine.lom import lomsubs
from exe.engine.checker import Checker
from exe.webui import common
log = logging.getLogger(__name__)
def clonePrototypeIdevice(title):
    """
    Return a fresh, non-editable clone of the prototype iDevice whose
    title matches *title*, or None when no prototype carries that title.
    """
    for prototype in G.application.ideviceStore.getIdevices():
        if prototype.get_title() != title:
            continue
        log.debug('have prototype of:' + prototype.get_title())
        clone = prototype.clone()
        clone.edit = False
        return clone
    return None
def burstIdevice(idev_type, i, node):
    """
    Burst the BeautifulSoup fragment *i* into a freshly cloned iDevice of
    type *idev_type* attached to *node*.  Falls back to a 'Free Text'
    iDevice when the requested prototype cannot be cloned; returns the
    populated idevice, or None when even the fallback clone fails.
    """
    idevice = clonePrototypeIdevice(idev_type)
    if idevice is None:
        log.warn("unable to clone " + idev_type + " idevice")
        idevice = clonePrototypeIdevice('Free Text')
        if idevice is None:
            log.error("unable to clone Free Text for " + idev_type
                      + " idevice")
            return
    # For idevices such as GalleryImage, where resources are being attached,
    # the idevice should already be attached to a node before bursting it open:
    node.addIdevice(idevice)
    idevice.burstHTML(i)
    return idevice
def loadNodesIdevices(node, s):
    """
    Parse the html string *s* and burst each recognised iDevice <div>
    into a fresh iDevice attached to *node*.

    Bug fix: the "no idevices found" branch previously referenced the
    loop variable ``i`` which is unbound when the idevice list is empty
    (NameError); the whole <body> fragment is now passed instead.
    """
    # Maps the css class of an exported iDevice <div> back to the prototype
    # title used by clonePrototypeIdevice to create a fresh instance.
    burstable = {
        # Generic iDevices:
        "activityIdevice": 'Activity',
        "objectivesIdevice": 'Objectives',
        "preknowledgeIdevice": 'Preknowledge',
        "readingIdevice": 'Reading Activity',
        # All others:
        "RssIdevice": 'RSS',
        # WARNING: Wiki problems loading images with accents, etc:
        "WikipediaIdevice": 'Wiki Article',
        "ReflectionIdevice": 'Reflection',
        # WARNING: Gallery problems with the popup html:
        "GalleryIdevice": 'Image Gallery',
        # WARNING: Magnifier missing major bursting components:
        "ImageMagnifierIdevice": 'Image Magnifier',
        # WARNING: Applet missing file bursting components:
        "AppletIdevice": 'Java Applet',
        "ExternalUrlIdevice": 'External Web Site',
        "ClozeIdevice": 'Cloze Activity',
        "FreeTextIdevice": 'Free Text',
        "CasestudyIdevice": 'Case Study',
        "MultichoiceIdevice": 'Multi-choice',
        "MultiSelectIdevice": 'Multi-select',
        "QuizTestIdevice": 'SCORM Quiz',
        "TrueFalseIdevice": 'True-False Question',
    }
    soup = BeautifulSoup(s)
    body = soup.find('body')
    if not body:
        log.warn("unable to read the body of this node.")
        return
    idevices = body.findAll(name='div',
            attrs={'class' : re.compile('Idevice$') })
    if len(idevices) > 0:
        for i in idevices:
            # WARNING: none of the idevices yet re-attach their media,
            # but they do attempt to re-attach images and other links.
            css_class = i.attrMap['class']
            idev_title = burstable.get(css_class)
            if idev_title is None:
                # NOTE: no custom idevices burst yet,
                # nor any deprecated idevices. Just burst into a FreeText:
                log.warn("unburstable idevice " + css_class +
                         "; bursting into Free Text")
                idev_title = 'Free Text'
            idevice = burstIdevice(idev_title, i, node)
    else:
        # no idevices listed on this page,
        # just create a free-text for the entire page
        # (pass the body fragment; the old code used the unbound name `i`):
        log.warn("no idevices found on this node, bursting into Free Text.")
        idevice = burstIdevice('Free Text', body, node)
def test_for_node(html_content):
# to see if this html really is an exe-generated node
exe_string = u"<!-- Created using eXe: http://exelearning.org -->"
if html_content.decode('utf-8').find(exe_string) >= 0:
return True
else:
return False
def loadNode(pass_num, resourceDir, zippedFile, node, doc, item, level):
    """
    Populate *node* from the IMS manifest <item> element *item*.

    Called twice per item (two-pass import):
      pass_num == 0 -- unzip every referenced resource into resourceDir;
      pass_num == 1 -- burst each exe-generated html page into idevices.
    Recurses into child <item> elements with level+1.

    Fixes: the bare ``except:`` around the unzip (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to ``except Exception``,
    and the output file handle is now closed even when a write fails.
    """
    titles = item.getElementsByTagName('title')
    node.setTitle(titles[0].firstChild.data)
    node_resource = item.attributes['identifierref'].value
    log.debug('*' * level + ' ' + titles[0].firstChild.data + '->' + item.attributes['identifierref'].value)
    # find the <resource> element referenced by this item:
    for resource in doc.getElementsByTagName('resource'):
        if resource.attributes['identifier'].value == node_resource:
            for file in resource.childNodes:
                if file.nodeName == 'file':
                    filename = file.attributes['href'].value
                    is_exe_node_html = False
                    if filename.endswith('.html') \
                    and filename != "fdl.html" \
                    and not filename.startswith("galleryPopup"):
                        # fdl.html is the wikipedia license, ignore it
                        # as well as any galleryPopups:
                        is_exe_node_html = \
                                test_for_node(zippedFile.read(filename))
                    if is_exe_node_html:
                        if pass_num == 1:
                            # 2nd pass call to actually load the nodes:
                            log.debug('loading idevices from node: ' + filename)
                            loadNodesIdevices(node, zippedFile.read(filename))
                    elif filename == "fdl.html" or \
                        filename.startswith("galleryPopup."):
                        # let these be re-created upon bursting.
                        if pass_num == 0:
                            # 1st pass call to unzip the resources:
                            log.debug('ignoring resource file: '+ filename)
                    else:
                        if pass_num == 0:
                            # 1st pass call to unzip the resources:
                            try:
                                # raises KeyError if missing from the archive:
                                zippedFile.getinfo(filename)
                                log.debug('unzipping resource file: '
                                        + resourceDir/filename )
                                outFile = open(resourceDir/filename, "wb")
                                try:
                                    outFile.write(zippedFile.read(filename))
                                    outFile.flush()
                                finally:
                                    outFile.close()
                            except Exception:
                                # narrowed from a bare except so that
                                # KeyboardInterrupt/SystemExit propagate:
                                log.warn('error unzipping resource file: '
                                        + resourceDir/filename )
                            ##########
                            # WARNING: the resource is now in the resourceDir,
                            # BUT it is NOT YET added into any of the project,
                            # much less to the specific idevices or fields!
                            # Although they WILL be saved out with the project
                            # upon the next Save.
                            ##########
            break
    # process this node's children
    for subitem in item.childNodes:
        if subitem.nodeName == 'item':
            # for the first pass, of unzipping only, do not
            # create any child nodes, just cruise on with this one:
            next_node = node
            if pass_num == 1:
                # if this is actually loading the nodes:
                next_node = node.createChild()
            loadNode(pass_num, resourceDir, zippedFile, next_node,
                    doc, subitem, level+1)
def loadCC(zippedFile, filename):
    """
    Load an IMS Common Cartridge or Content Package from filename.
    Returns the newly built Package.
    """
    package = Package(Path(filename).namebase)
    manifest = minidom.parseString(zippedFile.read('imsmanifest.xml'))
    # two-pass system: pass 0 unzips all applicable resources,
    # pass 1 loads the actual node idevices.
    for pass_num in (0, 1):
        for organizations in manifest.getElementsByTagName('organizations'):
            for organization in organizations.getElementsByTagName('organization'):
                for item in organization.childNodes:
                    if item.nodeName == 'item':
                        loadNode(pass_num, package.resourceDir, zippedFile,
                                 package.root, manifest, item, 0)
    return package
class DublinCore(Jellyable, Unjellyable):
    """
    Holds dublin core info
    """
    def __init__(self):
        # Every textual field starts out blank; the identifier gets a
        # fresh UUID.  Assignment order matches the classic layout.
        for fieldName in ('title', 'creator', 'subject', 'description',
                          'publisher', 'contributors', 'date', 'type',
                          'format'):
            setattr(self, fieldName, '')
        self.identifier = str(uuid.uuid4())
        for fieldName in ('source', 'language', 'relation', 'coverage',
                          'rights'):
            setattr(self, fieldName, '')

    def __setattr__(self, name, value):
        # Coerce every assignment to unicode so persisted data is uniform.
        self.__dict__[name] = toUnicode(value)
class Package(Persistable):
    """
    Package represents the collection of resources the user is editing
    i.e. the "package".
    """
    persistenceVersion = 13
    # Attribute names excluded from pickling (see save/doSave)
    nonpersistant = ['resourceDir', 'filename', 'previewDir']
    # Name is used in filenames and urls (saving and navigating)
    _name = ''
    tempFile = False # This is set when the package is saved as a temp copy file
    # Title is rendered in exports
    _title = ''
    _author = ''
    _description = ''
    _backgroundImg = ''
    #styledefault=u"INTEF"
    # This is like a constant
    defaultLevelNames = [x_(u"Topic"), x_(u"Section"), x_(u"Unit")]
    def __init__(self, name):
        """
        Initialize a new, empty package named *name*.

        NOTE: assignment order matters here: ``self.name = name`` goes
        through the ``set_name`` property setter, and ``setLomDefaults``
        reads ``self.dublinCore``, so those must already exist.
        """
        log.debug(u"init " + repr(name))
        self._nextIdeviceId = 0
        self._nextNodeId = 0
        # For looking up nodes by ids
        self._nodeIdDict = {}
        # copy, so editing level names never mutates the class default
        self._levelNames = self.defaultLevelNames[:]
        self.name = name
        self._title = u''
        self._backgroundImg = u''
        self.backgroundImgTile = False
        # Empty if never saved/loaded
        self.filename = u''
        self.root = Node(self, None, _(u"Home"))
        self.currentNode = self.root
        #self.styledefault=u"INTEF"
        self.style = G.application.config.defaultStyle
        self._isChanged = False
        self.previewDir = None
        self.idevices = []
        self.dublinCore = DublinCore()
        # language code only, e.g. 'en' from 'en_GB'
        self._lang = G.application.config.locale.split('_')[0]
        self.setLomDefaults()
        self.setLomEsDefaults()
        self.scolinks = False
        self.scowsinglepage= False
        self.scowwebsite = False
        self.exportSource = True
        self.exportMetadataType = "LOMES"
        self.license = u''
        self.footer = ""
        self._objectives = u''
        self._preknowledge = u''
        self._learningResourceType = u''
        self._intendedEndUserRoleType = u''
        self._intendedEndUserRoleGroup = False
        self._intendedEndUserRoleTutor = False
        self._contextPlace = u''
        self._contextMode = u''
        self.compatibleWithVersion9 = False
        #for export to Sugar (e.g. OLPC)
        self.sugaractivityname = ""
        self.sugarservicename = ""
        #for export to Ustad Mobile
        self.mxmlprofilelist = ""
        self.mxmlheight = ""
        self.mxmlwidth = ""
        self.mxmlforcemediaonly = False
        # Temporary directory to hold resources in
        self.resourceDir = TempDirPath()
        self.resources = {} # Checksum-[_Resource(),..]
        self._docType = G.application.config.docType
def setLomDefaults(self):
self.lom = lomsubs.lomSub.factory()
self.lom.addChilds(self.lomDefaults(self.dublinCore.identifier, 'LOMv1.0'))
def setLomEsDefaults(self):
self.lomEs = lomsubs.lomSub.factory()
self.lomEs.addChilds(self.lomDefaults(self.dublinCore.identifier, 'LOM-ESv1.0', True))
# Property Handlers
def set_docType(self,value):
self._docType = toUnicode(value)
common.setExportDocType(value)
def set_name(self, value):
self._name = toUnicode(value)
    def set_title(self, value):
        """
        Setter backing the ``title`` property.  Besides storing the new
        title it keeps the Dublin Core record and both LOM trees
        (self.lom / self.lomEs) in sync with the change.
        """
        # only mirror into Dublin Core if it still tracked the old title
        if self.dublinCore.title == self._title:
            self.dublinCore.title = value
        lang_str = self.lang.encode('utf-8')
        value_str = value.encode('utf-8')
        for metadata in [self.lom, self.lomEs]:
            title = metadata.get_general().get_title()
            if title:
                found = False
                # replace the LangString holding the old title; an empty
                # new value removes it instead
                for string in title.get_string():
                    if string.get_valueOf_() == self._title.encode('utf-8'):
                        found = True
                        if value:
                            string.set_language(lang_str)
                            string.set_valueOf_(value_str)
                        else:
                            title.string.remove(string)
                if not found:
                    if value:
                        title.add_string(lomsubs.LangStringSub(lang_str, value_str))
            else:
                # no <title> element yet: create one when there is a value
                if value:
                    title = lomsubs.titleSub([lomsubs.LangStringSub(lang_str, value_str)])
                    metadata.get_general().set_title(title)
        self._title = toUnicode(value)
    def set_lang(self, value):
        """
        Setter backing the ``lang`` property.  Updates the Dublin Core
        language and the language fields of the general, metaMetadata and
        educational sections of both LOM trees, then installs a ``c_``
        translator for the new locale.
        """
        if self.dublinCore.language in [self._lang, '']:
            self.dublinCore.language = value
        value_str = value.encode('utf-8')
        for metadata in [self.lom, self.lomEs]:
            # general section: replace any entry holding the old language
            language = metadata.get_general().get_language()
            if language:
                for LanguageId in language:
                    if LanguageId.get_valueOf_() == self._lang.encode('utf-8'):
                        LanguageId.set_valueOf_(value_str)
            else:
                language = [lomsubs.LanguageIdSub(value_str)]
                metadata.get_general().set_language(language)
            # metaMetadata section (single language, not a list)
            metametadata = metadata.get_metaMetadata()
            if metametadata:
                language = metametadata.get_language()
                if language:
                    if language.get_valueOf_() == self._lang.encode('utf-8'):
                        language.set_valueOf_(value_str)
                else:
                    language = lomsubs.LanguageIdSub(value_str)
                    metametadata.set_language(language)
            else:
                language = lomsubs.LanguageIdSub(value_str)
                metametadata = lomsubs.metaMetadataSub(language=language)
                metadata.set_metaMetadata(metametadata)
            # educational section(s)
            educationals = metadata.get_educational()
            if educationals:
                for educational in educationals:
                    language = educational.get_language()
                    if language:
                        for LanguageId in language:
                            if LanguageId.get_valueOf_() == self._lang.encode('utf-8'):
                                LanguageId.set_valueOf_(value_str)
                    else:
                        language = [lomsubs.LanguageIdSub(value_str)]
                        educational = [lomsubs.educationalSub(language=[language])]
                        metadata.set_educational(educational)
        self._lang = toUnicode(value)
        # HACK: publishes c_ globally by writing into __builtins__;
        # NOTE(review): indexing __builtins__ assumes it is a dict here
        # (it is a module in __main__) -- confirm before touching.
        if value in G.application.config.locales:
            __builtins__['c_'] = lambda s: G.application.config.locales[value].ugettext(s) if s else s
    def set_author(self, value):
        """
        Setter backing the ``author`` property.  Mirrors the author into
        Dublin Core, then records him/her as an 'author' contribution in
        the lifeCycle section and a 'creator' contribution in the
        metaMetadata section of both LOM trees (as a minimal vCard).
        """
        if self.dublinCore.creator == self._author:
            self.dublinCore.creator = value
        value_str = value.encode('utf-8')
        # minimal vCard template; %s receives the author name
        vcard = 'BEGIN:VCARD VERSION:3.0 FN:%s EMAIL;TYPE=INTERNET: ORG: END:VCARD'
        for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
            # build role='author' contribution dated today
            src = lomsubs.sourceValueSub()
            src.set_valueOf_(source)
            src.set_uniqueElementName('source')
            val = lomsubs.roleValueSub()
            val.set_valueOf_('author')
            val.set_uniqueElementName('value')
            role = lomsubs.roleSub()
            role.set_source(src)
            role.set_value(val)
            role.set_uniqueElementName('role')
            entity = lomsubs.entitySub(vcard % value_str)
            dateTime = lomsubs.DateTimeValueSub()
            dateTime.set_valueOf_(datetime.datetime.now().strftime('%Y-%m-%d'))
            dateTime.set_uniqueElementName('dateTime')
            lang_str = self.lang.encode('utf-8')
            value_meta_str = c_(u'Metadata creation date').encode('utf-8')
            dateDescription = lomsubs.LanguageStringSub([lomsubs.LangStringSub(lang_str, value_meta_str)])
            date = lomsubs.dateSub(dateTime, dateDescription)
            # lifeCycle: update/remove the entity matching the old author
            lifeCycle = metadata.get_lifeCycle()
            if lifeCycle:
                contributes = lifeCycle.get_contribute()
                found = False
                for contribute in contributes:
                    entitys = contribute.get_entity()
                    rol = contribute.get_role()
                    if rol:
                        rolval = rol.get_value()
                        if rolval:
                            if rolval.get_valueOf_() == 'author':
                                for ent in entitys:
                                    if ent.get_valueOf_() == vcard % self.author.encode('utf-8'):
                                        found = True
                                        if value:
                                            ent.set_valueOf_(vcard % value_str)
                                        else:
                                            contribute.entity.remove(ent)
                                            if not contribute.entity:
                                                contributes.remove(contribute)
                if not found:
                    contribute = lomsubs.contributeSub(role, [entity], date)
                    lifeCycle.add_contribute(contribute)
            else:
                if value:
                    contribute = lomsubs.contributeSub(role, [entity], date)
                    lifeCycle = lomsubs.lifeCycleSub(contribute=[contribute])
                    metadata.set_lifeCycle(lifeCycle)
            # same dance for role='creator' in metaMetadata
            val = lomsubs.roleValueSub()
            val.set_valueOf_('creator')
            val.set_uniqueElementName('value')
            role = lomsubs.roleSub()
            role.set_source(src)
            role.set_value(val)
            role.set_uniqueElementName('role')
            metaMetadata = metadata.get_metaMetadata()
            if metaMetadata:
                contributes = metaMetadata.get_contribute()
                found = False
                for contribute in contributes:
                    entitys = contribute.get_entity()
                    rol = contribute.get_role()
                    if rol:
                        rolval = rol.get_value()
                        if rolval:
                            if rolval.get_valueOf_() == 'creator':
                                for ent in entitys:
                                    if ent.get_valueOf_() == vcard % self.author.encode('utf-8'):
                                        found = True
                                        if value:
                                            ent.set_valueOf_(vcard % value_str)
                                        else:
                                            contribute.entity.remove(ent)
                                            if not contribute.entity:
                                                contributes.remove(contribute)
                if not found:
                    contribute = lomsubs.contributeMetaSub(role, [entity], date)
                    metaMetadata.add_contribute(contribute)
            else:
                if value:
                    contribute = lomsubs.contributeMetaSub(role, [entity], date)
                    metaMetadata.set_contribute([contribute])
        self._author = toUnicode(value)
    def set_description(self, value):
        """
        Setter backing the ``description`` property.  Mirrors the new
        description into Dublin Core and into the general/description
        element of both LOM trees; an empty value removes the old entry.
        """
        if self.dublinCore.description == self._description:
            self.dublinCore.description = value
        lang_str = self.lang.encode('utf-8')
        value_str = value.encode('utf-8')
        for metadata in [self.lom, self.lomEs]:
            description = metadata.get_general().get_description()
            if description:
                description_found = False
                # swap in the new string wherever the old one is found
                for desc in description:
                    for string in desc.get_string():
                        if string.get_valueOf_() == self._description.encode('utf-8'):
                            description_found = True
                            if value:
                                string.set_language(lang_str)
                                string.set_valueOf_(value_str)
                            else:
                                desc.string.remove(string)
                                description.remove(desc)
                if not description_found:
                    if value:
                        description = lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])
                        metadata.get_general().add_description(description)
            else:
                if value:
                    description = [lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])]
                    metadata.get_general().set_description(description)
        self._description = toUnicode(value)
def get_backgroundImg(self):
"""Get the background image for this package"""
if self._backgroundImg:
return "file://" + self._backgroundImg.path
else:
return ""
def set_backgroundImg(self, value):
"""Set the background image for this package"""
if self._backgroundImg:
self._backgroundImg.delete()
if value:
if value.startswith("file://"):
value = value[7:]
imgFile = Path(value)
self._backgroundImg = Resource(self, Path(imgFile))
else:
self._backgroundImg = u''
def get_level1(self):
return self.levelName(0)
def set_level1(self, value):
if value != '':
self._levelNames[0] = value
else:
self._levelNames[0] = self.defaultLevelNames[0]
def get_level2(self):
return self.levelName(1)
def set_level2(self, value):
if value != '':
self._levelNames[1] = value
else:
self._levelNames[1] = self.defaultLevelNames[1]
def get_level3(self):
return self.levelName(2)
def set_level3(self, value):
if value != '':
self._levelNames[2] = value
else:
self._levelNames[2] = self.defaultLevelNames[2]
    def set_objectives(self, value):
        """
        Setter backing the ``objectives`` property.  Stores the text and
        mirrors it into the educational/description element of both LOM
        trees, prefixed with the localized "OBJECTIVES: " marker.
        """
        lang_str = self.lang.encode('utf-8')
        value_str = c_("Objectives").upper() + ": " + value.encode('utf-8')
        for metadata in [self.lom, self.lomEs]:
            educationals = metadata.get_educational()
            description = lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])
            if educationals:
                for educational in educationals:
                    descriptions = educational.get_description()
                    found = False
                    if descriptions:
                        # replace (or drop, when value is empty) the entry
                        # carrying the previous objectives text
                        for desc in descriptions:
                            for string in desc.get_string():
                                if string.get_valueOf_() == c_("Objectives").upper() + ": " + self._objectives.encode('utf-8'):
                                    found = True
                                    if value:
                                        string.set_language(lang_str)
                                        string.set_valueOf_(value_str)
                                    else:
                                        desc.string.remove(string)
                                        descriptions.remove(desc)
                    if not found:
                        if value:
                            educational.add_description(description)
            else:
                if value:
                    educational = [lomsubs.educationalSub(description=[description])]
                    metadata.set_educational(educational)
        self._objectives = toUnicode(value)
    def set_preknowledge(self, value):
        """
        Setter backing the ``preknowledge`` property.  Stores the text and
        mirrors it into the educational/description element of both LOM
        trees, prefixed with the localized "PREKNOWLEDGE: " marker.
        """
        lang_str = self.lang.encode('utf-8')
        value_str = c_("Preknowledge").upper() + ": " + value.encode('utf-8')
        for metadata in [self.lom, self.lomEs]:
            educationals = metadata.get_educational()
            description = lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])
            if educationals:
                for educational in educationals:
                    descriptions = educational.get_description()
                    found = False
                    if descriptions:
                        # replace (or drop, when value is empty) the entry
                        # carrying the previous preknowledge text
                        for desc in descriptions:
                            for string in desc.get_string():
                                if string.get_valueOf_() == c_("Preknowledge").upper() + ": " + self._preknowledge.encode('utf-8'):
                                    found = True
                                    if value:
                                        string.set_language(lang_str)
                                        string.set_valueOf_(value_str)
                                    else:
                                        desc.string.remove(string)
                                        descriptions.remove(desc)
                    if not found:
                        if value:
                            educational.add_description(description)
            else:
                if value:
                    educational = [lomsubs.educationalSub(description=[description])]
                    metadata.set_educational(educational)
        self._preknowledge = toUnicode(value)
def license_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
if value == 'not appropriate' or value == 'public domain':
return 'no'
else:
return 'yes'
    def set_license(self, value):
        """
        Setter backing the ``newlicense`` property.  Mirrors the license
        into Dublin Core rights and into rights/copyrightAndOtherRestrictions
        of both LOM trees (translated per tree via license_map).
        """
        # strip a trailing version suffix like " 3.0" before encoding
        value_str = value.rstrip(' 0123456789.').encode('utf-8')
        if self.dublinCore.rights == self.license:
            self.dublinCore.rights = value
        for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
            rights = metadata.get_rights()
            if not rights:
                metadata.set_rights(lomsubs.rightsSub())
            copyrightAndOtherRestrictions = metadata.get_rights().get_copyrightAndOtherRestrictions()
            if copyrightAndOtherRestrictions:
                # only touch it if it still holds the previous license
                if copyrightAndOtherRestrictions.get_value().get_valueOf_() == self.license_map(source, self.license.encode('utf-8').rstrip(' 0123456789.')):
                    if value:
                        copyrightAndOtherRestrictions.get_value().set_valueOf_(self.license_map(source, value_str))
                    else:
                        metadata.get_rights().set_copyrightAndOtherRestrictions(None)
            else:
                if value:
                    src = lomsubs.sourceValueSub()
                    src.set_valueOf_(source)
                    src.set_uniqueElementName('source')
                    val = lomsubs.copyrightAndOtherRestrictionsValueSub()
                    val.set_valueOf_(self.license_map(source, value_str))
                    val.set_uniqueElementName('value')
                    copyrightAndOtherRestrictions = lomsubs.copyrightAndOtherRestrictionsSub()
                    copyrightAndOtherRestrictions.set_source(src)
                    copyrightAndOtherRestrictions.set_value(val)
                    copyrightAndOtherRestrictions.set_uniqueElementName('copyrightAndOtherRestrictions')
                    metadata.get_rights().set_copyrightAndOtherRestrictions(copyrightAndOtherRestrictions)
        self.license = toUnicode(value)
def learningResourceType_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
lomMap = {
"guided reading": "narrative text",
"master class": "lecture",
"textual-image analysis": "exercise",
"discussion activity": "problem statement",
"closed exercise or problem": "exercise",
"contextualized case problem": "exercise",
"open problem": "problem statement",
"real or virtual learning environment": "simulation",
"didactic game": "exercise",
"webquest": "problem statement",
"experiment": "experiment",
"real project": "simulation",
"simulation": "simulation",
"questionnaire": "questionnaire",
"exam": "exam",
"self assessment": "self assessment",
"": ""
}
return lomMap[value]
    def set_learningResourceType(self, value):
        """
        Setter backing the ``learningResourceType`` property.  Replaces
        (or removes, on empty value) the matching entry in the
        educational/learningResourceType element of both LOM trees.
        """
        value_str = value.encode('utf-8')
        for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
            educationals = metadata.get_educational()
            src = lomsubs.sourceValueSub()
            src.set_valueOf_(source)
            src.set_uniqueElementName('source')
            val = lomsubs.learningResourceTypeValueSub()
            val.set_valueOf_(self.learningResourceType_map(source, value_str))
            val.set_uniqueElementName('value')
            learningResourceType = lomsubs.learningResourceTypeSub(self.learningResourceType_map(source, value_str))
            learningResourceType.set_source(src)
            learningResourceType.set_value(val)
            if educationals:
                for educational in educationals:
                    learningResourceTypes = educational.get_learningResourceType()
                    found = False
                    if learningResourceTypes:
                        # locate the entry holding the previous value
                        for i in learningResourceTypes:
                            if i.get_value().get_valueOf_() == self.learningResourceType_map(source, self.learningResourceType.encode('utf-8')):
                                found = True
                                index = learningResourceTypes.index(i)
                                if value:
                                    educational.insert_learningResourceType(index, learningResourceType)
                                else:
                                    learningResourceTypes.pop(index)
                    if not found:
                        educational.add_learningResourceType(learningResourceType)
            else:
                educational = [lomsubs.educationalSub(learningResourceType=[learningResourceType])]
                metadata.set_educational(educational)
        self._learningResourceType = toUnicode(value)
def intendedEndUserRole_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
if not value or value == 'tutor':
return value
else:
return 'learner'
    def set_intendedEndUserRoleType(self, value):
        """
        Setter backing the ``intendedEndUserRoleType`` property.  When a
        non-empty value is given, updates (or adds) the matching entry in
        the educational/intendedEndUserRole element of both LOM trees.
        """
        value_str = value.encode('utf-8')
        if value:
            for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
                educationals = metadata.get_educational()
                src = lomsubs.sourceValueSub()
                src.set_valueOf_(source)
                src.set_uniqueElementName('source')
                val = lomsubs.intendedEndUserRoleValueSub()
                val.set_valueOf_(self.intendedEndUserRole_map(source, value_str))
                val.set_uniqueElementName('value')
                intendedEndUserRole = lomsubs.intendedEndUserRoleSub(self.intendedEndUserRole_map(source, value_str))
                intendedEndUserRole.set_source(src)
                intendedEndUserRole.set_value(val)
                if educationals:
                    for educational in educationals:
                        intendedEndUserRoles = educational.get_intendedEndUserRole()
                        found = False
                        if intendedEndUserRoles:
                            # insert the replacement where the old value sat
                            for i in intendedEndUserRoles:
                                if i.get_value().get_valueOf_() == self.intendedEndUserRole_map(source, self.intendedEndUserRoleType.encode('utf-8')):
                                    found = True
                                    index = intendedEndUserRoles.index(i)
                                    educational.insert_intendedEndUserRole(index, intendedEndUserRole)
                        if not found:
                            educational.add_intendedEndUserRole(intendedEndUserRole)
                else:
                    educational = [lomsubs.educationalSub(intendedEndUserRole=[intendedEndUserRole])]
                    metadata.set_educational(educational)
        self._intendedEndUserRoleType = toUnicode(value)
    def set_intendedEndUserRole(self, value, valueOf):
        """
        Add (value truthy) or remove (value falsy) the intended end-user
        role *valueOf* (e.g. 'group', 'tutor') in the educational section
        of both LOM trees, mapping the vocabulary per tree.
        """
        for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
            educationals = metadata.get_educational()
            src = lomsubs.sourceValueSub()
            src.set_valueOf_(source)
            src.set_uniqueElementName('source')
            val = lomsubs.intendedEndUserRoleValueSub()
            mappedValueOf = self.intendedEndUserRole_map(source, valueOf)
            val.set_valueOf_(mappedValueOf)
            val.set_uniqueElementName('value')
            intendedEndUserRole = lomsubs.intendedEndUserRoleSub(mappedValueOf)
            intendedEndUserRole.set_source(src)
            intendedEndUserRole.set_value(val)
            if educationals:
                for educational in educationals:
                    intendedEndUserRoles = educational.get_intendedEndUserRole()
                    found = False
                    if intendedEndUserRoles:
                        for i in intendedEndUserRoles:
                            if i.get_value().get_valueOf_() == mappedValueOf:
                                found = True
                                if value:
                                    index = intendedEndUserRoles.index(i)
                                    educational.insert_intendedEndUserRole(index, intendedEndUserRole)
                                else:
                                    # 'group' maps to 'learner' in plain LOM,
                                    # so it must not be removed there
                                    if source != 'LOMv1.0' or valueOf != 'group':
                                        educational.intendedEndUserRole.remove(i)
                    if not found and value:
                        educational.add_intendedEndUserRole(intendedEndUserRole)
            else:
                if value:
                    educational = [lomsubs.educationalSub(intendedEndUserRole=[intendedEndUserRole])]
                    metadata.set_educational(educational)
def set_intendedEndUserRoleGroup(self, value):
self.set_intendedEndUserRole(value, 'group')
self._intendedEndUserRoleGroup = value
def set_intendedEndUserRoleTutor(self, value):
self.set_intendedEndUserRole(value, 'tutor')
self._intendedEndUserRoleTutor = value
def context_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
lomMap = {
"classroom": "school",
"real environment": "training",
"face to face": "other",
"blended": "other",
"distance": "other",
"presencial": "other",
"": ""
}
return lomMap[value]
    def set_context(self, value, valueOf):
        """
        Replace the educational context entry currently holding *valueOf*
        with *value* in both LOM trees (no-op when value is empty);
        vocabulary is mapped per tree via context_map.
        """
        value_str = value.encode('utf-8')
        if value:
            for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
                educationals = metadata.get_educational()
                src = lomsubs.sourceValueSub()
                src.set_valueOf_(source)
                src.set_uniqueElementName('source')
                val = lomsubs.contextValueSub()
                val.set_valueOf_(self.context_map(source, value_str))
                val.set_uniqueElementName('value')
                context = lomsubs.contextSub(self.context_map(source, value_str))
                context.set_source(src)
                context.set_value(val)
                if educationals:
                    for educational in educationals:
                        contexts = educational.get_context()
                        found = False
                        if contexts:
                            # insert the replacement where the old value sat
                            for i in contexts:
                                if i.get_value().get_valueOf_() == self.context_map(source, valueOf.encode('utf-8')):
                                    found = True
                                    index = contexts.index(i)
                                    educational.insert_context(index, context)
                        if not found:
                            educational.add_context(context)
                else:
                    educational = [lomsubs.educationalSub(context=[context])]
                    metadata.set_educational(educational)
def set_contextPlace(self, value):
self.set_context(value, self._contextPlace)
self._contextPlace = toUnicode(value)
def set_contextMode(self, value):
self.set_context(value, self._contextMode)
self._contextMode = toUnicode(value)
def set_changed(self, changed):
self._isChanged = changed
if changed:
if hasattr(self, 'previewDir'):
if self.previewDir:
shutil.rmtree(self.previewDir, True)
self.previewDir = None
# Properties
isChanged = property(lambda self: self._isChanged, set_changed)
name = property(lambda self:self._name, set_name)
title = property(lambda self:self._title, set_title)
lang = property(lambda self: self._lang, set_lang)
author = property(lambda self:self._author, set_author)
description = property(lambda self:self._description, set_description)
newlicense = property(lambda self:self.license, set_license)
docType = property(lambda self:self._docType, set_docType)
backgroundImg = property(get_backgroundImg, set_backgroundImg)
level1 = property(get_level1, set_level1)
level2 = property(get_level2, set_level2)
level3 = property(get_level3, set_level3)
objectives = property(lambda self: self._objectives, set_objectives)
preknowledge = property(lambda self: self._preknowledge, set_preknowledge)
learningResourceType = property(lambda self: self._learningResourceType, set_learningResourceType)
intendedEndUserRoleType = property(lambda self: self._intendedEndUserRoleType, set_intendedEndUserRoleType)
intendedEndUserRoleGroup = property(lambda self: self._intendedEndUserRoleGroup, set_intendedEndUserRoleGroup)
intendedEndUserRoleTutor = property(lambda self: self._intendedEndUserRoleTutor, set_intendedEndUserRoleTutor)
contextPlace = property(lambda self: self._contextPlace, set_contextPlace)
contextMode = property(lambda self: self._contextMode, set_contextMode)
def findNode(self, nodeId):
"""
Finds a node from its nodeId
(nodeId can be a string or a list/tuple)
"""
log.debug(u"findNode" + repr(nodeId))
node = self._nodeIdDict.get(nodeId)
if node and node.package is self:
return node
else:
return None
def levelName(self, level):
"""
Return the level name
"""
if level < len(self._levelNames):
return _(self._levelNames[level])
else:
return _(u"?????")
    def save(self, filename=None, tempFile=False):
        """
        Save package to disk.
        pass an optional filename; otherwise the last saved/loaded
        filename is reused.  Raises AssertionError when a brand new
        package is saved without a filename.  tempFile=True saves a
        temporary copy without renaming the package.
        """
        self.tempFile = tempFile
        # Get the filename
        if filename:
            filename = Path(filename)
            # If we are being given a new filename...
            # Change our name to match our new filename
            name = filename.splitpath()[1]
            if not tempFile:
                self.name = name.basename().splitext()[0]
        elif self.filename:
            # Otherwise use our last saved/loaded from filename
            filename = Path(self.filename)
        else:
            # If we don't have a last saved/loaded from filename,
            # raise an exception because, we need to have a new
            # file passed when a brand new package is saved
            raise AssertionError(u'No name passed when saving a new package')
        #JR: Sanitise the package name to avoid problematic characters
        import string
        validPackagenameChars = "-_. %s%s" % (string.ascii_letters, string.digits)
        self.name = ''.join(c for c in self.name if c in validPackagenameChars).replace(' ','_')
        #JR: If it somehow ends up empty, fall back to a default name
        if self.name == "":
            self.name = "invalidpackagename"
        # Store our new filename for next file|save, and save the package
        log.debug(u"Will save %s to: %s" % (self.name, filename))
        if tempFile:
            # temporarily allow 'filename' to be pickled with the copy,
            # restoring the nonpersistant list and filename afterwards
            self.nonpersistant.remove('filename')
            oldFilename, self.filename = self.filename, unicode(self.filename)
            try:
                filename.safeSave(self.doSave, _('SAVE FAILED!\nLast succesful save is %s.'))
            finally:
                self.nonpersistant.append('filename')
                self.filename = oldFilename
        else:
            # Update our new filename for future saves
            self.filename = filename
            filename.safeSave(self.doSave, _('SAVE FAILED!\nLast succesful save is %s.'))
            self.isChanged = False
            self.updateRecentDocuments(filename)
def updateRecentDocuments(self, filename):
"""
Updates the list of recent documents
"""
# Don't update the list for the generic.data "package"
genericData = G.application.config.configDir/'idevices'/'generic.data'
if genericData.isfile() or genericData.islink():
if Path(filename).samefile(genericData):
return
# Save in recentDocuments list
recentProjects = G.application.config.recentProjects
if filename in recentProjects:
# If we're already number one, carry on
if recentProjects[0] == filename:
return
recentProjects.remove(filename)
recentProjects.insert(0, filename)
del recentProjects[5:] # Delete any older names from the list
G.application.config.configParser.write() # Save the settings
def doSave(self, fileObj):
    """
    Actually performs the save to 'fileObj'.

    Writes every file from the package resource directory plus two
    serialisations of the package itself ('content.data' jelly form and
    'contentv3.xml' XML form) and the 'content.xsd' schema into a zip
    archive.  In eXe-0.9-compatible mode the package is temporarily
    downgraded before writing and restored afterwards.
    """
    if self.compatibleWithVersion9:
        # Strip >= 2.0 attributes so old eXe versions can open the file
        self.downgradeToVersion9()
    zippedFile = zipfile.ZipFile(fileObj, "w", zipfile.ZIP_DEFLATED)
    try:
        for resourceFile in self.resourceDir.files():
            zippedFile.write(unicode(resourceFile.normpath()),
                             resourceFile.name.encode('utf8'), zipfile.ZIP_DEFLATED)
        zinfo = zipfile.ZipInfo(filename='content.data',
                                date_time=time.localtime()[0:6])
        # 0100644 = regular file with rw-r--r-- permissions (unix attrs
        # live in the high 16 bits of external_attr)
        zinfo.external_attr = 0100644<<16L
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        zippedFile.writestr(zinfo, encodeObject(self))
        zinfo2 = zipfile.ZipInfo(filename='contentv3.xml',
                                 date_time=time.localtime()[0:6])
        zinfo2.external_attr = 0100644<<16L
        zinfo2.compress_type = zipfile.ZIP_DEFLATED
        zippedFile.writestr(zinfo2, encodeObjectToXML(self))
        zippedFile.write(G.application.config.webDir/'templates'/'content.xsd', 'content.xsd', zipfile.ZIP_DEFLATED)
    finally:
        zippedFile.close()
    if self.compatibleWithVersion9:
        # Restore the attributes and class persistence versions that
        # downgradeToVersion9() removed/lowered for the save.
        self.upgradeToVersion10()
        CasestudyIdevice.persistenceVersion = 9
        CasopracticofpdIdevice.persistenceVersion = 9
        CitasparapensarfpdIdevice.persistenceVersion = 9
        ClozefpdIdevice.persistenceVersion = 7
        ClozeIdevice.persistenceVersion = 7
        ClozelangfpdIdevice.persistenceVersion = 7
        DebesconocerfpdIdevice.persistenceVersion = 9
        DestacadofpdIdevice.persistenceVersion = 9
        EjercicioresueltofpdIdevice.persistenceVersion = 10
        EleccionmultiplefpdIdevice.persistenceVersion = 10
        TextAreaField.persistenceVersion = 2
        FreeTextfpdIdevice.persistenceVersion = 8
        GalleryIdevice.persistenceVersion = 8
        ImageMagnifierIdevice.persistenceVersion = 4
        ListaIdevice.persistenceVersion = 5
        MultichoiceIdevice.persistenceVersion = 9
        GenericIdevice.persistenceVersion = 11
        MultiSelectIdevice.persistenceVersion = 1
        OrientacionesalumnadofpdIdevice.persistenceVersion = 9
        OrientacionestutoriafpdIdevice.persistenceVersion = 9
        ParasabermasfpdIdevice.persistenceVersion = 9
        QuizTestIdevice.persistenceVersion = 10
        RecomendacionfpdIdevice.persistenceVersion = 9
        ReflectionfpdIdevice.persistenceVersion = 9
        ReflectionfpdmodifIdevice.persistenceVersion = 9
        ReflectionIdevice.persistenceVersion = 8
        SeleccionmultiplefpdIdevice.persistenceVersion = 2
        TrueFalseIdevice.persistenceVersion = 11
        VerdaderofalsofpdIdevice.persistenceVersion = 12
        WikipediaIdevice.persistenceVersion = 9
        Package.persistenceVersion = 13
def extractNode(self):
    """
    Clones and extracts the currently selected node into a new package.

    Returns the new package; its real name is assigned when it is
    first saved.
    """
    extracted = Package('NoName')  # Name will be set once it is saved
    extracted.title = self.currentNode.title
    extracted.style = self.style
    extracted.author = self.author
    extracted._nextNodeId = self._nextNodeId
    # Clone the selected subtree and merge it into the new package root
    self.currentNode.copyToPackage(extracted)
    return extracted
@staticmethod
def load(filename, newLoad=True, destinationPackage=None, fromxml=None):
    """
    Load package from disk, returns a package.

    Returns None when 'filename' is not a zip file.  Prefers the XML
    serialisation ('contentv3.xml') when it is present and at least as
    new as the jellied 'content.data'; falls back to Common Cartridge /
    Content Package loading when neither is found.  When 'newLoad' is
    False the load is treated as a merge into 'destinationPackage'.
    """
    if not zipfile.is_zipfile(filename):
        return None
    zippedFile = zipfile.ZipFile(filename, "r")
    xml = None
    try:
        xml = zippedFile.read(u"contentv3.xml")
    except:
        # older packages have no XML serialisation; fall back below
        pass
    if not xml:
        try:
            # Get the jellied package data
            toDecode = zippedFile.read(u"content.data")
        except KeyError:
            log.info("no content.data, trying Common Cartridge/Content Package")
            newPackage = loadCC(zippedFile, filename)
            newPackage.tempFile = False
            newPackage.isChanged = False
            newPackage.filename = Path(filename)
            return newPackage
    # Need to add a TempDirPath because it is a nonpersistant member
    resourceDir = TempDirPath()
    # Extract resource files from package to temporary directory
    for fn in zippedFile.namelist():
        if unicode(fn, 'utf8') not in [u"content.data", u"content.xml", u"contentv2.xml", u"contentv3.xml", u"content.xsd" ]:
            #JR: create any sub-directory contained in the archive path
            if ("/" in fn):
                dir = fn[:fn.index("/")]
                Dir = Path(resourceDir/dir)
                if not Dir.exists():
                    Dir.mkdir()
            Fn = Path(resourceDir/fn)
            if not Fn.isdir():
                outFile = open(resourceDir/fn, "wb")
                outFile.write(zippedFile.read(fn))
                outFile.flush()
                outFile.close()
    try:
        validxml = False
        if fromxml:
            newPackage, validxml = decodeObjectFromXML(fromxml)
        elif xml:
            xmlinfo = zippedFile.getinfo(u"contentv3.xml")
            if u"content.data" not in zippedFile.NameToInfo:
                newPackage, validxml = decodeObjectFromXML(xml)
            else:
                # Both forms exist: trust the XML only if it is newer
                datainfo = zippedFile.getinfo(u"content.data")
                if xmlinfo.date_time >= datainfo.date_time:
                    newPackage, validxml = decodeObjectFromXML(xml)
        if not validxml:
            toDecode = zippedFile.read(u"content.data")
            newPackage = decodeObjectRaw(toDecode)
        try:
            # Pick up embedded LOM / LOM-ES metadata when present
            lomdata = zippedFile.read(u'imslrm.xml')
            if 'LOM-ES' in lomdata:
                importType = 'lomEs'
            else:
                importType = 'lom'
            setattr(newPackage, importType, lomsubs.parseString(lomdata))
        except:
            # metadata is optional; ignore a missing/broken imslrm.xml
            pass
        G.application.afterUpgradeHandlers = []
        newPackage.resourceDir = resourceDir
        G.application.afterUpgradeZombies2Delete = []
        if not validxml and (xml or fromxml or "content.xml" in zippedFile.namelist()):
            # Jelly fallback of an XML-era package: prune empty resource
            # entries and let resources clean up their own zombies
            for key, res in newPackage.resources.items():
                if len(res) < 1:
                    newPackage.resources.pop(key)
                else:
                    if (hasattr(res[0], 'testForAndDeleteZombieResources')):
                        res[0].testForAndDeleteZombieResources()
        if newLoad:
            # provide newPackage to doUpgrade's versionUpgrade() to
            # correct old corrupt extracted packages by setting the
            # any corrupt package references to the new package:
            #JR: sanitise the package name to avoid problematic characters
            import string
            validPackagenameChars = "-_. %s%s" % (string.ascii_letters, string.digits)
            newPackage._name = ''.join(c for c in newPackage._name if c in validPackagenameChars).replace(' ','_')
            #JR: fall back to a default name if sanitising left it empty
            if newPackage._name == "":
                newPackage._name = "invalidpackagename"
            log.debug("load() about to doUpgrade newPackage \""
                      + newPackage._name + "\" " + repr(newPackage) )
            if hasattr(newPackage, 'resourceDir'):
                log.debug("newPackage resourceDir = "
                          + newPackage.resourceDir)
            else:
                # even though it was just set above? should not get here:
                log.error("newPackage resourceDir has NO resourceDir!")
            doUpgrade(newPackage)
            # after doUpgrade, compare the largest found field ID:
            if G.application.maxFieldId >= Field.nextId:
                Field.nextId = G.application.maxFieldId + 1
            if hasattr(newPackage,'_docType'):
                common.setExportDocType(newPackage.docType)
            else:
                newPackage.set_docType(toUnicode('XHTML'))
        else:
            # and when merging, automatically set package references to
            # the destinationPackage, into which this is being merged:
            log.debug("load() about to merge doUpgrade newPackage \""
                      + newPackage._name + "\" " + repr(newPackage)
                      + " INTO destinationPackage \""
                      + destinationPackage._name + "\" "
                      + repr(destinationPackage))
            log.debug("using their resourceDirs:")
            if hasattr(newPackage, 'resourceDir'):
                log.debug(" newPackage resourceDir = "
                          + newPackage.resourceDir)
            else:
                log.error("newPackage has NO resourceDir!")
            if hasattr(destinationPackage, 'resourceDir'):
                log.debug(" destinationPackage resourceDir = "
                          + destinationPackage.resourceDir)
            else:
                log.error("destinationPackage has NO resourceDir!")
            doUpgrade(destinationPackage,
                      isMerge=True, preMergePackage=newPackage)
            # after doUpgrade, compare the largest found field ID:
            if G.application.maxFieldId >= Field.nextId:
                Field.nextId = G.application.maxFieldId + 1
    except:
        import traceback
        traceback.print_exc()
        raise
    if newPackage.tempFile:
        # newPackage.filename was stored as it's original filename
        newPackage.tempFile = False
    else:
        # newPackage.filename is the name that the package was last loaded from
        # or saved to
        newPackage.filename = Path(filename)
    # Repair any detectable inconsistencies before handing the package on
    checker = Checker(newPackage)
    inconsistencies = checker.check()
    for inconsistency in inconsistencies:
        inconsistency.fix()
    # Let idevices and nodes handle any resource upgrading they may need to
    # Note: Package afterUpgradeHandlers *must* be done after Resources'
    # and the package should be updated before everything else,
    # so, prioritize with a 3-pass, 3-level calling setup
    # in order of: 1) resources, 2) package, 3) anything other objects
    for handler_priority in range(3):
        for handler in G.application.afterUpgradeHandlers:
            if handler_priority == 0 and \
            repr(handler.im_class)=="<class 'exe.engine.resource.Resource'>":
                # level-0 handlers: Resource
                handler()
            elif handler_priority == 1 and \
            repr(handler.im_class)=="<class 'exe.engine.package.Package'>":
                # level-1 handlers: Package (requires resources first)
                if handler.im_self == newPackage:
                    handler()
                else:
                    log.warn("Extra package object found, " \
                             + "ignoring its afterUpgradeHandler: " \
                             + repr(handler))
            elif handler_priority == 2 and \
            repr(handler.im_class)!="<class 'exe.engine.resource.Resource'>" \
            and \
            repr(handler.im_class)!="<class 'exe.engine.package.Package'>":
                # level-2 handlers: all others
                handler()
    G.application.afterUpgradeHandlers = []
    num_zombies = len(G.application.afterUpgradeZombies2Delete)
    for i in range(num_zombies-1, -1, -1):
        zombie = G.application.afterUpgradeZombies2Delete[i]
        # now, the zombie list can contain nodes OR resources to delete.
        # if zombie is a node, then also pass in a pruning parameter..
        zombie_is_node = False
        if isinstance(zombie, Node):
            zombie_is_node = True
        if zombie_is_node:
            zombie.delete(pruningZombies=True)
        else:
            #JR: remove the resource from its idevice
            if hasattr(zombie._idevice, 'userResources'):
                for i in range(len(zombie._idevice.userResources)-1, -1, -1):
                    if hasattr(zombie._idevice.userResources[i], 'storageName'):
                        if zombie._idevice.userResources[i].storageName == zombie.storageName:
                            aux = zombie._idevice.userResources[i]
                            zombie._idevice.userResources.remove(aux)
                            # NOTE(review): `aux.delete` is an attribute
                            # access, not a call -- it does nothing.
                            # Probably meant `aux.delete()`; confirm
                            # against the commented-out cleanup below.
                            aux.delete
            #Remove the resource from the system resource table
            #for resource in newPackage.resources.keys():
            #    if hasattr(newPackage.resources[resource][0], 'storageName'):
            #        if newPackage.resources[resource][0].storageName == zombie.storageName:
            #            del newPackage.resources[resource]
            #JR: this is no longer needed
            #zombie.delete()
        del zombie
    userResourcesFiles = newPackage.getUserResourcesFiles(newPackage.root)
    #JR: remove resource files that are not referenced any more
    newPackage.cleanUpResources(userResourcesFiles)
    G.application.afterUpgradeZombies2Delete = []
    newPackage.updateRecentDocuments(newPackage.filename)
    newPackage.isChanged = False
    # Fall back to the default style when the saved one is not installed
    nstyle=Path(G.application.config.stylesDir/newPackage.style)
    if not nstyle.isdir():
        newPackage.style=G.application.config.defaultStyle
    newPackage.lang = newPackage._lang
    return newPackage
def getUserResourcesFiles(self, node):
    """
    Collect the storageName of every user resource attached to `node`
    and, recursively, to all of its descendant nodes.

    Returns a set of storage names.
    """
    names = set()
    for idevice in node.idevices:
        for resource in getattr(idevice, 'userResources', []):
            if hasattr(resource, 'storageName'):
                names.add(resource.storageName)
    for child in node.children:
        names |= self.getUserResourcesFiles(child)
    return names
def cleanUpResources(self, userResourcesFiles=None):
    """
    Removes duplicate resource files

    Deletes every file in the package resource directory that is neither
    referenced by self.resources nor listed in `userResourcesFiles`.

    :param userResourcesFiles: set of storage names to preserve
        (default: empty).  The default used to be the mutable literal
        ``set()``, shared across calls; it is now None to avoid that
        anti-pattern -- behaviour is unchanged.
    """
    if userResourcesFiles is None:
        userResourcesFiles = set()
    # Delete unused resources.
    # Only really needed for upgrading to version 0.20,
    # but upgrading of resources and package happens in no particular order
    # and must be done after all resources have been upgraded
    # some earlier .elp files appear to have been corrupted with
    # two packages loaded, *possibly* from some strange extract/merge
    # functionality in earlier eXe versions?
    # Regardless, only the real package will have a resourceDir,
    # and the other will fail.
    # For now, then, put in this quick and easy safety check:
    if not hasattr(self, 'resourceDir'):
        log.warn("cleanUpResources called on a redundant package")
        return
    existingFiles = set([fn.basename() for fn in self.resourceDir.files()])
    # Gather every storage name still referenced by the package
    usedFiles = set([])
    for reses in self.resources.values():
        if hasattr(reses[0], 'storageName'):
            usedFiles.add(reses[0].storageName)
    for fn in existingFiles - usedFiles - userResourcesFiles:
        log.debug('Removing unused resource %s' % fn)
        (self.resourceDir/fn).remove()
def findResourceByName(self, queryName):
    """
    Support for merging, and anywhere else that unique names might be
    checked before actually comparing against the files (as will be
    done by the resource class itself in its _addOurselvesToPackage() )

    Returns the first resource whose storageName equals `queryName`,
    or None when nothing matches.
    """
    for candidates in self.resources.values():
        for resource in candidates:
            if resource.storageName == queryName:
                return resource
    return None
def upgradeToVersion1(self):
    """
    Called to upgrade from 0.3 release

    Introduces the node-id registry and registers every existing node,
    plus the draft/editor helper nodes that 0.3 packages lacked ids for.
    """
    self._nextNodeId = 0
    self._nodeIdDict = {}
    # Also upgrade all the nodes.
    # This needs to be done here so that draft gets id 0
    # If it's done in the nodes, the ids are assigned in reverse order
    draft = getattr(self, 'draft')
    draft._id = self._regNewNode(draft)
    draft._package = self
    setattr(self, 'editor', Node(self, None, _(u"iDevice Editor")))
    # Add a default idevice to the editor
    idevice = GenericIdevice("", "", "", "", "")
    editor = getattr(self, 'editor')
    idevice.parentNode = editor
    editor.addIdevice(idevice)
    def superReg(node):
        """Registers all our nodes
        because in v0 they were not registered
        in this way"""
        node._id = self._regNewNode(node)
        node._package = self
        for child in node.children:
            superReg(child)
    superReg(self.root)
def _regNewNode(self, node):
    """
    Called only by nodes.

    Stores `node` in the package's id lookup dict and returns the newly
    allocated unique id (a unicode string).
    """
    new_id = unicode(self._nextNodeId)
    self._nodeIdDict[new_id] = node
    self._nextNodeId += 1
    return new_id
def getNewIdeviceId(self):
    """
    Returns an iDevice Id which is unique for this package.
    """
    allocated = self._nextIdeviceId
    self._nextIdeviceId = allocated + 1
    return unicode(allocated)
def upgradeToVersion2(self):
    """
    Called to upgrade from 0.4 release

    Drops the obsolete draft and iDevice-editor helper nodes and
    renumbers the remaining node tree from zero.
    """
    getattr(self, 'draft').delete()
    getattr(self, 'editor').delete()
    delattr(self, 'draft')
    delattr(self, 'editor')
    # Need to renumber nodes because idevice node and draft nodes are gone
    self._nextNodeId = 0
    def renumberNode(node):
        """
        Gives the old node a number
        """
        node._id = self._regNewNode(node)
        for child in node.children:
            renumberNode(child)
    renumberNode(self.root)
def upgradeToVersion3(self):
    """
    Also called to upgrade from 0.4 release.

    Starts the per-package iDevice id counter at zero.
    """
    setattr(self, '_nextIdeviceId', 0)
def upgradeToVersion4(self):
    """
    Puts properties in their place
    Also called to upgrade from 0.8 release

    Moves name/author/description to their private backing attributes,
    converting each to unicode on the way.
    """
    for attr in ('name', 'author', 'description'):
        setattr(self, '_' + attr, toUnicode(self.__dict__[attr]))
def upgradeToVersion5(self):
    """
    For version 0.11

    Renames the instance attribute `levelNames` to `_levelNames`.
    """
    self._levelNames = getattr(self, 'levelNames')
    delattr(self, 'levelNames')
def upgradeToVersion6(self):
    """
    For version 0.14

    Adds a Dublin Core metadata record seeded from the package's own
    title/author/description, and the scolinks flag.
    """
    self.dublinCore = DublinCore()
    # Copy some of the package properties to dublin core
    self.title = self.root.title
    self.dublinCore.title = self.root.title
    self.dublinCore.creator = self._author
    self.dublinCore.description = self._description
    self.scolinks = False
def upgradeToVersion7(self):
    """
    For version 0.15

    Adds background-image support: no image, tiling disabled.
    """
    self._backgroundImg, self.backgroundImgTile = '', False
def upgradeToVersion8(self):
    """
    For version 0.20, alpha, for nightlies r2469

    Adds license, footer and the package-level idevice list.
    """
    self.license, self.footer, self.idevices = 'None', "", []
def upgradeToVersion9(self):
    """
    For version >= 0.20.4

    Ensures the resource registry exists and schedules a post-upgrade
    cleanup of unused resource files.
    """
    if not hasattr(self, 'resources'):
        # The hasattr is needed, because sometimes, Resource instances are upgraded
        # first and they also set this attribute on the package
        self.resources = {}
    G.application.afterUpgradeHandlers.append(self.cleanUpResources)
def lomDefaults(self, entry, schema, rights=False):
    """
    Build the default LOM metadata dictionary for this package.

    :param entry: catalogue entry identifier (typically a UUID string)
    :param schema: metadata schema name, e.g. 'LOMv1.0' or 'LOM-ESv1.0'
    :param rights: when True, also include a default 'rights' section
    :returns: nested dict suitable for lomsubs addChilds()
    """
    general = {
        'identifier': [{'catalog': c_('My Catalog'), 'entry': entry}],
        'aggregationLevel': {'source': schema, 'value': '2'},
    }
    defaults = {
        'general': general,
        'metaMetadata': {'metadataSchema': [schema]},
    }
    if rights:
        access = {
            'accessType': {'source': schema, 'value': 'universal'},
            'description': {'string': [{'valueOf_': c_('Default'),
                                        'language': str(self.lang)}]},
        }
        defaults['rights'] = {'access': access}
    return defaults
# Mapping from the pre-2.0 license strings to the identifiers used by
# newer package versions; consumed by upgradeToVersion10() below.
oldLicenseMap = {"None": "None",
                 "GNU Free Documentation License": u"license GFDL",
                 "Creative Commons Attribution 3.0 License": u"creative commons: attribution 3.0",
                 "Creative Commons Attribution Share Alike 3.0 License": u"creative commons: attribution - share alike 3.0",
                 "Creative Commons Attribution No Derivatives 3.0 License": u"creative commons: attribution - non derived work 3.0",
                 "Creative Commons Attribution Non-commercial 3.0 License": u"creative commons: attribution - non commercial 3.0",
                 "Creative Commons Attribution Non-commercial Share Alike 3.0 License": u"creative commons: attribution - non commercial - share alike 3.0",
                 "Creative Commons Attribution Non-commercial No Derivatives 3.0 License": u"creative commons: attribution - non derived work - non commercial 3.0",
                 "Creative Commons Attribution 2.5 License": u"creative commons: attribution 2.5",
                 "Creative Commons Attribution-ShareAlike 2.5 License": u"creative commons: attribution - share alike 2.5",
                 "Creative Commons Attribution-NoDerivs 2.5 License": u"creative commons: attribution - non derived work 2.5",
                 "Creative Commons Attribution-NonCommercial 2.5 License": u"creative commons: attribution - non commercial 2.5",
                 "Creative Commons Attribution-NonCommercial-ShareAlike 2.5 License": u"creative commons: attribution - non commercial - share alike 2.5",
                 "Creative Commons Attribution-NonCommercial-NoDerivs 2.5 License": u"creative commons: attribution - non derived work - non commercial 2.5",
                 "Developing Nations 2.0": u""
                 }
def upgradeToVersion10(self):
    """
    For version >= 2.0

    Backfills every attribute introduced by the 2.0 series (language,
    LOM/LOM-ES metadata, export flags, pedagogical fields, mxml options)
    on packages that predate them, then re-runs the title/author/
    description setters so dependent state is refreshed.
    """
    if not hasattr(self, 'lang'):
        self._lang = G.application.config.locale.split('_')[0]
    entry = str(uuid.uuid4())
    if not hasattr(self, 'lomEs') or not isinstance(self.lomEs, lomsubs.lomSub):
        self.lomEs = lomsubs.lomSub.factory()
        self.lomEs.addChilds(self.lomDefaults(entry, 'LOM-ESv1.0', True))
    if not hasattr(self, 'lom') or not isinstance(self.lom, lomsubs.lomSub):
        self.lom = lomsubs.lomSub.factory()
        self.lom.addChilds(self.lomDefaults(entry, 'LOMv1.0'))
    if not hasattr(self, 'scowsinglepage'):
        self.scowsinglepage = False
    if not hasattr(self, 'scowwebsite'):
        self.scowwebsite = False
    if not hasattr(self, 'exportSource'):
        self.exportSource = True
    if not hasattr(self, 'exportMetadataType'):
        self.exportMetadataType = "LOMES"
    # NOTE: the hasattr checks below test the public property name while
    # assigning the private backing attribute -- presumably these are
    # properties on the class; pattern kept as-is.
    if not hasattr(self, 'objectives'):
        self._objectives = u''
    if not hasattr(self, 'preknowledge'):
        self._preknowledge = u''
    if not hasattr(self, 'learningResourceType'):
        self._learningResourceType = u''
    if not hasattr(self, 'intendedEndUserRoleType'):
        self._intendedEndUserRoleType = u''
    if not hasattr(self, 'intendedEndUserRoleGroup'):
        self._intendedEndUserRoleGroup = False
    if not hasattr(self, 'intendedEndUserRoleTutor'):
        self._intendedEndUserRoleTutor = False
    if not hasattr(self, 'contextPlace'):
        self._contextPlace = u''
    if not hasattr(self, 'contextMode'):
        self._contextMode = u''
    if hasattr(self, 'scowsource'):
        del self.scowsource
    try:
        # NOTE(review): this assigns `newlicense` rather than updating
        # `license` -- looks suspicious; confirm against consumers.
        if not self.license in self.oldLicenseMap.values():
            self.newlicense = self.oldLicenseMap[self.license]
    except:
        self.license = u''
    if not hasattr(self, 'mxmlprofilelist'):
        self.mxmlprofilelist = ""
    if not hasattr(self, 'mxmlforcemediaonly'):
        self.mxmlforcemediaonly = False
    if not hasattr(self, 'mxmlheight'):
        self.mxmlheight = ""
    if not hasattr(self, 'mxmlwidth'):
        self.mxmlwidth = ""
    if not hasattr(self, 'compatibleWithVersion9'):
        self.compatibleWithVersion9 = False
    # Re-run the setters so any derived metadata is refreshed
    self.set_title(self._title)
    self.set_author(self._author)
    self.set_description(self._description)
def upgradeToVersion11(self):
    """No-op: version 11 was used by exe-next-gen and needs no changes here."""
    pass
def upgradeToVersion12(self):
    """Re-apply the 9 and 10 upgrades (version 11 belonged to exe-next-gen)."""
    #because actually version 11 was exe-next-gen
    self.upgradeToVersion9()
    self.upgradeToVersion10()
def upgradeToVersion13(self):
    """
    Ensure the package records an export document type, defaulting to
    the application-wide configured value.
    """
    if hasattr(self, '_docType'):
        return
    self._docType = G.application.config.docType
def downgradeToVersion9(self):
    """
    Strip every >= 2.0 attribute and lower the persistence version of
    the package and all idevice/field classes so that the saved file can
    be opened by eXe 0.9-era releases.  doSave() restores everything
    afterwards via upgradeToVersion10().
    """
    for attr in ['lomEs', 'lom', 'scowsinglepage', 'scowwebsite',
                 'exportSource', 'exportMetadataType', '_lang',
                 '_objectives', '_preknowledge', '_learningResourceType',
                 '_intendedEndUserRoleType', '_intendedEndUserRoleGroup',
                 '_intendedEndUserRoleTutor', '_contextPlace',
                 '_contextMode', 'scowsource', 'mxmlprofilelist',
                 'mxmlforcemediaonly', 'mxmlheight', 'mxmlwidth']:
        if hasattr(self, attr):
            delattr(self, attr)
    self.license = u''
    # Persistence versions as of the 0.9-compatible format
    CasestudyIdevice.persistenceVersion = 8
    CasopracticofpdIdevice.persistenceVersion = 7
    CitasparapensarfpdIdevice.persistenceVersion = 7
    ClozefpdIdevice.persistenceVersion = 4
    ClozeIdevice.persistenceVersion = 4
    ClozelangfpdIdevice.persistenceVersion = 4
    DebesconocerfpdIdevice.persistenceVersion = 7
    DestacadofpdIdevice.persistenceVersion = 7
    EjercicioresueltofpdIdevice.persistenceVersion = 8
    EleccionmultiplefpdIdevice.persistenceVersion = 7
    TextAreaField.persistenceVersion = 1
    FreeTextfpdIdevice.persistenceVersion = 7
    GalleryIdevice.persistenceVersion = 7
    ImageMagnifierIdevice.persistenceVersion = 2
    ListaIdevice.persistenceVersion = 4
    MultichoiceIdevice.persistenceVersion = 7
    GenericIdevice.persistenceVersion = 9
    # These two classes had no persistenceVersion at all in the old format
    delattr(MultiSelectIdevice, "persistenceVersion")
    OrientacionesalumnadofpdIdevice.persistenceVersion = 7
    OrientacionestutoriafpdIdevice.persistenceVersion = 7
    ParasabermasfpdIdevice.persistenceVersion = 7
    QuizTestIdevice.persistenceVersion = 8
    RecomendacionfpdIdevice.persistenceVersion = 7
    ReflectionfpdIdevice.persistenceVersion = 7
    ReflectionfpdmodifIdevice.persistenceVersion = 7
    ReflectionIdevice.persistenceVersion = 7
    delattr(SeleccionmultiplefpdIdevice, "persistenceVersion")
    TrueFalseIdevice.persistenceVersion = 9
    VerdaderofalsofpdIdevice.persistenceVersion = 9
    WikipediaIdevice.persistenceVersion = 8
    Package.persistenceVersion = 9
def getExportDocType(self):
    """Return the document type used for exports (e.g. 'XHTML')."""
    return self._docType
def delNotes(self, node):
    """
    Delete all notes

    Recursively removes every idevice of class 'NotaIdevice' from
    `node` and all of its descendants.
    """
    # Iterate over a copy: idevice.delete() may remove the idevice from
    # node.idevices, and mutating the list while iterating it would
    # silently skip the following element.
    for idevice in list(node.idevices):
        if idevice.klass == 'NotaIdevice':
            idevice.delete()
    for child in node.children:
        self.delNotes(child)
|
# Machine-generated UI translation catalogue (English -> Arabic).
# Values are UTF-8 byte sequences written as escaped string literals.
# NOTE(review): the module-level name `dict` shadows the builtin; it is
# kept unchanged because external consumers appear to look it up by name.
encoding = 'utf-8'
dict = {
    '&About...': '&\xd8\xb9\xd9\x86...',
    '&Delete Window': '&\xd8\xa7\xd8\xad\xd8\xb0\xd9\x81 \xd8\xa7\xd9\x84\xd9\x86\xd8\xa7\xd9\x81\xd8\xb0\xd8\xa9',
    '&Describe Action': '&\xd8\xa3\xd9\x88\xd8\xb5\xd9\x81 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x85\xd9\x84\xd9\x8a\xd8\xa9',
    '&Execute Action': '&\xd9\x86\xd9\x81\xd8\xb0 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x85\xd9\x84\xd9\x8a\xd8\xa9',
    '&Folding': '&\xd8\xa7\xd9\x84\xd8\xb7\xd9\x8a',
    '&Help': '&\xd9\x85\xd8\xb3\xd8\xa7\xd8\xb9\xd8\xaf\xd8\xa9',
    '&Line Numbers': '&\xd8\xb9\xd8\xaf\xd8\xaf \xd8\xa7\xd9\x84\xd8\xb3\xd8\xb7\xd9\x88\xd8\xb1',
    '&New Window': '&\xd9\x86\xd8\xa7\xd9\x81\xd8\xb0\xd8\xa9 \xd8\xac\xd8\xaf\xd9\x8a\xd8\xaf\xd8\xa9',
    '&Preferences...': '&\xd8\xa7\xd9\x84\xd8\xaa\xd9\x81\xd8\xb6\xd9\x8a\xd9\x84\xd8\xa7\xd8\xaa...',
    '&Revert': '&\xd8\xa5\xd8\xb3\xd8\xaa\xd8\xb1\xd8\xac\xd8\xb9',
    '&Save...': '&\xd8\xad\xd9\x81\xd8\xb8...',
    '&Show Toolbars': '&\xd8\xb9\xd8\xb1\xd8\xb6 \xd8\xb4\xd8\xb1\xd9\x8a\xd8\xb7 \xd8\xa7\xd9\x84\xd8\xa3\xd8\xaf\xd9\x88\xd8\xa7\xd8\xa9',
    '&Word Count': '&\xd8\xb9\xd8\xaf \xd8\xa7\xd9\x84\xd9\x83\xd9\x84\xd9\x85\xd8\xa7\xd8\xaa',
    'About this program': '\xd8\xad\xd9\x88\xd9\x92\xd9\x84 \xd9\x87\xd8\xb0\xd8\xa7 \xd8\xa7\xd9\x84\xd8\xa8\xd8\xb1\xd9\x86\xd8\xa7\xd9\x85\xd8\xac',
    'Actions': '\xd8\xa5\xd8\xac\xd8\xb1\xd8\xa7\xd8\xa1\xd8\xa7\xd8\xaa',
    'Attributes': '\xd8\xa7\xd9\x84\xd8\xb5\xd9\x91\xd9\x81\xd8\xa7\xd8\xaa',
    'Background': '\xd8\xa7\xd9\x84\xd8\xae\xd9\x84\xd9\x81\xd9\x8a\xd9\x91\xd8\xa9',
    'Cancel': '\xd8\xa5\xd9\x84\xd8\xba\xd8\xa7\xef\xba\x80',
    'Case': '\xd8\xa7\xd9\x84\xd8\xad\xd8\xa7\xd9\x84\xd8\xa9',
    'Clear Playlist': '\xd9\x85\xd8\xb3\xd8\xad \xd9\x82\xd8\xa7\xd8\xa6\xd9\x85\xd8\xa9 \xd8\xa7\xd9\x84\xd8\xaa\xd8\xb4\xd8\xba\xd9\x8a\xd9\x84',
    'Close Tab': '\xd8\xa3\xd8\xba\xd9\x84\xd9\x82 \xd8\xa7\xd9\x84\xd9\x84\xd8\xb3\xd8\xa7\xd9\x86',
    'Close the current tab': '\xd8\xa3\xd8\xba\xd9\x84\xd9\x82 \xd8\xa7\xd9\x84\xd9\x84\xd8\xb3\xd8\xa7\xd9\x86 \xd8\xa7\xd9\x84\xd8\xad\xd8\xa7\xd9\x84\xd9\x8a',
    'Color': '\xd8\xa7\xd9\x84\xd9\x84\xd9\x88\xd9\x86',
    'Contrast': '\xd8\xa7\xd9\x84\xd8\xaa\xd8\xa8\xd8\xa7\xd9\x8a\xd9\x86',
    'Copy': '\xd9\x86\xd8\xb3\xd8\xae',
    'Cut': '\xd9\x82\xd8\xb5',
    'Debug': '\xd8\xaa\xd9\x86\xd9\x82\xd9\x8a\xd8\xad',
    'Documents': '\xd8\xa7\xd9\x84\xd9\x85\xd8\xb3\xd8\xaa\xd9\x86\xd8\xaf\xd8\xa7\xd8\xaa',
    'E&xit': '&\xd8\xae\xd8\xb1\xd9\x88\xd8\xac',
}
|
import sys
from starstoloves.models import User as UserModel
from starstoloves import model_repository
from starstoloves.lib.track import lastfm_track_repository
from .user import User
def from_session_key(session_key):
    """
    Return a User wrapper bound to `session_key`, creating the backing
    UserModel row if it does not exist yet.
    """
    # Ensure the model row exists; the returned (model, created) pair is
    # not needed here, so the previously unused binding was dropped.
    UserModel.objects.get_or_create(session_key=session_key)
    return User(
        session_key=session_key,
        repository=sys.modules[__name__],
    )
def delete(user):
    """
    Delete the backing UserModel row for `user`.

    Users without a corresponding row are silently ignored.
    """
    try:
        user_model = model_repository.from_user(user)
        user_model.delete()
    except UserModel.DoesNotExist:
        pass
|
# -*- coding: utf-8 -*-
"""
Translate a variable to and from a formatted string representation that
follows fixed separator rules.
for example:
    raw input:
        v={'aa': 12345, 'bbbb': [1, 2, 3, 4, {'flag': 'vvvv||||xxxxx'}, set(['y', 'x', 'z'])]}
    after `var2str.var2str(v)`
        v_str=<aa::12345##bbbb::<1||2||3||4||<flag::vvvv|xxxxx>||<y|||x|||z>>>
    then reverse back: `var2str.str2var(v_str)`
        v_var={'aa': '12345', 'bbbb': ['1', '2', '3', '4', {'flag': 'vvvv|xxxxx'}, set(['y', 'x', 'z'])]}
NOTATION:
    1, KEYs of a DICT should be strings.
    2, SET and TUPLE are automatically transformed to LIST.
    3, INT/FLOAT/LONG etc. are automatically transformed to STRING.
    4, SEPARATORS are replaced with '' when they occur inside character data.
"""
import types
# Separator tokens used by the var2str encoding.  They must never appear
# inside user data; erase_sep() strips them from every scalar value.
sep_dict = {
    "dict_sep": "##",     # seperator of elements of dict
    "dict_k_v_sep": "::", # k::v
    "list_sep": "||",     # list seperator
    "set_sep": "|||",     # set seperator
    "tuple_sep": "||"     # tuple seperator
}
sep_nest = ("<", ">")  # better not repeated char, e.x. ("<-", "->")
sep_values = sep_dict.values()
def erase_sep(s):
    """
    Strip every separator token and nesting tag from `s` so scalar data
    can never collide with the encoding's structural characters.
    """
    for token in list(sep_values) + list(sep_nest):
        s = s.replace(token, "")
    return s
# Shorthand for the opening/closing nesting tags
_s=sep_nest[0]
_e=sep_nest[1]
class var2str(object):
    """
    Static helpers that serialise a nested python variable (dict / list /
    set / tuple / scalars) into a separator-delimited string and parse it
    back.  See the module docstring for the wire format.
    """
    @staticmethod
    def var2str(var):
        """
        Encode `var` recursively.  Scalars are stringified with the
        separators stripped; containers are joined with their separator
        and wrapped in the nesting tags.
        NOTE(review): any falsy input (including 0 and empty containers)
        encodes to "" -- confirm this is intended.
        """
        if not var: return ""
        if type(var) == types.DictType:
            result = []
            for key,value in var.items():
                v_str = var2str.var2str(value)
                k_str = erase_sep("{0}".format(key))
                result.append("{key}{sep}{value}".format(
                    key=k_str,
                    sep=sep_dict["dict_k_v_sep"],
                    value=v_str))
            return _s+sep_dict["dict_sep"].join(result)+_e
        elif type(var) == types.ListType:
            result = [var2str.var2str(v) for v in var]
            return _s+sep_dict["list_sep"].join(result)+_e
        elif type(var) == type(set([])):
            result = [var2str.var2str(v) for v in var]
            return _s+sep_dict["set_sep"].join(result)+_e
        elif type(var) == types.TupleType:
            # tuples share the list separator, so they decode as lists
            result = [var2str.var2str(v) for v in var]
            return _s+sep_dict["tuple_sep"].join(result)+_e
        elif type(var) in [types.StringType,
                           types.IntType,
                           types.LongType,
                           types.FloatType]:
            return erase_sep("{0}".format(var))
        else:
            raise TypeError("Type is not supported. var: {0}, type: {1}".format(
                var, type(var)))
    @staticmethod
    def str2var(value):
        """
        Decode an encoded string back into a variable.  Non-encoded
        input (no nesting tags) is returned unchanged.
        """
        # determine the type of the outermost nested element
        if NestType.is_nest_type(value, _s, _e):
            _var = NestType(value)
            _var.replace_nest_vars()
            var = _var.parse_var()
            # Recursively decode nested placeholders inside containers
            if type(var) == types.DictType:
                for k, v in var.items():
                    if type(v)==NestType:
                        var[k] = var2str.str2var(str(v))
            if type(var) == types.ListType:
                for n, v in enumerate(var):
                    if type(v) == NestType:
                        var[n] = var2str.str2var(str(v))
            if type(var) == type(set()):
                # because element in set must be hashable, so there is no meaning
                # for parsing nested containers inside a set
                pass
            return var
        else:
            return value
class NestType(object):
    """
    Represents one '<'...'>' delimited segment of an encoded string.

    replace_nest_vars() substitutes every top-level nested segment with a
    placeholder token (recorded in self.nest_dic) so parse_var() can split
    the remaining flat string on the separator tokens.
    """
    def __init__(self, s, s_tag=_s, e_tag=_e):
        self.value = str(s)
        self.s_tag = s_tag
        self.e_tag = e_tag
        # Set by replace_nest_vars(): value with nested segments replaced
        # by placeholder strings
        self.replace_s = None
    @staticmethod
    def is_nest_type(value, s_tag, e_tag):
        """Return 1 when `value` is wrapped in the nesting tags, else 0."""
        if (not value.startswith(s_tag) or
            not value.endswith(e_tag)):
            return 0
        return 1
    def _get_obj_str(self, var):
        # Placeholder token standing in for a nested NestType instance
        return "[NestType]"+str(hash(var))
    def has_nest_element(self):
        if self.replace_s is None:
            self.replace_nest_vars()
        # Bug fix: the attribute was misspelled `repalce_s`, which raised
        # AttributeError whenever this method ran.
        # NOTE(review): comparing against the untrimmed value looks
        # suspicious (replace_s has the outer tags stripped) -- confirm.
        return self.replace_s == self.value
    def _replace_nest_var(self, s, nest_dic=None):
        """
        Replace the first top-level '<...>' segment of `s` with a
        placeholder, recording the segment in `nest_dic`.  Returns `s`
        unchanged when it contains no complete nested segment.
        """
        # Avoid the shared mutable-default-argument pitfall
        if nest_dic is None:
            nest_dic = {}
        s_len = len(s)
        tag_index = 0
        s_tag_len, e_tag_len = len(self.s_tag), len(self.e_tag)
        nest_index = []
        for i in range(s_len):
            if s[i:i+s_tag_len] == self.s_tag:
                tag_index += 1
                if tag_index == 1: nest_index.append(i)
            if s[i:i+e_tag_len] == self.e_tag:
                tag_index -= 1
                if tag_index == 0: nest_index.append(i)
            if len(nest_index) == 2: break
        if len(nest_index) < 2: return s
        nest_index_s = nest_index[0]
        nest_index_e = nest_index[1] + e_tag_len
        nest_str = s[nest_index_s:nest_index_e]
        nest_var = NestType(nest_str, s_tag=self.s_tag, e_tag=self.e_tag)
        nest_var_str = self._get_obj_str(nest_var)
        nest_dic[nest_var_str] = nest_var
        return s[0:nest_index_s] + nest_var_str + s[nest_index_e:]
    def replace_nest_vars(self):
        """
        Strip the outer tags and substitute every top-level nested
        segment with a placeholder; fills self.replace_s / self.nest_dic.
        """
        nest_dic = {}
        if not NestType.is_nest_type(self.value, self.s_tag, self.e_tag):
            raise Exception(
                "[ERROR] `{0}` does not match NestType format".format(self.value))
        s = _trim_tag(self.value, self.s_tag, self.e_tag)
        while 1:
            replace_s = self._replace_nest_var(s, nest_dic)
            if replace_s == s: break
            s = replace_s
        self.replace_s = replace_s
        self.nest_dic = nest_dic
    def parse_var(self):
        """string `replace_s` has no NestType at all; split it on the
        separator tokens and map placeholders back to their NestType."""
        s = self.replace_s
        var = None
        dict_sep = sep_dict["dict_sep"]
        dict_k_v_sep = sep_dict["dict_k_v_sep"]
        list_sep = sep_dict["list_sep"]
        set_sep = sep_dict["set_sep"]
        if dict_k_v_sep in s: # dict
            var = {}
            items = s.split(dict_sep)
            for item in items:
                if not item: continue
                k,v=item.split(dict_k_v_sep)
                var[k] = self.nest_dic.get(v, v)
        elif set_sep in s:
            # test "|||" before "||": the set separator contains the
            # list separator
            var = set([self.nest_dic.get(t, t) for t in s.split(set_sep)])
        elif list_sep in s:
            var = [self.nest_dic.get(t, t) for t in s.split(list_sep)]
        else:
            # just one string
            var = s
        return var
    def __str__(self):
        return self.value
    def __unicode__(self):
        return self.value
def _trim_tag(str, s, e):
"""trim the `str` off start `s` and end `e`"""
return str[len(s):(len(str)-len(e))]
def test():
    """Round-trip smoke test: encode a nested structure and decode it back."""
    a = {"aa": 12345, "bbbb":[1,2,3,4,{'flag':"vvvv||||世界是我的"},set(['x', 'y','z'])]}
    #a = {}
    print a
    a_str = var2str.var2str(a)
    print ">>", a_str
    a_var = var2str.str2var(a_str)
    print ">>", a_var
if __name__ == "__main__":
    test()
|
from triple_draw_poker.model.Pot import Pot
class HandDetails:
    """
    Mutable per-hand state for a triple-draw poker game: the pot, the
    betting-street counters, the draw flag and the players' hands.
    """
    def __init__(self):
        self.pot = Pot()
        self.raised = 0             # number of raises so far
        self.street = 0             # current betting round
        self.number_of_streets = 4
        self.in_draw = False        # True while a draw phase is running
        self.hands = []             # one entry per player
        self.dealt_cards_index = 0  # position of the next card to deal

    def getDealtCardsIndex(self):
        # Bug fix: previously returned the bare name `dealt_cards_index`
        # (a NameError at runtime) instead of the instance attribute.
        return self.dealt_cards_index

    def getHands(self):
        return self.hands

    def getPot(self):
        return self.pot

    def getRaised(self):
        return self.raised

    def getStreet(self):
        return self.street

    def getStreetPremium(self):
        """Bet sizing premium: 2 on early streets (0-2), 1 afterwards."""
        if self.street < 3:
            return 2
        return 1

    def getNumberOfStreets(self):
        return self.number_of_streets

    def getInDraw(self):
        return self.in_draw

    def setDealtCardsIndex(self, index):
        self.dealt_cards_index = index

    def addHand(self, hand):
        self.hands.append(hand)

    def incrementRaised(self):
        self.raised += 1

    def incrementStreet(self):
        self.street += 1

    def changeInDraw(self):
        # Toggle between betting and drawing phases
        self.in_draw = not self.in_draw
|
from src.tools.enum import enum
import pyxbmct.addonwindow as pyxbmct
from src.tools.dialog import dialog
# Click behaviour for EnumButton: SELECT pops a chooser dialog,
# ROTATE cycles to the next value on each click.
EnumMode = enum(SELECT=0, ROTATE=1)
class EnumButton(object):
    """
    A pyxbmct button that lets the user pick one value out of a fixed
    list, either via a selection dialog (EnumMode.SELECT) or by cycling
    through the values on each click (EnumMode.ROTATE).

    `changeCallback` fires on every assignment, `saveCallback` only from
    onSave(); both receive either the value or its index depending on
    `returnValue`.  `customLabels` optionally maps each value to the
    label shown on the button.
    """
    def __init__(self, label, values, current, default, changeCallback=None, saveCallback=None, customLabels=None, mode=EnumMode.SELECT, returnValue=False, alignment=pyxbmct.ALIGN_CENTER):
        self.label = label
        self.values = values
        self.customLabels = customLabels
        self.mode = mode
        self.returnValue = returnValue
        self.changeCallback = changeCallback
        self.saveCallback = saveCallback
        self.currentValue = current
        self.defaultValue = default
        self.currentIndex = None   # resolved lazily by _findCurrentIndex()
        self.defaultIndex = None   # resolved lazily by _findDefaultIndex()
        self.assignedValue = False # True once the user changed the value
        # Shadow the onSave method on the instance when there is nothing
        # to save, so callers can treat self.onSave as optional
        if saveCallback is None:
            self.onSave = None
        if customLabels:
            self._findCurrentIndex()
            label = str(customLabels[self.currentIndex])
        else:
            label = str(current)
        if alignment is not None:
            self.button = pyxbmct.Button(label, alignment=alignment)
        else:
            self.button = pyxbmct.Button(label)
    def update(self, value):
        """Externally set the current value and refresh the button label."""
        if self.currentValue != value:
            self.currentValue = value
            if self.customLabels:
                self._findCurrentIndex()
                label = str(self.customLabels[self.currentIndex])
            else:
                # Index unknown for a value assigned from outside
                self.currentIndex = None
                label = str(value)
            self.button.setLabel(label)
            self.assignedValue = True
    def onClick(self):
        """Handle a button click according to the configured mode."""
        if self.mode == EnumMode.SELECT:
            if self.customLabels:
                values = self.customLabels
            else:
                values = self.values
            selectedIndex = dialog.select(self.label, list((str(value) for value in values)))
            # -1 means the user dismissed the dialog
            if selectedIndex == -1:
                return
            index = selectedIndex
        else:
            # ROTATE: step to the next value, wrapping at the end
            if self.currentIndex is None:
                self._findCurrentIndex()
            if self.currentIndex == len(self.values) - 1:
                index = 0
            else:
                index = self.currentIndex + 1
        self.assign(index)
    def onDefault(self):
        """Reset the button to its default value."""
        if self.defaultIndex is None:
            self._findDefaultIndex()
        self.assign(self.defaultIndex)
    def onSave(self):
        """Invoke saveCallback with the current value (or index)."""
        if self.assignedValue:
            if self.returnValue:
                self.saveCallback(self.currentValue)
            else:
                self.saveCallback(self.currentIndex)
    def assign(self, index):
        """Make `values[index]` current, update the label, notify changeCallback."""
        value = self.values[index]
        self.currentIndex = index
        self.currentValue = value
        if self.customLabels:
            label = str(self.customLabels[index])
        else:
            label = str(value)
        self.button.setLabel(label)
        self.assignedValue = True
        if self.changeCallback:
            if self.returnValue:
                self.changeCallback(value)
            else:
                self.changeCallback(index)
    def _findDefaultIndex(self):
        # Linear scan; with duplicate values the LAST match wins
        for i in range(0, len(self.values)):
            value = self.values[i]
            if value == self.defaultValue:
                self.defaultIndex = i
        if self.defaultIndex is None:
            raise ValueError ('Default value not found in value list')
    def _findCurrentIndex(self):
        # Linear scan; with duplicate values the LAST match wins
        for i in range(0, len(self.values)):
            value = self.values[i]
            if value == self.currentValue:
                self.currentIndex = i
        if self.currentIndex is None:
            raise ValueError ('Current value not found in value list')
|
import math
"""
Speed of light constant
"""
c = 3E8
"""
Vacuum permittivity
"""
e0 = 8.8541E-12
"""
Vacuum permeability
"""
u0 = 4E-7*math.pi
def getEffectivePermitivity(WHratio, er):
    """
    Returns the effective permitivity for a given W/H ratio.

    This function assumes that the thickenss of conductors is insignificant.

    Parameters:
    - `WHratio` : W/H ratio.
    - `er` : Relative permitivity of the dielectric.
    """
    # Float literals guard against Python 2 integer (floor) division when
    # callers pass integer er/WHratio, which silently truncated the result.
    if WHratio <= 1:
        return (er + 1)/2.0 + ((1 + 12.0/WHratio)**(-0.5) + 0.04*(1 - WHratio)**2)*(er - 1)/2.0
    else:
        return (er + 1)/2.0 + ((1 + 12.0/WHratio)**(-0.5))*(er - 1)/2.0
def getAuxVarA(Zo,er):
    """
    Returns the auxiliary variable
    A = (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
    This function assumes that the thickenss of conductors is insignificant.
    Parameters:
    - `Zo` : Real impedance of the line.
    - `er` : Relative permitivity of the dielectric.
    """
    impedance_term = (Zo)/60 * math.sqrt((er + 1)/2)
    permitivity_term = (er-1)/(er+1)*(0.23+0.11/er)
    return impedance_term + permitivity_term
def getAuxVarB(Zo,er):
    """
    Returns the auxiliary variable
    B = (377*math.pi)/(2*Zo*math.sqrt(er))
    This function assumes that the thickenss of conductors is insignificant.
    Parameters:
    - `Zo` : Real impedance of the line.
    - `er` : Relative permitivity of the dielectric.
    """
    numerator = 377*math.pi
    denominator = 2*Zo*math.sqrt(er)
    return numerator/denominator
def getWHRatioA(Zo,er):
    """
    Returns the W/H ratio for W/H < 2. If the result is > 2, then other method
    should be used.
    This function assumes that the thickenss of conductors is insignificant.
    Parameters:
    - `Zo` : Real impedance of the line.
    - `er` : Relative permitivity of the dielectric.
    """
    A = getAuxVarA(Zo,er)
    numerator = 8*math.e**A
    denominator = math.e**(2*A) - 2
    return numerator/denominator
def getWHRatioB(Zo,er):
    """
    Returns the W/H ratio for W/H > 2. If the result is < 2, then other method
    should be used.
    This function assumes that the thickenss of conductors is insignificant.
    Parameters:
    - `Zo` : Real impedance of the line.
    - `er` : Relative permitivity of the dielectric.
    """
    B = getAuxVarB(Zo,er)
    main_term = B-1 - math.log(2*B - 1)
    correction = (er - 1)*(math.log(B-1) + 0.39 - 0.61/er)/(2*er)
    return (2/math.pi)*(main_term + correction)
def getCharacteristicImpedance(WHratio, ef):
    """
    Returns the characteristic impedance of the medium, based on the effective
    permitivity and W/H ratio.
    This function assumes that the thickenss of conductors is insignificant.
    Parameters:
    - `WHratio` : W/H ratio.
    - `ef` : Effective permitivity of the dielectric.
    """
    root_ef = math.sqrt(ef)
    if WHratio <= 1:
        return (60/root_ef)*math.log(8/WHratio + WHratio/4)
    return (120*math.pi/root_ef)/(WHratio + 1.393 + 0.667*math.log(WHratio +1.444))
def getWHRatio(Zo,er):
    """
    Returns the W/H ratio, after trying with the two possible set of solutions,
    for when W/H < 2 or else.
    This function assumes that the thickenss of conductors is insignificant.
    Parameters:
    - `Zo` : Real impedance of the line.
    - `er` : Relative permitivity of the dielectric.

    NOTE(review): the original docstring claimed a zero return when no
    solution exists, but no such fallback is implemented — the loop keeps
    rescaling the impedances until one candidate falls in range.
    """
    efa = er
    efb = er
    Zoa = Zo
    Zob = Zo
    while True:
        ratio_a = getWHRatioA(Zoa,efa)
        ratio_b = getWHRatioB(Zob,efb)
        if ratio_a < 2:
            return ratio_a
        if ratio_b > 2:
            return ratio_b
        # Neither candidate valid: rescale the target impedances and retry.
        Zoa = math.sqrt(efa)*Zoa
        Zob = math.sqrt(efb)*Zob
def getCorrectedWidth(W,H,t):
    """
    For significant conductor thickness, this returns the corrected width.

    Parameters:
    - `W` : Width
    - `H` : Height
    - `t` : Conductor thickness

    Raises:
    - `ValueError` : when the conductor is too thick (t >= H or t >= W/2).
    """
    if t < H and t < W/2:
        if W/H <= math.pi/2:
            return W + (1 + math.log(2*H/t))*(t/math.pi)
        else:
            return W + (1 + math.log(4*math.pi*H/t))*(t/math.pi)
    # Previously this branch only printed a warning (Python 2 print
    # statement) and implicitly returned None, which made callers such as
    # getConductorLoss fail later with a TypeError. Fail loudly instead.
    raise ValueError("The conductor is too thick!!")
def getConductorLoss(W,H,t,sigma,f,Zo):
    """
    Returns the conductor loss in [Np/m].

    Parameters:
    - `W` : Width
    - `H` : Height
    - `t` : Conductor thickness
    - `sigma` : Conductance of medium
    - `f` : Operating frequency
    - `Zo` : Characteristic impedance
    """
    We = getCorrectedWidth(W,H,t)
    P = 1 - (We/4/H)**2
    # Surface resistance of the conductor at the operating frequency.
    Rs = math.sqrt((math.pi*f*u0)/sigma)
    Q = 1 + H/We + (math.log((2*H)/t)-t/W)*H/(We*math.pi)
    if W/H <= 1/(2*math.pi):
        # Bug fix: this branch used the bare name `pi` (twice), which is
        # undefined in this module and raised NameError for narrow lines;
        # it must be math.pi.
        return (1 + H/We + (math.log(4*math.pi*W/t) + t/W)*H/(math.pi*We))*(8.68*Rs*P)/(2*math.pi*Zo*H)
    elif W/H <= 2:
        return (8.68*Rs*P*Q)/(2*math.pi*Zo*H)
    else:
        return ((8.68*Rs*Q)/(Zo*H))*(We/H + (We/math.pi/H)/(We/2/H)+0.94)*((H/We + 2*math.log(We/2/H + 0.94)/math.pi)**(-2))
def getDielectricLoss(er,ef,tanD,f):
    """
    Returns the dielectric loss in [dB/cm].

    Parameters:
    - `er` : Relative permitivity of the dielectric
    - `ef` : Effective permitivity
    - `tanD` : tan \delta
    - `f` : Operating frequency
    """
    # Guided wavelength for the effective permitivity.
    lam = c/math.sqrt(ef)/f
    numerator = 27.3*(er*(ef-1)*tanD)
    denominator = lam*math.sqrt(er)*(er-1)
    return numerator/denominator
|
import sklearn.datasets as skds
import numpy as np
import random
import theano.tensor as T
import theano
import matplotlib.pyplot as plt
import math
# Synthetic 1-D logistic-regression demo (Python 2 + Theano): fit a sigmoid
# to noisy samples with manual stochastic gradient descent, then plot.

# Training data: x in [-50, 50), targets from sigmoid(3*x - 5) plus noise.
x = np.arange(-50., 50., 1)
y = np.array(map(lambda tmp: 1.0/(1 + math.exp(-3 * tmp + 5.0)), x))
noise = np.random.uniform(-0.1, .1, size=len(x))
y += noise
print x
print y
# Model parameters: randomly initialised Theano shared scalars.
theta = theano.shared(np.random.uniform(-0.1, 0.1))
omega = theano.shared(np.random.uniform(-0.1, 0.1))
X = T.dscalar('X')
Y = T.dscalar('Y')
# Model is sigmoid(omega*X - theta), so omega should fit ~3 and theta ~5.
prediction = 1/(1 + T.exp(-omega * X + theta))
loss1 = -Y * T.log(prediction)  # cross-entropy loss; defined but never used
loss2 = 1/2.0 * (prediction - Y) ** 2  # squared error; the one optimised
predict = theano.function([X], prediction)
calculate_loss = theano.function([X, Y], loss2)
print predict(1.0)
dX = T.grad(loss2, X)  # NOTE(review): gradient w.r.t. the input; never used
dtheta = T.grad(loss2, theta)
domega = T.grad(loss2, omega)
epsilon = .01  # learning rate
gradient_step = theano.function(
    [X, Y],
    updates=((omega, omega - epsilon * domega),
             (theta, theta - epsilon * dtheta)))
# Plain SGD: one parameter update per sample, 100 epochs, accumulating the
# per-epoch loss for progress reporting.
for i in range(100):
    loss = 0
    for j in range(len(x)):
        gradient_step(x[j], y[j])
        loss += calculate_loss(x[j], y[j])
    print 'loss after' + str(i) + 'iterations.' + str(loss)
print x
print y
# NOTE(review): under Python 2, 1/len(x) is integer division, so mul is
# always 1 and the plot limits are not actually shrunk; this probably
# meant 1 - 1.0/len(x) — confirm before changing.
mul = 1 - 1/len(x)
plt.xlim(x.min() * mul, x.max() * mul)
plt.ylim(y.min() * mul, y.max() * mul)
plt.xlabel('x')
plt.ylabel('y')
plt.title('lr test')
plt.plot(x, y, 'ro')
# Dense grid for drawing the fitted curve over the scatter plot.
xx = np.arange(x.min(), x.max(), 0.1)
yy = map(lambda abc: predict(abc), xx)
plt.plot(xx, yy, 'b')
plt.show()
|
"""
This implements a redirection for CERN HR Documents in the CERN Document
Server. It's useful as a reference on how goto plugins could be implemented.
"""
import time
import re
from invenio.legacy.search_engine import perform_request_search
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.bibdocfile.api import BibRecDocs
def make_cern_ssr_docname(lang, edition, modif=0):
    """Build the docname of a Staff Rules and Regulations document.

    Returns e.g. "CERN_SSR_en_ed08" or, when modif is non-zero,
    "CERN_SSR_en_ed08_modif02".
    """
    docname = "CERN_SSR_%s_ed%02d" % (lang, edition)
    if modif:
        docname += "_modif%02d" % modif
    return docname
_RE_REVISION = re.compile(r"rev(\d\d)")
def _get_revision(docname):
"""
Return the revision in a docname. E.g.:
CERN_Circ_Op_en_02_rev01_Implementation measures.pdf -> 1
CERN_Circ_Op_en_02_rev02_Implementation measures.PDF -> 2
"""
g = _RE_REVISION.search(docname)
if g:
return int(g.group(1))
return 0
def _register_document(documents, docname, key):
    """
    Register in the documents mapping the docname to key, but only if the
    docname has a revision higher of the docname already associated with a key
    """
    current = documents.get(key)
    if current is None or _get_revision(docname) > _get_revision(current):
        documents[key] = docname
def _collect_documents(bibrecdocs):
    """Classify a record's docnames into canonical document keys.

    Keys match the 'document' parameter values registered by
    register_hr_redirections() ('implementation-en', 'annex-fr', 'en', ...).
    For each key, the docname with the highest revision wins
    (see _register_document).
    """
    documents = {}
    for docname in bibrecdocs.get_bibdoc_names():
        ldocname = docname.lower()
        if 'implementation' in ldocname:
            _register_document(documents, docname, 'implementation-en')
        elif 'application' in ldocname:
            _register_document(documents, docname, 'implementation-fr')
        elif 'archiving' in ldocname:
            _register_document(documents, docname, 'archiving-en')
        elif 'archivage' in ldocname:
            _register_document(documents, docname, 'archiving-fr')
        elif 'annexes_en' in ldocname:
            # Bug fix: must be tested before 'annexe' — 'annexe' is a
            # substring of 'annexes_en', so the French branch used to
            # shadow English annexes.
            _register_document(documents, docname, 'annex-en')
        elif 'annexe' in ldocname or 'annexes_fr' in ldocname:
            _register_document(documents, docname, 'annex-fr')
        elif 'annex' in ldocname:
            _register_document(documents, docname, 'annex-en')
        elif '_en_' in ldocname or '_eng_' in ldocname or '_angl_' in ldocname:
            _register_document(documents, docname, 'en')
        elif '_fr_' in ldocname:
            _register_document(documents, docname, 'fr')
    return documents
def goto(type, document='', number=0, lang='en', modif=0):
    """Resolve a CERN HR document redirection to a file URL.

    Parameters mirror those stored by register_hr_redirections():
    - `type`: 'SSR', 'OPER-CIRC' or 'ADMIN-CIRC'.
    - `document`: canonical document key for circulars ('en', 'annex-fr', ...).
    - `number`: circular number.
    - `lang`/`modif`: language and modification number for the SSR.
    """
    today = time.strftime('%Y-%m-%d')
    if type == 'SSR':
        ## We would like a CERN Staff Rules and Regulations
        recids = perform_request_search(cc='Staff Rules and Regulations', f="925__a:1996-01-01->%s 925__b:%s->9999-99-99" % (today, today))
        recid = recids[-1]
        reportnumber = get_fieldvalues(recid, '037__a')[0]
        edition = int(reportnumber[-2:]) ## e.g. CERN-STAFF-RULES-ED08
        return BibRecDocs(recid).get_bibdoc(make_cern_ssr_docname(lang, edition, modif)).get_file('.pdf').get_url()
    elif type == "OPER-CIRC":
        recids = perform_request_search(cc="Operational Circulars", p="reportnumber=\"CERN-OPER-CIRC-%s-*\"" % number, sf="925__a")
    elif type == 'ADMIN-CIRC':
        recids = perform_request_search(cc="Administrative Circulars", p="reportnumber=\"CERN-ADMIN-CIRC-%s-*\"" % number, sf="925__a")
    else:
        raise ValueError("Unknown document type: %s" % type)
    # Both circular types share the same docname classification. Bug fix:
    # the OPER-CIRC branch used to build keys with underscores
    # ('implementation_en'), which never matched the hyphenated 'document'
    # values registered by register_hr_redirections(); hyphens are now used
    # for both circular types.
    recid = recids[-1]
    bibrecdocs = BibRecDocs(recid)
    documents = _collect_documents(bibrecdocs)
    return bibrecdocs.get_bibdoc(documents[document]).get_file('.pdf').get_url()
def register_hr_redirections():
    """
    Register every known HR redirection. Run this only once.
    """
    from invenio.modules.redirector.api import register_redirection
    plugin = 'goto_plugin_cern_hr_documents'
    redirections = []
    ## Staff rules and regulations
    for modif in range(1, 20):
        for lang in ('en', 'fr'):
            redirections.append(('hr-srr-modif%02d-%s' % (modif, lang),
                                 {'type': 'SSR', 'lang': lang, 'modif': modif}))
    for lang in ('en', 'fr'):
        redirections.append(('hr-srr-%s' % lang,
                             {'type': 'SSR', 'lang': lang, 'modif': 0}))
    ## Operational Circulars
    for number in range(1, 10):
        for lang in ('en', 'fr'):
            redirections.append(('hr-oper-circ-%s-%s' % (number, lang),
                                 {'type': 'OPER-CIRC', 'document': lang, 'number': number}))
    for number, special_document in ((2, 'implementation'), (2, 'annex'), (3, 'archiving'), (3, 'annex')):
        for lang in ('en', 'fr'):
            redirections.append(('hr-circ-%s-%s-%s' % (number, special_document, lang),
                                 {'type': 'OPER-CIRC', 'document': '%s-%s' % (special_document, lang), 'number': number}))
    ## Administrative Circulars:
    for number in range(1, 32):
        for lang in ('en', 'fr'):
            redirections.append(('hr-admin-circ-%s-%s' % (number, lang),
                                 {'type': 'ADMIN-CIRC', 'document': lang, 'number': number}))
    for name, parameters in redirections:
        register_redirection(name, plugin, parameters=parameters)
if __name__ == "__main__":
register_hr_redirections()
|
import os.path
import sys
from PyQt5 import QtGui
if sys.platform == 'win32':
    # Windows has no freedesktop icon themes; rely on bundled resources only.
    _search_paths = []
else:
    # Candidate roots for freedesktop-style icon theme directories.
    _search_paths = [
        os.path.expanduser('~/.icons'),
        os.path.join(os.environ.get('XDG_DATA_DIRS', '/usr/share'), 'icons'),
        '/usr/share/pixmaps',
    ]
# Name of the active system icon theme, or None if it cannot be determined.
_current_theme = None
if 'XDG_CURRENT_DESKTOP' in os.environ:
    desktop = os.environ['XDG_CURRENT_DESKTOP'].lower()
    if desktop in ('gnome', 'unity'):
        # [1:-1] strips the surrounding quotes from the gsettings output.
        _current_theme = (os.popen('gsettings get org.gnome.desktop.interface icon-theme').read().strip()[1:-1]
                          or None)
elif os.environ.get('KDE_FULL_SESSION'):
    _current_theme = (os.popen("kreadconfig --file kdeglobals --group Icons --key Theme --default crystalsvg").read().strip()
                      or None)
# Icon size tuples, first entry being the preferred size.
ICON_SIZE_MENU = ('16x16',)
ICON_SIZE_TOOLBAR = ('22x22',)
ICON_SIZE_ALL = ('22x22', '16x16')
def lookup(name, size=ICON_SIZE_ALL):
    """Return a QIcon for *name*, preferring the current system icon theme.

    Scans the theme search paths for a PNG at the first size in *size*;
    on a hit, the remaining sizes are added as alternate resolutions and
    the icon is returned immediately. Otherwise falls back to the bundled
    Qt resources (':/images/<size>/<name>.png').
    """
    icon = QtGui.QIcon()
    if _current_theme:
        for path in _search_paths:
            for subdir in ('actions', 'places', 'devices'):
                fullpath = os.path.join(path, _current_theme, size[0], subdir, name)
                if os.path.exists(fullpath + '.png'):
                    icon.addFile(fullpath + '.png')
                    for s in size[1:]:
                        icon.addFile(os.path.join(path, _current_theme, s, subdir, name) + '.png')
                    # First hit wins; stop scanning other directories.
                    return icon
    for s in size:
        icon.addFile('/'.join([':', 'images', s, name]) + '.png')
    return icon
|
"""OpCodes module
This module implements the data structures which define the cluster
operations - the so-called opcodes.
Every operation which modifies the cluster state is expressed via
opcodes.
"""
import logging
import re
import operator
from ganeti import constants
from ganeti import errors
from ganeti import ht
# Common opcode parameter definitions. Each parameter is a tuple of
# (name, default, type-check callable, documentation); ht.NoDefault marks a
# required parameter and None documentation means "undocumented".
_POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
                  "Selected output fields")
_PShutdownTimeout = \
  ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt,
   "How long to wait for instance to shut down")
_PForce = ("force", False, ht.TBool, "Whether to force the operation")
_PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString,
                  "Instance name")
_PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool,
                        "Whether to ignore offline nodes")
_PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString, "Node name")
_PGroupName = ("group_name", ht.NoDefault, ht.TNonEmptyString, "Group name")
_PMigrationMode = ("mode", None,
                   ht.TOr(ht.TNone, ht.TElemOf(constants.HT_MIGRATION_MODES)),
                   "Migration mode")
_PMigrationLive = ("live", None, ht.TMaybeBool,
                   "Legacy setting for live migration, do not use")
_PTagKind = ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES), None)
_PTags = ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), None)
_PForceVariant = ("force_variant", False, ht.TBool,
                  "Whether to force an unknown OS variant")
_PWaitForSync = ("wait_for_sync", True, ht.TBool,
                 "Whether to wait for the disk to synchronize")
_PIgnoreConsistency = ("ignore_consistency", False, ht.TBool,
                       "Whether to ignore disk consistency")
_PStorageName = ("name", ht.NoDefault, ht.TMaybeString, "Storage name")
_PUseLocking = ("use_locking", False, ht.TBool,
                "Whether to use synchronization")
_PNameCheck = ("name_check", True, ht.TBool, "Whether to check name")
_PNodeGroupAllocPolicy = \
  ("alloc_policy", None,
   ht.TOr(ht.TNone, ht.TElemOf(constants.VALID_ALLOC_POLICIES)),
   "Instance allocation policy")
_PGroupNodeParams = ("ndparams", None, ht.TMaybeDict,
                     "Default node parameters for group")
_PQueryWhat = ("what", ht.NoDefault, ht.TElemOf(constants.QR_VIA_OP),
               "Resource(s) to query for")
# Shared documentation fragment (a plain string, not a parameter tuple).
_PIpCheckDoc = "Whether to ensure instance's IP address is inactive"
_PNoRemember = ("no_remember", False, ht.TBool,
                "Do not remember the state change")
_PMigrationTargetNode = ("target_node", None, ht.TMaybeString,
                         "Target node for shared-storage instances")
# Matches lower-to-upper case transitions; used by _NameToId.
_OPID_RE = re.compile("([a-z])([A-Z])")
# Validates hidden/blacklisted OS modification lists: each entry must be a
# two-item list whose first element is a DDM (add/remove) value.
_TestClusterOsList = ht.TOr(ht.TNone,
  ht.TListOf(ht.TAnd(ht.TList, ht.TIsLength(2),
    ht.TMap(ht.WithDesc("GetFirstItem")(operator.itemgetter(0)),
           ht.TElemOf(constants.DDMS_VALUES)))))
_TestNicDef = ht.TDictOf(ht.TElemOf(constants.INIC_PARAMS),
                         ht.TOr(ht.TNone, ht.TNonEmptyString))
# OP_ID prefix abbreviations used by OpCode.TinySummary.
_SUMMARY_PREFIX = {
  "CLUSTER_": "C_",
  "GROUP_": "G_",
  "NODE_": "N_",
  "INSTANCE_": "I_",
  }
def _NameToId(name):
  """Convert an opcode class name to an OP_ID.

  @type name: string
  @param name: the class name, as OpXxxYyy
  @rtype: string
  @return: the name in the OP_XXXX_YYYY format

  """
  if not name.startswith("Op"):
    return None
  # A zero-width (?<=[a-z])(?=[A-Z]) split would avoid consuming input, but
  # re.split does not split on empty matches; instead mark every
  # lower-to-upper boundary with a comma and split on that.
  separated = _OPID_RE.sub(r"\1,\2", name)
  return "_".join(piece.upper() for piece in separated.split(","))
def RequireFileStorage():
  """Checks that file storage is enabled.

  While it doesn't really fit into this module, L{utils} was deemed too large
  of a dependency to be imported for just one or two functions.

  @raise errors.OpPrereqError: when file storage is disabled

  """
  if constants.ENABLE_FILE_STORAGE:
    return
  raise errors.OpPrereqError("File storage disabled at configure time",
                             errors.ECODE_INVAL)
def RequireSharedFileStorage():
  """Checks that shared file storage is enabled.

  While it doesn't really fit into this module, L{utils} was deemed too large
  of a dependency to be imported for just one or two functions.

  @raise errors.OpPrereqError: when shared file storage is disabled

  """
  if constants.ENABLE_SHARED_FILE_STORAGE:
    return
  raise errors.OpPrereqError("Shared file storage disabled at"
                             " configure time", errors.ECODE_INVAL)
@ht.WithDesc("CheckFileStorage")
def _CheckFileStorage(value):
  """Ensures file storage is enabled if used.

  """
  # Map each file-based disk template to its enablement check.
  required = {
    constants.DT_FILE: RequireFileStorage,
    constants.DT_SHARED_FILE: RequireSharedFileStorage,
    }
  check = required.get(value)
  if check is not None:
    check()
  return True
# Disk template check: must be a known template and, for file-based
# templates, the matching file storage must be enabled at configure time.
_CheckDiskTemplate = ht.TAnd(ht.TElemOf(constants.DISK_TEMPLATES),
                             _CheckFileStorage)
def _CheckStorageType(storage_type):
  """Ensure a given storage type is valid.

  @raise errors.OpPrereqError: when the storage type is unknown

  """
  known = storage_type in constants.VALID_STORAGE_TYPES
  if not known:
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                               errors.ECODE_INVAL)
  # File-based storage additionally requires compile-time support.
  if storage_type == constants.ST_FILE:
    RequireFileStorage()
  return True
# Storage type parameter; validated (with side checks) by _CheckStorageType.
_PStorageType = ("storage_type", ht.NoDefault, _CheckStorageType,
                 "Storage type")
class _AutoOpParamSlots(type):
  """Meta class for opcode definitions.

  Derives OP_ID from the class name and turns the OP_PARAMS parameter
  names into __slots__, so opcode instances only accept declared fields.
  """
  def __new__(mcs, name, bases, attrs):
    """Called when a class should be created.

    @param mcs: The meta class
    @param name: Name of created class
    @param bases: Base classes
    @type attrs: dict
    @param attrs: Class attributes

    """
    assert "__slots__" not in attrs, \
      "Class '%s' defines __slots__ when it should use OP_PARAMS" % name
    assert "OP_ID" not in attrs, "Class '%s' defining OP_ID" % name
    # Derive OP_ID ("OP_XXX_YYY") from the class name ("OpXxxYyy").
    attrs["OP_ID"] = _NameToId(name)
    # Always set OP_PARAMS to avoid duplicates in BaseOpCode.GetAllParams
    params = attrs.setdefault("OP_PARAMS", [])
    # Use parameter names as slots
    slots = [pname for (pname, _, _, _) in params]
    assert "OP_DSC_FIELD" not in attrs or attrs["OP_DSC_FIELD"] in slots, \
      "Class '%s' uses unknown field in OP_DSC_FIELD" % name
    attrs["__slots__"] = slots
    return type.__new__(mcs, name, bases, attrs)
class BaseOpCode(object):
  """A simple serializable object.

  This object serves as a parent class for OpCode without any custom
  field handling.

  """
  # pylint: disable-msg=E1101
  # as OP_ID is dynamically defined
  __metaclass__ = _AutoOpParamSlots
  def __init__(self, **kwargs):
    """Constructor for BaseOpCode.

    The constructor takes only keyword arguments and will set
    attributes on this object based on the passed arguments. As such,
    it means that you should not pass arguments which are not in the
    __slots__ attribute for this class.

    @raise TypeError: for any keyword not declared in the class slots

    """
    slots = self._all_slots()
    for key in kwargs:
      if key not in slots:
        raise TypeError("Object %s doesn't support the parameter '%s'" %
                        (self.__class__.__name__, key))
      setattr(self, key, kwargs[key])
  def __getstate__(self):
    """Generic serializer.

    This method just returns the contents of the instance as a
    dictionary.

    @rtype: C{dict}
    @return: the instance attributes and their values

    """
    state = {}
    for name in self._all_slots():
      # Only attributes that were actually set are serialized.
      if hasattr(self, name):
        state[name] = getattr(self, name)
    return state
  def __setstate__(self, state):
    """Generic unserializer.

    This method just restores from the serialized state the attributes
    of the current instance.

    @param state: the serialized opcode data
    @type state: C{dict}

    """
    if not isinstance(state, dict):
      raise ValueError("Invalid data to __setstate__: expected dict, got %s" %
                       type(state))
    # Clear attributes that are absent from the incoming state before
    # applying it, so stale values cannot survive a restore.
    for name in self._all_slots():
      if name not in state and hasattr(self, name):
        delattr(self, name)
    for name in state:
      setattr(self, name, state[name])
  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    Walks the MRO so slots declared on parent opcodes are included.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots
  @classmethod
  def GetAllParams(cls):
    """Compute list of all parameters for an opcode.

    Walks the MRO so OP_PARAMS declared on parent opcodes are included.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "OP_PARAMS", []))
    return slots
  def Validate(self, set_defaults):
    """Validate opcode parameters, optionally setting default values.

    @type set_defaults: bool
    @param set_defaults: Whether to set default values
    @raise errors.OpPrereqError: When a parameter value doesn't match
                                 requirements

    """
    for (attr_name, default, test, _) in self.GetAllParams():
      assert test == ht.NoType or callable(test)
      if not hasattr(self, attr_name):
        if default == ht.NoDefault:
          raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
                                     (self.OP_ID, attr_name),
                                     errors.ECODE_INVAL)
        elif set_defaults:
          # Callable defaults produce fresh values per instance.
          if callable(default):
            dval = default()
          else:
            dval = default
          setattr(self, attr_name, dval)
      if test == ht.NoType:
        # no tests here
        continue
      if set_defaults or hasattr(self, attr_name):
        attr_val = getattr(self, attr_name)
        if not test(attr_val):
          logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
                        self.OP_ID, attr_name, type(attr_val), attr_val)
          raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
                                     (self.OP_ID, attr_name),
                                     errors.ECODE_INVAL)
class OpCode(BaseOpCode):
  """Abstract OpCode.

  This is the root of the actual OpCode hierarchy. All clases derived
  from this class should override OP_ID.

  @cvar OP_ID: The ID of this opcode. This should be unique amongst all
               children of this class.
  @cvar OP_DSC_FIELD: The name of a field whose value will be included in the
                      string returned by Summary(); see the docstring of that
                      method for details).
  @cvar OP_PARAMS: List of opcode attributes, the default values they should
                   get if not already defined, and types they must match.
  @cvar WITH_LU: Boolean that specifies whether this should be included in
                 mcpu's dispatch table
  @ivar dry_run: Whether the LU should be run in dry-run mode, i.e. just
                 the check steps
  @ivar priority: Opcode priority for queue

  """
  # pylint: disable-msg=E1101
  # as OP_ID is dynamically defined
  WITH_LU = True
  OP_PARAMS = [
    ("dry_run", None, ht.TMaybeBool, "Run checks only, don't execute"),
    ("debug_level", None, ht.TOr(ht.TNone, ht.TPositiveInt), "Debug level"),
    ("priority", constants.OP_PRIO_DEFAULT,
     ht.TElemOf(constants.OP_PRIO_SUBMIT_VALID), "Opcode priority"),
    ]
  def __getstate__(self):
    """Specialized getstate for opcodes.

    This method adds to the state dictionary the OP_ID of the class,
    so that on unload we can identify the correct class for
    instantiating the opcode.

    @rtype: C{dict}
    @return: the state as a dictionary

    """
    data = BaseOpCode.__getstate__(self)
    data["OP_ID"] = self.OP_ID
    return data
  @classmethod
  def LoadOpCode(cls, data):
    """Generic load opcode method.

    The method identifies the correct opcode class from the dict-form
    by looking for a OP_ID key, if this is not found, or its value is
    not available in this module as a child of this class, we fail.

    @type data: C{dict}
    @param data: the serialized opcode

    """
    if not isinstance(data, dict):
      raise ValueError("Invalid data to LoadOpCode (%s)" % type(data))
    if "OP_ID" not in data:
      raise ValueError("Invalid data to LoadOpcode, missing OP_ID")
    op_id = data["OP_ID"]
    op_class = None
    if op_id in OP_MAPPING:
      op_class = OP_MAPPING[op_id]
    else:
      raise ValueError("Invalid data to LoadOpCode: OP_ID %s unsupported" %
                       op_id)
    op = op_class()
    new_data = data.copy()
    # OP_ID is not an instance slot; drop it before restoring state.
    del new_data["OP_ID"]
    op.__setstate__(new_data)
    return op
  def Summary(self):
    """Generates a summary description of this opcode.

    The summary is the value of the OP_ID attribute (without the "OP_"
    prefix), plus the value of the OP_DSC_FIELD attribute, if one was
    defined; this field should allow to easily identify the operation
    (for an instance creation job, e.g., it would be the instance
    name).

    """
    assert self.OP_ID is not None and len(self.OP_ID) > 3
    # all OP_ID start with OP_, we remove that
    txt = self.OP_ID[3:]
    field_name = getattr(self, "OP_DSC_FIELD", None)
    if field_name:
      field_value = getattr(self, field_name, None)
      if isinstance(field_value, (list, tuple)):
        field_value = ",".join(str(i) for i in field_value)
      txt = "%s(%s)" % (txt, field_value)
    return txt
  def TinySummary(self):
    """Generates a compact summary description of the opcode.

    Known prefixes are abbreviated via _SUMMARY_PREFIX (e.g.
    "CLUSTER_" becomes "C_").

    """
    assert self.OP_ID.startswith("OP_")
    text = self.OP_ID[3:]
    for (prefix, supplement) in _SUMMARY_PREFIX.items():
      if text.startswith(prefix):
        return supplement + text[len(prefix):]
    return text
class OpClusterPostInit(OpCode):
  """Post cluster initialization.

  This opcode does not touch the cluster at all. Its purpose is to run hooks
  after the cluster has been initialized.

  """
  # No parameters beyond the OpCode defaults.
class OpClusterDestroy(OpCode):
  """Destroy the cluster.

  This opcode has no other parameters. All the state is irreversibly
  lost after the execution of this opcode.

  """
  # No parameters beyond the OpCode defaults.
class OpClusterQuery(OpCode):
  """Query cluster information."""
  # No parameters beyond the OpCode defaults.
class OpClusterVerifyConfig(OpCode):
  """Verify the cluster config.

  """
  OP_PARAMS = [
    ("verbose", False, ht.TBool, None),
    ("error_codes", False, ht.TBool, None),
    ("debug_simulate_errors", False, ht.TBool, None),
    ]
class OpClusterVerifyGroup(OpCode):
  """Run verify on a node group from the cluster.

  @type skip_checks: C{list}
  @ivar skip_checks: steps to be skipped from the verify process; this
                     needs to be a subset of
                     L{constants.VERIFY_OPTIONAL_CHECKS}; currently
                     only L{constants.VERIFY_NPLUSONE_MEM} can be passed

  """
  OP_DSC_FIELD = "group_name"
  OP_PARAMS = [
    ("group_name", ht.NoDefault, ht.TNonEmptyString, None),
    ("skip_checks", ht.EmptyList,
     ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS)), None),
    ("verbose", False, ht.TBool, None),
    ("error_codes", False, ht.TBool, None),
    ("debug_simulate_errors", False, ht.TBool, None),
    ]
class OpClusterVerifyDisks(OpCode):
  """Verify the cluster disks.

  Parameters: none

  Result: a tuple of four elements:
    - list of node names with bad data returned (unreachable, etc.)
    - dict of node names with broken volume groups (values: error msg)
    - list of instances with degraded disks (that should be activated)
    - dict of instances with missing logical volumes (values: (node, vol)
      pairs with details about the missing volumes)

  In normal operation, all lists should be empty. A non-empty instance
  list (3rd element of the result) is still ok (errors were fixed) but
  non-empty node list means some node is down, and probably there are
  unfixable drbd errors.

  Note that only instances that are drbd-based are taken into
  consideration. This might need to be revisited in the future.

  """
class OpClusterRepairDiskSizes(OpCode):
  """Verify the disk sizes of the instances and fixes configuration
  mimatches.

  Parameters: optional instances list, in case we want to restrict the
  checks to only a subset of the instances.

  Result: a list of tuples, (instance, disk, new-size) for changed
  configurations.

  In normal operation, the list should be empty.

  @type instances: list
  @ivar instances: the list of instances to check, or empty for all instances

  """
  OP_PARAMS = [
    ("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
    ]
class OpClusterConfigQuery(OpCode):
  """Query cluster configuration values."""
  # Required: the list of configuration fields to return.
  OP_PARAMS = [
    _POutputFields
    ]
class OpClusterRename(OpCode):
  """Rename the cluster.

  @type name: C{str}
  @ivar name: The new name of the cluster. The name and/or the master IP
              address will be changed to match the new name and its IP
              address.

  """
  OP_DSC_FIELD = "name"
  OP_PARAMS = [
    ("name", ht.NoDefault, ht.TNonEmptyString, None),
    ]
class OpClusterSetParams(OpCode):
  """Change the parameters of the cluster.

  All parameters default to None, meaning "leave unchanged".

  @type vg_name: C{str} or C{None}
  @ivar vg_name: The new volume group name or None to disable LVM usage.

  """
  OP_PARAMS = [
    ("vg_name", None, ht.TMaybeString, "Volume group name"),
    ("enabled_hypervisors", None,
     ht.TOr(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue),
            ht.TNone),
     "List of enabled hypervisors"),
    ("hvparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
                              ht.TNone),
     "Cluster-wide hypervisor parameter defaults, hypervisor-dependent"),
    ("beparams", None, ht.TOr(ht.TDict, ht.TNone),
     "Cluster-wide backend parameter defaults"),
    ("os_hvp", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
                            ht.TNone),
     "Cluster-wide per-OS hypervisor parameter defaults"),
    ("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
                              ht.TNone),
     "Cluster-wide OS parameter defaults"),
    ("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone),
     "Master candidate pool size"),
    ("uid_pool", None, ht.NoType,
     "Set UID pool, must be list of lists describing UID ranges (two items,"
     " start and end inclusive)"),
    ("add_uids", None, ht.NoType,
     "Extend UID pool, must be list of lists describing UID ranges (two"
     " items, start and end inclusive) to be added"),
    ("remove_uids", None, ht.NoType,
     "Shrink UID pool, must be list of lists describing UID ranges (two"
     " items, start and end inclusive) to be removed"),
    ("maintain_node_health", None, ht.TMaybeBool,
     "Whether to automatically maintain node health"),
    ("prealloc_wipe_disks", None, ht.TMaybeBool,
     "Whether to wipe disks before allocating them to instances"),
    ("nicparams", None, ht.TMaybeDict, "Cluster-wide NIC parameter defaults"),
    ("ndparams", None, ht.TMaybeDict, "Cluster-wide node parameter defaults"),
    ("drbd_helper", None, ht.TOr(ht.TString, ht.TNone), "DRBD helper program"),
    ("default_iallocator", None, ht.TOr(ht.TString, ht.TNone),
     "Default iallocator for cluster"),
    ("master_netdev", None, ht.TOr(ht.TString, ht.TNone),
     "Master network device"),
    ("reserved_lvs", None, ht.TOr(ht.TListOf(ht.TNonEmptyString), ht.TNone),
     "List of reserved LVs"),
    ("hidden_os", None, _TestClusterOsList,
     "Modify list of hidden operating systems. Each modification must have"
     " two items, the operation and the OS name. The operation can be"
     " ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
    ("blacklisted_os", None, _TestClusterOsList,
     "Modify list of blacklisted operating systems. Each modification must have"
     " two items, the operation and the OS name. The operation can be"
     " ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
    ]
class OpClusterRedistConf(OpCode):
  """Force a full push of the cluster configuration.

  """
  # No parameters beyond the OpCode defaults.
class OpQuery(OpCode):
  """Query for resources/items.

  @ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
  @ivar fields: List of fields to retrieve
  @ivar filter: Query filter

  """
  OP_PARAMS = [
    _PQueryWhat,
    ("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
     "Requested fields"),
    ("filter", None, ht.TOr(ht.TNone, ht.TListOf),
     "Query filter"),
    ]
class OpQueryFields(OpCode):
  """Query for available resource/item fields.

  @ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
  @ivar fields: List of fields to retrieve

  """
  OP_PARAMS = [
    _PQueryWhat,
    ("fields", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
     "Requested fields; if not given, all are returned"),
    ]
class OpOobCommand(OpCode):
  """Interact with OOB (out-of-band management of nodes).

  Runs one of the commands in L{constants.OOB_COMMANDS} against a list of
  nodes via the configured OOB helper program.

  """
  OP_PARAMS = [
    ("node_names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "List of nodes to run the OOB command against"),
    ("command", None, ht.TElemOf(constants.OOB_COMMANDS),
     "OOB command to be run"),
    ("timeout", constants.OOB_TIMEOUT, ht.TInt,
     "Timeout before the OOB helper will be terminated"),
    ("ignore_status", False, ht.TBool,
     "Ignores the node offline status for power off"),
    ("power_delay", constants.OOB_POWER_DELAY, ht.TPositiveFloat,
     "Time in seconds to wait between powering on nodes"),
    ]
class OpNodeRemove(OpCode):
  """Remove a node.

  @type node_name: C{str}
  @ivar node_name: The name of the node to remove. If the node still has
                   instances on it, the operation will fail.

  """
  OP_DSC_FIELD = "node_name"
  OP_PARAMS = [
    _PNodeName,
    ]
class OpNodeAdd(OpCode):
  """Add a node to the cluster.

  @type node_name: C{str}
  @ivar node_name: The name of the node to add. This can be a short name,
                   but it will be expanded to the FQDN.
  @type primary_ip: IP address
  @ivar primary_ip: The primary IP of the node. This will be ignored when the
                    opcode is submitted, but will be filled during the node
                    add (so it will be visible in the job query).
  @type secondary_ip: IP address
  @ivar secondary_ip: The secondary IP of the node. This needs to be passed
                      if the cluster has been initialized in 'dual-network'
                      mode, otherwise it must not be given.
  @type readd: C{bool}
  @ivar readd: Whether to re-add an existing node to the cluster. If
               this is not passed, then the operation will abort if the node
               name is already in the cluster; use this parameter to 'repair'
               a node that had its configuration broken, or was reinstalled
               without removal from the cluster.
  @type group: C{str}
  @ivar group: The node group to which this node will belong.
  @type vm_capable: C{bool}
  @ivar vm_capable: The vm_capable node attribute
  @type master_capable: C{bool}
  @ivar master_capable: The master_capable node attribute

  """
  OP_DSC_FIELD = "node_name"
  OP_PARAMS = [
    _PNodeName,
    # NoType: value is not validated here, it is filled in during execution
    ("primary_ip", None, ht.NoType, "Primary IP address"),
    ("secondary_ip", None, ht.TMaybeString, "Secondary IP address"),
    ("readd", False, ht.TBool, "Whether node is re-added to cluster"),
    ("group", None, ht.TMaybeString, "Initial node group"),
    ("master_capable", None, ht.TMaybeBool,
     "Whether node can become master or master candidate"),
    ("vm_capable", None, ht.TMaybeBool,
     "Whether node can host instances"),
    ("ndparams", None, ht.TMaybeDict, "Node parameters"),
    ]
class OpNodeQuery(OpCode):
  """Compute the list of nodes.

  Returns the requested output fields for either all nodes (empty name
  list) or the named subset.

  """
  OP_PARAMS = [
    _POutputFields,
    _PUseLocking,
    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "Empty list to query all nodes, node names otherwise"),
    ]
class OpNodeQueryvols(OpCode):
  """Get list of volumes on node.

  Lists logical volumes on all nodes (empty node list) or on the named
  subset.

  """
  OP_PARAMS = [
    _POutputFields,
    ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "Empty list to query all nodes, node names otherwise"),
    ]
class OpNodeQueryStorage(OpCode):
  """Get information on storage for node(s).

  Queries a single storage unit type (see C{_PStorageType}), optionally
  restricted by storage name.

  """
  OP_PARAMS = [
    _POutputFields,
    _PStorageType,
    ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "List of nodes"),
    ("name", None, ht.TMaybeString, "Storage name"),
    ]
class OpNodeModifyStorage(OpCode):
  """Modifies the properties of a storage unit"""
  OP_PARAMS = [
    _PNodeName,
    _PStorageType,
    _PStorageName,
    # Dictionary of property changes to apply to the storage unit
    ("changes", ht.NoDefault, ht.TDict, "Requested changes"),
    ]
class OpRepairNodeStorage(OpCode):
  """Repairs the volume group on a node."""
  OP_DSC_FIELD = "node_name"
  OP_PARAMS = [
    _PNodeName,
    _PStorageType,
    _PStorageName,
    _PIgnoreConsistency,
    ]
class OpNodeSetParams(OpCode):
  """Change the parameters of a node.

  Boolean flags default to None, meaning "leave unchanged"; only
  explicitly-passed values are modified.

  """
  OP_DSC_FIELD = "node_name"
  OP_PARAMS = [
    _PNodeName,
    _PForce,
    ("master_candidate", None, ht.TMaybeBool,
     "Whether the node should become a master candidate"),
    ("offline", None, ht.TMaybeBool,
     "Whether the node should be marked as offline"),
    ("drained", None, ht.TMaybeBool,
     "Whether the node should be marked as drained"),
    ("auto_promote", False, ht.TBool,
     "Whether node(s) should be promoted to master candidate if necessary"),
    ("master_capable", None, ht.TMaybeBool,
     "Denote whether node can become master or master candidate"),
    ("vm_capable", None, ht.TMaybeBool,
     "Denote whether node can host instances"),
    ("secondary_ip", None, ht.TMaybeString,
     "Change node's secondary IP address"),
    ("ndparams", None, ht.TMaybeDict, "Set node parameters"),
    ("powered", None, ht.TMaybeBool,
     "Whether the node should be marked as powered"),
    ]
class OpNodePowercycle(OpCode):
  """Tries to powercycle a node."""
  OP_DSC_FIELD = "node_name"
  OP_PARAMS = [
    _PNodeName,
    _PForce,
    ]
class OpNodeMigrate(OpCode):
  """Migrate all instances from a node."""
  OP_DSC_FIELD = "node_name"
  OP_PARAMS = [
    _PNodeName,
    _PMigrationMode,
    _PMigrationLive,
    _PMigrationTargetNode,
    # Only consulted for shared-storage instances, where the target node
    # cannot be derived from the disk configuration
    ("iallocator", None, ht.TMaybeString,
     "Iallocator for deciding the target node for shared-storage instances"),
    ]
class OpNodeEvacStrategy(OpCode):
  """Compute the evacuation strategy for a list of nodes.

  Either an explicit C{remote_node} or an C{iallocator} can be given to
  choose destinations; parameter docs are intentionally C{None} (internal).

  """
  OP_DSC_FIELD = "nodes"
  OP_PARAMS = [
    ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), None),
    ("remote_node", None, ht.TMaybeString, None),
    ("iallocator", None, ht.TMaybeString, None),
    ]
class OpInstanceCreate(OpCode):
  """Create an instance.

  @ivar instance_name: Instance name
  @ivar mode: Instance creation mode (one of L{constants.INSTANCE_CREATE_MODES})
  @ivar source_handshake: Signed handshake from source (remote import only)
  @ivar source_x509_ca: Source X509 CA in PEM format (remote import only)
  @ivar source_instance_name: Previous name of instance (remote import only)
  @ivar source_shutdown_timeout: Shutdown timeout used for source instance
    (remote import only)

  """
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PForceVariant,
    _PWaitForSync,
    _PNameCheck,
    ("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"),
    ("disks", ht.NoDefault,
     # TODO: Generate check from constants.IDISK_PARAMS_TYPES
     ht.TListOf(ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
                           ht.TOr(ht.TNonEmptyString, ht.TInt))),
     "Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;"
     " each disk definition must contain a ``%s`` value and"
     " can contain an optional ``%s`` value denoting the disk access mode"
     " (%s)" %
     (constants.IDISK_SIZE, constants.IDISK_SIZE, constants.IDISK_SIZE,
      constants.IDISK_MODE,
      " or ".join("``%s``" % i for i in sorted(constants.DISK_ACCESS_SET)))),
    ("disk_template", ht.NoDefault, _CheckDiskTemplate, "Disk template"),
    ("file_driver", None, ht.TOr(ht.TNone, ht.TElemOf(constants.FILE_DRIVER)),
     "Driver for file-backed disks"),
    ("file_storage_dir", None, ht.TMaybeString,
     "Directory for storing file-backed disks"),
    ("hvparams", ht.EmptyDict, ht.TDict,
     "Hypervisor parameters for instance, hypervisor-dependent"),
    ("hypervisor", None, ht.TMaybeString, "Hypervisor"),
    ("iallocator", None, ht.TMaybeString,
     "Iallocator for deciding which node(s) to use"),
    ("identify_defaults", False, ht.TBool,
     "Reset instance parameters to default if equal"),
    ("ip_check", True, ht.TBool, _PIpCheckDoc),
    ("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES),
     "Instance creation mode"),
    ("nics", ht.NoDefault, ht.TListOf(_TestNicDef),
     "List of NIC (network interface) definitions, for example"
     " ``[{}, {}, {\"%s\": \"198.51.100.4\"}]``; each NIC definition can"
     " contain the optional values %s" %
     (constants.INIC_IP,
      ", ".join("``%s``" % i for i in sorted(constants.INIC_PARAMS)))),
    ("no_install", None, ht.TMaybeBool,
     "Do not install the OS (will disable automatic start)"),
    ("osparams", ht.EmptyDict, ht.TDict, "OS parameters for instance"),
    ("os_type", None, ht.TMaybeString, "Operating system"),
    ("pnode", None, ht.TMaybeString, "Primary node"),
    ("snode", None, ht.TMaybeString, "Secondary node"),
    ("source_handshake", None, ht.TOr(ht.TList, ht.TNone),
     "Signed handshake from source (remote import only)"),
    ("source_instance_name", None, ht.TMaybeString,
     "Source instance name (remote import only)"),
    ("source_shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
     ht.TPositiveInt,
     "How long source instance was given to shut down (remote import only)"),
    ("source_x509_ca", None, ht.TMaybeString,
     "Source X509 CA in PEM format (remote import only)"),
    ("src_node", None, ht.TMaybeString, "Source node for import"),
    ("src_path", None, ht.TMaybeString, "Source directory for import"),
    ("start", True, ht.TBool, "Whether to start instance after creation"),
    ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance tags"),
    ]
class OpInstanceReinstall(OpCode):
  """Reinstall an instance's OS."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PForceVariant,
    ("os_type", None, ht.TMaybeString, "Instance operating system"),
    # Only used for this reinstall; not stored in the configuration
    ("osparams", None, ht.TMaybeDict, "Temporary OS parameters"),
    ]
class OpInstanceRemove(OpCode):
  """Remove an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PShutdownTimeout,
    ("ignore_failures", False, ht.TBool,
     "Whether to ignore failures during removal"),
    ]
class OpInstanceRename(OpCode):
  """Rename an instance."""
  OP_PARAMS = [
    _PInstanceName,
    _PNameCheck,
    ("new_name", ht.NoDefault, ht.TNonEmptyString, "New instance name"),
    # Unlike instance creation, IP checking defaults to off for renames
    ("ip_check", False, ht.TBool, _PIpCheckDoc),
    ]
class OpInstanceStartup(OpCode):
  """Startup an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PForce,
    _PIgnoreOfflineNodes,
    # Temporary parameters apply to this startup only and are not stored
    ("hvparams", ht.EmptyDict, ht.TDict,
     "Temporary hypervisor parameters, hypervisor-dependent"),
    ("beparams", ht.EmptyDict, ht.TDict, "Temporary backend parameters"),
    _PNoRemember,
    ]
class OpInstanceShutdown(OpCode):
  """Shutdown an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PIgnoreOfflineNodes,
    ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt,
     "How long to wait for instance to shut down"),
    _PNoRemember,
    ]
class OpInstanceReboot(OpCode):
  """Reboot an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PShutdownTimeout,
    ("ignore_secondaries", False, ht.TBool,
     "Whether to start the instance even if secondary disks are failing"),
    ("reboot_type", ht.NoDefault, ht.TElemOf(constants.REBOOT_TYPES),
     "How to reboot instance"),
    ]
class OpInstanceReplaceDisks(OpCode):
  """Replace the disks of an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    ("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES),
     "Replacement mode"),
    # Empty list means "replace all disks"
    ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
     "Disk indexes"),
    ("remote_node", None, ht.TMaybeString, "New secondary node"),
    ("iallocator", None, ht.TMaybeString,
     "Iallocator for deciding new secondary node"),
    ("early_release", False, ht.TBool,
     "Whether to release locks as soon as possible"),
    ]
class OpInstanceFailover(OpCode):
  """Failover an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PShutdownTimeout,
    _PIgnoreConsistency,
    _PMigrationTargetNode,
    ("iallocator", None, ht.TMaybeString,
     "Iallocator for deciding the target node for shared-storage instances"),
    ]
class OpInstanceMigrate(OpCode):
  """Migrate an instance.

  This migrates (without shutting down an instance) to its secondary
  node.

  @ivar instance_name: the name of the instance
  @ivar mode: the migration mode (live, non-live or None for auto)

  """
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PMigrationMode,
    _PMigrationLive,
    _PMigrationTargetNode,
    ("cleanup", False, ht.TBool,
     "Whether a previously failed migration should be cleaned up"),
    ("iallocator", None, ht.TMaybeString,
     "Iallocator for deciding the target node for shared-storage instances"),
    ("allow_failover", False, ht.TBool,
     "Whether we can fallback to failover if migration is not possible"),
    ]
class OpInstanceMove(OpCode):
  """Move an instance.

  This moves (with shutting down an instance and data copying) to an
  arbitrary node.

  @ivar instance_name: the name of the instance
  @ivar target_node: the destination node

  """
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PShutdownTimeout,
    ("target_node", ht.NoDefault, ht.TNonEmptyString, "Target node"),
    _PIgnoreConsistency,
    ]
class OpInstanceConsole(OpCode):
  """Connect to an instance's console."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName
    ]
class OpInstanceActivateDisks(OpCode):
  """Activate an instance's disks."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    ("ignore_size", False, ht.TBool, "Whether to ignore recorded size"),
    ]
class OpInstanceDeactivateDisks(OpCode):
  """Deactivate an instance's disks."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PForce,
    ]
class OpInstanceRecreateDisks(OpCode):
  """Recreate an instance's disks."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    # Empty list means "recreate all disks"
    ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
     "List of disk indexes"),
    ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "New instance nodes, if relocation is desired"),
    ]
class OpInstanceQuery(OpCode):
  """Compute the list of instances."""
  OP_PARAMS = [
    _POutputFields,
    _PUseLocking,
    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "Empty list to query all instances, instance names otherwise"),
    ]
class OpInstanceQueryData(OpCode):
  """Compute the run-time status of instances."""
  OP_PARAMS = [
    _PUseLocking,
    ("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "Instance names"),
    # With static=True only the configuration is consulted, which avoids
    # contacting the nodes
    ("static", False, ht.TBool,
     "Whether to only return configuration data without querying"
     " nodes"),
    ]
class OpInstanceSetParams(OpCode):
  """Change the parameters of an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PForce,
    _PForceVariant,
    # TODO: Use _TestNicDef
    ("nics", ht.EmptyList, ht.TList,
     "List of NIC changes. Each item is of the form ``(op, settings)``."
     " ``op`` can be ``%s`` to add a new NIC with the specified settings,"
     " ``%s`` to remove the last NIC or a number to modify the settings"
     " of the NIC with that index." %
     (constants.DDM_ADD, constants.DDM_REMOVE)),
    ("disks", ht.EmptyList, ht.TList, "List of disk changes. See ``nics``."),
    ("beparams", ht.EmptyDict, ht.TDict, "Per-instance backend parameters"),
    ("hvparams", ht.EmptyDict, ht.TDict,
     "Per-instance hypervisor parameters, hypervisor-dependent"),
    ("disk_template", None, ht.TOr(ht.TNone, _CheckDiskTemplate),
     "Disk template for instance"),
    ("remote_node", None, ht.TMaybeString,
     "Secondary node (used when changing disk template)"),
    ("os_name", None, ht.TMaybeString,
     "Change instance's OS name. Does not reinstall the instance."),
    ("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"),
    ("wait_for_sync", True, ht.TBool,
     "Whether to wait for the disk to synchronize, when changing template"),
    ]
class OpInstanceGrowDisk(OpCode):
  """Grow a disk of an instance."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PWaitForSync,
    ("disk", ht.NoDefault, ht.TInt, "Disk index"),
    ("amount", ht.NoDefault, ht.TInt,
     "Amount of disk space to add (megabytes)"),
    ]
class OpGroupAdd(OpCode):
  """Add a node group to the cluster."""
  OP_DSC_FIELD = "group_name"
  OP_PARAMS = [
    _PGroupName,
    _PNodeGroupAllocPolicy,
    _PGroupNodeParams,
    ]
class OpGroupAssignNodes(OpCode):
  """Assign nodes to a node group."""
  OP_DSC_FIELD = "group_name"
  OP_PARAMS = [
    _PGroupName,
    _PForce,
    ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
     "List of nodes to assign"),
    ]
class OpGroupQuery(OpCode):
  """Compute the list of node groups."""
  OP_PARAMS = [
    _POutputFields,
    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "Empty list to query all groups, group names otherwise"),
    ]
class OpGroupSetParams(OpCode):
  """Change the parameters of a node group."""
  OP_DSC_FIELD = "group_name"
  OP_PARAMS = [
    _PGroupName,
    _PNodeGroupAllocPolicy,
    _PGroupNodeParams,
    ]
class OpGroupRemove(OpCode):
  """Remove a node group from the cluster."""
  OP_DSC_FIELD = "group_name"
  OP_PARAMS = [
    _PGroupName,
    ]
class OpGroupRename(OpCode):
  """Rename a node group in the cluster."""
  OP_PARAMS = [
    _PGroupName,
    ("new_name", ht.NoDefault, ht.TNonEmptyString, "New group name"),
    ]
class OpOsDiagnose(OpCode):
  """Compute the list of guest operating systems."""
  OP_PARAMS = [
    _POutputFields,
    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "Which operating systems to diagnose"),
    ]
class OpBackupQuery(OpCode):
  """Compute the list of exported images."""
  OP_PARAMS = [
    _PUseLocking,
    ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
     "Empty list to query all nodes, node names otherwise"),
    ]
class OpBackupPrepare(OpCode):
  """Prepares an instance export.

  @ivar instance_name: Instance name
  @ivar mode: Export mode (one of L{constants.EXPORT_MODES})

  """
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    ("mode", ht.NoDefault, ht.TElemOf(constants.EXPORT_MODES),
     "Export mode"),
    ]
class OpBackupExport(OpCode):
  """Export an instance.

  For local exports, the export destination is the node name. For remote
  exports, the export destination is a list of tuples, each consisting of
  hostname/IP address, port, HMAC and HMAC salt. The HMAC is calculated using
  the cluster domain secret over the value "${index}:${hostname}:${port}". The
  destination X509 CA must be a signed certificate.

  @ivar mode: Export mode (one of L{constants.EXPORT_MODES})
  @ivar target_node: Export destination
  @ivar x509_key_name: X509 key to use (remote export only)
  @ivar destination_x509_ca: Destination X509 CA in PEM format (remote export
                             only)

  """
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    _PShutdownTimeout,
    # TODO: Rename target_node as it changes meaning for different export modes
    # (e.g. "destination")
    ("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList),
     "Destination information, depends on export mode"),
    ("shutdown", True, ht.TBool, "Whether to shutdown instance before export"),
    ("remove_instance", False, ht.TBool,
     "Whether to remove instance after export"),
    ("ignore_remove_failures", False, ht.TBool,
     "Whether to ignore failures while removing instances"),
    ("mode", constants.EXPORT_MODE_LOCAL, ht.TElemOf(constants.EXPORT_MODES),
     "Export mode"),
    ("x509_key_name", None, ht.TOr(ht.TList, ht.TNone),
     "Name of X509 key (remote export only)"),
    ("destination_x509_ca", None, ht.TMaybeString,
     "Destination X509 CA (remote export only)"),
    ]
class OpBackupRemove(OpCode):
  """Remove an instance's export."""
  OP_DSC_FIELD = "instance_name"
  OP_PARAMS = [
    _PInstanceName,
    ]
class OpTagsGet(OpCode):
  """Returns the tags of the given object."""
  OP_DSC_FIELD = "name"
  OP_PARAMS = [
    _PTagKind,
    # Name is only meaningful for nodes and instances
    ("name", ht.NoDefault, ht.TMaybeString, None),
    ]
class OpTagsSearch(OpCode):
  """Searches the tags in the cluster for a given pattern."""
  OP_DSC_FIELD = "pattern"
  OP_PARAMS = [
    ("pattern", ht.NoDefault, ht.TNonEmptyString, None),
    ]
class OpTagsSet(OpCode):
  """Add a list of tags on a given object."""
  OP_PARAMS = [
    _PTagKind,
    _PTags,
    # Name is only meaningful for nodes and instances
    ("name", ht.NoDefault, ht.TMaybeString, None),
    ]
class OpTagsDel(OpCode):
  """Remove a list of tags from a given object."""
  OP_PARAMS = [
    _PTagKind,
    _PTags,
    # Name is only meaningful for nodes and instances
    ("name", ht.NoDefault, ht.TMaybeString, None),
    ]
class OpTestDelay(OpCode):
  """Sleeps for a configured amount of time.

  This is used just for debugging and testing.

  Parameters:
    - duration: the time to sleep
    - on_master: if true, sleep on the master
    - on_nodes: list of nodes in which to sleep

  If the on_master parameter is true, it will execute a sleep on the
  master (before any node sleep).

  If the on_nodes list is not empty, it will sleep on those nodes
  (after the sleep on the master, if that is enabled).

  As an additional feature, the case of duration < 0 will be reported
  as an execution error, so this opcode can be used as a failure
  generator. The case of duration == 0 will not be treated specially.

  """
  OP_DSC_FIELD = "duration"
  OP_PARAMS = [
    ("duration", ht.NoDefault, ht.TFloat, None),
    ("on_master", True, ht.TBool, None),
    ("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
    ("repeat", 0, ht.TPositiveInt, None),
    ]
class OpTestAllocator(OpCode):
  """Allocator framework testing.

  This opcode has two modes:
    - gather and return allocator input for a given mode (allocate new
      or replace secondary) and a given instance definition (direction
      'in')
    - run a selected allocator for a given operation (as above) and
      return the allocator output (direction 'out')

  """
  OP_DSC_FIELD = "allocator"
  OP_PARAMS = [
    ("direction", ht.NoDefault,
     ht.TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS), None),
    ("mode", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_MODES), None),
    ("name", ht.NoDefault, ht.TNonEmptyString, None),
    ("nics", ht.NoDefault, ht.TOr(ht.TNone, ht.TListOf(
     ht.TDictOf(ht.TElemOf([constants.INIC_MAC, constants.INIC_IP, "bridge"]),
                ht.TOr(ht.TNone, ht.TNonEmptyString)))), None),
    ("disks", ht.NoDefault, ht.TOr(ht.TNone, ht.TList), None),
    ("hypervisor", None, ht.TMaybeString, None),
    ("allocator", None, ht.TMaybeString, None),
    ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
    ("memory", None, ht.TOr(ht.TNone, ht.TPositiveInt), None),
    ("vcpus", None, ht.TOr(ht.TNone, ht.TPositiveInt), None),
    ("os", None, ht.TMaybeString, None),
    ("disk_template", None, ht.TMaybeString, None),
    ("evac_nodes", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
     None),
    ("instances", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
     None),
    ("evac_mode", None,
     ht.TOr(ht.TNone, ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)), None),
    ("target_groups", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
     None),
    ]
class OpTestJqueue(OpCode):
  """Utility opcode to test some aspects of the job queue.

  """
  OP_PARAMS = [
    ("notify_waitlock", False, ht.TBool, None),
    ("notify_exec", False, ht.TBool, None),
    ("log_messages", ht.EmptyList, ht.TListOf(ht.TString), None),
    ("fail", False, ht.TBool, None),
    ]
class OpTestDummy(OpCode):
  """Utility opcode used by unittests.

  """
  OP_PARAMS = [
    ("result", ht.NoDefault, ht.NoType, None),
    ("messages", ht.NoDefault, ht.NoType, None),
    ("fail", ht.NoDefault, ht.NoType, None),
    ("submit_jobs", None, ht.NoType, None),
    ]
  # This opcode has no logical unit (LU) backing it
  WITH_LU = False
def _GetOpList():
  """Returns list of all defined opcodes.

  Does not eliminate duplicates by C{OP_ID}.

  """
  result = []
  for value in globals().values():
    if (isinstance(value, type) and issubclass(value, OpCode) and
        hasattr(value, "OP_ID") and value is not OpCode):
      result.append(value)
  return result
OP_MAPPING = dict((v.OP_ID, v) for v in _GetOpList())
|
import re
import string
import sys
import os
# Command-line usage string; the script expects exactly six arguments.
USAGE = 'USAGE: parse.y <player.h> <playercore_casts.i> <playercore_arraysofclasses.i> <Jplayercore> <playercore> <player.java>'

if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements, backticks,
    # string.split); must be run with a py2 interpreter.
    if len(sys.argv) != 7:
        print USAGE
        sys.exit(-1)
    infilename = sys.argv[1]
    outfilename = sys.argv[2]
    aofcfilename = sys.argv[3]
    outdir = sys.argv[4]
    pcoutdir = sys.argv[5]
    pcjfilename = sys.argv[6]
    # Ensure output directories exist (POSIX shell; no quoting of paths)
    os.system('mkdir -p ' + outdir)
    os.system('mkdir -p ' + pcoutdir)
    # Read in the entire file
    infile = open(infilename, 'r')
    instream = infile.read()
    infile.close()
    outfile = open(outfilename, 'w+')
    aofcfile = open(aofcfilename, 'w+')
    pcjfile = open(pcoutdir + '/' + pcjfilename, 'w+')
    # strip C++-style comments
    pattern = re.compile('//.*')
    instream = pattern.sub('', instream)
    # strip C-style comments
    pattern = re.compile('/\*.*?\*/', re.MULTILINE | re.DOTALL)
    instream = pattern.sub('', instream)
    # strip blank lines
    pattern = re.compile('^\s*?\n', re.MULTILINE)
    instream = pattern.sub('', instream)
    # find structs named player_*; capture up to (but excluding) the
    # terminating semicolon so the typedef'd name is the last token
    pattern = re.compile('typedef\s+struct\s+player_\w+[^}]+\}[^;]+',
                         re.MULTILINE)
    structs = pattern.findall(instream)
    print 'Found ' + `len(structs)` + ' struct(s)'
    # Helper patterns for dissecting each struct body
    contentspattern = re.compile('.*\{\s*(.*?)\s*\}', re.MULTILINE | re.DOTALL)
    declpattern = re.compile('\s*([^;]*?;)', re.MULTILINE)
    typepattern = re.compile('\s*\S+')
    variablepattern = re.compile('\s*([^,;]+?)\s*[,;]')
    #arraypattern = re.compile('\[\s*(\w*?)\s*\]')
    arraypattern = re.compile('\[(.*?)\]')
    outfile.write('%inline\n%{\n\n')
    pcjfile.write('package net.sourceforge.playerstage.Jplayercore;\n')
    pcjfile.write('public class player {\n\n')
    for s in structs:
        # extract type of struct (last whitespace-separated token)
        split = string.split(s)
        typename = split[-1]
        # pick out the contents of the struct
        varpart = contentspattern.findall(s)
        if len(varpart) != 1:
            print 'skipping nested / empty struct ' + typename
            continue
        # SWIG macro that lets us access arrays of this non-primitive type
        # as Java arrays
        aofcfile.write('JAVA_ARRAYSOFCLASSES(' + typename +')\n')
        buf_to_name = 'buf_to_' + typename
        buf_from_name = typename + '_to_buf'
        buf_to_Jname = 'buf_to_J' + typename
        buf_from_Jname = 'J' + typename + '_to_buf'
        sizeof_name = typename + '_sizeof'
        # function to return the size of the underlying C structure
        outfile.write('size_t ' + sizeof_name + '(void)\n')
        outfile.write('{\n')
        outfile.write('  return(sizeof(' + typename + '));\n')
        outfile.write('}\n')
        # JNI cast from a void* to a pointer to this type
        outfile.write(typename + '* ' + buf_to_name + '(void* buf)\n')
        outfile.write('{\n')
        outfile.write('  return((' + typename + '*)(buf));\n')
        outfile.write('}\n')
        # JNI cast from a pointer to this type to a void*
        outfile.write('void* ' + buf_from_name + '(' + typename + '* msg)\n')
        outfile.write('{\n')
        outfile.write('  return((void*)(msg));\n')
        outfile.write('}\n')
        # Equivalent non-JNI Java class
        jclass = 'J' + typename
        jfile = open(outdir + '/' + jclass + '.java', 'w+')
        jfile.write('package net.sourceforge.playerstage.Jplayercore;\n')
        jfile.write('import java.io.Serializable;\n')
        jfile.write('public class ' + jclass + ' implements Serializable {\n')
        # serialVersionUID derived from the struct text; NOTE(review):
        # py2 hash() is platform/process dependent, so generated IDs may
        # differ between runs — confirm this is acceptable
        jfile.write('  public final static long serialVersionUID = ' + `hash(s)` + 'L;\n')
        jclass_constructor = '  public ' + jclass + '() {\n';
        # Static method in class player to convert from JNI Java object to
        # non-JNI java object
        pcj_data_to_jdata = ''
        pcj_data_to_jdata += '  public static ' + jclass + ' ' + typename + '_to_' + jclass + '(' + typename + ' data) {\n'
        pcj_data_to_jdata += '    ' + jclass + ' Jdata = new ' + jclass + '();\n'
        # Static method in class player to convert from non-JNI Java object to
        # JNI java object
        pcj_jdata_to_data = ''
        pcj_jdata_to_data += '  public static ' + typename + ' ' + jclass + '_to_' + typename + '(' + jclass + ' Jdata) {\n'
        pcj_jdata_to_data += '    ' + typename + ' data = new ' + typename + '();\n'
        # Static method in class playercore to convert from SWIGTYPE_p_void
        # to non-JNI Java object.
        pcjfile.write('  public static ' + jclass + ' ' + buf_to_Jname + '(SWIGTYPE_p_void buf) {\n')
        pcjfile.write('    ' + typename + ' data = playercore_java.' + buf_to_name + '(buf);\n')
        pcjfile.write('    return(' + typename + '_to_' + jclass + '(data));\n')
        pcjfile.write('  }\n\n')
        # Static method in class playercore to convert non-JNI Java object to
        # SWIGTYPE_p_void.
        pcjfile.write('  public static SWIGTYPE_p_void ' + buf_from_Jname + '(' + jclass + ' Jdata) {\n')
        pcjfile.write('    ' + typename + ' data = ' + jclass + '_to_' + typename + '(Jdata);\n')
        pcjfile.write('    return(playercore_java.' + buf_from_name + '(data));\n')
        pcjfile.write('  }\n\n')
        # separate the variable declarations
        decls = declpattern.finditer(varpart[0])
        for d in decls:
            # find the type and variable names in this declaration
            dstring = d.string[d.start(1):d.end(1)]
            # NOTE(review): 'type' shadows the builtin for the rest of the loop
            type = typepattern.findall(dstring)[0]
            dstring = typepattern.sub('', dstring, 1)
            vars = variablepattern.finditer(dstring)
            # Do some name mangling for common types; unsigned C types map to
            # the next-larger signed Java type
            builtin_type = 1
            if type == 'int64_t':
                jtype = 'long'
            elif type == 'uint64_t':
                jtype = 'long'
            elif type == 'int32_t':
                jtype = 'int'
            elif type == 'uint32_t':
                jtype = 'long'
            elif type == 'int16_t':
                jtype = 'short'
            elif type == 'uint16_t':
                jtype = 'int'
            elif type == 'int8_t':
                jtype = 'byte'
            elif type == 'uint8_t':
                jtype = 'short'
            elif type == 'char':
                jtype = 'char'
            elif type == 'bool_t':
                jtype = 'boolean'
            elif type == 'double':
                jtype = 'double'
            elif type == 'float':
                jtype = 'float'
            else:
                # rely on a previous declaration of a J class for this type
                jtype = 'J' + type
                builtin_type = 0
            # iterate through each variable
            for var in vars:
                varstring = var.string[var.start(1):var.end(1)]
                # is it an array or a scalar?
                arraysize = arraypattern.findall(varstring)
                if len(arraysize) > 0:
                    arraysize = arraysize[0]
                    varstring = arraypattern.sub('', varstring)
                    # char arrays become Java Strings; everything else a []
                    if jtype == 'char':
                        jfile.write(' public String ' + varstring + ';\n')
                    else:
                        jfile.write(' public ' + jtype + '[] ' + varstring + ';\n')
                    #if builtin_type == 0:
                    if jtype != 'char':
                        # symbolic sizes are resolved via generated constants
                        if arraysize.isdigit():
                            jclass_constructor += '    ' + varstring + ' = new ' + jtype + '[' + arraysize + '];\n'
                        else:
                            jclass_constructor += '    ' + varstring + ' = new ' + jtype + '[playercore_javaConstants.' + arraysize + '];\n'
                else:
                    arraysize = ''
                    jfile.write(' public ' + jtype + ' ' + varstring + ';\n')
                    if builtin_type == 0:
                        jclass_constructor += '    ' + varstring + ' = new ' + jtype + '();\n'
                # JavaBean-style accessor name: capitalize first letter
                capvarstring = string.capitalize(varstring[0]) + varstring[1:]
                if builtin_type:
                    pcj_data_to_jdata += '    Jdata.' + varstring + ' = data.get' + capvarstring + '();\n'
                    pcj_jdata_to_data += '    data.set' + capvarstring + '(Jdata.' + varstring +');\n'
                else:
                    if arraysize == '':
                        pcj_data_to_jdata += '    Jdata.' + varstring + ' = ' + type + '_to_' + jtype + '(data.get' + capvarstring + '());\n'
                        pcj_jdata_to_data += '    data.set' + capvarstring + '(' + jtype + '_to_' + type + '(Jdata.' + varstring + '));\n'
                    else:
                        # non-numeric sizes need the generated constant prefix
                        try:
                            asize = int(arraysize)
                        except:
                            arraysize = 'playercore_javaConstants.' + arraysize
                        pcj_data_to_jdata += '    {\n'
                        pcj_data_to_jdata += '      ' + type + ' foo[] = data.get' + capvarstring + '();\n'
                        pcj_data_to_jdata += '      for(int i=0;i<' + arraysize + ';i++)\n'
                        pcj_data_to_jdata += '        Jdata.' + varstring + '[i] = ' + type + '_to_' + jtype + '(foo[i]);\n'
                        pcj_data_to_jdata += '    }\n'
                        pcj_jdata_to_data += '    {\n'
                        pcj_jdata_to_data += '      ' + type + ' foo[] = new ' + type + '[' + arraysize + '];\n'
                        pcj_jdata_to_data += '      for(int i=0;i<' + arraysize + ';i++)\n'
                        pcj_jdata_to_data += '        foo[i] = ' + jtype + '_to_' + type + '(Jdata.' + varstring + '[i]);\n'
                        pcj_jdata_to_data += '      data.set' + capvarstring + '(foo);\n'
                        pcj_jdata_to_data += '    }\n'
        # close out the two conversion methods and write them
        pcj_data_to_jdata += '    return(Jdata);\n'
        pcj_data_to_jdata += '  }\n\n'
        pcjfile.write(pcj_data_to_jdata)
        pcj_jdata_to_data += '    return(data);\n'
        pcj_jdata_to_data += '  }\n\n'
        pcjfile.write(pcj_jdata_to_data)
        jclass_constructor += '  }\n'
        jfile.write(jclass_constructor)
        jfile.write('}\n')
        jfile.close()
    outfile.write('\n%}\n')
    outfile.close()
    pcjfile.write('\n}\n')
    pcjfile.close()
    aofcfile.close()
|
from spacewalk.common.rhnTranslate import _
from spacewalk.common import rhnFault, rhnFlags, log_debug, log_error
from spacewalk.server.rhnLib import parseRPMName
from spacewalk.server.rhnHandler import rhnHandler
from spacewalk.server import rhnSQL, rhnCapability
class Errata(rhnHandler):
""" Errata class --- retrieve (via xmlrpc) package errata. """
def __init__(self):
rhnHandler.__init__(self)
# Exposed Errata functions:
self.functions = []
self.functions.append('GetByPackage') # Clients v1-
self.functions.append('getPackageErratum') # Clients v2+
self.functions.append('getErrataInfo') # clients v2+
    def GetByPackage(self, pkg, osRel):
        """ Clients v1- Get errata for a package given "n-v-r" format
            IN: pkg: "n-v-r" (old client call)
                     or [n,v,r]
                osRel: OS release
            RET: a hash by errata that applies to this package
                 (ie, newer packages are available). We also limit the scope
                 for a particular osRel.
        """
        if type(pkg) == type(''): # Old client support.
            # Split "n-v-r" into its [name, version, release] components
            pkg = parseRPMName(pkg)
        log_debug(1, pkg, osRel)
        # Stuff the action in the headers:
        transport = rhnFlags.get('outputTransportOptions')
        transport['X-RHN-Action'] = 'GetByPackage'
        # now look up the errata
        if type(pkg[0]) != type(''):
            log_error("Invalid package name: %s %s" % (type(pkg[0]), pkg[0]))
            raise rhnFault(30, _("Expected a package name, not: %s") % pkg[0])
        #bug#186996:adding synopsis field to advisory info
        #client side changes are needed to access this data.
        # Join package name -> channel (filtered by dist release) -> errata,
        # restricted to publicly-available channel families
        h = rhnSQL.prepare("""
            select distinct
                e.id            errata_id,
                e.advisory_type errata_type,
                e.advisory      advisory,
                e.topic         topic,
                e.description   description,
                e.synopsis      synopsis
            from
                rhnErrata e,
                rhnPublicChannelFamily pcf,
                rhnChannelFamilyMembers cfm,
                rhnErrataPackage ep,
                rhnChannelPackage cp,
                rhnChannelErrata ce,
                rhnDistChannelMap dcm,
                rhnPackage p
            where
                    1=1
                and p.name_id = LOOKUP_PACKAGE_NAME(:name)
                -- map to a channel
                and p.id = cp.package_id
                and cp.channel_id = dcm.channel_id
                and dcm.release = :dist
                -- map to an errata as well
                and p.id = ep.package_id
                and ep.errata_id = e.id
                -- the errata and the channel have to be linked
                and ce.channel_id = cp.channel_id
                -- and the channel has to be public
                and cp.channel_id = cfm.channel_id
                and cfm.channel_family_id = pcf.channel_family_id
                -- and get the erratum
                and e.id = ce.errata_id
        """)
        h.execute(name = pkg[0], dist = str(osRel))
        ret = []
        # sanitize the results for display in the clients
        while 1:
            row = h.fetchone_dict()
            if row is None:
                break
            # Replace NULL columns with "N/A"; presumably None cannot be
            # marshalled to the old clients over XML-RPC -- TODO confirm
            for k in row.keys():
                if row[k] is None:
                    row[k] = "N/A"
            ret.append(row)
        return ret
def getPackageErratum(self, system_id, pkg):
""" Clients v2+ - Get errata for a package given [n,v,r,e,a,...] format
Sing-along: You say erratum(sing), I say errata(pl)! :)
IN: pkg: [n,v,r,e,s,a,ch,...]
RET: a hash by errata that applies to this package
"""
log_debug(5, system_id, pkg)
if type(pkg) != type([]) or len(pkg) < 7:
log_error("Got invalid package specification: %s" % str(pkg))
raise rhnFault(30, _("Expected a package, not: %s") % pkg)
# Authenticate and decode server id.
self.auth_system(system_id)
# log the entry
log_debug(1, self.server_id, pkg)
# Stuff the action in the headers:
transport = rhnFlags.get('outputTransportOptions')
transport['X-RHN-Action'] = 'getPackageErratum'
name, ver, rel, epoch, arch, size, channel = pkg[:7]
if epoch in ['', 'none', 'None']:
epoch = None
# XXX: also, should arch/size/channel ever be used?
#bug#186996:adding synopsis field to errata info
#client side changes are needed to access this data.
h = rhnSQL.prepare("""
select distinct
e.id errata_id,
e.advisory_type errata_type,
e.advisory advisory,
e.topic topic,
e.description description,
e.synopsis synopsis
from
rhnServerChannel sc,
rhnChannelPackage cp,
rhnChannelErrata ce,
rhnErrata e,
rhnErrataPackage ep,
rhnPackage p
where
p.name_id = LOOKUP_PACKAGE_NAME(:name)
and p.evr_id = LOOKUP_EVR(:epoch, :ver, :rel)
-- map to a channel
and p.id = cp.package_id
-- map to an errata as well
and p.id = ep.package_id
and ep.errata_id = e.id
-- the errata and the channel have to be linked
and e.id = ce.errata_id
and ce.channel_id = cp.channel_id
-- and the server has to be subscribed to the channel
and cp.channel_id = sc.channel_id
and sc.server_id = :server_id
""") # " emacs sucks
h.execute(name = name, ver = ver, rel = rel, epoch = epoch,
server_id = str(self.server_id))
ret = []
# sanitize the results for display in the clients
while 1:
row = h.fetchone_dict()
if row is None:
break
for k in row.keys():
if row[k] is None:
row[k] = "N/A"
ret.append(row)
return ret
# I don't trust this errata_id business, but chip says "trust me"
def getErrataInfo(self, system_id, errata_id):
log_debug(5, system_id, errata_id)
# Authenticate the server certificate
self.auth_system(system_id)
# log this thing
log_debug(1, self.server_id, errata_id)
client_caps = rhnCapability.get_client_capabilities()
log_debug(3,"Client Capabilities", client_caps)
multiarch = 0
cap_info = None
if client_caps and client_caps.has_key('packages.update'):
cap_info = client_caps['packages.update']
if cap_info and cap_info['version'] > 1:
multiarch = 1
statement = """
select distinct
pn.name,
pe.epoch,
pe.version,
pe.release,
pa.label arch
from
rhnPackageName pn,
rhnPackageEVR pe,
rhnPackage p,
rhnPackageArch pa,
rhnChannelPackage cp,
rhnServerChannel sc,
rhnErrataPackage ep
where
ep.errata_id = :errata_id
and ep.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pe.id
and p.package_arch_id = pa.id
and sc.server_id = :server_id
and sc.channel_id = cp.channel_id
and cp.package_id = p.id
"""
h = rhnSQL.prepare(statement)
h.execute(errata_id = errata_id, server_id = self.server_id)
packages = h.fetchall_dict()
ret = []
if not packages:
return []
for package in packages:
if package['name'] is not None:
if package['epoch'] is None:
package['epoch'] = ""
pkg_arch = ''
if multiarch:
pkg_arch = package['arch'] or ''
ret.append([package['name'],
package['version'],
package['release'],
package['epoch'],
pkg_arch])
return ret
if __name__ == "__main__":
    # This is a server-side XML-RPC handler module; it has no standalone
    # entry point, so bail out with a non-zero exit status.
    print "You can not run this module by itself"
    import sys; sys.exit(-1)
|
from django import forms
from django.contrib.auth.models import User
from django.forms import ModelForm
from django.db import models
# Monkey-patch two extra SICO credential columns onto Django's built-in User
# model at import time.
# NOTE(review): add_to_class on a concrete model needs matching database
# columns -- confirm the schema/migration actually provides them.
User.add_to_class('usuario_sico', models.CharField(max_length=10, null=False, blank=False))
User.add_to_class('contrasenia_sico', models.CharField(max_length=10, null=False, blank=False))

class SignUpForm(ModelForm):
    """Registration form for User including the extra SICO credential fields.

    Both password-like fields render as masked inputs via PasswordInput.
    NOTE(review): the form itself does not hash 'password' -- presumably the
    consuming view calls set_password(); verify before relying on it.
    """
    class Meta:
        model = User
        fields = ['username', 'password', 'email', 'first_name', 'last_name', 'usuario_sico', 'contrasenia_sico']
        widgets = {
            'password': forms.PasswordInput(),
            'contrasenia_sico': forms.PasswordInput(),
        }
|
import sys
import time
from naoqi import ALProxy

# Address of the NAO robot's NAOqi broker.
IP = "nao.local"
PORT = 9559

# One positional argument required: the base name of the recording.
if (len(sys.argv) < 2):
    print "Usage: 'python RecordAudio.py nume'"
    sys.exit(1)

# The file is written on the robot's own filesystem, not locally.
fileName = "/home/nao/" + sys.argv[1] + ".wav"
aur = ALProxy("ALAudioRecorder", IP, PORT)
# Microphone enable mask; exactly one channel is recorded.
# NOTE(review): assumed (front, rear, left, right) ordering per
# ALAudioRecorder docs -- confirm which mic the third slot selects.
channels = [0,0,1,0]
# 160000 is the requested sample rate passed straight to NAOqi.
aur.startMicrophonesRecording(fileName, "wav", 160000, channels)
# Recording runs until the user confirms ("Sfarsit?" = "Finished?").
c=raw_input("Sfarsit?")
aur.stopMicrophonesRecording()
c=raw_input("play?")
aup = ALProxy("ALAudioPlayer", IP, PORT)
# Play back the recording at half volume; -1.0 is the pan/balance argument.
aup.playFile(fileName,0.5,-1.0)
c=raw_input("gata?")
|
import re
from PyQt4.QtCore import (Qt, SIGNAL, pyqtSignature)
from PyQt4.QtGui import (QApplication, QDialog)
import ui_findandreplacedlg
MAC = True
try:
from PyQt4.QtGui import qt_mac_set_native_menubar
except ImportError:
MAC = False
class FindAndReplaceDlg(QDialog,
                        ui_findandreplacedlg.Ui_FindAndReplaceDlg):
    """Find/replace dialog operating on an in-memory copy of the text.

    Widget attributes (findLineEdit, findButton, ...) are created by
    setupUi() from the Designer form; methods named on_<widget>_<signal>
    are auto-connected by setupUi().  Matches are announced through the
    old-style "found"/"notfound" signals, and the (possibly modified)
    text is read back with text().
    """
    def __init__(self, text, parent=None):
        super(FindAndReplaceDlg, self).__init__(parent)
        self.__text = str(text)
        # Offset at which the next search starts, so repeated Find advances.
        self.__index = 0
        self.setupUi(self)
        if not MAC:
            # On non-Mac platforms keep keyboard focus in the line edits.
            self.findButton.setFocusPolicy(Qt.NoFocus)
            self.replaceButton.setFocusPolicy(Qt.NoFocus)
            self.replaceAllButton.setFocusPolicy(Qt.NoFocus)
            self.closeButton.setFocusPolicy(Qt.NoFocus)
        self.updateUi()

    @pyqtSignature("QString")
    def on_findLineEdit_textEdited(self, text):
        # Pattern changed: restart searching from the beginning.
        self.__index = 0
        self.updateUi()

    def makeRegex(self):
        """Compile a regex reflecting the current dialog settings."""
        findText = str(self.findLineEdit.text())
        if str(self.syntaxComboBox.currentText()) == "Literal":
            # Literal mode: neutralize regex metacharacters in the pattern.
            findText = re.escape(findText)
        flags = re.MULTILINE|re.DOTALL|re.UNICODE
        if not self.caseCheckBox.isChecked():
            flags |= re.IGNORECASE
        if self.wholeCheckBox.isChecked():
            # Whole-word match: anchor the pattern with word boundaries.
            findText = r"\b{0}\b".format(findText)
        return re.compile(findText, flags)

    @pyqtSignature("")
    def on_findButton_clicked(self):
        regex = self.makeRegex()
        match = regex.search(self.__text, self.__index)
        if match is not None:
            # Resume after this hit on the next click.
            self.__index = match.end()
            self.emit(SIGNAL("found"), match.start())
        else:
            self.emit(SIGNAL("notfound"))

    @pyqtSignature("")
    def on_replaceButton_clicked(self):
        # Replace only the first occurrence (count=1).
        regex = self.makeRegex()
        self.__text = regex.sub(str(self.replaceLineEdit.text()),
                                self.__text, 1)

    @pyqtSignature("")
    def on_replaceAllButton_clicked(self):
        # Replace every occurrence in the buffer.
        regex = self.makeRegex()
        self.__text = regex.sub(str(self.replaceLineEdit.text()),
                                self.__text)

    def updateUi(self):
        # Buttons are only useful when there is a search pattern.
        enable = not self.findLineEdit.text().isEmpty()
        self.findButton.setEnabled(enable)
        self.replaceButton.setEnabled(enable)
        self.replaceAllButton.setEnabled(enable)

    def text(self):
        """Return the current (possibly modified) text buffer."""
        return self.__text
if __name__ == "__main__":
    # Manual smoke test: run the dialog over a sample text and print the
    # (possibly modified) buffer once the window closes.
    import sys
    text = """US experience shows that, unlike traditional patents,
software patents do not encourage innovation and R&D, quite the
contrary. In particular they hurt small and medium-sized enterprises
and generally newcomers in the market. They will just weaken the market
and increase spending on patents and litigation, at the expense of
technological innovation and research. Especially dangerous are
attempts to abuse the patent system by preventing interoperability as a
means of avoiding competition with technological ability.
--- Extract quoted from Linus Torvalds and Alan Cox's letter
to the President of the European Parliament
http://www.effi.org/patentit/patents_torvalds_cox.html"""
    def found(where):
        print("Found at {0}".format(where))
    def nomore():
        print("No more found")
    app = QApplication(sys.argv)
    form = FindAndReplaceDlg(text)
    # Old-style signal hookups matching the emits inside the dialog.
    form.connect(form, SIGNAL("found"), found)
    form.connect(form, SIGNAL("notfound"), nomore)
    form.show()
    app.exec_()
    print(form.text())
|
import json
import ast
import textwrap
from mixbox import idgen
from mixbox.namespaces import Namespace
from stix.core import STIXHeader, STIXPackage
from stix.common import InformationSource
from stix.common.vocabs import VocabString
from stix.incident import Incident
from stix.incident.time import Time as StixTime
from stix.indicator import Indicator
from stix.ttp import TTP, VictimTargeting
from stix.extensions.identity.ciq_identity_3_0 import CIQIdentity3_0Instance, STIXCIQIdentity3_0, OrganisationInfo
from cybox.core import Observable
from cybox.objects.socket_address_object import SocketAddress
from cybox.objects.address_object import Address
from cybox.objects.port_object import Port
from cybox.objects.network_connection_object import NetworkConnection
from cybox.objects.artifact_object import Artifact, ZlibCompression, Base64Encoding
from cybox.common import ToolInformationList, ToolInformation
from cybox.common import Time as CyboxTime
from datetime import datetime
import conpot
CONPOT_NAMESPACE = 'mushmush-conpot'
CONPOT_NAMESPACE_URL = 'http://mushmush.org/conpot'
class StixTransformer(object):
    """Transforms a single Conpot honeypot event into a STIX 1.x XML package.

    The package contains one Incident with a leveraged TTP (victim
    targeting), and one Indicator holding a NetworkConnection observable
    (attacker socket -> honeypot socket) plus an Artifact with the raw
    event payload.
    """
    def __init__(self, config, dom):
        # Default destination port per supported protocol; values are
        # overridden below by the port attributes in the Conpot template.
        self.protocol_to_port_mapping = dict(
            modbus=502,
            snmp=161,
            http=80,
            s7comm=102,
        )
        port_path_list = map(lambda x: '//conpot_template/protocols/'+x+'/@port', self.protocol_to_port_mapping.keys())
        for port_path in port_path_list:
            try:
                protocol_port = ast.literal_eval(dom.xpath(port_path)[0])
                # Recover the protocol name from the XPath expression itself.
                protocol_name = port_path.rsplit("/", 2)[1]
                self.protocol_to_port_mapping[protocol_name] = protocol_port
            except IndexError:
                # Protocol not configured in this template: keep the default.
                continue
        # All STIX ids generated below live in the Conpot namespace.
        conpot_namespace = Namespace(CONPOT_NAMESPACE_URL, CONPOT_NAMESPACE, '')
        idgen.set_id_namespace(conpot_namespace)

    def _add_header(self, stix_package, title, desc):
        """Attach a STIXHeader with title, description and produced-time."""
        stix_header = STIXHeader()
        stix_header.title = title
        stix_header.description = desc
        stix_header.information_source = InformationSource()
        stix_header.information_source.time = CyboxTime()
        stix_header.information_source.time.produced_time = datetime.now()
        stix_package.stix_header = stix_header

    def transform(self, event):
        """Build and return the STIX XML string for one Conpot *event*.

        Expected event keys (as used below): 'session_id', 'timestamp'
        (datetime), 'remote' ((ip, port) of the attacker), 'public_ip',
        'data_type' (protocol name) and 'data' (raw payload).
        """
        stix_package = STIXPackage()
        self._add_header(stix_package, "Unauthorized traffic to honeypot", "Describes one or more honeypot incidents")
        # Incident id embeds the honeypot session so events stay correlatable.
        incident = Incident(id_="%s:%s-%s" % (CONPOT_NAMESPACE, 'incident', event['session_id']))
        initial_time = StixTime()
        initial_time.initial_compromise = event['timestamp'].isoformat()
        incident.time = initial_time
        incident.title = "Conpot Event"
        incident.short_description = "Traffic to Conpot ICS honeypot"
        incident.add_category(VocabString(value='Scans/Probes/Attempted Access'))
        # Identify Conpot itself as the reporting tool.
        tool_list = ToolInformationList()
        tool_list.append(ToolInformation.from_dict({
            'name': "Conpot",
            'vendor': "Conpot Team",
            'version': conpot.__version__,
            'description': textwrap.dedent('Conpot is a low interactive server side Industrial Control Systems '
                                           'honeypot designed to be easy to deploy, modify and extend.')
        }))
        incident.reporter = InformationSource(tools=tool_list)
        incident.add_discovery_method("Monitoring Service")
        incident.confidence = "High"
        # Victim Targeting by Sector
        ciq_identity = CIQIdentity3_0Instance()
        #identity_spec = STIXCIQIdentity3_0()
        #identity_spec.organisation_info = OrganisationInfo(industry_type="Electricity, Industrial Control Systems")
        #ciq_identity.specification = identity_spec
        ttp = TTP(title="Victim Targeting: Electricity Sector and Industrial Control System Sector")
        ttp.victim_targeting = VictimTargeting()
        ttp.victim_targeting.identity = ciq_identity
        incident.leveraged_ttps.append(ttp)
        indicator = Indicator(title="Conpot Event")
        indicator.description = "Conpot network event"
        indicator.confidence = "High"
        # Source socket is the attacker; destination is the honeypot service
        # port looked up from the protocol mapping built in __init__.
        source_port = Port.from_dict({'port_value': event['remote'][1], 'layer4_protocol': 'tcp'})
        dest_port = Port.from_dict({'port_value': self.protocol_to_port_mapping[event['data_type']],
                                    'layer4_protocol': 'tcp'})
        source_ip = Address.from_dict({'address_value': event['remote'][0], 'category': Address.CAT_IPV4})
        dest_ip = Address.from_dict({'address_value': event['public_ip'], 'category': Address.CAT_IPV4})
        source_address = SocketAddress.from_dict({'ip_address': source_ip.to_dict(), 'port': source_port.to_dict()})
        dest_address = SocketAddress.from_dict({'ip_address': dest_ip.to_dict(), 'port': dest_port.to_dict()})
        network_connection = NetworkConnection.from_dict(
            {'source_socket_address': source_address.to_dict(),
             'destination_socket_address': dest_address.to_dict(),
             'layer3_protocol': u"IPv4",
             'layer4_protocol': u"TCP",
             'layer7_protocol': event['data_type'],
             'source_tcp_state': u"ESTABLISHED",
             'destination_tcp_state': u"ESTABLISHED",
             }
        )
        indicator.add_observable(Observable(network_connection))
        # Raw payload travels as a zlib-compressed, base64-encoded artifact.
        artifact = Artifact()
        artifact.data = json.dumps(event['data'])
        artifact.packaging.append(ZlibCompression())
        artifact.packaging.append(Base64Encoding())
        indicator.add_observable(Observable(artifact))
        incident.related_indicators.append(indicator)
        stix_package.add_incident(incident)
        stix_package_xml = stix_package.to_xml()
        return stix_package_xml
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master/wkpf'))
print os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master/wkpf')
from wkpf.pynvc import *
from wkpf.wkpfcomm import *
comm = getComm()
print "node ids", comm.getNodeIds()
comm.setFeature(2, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(2, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(2, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(2, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(2, "WuKong")
comm.setFeature(7, WKPF_FEATURE_LIGHT_SENSOR, 1)
comm.setFeature(7, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(7, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(7, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(7, "WuKong")
comm.setFeature(4, WKPF_FEATURE_LIGHT_SENSOR, 1)
comm.setFeature(4, WKPF_FEATURE_LIGHT_ACTUATOR, 0)
comm.setFeature(4, WKPF_FEATURE_NUMERIC_CONTROLLER, 1)
comm.setFeature(4, WKPF_FEATURE_NATIVE_THRESHOLD, 1)
comm.setLocation(4, "WuKong")
comm.setFeature(5, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(5, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(5, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(5, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(5, "WuKong")
comm.setFeature(6, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(6, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(6, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(6, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(6, "WuKong")
comm.setFeature(13, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(13, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(13, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(13, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(13, "WuKong")
comm.setFeature(14, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(14, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(14, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(14, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(14, "WuKong")
comm.setFeature(15, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(15, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(15, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(15, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(15, "WuKong")
comm.setFeature(10, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(10, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(10, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(10, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(10, "WuKong")
comm.setFeature(12, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(12, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(12, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(12, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(12, "WuKong")
|
import os
import sys

# help() writes the documentation to stdout itself and returns None, so the
# previous "print help(sys)" appended a spurious "None" line after each page.
# Calling it directly also keeps this snippet valid on both Python 2 and 3.
help(sys)
help(os)
|
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import bs4 as BeautifulSoup
import logging
from thug.DOM.W3C.Element import Element
from thug.DOM.W3C.Style.CSS.ElementCSSInlineStyle import ElementCSSInlineStyle
from .attr_property import attr_property
log = logging.getLogger("Thug")
class HTMLElement(Element, ElementCSSInlineStyle):
    """W3C HTMLElement wrapper around a BeautifulSoup tag.

    Exposes the common HTML attributes plus an innerHTML property; setting
    innerHTML also runs Thug's DFT handle_<tag> hooks over the inserted
    nodes so dynamically injected content gets analyzed.
    """
    # Standard HTML attributes mapped straight onto the underlying tag.
    id = attr_property("id")
    title = attr_property("title")
    lang = attr_property("lang")
    dir = attr_property("dir")
    className = attr_property("class", default = "")

    def __init__(self, doc, tag):
        Element.__init__(self, doc, tag)
        ElementCSSInlineStyle.__init__(self, doc, tag)

    def getInnerHTML(self):
        """Serialize the element's children to a single markup string."""
        if not self.hasChildNodes():
            return ""
        html = StringIO()
        for tag in self.tag.contents:
            # NOTE(review): `unicode` exists only on Python 2; under the
            # io.StringIO import branch (Python 3) this line would raise
            # NameError -- confirm the intended interpreter.
            html.write(unicode(tag))
        return html.getvalue()

    def setInnerHTML(self, html):
        """Replace this element's content with parsed *html*.

        html5lib always builds a full document, so the parsed nodes are
        collected from soup.head and soup.body; each appended node with a
        matching log.DFT handle_<name> hook triggers that handler.
        """
        self.tag.clear()
        soup = BeautifulSoup.BeautifulSoup(html, "html5lib")
        # list(...) snapshots the iterators because append() mutates the tree.
        for node in list(soup.head.descendants):
            self.tag.append(node)
            name = getattr(node, 'name', None)
            if name is None:
                continue
            handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
            if handler:
                handler(node)
        for node in list(soup.body.children):
            self.tag.append(node)
            name = getattr(node, 'name', None)
            if name is None:
                continue
            handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
            if handler:
                handler(node)
        # soup.head.unwrap()
        # soup.body.unwrap()
        # soup.html.wrap(self.tag)
        # self.tag.html.unwrap()
        # Second pass over the final tree: prefer the owning document's DFT
        # handler, falling back to the global logger's.
        for node in self.tag.descendants:
            name = getattr(node, 'name', None)
            if not name:
                continue
            p = getattr(self.doc.window.doc.DFT, 'handle_%s' % (name, ), None)
            if p is None:
                p = getattr(log.DFT, 'handle_%s' % (name, ), None)
            if p:
                p(node)

    innerHTML = property(getInnerHTML, setInnerHTML)

    # WARNING: NOT DEFINED IN W3C SPECS!
    def focus(self):
        pass

    @property
    def sourceIndex(self):
        return None
|
import sys
import pygame
from pygame.locals import *
pygame.init()

# 600x500 window with a dark-green background.
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Drawing Lines")
screen.fill((0, 80, 0))

# One thick pastel line drawn diagonally across the window.
line_color = (100, 255, 200)
line_width = 8
pygame.draw.line(screen, line_color, (100, 100), (500, 400), line_width)
pygame.display.update()

# Block until the window is closed or any key is pressed.
while True:
    for event in pygame.event.get():
        if event.type in (QUIT, KEYDOWN):
            sys.exit()
|
"""
Function-like objects that creates cubic clusters.
"""
import numpy as np
from ase.data import reference_states as _refstate
from ase.cluster.factory import ClusterFactory
class SimpleCubicFactory(ClusterFactory):
    """Factory creating simple-cubic (sc) clusters.

    Subclasses override ``xtal_name`` and ``atomic_basis`` to provide the
    bcc and fcc variants while reusing the cubic lattice handling here.
    """
    spacegroup = 221

    xtal_name = 'sc'

    def get_lattice_constant(self):
        "Get the lattice constant of an element with cubic crystal structure."
        symmetry = _refstate[self.atomic_numbers[0]]['symmetry']
        if symmetry != self.xtal_name:
            # Use the call form of raise: the old ``raise ValueError, (...)``
            # comma syntax is Python 2 only and a SyntaxError on Python 3.
            raise ValueError("Cannot guess the %s " % (self.xtal_name,) +
                             "lattice constant of an element with crystal " +
                             "structure %s." % (symmetry,))
        return _refstate[self.atomic_numbers[0]]['a']

    def set_basis(self):
        "Build the orthogonal lattice basis a*I and its reciprocal basis."
        a = self.lattice_constant
        if not isinstance(a, (int, float)):
            raise ValueError("Improper lattice constant for %s crystal." % (self.xtal_name,))
        self.lattice_basis = np.array([[a, 0., 0.],
                                       [0., a, 0.],
                                       [0., 0., a]])
        self.resiproc_basis = self.get_resiproc_basis(self.lattice_basis)

# Ready-to-use factory instance (callable like a function).
SimpleCubic = SimpleCubicFactory()
class BodyCenteredCubicFactory(SimpleCubicFactory):
    """Factory creating body-centered-cubic (bcc) clusters."""
    xtal_name = 'bcc'
    # Fractional coordinates of the two atoms in the conventional bcc cell:
    # the corner and the cell centre.
    atomic_basis = np.array([[0., 0., 0.],
                             [.5, .5, .5]])

BodyCenteredCubic = BodyCenteredCubicFactory()
class FaceCenteredCubicFactory(SimpleCubicFactory):
    """Factory creating face-centered-cubic (fcc) clusters."""
    xtal_name = 'fcc'
    # Fractional coordinates of the four atoms in the conventional fcc cell:
    # the corner plus the three face centres.
    atomic_basis = np.array([[0., 0., 0.],
                             [0., .5, .5],
                             [.5, 0., .5],
                             [.5, .5, 0.]])

FaceCenteredCubic = FaceCenteredCubicFactory()
|
import math
import random
def finding_prime(number):
    """Naive O(n) trial-division primality test.

    Operates on abs(number).  Returns True only for primes: the previous
    ``num < 4`` shortcut incorrectly reported 0 and 1 as prime; numbers
    below 2 are now rejected explicitly.
    """
    num = abs(number)
    if num < 2:
        return False
    # 2 and 3 fall through with an empty/odd-free loop and return True.
    for x in range(2, num):
        if num % x == 0:
            return False
    return True
def finding_prime_sqrt(number):
    """Trial-division primality test up to sqrt(n) -- O(sqrt n).

    Operates on abs(number) throughout (the original mixed ``number`` and
    ``num`` in the loop) and rejects values below 2, which the previous
    ``num < 4`` shortcut wrongly reported as prime (0 and 1).
    """
    num = abs(number)
    if num < 2:
        return False
    # A composite must have a factor no larger than its square root.
    for x in range(2, int(math.sqrt(num)) + 1):
        if num % x == 0:
            return False
    return True
def finding_prime_fermat(number):
    """Fermat primality test.

    Deterministic over every base 2..n-1 for small n (<= 102), probabilistic
    with 100 random bases otherwise.  Values below 2 are rejected outright
    (the original returned True for 0 and 1 because the base loop was empty).

    Note: like any Fermat test, the probabilistic branch can in principle be
    fooled by Carmichael numbers.
    """
    if number < 2:
        return False
    if number <= 102:
        for a in range(2, number):
            if pow(a, number - 1, number) != 1:
                return False
        return True
    else:
        for i in range(100):
            a = random.randint(2, number - 1)
            if pow(a, number - 1, number) != 1:
                return False
        return True
def test_finding_prime():
    """Smoke-test all three primality checkers on one prime and one composite."""
    prime, composite = 17, 20
    for checker in (finding_prime, finding_prime_sqrt, finding_prime_fermat):
        assert(checker(prime) == True)
        assert(checker(composite) == False)
    print('Tests passed!')

if __name__=='__main__':
    test_finding_prime()
|
"""
Low-level Python bindings for libdbus. Don't use this module directly -
the public API is provided by the `dbus`, `dbus.service`, `dbus.mainloop`
and `dbus.mainloop.glib` modules, with a lower-level API provided by the
`dbus.lowlevel` module.
"""
import dbus.lowlevel as __dbus_lowlevel
from _LongBase import _LongBase
class UInt64(_LongBase):
    """
    An unsigned 64-bit integer between 0 and 0xFFFF FFFF FFFF FFFF,
    represented as a subtype of `long`.
    This type only exists on platforms where the C compiler has suitable
    64-bit types, such as C99 ``unsigned long long``.
    Constructor::
       dbus.UInt64(value: long[, variant_level: int]) -> UInt64
    ``value`` must be within the allowed range, or `OverflowError` will be
    raised.
    ``variant_level`` must be non-negative; the default is 0.
    :IVariables:
     `variant_level` : int
       Indicates how many nested Variant containers this object
       is contained in: if a message's wire format has a variant containing a
       variant containing a uint64, this is represented in Python by a
       UInt64 with variant_level==2.
    """
    def __init__(self, value, variant_level=None): # real signature unknown; restored from __doc__
        # Stub only: the real implementation lives in the _dbus_bindings C
        # extension; this skeleton exists for introspection/IDE support.
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        # Stub: see note on __init__ above.
        pass
|
# Add-on metadata displayed in Blender's add-on preferences list.
bl_info = {
    "name" : "text objects to-from xml",
    "author" : "chebhou",
    "version" : (1, 0),
    "blender" : (2, 7, 3),
    "location" : "file->export->text to-from xml",
    # Key fixed from the misspelled "discription", which Blender ignores,
    # so the add-on previously showed no description at all.
    "description" : "Copies text objects to/from an XML file",
    "wiki_url" : " https://github.com/chebhou",
    "tracker_url" : "https://github.com/chebhou",
    "category" : "Import-Export"
}
import bpy
from bpy.types import Operator
from bpy_extras.io_utils import ExportHelper
from bpy.props import EnumProperty, BoolProperty
from xml.dom import minidom
from xml.dom.minidom import Document
def txt_sync(filepath):
    """Read the XML file written by txt_export and copy each stored text
    body back onto the matching Blender FONT object.

    Objects are matched by scene name and object name; a missing scene or
    object raises KeyError from the bpy collections.
    """
    dom = minidom.parse(filepath)
    scenes = dom.getElementsByTagName('scene')
    for scene in scenes:
        scene_name = scene.getAttribute('name')
        print("\n", scene_name)
        bl_scene = bpy.data.scenes[scene_name]
        txt_objs = scene.getElementsByTagName('object')
        for obj in txt_objs:
            obj_name = obj.getAttribute('name')
            # First child is the text node created by txt_export.
            obj_body = obj.childNodes[0].nodeValue
            # Chained assignment: what matters is updating .data.body;
            # the bl_obj name itself is never used afterwards.
            bl_obj = bl_scene.objects[obj_name].data.body = obj_body
            print(obj_name, " ", obj_body)
def txt_export(filepath):
    """Dump the body of every FONT object in every scene to an XML file.

    Produces <data><scene name="..."><object name="...">body</object>...
    serialized with tab indentation and UTF-8 encoding.
    """
    doc = Document()
    data_node = doc.createElement('data')
    doc.appendChild(data_node)
    for bl_scene in bpy.data.scenes:
        # One <scene> element per Blender scene.
        scene_node = doc.createElement('scene')
        scene_node.setAttribute('name', bl_scene.name)
        data_node.appendChild(scene_node)
        for bl_obj in bl_scene.objects:
            # Only text (FONT) objects carry a body worth exporting.
            if bl_obj.type != 'FONT':
                continue
            obj_node = doc.createElement('object')
            obj_node.setAttribute('name', bl_obj.name)
            obj_node.appendChild(doc.createTextNode(bl_obj.data.body))
            scene_node.appendChild(obj_node)
    # Serialize and write; the context manager closes the file on exit.
    with open(filepath, "wb") as file_handle:
        file_handle.write(bytes(doc.toprettyxml(indent='\t'), 'UTF-8'))
class text_export(Operator, ExportHelper):
    """write and read text objects to a file"""
    bl_idname = "export_scene.text_xml"
    bl_label = "text from-to xml"
    bl_options = {'REGISTER', 'UNDO'} #should remove undo ?
    # ExportHelper mixin class uses this
    filename_ext = ".xml"
    #parameters and variables
    # Direction of the conversion, chosen by the user in the file browser
    # sidebar: 'W' writes scene text to XML, 'R' reads it back.
    convert = EnumProperty(
            name="Convert",
            description="Choose conversion",
            items=(('W', "write objects", "write text objects to xml"),
                   ('R', "read objects", "read text objects from xml")),
            default='W',
            )
    #main function
    def execute(self, context):
        # Leave edit mode so text object data is committed before we read it.
        bpy.ops.object.mode_set(mode = 'OBJECT')
        if self.convert == 'W':
            txt_export(self.filepath)
        else:
            txt_sync(self.filepath)
        # Refresh the scene so re-imported text bodies display immediately.
        bpy.context.scene.update()
        self.report({'INFO'},"Conversion is Done")
        return {'FINISHED'}
def menu_func_export(self, context):
    # Menu entry callback; register() adds it to both the File > Export
    # and File > Import menus.
    self.layout.operator(text_export.bl_idname, text="Text to-from xml")
def register():
    """Register the operator and expose it in the export and import menus."""
    bpy.utils.register_class(text_export)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
    bpy.types.INFO_MT_file_import.append(menu_func_export)
def unregister():
    """Remove the operator and its menu entries (exact mirror of register())."""
    bpy.utils.unregister_class(text_export)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
    # Bug fix: this previously called append(), which added a duplicate
    # import-menu entry on unregister instead of removing the one that
    # register() added.
    bpy.types.INFO_MT_file_import.remove(menu_func_export)

if __name__ == "__main__":
    register()
|
from __future__ import print_function, division, absolute_import
import difflib
import locale
import os
import pprint
import six
import sys
import tempfile
try:
import unittest2 as unittest
except ImportError:
import unittest
import logging
try:
# 2.7+
logging.captureWarnings(True)
except AttributeError:
pass
from mock import Mock, MagicMock, NonCallableMock, patch, mock_open
from contextlib import contextmanager
from . import stubs
import subscription_manager.injection as inj
import subscription_manager.managercli
from rhsmlib.services import config
from threading import RLock
if six.PY2:
OPEN_FUNCTION = '__builtin__.open'
else:
OPEN_FUNCTION = 'builtins.open'
@contextmanager
def open_mock(content=None, **kwargs):
    """Patch the builtin open() with a single in-memory file.

    Reads and line iteration yield *content*; everything written is
    captured and can be read back via the mocked file's ``content_out()``
    helper.  Yields the mocked file object (the patched open's
    return_value).
    """
    content_out = six.StringIO()
    m = mock_open(read_data=content)
    with patch(OPEN_FUNCTION, m, create=True, **kwargs) as mo:
        stream = six.StringIO(content)
        rv = mo.return_value
        # Capture writes instead of discarding them...
        rv.write = lambda x: content_out.write(x)
        rv.content_out = lambda: content_out.getvalue()
        # ...and make "for line in f" iterate the lines of *content*.
        rv.__iter__ = lambda x: iter(stream.readlines())
        yield rv
@contextmanager
def open_mock_many(file_content_map=None, **kwargs):
    """
    Mock out access to one or many files opened using the builtin "open".

    Opening a path not present in the map raises the same "No such file or
    directory" error the real open() would.  Writes to each mocked file are
    captured per path and retrievable via its ``content_out()`` helper.

    :param file_content_map: A dictionary of path : file_contents
    :type file_content_map: dict[str,str]
    :param kwargs: passed through to mock.patch
    :return: yields the patched open mock
    """
    file_content_map = file_content_map or {}
    # Replace each raw contents string with (mock, contents, capture buffer)
    # so reads and writes are tracked independently per path.
    for key, value in file_content_map.items():
        file_content_map[key] = (mock_open(read_data=value), value, six.StringIO())

    def get_file(path, *args, **kwargs):
        """
        The side effect that will allow us to "open" the right "file".
        Not for use outside open_mock_many.
        :param path: The path which is passed in to the builtin open
        :param args:
        :param kwargs:
        :return:
        """
        try:
            rv, file_contents, content_out = file_content_map[path]
        except KeyError:
            # Emulate the builtin open() failure for unknown paths
            # (errno 2 = ENOENT).
            if six.PY2:
                raise IOError(2, 'No such file or directory')
            else:
                raise OSError(2, 'No such file or directory')
        rv = rv.return_value
        # Route writes into this path's capture buffer; tests read it back
        # through content_out().
        rv.write = lambda x: content_out.write(x)
        rv.content_out = lambda: content_out.getvalue()
        return rv

    with patch(OPEN_FUNCTION, **kwargs) as mo:
        mo.side_effect = get_file
        yield mo
@contextmanager
def temp_file(content, *args, **kwargs):
    """Context manager yielding the path of a temp file containing *content*.

    delete=False is forced so the file can be reopened by name (required on
    platforms where an open NamedTemporaryFile cannot be reopened); the file
    is always removed on exit.
    """
    fh = None
    try:
        kwargs['delete'] = False
        kwargs.setdefault('prefix', 'sub-man-test')
        fh = tempfile.NamedTemporaryFile(mode='w+', *args, **kwargs)
        fh.write(content)
        fh.close()
        yield fh.name
    finally:
        # Bug fix: if NamedTemporaryFile itself raised, fh was unbound and
        # the old "os.unlink(fh.name)" raised NameError, masking the
        # original error. Only unlink when a file was actually created.
        if fh is not None:
            os.unlink(fh.name)
@contextmanager
def locale_context(new_locale, category=None):
    """Temporarily switch the process locale, restoring it on exit.

    NOTE(review): when no category is given, the previous locale is read
    from LC_CTYPE but set/restored via LC_ALL -- presumably deliberate
    (LC_ALL cannot be queried portably with getlocale), but worth
    confirming.
    """
    old_category = category or locale.LC_CTYPE
    old_locale = locale.getlocale(old_category)
    category = category or locale.LC_ALL
    locale.setlocale(category, new_locale)
    try:
        yield
    finally:
        # Restore the original locale even if the body raised.
        locale.setlocale(category, old_locale)
class FakeLogger(object):
    """Logger test double that records the most recent message.

    ``msg`` holds the last buf passed to debug/info/warning/error;
    ``logged_exception`` holds the argument of the last exception() call;
    ``expected_msg`` is a scratch slot tests may set for later comparison.
    """

    def __init__(self):
        self.expected_msg = ""
        self.msg = None
        self.logged_exception = None

    def _record(self, buf):
        # Single sink shared by all message-level methods.
        self.msg = buf

    def debug(self, buf, *args, **kwargs):
        self._record(buf)

    def info(self, buf, *args, **kwargs):
        self._record(buf)

    def warning(self, buf, *args, **kwargs):
        self._record(buf)

    def error(self, buf, *args, **kwargs):
        self._record(buf)

    def exception(self, e, *args, **kwargs):
        self.logged_exception = e

    def set_expected_msg(self, msg):
        self.expected_msg = msg
class FakeException(Exception):
    """Test exception whose str() is the repr of the wrapped message."""

    def __init__(self, msg=None):
        self.msg = msg

    def __str__(self):
        # repr-style rendering, e.g. str(FakeException("x")) == "'x'".
        return "%r" % (self.msg,)
class Matcher(object):
    """Equality helper for mock-call assertions.

    An instance compares equal to ``other`` whenever
    ``compare(some_obj, other)`` is truthy, letting tests match call
    arguments with a custom predicate instead of strict equality.
    """

    def __init__(self, compare, some_obj):
        self.compare = compare
        self.some_obj = some_obj

    def __eq__(self, other):
        # Delegate equality entirely to the supplied predicate.
        return self.compare(self.some_obj, other)

    @staticmethod
    def set_eq(first, second):
        """Useful for dealing with sets that have been cast to or instantiated as lists."""
        return set(first) == set(second)
class SubManFixture(unittest.TestCase):
def set_facts(self):
"""Override if you need to set facts for a test."""
return {"mock.facts": "true"}
"""
Can be extended by any subscription manager test case to make
sure nothing on the actual system is read/touched, and appropriate
mocks/stubs are in place.
"""
def setUp(self):
# No matter what, stop all patching (even if we have a failure in setUp itself)
self.addCleanup(patch.stopall)
# Never attempt to use the actual managercli.cfg which points to a
# real file in etc.
self.mock_cfg_parser = stubs.StubConfig()
original_conf = subscription_manager.managercli.conf
def unstub_conf():
subscription_manager.managercli.conf = original_conf
# Mock makes it damn near impossible to mock a module attribute (which we shouldn't be using
# in the first place because it's terrible) so we monkey-patch it ourselves.
# TODO Fix this idiocy by not reading the damn config on module import
subscription_manager.managercli.conf = config.Config(self.mock_cfg_parser)
self.addCleanup(unstub_conf)
facts_host_patcher = patch('rhsmlib.dbus.facts.FactsClient', auto_spec=True)
self.mock_facts_host = facts_host_patcher.start()
self.mock_facts_host.return_value.GetFacts.return_value = self.set_facts()
# By default mock that we are registered. Individual test cases
# can override if they are testing disconnected scenario.
id_mock = NonCallableMock(name='FixtureIdentityMock')
id_mock.exists_and_valid = Mock(return_value=True)
id_mock.uuid = 'fixture_identity_mock_uuid'
id_mock.name = 'fixture_identity_mock_name'
id_mock.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
id_mock.keypath.return_value = "/not/a/real/key/path"
id_mock.certpath.return_value = "/not/a/real/cert/path"
# Don't really care about date ranges here:
self.mock_calc = NonCallableMock()
self.mock_calc.calculate.return_value = None
# Avoid trying to read real /etc/yum.repos.d/redhat.repo
self.mock_repofile_path_exists_patcher = patch('subscription_manager.repolib.YumRepoFile.path_exists')
mock_repofile_path_exists = self.mock_repofile_path_exists_patcher.start()
mock_repofile_path_exists.return_value = True
inj.provide(inj.IDENTITY, id_mock)
inj.provide(inj.PRODUCT_DATE_RANGE_CALCULATOR, self.mock_calc)
inj.provide(inj.ENTITLEMENT_STATUS_CACHE, stubs.StubEntitlementStatusCache())
inj.provide(inj.POOL_STATUS_CACHE, stubs.StubPoolStatusCache())
inj.provide(inj.PROD_STATUS_CACHE, stubs.StubProductStatusCache())
inj.provide(inj.CONTENT_ACCESS_MODE_CACHE, stubs.StubContentAccessModeCache())
inj.provide(inj.SUPPORTED_RESOURCES_CACHE, stubs.StubSupportedResourcesCache())
inj.provide(inj.SYSPURPOSE_VALID_FIELDS_CACHE, stubs.StubSyspurposeValidFieldsCache())
inj.provide(inj.CURRENT_OWNER_CACHE, stubs.StubCurrentOwnerCache)
inj.provide(inj.OVERRIDE_STATUS_CACHE, stubs.StubOverrideStatusCache())
inj.provide(inj.RELEASE_STATUS_CACHE, stubs.StubReleaseStatusCache())
inj.provide(inj.AVAILABLE_ENTITLEMENT_CACHE, stubs.StubAvailableEntitlementsCache())
inj.provide(inj.PROFILE_MANAGER, stubs.StubProfileManager())
# By default set up an empty stub entitlement and product dir.
# Tests need to modify or create their own but nothing should hit
# the system.
self.ent_dir = stubs.StubEntitlementDirectory()
inj.provide(inj.ENT_DIR, self.ent_dir)
self.prod_dir = stubs.StubProductDirectory()
inj.provide(inj.PROD_DIR, self.prod_dir)
# Installed products manager needs PROD_DIR injected first
inj.provide(inj.INSTALLED_PRODUCTS_MANAGER, stubs.StubInstalledProductsManager())
self.stub_cp_provider = stubs.StubCPProvider()
self._release_versions = []
self.stub_cp_provider.content_connection.get_versions = self._get_release_versions
inj.provide(inj.CP_PROVIDER, self.stub_cp_provider)
inj.provide(inj.CERT_SORTER, stubs.StubCertSorter())
# setup and mock the plugin_manager
plugin_manager_mock = MagicMock(name='FixturePluginManagerMock')
plugin_manager_mock.runiter.return_value = iter([])
inj.provide(inj.PLUGIN_MANAGER, plugin_manager_mock)
inj.provide(inj.DBUS_IFACE, Mock(name='FixtureDbusIfaceMock'))
pooltype_cache = Mock()
inj.provide(inj.POOLTYPE_CACHE, pooltype_cache)
# don't use file based locks for tests
inj.provide(inj.ACTION_LOCK, RLock)
self.stub_facts = stubs.StubFacts()
inj.provide(inj.FACTS, self.stub_facts)
content_access_cache_mock = MagicMock(name='ContentAccessCacheMock')
inj.provide(inj.CONTENT_ACCESS_CACHE, content_access_cache_mock)
self.dbus_patcher = patch('subscription_manager.managercli.CliCommand._request_validity_check')
self.dbus_patcher.start()
# No tests should be trying to connect to any configure or test server
# so really, everything needs this mock. May need to be in __init__, or
# better, all test classes need to use SubManFixture
self.is_valid_server_patcher = patch("subscription_manager.managercli.is_valid_server_info")
is_valid_server_mock = self.is_valid_server_patcher.start()
is_valid_server_mock.return_value = True
# No tests should be trying to test the proxy connection
# so really, everything needs this mock. May need to be in __init__, or
# better, all test classes need to use SubManFixture
self.test_proxy_connection_patcher = patch("subscription_manager.managercli.CliCommand.test_proxy_connection")
test_proxy_connection_mock = self.test_proxy_connection_patcher.start()
test_proxy_connection_mock.return_value = True
self.syncedstore_patcher = patch('subscription_manager.syspurposelib.SyncedStore')
syncedstore_mock = self.syncedstore_patcher.start()
set_up_mock_sp_store(syncedstore_mock)
self.files_to_cleanup = []
def tearDown(self):
    """Close every temp file registered by write_tempfile().

    Safe to call even when setUp never ran (no files_to_cleanup attribute).
    """
    for tmp in getattr(self, 'files_to_cleanup', []):
        # These are tempfile.NamedTemporaryFile objects; closing deletes them.
        tmp.close()
def write_tempfile(self, contents):
    """Write *contents* to a fresh NamedTemporaryFile and return it.

    The file is rewound to the start and registered in
    self.files_to_cleanup so tearDown() closes (and deletes) it.
    """
    tmp = tempfile.NamedTemporaryFile(mode='w+', suffix='.tmp')
    tmp.write(contents)
    tmp.seek(0)
    self.files_to_cleanup.append(tmp)
    return tmp
def set_consumer_auth_cp(self, consumer_auth_cp):
    """Install *consumer_auth_cp* on the injected CP provider."""
    inj.require(inj.CP_PROVIDER).consumer_auth_cp = consumer_auth_cp
def get_consumer_cp(self):
    """Return the consumer-auth connection from the injected CP provider."""
    return inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
# The ContentConnection used for reading release versions from
# the cdn. The injected one uses this.
def _get_release_versions(self, listing_path):
return self._release_versions
# For changing the injected consumer identity to a valid one
def _inject_mock_valid_consumer(self, uuid=None):
    """Inject a consumer identity whose is_valid() returns True.

    Returns the injected identity so the test can examine it.
    """
    valid_identity = NonCallableMock(name='ValidIdentityMock')
    valid_identity.is_valid = Mock(return_value=True)
    valid_identity.uuid = uuid or "VALIDCONSUMERUUID"
    valid_identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
    inj.provide(inj.IDENTITY, valid_identity)
    return valid_identity
def _inject_mock_invalid_consumer(self, uuid=None):
    """Inject a consumer identity whose is_valid() returns False.

    Returns the injected identity so the test can examine it.
    """
    bad_identity = NonCallableMock(name='InvalidIdentityMock')
    bad_identity.is_valid = Mock(return_value=False)
    bad_identity.uuid = uuid or "INVALIDCONSUMERUUID"
    bad_identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
    inj.provide(inj.IDENTITY, bad_identity)
    return bad_identity
# use our naming convention here to make it clear
# this is our extension. Note that python 2.7 adds a
# assertMultilineEquals that assertEqual of strings does
# automatically
def assert_string_equals(self, expected_str, actual_str, msg=None):
    """Compare two (multi-line) strings, failing with a unified diff."""
    if expected_str == actual_str:
        return
    diff = difflib.unified_diff(expected_str.splitlines(True),
                                actual_str.splitlines(True),
                                "expected", "actual")
    message = ''.join(diff)
    if msg:
        message += " : " + msg
    self.fail("Multi-line strings are unequal:\n" + message)
def assert_equal_dict(self, expected_dict, actual_dict):
    """Compare two dicts, failing with a detailed difference report."""
    missing_keys = [k for k in expected_dict if k not in actual_dict]
    mismatches = [(k, expected_dict[k], actual_dict[k])
                  for k in expected_dict
                  if k in actual_dict and expected_dict[k] != actual_dict[k]]
    extra = [k for k in actual_dict if k not in expected_dict]

    message = ""
    if missing_keys or extra:
        message += "Keys in only one dict: \n"
        for key in missing_keys:
            message += "actual_dict: %s\n" % key
        for key in extra:
            message += "expected_dict: %s\n" % key
    if mismatches:
        message += "Unequal values: \n"
        for info in mismatches:
            message += "%s: %s != %s\n" % info
        # pprint both dicts to ease debugging of value mismatches
        message += "\n"
        message += "expected_dict:\n"
        message += pprint.pformat(expected_dict)
        message += "\n"
        message += "actual_dict:\n"
        message += pprint.pformat(actual_dict)
    if mismatches or missing_keys or extra:
        self.fail(message)
def assert_items_equals(self, a, b):
    """Assert that two lists contain the same items regardless of order."""
    as_str = lambda item: str(item)
    if sorted(a, key=as_str) != sorted(b, key=as_str):
        self.fail("%s != %s" % (a, b))
    return True
class Capture(object):
    """Context manager that tees sys.stdout/sys.stderr into string buffers.

    While active, everything written to stdout/stderr is recorded and,
    unless silent=True, also forwarded to the real streams.
    """

    class Tee(object):
        """File-like object buffering writes and optionally forwarding them."""

        def __init__(self, stream, silent):
            self.buf = six.StringIO()
            self.stream = stream
            self.silent = silent

        def write(self, data):
            self.buf.write(data)
            if self.silent:
                return
            self.stream.write(data)

        def flush(self):
            # Nothing buffered at this layer needs flushing.
            pass

        def getvalue(self):
            return self.buf.getvalue()

        def isatty(self):
            return False

    def __init__(self, silent=False):
        self.silent = silent

    def __enter__(self):
        out_tee = self.Tee(sys.stdout, self.silent)
        err_tee = self.Tee(sys.stderr, self.silent)
        self.buffs = (out_tee, err_tee)
        # Remember the real streams so __exit__ can restore them.
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        sys.stdout, sys.stderr = out_tee, err_tee
        return self

    @property
    def out(self):
        """Everything captured from stdout so far."""
        return self.buffs[0].getvalue()

    @property
    def err(self):
        """Everything captured from stderr so far."""
        return self.buffs[1].getvalue()

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout = self.stdout
        sys.stderr = self.stderr
def set_up_mock_sp_store(mock_sp_store):
    """
    Sets up the mock syspurpose store with methods that are mock versions of the real deal.
    Allows us to test in the absence of the syspurpose module.
    This documents the essential expected behaviour of the methods subman relies upon
    from the syspurpose codebase.

    :param mock_sp_store: a Mock whose return_value plays the SyncedStore role
    :return: (mock_sp_store, dict holding the store's contents)
    """
    # Single dict shared by every closure below AND returned to the caller;
    # it must therefore only ever be mutated in place, never rebound.
    contents = {}
    mock_sp_store_contents = contents

    def set(item, value):
        contents[item] = value

    def read(path, raise_on_error=False):
        return mock_sp_store

    def unset(item):
        # Matches syspurpose semantics: unset records None rather than
        # deleting the key.
        contents[item] = None

    def add(item, value):
        current = contents.get(item, [])
        if value not in current:
            current.append(value)
        contents[item] = current

    def remove(item, value):
        current = contents.get(item)
        if current is not None and isinstance(current, list) and value in current:
            current.remove(value)

    def get_local_contents():
        return contents

    def get_cached_contents():
        return contents

    def update_local(data):
        # BUGFIX: the original did `global contents; contents = data`, which
        # created an unrelated module-level name and left the closed-over
        # dict (and mock_sp_store_contents) unchanged. Mutating in place
        # keeps every reference in sync.
        contents.clear()
        contents.update(data)

    mock_sp_store.return_value.set = Mock(side_effect=set)
    mock_sp_store.return_value.read = Mock(side_effect=read)
    mock_sp_store.return_value.unset = Mock(side_effect=unset)
    mock_sp_store.return_value.add = Mock(side_effect=add)
    mock_sp_store.return_value.remove = Mock(side_effect=remove)
    mock_sp_store.return_value.local_contents = mock_sp_store_contents
    mock_sp_store.return_value.get_local_contents = Mock(side_effect=get_local_contents)
    mock_sp_store.return_value.update_local = Mock(side_effect=update_local)
    mock_sp_store.return_value.get_cached_contents = Mock(side_effect=get_cached_contents)

    return mock_sp_store, mock_sp_store_contents
|
"""
Created on Wed Jun 26 11:09:05 2013
@author: jotterbach
"""
from numpy import *
from ED_HalfFilling import EigSys_HalfFilling
from DotProduct import scalar_prod
from multiprocessing import *
from multiprocessing import Pool
import matplotlib.pyplot as plt
from ParallelizationTools import info
from os.path import *
from scipy.special import *
from scipy.linalg import qr
from DotProduct import scalar_prod
from Correlation_Generator import *
from datetime import datetime
''' define the datestamp for the filenames '''
# Produces e.g. "2013-06-26_11h09m" from the ISO-ish str(datetime.now());
# embedded in every output filename written by the functions below.
date = str(datetime.now())
now = date[0:10]+'_'+date[11:13]+'h'+date[14:16]+'m'
def AngleSpectrum(number_particles, noEV, gamma, hopping, angle):
    """
    AngleSpectrum(number_particles, noEV, gamma, hopping, angle):
    computes the energy eigenspectrum as a function of the angle of the dipoles
    with the chain axis given an unit interaction V and a hopping J

    parameters of the function:
    number_particles: number of particles in the problem
    noEV: number of eigenvalues being calculated
    gamma: opening angle of the zig-zag chain
    hopping: hopping parameter in units of interaction V
    angle: array containing the angles as a multiple of **PI**
    """
    # NOTE(review): uses Python 2 print statements; this module is Python 2 only.
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
    independet_v1_v2 = 1 #makes v1 and v2 independent of each other
    number_sites = 2*number_particles #condition for half-filling
    interaction_strength = 1 #unit of energy

    ''' intialization of variables that will be stored for later use '''
    eigval = zeros((angle.shape[0],noEV), dtype = float)
    degeneracies = zeros((angle.shape[0],1))
    v1 = zeros((angle.shape[0],1))
    v2 = zeros((angle.shape[0],1))
    v3 = zeros((angle.shape[0],1))

    ''' actual method call '''
    # Guard so the pool is only spawned when imported under this module name.
    if __name__ == 'DiagonalizationMethods':
        info('main line')
        pool = Pool()
        ''' invocation of the eigenvalue procedure '''
        # One async diagonalization task per angle value.
        it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping, interaction_strength, angle[angle_idx], noEV, spectrum, gamma, independet_v1_v2)) for angle_idx in range(0,angle.shape[0])]
        for ridx in it:
            # Match each finished task back to its angle via the returned
            # angle value (result tuple element 0).
            angle_idx = nonzero(angle == ridx.get()[0])
            eigval[angle_idx,:]= ridx.get()[1]#floor(10*around(real(ridx.get()[1]),decimals = 2))/10
            # Ground-state degeneracy: count eigenvalues equal to the lowest.
            degeneracies[angle_idx] = sum((eigval[angle_idx,:] == eigval[angle_idx,0]).astype(int))
            v1[angle_idx]=ridx.get()[2]
            v2[angle_idx]=ridx.get()[3]
            v3[angle_idx]=ridx.get()[4]
            print 'angle:', angle[angle_idx], '\nground-state degeneracy:', degeneracies[angle_idx]
        # Persist results as .npy files tagged with the run's datestamp.
        filename = 'FigureData/'+now+'_AngleSpectrum_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_vdd'+str(interaction_strength).replace('.','-')
        save(filename+'_EigVals', eigval)
        save(filename+'_angle', angle)
        print 'saved: '+filename
def InteractionSpectrum(number_particles, noEV, gamma, angle, interaction_strength):
    ''' computes the eigenvalue spectrum for a given angle
    as a function of the interaction strength in units of J

    parameters of the function:
    number_particles: number of particles in the problem
    noEV: number of eigenvalues being calculated
    gamma: opening angle of the zig-zag chain
    angle: array containing the angles as a multiple of **PI**
    interaction_strength: interaction in units of hopping J
    '''
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
    independent_v1_v2 = 1 #makes v1 and v2 independent of each other
    number_sites = 2*number_particles #condition for half-filling
    hopping = 1 #unit of energy

    ''' intialization of variables that will be stored for later use '''
    eigval = zeros((len(interaction_strength),noEV), dtype = float)
    v1 = zeros((interaction_strength.shape[0],1))
    v2 = zeros((interaction_strength.shape[0],1))
    v3 = zeros((interaction_strength.shape[0],1))

    ''' actual method call '''
    # Guard so the pool is only spawned when imported under this module name.
    if __name__ == 'DiagonalizationMethods':
        info('main line')
        pool = Pool()
        ''' invocation of eigenvalue procedure '''
        # One async diagonalization task per interaction value.
        it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping, interaction_strength[idx], angle, noEV, spectrum, gamma, independent_v1_v2)) for idx in range(len(interaction_strength))]
        for ridx in it:
            # Match the finished task back to its interaction value
            # (result tuple element 6).
            idx = nonzero(interaction_strength == ridx.get()[6])
            v1=ridx.get()[2]
            v2=ridx.get()[3]
            v3=ridx.get()[4]
            eigval[idx,:]= ridx.get()[1]#floor(10*around(real(ridx.get()[1]),decimals = 2))/10
            print 'interaction:', interaction_strength[idx], 'interaction constants: ', v1,v2,v3
        # Persist results as .npy files tagged with the run's datestamp.
        filename = 'FigureData/'+now+'_InteractionSpectrum_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_Theta'+str(angle).replace('.','-')
        save(filename+'_EigVals', eigval)
        save(filename+'_interaction',interaction_strength)
        print 'saved: '+filename
def HoppingSpectrum(number_particles, noEV, gamma, angle, hopping):
    ''' computes the eigenvalue spectrum for given interactions as a function
    of the hopping in units of interaction V

    parameters of the function:
    number_particles: number of particles in the problem
    noEV: number of eigenvalues being calculated
    gamma: opening angle of the zig-zag chain
    angle: array containing the angles as a multiple of **PI**
    hopping: hopping in units of interaction V
    '''
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
    independent_v1_v2 = 1 #makes v1 and v2 independent of each other
    number_sites = 2*number_particles #condition for half-filling
    interaction_strength = 1 #unit of energy

    ''' intialization of variables that will be stored for later use '''
    eigval = zeros((len(hopping),noEV), dtype = float)
    v1 = zeros((hopping.shape[0],1))
    v2 = zeros((hopping.shape[0],1))
    v3 = zeros((hopping.shape[0],1))

    ''' actual method call '''
    # Guard so the pool is only spawned when imported under this module name.
    if __name__ == 'DiagonalizationMethods':
        info('main line')
        pool = Pool()
        ''' invocation of eigenvalue procedure '''
        # One async diagonalization task per hopping value.
        it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping[idx], interaction_strength, angle, noEV, spectrum, gamma, independent_v1_v2)) for idx in range(len(hopping))]
        for ridx in it:
            # Match the finished task back to its hopping value
            # (result tuple element 5).
            idx = nonzero(hopping == ridx.get()[5])
            v1=ridx.get()[2]
            v2=ridx.get()[3]
            v3=ridx.get()[4]
            eigval[idx,:]= ridx.get()[1]
            print 'hopping:', hopping[idx], 'interactions: ', v1,v2,v3
        # Persist results as .npy files tagged with the run's datestamp.
        filename = 'FigureData/'+now+'_HoppingSpectrum-nnhopping_N'+str(number_particles)+'_vdd'+str(interaction_strength).replace('.','-')+'_Theta'+str(angle).replace('.','-')
        save(filename+'_EigVals', eigval)
        save(filename+'_hopping', hopping)
        print 'saved: '+filename
def DensityCorrelations(number_particles, noEV, gamma, angle, hopping, degeneracy):
    ''' computes the density correlation function for a given set of angle,
    interaction and hopping'''
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 0 #ensures that the spectrum AND the eigenvectors are calculated in EigSys_HalfFilling
    independent_v1_v2 = 1 #makes v1 and v2 independent of each other
    number_sites = 2*number_particles #condition for half-filling
    interaction_strength = 1 #unit of energy

    ''' function specific parameter initilaization '''
    # Single (serial) diagonalization; eigenvectors are needed here.
    eigval, eigvec, basisstates = EigSys_HalfFilling(number_particles, number_sites, hopping, interaction_strength, angle, noEV, spectrum, gamma, independent_v1_v2)
    eigval = around(real(eigval),decimals = 2)
    print '\nlow-energy spectrum: \n', eigval
    print 'GS degeneracy:', degeneracy
    eigvec = eigvec.astype(complex)
    if degeneracy > 1:
        # QR-orthonormalize the degenerate ground-state manifold so the
        # correlators below can be averaged over an orthonormal basis.
        print '\nOrthogonalizing GS manifold'
        eigvec_GS = zeros((eigvec.shape[0],degeneracy), dtype = complex)
        for m in range(degeneracy):
            eigvec_GS[:,m] = eigvec[:,m]
        Q, R = qr(eigvec_GS, mode = 'economic')
        for m in range(degeneracy):
            eigvec[:,m] = Q[:,m]
        del Q, R, eigvec_GS
    number_states = basisstates.shape[0]
    # Guard so the pool is only spawned when imported under this module name.
    if __name__ == 'DiagonalizationMethods':
        ''' local density '''
        print '\nCalculating local density'
        local_density = zeros((2*number_particles,1), dtype = float)
        pool = Pool()
        # Average <n_i> over the (orthogonalized) degenerate ground states.
        for deg_idx in range(0,degeneracy):
            print 'state index: ', deg_idx
            it = [pool.apply_async(loc_den, (basisstates, number_particles, number_states, eigvec[:,deg_idx], site_idx)) for site_idx in range(0,2*number_particles)]
            for ridx in it:
                site_idx = ridx.get()[0]
                local_density[site_idx] += real(ridx.get()[1])/degeneracy
        ''' density-density correlation '''
        print '\nCalculating density-density correlations'
        g2 = zeros((number_sites,1), dtype = float)
        # Average the pair correlator g2 over the same manifold.
        for deg_idx in range(0,degeneracy):
            print 'state index: ', deg_idx
            it = [pool.apply_async(pair_corr, (basisstates, number_particles, number_sites, number_states, eigvec[:,deg_idx], site_idx)) for site_idx in range(0,number_sites)]
            for ridx in it:
                site_idx = ridx.get()[0]
                g2[site_idx] += real(ridx.get()[1])/degeneracy
        # Persist results as .npy files tagged with the run's datestamp.
        filename='FigureData/'+now+'_Correlations_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_vdd'+str(interaction_strength).replace('.','-')+'_Theta'+str(angle).replace('.','-')
        save(filename+'_local_density', local_density)
        save(filename+'_g2', g2)
        print 'saved: '+filename
|
"""Do not call std::string::find_first_of or std::string::find with a string of
characters to locate that has the size 1.
Use the version of std::string::find that takes a single character to
locate instead. Same for find_last_of/rfind.
"""
error_msg = "Do not use find(\"a\"), use find('a')."
regexp = r"""(?x)
r?find(_(first|last)_of)?\s*
\(
"([^\\]|(\\[nt\\"]))"[,)]"""
forbidden = [
r'find_first_of("a")',
r'find_last_of("a")',
r'find("a")',
r'rfind("a")',
r'find_first_of("\n")',
r'find_last_of("\n")',
r'find("\n")',
r'rfind("\n")',
r'find_first_of("\t")',
r'find_last_of("\t")',
r'find("\t")',
r'rfind("\t")',
r'find_first_of("\\")',
r'find_last_of("\\")',
r'find("\\")',
r'rfind("\\")',
r'find_first_of("\"")',
r'find_last_of("\"")',
r'find("\"")',
r'rfind("\"")',
r'find_first_of("a", 1)',
r'find_last_of("a", 1)',
r'find("a", 1)',
r'rfind("a", 1)',
]
allowed = [
r'find("ab")',
r"find('a')",
r"rfind('a')",
r'rfind("ab")',
r"find('\n')",
r'find("\nx")',
r"rfind('\n')",
r'rfind("\nx")',
r"find('\t')",
r'find("\tx")',
r"rfind('\t')",
r'rfind("\tx")',
r"find('\\')",
r'find("\\x")',
r"rfind('\\')",
r'rfind("\\x")',
r"find('\"')",
r'find("\"x")',
r"rfind('\"')",
r'rfind("\"x")',
r"find('a', 1)",
r'find("ab", 1)',
r"rfind('a', 1)",
r'rfind("ab", 1)',
]
|
# Module-level registry of loaded mods.
# NOTE: the original preceded this with `global mods`; at module level the
# `global` statement is a no-op, so it has been removed.
mods = []
|
import hashlib
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.patches import Rectangle
import os
import shutil
import tempfile
from sar_parser import SarParser
LEGEND_THRESHOLD = 50
def ascii_date(d):
    """Format datetime *d* as 'YYYY-MM-DD HH:MM' (used for gnuplot ranges)."""
    return d.strftime("%Y-%m-%d %H:%M")
class SarGrapher(object):
    """Renders graphs (PNG/SVG/ASCII) from parsed sar data.

    Images are written one-per-graph into a private temporary directory so
    memory use stays roughly constant (matplotlib keeps figures alive
    otherwise); call close() to remove the directory.
    """

    def __init__(self, filenames, starttime=None, endtime=None):
        """Initializes the class, creates a SarParser class
        given a list of files and also parsers the files"""
        # Temporary dir where images are stored (one per graph)
        # NB: This is done to keep the memory usage constant
        # in spite of being a bit slower (before this change
        # we could use > 12GB RAM for a simple sar file -
        # matplotlib is simply inefficient in this area)
        self._tempdir = tempfile.mkdtemp(prefix='sargrapher')
        self.sar_parser = SarParser(filenames, starttime, endtime)
        self.sar_parser.parse()
        duplicate_timestamps = self.sar_parser._duplicate_timestamps
        if duplicate_timestamps:
            print("There are {0} lines with duplicate timestamps. First 10"
                  "line numbers at {1}".format(
                      len(duplicate_timestamps.keys()),
                      sorted(list(duplicate_timestamps.keys()))[:10]))

    def _graph_filename(self, graph, extension='.png'):
        """Creates a unique constant file name given a graph or graph list"""
        if isinstance(graph, list):
            temp = "_".join(graph)
        else:
            temp = graph
        # '%' and '/' appear in dataset names but are unsafe in file names.
        temp = temp.replace('%', '_')
        temp = temp.replace('/', '_')
        digest = hashlib.sha1()
        digest.update(temp.encode('utf-8'))
        fname = os.path.join(self._tempdir, digest.hexdigest() + extension)
        return fname

    def datasets(self):
        """Returns a list of all the available datasets"""
        return self.sar_parser.available_data_types()

    def timestamps(self):
        """Returns a sorted list of all the available timestamps"""
        return sorted(self.sar_parser.available_timestamps())

    def plot_datasets(self, data, fname, extra_labels, showreboots=False,
                      output='pdf'):
        """ Plot timeseries data (of type dataname). The data can be either
        simple (one or no datapoint at any point in time, or indexed (by
        indextype). dataname is assumed to be in the form of [title, [label1,
        label2, ...], [data1, data2, ...]] extra_labels is a list of tuples
        [(datetime, 'label'), ...] """
        sar_parser = self.sar_parser
        title = data[0][0]
        unit = data[0][1]
        axis_labels = data[0][2]
        datanames = data[1]

        if not isinstance(datanames, list):
            raise Exception("plottimeseries expects a list of datanames: %s" %
                            data)

        fig = plt.figure(figsize=(10.5, 6.5))
        axes = fig.add_subplot(111)
        axes.set_title('{0} time series'.format(title), fontsize=12)
        axes.set_xlabel('Time')
        axes.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
        # Twenty minutes. Could probably make it a parameter
        axes.xaxis.set_minor_locator(mdates.MinuteLocator(interval=20))
        fig.autofmt_xdate()

        ylabel = title
        if unit:
            ylabel += " - " + unit
        axes.set_ylabel(ylabel)
        # Plain (non-scientific) y tick labels are easier to read.
        y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
        axes.yaxis.set_major_formatter(y_formatter)
        axes.yaxis.get_major_formatter().set_scientific(False)

        # One distinct color per dataset.
        color_norm = colors.Normalize(vmin=0, vmax=len(datanames) - 1)
        scalar_map = cm.ScalarMappable(norm=color_norm,
                                       cmap=plt.get_cmap('Set1'))

        timestamps = self.timestamps()
        counter = 0
        for i in datanames:
            try:
                dataset = [sar_parser._data[d][i] for d in timestamps]
            except KeyError:  # was a bare except; only KeyError can occur here
                print("Key {0} does not exist in this graph".format(i))
                raise
            axes.plot(timestamps, dataset, 'o:', label=axis_labels[counter],
                      color=scalar_map.to_rgba(counter))
            counter += 1

        # Draw extra_labels
        if extra_labels:
            for extra in extra_labels:
                axes.annotate(extra[1], xy=(mdates.date2num(extra[0]),
                              sar_parser.find_max(extra[0], datanames)),
                              xycoords='data', xytext=(30, 30),
                              textcoords='offset points',
                              arrowprops=dict(arrowstyle="->",
                                              connectionstyle="arc3,rad=.2"))

        # If we have a sosreport draw the reboots
        if showreboots and sar_parser.sosreport is not None and \
           sar_parser.sosreport.reboots is not None:
            reboots = sar_parser.sosreport.reboots
            for reboot in reboots.keys():
                reboot_date = reboots[reboot]['date']
                rboot_x = mdates.date2num(reboot_date)
                (xmin, xmax) = plt.xlim()
                (ymin, ymax) = plt.ylim()
                # Skip reboots outside the plotted time window.
                if rboot_x < xmin or rboot_x > xmax:
                    continue
                axes.annotate('', xy=(mdates.date2num(reboot_date), ymin),
                              xycoords='data', xytext=(-30, -30),
                              textcoords='offset points',
                              arrowprops=dict(arrowstyle="->", color='blue',
                                              connectionstyle="arc3,rad=-0.1"))

        # Show any data collection gaps in the graph as grey rectangles.
        gaps = sar_parser.find_data_gaps()
        if len(gaps) > 0:
            for i in gaps:
                (g1, g2) = i
                x1 = mdates.date2num(g1)
                x2 = mdates.date2num(g2)
                (ymin, ymax) = plt.ylim()
                axes.add_patch(Rectangle((x1, ymin), x2 - x1,
                                         ymax - ymin, facecolor="lightgrey"))

        # Add a grid to the graph to ease visualization
        axes.grid(True)

        lgd = None
        # Draw the legend only when needed
        if len(datanames) > 1 or \
           (len(datanames) == 1 and len(datanames[0].split('#')) > 1):
            # We want the legends box roughly square shaped
            # and not take up too much room
            props = matplotlib.font_manager.FontProperties(size='xx-small')
            if len(datanames) < LEGEND_THRESHOLD:
                cols = int((len(datanames) ** 0.5))
                lgd = axes.legend(loc=1, ncol=cols, shadow=True, prop=props)
            else:
                cols = int(len(datanames) ** 0.6)
                lgd = axes.legend(loc=9, ncol=cols,
                                  bbox_to_anchor=(0.5, -0.29),
                                  shadow=True, prop=props)

        if len(datanames) == 0:
            return None

        try:
            if lgd:
                plt.savefig(fname, bbox_extra_artists=(lgd,),
                            bbox_inches='tight')
            else:
                plt.savefig(fname, bbox_inches='tight')
        except Exception:  # was a bare except; keep the exit-on-failure flow
            import traceback
            print(traceback.format_exc())
            import sys
            sys.exit(-1)

        # Free the figure's memory before returning (see class docstring).
        plt.cla()
        plt.clf()
        plt.close('all')

    def plot_svg(self, graphs, output, labels):
        """Given a list of graphs, output an svg file per graph.
        Input is a list of strings. A graph with multiple datasets
        is a string with datasets separated by comma"""
        if output == 'out.pdf':
            output = 'graph'
        counter = 1
        fnames = []
        for i in graphs:
            subgraphs = i.split(',')
            fname = self._graph_filename(subgraphs, '.svg')
            fnames.append(fname)
            self.plot_datasets((['', None, subgraphs], subgraphs), fname,
                               labels)
            dest = os.path.join(os.getcwd(), "{0}{1}.svg".format(
                output, counter))
            shutil.move(fname, dest)
            print("Created: {0}".format(dest))
            counter += 1

        # removes all temporary files and directories
        self.close()

    def plot_ascii(self, graphs, def_columns=80, def_rows=25):
        """Displays a single graph in ASCII form on the terminal"""
        import subprocess
        sar_parser = self.sar_parser
        timestamps = self.timestamps()
        try:
            # stty prints e.g. "25 80"; convert to ints (the original kept
            # strings, so the size comparison below was broken).
            rows, columns = [int(v) for v in
                             os.popen('stty size', 'r').read().split()]
        except (ValueError, OSError):
            # Not a tty (or unparsable output): fall back to defaults.
            columns = def_columns
            rows = def_rows
        if columns > def_columns:
            columns = def_columns

        for graph in graphs:
            try:
                # universal_newlines=True gives a text-mode stdin so the
                # str writes below work on Python 3 as well.
                gnuplot = subprocess.Popen(["/usr/bin/gnuplot"],
                                           stdin=subprocess.PIPE,
                                           universal_newlines=True)
            except Exception as e:
                # was: raise("...") which raises a plain str (a TypeError)
                raise RuntimeError("Error launching gnuplot: {0}".format(e))

            gnuplot.stdin.write("set term dumb {0} {1}\n".format(
                columns, rows))
            gnuplot.stdin.write("set xdata time\n")
            gnuplot.stdin.write('set xlabel "Time"\n')
            gnuplot.stdin.write('set timefmt \"%Y-%m-%d %H:%M\"\n')
            gnuplot.stdin.write('set xrange [\"%s\":\"%s\"]\n' %
                                (ascii_date(timestamps[0]),
                                 ascii_date(timestamps[-1])))
            gnuplot.stdin.write('set ylabel "%s"\n' % (graph))
            gnuplot.stdin.write('set datafile separator ","\n')
            gnuplot.stdin.write('set autoscale y\n')
            gnuplot.stdin.write('set title "%s - %s"\n' %
                                (graph, " ".join(sar_parser._files)))
            # FIXME: do it through a method
            try:
                dataset = [sar_parser._data[d][graph] for d in timestamps]
            except KeyError:
                # was: a format string whose placeholder was never filled
                print("Key '{0}' could not be found".format(graph))
                return

            txt = "plot '-' using 1:2 title '{0}' with linespoints \n".format(
                graph)
            gnuplot.stdin.write(txt)
            for i, j in zip(timestamps, dataset):
                s = '\"%s\",%f\n' % (ascii_date(i), j)
                gnuplot.stdin.write(s)
            gnuplot.stdin.write("e\n")
            gnuplot.stdin.write("exit\n")
            gnuplot.stdin.flush()

    def export_csv(self):
        """Placeholder: CSV export is not implemented yet."""
        return

    def close(self):
        """Removes temporary directory and files"""
        if os.path.isdir(self._tempdir):
            shutil.rmtree(self._tempdir)
|
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
# Short aliases for the NameMapper lookup helpers used by the generated
# respond() method below.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time

# Metadata recorded by the Cheetah compiler about this generated module.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.501688
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/gettags.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run against a Cheetah runtime older than the compiler required.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
class gettags(Template):
    """Cheetah-generated template rendering the tag list as <e2tags> XML.

    Autogenerated code (see __CHEETAH_src__ above) -- do not hand-edit the
    logic; regenerate from the .tmpl source instead.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(gettags, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            # Forward only the keyword args the Cheetah runtime understands.
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        # Switch to the WebSafe filter for the duration of this template,
        # instantiating and caching it on first use.
        # NOTE(review): dict.has_key is Python 2 only; this generated module
        # cannot run unchanged on Python 3.
        _orig_filter_91099948 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
        # Emit one <e2tag> element per entry in $tags from the search list.
        for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
            write(u'''\t\t<e2tag>''')
            _v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
            if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
            write(u'''</e2tag>
''')
        write(u'''</e2tags>
''')
        # Restore whatever filter was active before this template ran.
        _filter = self._CHEETAH__currentFilter = _orig_filter_91099948

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_gettags= 'respond'
# Hook the Cheetah runtime plumbing into the generated class exactly once.
if not hasattr(gettags, '_initCheetahAttributes'):
    templateAPIClass = getattr(gettags, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(gettags)

# Allow rendering the compiled template directly from the command line.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=gettags()).run()
|
"""
.. module:: editor_subscribe_label_deleted
The **Editor Subscribe Label Deleted** Model.
PostgreSQL Definition
---------------------
The :code:`editor_subscribe_label_deleted` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE editor_subscribe_label_deleted
(
editor INTEGER NOT NULL, -- PK, references editor.id
gid UUID NOT NULL, -- PK, references deleted_entity.gid
deleted_by INTEGER NOT NULL -- references edit.id
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class editor_subscribe_label_deleted(models.Model):
    """
    Not all parameters are listed here, only those that present some interest
    in their Django implementation.

    :param editor: references :class:`.editor`
    :param gid: references :class:`.deleted_entity`
    :param deleted_by: references :class:`.edit`
    """

    # on_delete=CASCADE is made explicit: it was the implicit default for
    # relational fields and is a required argument from Django 2.0 onwards.
    editor = models.OneToOneField('editor', primary_key=True,
                                  on_delete=models.CASCADE)
    gid = models.OneToOneField('deleted_entity', on_delete=models.CASCADE)
    deleted_by = models.ForeignKey('edit', on_delete=models.CASCADE)

    def __str__(self):
        return 'Editor Subscribe Label Deleted'

    class Meta:
        db_table = 'editor_subscribe_label_deleted'
|
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
class MassMailController(http.Controller):
    """Public HTTP endpoints for mass-mailing open tracking and opt-out."""

    @http.route('/mail/track/<int:mail_id>/blank.gif', type='http', auth='none')
    def track_mail_open(self, mail_id, **post):
        """ Email tracking. """
        import base64
        mail_mail_stats = request.registry.get('mail.mail.statistics')
        mail_mail_stats.set_opened(request.cr, SUPERUSER_ID, mail_mail_ids=[mail_id])
        response = werkzeug.wrappers.Response()
        response.mimetype = 'image/gif'
        # 1x1 transparent GIF. base64.b64decode replaces the Python 2-only
        # str.decode('base64') idiom and yields the same bytes.
        response.data = base64.b64decode('R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==')
        return response

    @http.route(['/mail/mailing/<int:mailing_id>/unsubscribe'], type='http', auth='none')
    def mailing(self, mailing_id, email=None, res_id=None, **post):
        """Opt the given recipient out of the given mass mailing."""
        cr, uid, context = request.cr, request.uid, request.context
        MassMailing = request.registry['mail.mass_mailing']
        mailing_ids = MassMailing.exists(cr, SUPERUSER_ID, [mailing_id], context=context)
        if not mailing_ids:
            return 'KO'
        mailing = MassMailing.browse(cr, SUPERUSER_ID, mailing_ids[0], context=context)
        if mailing.mailing_model == 'mail.mass_mailing.contact':
            # Mailing-list contacts: match by list membership, id and email.
            list_ids = [l.id for l in mailing.contact_list_ids]
            record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('list_id', 'in', list_ids), ('id', '=', res_id), ('email', 'ilike', email)], context=context)
            request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
        else:
            # Other models: find whichever email field the model defines,
            # then set opt_out if the model supports it.
            email_fname = None
            if 'email_from' in request.registry[mailing.mailing_model]._all_columns:
                email_fname = 'email_from'
            elif 'email' in request.registry[mailing.mailing_model]._all_columns:
                email_fname = 'email'
            if email_fname:
                record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('id', '=', res_id), (email_fname, 'ilike', email)], context=context)
            if 'opt_out' in request.registry[mailing.mailing_model]._all_columns:
                request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
        return 'OK'
|
default_app_config = 'escolar.apps.EscolarConfig'
|
"""XLIFF classes specifically suited for handling the PO representation in
XLIFF.
This way the API supports plurals as if it was a PO file, for example.
"""
import re
from lxml import etree
from translate.misc.multistring import multistring
from translate.misc.xml_helpers import setXMLspace
from translate.storage import base, lisa, poheader, xliff
from translate.storage.placeables import general
def hasplurals(thing):
    """Return True if *thing* is a multistring carrying more than one form."""
    return isinstance(thing, multistring) and len(thing.strings) > 1
class PoXliffUnit(xliff.xliffunit):
    """A class to specifically handle the plural units created from a po file."""

    rich_parsers = general.parsers

    def __init__(self, source=None, empty=False, **kwargs):
        # A plural unit keeps its singular forms as sub-units in
        # self.units; non-plural sources delegate to plain xliffunit.
        self._rich_source = None
        self._rich_target = None
        self._state_n = 0
        self.units = []
        if empty:
            return
        if not hasplurals(source):
            super().__init__(source)
            return
        # Plural entries are encoded as an XLIFF <group> with a
        # gettext-specific restype.
        self.xmlelement = etree.Element(self.namespaced("group"))
        self.xmlelement.set("restype", "x-gettext-plurals")
        self.source = source

    def __eq__(self, other):
        # Equality descends into the plural sub-units.
        if isinstance(other, PoXliffUnit):
            if len(self.units) != len(other.units):
                return False
            if not super().__eq__(other):
                return False
            # NOTE(review): comparison starts at index 1 -- presumably the
            # first sub-unit is already covered by super().__eq__; confirm.
            for i in range(len(self.units) - 1):
                if not self.units[i + 1] == other.units[i + 1]:
                    return False
            return True
        if len(self.units) <= 1:
            if isinstance(other, lisa.LISAunit):
                return super().__eq__(other)
            else:
                return self.source == other.source and self.target == other.target
        return False

    # XXX: We don't return language nodes correctly at the moment
    # def getlanguageNodes(self):
    #     if not self.hasplural():
    #         return super().getlanguageNodes()
    #     else:
    #         return self.units[0].getlanguageNodes()

    @property
    def source(self):
        # Plural units report the sources of all sub-units as a multistring.
        if not self.hasplural():
            return super().source
        return multistring([unit.source for unit in self.units])

    @source.setter
    def source(self, source):
        self.setsource(source, sourcelang="en")

    def setsource(self, source, sourcelang="en"):
        """Set the source text, rebuilding the sub-units when it is plural."""
        # TODO: consider changing from plural to singular, etc.
        self._rich_source = None
        if not hasplurals(source):
            super().setsource(source, sourcelang)
        else:
            # Remember the target so it can be re-applied after the rebuild.
            target = self.target
            for unit in self.units:
                try:
                    self.xmlelement.remove(unit.xmlelement)
                except ValueError:
                    pass
            self.units = []
            for s in source.strings:
                newunit = xliff.xliffunit(s)
                # newunit.namespace = self.namespace #XXX?necessary?
                self.units.append(newunit)
                self.xmlelement.append(newunit.xmlelement)
            self.target = target

    # We don't support any rich strings yet
    multistring_to_rich = base.TranslationUnit.multistring_to_rich
    rich_to_multistring = base.TranslationUnit.rich_to_multistring
    rich_source = base.TranslationUnit.rich_source
    rich_target = base.TranslationUnit.rich_target

    def gettarget(self, lang=None):
        """Return the target text; a multistring for plural units."""
        if self.hasplural():
            strings = [unit.target for unit in self.units]
            if strings:
                return multistring(strings)
            else:
                return None
        else:
            return super().gettarget(lang)

    def settarget(self, target, lang="xx", append=False):
        """Set the target, padding or expanding plural forms as needed."""
        self._rich_target = None
        if self.target == target:
            return
        if not self.hasplural():
            super().settarget(target, lang, append)
            return
        if not isinstance(target, multistring):
            target = multistring(target)
        source = self.source
        sourcel = len(source.strings)
        targetl = len(target.strings)
        if sourcel < targetl:
            # More targets than sources: repeat the last source form,
            # rebuild the sub-units, and keep the unit id intact.
            sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
            targets = target.strings
            id = self.getid()
            self.source = multistring(sources)
            self.setid(id)
        elif targetl < sourcel:
            # Fewer targets than sources: pad targets with empty strings.
            targets = target.strings + [""] * (sourcel - targetl)
        else:
            targets = target.strings
        for i in range(len(self.units)):
            self.units[i].target = targets[i]

    def addnote(self, text, origin=None, position="append"):
        """Add a note specifically in a "note" tag"""
        note = etree.SubElement(self.xmlelement, self.namespaced("note"))
        note.text = text
        if origin:
            note.set("from", origin)
        # Propagate the note to the remaining plural sub-units.
        for unit in self.units[1:]:
            unit.addnote(text, origin)

    def getnotes(self, origin=None):
        # NOTE: We support both <context> and <note> tags in xliff files for comments
        if origin == "translator":
            notes = super().getnotes("translator")
            trancomments = self.gettranslatorcomments()
            # De-duplicate text that appears both as a note and a context
            # comment before concatenating.
            if notes == trancomments or trancomments.find(notes) >= 0:
                notes = ""
            elif notes.find(trancomments) >= 0:
                trancomments = notes
                notes = ""
            return trancomments + notes
        elif origin in ["programmer", "developer", "source code"]:
            devcomments = super().getnotes("developer")
            autocomments = self.getautomaticcomments()
            if devcomments == autocomments or autocomments.find(devcomments) >= 0:
                devcomments = ""
            elif devcomments.find(autocomments) >= 0:
                autocomments = devcomments
                devcomments = ""
            # NOTE(review): unlike the translator branch, devcomments is not
            # included in the return value -- confirm this is intentional.
            return autocomments
        else:
            return super().getnotes(origin)

    def markfuzzy(self, value=True):
        # Fuzzy state is mirrored onto every plural sub-unit.
        super().markfuzzy(value)
        for unit in self.units[1:]:
            unit.markfuzzy(value)

    def marktranslated(self):
        super().marktranslated()
        for unit in self.units[1:]:
            unit.marktranslated()

    def setid(self, id):
        # Sub-units receive derived ids of the form "<id>[<index>]".
        super().setid(id)
        if len(self.units) > 1:
            for i in range(len(self.units)):
                self.units[i].setid("%s[%d]" % (id, i))

    def getlocations(self):
        """Returns all the references (source locations)"""
        groups = self.getcontextgroups("po-reference")
        references = []
        for group in groups:
            sourcefile = ""
            linenumber = ""
            for (type, text) in group:
                if type == "sourcefile":
                    sourcefile = text
                elif type == "linenumber":
                    linenumber = text
            assert sourcefile
            if linenumber:
                sourcefile = sourcefile + ":" + linenumber
            references.append(sourcefile)
        return references

    def getautomaticcomments(self):
        """Returns the automatic comments (x-po-autocomment), which corresponds
        to the #. style po comments.
        """

        def hasautocomment(grp):
            return grp[0] == "x-po-autocomment"

        groups = self.getcontextgroups("po-entry")
        comments = []
        for group in groups:
            commentpairs = filter(hasautocomment, group)
            for (type, text) in commentpairs:
                comments.append(text)
        return "\n".join(comments)

    def gettranslatorcomments(self):
        """Returns the translator comments (x-po-trancomment), which
        corresponds to the # style po comments.
        """

        def hastrancomment(grp):
            return grp[0] == "x-po-trancomment"

        groups = self.getcontextgroups("po-entry")
        comments = []
        for group in groups:
            commentpairs = filter(hastrancomment, group)
            for (type, text) in commentpairs:
                comments.append(text)
        return "\n".join(comments)

    def isheader(self):
        # The gettext domain header is stored with a dedicated restype.
        return "gettext-domain-header" in (self.getrestype() or "")

    def istranslatable(self):
        return super().istranslatable() and not self.isheader()

    @classmethod
    def createfromxmlElement(cls, element, namespace=None):
        """Build a unit from a <trans-unit> or a plural <group> element."""
        if element.tag.endswith("trans-unit"):
            object = cls(None, empty=True)
            object.xmlelement = element
            object.namespace = namespace
            return object
        assert element.tag.endswith("group")
        group = cls(None, empty=True)
        group.xmlelement = element
        group.namespace = namespace
        units = list(element.iterdescendants(group.namespaced("trans-unit")))
        for unit in units:
            subunit = xliff.xliffunit.createfromxmlElement(unit)
            subunit.namespace = namespace
            group.units.append(subunit)
        return group

    def hasplural(self):
        # Plural units are exactly those represented by a <group> element.
        return self.xmlelement.tag == self.namespaced("group")
class PoXliffFile(xliff.xlifffile, poheader.poheader):
    """a file for the po variant of Xliff files"""

    UnitClass = PoXliffUnit

    def __init__(self, *args, **kwargs):
        # PO files are effectively single-source-language documents.
        if "sourcelanguage" not in kwargs:
            kwargs["sourcelanguage"] = "en-US"
        xliff.xlifffile.__init__(self, *args, **kwargs)

    def createfilenode(self, filename, sourcelanguage="en-US", datatype="po"):
        """Create a <file> node, preferring the file's own source language."""
        # Let's ignore the sourcelanguage parameter opting for the internal
        # one. PO files will probably be one language
        return super().createfilenode(
            filename, sourcelanguage=self.sourcelanguage, datatype="po"
        )

    def _insert_header(self, header):
        # The gettext header is marked with a dedicated restype and kept
        # unapproved with preserved whitespace.
        header.xmlelement.set("restype", "x-gettext-domain-header")
        header.xmlelement.set("approved", "no")
        setXMLspace(header.xmlelement, "preserve")
        self.addunit(header)

    def addheaderunit(self, target, filename):
        """Add and return a unit carrying the gettext domain header."""
        unit = self.addsourceunit(target, filename, True)
        unit.target = target
        unit.xmlelement.set("restype", "x-gettext-domain-header")
        unit.xmlelement.set("approved", "no")
        setXMLspace(unit.xmlelement, "preserve")
        return unit

    def addplural(self, source, target, filename, createifmissing=False):
        """This method should now be unnecessary, but is left for reference"""
        assert isinstance(source, multistring)
        if not isinstance(target, multistring):
            target = multistring(target)
        sourcel = len(source.strings)
        targetl = len(target.strings)
        if sourcel < targetl:
            # Bug fix: the repetition count must be parenthesised; the
            # original computed (list * targetl) - sourcel, a TypeError.
            # (Matches the correct form in PoXliffUnit.settarget.)
            sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
            targets = target.strings
        else:
            sources = source.strings
            targets = target.strings
        self._messagenum += 1
        pluralnum = 0
        group = self.creategroup(filename, True, restype="x-gettext-plural")
        for (src, tgt) in zip(sources, targets):
            unit = self.UnitClass(src)
            unit.target = tgt
            unit.setid("%d[%d]" % (self._messagenum, pluralnum))
            pluralnum += 1
            group.append(unit.xmlelement)
            self.units.append(unit)
        if pluralnum < sourcel:
            # Remaining untranslated plural forms become non-translatable
            # units.
            for string in sources[pluralnum:]:
                # Bug fix: the original reused the stale loop variable
                # 'src' here instead of the current 'string'.
                unit = self.UnitClass(string)
                unit.xmlelement.set("translate", "no")
                unit.setid("%d[%d]" % (self._messagenum, pluralnum))
                pluralnum += 1
                group.append(unit.xmlelement)
                self.units.append(unit)
        return self.units[-pluralnum]

    def parse(self, xml):
        """Populates this object from the given xml string"""

        # TODO: Make more robust

        def ispluralgroup(node):
            """determines whether the xml node refers to a gettext plural"""
            return node.get("restype") == "x-gettext-plurals"

        def isnonpluralunit(node):
            """determines whether the xml node contains a plural like id.

            We want to filter out all the plural nodes, except the very first
            one in each group.
            """
            return re.match(r".+\[[123456]\]$", node.get("id") or "") is None

        def pluralunits(pluralgroups):
            for pluralgroup in pluralgroups:
                yield self.UnitClass.createfromxmlElement(
                    pluralgroup, namespace=self.namespace
                )

        self.filename = getattr(xml, "name", "")
        if hasattr(xml, "read"):
            xml.seek(0)
            xmlsrc = xml.read()
            xml = xmlsrc
        # resolve_entities=False guards against XML entity expansion.
        parser = etree.XMLParser(resolve_entities=False)
        self.document = etree.fromstring(xml, parser).getroottree()
        self.initbody()
        root_node = self.document.getroot()
        assert root_node.tag == self.namespaced(self.rootNode)
        groups = root_node.iterdescendants(self.namespaced("group"))
        pluralgroups = filter(ispluralgroup, groups)
        termEntries = root_node.iterdescendants(
            self.namespaced(self.UnitClass.rootNode)
        )
        singularunits = list(filter(isnonpluralunit, termEntries))
        if len(singularunits) == 0:
            return
        pluralunit_iter = pluralunits(pluralgroups)
        nextplural = next(pluralunit_iter, None)
        for entry in singularunits:
            term = self.UnitClass.createfromxmlElement(entry, namespace=self.namespace)
            # A singular unit with id "<pluralid>[0]" marks the spot where
            # the whole plural group is inserted instead.
            if nextplural and str(term.getid()) == ("%s[0]" % nextplural.getid()):
                self.addunit(nextplural, new=False)
                nextplural = next(pluralunit_iter, None)
            else:
                self.addunit(term, new=False)
|
import os
import sys
import unittest
from unittest import mock
import time
import logging
import tempfile
from os.path import join
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from gallery_dl import config, extractor # noqa E402
class TestCookiejar(unittest.TestCase):
    """Tests for loading cookies from a Netscape-format cookies.txt file."""

    @classmethod
    def setUpClass(cls):
        # Create one valid and one malformed cookie file in a temp dir.
        cls.path = tempfile.TemporaryDirectory()
        cls.cookiefile = join(cls.path.name, "cookies.txt")
        with open(cls.cookiefile, "w") as file:
            file.write("""# HTTP Cookie File
.example.org\tTRUE\t/\tFALSE\t253402210800\tNAME\tVALUE
""")
        cls.invalid_cookiefile = join(cls.path.name, "invalid.txt")
        with open(cls.invalid_cookiefile, "w") as file:
            file.write("""# asd
.example.org\tTRUE/FALSE\t253402210800\tNAME\tVALUE
""")

    @classmethod
    def tearDownClass(cls):
        cls.path.cleanup()
        config.clear()

    def test_cookiefile(self):
        # A valid file is loaded into the extractor session's cookie jar.
        config.set((), "cookies", self.cookiefile)
        cookies = extractor.find("test:").session.cookies
        self.assertEqual(len(cookies), 1)
        cookie = next(iter(cookies))
        self.assertEqual(cookie.domain, ".example.org")
        self.assertEqual(cookie.path  , "/")
        self.assertEqual(cookie.name  , "NAME")
        self.assertEqual(cookie.value , "VALUE")

    def test_invalid_cookiefile(self):
        self._test_warning(self.invalid_cookiefile, ValueError)

    def test_invalid_filename(self):
        self._test_warning(join(self.path.name, "nothing"), FileNotFoundError)

    def _test_warning(self, filename, exc):
        # Broken cookie files must log exactly one warning and load nothing.
        config.set((), "cookies", filename)
        log = logging.getLogger("test")
        with mock.patch.object(log, "warning") as mock_warning:
            cookies = extractor.find("test:").session.cookies
        self.assertEqual(len(cookies), 0)
        self.assertEqual(mock_warning.call_count, 1)
        self.assertEqual(mock_warning.call_args[0][0], "cookies: %s")
        self.assertIsInstance(mock_warning.call_args[0][1], exc)
class TestCookiedict(unittest.TestCase):
    """Tests for supplying cookies as a plain name->value dict."""

    def setUp(self):
        self.cdict = {"NAME1": "VALUE1", "NAME2": "VALUE2"}
        config.set((), "cookies", self.cdict)

    def tearDown(self):
        config.clear()

    def test_dict(self):
        # All dict entries end up in the session's cookie jar.
        cookies = extractor.find("test:").session.cookies
        self.assertEqual(len(cookies), len(self.cdict))
        self.assertEqual(sorted(cookies.keys()), sorted(self.cdict.keys()))
        self.assertEqual(sorted(cookies.values()), sorted(self.cdict.values()))

    def test_domain(self):
        # Dict cookies are bound to each extractor's cookie domain.
        for category in ["exhentai", "idolcomplex", "nijie", "seiga"]:
            extr = _get_extractor(category)
            cookies = extr.session.cookies
            for key in self.cdict:
                self.assertTrue(key in cookies)
            for c in cookies:
                self.assertEqual(c.domain, extr.cookiedomain)
class TestCookieLogin(unittest.TestCase):
    """Tests that supplying login cookies skips the login procedure."""

    def tearDown(self):
        config.clear()

    def test_cookie_login(self):
        # Per-site names of the cookies that represent a logged-in session.
        extr_cookies = {
            "exhentai"   : ("ipb_member_id", "ipb_pass_hash"),
            "idolcomplex": ("login", "pass_hash"),
            "nijie"      : ("nemail", "nlogin"),
            "seiga"      : ("user_session",),
        }
        for category, cookienames in extr_cookies.items():
            cookies = {name: "value" for name in cookienames}
            config.set((), "cookies", cookies)
            extr = _get_extractor(category)
            with mock.patch.object(extr, "_login_impl") as mock_login:
                extr.login()
                # With the cookies present, no real login may be attempted.
                mock_login.assert_not_called()
class TestCookieUtils(unittest.TestCase):
    """Tests for Extractor._check_cookies (presence, domain, expiry)."""

    def test_check_cookies(self):
        extr = extractor.find("test:")
        self.assertFalse(extr._cookiejar, "empty")
        self.assertFalse(extr.cookiedomain, "empty")

        # always returns False when checking for empty cookie list
        self.assertFalse(extr._check_cookies(()))

        self.assertFalse(extr._check_cookies(("a",)))
        self.assertFalse(extr._check_cookies(("a", "b")))
        self.assertFalse(extr._check_cookies(("a", "b", "c")))

        # _check_cookies is True only when every named cookie is present
        extr._cookiejar.set("a", "1")
        self.assertTrue(extr._check_cookies(("a",)))
        self.assertFalse(extr._check_cookies(("a", "b")))
        self.assertFalse(extr._check_cookies(("a", "b", "c")))

        extr._cookiejar.set("b", "2")
        self.assertTrue(extr._check_cookies(("a",)))
        self.assertTrue(extr._check_cookies(("a", "b")))
        self.assertFalse(extr._check_cookies(("a", "b", "c")))

    def test_check_cookies_domain(self):
        # Only cookies on the extractor's cookiedomain count.
        extr = extractor.find("test:")
        self.assertFalse(extr._cookiejar, "empty")
        extr.cookiedomain = ".example.org"

        self.assertFalse(extr._check_cookies(("a",)))
        self.assertFalse(extr._check_cookies(("a", "b")))

        extr._cookiejar.set("a", "1")
        self.assertFalse(extr._check_cookies(("a",)))

        extr._cookiejar.set("a", "1", domain=extr.cookiedomain)
        self.assertTrue(extr._check_cookies(("a",)))

        # A subdomain cookie coexists with the base-domain one.
        extr._cookiejar.set("a", "1", domain="www" + extr.cookiedomain)
        self.assertEqual(len(extr._cookiejar), 3)
        self.assertTrue(extr._check_cookies(("a",)))

        extr._cookiejar.set("b", "2", domain=extr.cookiedomain)
        extr._cookiejar.set("c", "3", domain=extr.cookiedomain)
        self.assertTrue(extr._check_cookies(("a", "b", "c")))

    def test_check_cookies_expires(self):
        extr = extractor.find("test:")
        self.assertFalse(extr._cookiejar, "empty")
        self.assertFalse(extr.cookiedomain, "empty")
        now = int(time.time())
        log = logging.getLogger("test")

        # already expired -> warning, check fails
        extr._cookiejar.set("a", "1", expires=now-100)
        with mock.patch.object(log, "warning") as mw:
            self.assertFalse(extr._check_cookies(("a",)))
        self.assertEqual(mw.call_count, 1)
        self.assertEqual(mw.call_args[0], ("Cookie '%s' has expired", "a"))

        # expiring within an hour -> warning with singular wording
        extr._cookiejar.set("a", "1", expires=now+100)
        with mock.patch.object(log, "warning") as mw:
            self.assertFalse(extr._check_cookies(("a",)))
        self.assertEqual(mw.call_count, 1)
        self.assertEqual(mw.call_args[0], (
            "Cookie '%s' will expire in less than %s hour%s", "a", 1, ""))

        # expiring within a few hours -> warning with plural wording
        extr._cookiejar.set("a", "1", expires=now+100+7200)
        with mock.patch.object(log, "warning") as mw:
            self.assertFalse(extr._check_cookies(("a",)))
        self.assertEqual(mw.call_count, 1)
        self.assertEqual(mw.call_args[0], (
            "Cookie '%s' will expire in less than %s hour%s", "a", 3, "s"))

        # more than a day left -> no warning, check passes
        extr._cookiejar.set("a", "1", expires=now+100+24*3600)
        with mock.patch.object(log, "warning") as mw:
            self.assertTrue(extr._check_cookies(("a",)))
        self.assertEqual(mw.call_count, 0)
def _get_extractor(category):
    """Instantiate the first extractor of *category* that supports login."""
    candidates = (
        cls for cls in extractor.extractors()
        if cls.category == category and hasattr(cls, "_login_impl")
    )
    for cls in candidates:
        sample_url = next(cls._get_tests())[0]
        return cls.from_url(sample_url)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
from os import path
try:
from lib.settings_build import Configure
except ImportError:
import sys
from os.path import expanduser, join
sys.path.append(join(expanduser("~"), 'workspace/automation/launchy'))
from lib.settings_build import Configure
class Default(Configure):
    """Base deployment configuration shared by every environment."""

    def __init__(self):
        self.beta = False
        self.local = False
        self.project = 'nedcompost'
        self.php = True
        # Database name and user default to the project name.
        self.database_name = self.project
        self.database_user = self.project
        self.path_project_root = path.join('/mnt', self.project)
        # setDefaults()/setHost() come from the Configure base class
        # (lib.settings_build) -- behaviour not visible here.
        self.setDefaults()
        # Only environments that defined target hosts run host setup.
        if getattr(self, 'host', False):
            self.setHost()
class Local(Default):
    """Local development environment (beta behaviour, local database)."""

    def __init__(self):
        self.beta = True
        self.local = True
        # NOTE(review): hard-coded credential -- consider moving secrets
        # into local_settings.py.
        self.database_root_password = 'password'
        super(Local, self).__init__()
class Production(Default):
    """Production deployment on the aws-php-3 host."""

    def __init__(self):
        self.host = ['aws-php-3', ]
        self.domain = 'nedcompost.org'
        # NOTE(review): credentials are committed in source -- they should
        # live in local_settings.py or a secret store instead.
        self.database_root_password = 'password'
        # self.database_password = 'iNcJ%kx87[M>L:!6pkY$fXZIu'
        self.database_password = 'zHR-mp)@ZZydJ=s9R}*S+4,!a'
        super(Production, self).__init__()
class Beta(Default):
    """Beta/staging deployment on the aws-php-3 host."""

    def __init__(self):
        self.beta = True
        self.host = ['aws-php-3', ]
        self.domain = 'nedcompost.mitesdesign.com'
        # NOTE(review): hard-coded credentials, same concern as Production.
        self.database_root_password = 'password'
        self.database_password = 'zHR-mp)@ZZydJ=s9R}*S+4,!a'
        super(Beta, self).__init__()
# Optionally override any of the above with machine-specific settings.
try:
    from local_settings import *
except ImportError:
    pass
|
from numpy import *
from matplotlib.pyplot import *
import scipy.constants as sc
import copy
import scipy.integrate as integ
def hw5(m1, m2, a, e, tmax, tstep=0.001, tplot=0.025, method='leapfrog'):
    """Integrate and plot a two-body orbit.

    m1, m2 -- masses; a -- semi-major axis; e -- eccentricity;
    tmax -- integration time in orbital periods; tstep/tplot -- step and
    plot intervals as fractions of the period;
    method -- 'leapfrog' or 'odeint'.
    """
    if method != 'leapfrog' and method != 'odeint':
        print("That's not a method")
        return()
    # initialize commonly used variables
    period = sqrt((4*(pi**2)*(a**3)) / (sc.G*(m1 + m2)))
    dt = period*tstep
    # initialize objects at time 0
    q = m1 / m2
    r0 = (1-e)*a/(1+q)
    v0 = (1/(1+q))*sqrt((1+e)/(1-e))*sqrt(sc.G*(m1+m2)/a)
    # state vector: [x1, y1, vx1, vy1, x2, y2, vx2, vy2]
    rv = array([r0, 0, 0, v0, -q*r0, 0, 0, -q*v0])
    # set up figure
    figure(1)
    gca().set_aspect('equal')
    xlim([-2*a, 2*a])
    ylim([-2*a, 2*a])
    rv_list = []
    if method == 'leapfrog':
        timeCounter = 0
        frameCounter = 0
        while timeCounter < tmax:
            # record positions if tplot time has passed
            if frameCounter >= tplot:
                frameCounter = 0
                rv_list.append(copy.deepcopy(rv))
            # advance positions
            rv[0] = rv[0] + rv[2]*dt
            rv[1] = rv[1] + rv[3]*dt
            rv[4] = rv[4] + rv[6]*dt
            rv[5] = rv[5] + rv[7]*dt
            # gravitational force on body 1 from body 2.
            # Bug fix: 'np' was never imported here -- it only resolved if
            # a star import leaked it.  'from numpy import *' provides
            # 'linalg' directly.
            r = array([rv[0] - rv[4], rv[1] - rv[5]])
            force = ((sc.G*m1*m2)/(linalg.norm(r)**2))*(r/linalg.norm(r))
            # advance velocities
            rv[2] = rv[2] - (force[0]/m1)*dt
            rv[3] = rv[3] - (force[1]/m1)*dt
            rv[6] = rv[6] + (force[0]/m2)*dt
            rv[7] = rv[7] + (force[1]/m2)*dt
            # increment counters
            timeCounter += tstep
            frameCounter += tstep
        # record final position
        rv_list.append(copy.deepcopy(rv))
        rv_list_plot = rv_list
    else:
        # odeint
        rv_list = integ.odeint(deriv, rv, arange(0, tmax*period, dt), (m1, m2))
        # needed to calculate using tstep, but we want to plot
        # using tplot.  Bug fix: a slice step must be an integer --
        # tplot/tstep is a float and raised TypeError.
        t_interval = max(1, int(round(tplot / tstep)))
        rv_list_plot = rv_list[::t_interval]
    # plot both bodies at every recorded time
    for i in range(len(rv_list_plot)):
        plot(rv_list_plot[i][0],rv_list_plot[i][1],'bo')
        plot(rv_list_plot[i][4],rv_list_plot[i][5],'go')
    draw()
def deriv(rv, dt, m1, m2):
    """Time derivative of the two-body state vector, for odeint.

    rv -- [x1, y1, vx1, vy1, x2, y2, vx2, vy2]; dt is unused but required
    by the odeint callback signature; m1, m2 -- the two masses.
    """
    rv_copy = zeros(8)
    # position derivatives are the current velocities
    rv_copy[0] = rv[2]
    rv_copy[1] = rv[3]
    rv_copy[4] = rv[6]
    rv_copy[5] = rv[7]
    # velocity derivatives from Newtonian gravity.
    # Bug fix: 'np' was never imported in this module; use the 'linalg'
    # name that 'from numpy import *' provides.
    r = array([rv[0] - rv[4], rv[1] - rv[5]])
    force = ((sc.G*m1*m2)/(linalg.norm(r)**2))*(r/linalg.norm(r))
    rv_copy[2] = - (force[0]/m1)
    rv_copy[3] = - (force[1]/m1)
    rv_copy[6] = + (force[0]/m2)
    rv_copy[7] = + (force[1]/m2)
    return rv_copy
|
from django import forms
from getresults_aliquot.models import Aliquot
from .models import Order
class OrderForm(forms.ModelForm):
    """ModelForm for Order records; validates the aliquot identifier."""

    def clean_aliquot_identifier(self):
        """Ensure the identifier refers to an existing Aliquot.

        Raises:
            forms.ValidationError: if no matching Aliquot exists.
        """
        aliquot_identifier = self.cleaned_data.get('aliquot_identifier')
        try:
            Aliquot.objects.get(aliquot_identifier=aliquot_identifier)
        except Aliquot.DoesNotExist:
            raise forms.ValidationError('Invalid Aliquot Identifier. Got {}'.format(aliquot_identifier))
        # Bug fix: a clean_<field> method must return the cleaned value;
        # returning None silently blanked the field in cleaned_data.
        return aliquot_identifier

    class Meta:
        model = Order
        # Bug fix: Django >= 1.8 refuses a ModelForm without an explicit
        # field list; '__all__' preserves the previous behaviour.
        fields = '__all__'
|
import tempfile
import os
import storage.fileUtils as fileUtils
import testValidation
from testrunner import VdsmTestCase as TestCaseBase
class DirectFileTests(TestCaseBase):
    """Exercise fileUtils.open_ex direct-I/O ("d" flag) reads and writes.

    (Python 2 module: str data is written through binary-mode files.)
    """

    @classmethod
    def getConfigTemplate(cls):
        return {}

    def testRead(self):
        # Write with a regular file object, read back with direct I/O.
        data = """Vestibulum. Libero leo nostra, pede nunc eu. Pellentesque
        platea lacus morbi nisl montes ve. Ac. A, consectetuer erat, justo eu.
        Elementum et, phasellus fames et rutrum donec magnis eu bibendum. Arcu,
        ante aliquam ipsum ut facilisis ad."""
        srcFd, srcPath = tempfile.mkstemp()
        f = os.fdopen(srcFd, "wb")
        f.write(data)
        f.flush()
        f.close()
        with fileUtils.open_ex(srcPath, "dr") as f:
            self.assertEquals(f.read(), data)
        os.unlink(srcPath)

    def testSeekRead(self):
        # Direct reads must honour seek(); data is longer than one 512-byte
        # block so the read crosses an alignment boundary.
        data = """
        Habitasse ipsum at fusce litora metus, placerat dui purus aenean ante,
        ve. Pede hymenaeos ut primis cum, rhoncus, lectus, nunc. Vestibulum
        curabitur vitae etiam magna auctor velit, mi tempus vivamus orci eros.
        Pellentesque curabitur risus fermentum eget. Elementum curae, donec
        nisl egestas ve, ut odio eu nunc elit felis primis id. Ridiculus metus
        morbi nulla erat, amet nisi. Amet ligula nisi, id penatibus risus in.
        Purus velit duis. Aenean eget, pellentesque eu rhoncus arcu et
        consectetuer laoreet, augue nisi dictum lacinia urna. Fermentum
        torquent. Ut interdum vivamus duis. Felis consequat nec pede. Orci
        sollicitudin parturient orci felis. Enim, diam velit sapien
        condimentum fames semper nibh. Integer at, egestas pede consectetuer
        ac augue pharetra dolor non placerat quisque id cursus ultricies.
        Ligula mi senectus sit. Habitasse. Integer sollicitudin dapibus cum
        quam.
        """
        self.assertTrue(len(data) > 512)
        srcFd, srcPath = tempfile.mkstemp()
        f = os.fdopen(srcFd, "wb")
        f.write(data)
        f.flush()
        f.close()
        with fileUtils.open_ex(srcPath, "dr") as f:
            f.seek(512)
            self.assertEquals(f.read(), data[512:])
        os.unlink(srcPath)

    def testWrite(self):
        # Write with direct I/O, verify with a regular read.
        data = """In ut non platea egestas, quisque magnis nunc nostra ac etiam
        suscipit nec integer sociosqu. Fermentum. Ante orci luctus, ipsum
        ullamcorper enim arcu class neque inceptos class. Ut, sagittis
        torquent, commodo facilisi."""
        srcFd, srcPath = tempfile.mkstemp()
        os.close(srcFd)
        with fileUtils.open_ex(srcPath, "dw") as f:
            f.write(data)
        with fileUtils.open_ex(srcPath, "r") as f:
            self.assertEquals(f.read(len(data)), data)
        os.unlink(srcPath)

    def testSmallWrites(self):
        # Two partial writes (one below block size) must concatenate cleanly.
        data = """
        Aliquet habitasse tellus. Fringilla faucibus tortor parturient
        consectetuer sodales, venenatis platea habitant. Hendrerit nostra nunc
        odio. Primis porttitor consequat enim ridiculus. Taciti nascetur,
        nibh, convallis sit, cum dis mi. Nonummy justo odio cursus, ac hac
        curabitur nibh. Tellus. Montes, ut taciti orci ridiculus facilisis
        nunc. Donec. Risus adipiscing habitant donec vehicula non vitae class,
        porta vitae senectus. Nascetur felis laoreet integer, tortor ligula.
        Pellentesque vestibulum cras nostra. Ut sollicitudin posuere, per
        accumsan curabitur id, nisi fermentum vel, eget netus tristique per,
        donec, curabitur senectus ut fusce. A. Mauris fringilla senectus et
        eni facilisis magna inceptos eu, cursus habitant fringilla neque.
        Nibh. Elit facilisis sed, elit, nostra ve torquent dictumst, aenean
        sapien quam, habitasse in. Eu tempus aptent, diam, nisi risus
        pharetra, ac, condimentum orci, consequat mollis. Cras lacus augue
        ultrices proin fermentum nibh sed urna. Ve ipsum ultrices curae,
        feugiat faucibus proin et elementum vivamus, lectus. Torquent. Tempus
        facilisi. Cras suspendisse euismod consectetuer ornare nostra. Fusce
        amet cum amet diam.
        """
        self.assertTrue(len(data) > 512)
        srcFd, srcPath = tempfile.mkstemp()
        os.close(srcFd)
        with fileUtils.open_ex(srcPath, "dw") as f:
            f.write(data[:512])
            f.write(data[512:])
        with fileUtils.open_ex(srcPath, "r") as f:
            self.assertEquals(f.read(len(data)), data)
        os.unlink(srcPath)

    def testUpdateRead(self):
        # Append to an existing file via read+write ("r+d") direct mode.
        data = """
        Aliquet. Aliquam eni ac nullam iaculis cras ante, adipiscing. Enim
        eget egestas pretium. Ultricies. Urna cubilia in, hac. Curabitur.
        Nibh. Purus ridiculus natoque sed id. Feugiat lacus quam, arcu
        maecenas nec egestas. Hendrerit duis nunc eget dis lacus porttitor per
        sodales class diam condimentum quisque condimentum nisi ligula.
        Dapibus blandit arcu nam non ac feugiat diam, dictumst. Ante eget
        fames eu penatibus in, porta semper accumsan adipiscing tellus in
        sagittis. Est parturient parturient mi fermentum commodo, per
        fermentum. Quis duis velit at quam risus mi. Facilisi id fames.
        Turpis, conubia rhoncus. Id. Elit eni tellus gravida, ut, erat morbi.
        Euismod, enim a ante vestibulum nibh. Curae curae primis vulputate
        adipiscing arcu ipsum suspendisse quam hymenaeos primis accumsan
        vestibulum.
        """
        self.assertTrue(len(data) > 512)
        srcFd, srcPath = tempfile.mkstemp()
        os.close(srcFd)
        with fileUtils.open_ex(srcPath, "wd") as f:
            f.write(data[:512])
        with fileUtils.open_ex(srcPath, "r+d") as f:
            f.seek(512)
            f.write(data[512:])
        with fileUtils.open_ex(srcPath, "r") as f:
            self.assertEquals(f.read(len(data)), data)
        os.unlink(srcPath)
class ChownTests(TestCaseBase):
    """Tests for fileUtils.chown with numeric ids and with names."""

    @testValidation.ValidateRunningAsRoot
    def test(self):
        # chown by numeric uid/gid.
        targetId = 666
        srcFd, srcPath = tempfile.mkstemp()
        os.close(srcFd)
        fileUtils.chown(srcPath, targetId, targetId)
        stat = os.stat(srcPath)
        self.assertTrue(stat.st_uid == stat.st_gid == targetId)
        os.unlink(srcPath)

    @testValidation.ValidateRunningAsRoot
    def testNames(self):
        # I convert to some id because I have no
        # idea what users are defined and what
        # there IDs are apart from root
        tmpId = 666
        srcFd, srcPath = tempfile.mkstemp()
        os.close(srcFd)
        fileUtils.chown(srcPath, tmpId, tmpId)
        stat = os.stat(srcPath)
        self.assertTrue(stat.st_uid == stat.st_gid == tmpId)
        # chown by name must resolve "root" back to uid/gid 0.
        fileUtils.chown(srcPath, "root", "root")
        stat = os.stat(srcPath)
        self.assertTrue(stat.st_uid == stat.st_gid == 0)
class CopyUserModeToGroupTests(TestCaseBase):
    """Tests that copyUserModeToGroup mirrors the user bits onto the group."""

    # Python 2 octal literal: permission bits we compare against.
    MODE_MASK = 0777

    # format: initialMode, expectedMode
    modesList = [
        (0770, 0770), (0700, 0770), (0750, 0770), (0650, 0660),
    ]

    def testCopyUserModeToGroup(self):
        fd, path = tempfile.mkstemp()
        try:
            os.close(fd)
            for initialMode, expectedMode in self.modesList:
                os.chmod(path, initialMode)
                fileUtils.copyUserModeToGroup(path)
                self.assertEquals(os.stat(path).st_mode & self.MODE_MASK,
                                  expectedMode)
        finally:
            os.unlink(path)
|
try:
from django.conf.urls import url, patterns
except ImportError:
from django.conf.urls.defaults import url, patterns
from actstream import feeds
from actstream import views
from django.contrib.auth.decorators import login_required
# Activity-stream routes: syndication feeds, the follow/unfollow API,
# follower/actor listings, and the login-protected stream views.
# String view names below resolve relative to the 'actstream.views'
# prefix given to patterns().  Order matters for URL resolution.
urlpatterns = patterns('actstream.views',
    # Syndication Feeds
    url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/atom/$',
        feeds.AtomObjectActivityFeed(), name='actstream_object_feed_atom'),
    url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        feeds.ObjectActivityFeed(), name='actstream_object_feed'),
    url(r'^feed/(?P<content_type_id>\d+)/atom/$',
        feeds.AtomModelActivityFeed(), name='actstream_model_feed_atom'),
    url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/as/$',
        feeds.ActivityStreamsObjectActivityFeed(),
        name='actstream_object_feed_as'),
    url(r'^feed/(?P<content_type_id>\d+)/$',
        feeds.ModelActivityFeed(), name='actstream_model_feed'),
    url(r'^feed/$', feeds.UserActivityFeed(), name='actstream_feed'),
    url(r'^feed/atom/$', feeds.AtomUserActivityFeed(),
        name='actstream_feed_atom'),
    # Follow/Unfollow API
    url(r'^follow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        'follow_unfollow', name='actstream_follow'),
    url(r'^follow_all/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        'follow_unfollow', {'actor_only': False}, name='actstream_follow_all'),
    url(r'^unfollow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        'follow_unfollow', {'do_follow': False}, name='actstream_unfollow'),
    # Follower and Actor lists
    url(r'^followers/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        'followers', name='actstream_followers'),
    url(r'^actors/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        'actor', name='actstream_actor'),
    url(r'^actors/(?P<content_type_id>\d+)/$',
        'model', name='actstream_model'),
    # Login-protected stream views; the catch-all username pattern must
    # stay after the more specific routes above.
    url(r'^new_wall_post/$', view=login_required (views.new_wall_post), name='new_wall_post'),
    url(r'^detail/(?P<action_id>\d+)/$', view=login_required(views.detail), name='actstream_detail'),
    url(r'^(?P<username>[-\w]+)/$', view=login_required (views.user), name='actstream_user'),
    url(r'^$', view=login_required (views.stream), name='actstream'),
    url(r'^new_group_post', view=login_required (views.new_group_post), name='new_group_post'),
)
|
class Song(object):
    """A song defined by its list of lyric lines."""

    def __init__(self, lyrics):
        self.lyrics = lyrics

    def sing_me_a_song(self):
        """Print each lyric line on its own line.

        Bug fix: the Python 2 'print line' statement is a syntax error on
        Python 3; the call form prints identically on both versions.
        """
        for line in self.lyrics:
            print(line)
# Demo: build two songs and print their lyrics.
happy_bday = Song(["Happy birthday to you",
                   "I don't want to get sued",
                   "So I'll stop right here"])
bulls_on_parade = Song(["They rally around tha family",
                        "With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
|
from __future__ import absolute_import
import os
import sys
import shutil
import unittest
import xml.dom.minidom
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
from pcs_test_functions import pcs,ac
import rule
empty_cib = "empty.xml"
temp_cib = "temp.xml"
class DateValueTest(unittest.TestCase):
def testParse(self):
for value, item in enumerate(rule.DateCommonValue.allowed_items, 1):
self.assertEquals(
str(value),
rule.DateCommonValue("%s=%s" % (item, value)).parts[item]
)
value = rule.DateCommonValue(
"hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 weeks=6 "
"years=7 weekyears=8 moon=9"
)
self.assertEquals("1", value.parts["hours"])
self.assertEquals("2", value.parts["monthdays"])
self.assertEquals("3", value.parts["weekdays"])
self.assertEquals("4", value.parts["yeardays"])
self.assertEquals("5", value.parts["months"])
self.assertEquals("6", value.parts["weeks"])
self.assertEquals("7", value.parts["years"])
self.assertEquals("8", value.parts["weekyears"])
self.assertEquals("9", value.parts["moon"])
value = rule.DateCommonValue("hours=1 monthdays=2 hours=3")
self.assertEquals("2", value.parts["monthdays"])
self.assertEquals("3", value.parts["hours"])
value = rule.DateCommonValue(" hours=1 monthdays=2 hours=3 ")
self.assertEquals("2", value.parts["monthdays"])
self.assertEquals("3", value.parts["hours"])
self.assertSyntaxError(
"missing one of 'hours=', 'monthdays=', 'weekdays=', 'yeardays=', "
"'months=', 'weeks=', 'years=', 'weekyears=', 'moon=' in date-spec",
"",
rule.DateSpecValue
)
self.assertSyntaxError(
"missing value after 'hours=' in date-spec",
"hours=",
rule.DateSpecValue
)
self.assertSyntaxError(
"missing =value after 'hours' in date-spec",
"hours",
rule.DateSpecValue
)
self.assertSyntaxError(
"unexpected 'foo=bar' in date-spec",
"foo=bar",
rule.DateSpecValue
)
self.assertSyntaxError(
"unexpected 'foo=bar' in date-spec",
"hours=1 foo=bar",
rule.DateSpecValue
)
def testDurationValidate(self):
for value, item in enumerate(rule.DateCommonValue.allowed_items, 1):
self.assertEquals(
str(value),
rule.DateDurationValue("%s=%s" % (item, value)).parts[item]
)
for item in rule.DateCommonValue.allowed_items:
self.assertSyntaxError(
"invalid %s '%s' in 'duration'" % (item, "foo"),
"%s=foo" % item,
rule.DateDurationValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'duration'" % (item, "-1"),
"%s=-1" % item,
rule.DateDurationValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'duration'" % (item, "2foo"),
"%s=2foo" % item,
rule.DateDurationValue
)
def testDateSpecValidation(self):
for item in rule.DateCommonValue.allowed_items:
value = 1
self.assertEquals(
str(value),
rule.DateSpecValue("%s=%s" % (item, value)).parts[item]
)
self.assertEquals(
"%s-%s" % (value, value + 1),
rule.DateSpecValue(
"%s=%s-%s" % (item, value, value + 1)
).parts[item]
)
self.assertEquals(
"hours=9-16 weekdays=1-5",
str(rule.DateSpecValue("hours=9-16 weekdays=1-5"))
)
for item in rule.DateCommonValue.allowed_items:
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "foo"),
"%s=foo" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "1-foo"),
"%s=1-foo" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "foo-1"),
"%s=foo-1" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "1-2-3"),
"%s=1-2-3" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid %s '%s' in 'date-spec'" % (item, "2-1"),
"%s=2-1" % item,
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid hours '24' in 'date-spec'",
"hours=24",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid monthdays '32' in 'date-spec'",
"monthdays=32",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid weekdays '8' in 'date-spec'",
"weekdays=8",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid yeardays '367' in 'date-spec'",
"yeardays=367",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid months '13' in 'date-spec'",
"months=13",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid weeks '54' in 'date-spec'",
"weeks=54",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid weekyears '54' in 'date-spec'",
"weekyears=54",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid moon '8' in 'date-spec'",
"moon=8",
rule.DateSpecValue
)
self.assertSyntaxError(
"invalid hours '12-8' in 'date-spec'",
"hours=12-8",
rule.DateSpecValue
)
def assertSyntaxError(self, syntax_error, parts_string, value_class=None):
value_class = value_class if value_class else rule.DateCommonValue
self.assertRaises(rule.SyntaxError, value_class, parts_string)
try:
value_class(parts_string)
except rule.SyntaxError as e:
self.assertEquals(syntax_error, str(e))
class ParserTest(unittest.TestCase):
    """Tests for rule.RuleParser.

    Token lists are parsed into a syntax tree; tests assert the tree's
    parenthesized string form, or that parsing raises SyntaxError /
    UnexpectedEndOfInput with the expected message.

    The only code changes relative to the original are the replacement of
    the deprecated assertEquals alias (removed in Python 3.12) with
    assertEqual, and a single-invocation assertSyntaxError helper.
    """
    def setUp(self):
        self.parser = rule.RuleParser()
    def testEmptyInput(self):
        self.assertRaises(rule.UnexpectedEndOfInput, self.parser.parse, [])
    def testSingleLiteral(self):
        self.assertSyntaxError(
            "missing one of 'eq', 'ne', 'lt', 'gt', 'lte', 'gte', 'in_range', "
            "'defined', 'not_defined', 'date-spec'",
            ["#uname"]
        )
        self.assertSyntaxError(
            "missing one of 'eq', 'ne', 'lt', 'gt', 'lte', 'gte', 'in_range', "
            "'defined', 'not_defined', 'date-spec'",
            ["string", "node1"]
        )
    def testSingleLiteralDatespec(self):
        self.assertEqual(
            "(date-spec (literal hours=1))",
            str(self.parser.parse(["date-spec", "hours=1"]))
        )
        self.assertEqual(
            "(date-spec (literal hours=1-14 months=1 monthdays=20-30))",
            str(self.parser.parse([
                "date-spec", "hours=1-14 months=1 monthdays=20-30"
            ]))
        )
        self.assertUnexpectedEndOfInput(["date-spec"])
    def testSimpleExpression(self):
        self.assertEqual(
            "(eq (literal #uname) (literal node1))",
            str(self.parser.parse(["#uname", "eq", "node1"]))
        )
        self.assertEqual(
            "(ne (literal #uname) (literal node2))",
            str(self.parser.parse(["#uname", "ne", "node2"]))
        )
        self.assertEqual(
            "(gt (literal int) (literal 123))",
            str(self.parser.parse(["int", "gt", "123"]))
        )
        self.assertEqual(
            "(gte (literal int) (literal 123))",
            str(self.parser.parse(["int", "gte", "123"]))
        )
        self.assertEqual(
            "(lt (literal int) (literal 123))",
            str(self.parser.parse(["int", "lt", "123"]))
        )
        self.assertEqual(
            "(lte (literal int) (literal 123))",
            str(self.parser.parse(["int", "lte", "123"]))
        )
    def testSimpleExpressionBad(self):
        self.assertSyntaxError(
            "unexpected 'eq'",
            ["eq"]
        )
        self.assertUnexpectedEndOfInput(["#uname", "eq"])
        self.assertSyntaxError(
            "unexpected 'node1'",
            ["#uname", "node1"]
        )
        self.assertSyntaxError(
            "unexpected 'eq'",
            ["eq", "#uname"]
        )
        self.assertSyntaxError(
            "unexpected 'eq'",
            ["eq", "lt"]
        )
        self.assertSyntaxError(
            "unexpected 'string' before 'eq'",
            ["string", "#uname", "eq", "node1"]
        )
        self.assertSyntaxError(
            "unexpected 'date-spec' before 'eq'",
            ["date-spec", "hours=1", "eq", "node1"]
        )
        self.assertSyntaxError(
            "unexpected 'date-spec' after 'eq'",
            ["#uname", "eq", "date-spec", "hours=1"]
        )
        self.assertSyntaxError(
            "unexpected 'duration' before 'eq'",
            ["duration", "hours=1", "eq", "node1"]
        )
        self.assertSyntaxError(
            "unexpected 'duration' after 'eq'",
            ["#uname", "eq", "duration", "hours=1"]
        )
    def testDefinedExpression(self):
        self.assertEqual(
            "(defined (literal pingd))",
            str(self.parser.parse(["defined", "pingd"]))
        )
        self.assertEqual(
            "(not_defined (literal pingd))",
            str(self.parser.parse(["not_defined", "pingd"]))
        )
    def testDefinedExpressionBad(self):
        self.assertUnexpectedEndOfInput(["defined"])
        self.assertUnexpectedEndOfInput(["not_defined"])
        self.assertSyntaxError(
            "unexpected 'eq'",
            ["defined", "eq"]
        )
        self.assertSyntaxError(
            "unexpected 'and'",
            ["defined", "and"]
        )
        self.assertSyntaxError(
            "unexpected 'string' after 'defined'",
            ["defined", "string", "pingd"]
        )
        self.assertSyntaxError(
            "unexpected 'date-spec' after 'defined'",
            ["defined", "date-spec", "hours=1"]
        )
        self.assertSyntaxError(
            "unexpected 'duration' after 'defined'",
            ["defined", "duration", "hours=1"]
        )
    def testTypeExpression(self):
        self.assertEqual(
            "(eq (literal #uname) (string (literal node1)))",
            str(self.parser.parse(["#uname", "eq", "string", "node1"]))
        )
        self.assertEqual(
            "(eq (literal #uname) (integer (literal 12345)))",
            str(self.parser.parse(["#uname", "eq", "integer", "12345"]))
        )
        self.assertEqual(
            "(eq (literal #uname) (integer (literal -12345)))",
            str(self.parser.parse(["#uname", "eq", "integer", "-12345"]))
        )
        self.assertEqual(
            "(eq (literal #uname) (version (literal 1)))",
            str(self.parser.parse(["#uname", "eq", "version", "1"]))
        )
        self.assertEqual(
            "(eq (literal #uname) (version (literal 1.2.3)))",
            str(self.parser.parse(["#uname", "eq", "version", "1.2.3"]))
        )
        self.assertEqual(
            "(eq (literal #uname) (string (literal string)))",
            str(self.parser.parse(["#uname", "eq", "string", "string"]))
        )
        self.assertEqual(
            "(eq (literal #uname) (string (literal and)))",
            str(self.parser.parse(["#uname", "eq", "string", "and"]))
        )
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (string (literal integer))) "
                "(ne (literal #uname) (string (literal version)))"
            ")",
            str(self.parser.parse([
                "#uname", "ne", "string", "integer",
                "and",
                "#uname", "ne", "string", "version"
            ]))
        )
    def testTypeExpressionBad(self):
        self.assertUnexpectedEndOfInput(["string"])
        self.assertUnexpectedEndOfInput(["#uname", "eq", "string"])
        self.assertSyntaxError(
            "unexpected 'string' before 'eq'",
            ["string", "#uname", "eq", "node1"]
        )
        self.assertSyntaxError(
            "invalid integer value 'node1'",
            ["#uname", "eq", "integer", "node1"]
        )
        self.assertSyntaxError(
            "invalid version value 'node1'",
            ["#uname", "eq", "version", "node1"]
        )
    def testDateExpression(self):
        self.assertEqual(
            "(gt (literal date) (literal 2014-06-26))",
            str(self.parser.parse(["date", "gt", "2014-06-26"]))
        )
        self.assertEqual(
            "(lt (literal date) (literal 2014-06-26))",
            str(self.parser.parse(["date", "lt", "2014-06-26"]))
        )
        self.assertEqual(
            "(in_range "
                "(literal date) (literal 2014-06-26) (literal 2014-07-26)"
            ")",
            str(self.parser.parse([
                "date", "in_range", "2014-06-26", "to", "2014-07-26"
            ]))
        )
        self.assertEqual(
            "(in_range "
                "(literal date) "
                "(literal 2014-06-26) (duration (literal years=1))"
            ")",
            str(self.parser.parse([
                "date", "in_range", "2014-06-26", "to", "duration", "years=1"
            ]))
        )
    def testDateExpressionBad(self):
        self.assertUnexpectedEndOfInput(
            ["date", "in_range"]
        )
        self.assertSyntaxError(
            "missing 'to'",
            ["date", "in_range", '2014-06-26']
        )
        self.assertUnexpectedEndOfInput(
            ["date", "in_range", "2014-06-26", "to"]
        )
        self.assertSyntaxError(
            "unexpected 'in_range'",
            ["in_range", '2014-06-26', "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "expecting 'to', got 'eq'",
            ["date", "in_range", '#uname', "eq", "node1", "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "invalid date '#uname' in 'in_range ... to'",
            ["date", "in_range", "2014-06-26", "to", '#uname', "eq", "node1"]
        )
        self.assertSyntaxError(
            "unexpected 'defined' after 'in_range'",
            ["date", "in_range", "defined", "pingd", "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "unexpected 'defined' after 'in_range ... to'",
            ["date", "in_range", "2014-06-26", "to", "defined", "pingd"]
        )
        self.assertSyntaxError(
            "unexpected 'string' before 'in_range'",
            ["string", "date", "in_range", '2014-06-26', "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "unexpected 'string' after 'in_range'",
            ["date", "in_range", "string", '2014-06-26', "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "unexpected 'string' after 'in_range ... to'",
            ["date", "in_range", '2014-06-26', "to", "string", "2014-07-26"]
        )
        self.assertSyntaxError(
            "unexpected 'string' after '2014-06-26'",
            ["date", "in_range", '2014-06-26', "string", "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "unexpected '#uname' before 'in_range'",
            ["#uname", "in_range", '2014-06-26', "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "invalid date '2014-13-26' in 'in_range ... to'",
            ["date", "in_range", '2014-13-26', "to", "2014-07-26"]
        )
        self.assertSyntaxError(
            "invalid date '2014-13-26' in 'in_range ... to'",
            ["date", "in_range", '2014-06-26', "to", "2014-13-26"]
        )
    def testAndOrExpression(self):
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (literal node1)) "
                "(ne (literal #uname) (literal node2))"
            ")",
            str(self.parser.parse([
                "#uname", "ne", "node1", "and", "#uname", "ne", "node2"
            ]))
        )
        self.assertEqual(
            "(or "
                "(eq (literal #uname) (literal node1)) "
                "(eq (literal #uname) (literal node2))"
            ")",
            str(self.parser.parse([
                "#uname", "eq", "node1", "or", "#uname", "eq", "node2"
            ]))
        )
        self.assertEqual(
            "(and "
                "(and "
                    "(ne (literal #uname) (literal node1)) "
                    "(ne (literal #uname) (literal node2))"
                ") "
                "(ne (literal #uname) (literal node3))"
            ")",
            str(self.parser.parse([
                "#uname", "ne", "node1",
                "and", "#uname", "ne", "node2",
                "and", "#uname", "ne", "node3"
            ]))
        )
        self.assertEqual(
            "(or "
                "(and "
                    "(ne (literal #uname) (literal node1)) "
                    "(ne (literal #uname) (literal node2))"
                ") "
                "(eq (literal #uname) (literal node3))"
            ")",
            str(self.parser.parse([
                "#uname", "ne", "node1",
                "and", "#uname", "ne", "node2",
                "or", "#uname", "eq", "node3"
            ]))
        )
        self.assertEqual(
            "(and "
                "(or "
                    "(eq (literal #uname) (literal node1)) "
                    "(eq (literal #uname) (literal node2))"
                ") "
                "(ne (literal #uname) (literal node3))"
            ")",
            str(self.parser.parse([
                "#uname", "eq", "node1",
                "or", "#uname", "eq", "node2",
                "and", "#uname", "ne", "node3"
            ]))
        )
        self.assertEqual(
            "(and "
                "(defined (literal pingd)) "
                "(lte (literal pingd) (literal 1))"
            ")",
            str(self.parser.parse([
                "defined", "pingd", "and", "pingd", "lte", "1"
            ]))
        )
        self.assertEqual(
            "(or "
                "(gt (literal pingd) (literal 1)) "
                "(not_defined (literal pingd))"
            ")",
            str(self.parser.parse([
                "pingd", "gt", "1", "or", "not_defined", "pingd"
            ]))
        )
    def testAndOrExpressionDateSpec(self):
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (literal node1)) "
                "(date-spec (literal hours=1-12))"
            ")",
            str(self.parser.parse([
                "#uname", "ne", "node1", "and", "date-spec", "hours=1-12"
            ]))
        )
        self.assertEqual(
            "(or "
                "(date-spec (literal monthdays=1-12)) "
                "(ne (literal #uname) (literal node1))"
            ")",
            str(self.parser.parse([
                "date-spec", "monthdays=1-12", "or", "#uname", "ne", "node1"
            ]))
        )
        self.assertEqual(
            "(or "
                "(date-spec (literal monthdays=1-10)) "
                "(date-spec (literal monthdays=11-20))"
            ")",
            str(self.parser.parse([
                "date-spec", "monthdays=1-10",
                "or",
                "date-spec", "monthdays=11-20"
            ]))
        )
    def testAndOrExpressionDate(self):
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (literal node1)) "
                "(in_range "
                    "(literal date) (literal 2014-06-26) (literal 2014-07-26)"
                ")"
            ")",
            str(self.parser.parse([
                "#uname", "ne", "node1",
                "and",
                "date", "in_range", "2014-06-26", "to", "2014-07-26"
            ]))
        )
        self.assertEqual(
            "(and "
                "(in_range "
                    "(literal date) (literal 2014-06-26) (literal 2014-07-26)"
                ") "
                "(ne (literal #uname) (literal node1))"
            ")",
            str(self.parser.parse([
                "date", "in_range", "2014-06-26", "to", "2014-07-26",
                "and",
                "#uname", "ne", "node1"
            ]))
        )
    def testAndOrExpressionBad(self):
        self.assertSyntaxError(
            "unexpected 'and'",
            ["and"]
        )
        self.assertSyntaxError(
            "unexpected 'or'",
            ["or"]
        )
        self.assertSyntaxError(
            "unexpected '#uname' before 'and'",
            ["#uname", "and", "node1"]
        )
        self.assertSyntaxError(
            "unexpected '#uname' before 'or'",
            ["#uname", "or", "node1"]
        )
        self.assertSyntaxError(
            "unexpected '#uname' before 'or'",
            ["#uname", "or", "eq"]
        )
        self.assertSyntaxError(
            "unexpected 'node2' after 'and'",
            ["#uname", "eq", "node1", "and", "node2"]
        )
        self.assertUnexpectedEndOfInput(["#uname", "eq", "node1", "and"])
        self.assertUnexpectedEndOfInput(
            ["#uname", "eq", "node1", "and", "#uname", "eq"]
        )
        self.assertSyntaxError(
            "unexpected 'and'",
            ["and", "#uname", "eq", "node1"]
        )
        self.assertSyntaxError(
            "unexpected 'duration' after 'and'",
            ["#uname", "ne", "node1", "and", "duration", "hours=1"]
        )
        self.assertSyntaxError(
            "unexpected 'duration' before 'or'",
            ["duration", "monthdays=1", "or", "#uname", "ne", "node1"]
        )
    def testParenthesizedExpression(self):
        self.assertSyntaxError(
            "missing one of 'eq', 'ne', 'lt', 'gt', 'lte', 'gte', 'in_range', "
            "'defined', 'not_defined', 'date-spec'",
            ["(", "#uname", ")"]
        )
        self.assertEqual(
            "(date-spec (literal hours=1))",
            str(self.parser.parse(["(", "date-spec", "hours=1", ")"]))
        )
        self.assertEqual(
            "(eq (literal #uname) (literal node1))",
            str(self.parser.parse(["(", "#uname", "eq", "node1", ")"]))
        )
        self.assertEqual(
            "(defined (literal pingd))",
            str(self.parser.parse(["(", "defined", "pingd", ")"]))
        )
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (literal node1)) "
                "(ne (literal #uname) (literal node2))"
            ")",
            str(self.parser.parse([
                "(",
                "#uname", "ne", "node1", "and", "#uname", "ne", "node2",
                ")"
            ]))
        )
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (literal node1)) "
                "(ne (literal #uname) (literal node2))"
            ")",
            str(self.parser.parse([
                "(", "#uname", "ne", "node1", ")",
                "and",
                "(", "#uname", "ne", "node2", ")"
            ]))
        )
        self.assertEqual(
            "(or "
                "(and "
                    "(ne (literal #uname) (literal node1)) "
                    "(ne (literal #uname) (literal node2))"
                ") "
                "(eq (literal #uname) (literal node3))"
            ")",
            str(self.parser.parse([
                "(",
                "#uname", "ne", "node1", "and", "#uname", "ne", "node2",
                ")",
                "or", "#uname", "eq", "node3"
            ]))
        )
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (literal node1)) "
                "(or "
                    "(ne (literal #uname) (literal node2)) "
                    "(eq (literal #uname) (literal node3))"
                ")"
            ")",
            str(self.parser.parse([
                "#uname", "ne", "node1",
                "and",
                "(", "#uname", "ne", "node2", "or", "#uname", "eq", "node3", ")"
            ]))
        )
        self.assertEqual(
            "(and "
                "(ne (literal #uname) (literal node1)) "
                "(or "
                    "(ne (literal #uname) (literal node2)) "
                    "(eq (literal #uname) (literal node3))"
                ")"
            ")",
            str(self.parser.parse([
                "(", "(",
                    "(", "#uname", "ne", "node1", ")",
                    "and",
                    "(", "(",
                        "(", "#uname", "ne", "node2", ")",
                        "or",
                        "(", "#uname", "eq", "node3", ")",
                    ")", ")",
                ")", ")"
            ]))
        )
        self.assertEqual(
            "(in_range "
                "(literal date) (literal 2014-06-26) (literal 2014-07-26)"
            ")",
            str(self.parser.parse([
                "(", "date", "in_range", "2014-06-26", "to", "2014-07-26", ")"
            ]))
        )
    def testParenthesizedExpressionBad(self):
        self.assertUnexpectedEndOfInput(["("])
        self.assertSyntaxError(
            "unexpected ')'",
            ["(", ")"]
        )
        self.assertSyntaxError(
            "missing ')'",
            ["(", "#uname"]
        )
        self.assertUnexpectedEndOfInput(["(", "#uname", "eq"])
        self.assertSyntaxError(
            "missing ')'",
            ["(", "#uname", "eq", "node1"]
        )
    def assertUnexpectedEndOfInput(self, program):
        """Assert that parsing the token list raises UnexpectedEndOfInput."""
        self.assertRaises(rule.UnexpectedEndOfInput, self.parser.parse, program)
    def assertSyntaxError(self, syntax_error, program):
        """Assert that parsing the token list raises rule.SyntaxError with
        exactly the given message (single parse via the context manager)."""
        with self.assertRaises(rule.SyntaxError) as context:
            self.parser.parse(program)
        self.assertEqual(syntax_error, str(context.exception))
class CibBuilderTest(unittest.TestCase):
    """Tests for rule.CibBuilder.

    Each test parses a token list with rule.RuleParser, builds the
    corresponding CIB XML under a dummy rsc_location constraint and compares
    the pretty-printed result against an expected XML literal.  The XML
    literals are whitespace-sensitive: they must match toprettyxml output
    exactly (see assertExpressionXml).
    """
    def setUp(self):
        # Fresh parser and builder per test; both are stateless between runs.
        self.parser = rule.RuleParser()
        self.builder = rule.CibBuilder()
    def testSingleLiteralDatespec(self):
        """A lone date-spec builds a date_expression with a nested date_spec."""
        self.assertExpressionXml(
            ["date-spec", "hours=1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <date_expression id="location-dummy-rule-expr" operation="date_spec">
            <date_spec hours="1" id="location-dummy-rule-expr-datespec"/>
        </date_expression>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date-spec", "hours=1-14 monthdays=20-30 months=1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <date_expression id="location-dummy-rule-expr" operation="date_spec">
            <date_spec hours="1-14" id="location-dummy-rule-expr-datespec" monthdays="20-30" months="1"/>
        </date_expression>
    </rule>
</rsc_location>
            """
        )
    def testSimpleExpression(self):
        """Binary comparisons become a single expression element."""
        self.assertExpressionXml(
            ["#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "ne", "node1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "gt", "node1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="gt" value="node1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "gte", "node1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="gte" value="node1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "lt", "node1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="lt" value="node1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "lte", "node1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="lte" value="node1"/>
    </rule>
</rsc_location>
            """
        )
    def testTypeExpression(self):
        """A type token adds a type attribute; 'integer' maps to 'number'."""
        self.assertExpressionXml(
            ["#uname", "eq", "string", "node1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" type="string" value="node1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "eq", "integer", "12345"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" type="number" value="12345"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "eq", "version", "1.2.3"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" type="version" value="1.2.3"/>
    </rule>
</rsc_location>
            """
        )
    def testDefinedExpression(self):
        """defined/not_defined become unary expression elements."""
        self.assertExpressionXml(
            ["defined", "pingd"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="pingd" id="location-dummy-rule-expr" operation="defined"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["not_defined", "pingd"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="pingd" id="location-dummy-rule-expr" operation="not_defined"/>
    </rule>
</rsc_location>
            """
        )
    def testDateExpression(self):
        """date gt/lt/in_range become date_expression elements; an in_range
        with a duration gets a nested duration element."""
        self.assertExpressionXml(
            ["date", "gt", "2014-06-26"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <date_expression id="location-dummy-rule-expr" operation="gt" start="2014-06-26"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date", "lt", "2014-06-26"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <date_expression end="2014-06-26" id="location-dummy-rule-expr" operation="lt"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date", "in_range", "2014-06-26", "to", "2014-07-26"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <date_expression end="2014-07-26" id="location-dummy-rule-expr" operation="in_range" start="2014-06-26"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date", "in_range", "2014-06-26", "to", "duration", "years=1"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <date_expression id="location-dummy-rule-expr" operation="in_range" start="2014-06-26">
            <duration id="location-dummy-rule-expr-duration" years="1"/>
        </date_expression>
    </rule>
</rsc_location>
            """
        )
    def testNotDateExpression(self):
        """'date' used with non-date operators or an explicit type is treated
        as an ordinary attribute expression, not a date_expression."""
        self.assertExpressionXml(
            ["date", "eq", "2014-06-26"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="date" id="location-dummy-rule-expr" operation="eq" value="2014-06-26"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date", "gt", "string", "2014-06-26"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="date" id="location-dummy-rule-expr" operation="gt" type="string" value="2014-06-26"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date", "gt", "integer", "12345"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="date" id="location-dummy-rule-expr" operation="gt" type="number" value="12345"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date", "gt", "version", "1.2.3"],
            """
<rsc_location id="location-dummy">
    <rule id="location-dummy-rule">
        <expression attribute="date" id="location-dummy-rule-expr" operation="gt" type="version" value="1.2.3"/>
    </rule>
</rsc_location>
            """
        )
    def testAndOrExpression(self):
        """and/or set boolean-op on the rule; mixing them nests a sub-rule."""
        self.assertExpressionXml(
            ["#uname", "ne", "node1", "and", "#uname", "ne", "node2"],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="and" id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
        <expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node2"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["#uname", "eq", "node1", "or", "#uname", "eq", "node2"],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="or" id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
        <expression attribute="#uname" id="location-dummy-rule-expr-1" operation="eq" value="node2"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            [
                "#uname", "ne", "node1",
                "and", "#uname", "ne", "node2",
                "and", "#uname", "ne", "node3"
            ],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="and" id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
        <expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node2"/>
        <expression attribute="#uname" id="location-dummy-rule-expr-2" operation="ne" value="node3"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            [
                "#uname", "ne", "node1",
                "and", "#uname", "ne", "node2",
                "or", "#uname", "eq", "node3"
            ],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="or" id="location-dummy-rule">
        <rule boolean-op="and" id="location-dummy-rule-rule">
            <expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node1"/>
            <expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="ne" value="node2"/>
        </rule>
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node3"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            [
                "#uname", "eq", "node1",
                "or", "#uname", "eq", "node2",
                "and", "#uname", "ne", "node3"
            ],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="and" id="location-dummy-rule">
        <rule boolean-op="or" id="location-dummy-rule-rule">
            <expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="eq" value="node1"/>
            <expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="eq" value="node2"/>
        </rule>
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node3"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["defined", "pingd", "and", "pingd", "lte", "1"],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="and" id="location-dummy-rule">
        <expression attribute="pingd" id="location-dummy-rule-expr" operation="defined"/>
        <expression attribute="pingd" id="location-dummy-rule-expr-1" operation="lte" value="1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["pingd", "gt", "1", "or", "not_defined", "pingd"],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="or" id="location-dummy-rule">
        <expression attribute="pingd" id="location-dummy-rule-expr" operation="gt" value="1"/>
        <expression attribute="pingd" id="location-dummy-rule-expr-1" operation="not_defined"/>
    </rule>
</rsc_location>
            """
        )
    def testAndOrExpressionDateSpec(self):
        """date-spec operands combine with and/or like any other expression."""
        self.assertExpressionXml(
            ["#uname", "ne", "node1", "and", "date-spec", "hours=1-12"],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="and" id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
        <date_expression id="location-dummy-rule-expr-1" operation="date_spec">
            <date_spec hours="1-12" id="location-dummy-rule-expr-1-datespec"/>
        </date_expression>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date-spec", "monthdays=1-12", "or", "#uname", "ne", "node1"],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="or" id="location-dummy-rule">
        <date_expression id="location-dummy-rule-expr" operation="date_spec">
            <date_spec id="location-dummy-rule-expr-datespec" monthdays="1-12"/>
        </date_expression>
        <expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node1"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            ["date-spec", "monthdays=1-10", "or", "date-spec", "monthdays=11-20"],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="or" id="location-dummy-rule">
        <date_expression id="location-dummy-rule-expr" operation="date_spec">
            <date_spec id="location-dummy-rule-expr-datespec" monthdays="1-10"/>
        </date_expression>
        <date_expression id="location-dummy-rule-expr-1" operation="date_spec">
            <date_spec id="location-dummy-rule-expr-1-datespec" monthdays="11-20"/>
        </date_expression>
    </rule>
</rsc_location>
            """
        )
    def testParenthesizedExpression(self):
        """Parentheses force grouping; same-op groups are flattened into one
        rule, differing ops produce nested sub-rules."""
        self.assertExpressionXml(
            [
                "(",
                "#uname", "ne", "node1", "and", "#uname", "ne", "node2",
                ")",
                "or", "#uname", "eq", "node3"
            ],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="or" id="location-dummy-rule">
        <rule boolean-op="and" id="location-dummy-rule-rule">
            <expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node1"/>
            <expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="ne" value="node2"/>
        </rule>
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node3"/>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            [
                "#uname", "ne", "node1",
                "and",
                "(", "#uname", "ne", "node2", "or", "#uname", "eq", "node3", ")"
            ],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="and" id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
        <rule boolean-op="or" id="location-dummy-rule-rule">
            <expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node2"/>
            <expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="eq" value="node3"/>
        </rule>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            [
                "(",
                "#uname", "ne", "node1", "and", "#uname", "ne", "node2",
                ")",
                "or",
                "(",
                "#uname", "ne", "node3", "and", "#uname", "ne", "node4",
                ")",
            ],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="or" id="location-dummy-rule">
        <rule boolean-op="and" id="location-dummy-rule-rule">
            <expression attribute="#uname" id="location-dummy-rule-rule-expr" operation="ne" value="node1"/>
            <expression attribute="#uname" id="location-dummy-rule-rule-expr-1" operation="ne" value="node2"/>
        </rule>
        <rule boolean-op="and" id="location-dummy-rule-rule-1">
            <expression attribute="#uname" id="location-dummy-rule-rule-1-expr" operation="ne" value="node3"/>
            <expression attribute="#uname" id="location-dummy-rule-rule-1-expr-1" operation="ne" value="node4"/>
        </rule>
    </rule>
</rsc_location>
            """
        )
        self.assertExpressionXml(
            [
                "(",
                "#uname", "ne", "node1", "and", "#uname", "ne", "node2",
                ")",
                "and",
                "(",
                "#uname", "ne", "node3", "and", "#uname", "ne", "node4",
                ")",
            ],
            """
<rsc_location id="location-dummy">
    <rule boolean-op="and" id="location-dummy-rule">
        <expression attribute="#uname" id="location-dummy-rule-expr" operation="ne" value="node1"/>
        <expression attribute="#uname" id="location-dummy-rule-expr-1" operation="ne" value="node2"/>
        <expression attribute="#uname" id="location-dummy-rule-expr-2" operation="ne" value="node3"/>
        <expression attribute="#uname" id="location-dummy-rule-expr-3" operation="ne" value="node4"/>
    </rule>
</rsc_location>
            """
        )
    def assertExpressionXml(self, rule_expression, rule_xml):
        """Parse the token list, build it into a dummy rsc_location constraint
        taken from the empty.xml fixture, and compare the pretty-printed
        constraint against rule_xml.

        rule_xml is normalized by stripping the leading newline and the
        trailing spaces left by the indented closing quotes, so it must
        otherwise match toprettyxml output exactly.
        NOTE(review): relies on the empty.xml test fixture being present in
        the working directory.
        """
        cib_dom = xml.dom.minidom.parse("empty.xml")
        constraints = cib_dom.getElementsByTagName("constraints")[0]
        constraint_el = constraints.appendChild(
            cib_dom.createElement("rsc_location")
        )
        constraint_el.setAttribute("id", "location-dummy")
        ac(
            self.builder.build(
                constraint_el,
                self.parser.parse(rule_expression)
            ).parentNode.toprettyxml(indent="    "),
            rule_xml.lstrip().rstrip(" ")
        )
class TokenPreprocessorTest(unittest.TestCase):
    def setUp(self):
        # Fresh preprocessor per test; it is stateless between run() calls.
        self.preprocessor = rule.TokenPreprocessor()
def testNoChanges(self):
self.assertEquals([], self.preprocessor.run([]))
self.assertEquals(
["#uname", "eq", "node1"],
self.preprocessor.run(["#uname", "eq", "node1"])
)
def testDateSpec(self):
self.assertEquals(
["date-spec"],
self.preprocessor.run(["date-spec"])
)
self.assertEquals(
["date-spec", "hours=14"],
self.preprocessor.run(["date-spec", "hours=14"])
)
self.assertEquals(
["date-spec", "hours weeks=6 months= moon=1"],
self.preprocessor.run(
["date-spec", "hours", "weeks=6", "months=", "moon=1"]
)
)
self.assertEquals(
["date-spec", "foo", "hours=14"],
self.preprocessor.run(["date-spec", "foo", "hours=14"])
)
self.assertEquals(
["date-spec", "hours=14", "foo", "hours=14"],
self.preprocessor.run(["date-spec", "hours=14", "foo", "hours=14"])
)
self.assertEquals(
[
"date-spec",
"hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 "
"weeks=6 years=7 weekyears=8 moon=9"
],
self.preprocessor.run([
"date-spec",
"hours=1", "monthdays=2", "weekdays=3", "yeardays=4",
"months=5","weeks=6", "years=7", "weekyears=8", "moon=9"
])
)
self.assertEquals(
["#uname", "eq", "node1", "or", "date-spec", "hours=14"],
self.preprocessor.run([
"#uname", "eq", "node1", "or", "date-spec", "hours=14"
])
)
self.assertEquals(
["date-spec", "hours=14", "or", "#uname", "eq", "node1"],
self.preprocessor.run([
"date-spec", "hours=14", "or", "#uname", "eq", "node1",
])
)
def testDuration(self):
self.assertEquals(
["duration"],
self.preprocessor.run(["duration"])
)
self.assertEquals(
["duration", "hours=14"],
self.preprocessor.run(["duration", "hours=14"])
)
self.assertEquals(
["duration", "hours weeks=6 months= moon=1"],
self.preprocessor.run(
["duration", "hours", "weeks=6", "months=", "moon=1"]
)
)
self.assertEquals(
["duration", "foo", "hours=14"],
self.preprocessor.run(["duration", "foo", "hours=14"])
)
self.assertEquals(
["duration", "hours=14", "foo", "hours=14"],
self.preprocessor.run(["duration", "hours=14", "foo", "hours=14"])
)
self.assertEquals(
[
"duration",
"hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 "
"weeks=6 years=7 weekyears=8 moon=9"
],
self.preprocessor.run([
"duration",
"hours=1", "monthdays=2", "weekdays=3", "yeardays=4",
"months=5","weeks=6", "years=7", "weekyears=8", "moon=9"
])
)
self.assertEquals(
["#uname", "eq", "node1", "or", "duration", "hours=14"],
self.preprocessor.run([
"#uname", "eq", "node1", "or", "duration", "hours=14"
])
)
self.assertEquals(
["duration", "hours=14", "or", "#uname", "eq", "node1"],
self.preprocessor.run([
"duration", "hours=14", "or", "#uname", "eq", "node1",
])
)
def testOperationDatespec(self):
self.assertEquals(
["date-spec", "weeks=6 moon=1"],
self.preprocessor.run(
["date-spec", "operation=date_spec", "weeks=6", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6 moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "operation=date_spec", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6", "foo", "moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "operation=date_spec", "foo", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6", "foo", "operation=date_spec", "moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "foo", "operation=date_spec", "moon=1"]
)
)
self.assertEquals(
["date-spec", "weeks=6 moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "moon=1", "operation=date_spec"]
)
)
self.assertEquals(
["date-spec", "weeks=6 moon=1", "foo"],
self.preprocessor.run(
["date-spec", "weeks=6", "moon=1", "operation=date_spec", "foo"]
)
)
self.assertEquals(
["date-spec"],
self.preprocessor.run(
["date-spec", "operation=date_spec"]
)
)
self.assertEquals(
["date-spec", "weeks=6", "operation=foo", "moon=1"],
self.preprocessor.run(
["date-spec", "weeks=6", "operation=foo", "moon=1"]
)
)
def testDateLegacySyntax(self):
# valid syntax
self.assertEquals(
["date", "gt", "2014-06-26"],
self.preprocessor.run([
"date", "start=2014-06-26", "gt"
])
)
self.assertEquals(
["date", "lt", "2014-06-26"],
self.preprocessor.run([
"date", "end=2014-06-26", "lt"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "in_range"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26"],
self.preprocessor.run([
"date", "end=2014-07-26", "start=2014-06-26", "in_range"
])
)
self.assertEquals(
["date", "gt", "2014-06-26", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "gt", "foo"
])
)
self.assertEquals(
["date", "lt", "2014-06-26", "foo"],
self.preprocessor.run([
"date", "end=2014-06-26", "lt", "foo"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "in_range", "foo"
])
)
self.assertEquals(
["date", "in_range", "2014-06-26", "to", "2014-07-26", "foo"],
self.preprocessor.run([
"date", "end=2014-07-26", "start=2014-06-26", "in_range", "foo"
])
)
# invalid syntax - no change
self.assertEquals(
["date"],
self.preprocessor.run([
"date"
])
)
self.assertEquals(
["date", "start=2014-06-26"],
self.preprocessor.run([
"date", "start=2014-06-26"
])
)
self.assertEquals(
["date", "end=2014-06-26"],
self.preprocessor.run([
"date", "end=2014-06-26"
])
)
self.assertEquals(
["date", "start=2014-06-26", "end=2014-07-26"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26"
])
)
self.assertEquals(
["date", "start=2014-06-26", "end=2014-07-26", "lt"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "lt"
])
)
self.assertEquals(
["date", "start=2014-06-26", "lt", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "lt", "foo"
])
)
self.assertEquals(
["date", "start=2014-06-26", "end=2014-07-26", "gt", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "end=2014-07-26", "gt", "foo"
])
)
self.assertEquals(
["date", "end=2014-06-26", "gt"],
self.preprocessor.run([
"date", "end=2014-06-26", "gt"
])
)
self.assertEquals(
["date", "start=2014-06-26", "in_range", "foo"],
self.preprocessor.run([
"date", "start=2014-06-26", "in_range", "foo"
])
)
self.assertEquals(
["date", "end=2014-07-26", "in_range"],
self.preprocessor.run([
"date", "end=2014-07-26", "in_range"
])
)
self.assertEquals(
["foo", "start=2014-06-26", "gt"],
self.preprocessor.run([
"foo", "start=2014-06-26", "gt"
])
)
self.assertEquals(
["foo", "end=2014-06-26", "lt"],
self.preprocessor.run([
"foo", "end=2014-06-26", "lt"
])
)
def testParenthesis(self):
self.assertEquals(
["("],
self.preprocessor.run(["("])
)
self.assertEquals(
[")"],
self.preprocessor.run([")"])
)
self.assertEquals(
["(", "(", ")", ")"],
self.preprocessor.run(["(", "(", ")", ")"])
)
self.assertEquals(
["(", "(", ")", ")"],
self.preprocessor.run(["(())"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a", "(", "b", ")", "c"])
)
self.assertEquals(
["a", "(", "b", "c", ")", "d"],
self.preprocessor.run(["a", "(", "b", "c", ")", "d"])
)
self.assertEquals(
["a", ")", "b", "(", "c"],
self.preprocessor.run(["a", ")", "b", "(", "c"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a", "(b)", "c"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a(", "b", ")c"])
)
self.assertEquals(
["a", "(", "b", ")", "c"],
self.preprocessor.run(["a(b)c"])
)
self.assertEquals(
["aA", "(", "bB", ")", "cC"],
self.preprocessor.run(["aA(bB)cC"])
)
self.assertEquals(
["(", "aA", "(", "bB", ")", "cC", ")"],
self.preprocessor.run(["(aA(bB)cC)"])
)
self.assertEquals(
["(", "aA", "(", "(", "bB", ")", "cC", ")"],
self.preprocessor.run(["(aA(", "(bB)cC)"])
)
self.assertEquals(
["(", "aA", "(", "(", "(", "bB", ")", "cC", ")"],
self.preprocessor.run(["(aA(", "(", "(bB)cC)"])
)
class ExportAsExpressionTest(unittest.TestCase):
    """Tests for rule.ExportAsExpression: rendering a rule XML element back
    into the command-line rule expression syntax, both verbatim and in
    normalized form.
    """

    def test_success(self):
        """Rule XML documents export to the expected expression strings."""
        # simple attribute expression; the normalized form spells out the
        # (implicit) string type
        self.assertXmlExport(
            """
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr"
operation="eq" value="node1"/>
</rule>
""",
            "#uname eq node1",
            "#uname eq string node1"
        )
        # typed expression: explicit version type survives both forms
        self.assertXmlExport(
            """
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="foo" id="location-dummy-rule-expr"
operation="gt" type="version" value="1.2.3"/>
</rule>
""",
            "foo gt version 1.2.3",
            "foo gt version 1.2.3"
        )
        # nested rules with date_expression / date_spec / duration elements;
        # the normalized export reorders the operands (visible in the
        # expected strings below)
        self.assertXmlExport(
            """
<rule boolean-op="or" id="complexRule" score="INFINITY">
<rule boolean-op="and" id="complexRule-rule-1" score="0">
<date_expression id="complexRule-rule-1-expr" operation="date_spec">
<date_spec id="complexRule-rule-1-expr-datespec" weekdays="1-5" hours="12-23"/>
</date_expression>
<date_expression id="complexRule-rule-1-expr-1" operation="in_range" start="2014-07-26">
<duration id="complexRule-rule-1-expr-1-duration" months="1"/>
</date_expression>
</rule>
<rule boolean-op="and" id="complexRule-rule" score="0">
<expression attribute="foo" id="complexRule-rule-expr-1" operation="gt" type="version" value="1.2"/>
<expression attribute="#uname" id="complexRule-rule-expr" operation="eq" value="node3 4"/>
</rule>
</rule>
""",
            "(date-spec hours=12-23 weekdays=1-5 and date in_range 2014-07-26 to duration months=1) or (foo gt version 1.2 and #uname eq \"node3 4\")",
            "(#uname eq string \"node3 4\" and foo gt version 1.2) or (date in_range 2014-07-26 to duration months=1 and date-spec hours=12-23 weekdays=1-5)"
        )

    def assertXmlExport(self, rule_xml, export, export_normalized):
        """Assert that rule_xml exports to `export` (normalize=False) and to
        `export_normalized` (normalize=True).
        """
        # non-normalized export must reproduce the original expression
        ac(
            export + "\n",
            rule.ExportAsExpression().get_string(
                xml.dom.minidom.parseString(rule_xml).documentElement,
                normalize=False
            ) + "\n"
        )
        # normalized export may reorder/retype operands
        ac(
            export_normalized + "\n",
            rule.ExportAsExpression().get_string(
                xml.dom.minidom.parseString(rule_xml).documentElement,
                normalize=True
            ) + "\n"
        )
class DomRuleAddTest(unittest.TestCase):
    """Tests for rule.dom_rule_add and the 'pcs constraint location ... rule'
    command line: XML produced for valid rules, CLI output, and error
    handling for invalid scores, expressions and options.
    """

    def setUp(self):
        # start from an empty CIB containing a single dummy resource
        shutil.copy(empty_cib, temp_cib)
        output, returnVal = pcs(temp_cib, "resource create dummy1 Dummy")
        assert returnVal == 0 and output == ""

    def test_success_xml(self):
        """Valid rule expressions produce the expected constraint XML."""
        self.assertExpressionXml(
            ["#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )
        self.assertExpressionXml(
            ["id=myRule", "#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="myRule" score="INFINITY">
<expression attribute="#uname" id="myRule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )
        self.assertExpressionXml(
            ["score=INFINITY", "#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )
        self.assertExpressionXml(
            ["score=100", "#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score="100">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )
        self.assertExpressionXml(
            ["score-attribute=pingd", "#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" score-attribute="pingd">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )
        self.assertExpressionXml(
            ["role=master", "#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" role="master" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )
        self.assertExpressionXml(
            ["role=slave", "#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="location-dummy-rule" role="slave" score="INFINITY">
<expression attribute="#uname" id="location-dummy-rule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )
        self.assertExpressionXml(
            ["score=100", "id=myRule", "role=master", "#uname", "eq", "node1"],
            """
<rsc_location id="location-dummy">
<rule id="myRule" role="master" score="100">
<expression attribute="#uname" id="myRule-expr" operation="eq" value="node1"/>
</rule>
</rsc_location>
"""
        )

    def test_success(self):
        """Valid rules are added and shown by 'constraint location show'."""
        # NOTE: assertEquals is a deprecated alias; use assertEqual.
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule #uname eq node1"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule id=MyRule score=100 role=master #uname eq node2"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule id=complexRule (#uname eq node3 and foo gt version 1.2) or (date-spec hours=12-23 weekdays=1-5 and date in_range 2014-07-26 to duration months=1)"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(temp_cib, "constraint location show --full")
        ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score=INFINITY (id:location-dummy1-rule)
Expression: #uname eq node1 (id:location-dummy1-rule-expr)
Constraint: location-dummy1-1
Rule: score=100 role=master (id:MyRule)
Expression: #uname eq node2 (id:MyRule-expr)
Constraint: location-dummy1-2
Rule: score=INFINITY boolean-op=or (id:complexRule)
Rule: score=0 boolean-op=and (id:complexRule-rule)
Expression: #uname eq node3 (id:complexRule-rule-expr)
Expression: foo gt version 1.2 (id:complexRule-rule-expr-1)
Rule: score=0 boolean-op=and (id:complexRule-rule-1)
Expression: (id:complexRule-rule-1-expr)
Date Spec: hours=12-23 weekdays=1-5 (id:complexRule-rule-1-expr-datespec)
Expression: date in_range 2014-07-26 to duration (id:complexRule-rule-1-expr-1)
Duration: months=1 (id:complexRule-rule-1-expr-1-duration)
""")
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(temp_cib, "constraint location show")
        ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score=INFINITY
Expression: #uname eq node1
Constraint: location-dummy1-1
Rule: score=100 role=master
Expression: #uname eq node2
Constraint: location-dummy1-2
Rule: score=INFINITY boolean-op=or
Rule: score=0 boolean-op=and
Expression: #uname eq node3
Expression: foo gt version 1.2
Rule: score=0 boolean-op=and
Expression:
Date Spec: hours=12-23 weekdays=1-5
Expression: date in_range 2014-07-26 to duration
Duration: months=1
""")
        self.assertEqual(0, returnVal)

    def test_invalid_score(self):
        """A non-numeric score falls back to score-attribute with a warning."""
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule score=pingd defined pingd"
        )
        ac(
            output,
            "Warning: invalid score 'pingd', setting score-attribute=pingd "
            "instead\n"
        )
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(temp_cib, "constraint location show --full")
        ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score-attribute=pingd (id:location-dummy1-rule)
Expression: defined pingd (id:location-dummy1-rule-expr)
""")
        self.assertEqual(0, returnVal)

    def test_invalid_rule(self):
        """Malformed rule expressions are rejected with an error message."""
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule score=100"
        )
        ac(output, "Error: no rule expression was specified\n")
        self.assertEqual(1, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule #uname eq"
        )
        ac(
            output,
            "Error: '#uname eq' is not a valid rule expression: unexpected end "
            "of rule\n"
        )
        self.assertEqual(1, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule string #uname eq node1"
        )
        ac(
            output,
            "Error: 'string #uname eq node1' is not a valid rule expression: "
            "unexpected 'string' before 'eq'\n"
        )
        self.assertEqual(1, returnVal)

    # NOTE: method name typo ("ivalid") kept on purpose - it is the public
    # test id and renaming it would change which tests runners select.
    def test_ivalid_options(self):
        """Invalid rule options (role, score conflicts, bad/duplicate ids)
        are rejected and leave the CIB unchanged.
        """
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule role=foo #uname eq node1"
        )
        ac(output, "Error: invalid role 'foo', use 'master' or 'slave'\n")
        self.assertEqual(1, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule score=100 score-attribute=pingd #uname eq node1"
        )
        ac(output, "Error: can not specify both score and score-attribute\n")
        self.assertEqual(1, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule id=1foo #uname eq node1"
        )
        ac(
            output,
            "Error: invalid rule id '1foo', '1' is not a valid first character "
            "for a rule id\n"
        )
        self.assertEqual(1, returnVal)
        output, returnVal = pcs(temp_cib, "constraint location show --full")
        ac(output, "Location Constraints:\n")
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule id=MyRule #uname eq node1"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(temp_cib, "constraint location show --full")
        ac(output, """\
Location Constraints:
Resource: dummy1
Constraint: location-dummy1
Rule: score=INFINITY (id:MyRule)
Expression: #uname eq node1 (id:MyRule-expr)
""")
        self.assertEqual(0, returnVal)
        output, returnVal = pcs(
            temp_cib,
            "constraint location dummy1 rule id=MyRule #uname eq node1"
        )
        ac(
            output,
            "Error: id 'MyRule' is already in use, please specify another one\n"
        )
        self.assertEqual(1, returnVal)

    def assertExpressionXml(self, rule_expression, rule_xml):
        """Parse rule_expression, add it to a fresh rsc_location element and
        compare the pretty-printed result against rule_xml.
        """
        cib_dom = xml.dom.minidom.parse("empty.xml")
        constraints = cib_dom.getElementsByTagName("constraints")[0]
        constraint_el = constraints.appendChild(
            cib_dom.createElement("rsc_location")
        )
        constraint_el.setAttribute("id", "location-dummy")
        options, rule_argv = rule.parse_argv(rule_expression)
        rule.dom_rule_add(constraint_el, options, rule_argv)
        # toprettyxml uses one-space indent; the expected literal is stored
        # with a leading newline and trailing spaces stripped off here
        ac(
            constraint_el.toprettyxml(indent=" "),
            rule_xml.lstrip().rstrip(" ")
        )
# Run the tests in this module when it is executed directly.
if __name__ == "__main__":
    unittest.main()
|
"""
.. module:: poes
:synopsis: A module for reading, writing, and storing poes Data
.. moduleauthor:: AJ, 20130129
*********************
**Module**: gme.sat.poes
*********************
**Classes**:
* :class:`poesRec`
**Functions**:
* :func:`readPoes`
* :func:`readPoesFtp`
* :func:`mapPoesMongo`
* :func:`overlayPoesTed`
"""
from davitpy.gme.base.gmeBase import gmeData
class poesRec(gmeData):
    """a class to represent a record of poes data.  Extends :class:`gmeBase.gmeData`.  Insight on the class members can be obtained from `the NOAA NGDC site <ftp://satdat.ngdc.noaa.gov/sem/poes/data/readme.txt>`_.  Note that Poes data is available from 1998-present day (or whatever the latest NOAA has uploaded is).  **The data are the 16-second averages**

    **Members**:
        * **time** (`datetime <http://tinyurl.com/bl352yx>`_): an object identifying which time these data are for
        * **info** (str): information about where the data come from.  *Please be courteous and give credit to data providers when credit is due.*
        * **dataSet** (str): the name of the data set
        * **satnum** (int): the noaa satellite number
        * **sslat** (float): Geographic Latitude of sub-satellite point, degrees
        * **sslon** (float): Geographic Longitude of sub-satellite point, degrees
        * **folat** (float): Geographic Latitude of foot-of-field-line, degrees
        * **folon** (float): Geographic Longitude of foot-of-field-line, degrees
        * **lval** (float): L-value
        * **mlt** (float): Magnetic local time of foot-of-field-line, degrees
        * **pas0** (float): MEPED-0 pitch angle at satellite, degrees
        * **pas90** (float): MEPED-90 pitch angle at satellite, degrees
        * **mep0e1** (float): MEPED-0 > 30 keV electrons, counts/sec
        * **mep0e2** (float): MEPED-0 > 100 keV electrons, counts/sec
        * **mep0e3** (float): MEPED-0 > 300 keV electrons, counts/sec
        * **mep0p1** (float): MEPED-0 30 keV to 80 keV protons, counts/sec
        * **mep0p2** (float): MEPED-0 80 keV to 240 keV protons, counts/sec
        * **mep0p3** (float): MEPED-0 240 kev to 800 keV protons, counts/sec
        * **mep0p4** (float): MEPED-0 800 keV to 2500 keV protons, counts/sec
        * **mep0p5** (float): MEPED-0 2500 keV to 6900 keV protons, counts/sec
        * **mep0p6** (float): MEPED-0 > 6900 keV protons, counts/sec
        * **mep90e1** (float): MEPED-90 > 30 keV electrons, counts/sec
        * **mep90e2** (float): MEPED-90 > 100 keV electrons, counts/sec
        * **mep90e3** (float): MEPED-90 > 300 keV electrons, counts/sec
        * **mep90p1** (float): MEPED-90 30 keV to 80 keV protons, counts/sec
        * **mep90p2** (float): MEPED-90 80 keV to 240 keV protons, counts/sec
        * **mep90p3** (float): MEPED-90 240 kev to 800 keV protons, counts/sec
        * **mep90p4** (float): MEPED-90 800 keV to 2500 keV protons, counts/sec
        * **mep90p5** (float): MEPED-90 2500 keV to 6900 keV protons, counts/sec
        * **mep90p6** (float): MEPED-90 > 6900 keV protons, counts/sec
        * **mepomp6** (float): MEPED omni-directional > 16 MeV protons, counts/sec
        * **mepomp7** (float): MEPED omni-directional > 36 Mev protons, counts/sec
        * **mepomp8** (float): MEPED omni-directional > 70 MeV protons, counts/sec
        * **mepomp9** (float): MEPED omni-directional >= 140 MeV protons
        * **ted** (float): TED, Total Energy Detector Average, ergs/cm2/sec
        * **echar** (float): TED characteristic energy of electrons, eV
        * **pchar** (float): TED characteristic energy of protons, eV
        * **econtr** (float): TED electron contribution, Electron Energy/Total Energy
    .. note::
        If any of the members have a value of None, this means that they could not be read for that specific time
    **Methods**:
        * :func:`parseFtp`
    **Example**:
        ::
            emptyPoesObj = gme.sat.poesRec()

    written by AJ, 20130131
    """

    # all data members that are filled from an FTP data line; used both by
    # __init__ (to initialize them to None) and implicitly by parseFtp
    # (which matches attribute names against the file header)
    _DATA_ATTRS = (
        'sslat', 'sslon', 'folat', 'folon', 'lval', 'mlt', 'pas0', 'pas90',
        'mep0e1', 'mep0e2', 'mep0e3',
        'mep0p1', 'mep0p2', 'mep0p3', 'mep0p4', 'mep0p5', 'mep0p6',
        'mep90e1', 'mep90e2', 'mep90e3',
        'mep90p1', 'mep90p2', 'mep90p3', 'mep90p4', 'mep90p5', 'mep90p6',
        'mepomp6', 'mepomp7', 'mepomp8', 'mepomp9',
        'ted', 'echar', 'pchar', 'econtr',
    )

    def parseFtp(self, line, header):
        """This method is used to convert a line of poes data read from the NOAA NGDC FTP site into a :class:`poesRec` object.

        .. note::
            In general, users will not need to worry about this.

        **Belongs to**: :class:`poesRec`

        **Args**:
            * **line** (str): the ASCII line from the FTP server
            * **header** (str): the header line of the FTP file, used to map
                column positions to attribute names
        **Returns**:
            * Nothing.
        **Example**:
            ::
                myPoesObj.parseFtp(ftpLine)

        written by AJ, 20130131
        """
        import datetime as dt
        # split the line and the header into columns
        cols = line.split()
        head = header.split()
        # cols[5] holds seconds with a fractional part; split it into whole
        # seconds and microseconds for the datetime constructor
        secs = float(cols[5])
        self.time = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]),
                                int(cols[3]), int(cols[4]),
                                int(secs), int(round((secs - int(secs)) * 1e6)))
        for key in list(self.__dict__.keys()):
            # skip members that do not come from data columns
            if key in ('dataSet', 'info', 'satnum', 'time'):
                continue
            try:
                ind = head.index(key)
            except Exception as e:
                # BUG FIX: previously execution fell through with 'ind'
                # undefined, raising a NameError on the next line
                print(e)
                print('problem setting attribute ' + key)
                continue
            # -999. is the fill value for missing data
            if float(cols[ind]) != -999.:
                setattr(self, key, float(cols[ind]))

    def __init__(self, ftpLine=None, dbDict=None, satnum=None, header=None):
        """the intialization function for a :class:`poesRec` object.

        .. note::
            In general, users will not need to worry about this.

        **Belongs to**: :class:`poesRec`

        **Args**:
            * [**ftpLine**] (str): an ASCII line from the FTP server.  if this is provided, the object is initialized from it.  header must be provided in conjunction with this.  default=None
            * [**header**] (str): the header from the ASCII FTP file.  default=None
            * [**dbDict**] (dict): a dictionary read from the mongodb.  if this is provided, the object is initialized from it.  default = None
            * [**satnum**] (int): the satellite number.  default=None
        **Returns**:
            * Nothing.
        **Example**:
            ::
                myPoesObj = poesRec(ftpLine=aftpLine)

        written by AJ, 20130131
        """
        # note about where data came from
        # NOTE(review): the class docstring says NOAA NGDC; this provenance
        # string says NASA SPDF - kept as-is because it is stored in the db
        self.dataSet = 'Poes'
        self.info = 'These data were downloaded from NASA SPDF. *Please be courteous and give credit to data providers when credit is due.*'
        self.satnum = satnum
        # initialize every data member to None (meaning: not read)
        for attr in self._DATA_ATTRS:
            setattr(self, attr, None)
        # if we're initializing from an ftp line or a db record, do it!
        if ftpLine is not None:
            self.parseFtp(ftpLine, header)
        if dbDict is not None:
            self.parseDb(dbDict)
def readPoes(sTime, eTime=None, satnum=None, folat=None, folon=None, ted=None,
             echar=None, pchar=None):
    """This function reads poes data from the mongodb.  The data are 16-second averages.

    **Args**:
        * **sTime** (`datetime <http://tinyurl.com/bl352yx>`_): the earliest time you want data for
        * [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for.  if this is None, end Time will be 1 day after sTime.  default = None
        * [**satnum**] (int or None): the satellite you want data for.  eg 17 for noaa17.  if this is None, data for all satellites will be returned.  default = None
        * [**folat**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b].  In this case, only data with folat values in the range [a,b] will be returned.  default = None
        * [**folon**] (list or None): as folat, for folon values.  default = None
        * [**ted**] (list or None): as folat, for ted values.  default = None
        * [**echar**] (list or None): as folat, for echar values.  default = None
        * [**pchar**] (list or None): as folat, for pchar values.  default = None
    **Returns**:
        * **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned.  If no data is found, None is returned.
    **Example**:
        ::
            import datetime as dt
            poesList = gme.sat.readPoes(sTime=dt.datetime(2011,1,1),eTime=dt.datetime(2011,6,1),folat=[60,80])

    written by AJ, 20130131
    """
    import datetime as dt
    import davitpy.pydarn.sdio.dbUtils as db
    # check all the inputs for validity
    assert isinstance(sTime, dt.datetime), \
        'error, sTime must be a datetime object'
    assert eTime is None or isinstance(eTime, dt.datetime), \
        'error, eTime must be either None or a datetime object'
    assert satnum is None or isinstance(satnum, int), 'error, satnum must be an int'
    # all optional [min,max] range filters, keyed by their db field name
    range_filters = {'folat': folat, 'folon': folon, 'ted': ted,
                     'echar': echar, 'pchar': pchar}
    for name in ['folat', 'folon', 'ted', 'echar', 'pchar']:
        rng = range_filters[name]
        assert rng is None or (isinstance(rng, list) and
                               isinstance(rng[0], (int, float)) and
                               isinstance(rng[1], (int, float))), \
            'error,' + name + ' must None or a list of 2 numbers'
    if eTime is None:
        eTime = sTime + dt.timedelta(days=1)
    # build the query from the provided arguments; the time bounds are
    # always present, so the query list is never empty
    qryList = [{'time': {'$gte': sTime}}, {'time': {'$lte': eTime}}]
    if satnum is not None:
        qryList.append({'satnum': satnum})
    for name in ['folat', 'folon', 'ted', 'echar', 'pchar']:
        rng = range_filters[name]
        if rng is not None:
            qryList.append({name: {'$gte': min(rng)}})
            qryList.append({name: {'$lte': max(rng)}})
    # construct the final query definition
    qryDict = {'$and': qryList}
    # connect to the database and do the query
    poesData = db.getDataConn(dbName='gme', collName='poes')
    qry = poesData.find(qryDict)
    if qry.count() > 0:
        poesList = [poesRec(dbDict=rec) for rec in qry.sort('time')]
        print('\nreturning a list with %d records of poes data' % len(poesList))
        return poesList
    # if we didn't find anything on the mongodb
    else:
        # NOTE: a fallback to readPoesFtp used to live here but was disabled;
        # call readPoesFtp directly if FTP access is needed
        print('\ncould not find requested data in the mongodb')
        return None
def readPoesFtp(sTime, eTime=None):
    """This function reads poes data from the NOAA NGDC server via anonymous FTP connection.

    .. warning::
        You should not use this.  Use the general function :func:`readPoes` instead.

    **Args**:
        * **sTime** (`datetime <http://tinyurl.com/bl352yx>`_): the earliest time you want data for
        * [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for.  if this is None, eTime will be equal 1 day after sTime.  default = None
    **Returns**:
        * **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned.  If no data is found, None is returned.
    **Example**:
        ::
            import datetime as dt
            poesList = gme.sat.readPoesFtp(dt.datetime(2011,1,1,1,50),eTime=dt.datetime(2011,1,1,10,0))

    written by AJ, 20130128
    """
    from ftplib import FTP
    import datetime as dt
    assert isinstance(sTime, dt.datetime), 'error, sTime must be datetime'
    if eTime is None:
        eTime = sTime + dt.timedelta(days=1)
    assert isinstance(eTime, dt.datetime), 'error, eTime must be datetime'
    assert eTime >= sTime, 'error, end time greater than start time'
    # connect to the server
    try:
        ftp = FTP('satdat.ngdc.noaa.gov')
    except Exception as e:
        print(e)
        print('problem connecting to NOAA server')
        return None
    # login as anonymous
    try:
        ftp.login()
    except Exception as e:
        print(e)
        print('problem logging in to NOAA server')
        return None
    myPoes = []
    # step through the requested interval one day at a time
    myTime = dt.datetime(sTime.year, sTime.month, sTime.day)
    while myTime <= eTime:
        # go to the data directory for this year
        try:
            ftp.cwd('/sem/poes/data/avg/txt/' + str(myTime.year))
        except Exception as e:
            print(e)
            print('error getting to data directory')
            return None
        # list directory contents
        dirlist = ftp.nlst()
        for dire in dirlist:
            # satellite data live in directories named 'noaaNN'
            if dire.find('noaa') == -1:
                continue
            satnum = dire.replace('noaa', '')
            # change to the satellite's file directory
            ftp.cwd('/sem/poes/data/avg/txt/' + str(myTime.year) + '/' + dire)
            fname = 'poes_n' + satnum + '_' + myTime.strftime("%Y%m%d") + '.txt'
            print('poes: RETR ' + fname)
            # retrieve the daily file into a list of lines
            lines = []
            try:
                ftp.retrlines('RETR ' + fname, lines.append)
            except Exception as e:
                print(e)
                print('error retrieving ' + fname)
            # convert the ascii lines into poesRec objects,
            # skipping the first (header) line
            for line in lines[1:]:
                cols = line.split()
                t = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]),
                                int(cols[3]), int(cols[4]))
                if sTime <= t <= eTime:
                    myPoes.append(poesRec(ftpLine=line, satnum=int(satnum),
                                          header=lines[0]))
        # increment myTime
        myTime += dt.timedelta(days=1)
    if len(myPoes) > 0:
        return myPoes
    else:
        return None
def mapPoesMongo(sYear, eYear=None):
    """This function reads poes data from the NOAA NGDC FTP server via anonymous FTP connection and maps it to the mongodb.

    .. warning::
        In general, nobody except the database admins will need to use this function

    **Args**:
        * **sYear** (int): the year to begin mapping data
        * [**eYear**] (int or None): the end year for mapping data.  if this is None, eYear will be sYear
    **Returns**:
        * Nothing.
    **Example**:
        ::
            gme.sat.mapPoesMongo(2004)

    written by AJ, 20130131
    """
    import davitpy.pydarn.sdio.dbUtils as db
    from davitpy import rcParams
    import datetime as dt
    # check inputs
    assert isinstance(sYear, int), 'error, sYear must be int'
    if eYear is None:
        eYear = sYear
    assert isinstance(eYear, int), 'error, eYear must be None or int'
    assert eYear >= sYear, 'error, end year greater than start year'
    # get a writable data connection
    mongoData = db.getDataConn(username=rcParams['DBWRITEUSER'],
                               password=rcParams['DBWRITEPASS'],
                               dbAddress=rcParams['SDDB'],
                               dbName='gme', collName='poes')
    # set up all of the indices
    for field in ('time', 'satnum', 'folat', 'folon', 'ted', 'echar', 'pchar'):
        mongoData.ensure_index(field)
    # read the poes data from the FTP server, 10 days at a time
    # so as to not fill up RAM
    myTime = dt.datetime(sYear, 1, 1)
    while myTime < dt.datetime(eYear + 1, 1, 1):
        templist = readPoesFtp(myTime, myTime + dt.timedelta(days=10))
        if templist is None:
            # BUG FIX: the window must be advanced even when no data were
            # found; previously 'continue' skipped the increment below and
            # a 10-day gap in the data caused an infinite loop
            myTime += dt.timedelta(days=10)
            continue
        for rec in templist:
            # check if a duplicate record exists
            qry = mongoData.find({'$and': [{'time': rec.time},
                                           {'satnum': rec.satnum}]})
            print('%s %s' % (rec.time, rec.satnum))
            tempRec = rec.toDbDict()
            cnt = qry.count()
            if cnt == 0:
                # this is a new record, insert it
                mongoData.insert(tempRec)
            elif cnt == 1:
                # this is an existing record, update it but keep its _id
                print('foundone!!')
                dbDict = qry.next()
                temp = dbDict['_id']
                dbDict = tempRec
                dbDict['_id'] = temp
                mongoData.save(dbDict)
            else:
                print('strange, there is more than 1 record for %s' % rec.time)
        del templist
        myTime += dt.timedelta(days=10)
def overlayPoesTed( baseMapObj, axisHandle, startTime, endTime = None, coords = 'geo', \
                    hemi = 1, folat = [45., 90.], satNum = None, param='ted', scMin=-3.,scMax=0.5) :
    """This function overlays POES TED data onto a map object.

    **Args**:
        * **baseMapObj**: the map object you want data to be overlayed on.
        * **axisHandle**: the axis handle used (annotations are drawn on it).
        * **startTime** (`datetime <http://tinyurl.com/bl352yx>`_): the starttime you want data for. If endTime is not given, overlays data from satellites within +/- 45 min of the startTime.
        * [**endTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. If this is None, data from satellites within +/- 45 min of the startTime is overlayed. default = None
        * [**satNum**] (list or None): the satellites you want data for, e.g. [17] for noaa17. If this is None, data for all satellites will be used. default = None
        * [**coords**] (str): coordinates of the map object on which you want data to be overlayed: 'geo', 'mag', 'mlt'. Default 'geo'
        * [**hemi**] (int): hemisphere of the map object. 1 for the northern hemisphere, -1 for the southern. Default 1
        * [**folat**] (list): a 2-element list [a,b]; only data with latitude values in the range [a,b] will be overlayed. default = [45., 90.]
        * [**param**] (str): the name of the poes parameter to be plotted. default='ted'
        * [**scMin**] (float): lower colour-scale limit (log10 units). default = -3.
        * [**scMax**] (float): upper colour-scale limit (log10 units). default = 0.5

    **Returns**:
        The scatter-plot handle of the overlayed POES data. If no data is found, None is returned.

    **Example**:
        ::

            import datetime as dt
            bplt = gme.sat.overlayPoesTed(MapObj, ax, dt.datetime(2011,3,4,4))

    written by Bharat Kunduri, 20130216
    """
    import utils
    import matplotlib as mp
    import datetime
    import numpy
    import matplotlib.pyplot as plt
    import gme.sat.poes as Poes
    import math
    import models
    import matplotlib.cm as cm
    from scipy import optimize

    #check all the inputs for validity
    assert(isinstance(startTime,datetime.datetime)), \
        'error, sTime must be a datetime object'
    assert(endTime == None or isinstance(endTime,datetime.datetime)), \
        'error, eTime must be either None or a datetime object'
    var = locals()
    assert(var['satNum'] == None or (isinstance(var['satNum'],list) )), \
        'error, satNum must None or a list of satellite (integer) numbers'
    if satNum != None :
        # Currently operational POES satellites are 15-19, hence the cap of 5.
        assert( len(satNum) <= 5 ), \
            'error, there are only 5 POES satellites in operation (atleast when I wrote this code)'
    assert(var['folat'] == None or (isinstance(var['folat'],list) and \
        isinstance(var['folat'][0],(int,float)) and isinstance(var['folat'][1],(int,float)))), \
        'error, folat must None or a list of 2 numbers'

    # Check the hemisphere and get the appropriate folat
    # (southern hemisphere gets negative latitudes).
    folat = [ math.fabs( folat[0] ) * hemi, math.fabs( folat[1] ) * hemi ]

    # Check if the endTime is given in which case the user wants a specific time interval to search for
    # If not we'll give him the best available passes for the selected start time...
    if ( endTime != None ) :
        timeRange = numpy.array( [ startTime, endTime ] )
    else :
        timeRange = None

    # NOTE(review): wrapping a timedelta in numpy.array yields a 0-d object
    # array; datetime arithmetic still works, but a plain timedelta would do.
    pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )

    # check if the timeRange is set... if not set the timeRange to +/- pltTimeInterval of the startTime
    # NOTE(review): 'timeRange == None' on a numpy array relies on older numpy
    # returning a scalar; newer numpy compares element-wise and this 'if' may
    # raise. 'timeRange is None' would be the safe spelling -- verify against
    # the numpy version in use.
    if timeRange == None:
        timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )

    # SatNums - currently operational POES satellites are 15, 16, 17, 18, 19
    if satNum == None:
        satNum = [None]
    # If any particular satellite number is not chosen by user loop through all the available one's
    satNum = numpy.array( satNum ) # I like numpy arrays better that's why I'm converting the satNum list to a numpy array

    # One accumulation bucket per requested satellite.
    latPoesAll = [[] for j in range(len(satNum))]
    lonPoesAll = [[] for j in range(len(satNum))]
    tedPoesAll = [[] for j in range(len(satNum))]
    timePoesAll = [[] for j in range(len(satNum))]
    # NOTE(review): lenDataAll is pre-built as a list of lists but below only
    # ever .append()ed to (mixing ints with the empty lists) and never read;
    # it is effectively dead state.
    lenDataAll = [[] for j in range(len(satNum))]
    goodFlg=False

    for sN in range(len(satNum)) :
        if(satNum[sN] != None):
            currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat)
        else:
            currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = satNum[sN], folat = folat)
        # Check if the data is loaded...
        if currPoesList == None :
            print 'No data found'
            continue
            #return None
        else:
            goodFlg=True
        # Loop through the list and store the data into arrays
        lenDataAll.append(len(currPoesList))
        for l in currPoesList :
            # Store our data in arrays
            try:
                # Parameter values are plotted on a log10 scale.
                tedPoesAll[sN].append(math.log10(getattr(l,param)))
                if coords == 'mag' or coords == 'mlt':
                    lat,lon,_ = models.aacgm.aacgmConv(l.folat,l.folon, 0., l.time.year, 0)
                    latPoesAll[sN].append(lat)
                    if coords == 'mag':
                        lonPoesAll[sN].append(lon)
                    else:
                        # MLT hours converted to degrees (360/24).
                        lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(l.time),lon)*360./24.)
                else:
                    latPoesAll[sN].append(l.folat)
                    lonPoesAll[sN].append(l.folon)
                timePoesAll[sN].append(l.time)
            except Exception,e:
                # log10 of non-positive values (or a missing attribute) lands
                # here; the record is skipped with a warning.
                print e
                print 'could not get parameter for time',l.time

    # No satellite produced any usable data.
    if(not goodFlg): return None

    latPoesAll = numpy.array( latPoesAll )
    lonPoesAll = numpy.array( lonPoesAll )
    tedPoesAll = numpy.array( tedPoesAll )
    timePoesAll = numpy.array( timePoesAll )
    lenDataAll = numpy.array( lenDataAll )

    # Tick values for the (currently commented-out) colorbar below.
    poesTicks = [ -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5 ]

    # get the axis of the figure...
    ax = axisHandle

    for nn in range( len(satNum) ) :
        # Project lon/lat into map x/y and scatter-plot the parameter values.
        x, y = baseMapObj(lonPoesAll[nn], latPoesAll[nn])
        bpltpoes = baseMapObj.scatter(x,y,c=tedPoesAll[nn], vmin=scMin, vmax=scMax, alpha = 0.7, cmap=cm.jet, zorder = 7., edgecolor='none')
        timeCurr = timePoesAll[nn]
        # Annotate every 10th point with its HH:MM observation time.
        for aa in range( len(latPoesAll[nn]) ) :
            if aa % 10 == 0:
                str_curr = str(timeCurr[aa].hour)+':'+str(timeCurr[aa].minute)
                ax.annotate( str_curr, xy =( x[aa], y[aa] ), size = 5, zorder = 6. )

    #cbar = plt.colorbar(bpltpoes, ticks = poesTicks, orientation='horizontal')
    #cbar.ax.set_xticklabels(poesTicks)
    #cbar.set_label(r"Total Log Energy Flux [ergs cm$^{-2}$ s$^{-1}$]")
    return bpltpoes
def overlayPoesBnd( baseMapObj, axisHandle, startTime, coords = 'geo', hemi = 1, equBnd = True, polBnd = False ) :
    """This function reads POES TED data within +/- 45 min of the given time, fits the auroral oval boundaries and overlays them on a map object. The poleward boundary is not accurate all the times due to lesser number of satellite passes identifying it.

    **Args**:
        * **baseMapObj**: the map object you want data to be overlayed on.
        * **axisHandle**: the axis handle used.
        * **startTime** (`datetime <http://tinyurl.com/bl352yx>`_): the starttime you want data for. Data from satellites within +/- 45 min of the startTime is used.
        * [**coords**] (str): coordinates of the map object on which you want data to be overlayed. Default 'geo'
        * [**hemi**] (int): hemisphere of the map object. 1 for the northern hemisphere, -1 for the southern. Default 1
        * [**equBnd**] (bool): if True the equatorward auroral oval boundary fit from the TED data is overlayed on the map object. Default True
        * [**polBnd**] (bool): if True the poleward auroral oval boundary fit from the TED data is overlayed on the map object. Default False

    **Returns**:
        POES TED auroral boundary fits are overlayed on the map object.

    **Example**:
        ::

            import datetime as dt
            gme.sat.overlayPoesBnd(MapObj, ax, dt.datetime(2011,3,4,4))

    written by Bharat Kunduri, 20130216
    """
    import utils
    import matplotlib as mp
    import datetime
    import numpy
    import matplotlib.pyplot as plt
    import gme.sat.poes as Poes
    import math
    import matplotlib.cm as cm
    from scipy import optimize
    import models

    #check all the inputs for validity
    assert(isinstance(startTime,datetime.datetime)), \
        'error, sTime must be a datetime object'

    # Check the hemisphere and get the appropriate folat
    folat = [ 45. * hemi, 90. * hemi ]

    # Get the time range we choose +/- 45 minutes....
    pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )
    timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )
    satNum = [ 15, 16, 17, 18, 19 ]

    # We set the TED cut-off value to -0.75,
    # From observed cases this appeared to do well...
    # though fails sometimes especially during geomagnetically quiet times...
    # However this is version 1.0 and there always is room for improvement
    equBndCutoffVal = -0.75

    # If any particular satellite number is not chosen by user loop through all the available one's
    satNum = numpy.array( satNum ) # I like numpy arrays better that's why I'm converting the satNum list to a numpy array

    # One accumulation bucket per satellite.
    latPoesAll = [[] for j in range(len(satNum))]
    lonPoesAll = [[] for j in range(len(satNum))]
    tedPoesAll = [[] for j in range(len(satNum))]
    timePoesAll = [[] for j in range(len(satNum))]
    lenDataAll = [[] for j in range(len(satNum))]

    for sN in range( len(satNum) ) :
        currPoesList = Poes.readPoes( timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat )
        # Check if the data is loaded...
        if currPoesList == None :
            print 'No data found'
            continue
        # Loop through the list and store the data into arrays
        lenDataAll.append( len( currPoesList ) )
        for l in range( lenDataAll[-1] ) :
            # Store our data in arrays if the TED data value is > than the cutoff value
            try:
                x = math.log10(currPoesList[l].ted)
            except:
                # log10 fails for ted <= 0 (or missing) -- skip the record.
                continue
            if x > equBndCutoffVal:
                if coords == 'mag' or coords == 'mlt':
                    lat,lon,_ = models.aacgm.aacgmConv(currPoesList[l].folat,currPoesList[l].folon, 0., currPoesList[l].time.year, 0)
                    latPoesAll[sN].append(lat)
                    if coords == 'mag':
                        lonPoesAll[sN].append(lon)
                    else:
                        # MLT hours converted to degrees (360/24).
                        lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(currPoesList[l].time),lon)*360./24.)
                else:
                    latPoesAll[sN].append(currPoesList[l].folat)
                    lonPoesAll[sN].append(currPoesList[l].folon)
                # latPoesAll[sN].append( currPoesList[l].folat )
                # lonPoesAll[sN].append( currPoesList[l].folon )
                tedPoesAll[sN].append( math.log10(currPoesList[l].ted) )
                timePoesAll[sN].append( currPoesList[l].time )

    latPoesAll = numpy.array( latPoesAll )
    lonPoesAll = numpy.array( lonPoesAll )
    tedPoesAll = numpy.array( tedPoesAll )
    timePoesAll = numpy.array( timePoesAll )
    lenDataAll = numpy.array( lenDataAll )

    # Now to identify the boundaries...
    # Also need to check if the boundary is equatorward or poleward..
    # When satellite is moving from high-lat to low-lat decrease in flux would mean equatorward boundary
    # When satellite is moving from low-lat to high-lat increase in flux would mean equatorward boundary
    # that is what we are trying to check here
    eqBndLats = []
    eqBndLons = []
    poBndLats = []
    poBndLons = []

    for n1 in range( len(satNum) ) :
        currSatLats = latPoesAll[n1]
        currSatLons = lonPoesAll[n1]
        currSatTeds = tedPoesAll[n1]
        # Ltoh = low-to-high latitude pass, Htol = high-to-low; the 'P'
        # suffixed arrays collect poleward-boundary candidates.
        testLatArrLtoh = []
        testLonArrLtoh = []
        testLatArrHtol = []
        testLonArrHtol = []
        testLatArrLtohP = []
        testLonArrLtohP = []
        testLatArrHtolP = []
        testLonArrHtolP = []
        for n2 in range( len(currSatLats)-1 ) :
            #Check if the satellite is moving form low-lat to high-lat or otherwise
            if ( math.fabs( currSatLats[n2] ) < math.fabs( currSatLats[n2+1] ) ) :
                if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
                    testLatArrLtoh.append( currSatLats[n2] )
                    testLonArrLtoh.append( currSatLons[n2] )
                if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
                    testLatArrLtohP.append( currSatLats[n2] )
                    testLonArrLtohP.append( currSatLons[n2] )
            if ( math.fabs( currSatLats[n2] ) > math.fabs( currSatLats[n2+1] ) ) :
                if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
                    testLatArrHtol.append( currSatLats[n2] )
                    testLonArrHtol.append( currSatLons[n2] )
                if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
                    testLatArrHtolP.append( currSatLats[n2] )
                    testLonArrHtolP.append( currSatLons[n2] )

        # I do this to find the index of the min lat...
        # (each candidate array contributes the point at its minimum latitude)
        if ( testLatArrLtoh != [] ) :
            testLatArrLtoh = numpy.array( testLatArrLtoh )
            testLonArrLtoh = numpy.array( testLonArrLtoh )
            VarEqLat1 = testLatArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
            VarEqLon1 = testLonArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
            eqBndLats.append( VarEqLat1[0] )
            eqBndLons.append( VarEqLon1[0] )
        if ( testLatArrHtol != [] ) :
            testLatArrHtol = numpy.array( testLatArrHtol )
            testLonArrHtol = numpy.array( testLonArrHtol )
            VarEqLat2 = testLatArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
            VarEqLon2 = testLonArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
            eqBndLats.append( VarEqLat2[0] )
            eqBndLons.append( VarEqLon2[0] )
        if ( testLatArrLtohP != [] ) :
            testLatArrLtohP = numpy.array( testLatArrLtohP )
            testLonArrLtohP = numpy.array( testLonArrLtohP )
            VarEqLatP1 = testLatArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
            VarEqLonP1 = testLonArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
            # Poleward-boundary candidates are only accepted above 64 deg.
            if VarEqLatP1[0] > 64. :
                poBndLats.append( VarEqLatP1[0] )
                poBndLons.append( VarEqLonP1[0] )
        if ( testLatArrHtolP != [] ) :
            testLatArrHtolP = numpy.array( testLatArrHtolP )
            testLonArrHtolP = numpy.array( testLonArrHtolP )
            VarEqLatP2 = testLatArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
            VarEqLonP2 = testLonArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
            if VarEqLatP2[0] > 64 :
                poBndLats.append( VarEqLatP2[0] )
                poBndLons.append( VarEqLonP2[0] )

    eqBndLats = numpy.array( eqBndLats )
    eqBndLons = numpy.array( eqBndLons )
    poBndLats = numpy.array( poBndLats )
    poBndLons = numpy.array( poBndLons )

    #get the axis Handle used
    ax = axisHandle

    # Now we do the fitting part...
    # Fit lat = p0 + p1*cos(2*pi*lon/360 + p2): a first-harmonic fit in longitude.
    fitfunc = lambda p, x: p[0] + p[1]*numpy.cos(2*math.pi*(x/360.)+p[2]) # Target function
    errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
    # Initial guess for the parameters
    # Equatorward boundary
    p0Equ = [ 1., 1., 1.]
    p1Equ, successEqu = optimize.leastsq(errfunc, p0Equ[:], args=(eqBndLons, eqBndLats))
    if polBnd == True :
        p0Pol = [ 1., 1., 1.]
        p1Pol, successPol = optimize.leastsq(errfunc, p0Pol[:], args=(poBndLons, poBndLats))

    # Evaluate the fits on a ring of longitudes; last point wraps back to 0
    # so the plotted boundary closes on itself.
    allPlotLons = numpy.linspace(0., 360., 25.)
    allPlotLons[-1] = 0.
    eqPlotLats = []
    if polBnd == True :
        poPlotLats = []
    for xx in allPlotLons :
        if equBnd == True :
            eqPlotLats.append( p1Equ[0] + p1Equ[1]*numpy.cos(2*math.pi*(xx/360.)+p1Equ[2] ) )
        if polBnd == True :
            poPlotLats.append( p1Pol[0] + p1Pol[1]*numpy.cos(2*math.pi*(xx/360.)+p1Pol[2] ) )

    # NOTE(review): when equBnd is False, eqPlotLats stays empty but is still
    # passed to plot() below (draws nothing) -- looks unintended; verify.
    xEqu, yEqu = baseMapObj(allPlotLons, eqPlotLats)
    bpltpoes = baseMapObj.plot( xEqu,yEqu, zorder = 7., color = 'b' )
    if polBnd == True :
        xPol, yPol = baseMapObj(allPlotLons, poPlotLats)
        bpltpoes = baseMapObj.plot( xPol,yPol, zorder = 7., color = 'r' )
|
"""
This file is part of Commix Project (http://commixproject.com).
Copyright (c) 2014-2017 Anastasios Stasinopoulos (@ancst).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
For more see the file 'readme/COPYING' for copying permission.
"""
import os
import sys
import time
import base64
import sqlite3
import urllib2
from src.utils import menu
from src.utils import settings
from src.thirdparty.colorama import Fore, Back, Style, init
"""
Session handler via SQLite3 db.
"""
no_such_table = False
"""
Generate table name for SQLite3 db.
"""
def table_name(url):
    """Derive the per-target SQLite table-name prefix for *url*.

    The host (netloc) part of the URL is extracted and every character
    that is not legal in an SQL identifier ('.', ':', '-') is replaced
    by an underscore, then prefixed with "session_".
    """
    netloc = url.split('//', 1)[1].split('/', 1)[0]
    sanitized = netloc.replace(".", "_").replace(":", "_").replace("-", "_")
    return "session_" + sanitized
"""
Flush session.
"""
def flush(url):
    """Flush the stored session: drop *every* table in the session DB.

    Note: *url* is accepted for interface symmetry but unused here --
    the tables of all targets are dropped, not only this URL's.
    """
    info_msg = "Flushing the stored session from the session file... "
    sys.stdout.write(settings.print_info_msg(info_msg))
    sys.stdout.flush()
    try:
        conn = sqlite3.connect(settings.SESSION_FILE)
        tables = list(conn.execute("SELECT name FROM sqlite_master WHERE type is 'table'"))
        # Each fetched row is a 1-tuple; "%s" % row formats its single
        # element, i.e. the bare table name.
        conn.executescript(';'.join(["DROP TABLE IF EXISTS %s" %i for i in tables]))
        conn.commit()
        conn.close()
        print "[ " + Fore.GREEN + "SUCCEED" + Style.RESET_ALL + " ]"
    except sqlite3.OperationalError, err_msg:
        print "[ " + Fore.RED + "FAILED" + Style.RESET_ALL + " ]"
        err_msg = "Unable to flush the session file." + str(err_msg).title()
        print settings.print_critical_msg(err_msg)
"""
Clear injection point records
except latest for every technique.
"""
def clear(url):
    """Clear stored injection points, keeping only the latest per technique.

    For the "<table>_ip" table of *url*, deletes every row except the one
    with the highest id for each technique.
    """
    try:
        # NOTE(review): gated on the module-level 'no_such_table' flag; the
        # functions that appear to set it assign a *local* of that name
        # (no 'global' statement), so this branch may never execute --
        # verify against the callers.
        if no_such_table:
            conn = sqlite3.connect(settings.SESSION_FILE)
            conn.execute("DELETE FROM " + table_name(url) + "_ip WHERE "\
                         "id NOT IN (SELECT MAX(id) FROM " + \
                         table_name(url) + "_ip GROUP BY technique);")
            conn.commit()
            conn.close()
    except sqlite3.OperationalError, err_msg:
        print settings.print_critical_msg(err_msg)
    except:
        # Any other failure disables session loading for this run.
        settings.LOAD_SESSION = False
        return False
"""
Import successful injection points to session file.
"""
def injection_point_importation(url, technique, injection_type, separator, shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_request_method, url_time_response, timesec, how_long, output_length, is_vulnerable):
    """Store a successful injection point in the "<table>_ip" session table.

    Creates the table on first use, then inserts one row describing the
    working technique/payload for *url*. Exits the program on database
    errors (the session file is likely corrupt or stale).
    """
    try:
        conn = sqlite3.connect(settings.SESSION_FILE)
        conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_ip" + \
                     "(id INTEGER PRIMARY KEY, url VARCHAR, technique VARCHAR, injection_type VARCHAR, separator VARCHAR," \
                     "shell VARCHAR, vuln_parameter VARCHAR, prefix VARCHAR, suffix VARCHAR, "\
                     "TAG VARCHAR, alter_shell VARCHAR, payload VARCHAR, http_header VARCHAR, http_request_method VARCHAR, url_time_response INTEGER, "\
                     "timesec INTEGER, how_long INTEGER, output_length INTEGER, is_vulnerable VARCHAR);")
        conn.execute("INSERT INTO " + table_name(url) + "_ip(url, technique, injection_type, separator, "\
                     "shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_header, http_request_method, "\
                     "url_time_response, timesec, how_long, output_length, is_vulnerable) "\
                     "VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", \
                     (str(url), str(technique), str(injection_type), \
                     str(separator), str(shell), str(vuln_parameter), str(prefix), str(suffix), \
                     str(TAG), str(alter_shell), str(payload), str(settings.HTTP_HEADER), str(http_request_method), \
                     int(url_time_response), int(timesec), int(how_long), \
                     int(output_length), str(is_vulnerable)))
        conn.commit()
        conn.close()
        # Remember that at least one injection point has been recorded.
        if settings.INJECTION_CHECKER == False:
            settings.INJECTION_CHECKER = True
    except sqlite3.OperationalError, err_msg:
        err_msg = str(err_msg)[:1].upper() + str(err_msg)[1:] + "."
        err_msg += " You are advised to rerun with switch '--flush-session'."
        print settings.print_critical_msg(err_msg)
        sys.exit(0)
    except sqlite3.DatabaseError, err_msg:
        err_msg = "An error occurred while accessing session file ('"
        err_msg += settings.SESSION_FILE + "'). "
        err_msg += "If the problem persists use the '--flush-session' option."
        print "\n" + settings.print_critical_msg(err_msg)
        sys.exit(0)
"""
Export successful applied techniques from session file.
"""
def applied_techniques(url, http_request_method):
    """Return the initials of techniques already applied against *url*.

    Reads stored technique names from the "<table>_ip" table and reduces
    them to their distinct one-letter codes. Returns False (and disables
    session loading) on any error.
    """
    try:
        conn = sqlite3.connect(settings.SESSION_FILE)
        if settings.TESTABLE_PARAMETER:
            applied_techniques = conn.execute("SELECT technique FROM " + table_name(url) + "_ip WHERE "\
                                              "url = '" + url + "' AND "\
                                              "vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
                                              "http_request_method = '" + http_request_method + "' "\
                                              "ORDER BY id DESC ;")
        else:
            applied_techniques = conn.execute("SELECT technique FROM " + table_name(url) + "_ip WHERE "\
                                              "url = '" + url + "' AND "\
                                              "vuln_parameter = '" + settings.INJECT_TAG + "' AND "\
                                              "http_request_method = '" + http_request_method + "' "\
                                              "ORDER BY id DESC ;")
        values = []
        for session in applied_techniques:
            # "tempfile..." / "dynamic..." techniques are stored under longer
            # names; set the matching state flag and strip the prefix before
            # taking the technique's initial letter.
            if "tempfile" in session[0][:8]:
                settings.TEMPFILE_BASED_STATE = True
                session = session[0][4:]
            elif "dynamic" in session[0][:7]:
                settings.EVAL_BASED_STATE = True
                session = session[0][13:]
            values += session[0][:1]
        # De-duplicate the collected initials into a single string.
        applied_techniques = ''.join(list(set(values)))
        return applied_techniques
    except sqlite3.OperationalError, err_msg:
        #print settings.print_critical_msg(err_msg)
        settings.LOAD_SESSION = False
        return False
    except:
        settings.LOAD_SESSION = False
        return False
"""
Export successful applied techniques from session file.
"""
def applied_levels(url, http_request_method):
    """Return the stored test level ('is_vulnerable') of the latest record.

    Returns the first (most recent, due to ORDER BY id DESC) value found,
    None when the query yields no rows, or False on error.
    """
    try:
        conn = sqlite3.connect(settings.SESSION_FILE)
        if settings.TESTABLE_PARAMETER:
            applied_level = conn.execute("SELECT is_vulnerable FROM " + table_name(url) + "_ip WHERE "\
                                         "url = '" + url + "' AND "\
                                         "vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
                                         "http_request_method = '" + http_request_method + "' "\
                                         "ORDER BY id DESC;")
        else:
            applied_level = conn.execute("SELECT is_vulnerable FROM " + table_name(url) + "_ip WHERE "\
                                         "url = '" + url + "' AND "\
                                         "vuln_parameter = '" + settings.INJECT_TAG + "' AND "\
                                         "http_request_method = '" + http_request_method + "' "\
                                         "ORDER BY id DESC;")
        # Return only the newest row's value.
        for session in applied_level:
            return session[0]
    except sqlite3.OperationalError, err_msg:
        #print settings.print_critical_msg(err_msg)
        settings.LOAD_SESSION = False
        return False
    except:
        settings.LOAD_SESSION = False
        return False
"""
Export successful injection points from session file.
"""
def injection_point_exportation(url, http_request_method):
    """Load the most recent stored injection point for *url* from the session.

    Maps the user-selected technique (menu.options.tech) to the stored
    injection-type prefix ("R"esults-based, "B"lind, "S"emiblind), queries
    the newest matching row and returns its fields as a 17-tuple, or False
    on error. Returns None implicitly when nothing matches.
    """
    try:
        if not menu.options.flush_session:
            conn = sqlite3.connect(settings.SESSION_FILE)
            result = conn.execute("SELECT * FROM sqlite_master WHERE name = '" + \
                                  table_name(url) + "_ip' AND type = 'table';")
            # NOTE(review): conn.execute returns a cursor object which is
            # always truthy, so the else-branch below is effectively dead.
            if result:
                # Translate the chosen technique letter into the stored
                # injection-type prefix.
                if menu.options.tech[:1] == "c":
                    select_injection_type = "R"
                elif menu.options.tech[:1] == "e":
                    settings.EVAL_BASED_STATE = True
                    select_injection_type = "R"
                elif menu.options.tech[:1] == "t":
                    select_injection_type = "B"
                else:
                    select_injection_type = "S"
                if settings.TEMPFILE_BASED_STATE and select_injection_type == "S":
                    check_injection_technique = "t"
                elif settings.EVAL_BASED_STATE and select_injection_type == "R":
                    check_injection_technique = "d"
                else:
                    check_injection_technique = menu.options.tech[:1]
                if settings.TESTABLE_PARAMETER:
                    cursor = conn.execute("SELECT * FROM " + table_name(url) + "_ip WHERE "\
                                          "url = '" + url + "' AND "\
                                          "injection_type like '" + select_injection_type + "%' AND "\
                                          "technique like '" + check_injection_technique + "%' AND "\
                                          "vuln_parameter = '" + settings.TESTABLE_PARAMETER + "' AND "\
                                          "http_request_method = '" + http_request_method + "' "\
                                          "ORDER BY id DESC limit 1;")
                else:
                    cursor = conn.execute("SELECT * FROM " + table_name(url) + "_ip WHERE "\
                                          "url = '" + url + "' AND "\
                                          "injection_type like '" + select_injection_type + "%' AND "\
                                          "technique like '" + check_injection_technique + "%' AND "\
                                          "http_header = '" + settings.HTTP_HEADER + "' AND "\
                                          "http_request_method = '" + http_request_method + "' "\
                                          "ORDER BY id DESC limit 1;")
                # Unpack the (single, newest) row. Column 12 (http_header)
                # is intentionally skipped.
                for session in cursor:
                    url = session[1]
                    technique = session[2]
                    injection_type = session[3]
                    separator = session[4]
                    shell = session[5]
                    vuln_parameter = session[6]
                    prefix = session[7]
                    suffix = session[8]
                    TAG = session[9]
                    alter_shell = session[10]
                    payload = session[11]
                    http_request_method = session[13]
                    url_time_response = session[14]
                    timesec = session[15]
                    how_long = session[16]
                    output_length = session[17]
                    is_vulnerable = session[18]
                    return url, technique, injection_type, separator, shell, vuln_parameter, prefix, suffix, TAG, alter_shell, payload, http_request_method, url_time_response, timesec, how_long, output_length, is_vulnerable
            else:
                # NOTE(review): this binds a *local* no_such_table (the
                # module-level flag is not updated -- missing 'global').
                no_such_table = True
                pass
    except sqlite3.OperationalError, err_msg:
        #print settings.print_critical_msg(err_msg)
        settings.LOAD_SESSION = False
        return False
    except:
        settings.LOAD_SESSION = False
        return False
"""
Notification about session.
"""
def notification(url, technique, injection_type):
    """Ask the user whether to resume the stored session for this host.

    Returns True to resume the stored injection point, False to re-test
    (optionally re-evaluating the current or all techniques), and raises
    SystemExit when the user quits. In batch mode the answer defaults
    to "yes".
    """
    try:
        if settings.LOAD_SESSION == True:
            success_msg = "A previously stored session has been held against that host."
            print settings.print_success_msg(success_msg)
            while True:
                if not menu.options.batch:
                    question_msg = "Do you want to resume to the "
                    question_msg += "(" + injection_type.split(" ")[0] + ") "
                    question_msg += technique.rsplit(' ', 2)[0]
                    question_msg += " injection point? [Y/n] > "
                    sys.stdout.write(settings.print_question_msg(question_msg))
                    settings.LOAD_SESSION = sys.stdin.readline().replace("\n","").lower()
                else:
                    settings.LOAD_SESSION = ""
                # An empty answer counts as "yes".
                if len(settings.LOAD_SESSION) == 0:
                    settings.LOAD_SESSION = "y"
                if settings.LOAD_SESSION in settings.CHOICE_YES:
                    return True
                elif settings.LOAD_SESSION in settings.CHOICE_NO:
                    settings.LOAD_SESSION = False
                    # The "classic" technique has nothing to re-evaluate.
                    if technique[:1] != "c":
                        while True:
                            question_msg = "Which technique do you want to re-evaluate? [(C)urrent/(a)ll/(n)one] > "
                            sys.stdout.write(settings.print_question_msg(question_msg))
                            proceed_option = sys.stdin.readline().replace("\n","").lower()
                            if len(proceed_option) == 0:
                                proceed_option = "c"
                            if proceed_option.lower() in settings.CHOICE_PROCEED :
                                if proceed_option.lower() == "a":
                                    settings.RETEST = True
                                    break
                                elif proceed_option.lower() == "c" :
                                    settings.RETEST = False
                                    break
                                elif proceed_option.lower() == "n":
                                    raise SystemExit()
                                else:
                                    pass
                            else:
                                err_msg = "'" + proceed_option + "' is not a valid answer."
                                print settings.print_error_msg(err_msg)
                                pass
                    if settings.SESSION_APPLIED_TECHNIQUES:
                        menu.options.tech = ''.join(settings.AVAILABLE_TECHNIQUES)
                    return False
                elif settings.LOAD_SESSION in settings.CHOICE_QUIT:
                    raise SystemExit()
                else:
                    err_msg = "'" + settings.LOAD_SESSION + "' is not a valid answer."
                    print settings.print_error_msg(err_msg)
                    pass
    except sqlite3.OperationalError, err_msg:
        print settings.print_critical_msg(err_msg)
"""
Check for specific stored parameter.
"""
def check_stored_parameter(url, http_request_method):
    """Check whether a stored injection point matches the requested level.

    Returns True when a stored injection point exists for *url* whose
    recorded test level equals the currently requested one; as a side
    effect, restores any stored alternative shell into
    ``menu.options.alter_shell``. Returns False otherwise.
    """
    # Query the session DB once and reuse the result -- the original called
    # injection_point_exportation() (a fresh SQLite query) up to three times.
    stored = injection_point_exportation(url, http_request_method)
    if not stored:
        return False
    # Tuple layout (see injection_point_exportation): [9] = alter_shell,
    # [15] = output_length... [16]-1 index here is is_vulnerable/level.
    if stored[16] == str(menu.options.level):
        # Check for stored alternative shell.
        if stored[9] != "":
            menu.options.alter_shell = stored[9]
        return True
    return False
"""
Import successful command execution outputs to session file.
"""
def store_cmd(url, cmd, shell, vuln_parameter):
    """Store a command and its execution output in the "<table>_ir" table.

    Both the command and the output are base64-encoded before insertion.
    When no testable parameter is set, the HTTP header name is stored in
    the vuln_parameter column instead.
    """
    try:
        conn = sqlite3.connect(settings.SESSION_FILE)
        conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_ir" + \
                     "(cmd VARCHAR, output VARCHAR, vuln_parameter VARCHAR);")
        if settings.TESTABLE_PARAMETER:
            conn.execute("INSERT INTO " + table_name(url) + "_ir(cmd, output, vuln_parameter) "\
                         "VALUES(?,?,?)", \
                         (str(base64.b64encode(cmd)), str(base64.b64encode(shell)), str(vuln_parameter)))
        else:
            conn.execute("INSERT INTO " + table_name(url) + "_ir(cmd, output, vuln_parameter) "\
                         "VALUES(?,?,?)", \
                         (str(base64.b64encode(cmd)), str(base64.b64encode(shell)), str(settings.HTTP_HEADER)))
        conn.commit()
        conn.close()
    except sqlite3.OperationalError, err_msg:
        print settings.print_critical_msg(err_msg)
    except TypeError, err_msg:
        # b64encode of a non-string (e.g. None output) -- silently skipped.
        pass
"""
Export successful command execution outputs from session file.
"""
def export_stored_cmd(url, cmd, vuln_parameter):
    """Fetch the stored output of *cmd* from the "<table>_ir" table.

    Returns the base64-decoded output of the last matching row, or None
    when the command has no stored output.
    """
    try:
        if not menu.options.flush_session:
            conn = sqlite3.connect(settings.SESSION_FILE)
            output = None
            # NOTE(review): second connect is redundant (first conn leaks).
            conn = sqlite3.connect(settings.SESSION_FILE)
            if settings.TESTABLE_PARAMETER:
                cursor = conn.execute("SELECT output FROM " + table_name(url) + \
                                      "_ir WHERE cmd='" + base64.b64encode(cmd) + "' AND "\
                                      "vuln_parameter= '" + vuln_parameter + "';").fetchall()
            else:
                cursor = conn.execute("SELECT output FROM " + table_name(url) + \
                                      "_ir WHERE cmd='" + base64.b64encode(cmd) + "' AND "\
                                      "vuln_parameter= '" + settings.HTTP_HEADER + "';").fetchall()
            conn.commit()
            conn.close()
            for session in cursor:
                output = base64.b64decode(session[0])
            return output
        else:
            # NOTE(review): binds a local; the module-level flag is untouched.
            no_such_table = True
            pass
    except sqlite3.OperationalError, err_msg:
        pass
"""
Import valid credentials to session file.
"""
def import_valid_credentials(url, authentication_type, admin_panel, username, password):
    """Store a working credentials pair in the "<table>_creds" table.

    Creates the table on first use; exits the program on database errors.
    """
    try:
        conn = sqlite3.connect(settings.SESSION_FILE)
        conn.execute("CREATE TABLE IF NOT EXISTS " + table_name(url) + "_creds" + \
                     "(id INTEGER PRIMARY KEY, url VARCHAR, authentication_type VARCHAR, admin_panel VARCHAR, "\
                     "username VARCHAR, password VARCHAR);")
        conn.execute("INSERT INTO " + table_name(url) + "_creds(url, authentication_type, "\
                     "admin_panel, username, password) VALUES(?,?,?,?,?)", \
                     (str(url), str(authentication_type), str(admin_panel), \
                     str(username), str(password)))
        conn.commit()
        conn.close()
    except sqlite3.OperationalError, err_msg:
        print settings.print_critical_msg(err_msg)
    except sqlite3.DatabaseError, err_msg:
        err_msg = "An error occurred while accessing session file ('"
        err_msg += settings.SESSION_FILE + "'). "
        err_msg += "If the problem persists use the '--flush-session' option."
        print "\n" + settings.print_critical_msg(err_msg)
        sys.exit(0)
"""
Export valid credentials from session file.
"""
def export_valid_credentials(url, authentication_type):
    """Return stored credentials for *url* as a "username:password" string.

    Returns None implicitly when nothing is stored or on operational errors.
    """
    try:
        if not menu.options.flush_session:
            conn = sqlite3.connect(settings.SESSION_FILE)
            output = None
            # NOTE(review): second connect is redundant (first conn leaks),
            # and the connection is never closed on this path.
            conn = sqlite3.connect(settings.SESSION_FILE)
            cursor = conn.execute("SELECT username, password FROM " + table_name(url) + \
                                  "_creds WHERE url='" + url + "' AND "\
                                  "authentication_type= '" + authentication_type + "';").fetchall()
            # First row only: join (username, password) with ':'.
            cursor = ":".join(cursor[0])
            return cursor
        else:
            # NOTE(review): binds a local; the module-level flag is untouched.
            no_such_table = True
            pass
    except sqlite3.OperationalError, err_msg:
        pass
|
import bpy
from bpy.props import *
from PyHSPlasma import *
from .base import PlasmaModifierProperties
from ..prop_world import game_versions
from ...exporter import ExportError
from ... import idprops
class PlasmaVersionedNodeTree(idprops.IDPropMixin, bpy.types.PropertyGroup):
    """One node-tree entry of an Advanced Logic modifier, tagged with the
    Plasma game versions it should be exported for."""

    name = StringProperty(name="Name")
    # Multi-select flag enum; defaults to every known game version selected.
    version = EnumProperty(name="Version",
                           description="Plasma versions this node tree exports under",
                           items=game_versions,
                           options={"ENUM_FLAG"},
                           default=set(list(zip(*game_versions))[0]))
    node_tree = PointerProperty(name="Node Tree",
                                description="Node Tree to export",
                                type=bpy.types.NodeTree)
    node_name = StringProperty(name="Node Ref",
                               description="Attach a reference to this node")

    @classmethod
    def _idprop_mapping(cls):
        # Maps the PointerProperty to the legacy string ID-property name
        # (used by IDPropMixin for upgrades of old blend files).
        return {"node_tree": "node_tree_name"}

    def _idprop_sources(self):
        # Where to resolve the legacy name back into a datablock.
        return {"node_tree_name": bpy.data.node_groups}
class PlasmaAdvancedLogic(PlasmaModifierProperties):
    """Modifier that exports raw Plasma logic node trees, per game version."""

    pl_id = "advanced_logic"

    bl_category = "Logic"
    bl_label = "Advanced"
    bl_description = "Plasma Logic Nodes"
    bl_icon = "NODETREE"

    logic_groups = CollectionProperty(type=PlasmaVersionedNodeTree)
    active_group_index = IntProperty(options={"HIDDEN"})

    def export(self, exporter, bo, so):
        version = exporter.mgr.getVer()
        for i in self.logic_groups:
            # i.version holds enum-flag *names*; resolve them to the actual
            # version constants via this module's globals.
            our_versions = [globals()[j] for j in i.version]
            if version in our_versions:
                if i.node_tree is None:
                    raise ExportError("'{}': Advanced Logic is missing a node tree for '{}'".format(bo.name, i.version))

                # If node_name is defined, then we're only adding a reference. We will make sure that
                # the entire node tree is exported once before the post_export step, however.
                if i.node_name:
                    exporter.want_node_trees[i.node_tree.name] = (bo, so)
                    node = i.node_tree.nodes.get(i.node_name, None)
                    if node is None:
                        raise ExportError("Node '{}' does not exist in '{}'".format(i.node_name, i.node_tree.name))
                    # We are going to assume get_key will do the adding correctly. Single modifiers
                    # should fetch the appropriate SceneObject before doing anything, so this will
                    # be a no-op in that case. Multi modifiers should accept any SceneObject, however
                    node.get_key(exporter, so)
                else:
                    exporter.node_trees_exported.add(i.node_tree.name)
                    i.node_tree.export(exporter, bo, so)

    def harvest_actors(self):
        # NOTE(review): unlike export(), this does not guard against a
        # group whose node_tree is None -- verify whether that can occur
        # before export validation runs.
        actors = set()
        for i in self.logic_groups:
            actors.update(i.node_tree.harvest_actors())
        return actors
class PlasmaSpawnPoint(PlasmaModifierProperties):
    """Marks an object as a place where avatars may link into the Age."""

    pl_id = "spawnpoint"

    bl_category = "Logic"
    bl_label = "Spawn Point"
    bl_description = "Point at which avatars link into the Age"

    def export(self, exporter, bo, so):
        # Not much to this modifier... It's basically a flag that tells the engine, "hey, this is a
        # place the avatar can show up." Nice to have a simple one to get started with.
        # (The created plSpawnModifier is registered on the SceneObject by
        # add_object; the original bound it to an unused local.)
        exporter.mgr.add_object(pl=plSpawnModifier, so=so, name=self.key_name)

    @property
    def requires_actor(self):
        # Spawn points need a coordinate interface so the avatar has a transform.
        return True
class PlasmaMaintainersMarker(PlasmaModifierProperties):
    """Designates an object as the D'ni coordinate origin point of the Age."""

    pl_id = "maintainersmarker"

    bl_category = "Logic"
    bl_label = "Maintainer's Marker"
    bl_description = "Designates an object as the D'ni coordinate origin point of the Age."
    bl_icon = "OUTLINER_DATA_EMPTY"

    # Names mirror the plMaintainersMarkerModifier constants resolved in export().
    calibration = EnumProperty(name="Calibration",
                               description="State of repair for the Marker",
                               items=[
                                  ("kBroken", "Broken",
                                   "A marker which reports scrambled coordinates to the KI."),
                                  ("kRepaired", "Repaired",
                                   "A marker which reports blank coordinates to the KI."),
                                  ("kCalibrated", "Calibrated",
                                   "A marker which reports accurate coordinates to the KI.")
                               ])

    def export(self, exporter, bo, so):
        maintmark = exporter.mgr.add_object(pl=plMaintainersMarkerModifier, so=so, name=self.key_name)
        # Enum identifier string -> plMaintainersMarkerModifier constant.
        maintmark.calibration = getattr(plMaintainersMarkerModifier, self.calibration)

    @property
    def requires_actor(self):
        return True
|
import os.path
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth import get_user_model
from avatar.settings import AVATAR_DEFAULT_URL, AVATAR_MAX_AVATARS_PER_USER
from avatar.util import get_primary_avatar
from avatar.models import Avatar
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
def upload_helper(o, filename):
    """POST *filename* from the test-data directory to the avatar_add view.

    Args:
        o: the TestCase instance (provides ``testdatapath`` and ``client``).
        filename: name of a file inside ``o.testdatapath``.

    Returns:
        The (redirect-followed) test-client response.
    """
    # 'with' guarantees the file handle is closed even if client.post raises;
    # the original leaked the handle on exception.
    with open(os.path.join(o.testdatapath, filename), "rb") as f:
        response = o.client.post(reverse('avatar_add'), {
            'avatar': f,
        }, follow=True)
    return response
class AvatarUploadTests(TestCase):
    """Integration tests for avatar upload, rendering and deletion views.

    Some cases only behave as intended with specific avatar settings;
    those are noted in per-test comments. The deprecated unittest aliases
    (failUnlessEqual/failIfEqual) have been replaced with their modern
    equivalents (assertEqual/assertNotEqual).
    """

    def setUp(self):
        """Create a logged-in test user and initialise PIL's image plugins."""
        self.testdatapath = os.path.join(os.path.dirname(__file__), "testdata")
        self.user = get_user_model().objects.create_user('test', 'lennon@thebeatles.com', 'testpassword')
        self.user.save()
        self.client.login(username='test', password='testpassword')
        Image.init()

    def testNonImageUpload(self):
        """A non-image upload must be rejected with form errors."""
        response = upload_helper(self, "nonimagefile")
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testNormalImageUpload(self):
        """A valid PNG upload redirects and creates a primary avatar."""
        response = upload_helper(self, "test.png")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 1)
        self.assertEqual(response.context['upload_avatar_form'].errors, {})
        avatar = get_primary_avatar(self.user)
        self.assertNotEqual(avatar, None)

    def testImageWithoutExtension(self):
        """A file without an extension is rejected."""
        # use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
        response = upload_helper(self, "imagefilewithoutext")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testImageWithWrongExtension(self):
        """A file with a disallowed extension is rejected."""
        # use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
        response = upload_helper(self, "imagefilewithwrongext.ogg")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testImageTooBig(self):
        """An image exceeding the size limit is rejected."""
        # use with AVATAR_MAX_SIZE = 1024 * 1024
        response = upload_helper(self, "testbig.png")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testDefaultUrl(self):
        """Without an uploaded avatar, rendering redirects to the default image."""
        response = self.client.get(reverse('avatar_render_primary', kwargs={
            'user': self.user.username,
            'size': 80,
        }))
        loc = response['Location']
        base_url = getattr(settings, 'STATIC_URL', None)
        if not base_url:
            base_url = settings.MEDIA_URL
        self.assertTrue(base_url in loc)
        self.assertTrue(loc.endswith(AVATAR_DEFAULT_URL))

    def testNonExistingUser(self):
        """Unknown users have no primary avatar."""
        a = get_primary_avatar("nonexistinguser")
        self.assertEqual(a, None)

    def testThereCanBeOnlyOnePrimaryAvatar(self):
        """Repeated uploads keep exactly one primary avatar."""
        for i in range(1, 10):
            self.testNormalImageUpload()
        count = Avatar.objects.filter(user=self.user, primary=True).count()
        self.assertEqual(count, 1)

    def testDeleteAvatar(self):
        """Deleting the only avatar leaves the user without any."""
        self.testNormalImageUpload()
        avatar = Avatar.objects.filter(user=self.user)
        self.assertEqual(len(avatar), 1)
        response = self.client.post(reverse('avatar_delete'), {
            'choices': [avatar[0].id],
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 1)
        count = Avatar.objects.filter(user=self.user).count()
        self.assertEqual(count, 0)

    def testDeletePrimaryAvatarAndNewPrimary(self):
        """Deleting the primary avatar promotes another avatar to primary."""
        self.testThereCanBeOnlyOnePrimaryAvatar()
        primary = get_primary_avatar(self.user)
        oid = primary.id
        self.client.post(reverse('avatar_delete'), {
            'choices': [oid],
        })
        primaries = Avatar.objects.filter(user=self.user, primary=True)
        self.assertEqual(len(primaries), 1)
        self.assertNotEqual(oid, primaries[0].id)
        avatars = Avatar.objects.filter(user=self.user)
        self.assertEqual(avatars[0].id, primaries[0].id)

    def testTooManyAvatars(self):
        """Uploads beyond AVATAR_MAX_AVATARS_PER_USER are rejected."""
        for i in range(0, AVATAR_MAX_AVATARS_PER_USER):
            self.testNormalImageUpload()
        count_before = Avatar.objects.filter(user=self.user).count()
        response = upload_helper(self, "test.png")
        count_after = Avatar.objects.filter(user=self.user).count()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})
        self.assertEqual(count_before, count_after)

    # def testAvatarOrder
    # def testReplaceAvatarWhenMaxIsOne
    # def testHashFileName
    # def testHashUserName
    # def testChangePrimaryAvatar
    # def testDeleteThumbnailAndRecreation
    # def testAutomaticThumbnailCreation
|
"""Output a CSV file that can be imported to Petra"""
import os
import sys
import calendar
import csv
from csv_dict import CSVDict, CSVKeyMissing
def split_csv(table_file='Tabell.csv', account_file='Konto.csv',
              cost_center_file='Costcenter.csv', project_file='Projekt.csv'):
    """Split account, cost center and project into three tables.

    Reads the combined translation table (semicolon-separated) and writes
    one two-column (code, name) CSV per entity. Output file names are now
    parameters with backward-compatible defaults.

    Args:
        table_file (str): Combined input table.
        account_file (str): Output file for account pairs (columns 0-1).
        cost_center_file (str): Output for cost center pairs (columns 3-4).
        project_file (str): Output for project pairs (columns 6-7).
    """
    # Column layout of the combined table: a pair is kept only when both
    # its code and its name cell are non-empty.
    layout = {
        account_file: (0, 1),
        cost_center_file: (3, 4),
        project_file: (6, 7),
    }
    rows = {name: [] for name in layout}
    with open(table_file, newline='') as tablefile:
        for row in csv.reader(tablefile, delimiter=';'):
            for name, (code_col, label_col) in layout.items():
                if row[code_col] != '' and row[label_col] != '':
                    rows[name].append([row[code_col], row[label_col]])
    for name, table in rows.items():
        with open(name, 'w', newline='') as outfile:
            csv.writer(outfile, delimiter=';').writerows(table)
def _parse_trans_objects(trans):
"""
Handle an object list of a transaction.
The object list contains a cost center and project, formatted like so
['1', 'K0000', '6', 'P-00000000'].
Cost center (resultatenhet) is preceeded by a '1' and project by a '6', but the order
of the two could be reversed. Cost center always begins with 'K' and
project with 'P-'. The object list could also be empty.
Returns a tuple (cost_center, project), where any of the two could be
None in case the information is missing from the object list.
"""
cost_center = project = None
trans_it = iter(trans)
for idx in trans_it:
obj = next(trans_it)
if idx == '1' and obj.startswith('K'):
cost_center = obj
elif idx == '6' and obj.startswith('P-'):
project = obj
return (cost_center, project)
class PetraOutput:
    """Form an output file based on an SieData object and translation table"""

    def __init__(self, sie_data, account_file, cost_center_file, project_file,
                 default_petra_cc='3200'):
        # Translation tables map Visma accounts / cost centers / projects to
        # their Petra counterparts (CSVDict keyed on the Visma code).
        self.sie_data = sie_data
        self.default_petra_cc = default_petra_cc
        # self.parse_tables(account_file, cost_center_file, project_file)
        self.account = CSVDict(account_file)
        self.cost_center = CSVDict(cost_center_file)
        self.project = CSVDict(project_file)
        # Output rows accumulated by populate_output_table().
        self.table = []
        # "YYYY-MM" of the processed verifications; set by populate_output_table().
        self.ver_month = None

    def populate_output_table(self):
        # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        # pylint: disable=invalid-name
        """Extract interesting information from the SIE data and form output.

        Builds a header row, one batch ('B') row carrying a debit checksum,
        and per verification a journal ('J') row followed by one
        transaction ('T') row per transaction.
        """
        header = ['', 'CC', 'Account', 'Narrative', 'Reference', 'Date', 'Dt',
                  'Ct']
        self.table.append(header)
        # First word of the #PROGRAM record identifies the exporting program.
        program = self.sie_data.get_data('#PROGRAM')[0].data[0].split()[0]
        verifications = self.sie_data.get_data('#VER')
        # Month is taken from the first verification that carries a date.
        ver_date = next(v.verdatum for v in verifications if v.verdatum.has_date)
        self.ver_month = ver_date.format("%Y-%m")
        description = "Imported from {} {}".format(program, self.ver_month)
        # Checksum: total debit over all verifications, rendered with a
        # Swedish decimal comma and trailing zeros/point stripped.
        checksum = format(sum(ver.sum_debit() for ver in verifications),
                          '.2f').rstrip('0').rstrip('.').replace('.',',')
        # The batch row is dated on the last day of the verification month.
        day = calendar.monthrange(ver_date.year, ver_date.month)[1]
        last_date_month = "{}/{:02}/{}".format(day, ver_date.month, ver_date.year)
        self.table.append(['B', description, checksum, last_date_month, '', '', '',
                           ''])
        for ver in verifications:
            # Refuse to export a verification whose debit and credit differ.
            if not ver.in_balance():
                raise Exception('Inte i balans:', ver)
            """
            # Contains 'Swetzén'
            if ver.serie == 'A' and ver.vernr == '170071':
                print(ver)
            # Contains stange characters
            if ver.serie == 'C' and ver.vernr == '170058':
                print(ver)
            # CC with 'XXXX'
            if ver.serie == 'C' and ver.vernr == '170064':
                print(ver)
            # Rounding error?
            if ver.serie == 'C' and ver.vernr == '170067':
                print(ver)
            """
            ref = "Visma Ver {}{}".format(ver.serie, ver.vernr)
            text = "{} - {}".format(ref, ver.vertext)
            date = ver.verdatum.format("%d/%m/%Y")
            self.table.append(['J', text, 'GL', 'STD', 'SEK', '1', date, ''])
            narr = ver.vertext  # Default
            for trans in ver.trans_list:
                (visma_cc, visma_proj) = _parse_trans_objects(trans.objekt)
                # Petra cost center: the project translation takes precedence
                # unless the project is missing or the generic 'P-32000000';
                # then fall back to the cost center translation or the default.
                if not visma_proj or visma_proj == 'P-32000000':  # Use visma_cc instead
                    if not visma_cc:  # Use default
                        cc = self.default_petra_cc
                    else:
                        cc = self.cost_center[str(visma_cc)]['P_CC']
                else:
                    cc = self.project[str(visma_proj)]['P_CC']
                acct = self.account[str(trans.kontonr)]['P_Acct']
                # Narrative: transaction text, optionally followed by the
                # quantity (Swedish decimal comma). When a transaction has no
                # text, the narrative from the previous iteration is kept.
                if trans.transtext and trans.kvantitet:
                    kvantitet = format(trans.kvantitet,
                                       '.2f').rstrip('0').rstrip('.').replace('.',',')
                    narr = "{} {}".format(trans.transtext, kvantitet)
                elif trans.transtext:
                    narr = trans.transtext
                dt = trans.debit
                ct = trans.credit
                self.table.append(['T', cc, acct, narr, ref, date, dt, ct])

    def print_output(self):
        """Print csv output to stdout"""
        print("\n".join(','.join(str(r) for r in row) for row in self.table))

    def write_output(self, filename=None, overwrite=False):
        """Write csv to file, abort if it already exists"""
        # 'x' mode makes open() raise FileExistsError unless overwriting.
        writemode = 'w' if overwrite else 'x'
        try:
            # NOTE(review): the single-element encoding loop looks like a
            # leftover from trying several encodings, and the encoding name
            # ends up embedded in the generated file name — confirm intended.
            for encoding in ['utf_8']:
                if not filename:
                    filename = 'CSV/PYTHON/VtP_' + self.ver_month + encoding + '.csv'
                try:
                    with open(filename, writemode, newline='', encoding=encoding) as csvfile:
                        csvwriter = csv.writer(csvfile, delimiter=';')
                        csvwriter.writerows(self.table)
                    # print("Encoding with ", encoding, "successful!")
                except UnicodeEncodeError as err:
                    # Remove the partially written file on encoding failure.
                    print("Encoding failed: ", err)
                    os.remove(filename)
        except FileExistsError:
            sys.exit("Kan inte skriva " + filename + ", filen finns redan.")
|
"""
José Vicente Pérez
Granada University (Spain)
March, 2017
Testing suite for profiler.py
Last modified: 19 June 2017
"""
import time
import profiler as p
import praster as pr
import numpy as np
import matplotlib.pyplot as plt
# Announce the test suite when the script is run.
print("Tests for TProfiler methods")
def test01():
    """
    Build a TProfiler from saved profile_data and exercise get_x()/get_y(),
    plotting the coordinates taken from the head and from the mouth.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 01 para TProfiler")
    print("Testing functions get_x(), get_y()")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    # One panel per head flag
    fig = plt.figure()
    panels = [fig.add_subplot(121), fig.add_subplot(122)]
    for axis, head in zip(panels, (True, False)):
        axis.plot(profile.get_x(head), profile.get_y(head))
        axis.set_title("head = {}".format(head))
    fig.tight_layout()
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test02():
    """
    Build a TProfiler from saved profile_data and exercise get_l()/get_z(),
    including relative (head-referenced) elevations.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 02 para TProfiler")
    print("Testing functions get_l(), get_z()")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    fig = plt.figure()
    ax1, ax2, ax3, ax4 = (fig.add_subplot(2, 2, pos) for pos in range(1, 5))
    l_head = profile.get_l(True)
    l_mouth = profile.get_l(False)
    ax1.plot(l_head, profile.get_z(True))
    ax1.set_title("head = True")
    ax2.plot(l_mouth, profile.get_z(False))
    ax2.set_title("head = False")
    ax3.plot(l_head, profile.get_z(True, True))
    ax3.set_title("Relative elevations, head = True")
    ax4.plot(l_mouth, profile.get_z(False, True))
    ax4.set_title("Relative elevations, head = False")
    fig.tight_layout()
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test03():
    """
    Build a TProfiler from saved profile_data and exercise smooth() and
    get_raw_z(): raw vs peak-removed elevations, then repeated smoothing.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 03 para TProfiler")
    print("Testing functions smooth() and get_raw_z()")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    fig = plt.figure(figsize=(12, 6))
    ax_left = fig.add_subplot(121)
    ax_right = fig.add_subplot(122)
    # Left panel: peak-removed against raw elevations on the same stretch
    l_head = profile.get_l(True)
    ax_left.plot(l_head, profile.get_z(True), label="Peaks removed")
    ax_left.plot(l_head, profile.get_raw_z(True), label="Raw elevations")
    ax_left.set_title("Raw elevations vs peak removed")
    ax_left.legend()
    ax_left.set_xlim((6850, 8950))
    ax_left.set_ylim((950, 1050))
    # Right panel: cumulative smoothing with growing window (0..200 m);
    # each pass plots the pre-smooth curve, then smooths further
    for distance in range(0, 250, 50):
        li = profile.get_l(True)
        zi = profile.get_z(True)
        profile.smooth(distance)
        ax_right.plot(li, zi, label=str(distance) + " m")
    ax_right.set_title("Smooth with different distances")
    ax_right.legend()
    ax_right.set_xlim((8000, 9000))
    ax_right.set_ylim((950, 1000))
    fig.tight_layout()
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test04():
    """
    Build a TProfiler from saved profile_data and exercise get_area() and
    get_slope(), with and without quality threshold, on log-log panels.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 04 para TProfiler")
    print("Testing functions get_area() and get_slopes()")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    fig = plt.figure(figsize=(12, 6))
    ax1, ax2, ax3, ax4 = (fig.add_subplot(2, 2, pos) for pos in range(1, 5))
    # Shared log-log slope-area setup for every panel
    for axis in (ax1, ax2, ax3, ax4):
        axis.set_xscale("log")
        axis.set_yscale("log")
        axis.set_xlim((1000000, 100000000))
        axis.set_ylim((0.001, 1))
    area = profile.get_area(True)
    ax1.plot(area, profile.get_slope(), "b+")
    ax1.set_title("Raw slopes (all)")
    ax2.plot(area, profile.get_slope(threshold=0.9), "b+")
    ax2.set_title("Slopes with threshold >= 0.9")
    slopes, low_q = profile.get_slope(threshold=0.9, lq=True)
    ax3.plot(area, low_q, "r+")
    ax3.plot(area, slopes, "b+")
    ax3.set_title("Slopes and low quality slopes (threshold 0.9)")
    slopes_h, low_q_h = profile.get_slope(threshold=0.9, lq=True, head=True)
    area_h = profile.get_area(head=True)
    ax4.plot(area_h, low_q_h, "r+")
    ax4.plot(area_h, slopes_h, "b+")
    ax4.set_title("Example 3 with head=True")
    fig.tight_layout(pad=1)
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test05():
    """
    Build a TProfiler from saved profile_data and exercise calculate_slope()
    over several regression sizes, from normal and from raw elevations.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 05 para TProfiler")
    print("Testing functions calculate slopes")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    fig = plt.figure(figsize=(12, 6))
    # One row per regression size: left = normal elevations, right = raw
    for row, reg_points in enumerate((4, 8, 12, 16)):
        ax_norm = fig.add_subplot(4, 2, 2 * row + 1)
        ax_raw = fig.add_subplot(4, 2, 2 * row + 2)
        profile.calculate_slope(reg_points)
        slopes = profile.get_slope()
        area = profile.get_area()
        ax_norm.plot(area, slopes, "b+")
        ax_norm.set_xscale("log")
        ax_norm.set_yscale("log")
        ax_norm.set_xlim((1000000, 100000000))
        ax_norm.set_ylim((0.001, 1))
        ax_norm.set_title("reg_points = " + str(reg_points) + " (normal elevations)")
        profile.calculate_slope(reg_points, True)
        ax_raw.plot(area, profile.get_slope(0.9), "b+")
        ax_raw.set_xscale("log")
        ax_raw.set_yscale("log")
        ax_raw.set_xlim((1000000, 100000000))
        ax_raw.set_ylim((0.001, 1))
        ax_raw.set_title("reg_points = " + str(reg_points) + " (raw elevations)")
    fig.tight_layout(pad=1)
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test06():
    """
    Build a TProfiler from saved profile_data and exercise calculate_chi()
    and get_chi() for a range of reference concavities (thetaref).
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 06 para TProfiler")
    print("Testing functions get_chi() and calculate_chi()")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    # Chi-elevation plots for thetaref = 0.35, 0.40, ..., 0.75
    fig = plt.figure()
    theta = 0.35
    for pos in range(1, 10):
        axis = fig.add_subplot(3, 3, pos)
        profile.thetaref = theta
        profile.calculate_chi()
        axis.plot(profile.get_chi(False, True), profile.get_z(False, True))
        axis.set_title("Thetaref = {0:.2f}".format(theta))
        theta += 0.05
    fig.tight_layout(pad=1)
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test07():
    """
    Build a TProfiler from saved profile_data and exercise get_ksn(),
    with and without quality threshold, from head and from mouth.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 07 para TProfiler")
    print("Testing function get_ksn()")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    fig = plt.figure(figsize=(12, 6))
    ax1, ax2, ax3, ax4 = (fig.add_subplot(2, 2, pos) for pos in range(1, 5))
    l_head = profile.get_l(True)
    ax1.plot(l_head, profile.get_ksn(), "b+")
    ax1.set_title("Raw ksn (all)")
    ax2.plot(l_head, profile.get_ksn(threshold=0.9), "b+")
    ax2.set_title("Ksn with threshold >= 0.9")
    ksn_vals, low_q = profile.get_ksn(threshold=0.9, lq=True)
    ax3.plot(l_head, low_q, "r+")
    ax3.plot(l_head, ksn_vals, "b+")
    ax3.set_title("Ksn and low quality ksn (threshold 0.9)")
    ksn_mouth, low_q_mouth = profile.get_ksn(threshold=0.9, lq=True, head=False)
    l_mouth = profile.get_l(head=False)
    ax4.plot(l_mouth, low_q_mouth, "r+")
    ax4.plot(l_mouth, ksn_mouth, "b+")
    ax4.set_title("Example 3 with head=False")
    fig.tight_layout(pad=1)
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test08():
    """
    Build a TProfiler from saved profile_data and exercise calculate_ksn()
    over several regression sizes, from normal and from raw elevations.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 08 para TProfiler")
    print("Testing functions calculate_ksn()")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    fig = plt.figure(figsize=(12, 6))
    # One row per regression size: left = normal elevations, right = raw
    for row, reg_points in enumerate((4, 8, 12, 16)):
        ax_norm = fig.add_subplot(4, 2, 2 * row + 1)
        ax_raw = fig.add_subplot(4, 2, 2 * row + 2)
        profile.calculate_ksn(reg_points)
        li = profile.get_l()
        ax_norm.plot(li, profile.get_ksn())
        ax_norm.set_title("KSN with reg_points = " + str(reg_points) + " (normal elevations)")
        profile.calculate_ksn(reg_points, raw_z=True)
        ax_raw.plot(li, profile.get_ksn())
        ax_raw.set_title("KSN with reg_points = " + str(reg_points) + " (raw elevations)")
    fig.tight_layout(pad=1)
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
def test09():
    """
    Build a TProfiler from saved profile_data and draw SL and ksn indexes
    along the profile on twin y-axes.
    """
    t0 = time.time()
    print("=" * 40)
    print("Test 09 para TProfiler")
    print("Testing ksn and SL plots")
    print("Test in progress...")
    # Input data: profile array plus the DEM that supplies SRS and cell size
    pf_data = np.load("data/in/darro_pfdata.npy")
    dem_raster = pr.open_raster("data/in/darro25.tif")
    profile = p.TProfile(pf_data, dem_raster.cellsize, srs=dem_raster.proj)
    reg_points = 12
    fig = plt.figure()
    axis = fig.add_subplot(111)
    profile.calculate_ksn(reg_points=reg_points)
    profile.calculate_slope(reg_points=reg_points)
    li = profile.get_l()
    # Distinct names for data arrays and line handles (previously shadowed)
    sl_values = profile.get_slope() * li
    ksn_values = profile.get_ksn()
    sl_line, = axis.plot(li, sl_values)
    axis.set_ylabel("SL index")
    axis.set_xlabel("Distance (m)")
    twin = axis.twinx()
    ksn_line, = twin.plot(li, ksn_values, color="r")
    twin.set_ylabel("Ksn index")
    twin.legend((sl_line, ksn_line), ("SL", "ksn"))
    plt.show()
    t1 = time.time()
    print("Test finalizado en " + str(t1 - t0) + " segundos")
    print("=" * 40)
# Run the full suite sequentially; each test blocks on plt.show() until
# its figure window is closed.
test01()
test02()
test03()
test04()
test05()
test06()
test07()
test08()
test09()
|
import os
import sys
import binascii
from smtplib import SMTPException
from django.db import models
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.signals import user_logged_in
from django.db.models.signals import post_save, post_migrate
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group, User, Permission
from django.utils import translation as django_translation
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives, get_connection
from django.utils.translation import LANGUAGE_SESSION_KEY
from social.apps.django_app.default.models import UserSocialAuth
from weblate.lang.models import Language
from weblate.trans.site import get_site_url, get_site_domain
from weblate.accounts.avatar import get_user_display
from weblate.trans.util import report_error
from weblate.trans.signals import user_pre_delete
from weblate import VERSION
from weblate.logger import LOGGER
from weblate.appsettings import ANONYMOUS_USER_NAME, SITE_TITLE
def send_mails(mails):
    """Sends multiple mails in single connection."""
    try:
        get_connection().send_messages(mails)
    except SMTPException as error:
        # Log and report, but never let a mail failure break the caller.
        LOGGER.error('Failed to send email: %s', error)
        report_error(error, sys.exc_info())
def get_author_name(user, email=True):
    """Returns formatted author name with email."""
    # Fall back to the username when the stored full name is empty.
    if user.first_name == '':
        full_name = user.username
    else:
        full_name = user.first_name
    if email:
        return '%s <%s>' % (full_name, user.email)
    return full_name
def notify_merge_failure(subproject, error, status):
    '''
    Notification on merge failure.

    Mails every profile subscribed to merge failures of the project, the
    project owners and the site admins. The ``users`` set (previously
    collected but never used) now prevents owners who are also subscribed
    from receiving the notification twice.
    '''
    subscriptions = Profile.objects.subscribed_merge_failure(
        subproject.project,
    )
    users = set()
    mails = []
    for subscription in subscriptions:
        mails.append(
            subscription.notify_merge_failure(subproject, error, status)
        )
        users.add(subscription.user_id)
    for owner in subproject.project.owners.all():
        # Skip owners already notified through their own subscription.
        if owner.id in users:
            continue
        mails.append(
            owner.profile.notify_merge_failure(
                subproject, error, status
            )
        )
    # Notify admins
    mails.append(
        get_notification_email(
            'en',
            'ADMINS',
            'merge_failure',
            subproject,
            {
                'subproject': subproject,
                'status': status,
                'error': error,
            }
        )
    )
    send_mails(mails)
def notify_new_string(translation):
    '''
    Notification on new string to translate.
    '''
    recipients = Profile.objects.subscribed_new_string(
        translation.subproject.project, translation.language
    )
    send_mails(
        [profile.notify_new_string(translation) for profile in recipients]
    )
def notify_new_language(subproject, language, user):
    '''
    Notify subscribed users about new language requests.

    Mails every profile subscribed to new-language requests, the project
    owners and the site admins. The ``users`` set (previously collected but
    never used) now prevents owners who are also subscribed from receiving
    the notification twice.
    '''
    mails = []
    subscriptions = Profile.objects.subscribed_new_language(
        subproject.project,
        user
    )
    users = set()
    for subscription in subscriptions:
        mails.append(
            subscription.notify_new_language(subproject, language, user)
        )
        users.add(subscription.user_id)
    for owner in subproject.project.owners.all():
        # Skip owners already notified through their own subscription.
        if owner.id in users:
            continue
        mails.append(
            owner.profile.notify_new_language(
                subproject, language, user
            )
        )
    # Notify admins
    mails.append(
        get_notification_email(
            'en',
            'ADMINS',
            'new_language',
            subproject,
            {
                'language': language,
                'user': user,
            },
            user=user,
        )
    )
    send_mails(mails)
def notify_new_translation(unit, oldunit, user):
    '''
    Notify subscribed users about new translation.
    '''
    recipients = Profile.objects.subscribed_any_translation(
        unit.translation.subproject.project,
        unit.translation.language,
        user
    )
    send_mails([
        profile.notify_any_translation(unit, oldunit)
        for profile in recipients
    ])
def notify_new_contributor(unit, user):
    '''
    Notify about new contributor.
    '''
    recipients = Profile.objects.subscribed_new_contributor(
        unit.translation.subproject.project,
        unit.translation.language,
        user
    )
    send_mails([
        profile.notify_new_contributor(unit.translation, user)
        for profile in recipients
    ])
def notify_new_suggestion(unit, suggestion, user):
    '''
    Notify about new suggestion.
    '''
    recipients = Profile.objects.subscribed_new_suggestion(
        unit.translation.subproject.project,
        unit.translation.language,
        user
    )
    send_mails([
        profile.notify_new_suggestion(unit.translation, suggestion, unit)
        for profile in recipients
    ])
def notify_new_comment(unit, comment, user, report_source_bugs):
    '''
    Notify about new comment.
    '''
    recipients = Profile.objects.subscribed_new_comment(
        unit.translation.subproject.project,
        comment.language,
        user
    )
    mails = [
        profile.notify_new_comment(unit, comment, user)
        for profile in recipients
    ]
    # Comments on source strings are also reported upstream when a bug
    # report address is configured.
    if comment.language is None and report_source_bugs != '':
        send_notification_email(
            'en',
            report_source_bugs,
            'new_comment',
            unit.translation,
            {
                'unit': unit,
                'comment': comment,
                'subproject': unit.translation.subproject,
            },
            user=user,
        )
    send_mails(mails)
def get_notification_email(language, email, notification,
                           translation_obj=None, context=None, headers=None,
                           user=None, info=None):
    '''
    Renders notification email.

    Returns an EmailMultiAlternatives (plain-text body with an HTML
    alternative attached); the caller is responsible for sending it.
    Passing email='ADMINS' expands to the addresses in settings.ADMINS.
    '''
    # Remember the currently active translation so it can be restored.
    cur_language = django_translation.get_language()
    context = context or {}
    headers = headers or {}
    references = None
    # Thread mails about the same unit via In-Reply-To/References headers.
    if 'unit' in context:
        unit = context['unit']
        references = '{0}/{1}/{2}/{3}'.format(
            unit.translation.subproject.project.slug,
            unit.translation.subproject.slug,
            unit.translation.language.code,
            unit.id
        )
    if references is not None:
        references = '<{0}@{1}>'.format(references, get_site_domain())
        headers['In-Reply-To'] = references
        headers['References'] = references
    try:
        if info is None:
            # NOTE(review): raises AttributeError when both info and
            # translation_obj are None — confirm callers always pass one.
            info = translation_obj.__unicode__()
        LOGGER.info(
            'sending notification %s on %s to %s',
            notification,
            info,
            email
        )
        # Load user language
        if language is not None:
            django_translation.activate(language)
        # Template name
        context['subject_template'] = 'mail/{}_subject.txt'.format(
            notification
        )
        # Adjust context
        context['current_site_url'] = get_site_url()
        if translation_obj is not None:
            context['translation'] = translation_obj
            context['translation_url'] = get_site_url(
                translation_obj.get_absolute_url()
            )
        context['site_title'] = SITE_TITLE
        # Render subject
        subject = render_to_string(
            context['subject_template'],
            context
        ).strip()
        # Render body
        body = render_to_string(
            'mail/{}.txt'.format(notification),
            context
        )
        html_body = render_to_string(
            'mail/{}.html'.format(notification),
            context
        )
        # Define headers
        headers['Auto-Submitted'] = 'auto-generated'
        headers['X-AutoGenerated'] = 'yes'
        headers['Precedence'] = 'bulk'
        headers['X-Mailer'] = 'Weblate {}'.format(VERSION)
        # Reply to header
        if user is not None:
            headers['Reply-To'] = user.email
        # List of recipients
        if email == 'ADMINS':
            emails = [a[1] for a in settings.ADMINS]
        else:
            emails = [email]
        # Create message (note: rebinds the `email` parameter)
        email = EmailMultiAlternatives(
            settings.EMAIL_SUBJECT_PREFIX + subject,
            body,
            to=emails,
            headers=headers,
        )
        email.attach_alternative(
            html_body,
            'text/html'
        )
        # Return the mail
        return email
    finally:
        # Always restore the language that was active on entry.
        django_translation.activate(cur_language)
def send_notification_email(language, email, notification,
                            translation_obj=None, context=None, headers=None,
                            user=None, info=None):
    '''
    Renders and sends notification email.
    '''
    # Render once, then deliver; avoids rebinding the `email` parameter.
    message = get_notification_email(
        language, email, notification, translation_obj, context, headers,
        user, info
    )
    send_mails([message])
class VerifiedEmail(models.Model):
    '''
    Storage for verified emails from auth backends.
    '''
    # Social-auth association this address was verified through.
    social = models.ForeignKey(UserSocialAuth)
    email = models.EmailField(max_length=254)

    def __unicode__(self):
        # "username - email" display form.
        return u'{0} - {1}'.format(
            self.social.user.username,
            self.email
        )
class ProfileManager(models.Manager):
    '''
    Manager providing shortcuts for subscription queries.

    Each method returns a queryset of profiles that opted in to one
    notification type for the given project (and usually language),
    excluding the acting user so people are not notified of their own
    actions.
    '''
    # pylint: disable=W0232
    def subscribed_any_translation(self, project, language, user):
        '''Profiles subscribed to any translation in project/language.'''
        return self.filter(
            subscribe_any_translation=True,
            subscriptions=project,
            languages=language
        ).exclude(
            user=user
        )

    def subscribed_new_language(self, project, user):
        '''Profiles subscribed to new language requests for project.'''
        return self.filter(
            subscribe_new_language=True,
            subscriptions=project,
        ).exclude(
            user=user
        )

    def subscribed_new_string(self, project, language):
        '''Profiles subscribed to new source strings in project/language.'''
        return self.filter(
            subscribe_new_string=True,
            subscriptions=project,
            languages=language
        )

    def subscribed_new_suggestion(self, project, language, user):
        '''Profiles subscribed to new suggestions in project/language.'''
        ret = self.filter(
            subscribe_new_suggestion=True,
            subscriptions=project,
            languages=language
        )
        # We don't want to filter out anonymous user
        if user is not None and user.is_authenticated():
            ret = ret.exclude(user=user)
        return ret

    def subscribed_new_contributor(self, project, language, user):
        '''Profiles subscribed to new contributors in project/language.'''
        return self.filter(
            subscribe_new_contributor=True,
            subscriptions=project,
            languages=language
        ).exclude(
            user=user
        )

    def subscribed_new_comment(self, project, language, user):
        '''Profiles subscribed to new comments in project (and language).'''
        ret = self.filter(
            subscribe_new_comment=True,
            subscriptions=project
        ).exclude(
            user=user
        )
        # Source comments go to every subscriber
        if language is not None:
            ret = ret.filter(languages=language)
        return ret

    def subscribed_merge_failure(self, project):
        '''Profiles subscribed to merge failures of project.'''
        return self.filter(subscribe_merge_failure=True, subscriptions=project)
class Profile(models.Model):
    '''
    User profiles storage.
    '''
    user = models.OneToOneField(User, unique=True, editable=False)
    # Preferred interface language (limited to configured LANGUAGES).
    language = models.CharField(
        verbose_name=_(u"Interface Language"),
        max_length=10,
        choices=settings.LANGUAGES
    )
    # Languages the user translates to; used by most subscription filters.
    languages = models.ManyToManyField(
        Language,
        verbose_name=_('Translated languages'),
        blank=True,
        help_text=_('Choose languages to which you can translate.')
    )
    secondary_languages = models.ManyToManyField(
        Language,
        verbose_name=_('Secondary languages'),
        related_name='secondary_profile_set',
        blank=True,
    )
    # Denormalized activity counters.
    suggested = models.IntegerField(default=0, db_index=True)
    translated = models.IntegerField(default=0, db_index=True)
    # Dashboard / zen-mode display preferences.
    hide_completed = models.BooleanField(
        verbose_name=_('Hide completed translations on dashboard'),
        default=False
    )
    secondary_in_zen = models.BooleanField(
        verbose_name=_('Show secondary translations in zen mode'),
        default=True
    )
    hide_source_secondary = models.BooleanField(
        verbose_name=_('Hide source if there is secondary language'),
        default=False
    )
    # Projects whose events the user subscribes to; the subscribe_* flags
    # below select which event types actually generate mail.
    subscriptions = models.ManyToManyField(
        'trans.Project',
        verbose_name=_('Subscribed projects'),
        blank=True,
    )
    subscribe_any_translation = models.BooleanField(
        verbose_name=_('Notification on any translation'),
        default=False
    )
    subscribe_new_string = models.BooleanField(
        verbose_name=_('Notification on new string to translate'),
        default=False
    )
    subscribe_new_suggestion = models.BooleanField(
        verbose_name=_('Notification on new suggestion'),
        default=False
    )
    subscribe_new_contributor = models.BooleanField(
        verbose_name=_('Notification on new contributor'),
        default=False
    )
    subscribe_new_comment = models.BooleanField(
        verbose_name=_('Notification on new comment'),
        default=False
    )
    subscribe_merge_failure = models.BooleanField(
        verbose_name=_('Notification on merge failure'),
        default=False
    )
    subscribe_new_language = models.BooleanField(
        verbose_name=_('Notification on new language request'),
        default=False
    )
    # Names of all subscription flag fields, e.g. for bulk form handling.
    SUBSCRIPTION_FIELDS = (
        'subscribe_any_translation',
        'subscribe_new_string',
        'subscribe_new_suggestion',
        'subscribe_new_contributor',
        'subscribe_new_comment',
        'subscribe_merge_failure',
        'subscribe_new_language',
    )
    objects = ProfileManager()
    def __unicode__(self):
        # Profiles display as the owning user's username.
        return self.user.username

    def get_user_display(self):
        # Delegates to avatar.get_user_display; flag semantics defined there.
        return get_user_display(self.user)

    def get_user_display_link(self):
        return get_user_display(self.user, True, True)

    def get_user_name(self):
        return get_user_display(self.user, False)

    @models.permalink
    def get_absolute_url(self):
        # Resolves the 'user_page' URL for this profile's user.
        return ('user_page', (), {
            'user': self.user.username
        })
    @property
    def last_change(self):
        '''
        Returns date of last change user has done in Weblate.

        Returns None when the user has no recorded changes.
        '''
        try:
            # NOTE(review): assumes change_set default ordering is
            # newest-first — confirm in trans.models.Change.
            return self.user.change_set.all()[0].timestamp
        except IndexError:
            return None
def notify_user(self, notification, translation_obj,
                context=None, headers=None, user=None):
    """
    Common wrapper for sending a notification email to this user.

    Returns the result of get_notification_email, or None when the
    user no longer has ACL access to the project.
    """
    context = {} if context is None else context
    headers = {} if headers is None else headers
    # Users who lost access to the project get nothing.
    if not translation_obj.has_acl(self.user):
        return
    return get_notification_email(
        self.language,
        self.user.email,
        notification,
        translation_obj,
        context,
        headers,
        user=user
    )
def notify_any_translation(self, unit, oldunit):
    """
    Sends a notification about a translation, distinguishing changed
    versus newly added translations.
    """
    template = (
        'changed_translation' if oldunit.translated else 'new_translation'
    )
    context = {
        'unit': unit,
        'oldunit': oldunit,
    }
    return self.notify_user(template, unit.translation, context)
def notify_new_language(self, subproject, language, user):
    """Sends a notification about a new language request."""
    context = {
        'language': language,
        'user': user,
    }
    return self.notify_user('new_language', subproject, context, user=user)
def notify_new_string(self, translation):
    """Sends a notification about new strings to translate."""
    return self.notify_user('new_string', translation)
def notify_new_suggestion(self, translation, suggestion, unit):
    """Sends a notification about a new suggestion."""
    context = {
        'suggestion': suggestion,
        'unit': unit,
    }
    return self.notify_user('new_suggestion', translation, context)
def notify_new_contributor(self, translation, user):
    """Sends a notification about a new contributor."""
    context = {'user': user}
    return self.notify_user('new_contributor', translation, context)
def notify_new_comment(self, unit, comment, user):
    """Sends a notification about a new comment on a unit."""
    context = {
        'unit': unit,
        'comment': comment,
        'subproject': unit.translation.subproject,
    }
    return self.notify_user(
        'new_comment', unit.translation, context, user=user,
    )
def notify_merge_failure(self, subproject, error, status):
    """Sends a notification about a merge failure."""
    context = {
        'subproject': subproject,
        'error': error,
        'status': status,
    }
    return self.notify_user('merge_failure', subproject, context)
@property
def full_name(self):
    '''
    Returns user's full name.
    '''
    # NOTE(review): only first_name is returned — presumably this
    # project stores the entire display name in first_name; confirm
    # before changing to first_name + last_name.
    return self.user.first_name
def set_lang(request, profile):
    """
    Sets session language based on user preferences.
    """
    # Stored under Django's LANGUAGE_SESSION_KEY so following requests
    # are served in the user's preferred language.
    request.session[LANGUAGE_SESSION_KEY] = profile.language
@receiver(user_logged_in)
def post_login_handler(sender, request, user, **kwargs):
    """
    Post-login signal handler: sets the session language and migrates
    the profile / email verification records when needed.
    """
    # Users who signed in via email auth without a password get a
    # one-off prompt to set one.
    backend = getattr(user, 'backend', '')
    if backend.endswith('.EmailAuth') and not user.has_usable_password():
        request.session['show_set_password'] = True

    # Ensure user has a profile
    profile = Profile.objects.get_or_create(user=user)[0]

    # Migrate django-registration based verification to python-social-auth
    has_email_social = user.social_auth.filter(provider='email').exists()
    if user.has_usable_password() and not has_email_social:
        social = user.social_auth.create(provider='email', uid=user.email)
        VerifiedEmail.objects.create(social=social, email=user.email)

    # Set language for session based on preferences
    set_lang(request, profile)
def create_groups(update):
    """
    Creates the standard groups (Guests, Users, Managers, Owners)
    with their permissions, and ensures the anonymous user exists
    and is disabled.

    Permissions are (re)assigned when a group is newly created or
    when ``update`` is true.
    """
    def fetch_permissions(codenames):
        # One Permission lookup per codename, preserving listed order;
        # raises Permission.DoesNotExist if a codename is missing.
        return [
            Permission.objects.get(codename=codename)
            for codename in codenames
        ]

    guest_group, created = Group.objects.get_or_create(name='Guests')
    if created or update:
        guest_group.permissions.add(*fetch_permissions([
            'can_see_git_repository',
            'add_suggestion',
        ]))

    group, created = Group.objects.get_or_create(name='Users')
    if created or update:
        group.permissions.add(*fetch_permissions([
            'upload_translation',
            'overwrite_translation',
            'save_translation',
            'save_template',
            'accept_suggestion',
            'delete_suggestion',
            'vote_suggestion',
            'ignore_check',
            'upload_dictionary',
            'add_dictionary',
            'change_dictionary',
            'delete_dictionary',
            'lock_translation',
            'can_see_git_repository',
            'add_comment',
            'add_suggestion',
            'use_mt',
        ]))

    # Shared by both Managers and Owners.
    owner_permissions = tuple(fetch_permissions([
        'author_translation',
        'upload_translation',
        'overwrite_translation',
        'commit_translation',
        'update_translation',
        'push_translation',
        'automatic_translation',
        'save_translation',
        'save_template',
        'accept_suggestion',
        'vote_suggestion',
        'override_suggestion',
        'delete_comment',
        'delete_suggestion',
        'ignore_check',
        'upload_dictionary',
        'add_dictionary',
        'change_dictionary',
        'delete_dictionary',
        'lock_subproject',
        'reset_translation',
        'lock_translation',
        'can_see_git_repository',
        'add_comment',
        'delete_comment',  # duplicated in the original list; harmless
        'add_suggestion',
        'use_mt',
        'edit_priority',
        'edit_flags',
        'manage_acl',
        'download_changes',
        'view_reports',
    ]))

    group, created = Group.objects.get_or_create(name='Managers')
    if created or update:
        group.permissions.add(*owner_permissions)

    group, created = Group.objects.get_or_create(name='Owners')
    if created or update:
        group.permissions.add(*owner_permissions)

    created = True
    try:
        anon_user = User.objects.get(username=ANONYMOUS_USER_NAME)
        created = False
        # An enabled anonymous account is a misconfiguration.
        if anon_user.is_active:
            raise ValueError(
                'Anonymous user ({}) already exists and enabled, '
                'please change ANONYMOUS_USER_NAME setting.'.format(
                    ANONYMOUS_USER_NAME,
                )
            )
    except User.DoesNotExist:
        anon_user = User.objects.create(
            username=ANONYMOUS_USER_NAME,
            is_active=False,
        )
    if created or update:
        anon_user.set_unusable_password()
        anon_user.groups.clear()
        anon_user.groups.add(guest_group)
def move_users():
    """Adds every existing user to the default 'Users' group."""
    default_group = Group.objects.get(name='Users')
    for account in User.objects.all():
        account.groups.add(default_group)
def remove_user(user):
    """
    Anonymizes and disables a user account instead of deleting the
    database row, keeping history references intact.
    """
    # Send signal (to commit any pending changes)
    user_pre_delete.send(instance=user, sender=user.__class__)

    # Rename to a placeholder, adding random suffixes until unique.
    user.username = 'deleted-{0}'.format(user.pk)
    while User.objects.filter(username=user.username).exists():
        suffix = binascii.b2a_hex(os.urandom(5))
        user.username = 'deleted-{0}-{1}'.format(user.pk, suffix)

    # Strip personal information.
    user.first_name = 'Deleted User'
    user.last_name = ''
    user.email = 'noreply@weblate.org'

    # Lock the account.
    user.is_active = False
    user.set_unusable_password()
    user.save()

    # Remove all social auth associations
    user.social_auth.all().delete()
@receiver(post_migrate)
def sync_create_groups(sender, **kwargs):
    """Creates the default groups after this app's migrations run."""
    if sender.label != 'accounts':
        return
    create_groups(False)
@receiver(post_save, sender=User)
def create_profile_callback(sender, instance, created=False, **kwargs):
    """
    Automatically adds newly created users to the 'Users' group.
    """
    if not created:
        return
    # The group may not exist yet (fresh install before migrations).
    try:
        instance.groups.add(Group.objects.get(name='Users'))
    except Group.DoesNotExist:
        pass
|
from __future__ import unicode_literals, absolute_import
import logging
import json
from django.utils.dateparse import parse_datetime
from django.utils import timezone
from wechatpy.exceptions import WeChatClientException
from common import wechat_client
from .local_parser import LocalParser
from remind.models import Remind
from .exceptions import ParseError
logger = logging.getLogger(__name__)
def parse(text, **kwargs):
    """Returns a Remind"""
    # Rule-based parsing goes first; the wechat API is only a fallback
    # since it is unstable and inaccurate.
    logger.info('Trying to parse "%s" using rules.', text)
    reminder = LocalParser().parse_by_rules(text)
    if not reminder:
        logger.info('Failed to parse time from "%s" using rules, try wechat api.', text)
        reminder = parse_by_wechat_api(text, **kwargs)
    # GMT and UTC time can compare with each other; reject past times.
    if reminder.time <= timezone.now():
        raise ParseError('/:no%s已经过去了,请重设一个将来的提醒。\n\n消息: %s' % (
            reminder.time.strftime('%Y-%m-%d %H:%M'), text))
    return reminder
def parse_by_wechat_api(text, **kwargs):
    """
    {
        "errcode": 0,
        "query": "提醒我上午十点开会",
        "semantic": {
            "details": {
                "answer": "",
                "context_info": {},
                "datetime": {
                    "date": "2015-12-23",
                    "date_lunar": "2015-11-13",
                    "time": "10:00:00",
                    "time_ori": "上午十点",
                    "type": "DT_ORI",
                    "week": "3"
                },
                "event": "开会",
                "hit_str": "提醒 我 上午 十点 开会 ",
                "remind_type": "0"
            },
            "intent": "SEARCH"
        },
        "type": "remind"
    }
    """
    try:
        wechat_result = wechat_client.semantic.search(
            query=text,
            category='remind',
            city='上海',  # F**k, weixin always needs the city param, hard-code one.
            **kwargs
        )
    except WeChatClientException as e:
        logger.info('Failed to parse using wechat api ' + str(e))
        raise
    # wechat_result = json.loads(parse_by_wechat_api.__doc__)
    logger.debug('Semantic result from wechat, %s',
                 json.dumps(wechat_result, ensure_ascii=False))

    # there could be nothing in details — a KeyError propagates here.
    details = wechat_result['semantic']['details']
    when = details['datetime']
    # Assume China Standard Time (+08:00) for the returned local time.
    parsed_time = parse_datetime('%s %s+08:00' % (when['date'], when['time']))
    return Remind(time=parsed_time,
                  desc=wechat_result.get('query', ''),
                  event=details.get('event', ''))
def parse_by_boson(text):
    # TODO: unimplemented alternative parser (presumably BosonNLP);
    # currently a stub that always returns None.
    pass
|
from settings import CONTENT_SERVER
"""
context processor applied to all requests
"""
def settings_cp(request):
return {'content_server': CONTENT_SERVER}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.