repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
abhishekgahlot/or-tools | examples/python/nonogram_table2.py | 34 | 6833 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nonogram (Painting by numbers) in Google CP Solver.
http://en.wikipedia.org/wiki/Nonogram
'''
Nonograms or Paint by Numbers are picture logic puzzles in which cells in a
grid have to be colored or left blank according to numbers given at the
side of the grid to reveal a hidden picture. In this puzzle type, the
numbers measure how many unbroken lines of filled-in squares there are
in any given row or column. For example, a clue of '4 8 3' would mean
there are sets of four, eight, and three filled squares, in that order,
with at least one blank square between successive groups.
'''
See problem 12 at http://www.csplib.org/.
http://www.puzzlemuseum.com/nonogram.htm
Haskell solution:
http://twan.home.fmf.nl/blog/haskell/Nonograms.details
Brunetti, Sara & Daurat, Alain (2003)
'An algorithm reconstructing convex lattice sets'
http://geodisi.u-strasbg.fr/~daurat/papiers/tomoqconv.pdf
The Comet model (http://www.hakank.org/comet/nonogram_regular.co)
was a major influence when writing this Google CP solver model.
I have also blogged about the development of a Nonogram solver in Comet
using the regular constraint.
* 'Comet: Nonogram improved: solving problem P200 from 1:30 minutes
to about 1 second'
http://www.hakank.org/constraint_programming_blog/2009/03/comet_nonogram_improved_solvin_1.html
* 'Comet: regular constraint, a much faster Nonogram with the regular
constraint,
some OPL models, and more'
http://www.hakank.org/constraint_programming_blog/2009/02/comet_regular_constraint_a_muc_1.html
Compare with the other models:
* Gecode/R: http://www.hakank.org/gecode_r/nonogram.rb (using 'regexps')
* MiniZinc: http://www.hakank.org/minizinc/nonogram_regular.mzn
* MiniZinc: http://www.hakank.org/minizinc/nonogram_create_automaton.mzn
* MiniZinc: http://www.hakank.org/minizinc/nonogram_create_automaton2.mzn
Note: nonogram_create_automaton2.mzn is the preferred model
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
#
# Make a transition (automaton) list of tuples from a
# single pattern, e.g. [3,2,1]
#
def make_transition_tuples(pattern):
    """Build the transition table of a DFA accepting exactly the 0/1
    strings that match one nonogram clue.

    Args:
      pattern: list of positive run lengths, e.g. [3, 2, 1].

    Returns:
      (tuples, last_state) where tuples is a list of
      (state, input_value, next_state) transitions, states are numbered
      from 1, and last_state is the single accepting state.
    """
    p_len = len(pattern)
    # One state per filled cell plus one "gap" state per run, plus the
    # implicit start gap folded into state 1.
    num_states = p_len + sum(pattern)

    # Special case for a 0-clue (empty pattern): a single state that
    # loops on 0 and accepts.
    if num_states == 0:
        return ([(1, 0, 1)], 1)

    # Expand the clue into an explicit 0/1 template, e.g. [3, 1] ->
    # [0, 1, 1, 1, 0, 1, 0].  State i+1 corresponds to template cell i.
    # (Iterate the runs directly; the original indexed the list and
    # carried an unused counter variable.)
    tmp = [0]
    for run_length in pattern:
        tmp.extend([1] * run_length)
        tmp.append(0)

    tuples = []
    for i in range(num_states):
        state = i + 1
        if tmp[i] == 0:
            # Gap position: stay on 0, start the next run on 1.
            tuples.append((state, 0, state))
            tuples.append((state, 1, state + 1))
        elif i < num_states - 1:
            # Inside a run: a 1 continues it, a 0 must terminate it.
            if tmp[i + 1] == 1:
                tuples.append((state, 1, state + 1))
            else:
                tuples.append((state, 0, state + 1))
    # The final (accepting) state absorbs trailing 0s.
    tuples.append((num_states, 0, num_states))
    return (tuples, num_states)
#
# check each rule by creating an automaton and transition constraint.
#
def check_rule(rules, y):
    """Post one TransitionConstraint (regular constraint) on the row or
    column variables y for the clue in rules.

    Zero entries in rules are padding and are stripped before the
    automaton is built.  The automaton always starts in state 1 and has
    a single accepting state.
    """
    clue = [r for r in rules if r > 0]
    transition_tuples, last_state = make_transition_tuples(clue)
    solver = y[0].solver()
    solver.Add(solver.TransitionConstraint(y,
                                           transition_tuples,
                                           1,             # initial state
                                           [last_state]))  # accepting states
def main(rows, row_rule_len, row_rules, cols, col_rule_len, col_rules):
    """Model and solve the nonogram with the CP solver (Python 2 script).

    rows/cols give the board size; *_rule_len is the (zero-padded) number
    of clue entries per row/column; *_rules are the clue lists.  Prints up
    to two solutions plus search statistics.
    """
    # Create the solver.
    solver = pywrapcp.Solver('Regular test')

    #
    # variables
    #
    # board[i, j] is 1 when cell (i, j) is filled, 0 when blank.
    board = {}
    for i in range(rows):
        for j in range(cols):
            board[i, j] = solver.IntVar(0, 1, 'board[%i, %i]' % (i, j))
    # NOTE(review): board_flat is built but never used below.
    board_flat = [board[i, j] for i in range(rows) for j in range(cols)]

    # Flattened board for labeling.
    # This labeling was inspired by a suggestion from
    # Pascal Van Hentenryck about my Comet nonogram model.
    # Branch row-wise or column-wise, whichever side has the smaller
    # total clue size.
    board_label = []
    if rows * row_rule_len < cols * col_rule_len:
        for i in range(rows):
            for j in range(cols):
                board_label.append(board[i, j])
    else:
        for j in range(cols):
            for i in range(rows):
                board_label.append(board[i, j])

    #
    # constraints
    #
    # One automaton (regular) constraint per row and per column.
    for i in range(rows):
        check_rule(row_rules[i], [board[i, j] for j in range(cols)])
    for j in range(cols):
        check_rule(col_rules[j], [board[i, j] for i in range(rows)])

    #
    # solution and search
    #
    db = solver.Phase(board_label,
                      solver.CHOOSE_FIRST_UNBOUND,
                      solver.ASSIGN_MIN_VALUE)

    print 'before solver, wall time = ', solver.WallTime(), 'ms'
    solver.NewSearch(db)

    num_solutions = 0
    while solver.NextSolution():
        print
        num_solutions += 1
        # Render the board: '#' for filled cells, ' ' for blanks.
        for i in range(rows):
            row = [board[i, j].Value() for j in range(cols)]
            row_pres = []
            for j in row:
                if j == 1:
                    row_pres.append('#')
                else:
                    row_pres.append(' ')
            print ' ', ''.join(row_pres)
        print
        print ' ', '-' * cols
        # Stop early: a well-posed nonogram has a unique solution.
        if num_solutions >= 2:
            print '2 solutions is enough...'
            break
    solver.EndSearch()

    print
    print 'num_solutions:', num_solutions
    print 'failures:', solver.Failures()
    print 'branches:', solver.Branches()
    print 'WallTime:', solver.WallTime(), 'ms'
#
# Default problem
#
# From http://twan.home.fmf.nl/blog/haskell/Nonograms.details
# The lambda picture
#
rows = 12
row_rule_len = 3
# Row clues, left-padded with zeros to row_rule_len entries each.
row_rules = [
    [0, 0, 2],
    [0, 1, 2],
    [0, 1, 1],
    [0, 0, 2],
    [0, 0, 1],
    [0, 0, 3],
    [0, 0, 3],
    [0, 2, 2],
    [0, 2, 1],
    [2, 2, 1],
    [0, 2, 3],
    [0, 2, 2]
]

cols = 10
col_rule_len = 2
# Column clues, left-padded with zeros to col_rule_len entries each.
col_rules = [
    [2, 1],
    [1, 3],
    [2, 4],
    [3, 4],
    [0, 4],
    [0, 3],
    [0, 3],
    [0, 3],
    [0, 2],
    [0, 2]
]

if __name__ == '__main__':
    if len(sys.argv) > 1:
        # An optional argument names a Python file that redefines the
        # problem globals (rows, row_rules, ...) before solving.
        file = sys.argv[1]
        execfile(file)  # Python 2 builtin; removed in Python 3
    main(rows, row_rule_len, row_rules, cols, col_rule_len, col_rules)
| apache-2.0 |
adrixcx/netpln-project | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15  # seconds to back off after a failed/short RPC response
MAX_NONCE = 1000000L  # initial nonce search space per getwork (Python 2 long)

settings = {}  # filled from the key=value config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2:
    uses httplib and print statements)."""

    OBJID = 1  # request id counter; += below creates a per-instance copy

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header, reused for every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Single persistent connection; 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the result, the error object,
        or None on transport/decode failure."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE(review): on error the error *object* is returned, which
        # callers cannot distinguish from a successful result.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # With data=None this fetches work; with a solution it submits it.
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    # 0xffffffff without the Python-2-only 'L' suffix: Python 2 ints
    # auto-promote to long, so the masked value is identical, and the
    # literal also parses under Python 3.
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of the 32-bit word x (endianness flip)."""
    swapped = ((x << 24) |
               ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) |
               (x >> 24))
    return uint32(swapped)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of in_buf (a Python 2 str buffer
    whose length is a multiple of 4)."""
    words = []
    for offset in range(0, len(in_buf), 4):
        (value,) = struct.unpack('@I', in_buf[offset:offset + 4])
        words.append(struct.pack('@I', bytereverse(value)))
    return ''.join(words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in in_buf; bytes inside
    each word keep their order.  A trailing partial word is preserved."""
    chunks = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    chunks.reverse()
    return ''.join(chunks)
class Miner:
    """One getwork mining worker (Python 2: xrange, long, str.encode('hex')).

    Repeatedly fetches work from the RPC node, scans a nonce range with
    double-SHA256, and submits any nonce whose hash beats the target.
    """

    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE  # adapted each iteration to hit 'scantime'

    def work(self, datastr, targetstr):
        """Scan nonces for the given hex-encoded block data and target.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce or None if the range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header (shared by every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        # Range exhausted without a winner.
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original hex work data and
        push it upstream via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Nonce occupies hex chars [152:160) of the 256-char data field.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One fetch/scan/submit cycle; sleeps on malformed responses."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start

        # Resize the next scan so one iterate() takes ~'scantime' seconds,
        # capped just below 2^32 (the nonce is a 32-bit field).
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC node."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Entry point for one worker process: construct a Miner and run it
    until the process is killed."""
    worker = Miner(id)
    worker.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse a simple key=value config file, skipping '#' comment lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for optional settings; rpcuser/rpcpass are mandatory.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Launch one mining *process* per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
gandalfcode/gandalf | tests/paper_tests/adsodtest.py | 1 | 7950 | #==============================================================================
# adsodtest.py
#==============================================================================
from gandalf.analysis.facade import *
import matplotlib.pyplot as plt
from matplotlib import rc
from mpl_toolkits.axes_grid1 import AxesGrid
import time

# Global matplotlib styling for the paper figure.
#rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('font', **{'family': 'normal', 'weight' : 'bold', 'size' : 14})
rc('text', usetex=True)

# Set all plot limits (shared by every panel in the 3x3 grid below).
xmin = -9.9
xmax = 9.9
rhomin = 0.15
rhomax = 1.15
vxmin = -0.15
vxmax = 0.95
umin = 1.45
umax = 2.98

# Lattice particle counts: Nleft/Nright give equal-mass particles across
# the 4:1 density jump; Nunequal uses the same count on both sides,
# i.e. unequal-mass particles.
Nleft = 240
Nright = 60
Nunequal = 60
#Nleft = 80
#Nright = 20
# Each run below follows the same pattern: create the simulation from a
# parameter file, override the lattice counts, set up, run, and pull the
# final-snapshot particle arrays (x, rho, vx, u).

# Extract data from Grad-h SPH simulation (equal-mass particles)
gradhsphsim = newsim("adsod-gradhsph.dat")
gradhsphsim.SetParam('Nlattice1[0]',Nleft)
gradhsphsim.SetParam('Nlattice2[0]',Nright)
setupsim()
run()
x0 = get_data('x')  #, sim=0, snap=10)
rho0 = get_data('rho')  #, sim=0, snap=10)
vx0 = get_data('vx')  #, sim=0, snap=10)
u0 = get_data('u')  #, sim=0, snap=10)

# Extract data from Grad-h SPH simulation (unequal-mass particles)
gradhsphsim_unequal = newsim("adsod-gradhsph.dat")
gradhsphsim_unequal.SetParam('Nlattice1[0]',Nunequal)
gradhsphsim_unequal.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x1 = get_data('x')  #, sim=0, snap=10)
rho1 = get_data('rho')  #, sim=0, snap=10)
vx1 = get_data('vx')  #, sim=0, snap=10)
u1 = get_data('u')  #, sim=0, snap=10)

# Extract data from MFV simulation (equal-mass particles)
mfvsim = newsim("adsod-mfv-moving.dat")
mfvsim.SetParam('Nlattice1[0]',Nleft)
mfvsim.SetParam('Nlattice2[0]',Nright)
setupsim()
run()
x2 = get_data('x')  #, sim=1, snap=10)
rho2 = get_data('rho')  #, sim=1, snap=10)
vx2 = get_data('vx')  #, sim=1, snap=10)
u2 = get_data('u')  #, sim=1, snap=10)

# Extract data from MFV simulation (unequal-mass particles)
mfvsim_unequal = newsim("adsod-mfv-moving.dat")
mfvsim_unequal.SetParam('Nlattice1[0]',Nunequal)
mfvsim_unequal.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x3 = get_data('x')  #, sim=1, snap=10)
rho3 = get_data('rho')  #, sim=1, snap=10)
vx3 = get_data('vx')  #, sim=1, snap=10)
u3 = get_data('u')  #, sim=1, snap=10)

# Extract data from MFM simulation, exact Riemann solver (equal-mass)
mfmsim = newsim("adsod-mfm-moving.dat")
mfmsim.SetParam("riemann_solver", "exact")
setupsim()
run()
x4 = get_data('x')  #, sim=2, snap=10)
rho4 = get_data('rho')  #, sim=2, snap=10)
vx4 = get_data('vx')  #, sim=2, snap=10)
u4 = get_data('u')  #, sim=2, snap=10)

# Extract data from MFM simulation, exact Riemann solver (unequal-mass)
mfmsim_unequal = newsim("adsod-mfm-moving.dat")
mfmsim_unequal.SetParam("riemann_solver", "exact")
mfmsim_unequal.SetParam('Nlattice1[0]',Nunequal)
mfmsim_unequal.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x5 = get_data('x')  #, sim=1, snap=10)
rho5 = get_data('rho')  #, sim=1, snap=10)
vx5 = get_data('vx')  #, sim=1, snap=10)
u5 = get_data('u')  #, sim=1, snap=10)

# Extract data from MFM simulation, HLLC Riemann solver (equal-mass).
# NOTE(review): the HLLC results (x6..u7) are only used by the
# commented-out fourth plot column below.
mfmsim_hllc = newsim("adsod-mfm-moving.dat")
mfmsim_hllc.SetParam("riemann_solver", "hllc")
setupsim()
run()
x6 = get_data('x')  #, sim=2, snap=10)
rho6 = get_data('rho')  #, sim=2, snap=10)
vx6 = get_data('vx')  #, sim=2, snap=10)
u6 = get_data('u')  #, sim=2, snap=10)

# Extract data from MFM simulation, HLLC Riemann solver (unequal-mass)
mfmsim_unequal_hllc = newsim("adsod-mfm-moving.dat")
mfmsim_unequal_hllc.SetParam("riemann_solver", "hllc")
mfmsim_unequal_hllc.SetParam('Nlattice1[0]',Nunequal)
mfmsim_unequal_hllc.SetParam('Nlattice2[0]',Nunequal)
setupsim()
run()
x7 = get_data('x')  #, sim=1, snap=10)
rho7 = get_data('rho')  #, sim=1, snap=10)
vx7 = get_data('vx')  #, sim=1, snap=10)
u7 = get_data('u')  #, sim=1, snap=10)

# Extract data for analytical solution
rhodata = get_analytical_data("x","rho")  #,sim=0,snap=10)
vxdata = get_analytical_data("x","vx")  #,sim=0,snap=10)
udata = get_analytical_data("x","u")  #,sim=0,snap=10)
# Create matplotlib figure object with shared x-axis.
# Grid layout: columns are Gradh-SPH / MFV / MFM; rows are rho, vx, u.
fig, axarr = plt.subplots(3, 3, sharex='col', sharey='row', figsize=(13,9))
fig.subplots_adjust(hspace=0.0001, wspace=0.0001)
fig.subplots_adjust(bottom=0.07, top=0.99, left=0.045, right=0.99)

# Column 0: Grad-h SPH
axarr[0,0].set_ylabel(r"$\rho$")
axarr[0,0].set_xlim([xmin, xmax])
axarr[0,0].set_ylim([rhomin, rhomax])
axarr[0,0].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
axarr[0,0].scatter(x0, rho0, marker='o', facecolors='none', edgecolors='blue', s=10, label='Gradh-SPH, equal-mass')
axarr[0,0].scatter(x1, rho1, color='black', marker='+', s=32, label='Gradh-SPH, unequal-mass')
axarr[0,0].legend(fontsize=10)
axarr[1,0].set_ylabel(r"$v_x$")
axarr[1,0].set_ylim([vxmin, vxmax])
axarr[1,0].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
axarr[1,0].scatter(x1, vx1, color='black', marker='+', s=32)
axarr[1,0].scatter(x0, vx0, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[2,0].set_xlabel(r"$x$")
axarr[2,0].set_ylabel(r"$u$")
axarr[2,0].set_ylim([umin, umax])
axarr[2,0].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
axarr[2,0].scatter(x1, u1, color='black', marker='+', s=32)
axarr[2,0].scatter(x0, u0, marker='o', facecolors='none', edgecolors='blue', s=10)

# Column 1: MFV
axarr[0,1].set_xlim([xmin, xmax])
axarr[0,1].set_ylim([rhomin, rhomax])
axarr[0,1].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
axarr[0,1].scatter(x2, rho2, marker='o', facecolors='none', edgecolors='blue', s=10, label='MFV, equal-mass')
axarr[0,1].scatter(x3, rho3, color='black', marker='+', s=32, label='MFV, unequal-mass')
axarr[0,1].legend(fontsize=10)
axarr[1,1].set_ylim([vxmin, vxmax])
axarr[1,1].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
axarr[1,1].scatter(x3, vx3, color='black', marker='+', s=32)
axarr[1,1].scatter(x2, vx2, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[2,1].set_xlabel(r"$x$")
axarr[2,1].set_ylim([umin, umax])
axarr[2,1].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
axarr[2,1].scatter(x3, u3, color='black', marker='+', s=32)
axarr[2,1].scatter(x2, u2, marker='o', facecolors='none', edgecolors='blue', s=10)

# Column 2: MFM (exact Riemann solver)
axarr[0,2].set_xlim([xmin, xmax])
axarr[0,2].set_ylim([rhomin, rhomax])
axarr[0,2].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
axarr[0,2].scatter(x4, rho4, marker='o', facecolors='none', edgecolors='blue', s=10, label='MFM, equal-mass')
axarr[0,2].scatter(x5, rho5, color='black', marker='+', s=32, label='MFM, unequal-mass')
axarr[0,2].legend(fontsize=10)
axarr[1,2].set_ylim([vxmin, vxmax])
axarr[1,2].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
axarr[1,2].scatter(x5, vx5, color='black', marker='+', s=32)
axarr[1,2].scatter(x4, vx4, marker='o', facecolors='none', edgecolors='blue', s=10)
axarr[2,2].set_xlabel(r"$x$")
axarr[2,2].set_ylim([umin, umax])
axarr[2,2].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
axarr[2,2].scatter(x5, u5, color='black', marker='+', s=32)
axarr[2,2].scatter(x4, u4, marker='o', facecolors='none', edgecolors='blue', s=10)

# Disabled fourth column: MFM with HLLC Riemann solver (data x6..u7).
#axarr[0,3].set_xlim([xmin, xmax])
#axarr[0,3].set_ylim([rhomin, rhomax])
#axarr[0,3].plot(rhodata.x_data, rhodata.y_data, linestyle='-', color="red", label='Exact solution')
#axarr[0,3].scatter(x6, rho6, color='black', marker='+', s=32, label='MFM-HLLC, equal-mass')
#axarr[0,3].scatter(x7, rho7, marker='o', facecolors='none', edgecolors='blue', s=10, label='MFM-HLLC, unequal-mass')
#axarr[0,3].legend(fontsize=10)
#axarr[1,3].set_ylim([vxmin, vxmax])
#axarr[1,3].plot(vxdata.x_data, vxdata.y_data, linestyle='-', color="red")
#axarr[1,3].scatter(x6, vx6, color='black', marker='+', s=32)
#axarr[1,3].scatter(x7, vx7, marker='o', facecolors='none', edgecolors='blue', s=10)
#axarr[2,3].set_xlabel(r"$x$")
#axarr[2,3].set_ylim([umin, umax])
#axarr[2,3].plot(udata.x_data, udata.y_data, linestyle='-', color="red")
#axarr[2,3].scatter(x6, u6, color='black', marker='+', s=32)
#axarr[2,3].scatter(x7, u7, marker='o', facecolors='none', edgecolors='blue', s=10)

plt.show()
fig.savefig('adsod.pdf', dpi=50)
| gpl-2.0 |
trankmichael/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
    from nose.tools import assert_less

    # Only defined when nose is installed.
    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as the scikit's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
    pass
try:
    from nose.tools import assert_greater

    # Only defined when nose is installed.
    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
    pass
def test_assert_less_equal():
    """assert_less_equal accepts a <= b and rejects a > b."""
    for a, b in ((0, 1), (1, 1)):
        assert_less_equal(a, b)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    """assert_greater_equal accepts a >= b and rejects a < b."""
    for a, b in ((1, 0), (1, 1)):
        assert_greater_equal(a, b)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    """set_random_state sets random_state when the estimator has one and
    is a no-op otherwise."""
    lda = LDA()
    tree = DecisionTreeClassifier()
    # LDA doesn't have random state: smoke test
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    """Exercise assert_raise_message for matching, mismatching and
    missing exceptions."""
    def _raise_ValueError(message):
        raise ValueError(message)

    def _no_raise():
        pass

    # Expected exception with the expected message: passes.
    assert_raise_message(ValueError, "test",
                         _raise_ValueError, "test")

    # Right exception, wrong message: AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")

    # Wrong expected exception type: the real ValueError propagates.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")

    # Callable raises nothing: AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)

    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):

    def test_warn(self):
        """assert_warns catches the warning, returns f's result, and
        resets the global warning filters."""
        def f():
            warnings.warn("yo")
            return 3

        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)

        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])

        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

    def test_warn_wrong_warning(self):
        """assert_warns must not accept a warning of a different class."""
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        # Save and restore the filters so this test has no global effect.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
gauravbose/digital-menu | digimenu2/tests/user_commands/management/commands/hal.py | 372 | 1024 | from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """HAL-9000-themed management command used by Django's test suite to
    exercise argument/option handling."""
    help = "Useless command."

    def add_arguments(self, parser):
        # NOTE(review): "to works on" typo in the help string left as-is;
        # the exact text may be asserted by the surrounding tests.
        parser.add_argument('args', metavar='app_label', nargs='*',
            help='Specify the app label(s) to works on.')
        parser.add_argument('--empty', action='store_true', dest='empty', default=False,
            help="Do nothing.")

    def handle(self, *app_labels, **options):
        app_labels = set(app_labels)

        # --empty short-circuits everything else.
        if options['empty']:
            self.stdout.write("Dave, I can't do that.")
            return

        # At least one app label is required.
        if not app_labels:
            raise CommandError("I'm sorry Dave, I'm afraid I can't do that.")

        # raise an error if some --parameter is flowing from options to args
        for app_label in app_labels:
            if app_label.startswith('--'):
                raise CommandError("Sorry, Dave, I can't let you do that.")

        self.stdout.write("Dave, my mind is going. I can feel it. I can feel it.")
| bsd-3-clause |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/ad_group_ad_rotation_mode.py | 1 | 1211 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Generated proto-plus module metadata: declares the message manifest for
# this package (file lives under googleapis-gen; do not edit by hand).
__protobuf__ = proto.module(
    package='google.ads.googleads.v6.enums',
    marshal='google.ads.googleads.v6',
    manifest={
        'AdGroupAdRotationModeEnum',
    },
)


class AdGroupAdRotationModeEnum(proto.Message):
    r"""Container for enum describing possible ad rotation modes of
    ads within an ad group.
    """
    class AdGroupAdRotationMode(proto.Enum):
        r"""The possible ad rotation modes of an ad group."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        OPTIMIZE = 2
        ROTATE_FOREVER = 3


__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
kuri65536/python-for-android | python-modules/twisted/twisted/names/cache.py | 55 | 2752 | # -*- test-case-name: twisted.names.test -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import time
from zope.interface import implements
from twisted.names import dns
from twisted.python import failure, log
from twisted.internet import interfaces, defer
import common
class CacheResolver(common.ResolverBase):
    """A resolver that serves records from a local, memory cache."""

    implements(interfaces.IResolver)  # zope.interface, Python 2 style

    cache = None  # maps dns.Query -> (insertion_time, (ans, auth, add))

    def __init__(self, cache = None, verbose = 0):
        common.ResolverBase.__init__(self)

        if cache is None:
            cache = {}
        self.cache = cache
        self.verbose = verbose
        self.cancel = {}  # dns.Query -> IDelayedCall that expires the entry

    def __setstate__(self, state):
        # After unpickling, drop every entry with at least one expired TTL.
        # Deleting while iterating is safe on Python 2: items() is a list.
        self.__dict__ = state
        now = time.time()
        for (k, (when, (ans, add, ns))) in self.cache.items():
            diff = now - when
            for rec in ans + add + ns:
                if rec.ttl < diff:
                    del self.cache[k]
                    break

    def __getstate__(self):
        # Pending expiry timers cannot be pickled; cancel them first.
        for c in self.cancel.values():
            c.cancel()
        self.cancel.clear()
        return self.__dict__

    def _lookup(self, name, cls, type, timeout):
        """Serve a query from the cache with TTLs aged by time-in-cache,
        or fail with DomainError on a miss."""
        now = time.time()
        q = dns.Query(name, type, cls)
        try:
            when, (ans, auth, add) = self.cache[q]
        except KeyError:
            if self.verbose > 1:
                log.msg('Cache miss for ' + repr(name))
            return defer.fail(failure.Failure(dns.DomainError(name)))
        else:
            if self.verbose:
                log.msg('Cache hit for ' + repr(name))
            diff = now - when
            # Rebuild the headers with the remaining TTL.
            return defer.succeed((
                [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff, r.payload) for r in ans],
                [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff, r.payload) for r in auth],
                [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff, r.payload) for r in add]
            ))

    def lookupAllRecords(self, name, timeout = None):
        # ALL_RECORDS queries are never cached.
        return defer.fail(failure.Failure(dns.DomainError(name)))

    def cacheResult(self, query, payload):
        """Store payload for query and schedule expiry after the minimum
        TTL found in the result sections."""
        if self.verbose > 1:
            log.msg('Adding %r to cache' % query)

        self.cache[query] = (time.time(), payload)

        # Replace any previously scheduled expiry for this query.
        if self.cancel.has_key(query):  # Python 2 dict API
            self.cancel[query].cancel()

        s = list(payload[0]) + list(payload[1]) + list(payload[2])
        # NOTE(review): raises IndexError if every section is empty.
        m = s[0].ttl
        for r in s:
            m = min(m, r.ttl)

        # Imported here to avoid installing the reactor at import time.
        from twisted.internet import reactor
        self.cancel[query] = reactor.callLater(m, self.clearEntry, query)

    def clearEntry(self, query):
        del self.cache[query]
        del self.cancel[query]
| apache-2.0 |
whbruce/mraa | tests/mock/i2c_checks_read_byte_data.py | 21 | 2029 | #!/usr/bin/env python
# Author: Alex Tereschenko <alext.mkrs@gmail.com>
# Copyright (c) 2016 Alex Tereschenko.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksReadByteData(u.TestCase):
    """Unit tests for I2c.readReg() against the mraa mock I2C backend."""

    def setUp(self):
        # Fresh bus handle per test.
        self.i2c = m.I2c(MRAA_I2C_BUS_NUM)

    def tearDown(self):
        del self.i2c

    def test_i2c_read_byte_data(self):
        # Reading the last valid register returns the mock's init byte.
        self.i2c.address(MRAA_MOCK_I2C_ADDR)
        expected_res = MRAA_MOCK_I2C_DATA_INIT_BYTE
        res = self.i2c.readReg(MRAA_MOCK_I2C_DATA_LEN - 1)
        self.assertEqual(res, expected_res, "I2C readReg() returned unexpected data")

    def test_i2c_read_byte_data_invalid_addr(self):
        # A wrong device address must make readReg() raise.
        self.i2c.address(MRAA_MOCK_I2C_ADDR - 1)
        self.assertRaises(ValueError, self.i2c.readReg, MRAA_MOCK_I2C_DATA_LEN - 1)

    def test_i2c_read_byte_data_invalid_reg(self):
        # A register one past the end must make readReg() raise.
        self.i2c.address(MRAA_MOCK_I2C_ADDR)
        self.assertRaises(ValueError, self.i2c.readReg, MRAA_MOCK_I2C_DATA_LEN)


if __name__ == "__main__":
    u.main()
| mit |
kinooo/Sick-Beard | cherrypy/_cpserver.py | 45 | 4782 | """Manage HTTP servers with CherryPy."""
import warnings
import cherrypy
from cherrypy.lib import attributes
# We import * because we want to export check_port
# et al as attributes of this module.
from cherrypy.process.servers import *
class Server(ServerAdapter):
    """An adapter for an HTTP server.

    You can set attributes (like socket_host and socket_port)
    on *this* object (which is probably cherrypy.server), and call
    quickstart. For example:

        cherrypy.server.socket_port = 80
        cherrypy.quickstart()
    """

    socket_port = 8080

    # Backing store for the socket_host property below.
    _socket_host = '127.0.0.1'
    def _get_socket_host(self):
        return self._socket_host
    def _set_socket_host(self, value):
        # '' is ambiguous across platforms; force the explicit INADDR_ANY form.
        if value == '':
            raise ValueError("The empty string ('') is not an allowed value. "
                             "Use '0.0.0.0' instead to listen on all active "
                             "interfaces (INADDR_ANY).")
        self._socket_host = value
    socket_host = property(_get_socket_host, _set_socket_host,
        doc="""The hostname or IP address on which to listen for connections.

        Host values may be any IPv4 or IPv6 address, or any valid hostname.
        The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if
        your hosts file prefers IPv6). The string '0.0.0.0' is a special
        IPv4 entry meaning "any active interface" (INADDR_ANY), and '::'
        is the similar IN6ADDR_ANY for IPv6. The empty string or None are
        not allowed.""")

    # Class-level defaults; override per instance or via config.
    socket_file = None           # Unix domain socket path (overrides host/port)
    socket_queue_size = 5
    socket_timeout = 10
    shutdown_timeout = 5
    protocol_version = 'HTTP/1.1'
    reverse_dns = False
    thread_pool = 10
    thread_pool_max = -1         # -1 means unbounded growth
    max_request_header_size = 500 * 1024
    max_request_body_size = 100 * 1024 * 1024
    instance = None              # explicit httpserver object, if provided
    ssl_context = None
    ssl_certificate = None
    ssl_certificate_chain = None
    ssl_private_key = None
    ssl_module = 'pyopenssl'
    nodelay = True
    wsgi_version = (1, 1)

    def __init__(self):
        self.bus = cherrypy.engine
        self.httpserver = None
        self.interrupt = None
        self.running = False

    def httpserver_from_self(self, httpserver=None):
        """Return a (httpserver, bind_addr) pair based on self attributes."""
        if httpserver is None:
            httpserver = self.instance
        if httpserver is None:
            # Default: build the stock WSGI server from this adapter.
            from cherrypy import _cpwsgi_server
            httpserver = _cpwsgi_server.CPWSGIServer(self)
        if isinstance(httpserver, basestring):  # Python 2: str or unicode
            # Is anyone using this? Can I add an arg?
            httpserver = attributes(httpserver)(self)
        return httpserver, self.bind_addr

    def start(self):
        """Start the HTTP server."""
        if not self.httpserver:
            self.httpserver, self.bind_addr = self.httpserver_from_self()
        ServerAdapter.start(self)
    start.priority = 75  # bus plugin ordering

    def _get_bind_addr(self):
        # A Unix socket path takes precedence over (host, port).
        if self.socket_file:
            return self.socket_file
        if self.socket_host is None and self.socket_port is None:
            return None
        return (self.socket_host, self.socket_port)
    def _set_bind_addr(self, value):
        if value is None:
            self.socket_file = None
            self.socket_host = None
            self.socket_port = None
        elif isinstance(value, basestring):
            # A string means a Unix domain socket path.
            self.socket_file = value
            self.socket_host = None
            self.socket_port = None
        else:
            try:
                self.socket_host, self.socket_port = value
                self.socket_file = None
            except ValueError:
                raise ValueError("bind_addr must be a (host, port) tuple "
                                 "(for TCP sockets) or a string (for Unix "
                                 "domain sockets), not %r" % value)
    bind_addr = property(_get_bind_addr, _set_bind_addr)

    def base(self):
        """Return the base (scheme://host[:port] or sock file) for this server."""
        if self.socket_file:
            return self.socket_file

        host = self.socket_host
        if host in ('0.0.0.0', '::'):
            # 0.0.0.0 is INADDR_ANY and :: is IN6ADDR_ANY.
            # Look up the host name, which should be the
            # safest thing to spit out in a URL.
            import socket
            host = socket.gethostname()

        port = self.socket_port

        # Omit the port only when it is the scheme's default.
        if self.ssl_certificate:
            scheme = "https"
            if port != 443:
                host += ":%s" % port
        else:
            scheme = "http"
            if port != 80:
                host += ":%s" % port

        return "%s://%s" % (scheme, host)
| gpl-3.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/encodings/cp1140.py | 593 | 13361 | """ Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: each direction is a single pass through the
    # module-level charmap tables defined at the bottom of this file.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding keeps no state between calls, so ``final`` needs
        # no special handling; [0] drops the "characters consumed" count.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Stateless single-byte decoding; [0] drops the "bytes consumed"
        # count from the (output, length) result tuple.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the two base classes.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the two base classes.
    pass
### encodings module API

def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'cp1140'."""
    return codecs.CodecInfo(
        name='cp1140',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# 256-entry charmap: index = codepage-1140 byte value, value = the unicode
# character it decodes to.  The adjacent string literals concatenate, so
# ``decoding_table`` is one 256-character unicode string.  Machine-generated
# from 'python-mappings/CP1140.TXT' by gencodec.py -- regenerate rather than
# editing by hand.
decoding_table = (
    u'\x00'     #  0x00 -> NULL
    u'\x01'     #  0x01 -> START OF HEADING
    u'\x02'     #  0x02 -> START OF TEXT
    u'\x03'     #  0x03 -> END OF TEXT
    u'\x9c'     #  0x04 -> CONTROL
    u'\t'       #  0x05 -> HORIZONTAL TABULATION
    u'\x86'     #  0x06 -> CONTROL
    u'\x7f'     #  0x07 -> DELETE
    u'\x97'     #  0x08 -> CONTROL
    u'\x8d'     #  0x09 -> CONTROL
    u'\x8e'     #  0x0A -> CONTROL
    u'\x0b'     #  0x0B -> VERTICAL TABULATION
    u'\x0c'     #  0x0C -> FORM FEED
    u'\r'       #  0x0D -> CARRIAGE RETURN
    u'\x0e'     #  0x0E -> SHIFT OUT
    u'\x0f'     #  0x0F -> SHIFT IN
    u'\x10'     #  0x10 -> DATA LINK ESCAPE
    u'\x11'     #  0x11 -> DEVICE CONTROL ONE
    u'\x12'     #  0x12 -> DEVICE CONTROL TWO
    u'\x13'     #  0x13 -> DEVICE CONTROL THREE
    u'\x9d'     #  0x14 -> CONTROL
    u'\x85'     #  0x15 -> CONTROL
    u'\x08'     #  0x16 -> BACKSPACE
    u'\x87'     #  0x17 -> CONTROL
    u'\x18'     #  0x18 -> CANCEL
    u'\x19'     #  0x19 -> END OF MEDIUM
    u'\x92'     #  0x1A -> CONTROL
    u'\x8f'     #  0x1B -> CONTROL
    u'\x1c'     #  0x1C -> FILE SEPARATOR
    u'\x1d'     #  0x1D -> GROUP SEPARATOR
    u'\x1e'     #  0x1E -> RECORD SEPARATOR
    u'\x1f'     #  0x1F -> UNIT SEPARATOR
    u'\x80'     #  0x20 -> CONTROL
    u'\x81'     #  0x21 -> CONTROL
    u'\x82'     #  0x22 -> CONTROL
    u'\x83'     #  0x23 -> CONTROL
    u'\x84'     #  0x24 -> CONTROL
    u'\n'       #  0x25 -> LINE FEED
    u'\x17'     #  0x26 -> END OF TRANSMISSION BLOCK
    u'\x1b'     #  0x27 -> ESCAPE
    u'\x88'     #  0x28 -> CONTROL
    u'\x89'     #  0x29 -> CONTROL
    u'\x8a'     #  0x2A -> CONTROL
    u'\x8b'     #  0x2B -> CONTROL
    u'\x8c'     #  0x2C -> CONTROL
    u'\x05'     #  0x2D -> ENQUIRY
    u'\x06'     #  0x2E -> ACKNOWLEDGE
    u'\x07'     #  0x2F -> BELL
    u'\x90'     #  0x30 -> CONTROL
    u'\x91'     #  0x31 -> CONTROL
    u'\x16'     #  0x32 -> SYNCHRONOUS IDLE
    u'\x93'     #  0x33 -> CONTROL
    u'\x94'     #  0x34 -> CONTROL
    u'\x95'     #  0x35 -> CONTROL
    u'\x96'     #  0x36 -> CONTROL
    u'\x04'     #  0x37 -> END OF TRANSMISSION
    u'\x98'     #  0x38 -> CONTROL
    u'\x99'     #  0x39 -> CONTROL
    u'\x9a'     #  0x3A -> CONTROL
    u'\x9b'     #  0x3B -> CONTROL
    u'\x14'     #  0x3C -> DEVICE CONTROL FOUR
    u'\x15'     #  0x3D -> NEGATIVE ACKNOWLEDGE
    u'\x9e'     #  0x3E -> CONTROL
    u'\x1a'     #  0x3F -> SUBSTITUTE
    u' '        #  0x40 -> SPACE
    u'\xa0'     #  0x41 -> NO-BREAK SPACE
    u'\xe2'     #  0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe4'     #  0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe0'     #  0x44 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe1'     #  0x45 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe3'     #  0x46 -> LATIN SMALL LETTER A WITH TILDE
    u'\xe5'     #  0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe7'     #  0x48 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xf1'     #  0x49 -> LATIN SMALL LETTER N WITH TILDE
    u'\xa2'     #  0x4A -> CENT SIGN
    u'.'        #  0x4B -> FULL STOP
    u'<'        #  0x4C -> LESS-THAN SIGN
    u'('        #  0x4D -> LEFT PARENTHESIS
    u'+'        #  0x4E -> PLUS SIGN
    u'|'        #  0x4F -> VERTICAL LINE
    u'&'        #  0x50 -> AMPERSAND
    u'\xe9'     #  0x51 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xea'     #  0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb'     #  0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xe8'     #  0x54 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xed'     #  0x55 -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee'     #  0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef'     #  0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xec'     #  0x58 -> LATIN SMALL LETTER I WITH GRAVE
    u'\xdf'     #  0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
    u'!'        #  0x5A -> EXCLAMATION MARK
    u'$'        #  0x5B -> DOLLAR SIGN
    u'*'        #  0x5C -> ASTERISK
    u')'        #  0x5D -> RIGHT PARENTHESIS
    u';'        #  0x5E -> SEMICOLON
    u'\xac'     #  0x5F -> NOT SIGN
    u'-'        #  0x60 -> HYPHEN-MINUS
    u'/'        #  0x61 -> SOLIDUS
    u'\xc2'     #  0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc4'     #  0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc0'     #  0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xc1'     #  0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc3'     #  0x66 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\xc5'     #  0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc7'     #  0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xd1'     #  0x69 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xa6'     #  0x6A -> BROKEN BAR
    u','        #  0x6B -> COMMA
    u'%'        #  0x6C -> PERCENT SIGN
    u'_'        #  0x6D -> LOW LINE
    u'>'        #  0x6E -> GREATER-THAN SIGN
    u'?'        #  0x6F -> QUESTION MARK
    u'\xf8'     #  0x70 -> LATIN SMALL LETTER O WITH STROKE
    u'\xc9'     #  0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xca'     #  0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb'     #  0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xc8'     #  0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xcd'     #  0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce'     #  0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf'     #  0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\xcc'     #  0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
    u'`'        #  0x79 -> GRAVE ACCENT
    u':'        #  0x7A -> COLON
    u'#'        #  0x7B -> NUMBER SIGN
    u'@'        #  0x7C -> COMMERCIAL AT
    u"'"        #  0x7D -> APOSTROPHE
    u'='        #  0x7E -> EQUALS SIGN
    u'"'        #  0x7F -> QUOTATION MARK
    u'\xd8'     #  0x80 -> LATIN CAPITAL LETTER O WITH STROKE
    u'a'        #  0x81 -> LATIN SMALL LETTER A
    u'b'        #  0x82 -> LATIN SMALL LETTER B
    u'c'        #  0x83 -> LATIN SMALL LETTER C
    u'd'        #  0x84 -> LATIN SMALL LETTER D
    u'e'        #  0x85 -> LATIN SMALL LETTER E
    u'f'        #  0x86 -> LATIN SMALL LETTER F
    u'g'        #  0x87 -> LATIN SMALL LETTER G
    u'h'        #  0x88 -> LATIN SMALL LETTER H
    u'i'        #  0x89 -> LATIN SMALL LETTER I
    u'\xab'     #  0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb'     #  0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xf0'     #  0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
    u'\xfd'     #  0x8D -> LATIN SMALL LETTER Y WITH ACUTE
    u'\xfe'     #  0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
    u'\xb1'     #  0x8F -> PLUS-MINUS SIGN
    u'\xb0'     #  0x90 -> DEGREE SIGN
    u'j'        #  0x91 -> LATIN SMALL LETTER J
    u'k'        #  0x92 -> LATIN SMALL LETTER K
    u'l'        #  0x93 -> LATIN SMALL LETTER L
    u'm'        #  0x94 -> LATIN SMALL LETTER M
    u'n'        #  0x95 -> LATIN SMALL LETTER N
    u'o'        #  0x96 -> LATIN SMALL LETTER O
    u'p'        #  0x97 -> LATIN SMALL LETTER P
    u'q'        #  0x98 -> LATIN SMALL LETTER Q
    u'r'        #  0x99 -> LATIN SMALL LETTER R
    u'\xaa'     #  0x9A -> FEMININE ORDINAL INDICATOR
    u'\xba'     #  0x9B -> MASCULINE ORDINAL INDICATOR
    u'\xe6'     #  0x9C -> LATIN SMALL LIGATURE AE
    u'\xb8'     #  0x9D -> CEDILLA
    u'\xc6'     #  0x9E -> LATIN CAPITAL LIGATURE AE
    u'\u20ac'   #  0x9F -> EURO SIGN
    u'\xb5'     #  0xA0 -> MICRO SIGN
    u'~'        #  0xA1 -> TILDE
    u's'        #  0xA2 -> LATIN SMALL LETTER S
    u't'        #  0xA3 -> LATIN SMALL LETTER T
    u'u'        #  0xA4 -> LATIN SMALL LETTER U
    u'v'        #  0xA5 -> LATIN SMALL LETTER V
    u'w'        #  0xA6 -> LATIN SMALL LETTER W
    u'x'        #  0xA7 -> LATIN SMALL LETTER X
    u'y'        #  0xA8 -> LATIN SMALL LETTER Y
    u'z'        #  0xA9 -> LATIN SMALL LETTER Z
    u'\xa1'     #  0xAA -> INVERTED EXCLAMATION MARK
    u'\xbf'     #  0xAB -> INVERTED QUESTION MARK
    u'\xd0'     #  0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
    u'\xdd'     #  0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
    u'\xde'     #  0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
    u'\xae'     #  0xAF -> REGISTERED SIGN
    u'^'        #  0xB0 -> CIRCUMFLEX ACCENT
    u'\xa3'     #  0xB1 -> POUND SIGN
    u'\xa5'     #  0xB2 -> YEN SIGN
    u'\xb7'     #  0xB3 -> MIDDLE DOT
    u'\xa9'     #  0xB4 -> COPYRIGHT SIGN
    u'\xa7'     #  0xB5 -> SECTION SIGN
    u'\xb6'     #  0xB6 -> PILCROW SIGN
    u'\xbc'     #  0xB7 -> VULGAR FRACTION ONE QUARTER
    u'\xbd'     #  0xB8 -> VULGAR FRACTION ONE HALF
    u'\xbe'     #  0xB9 -> VULGAR FRACTION THREE QUARTERS
    u'['        #  0xBA -> LEFT SQUARE BRACKET
    u']'        #  0xBB -> RIGHT SQUARE BRACKET
    u'\xaf'     #  0xBC -> MACRON
    u'\xa8'     #  0xBD -> DIAERESIS
    u'\xb4'     #  0xBE -> ACUTE ACCENT
    u'\xd7'     #  0xBF -> MULTIPLICATION SIGN
    u'{'        #  0xC0 -> LEFT CURLY BRACKET
    u'A'        #  0xC1 -> LATIN CAPITAL LETTER A
    u'B'        #  0xC2 -> LATIN CAPITAL LETTER B
    u'C'        #  0xC3 -> LATIN CAPITAL LETTER C
    u'D'        #  0xC4 -> LATIN CAPITAL LETTER D
    u'E'        #  0xC5 -> LATIN CAPITAL LETTER E
    u'F'        #  0xC6 -> LATIN CAPITAL LETTER F
    u'G'        #  0xC7 -> LATIN CAPITAL LETTER G
    u'H'        #  0xC8 -> LATIN CAPITAL LETTER H
    u'I'        #  0xC9 -> LATIN CAPITAL LETTER I
    u'\xad'     #  0xCA -> SOFT HYPHEN
    u'\xf4'     #  0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf6'     #  0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf2'     #  0xCD -> LATIN SMALL LETTER O WITH GRAVE
    u'\xf3'     #  0xCE -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf5'     #  0xCF -> LATIN SMALL LETTER O WITH TILDE
    u'}'        #  0xD0 -> RIGHT CURLY BRACKET
    u'J'        #  0xD1 -> LATIN CAPITAL LETTER J
    u'K'        #  0xD2 -> LATIN CAPITAL LETTER K
    u'L'        #  0xD3 -> LATIN CAPITAL LETTER L
    u'M'        #  0xD4 -> LATIN CAPITAL LETTER M
    u'N'        #  0xD5 -> LATIN CAPITAL LETTER N
    u'O'        #  0xD6 -> LATIN CAPITAL LETTER O
    u'P'        #  0xD7 -> LATIN CAPITAL LETTER P
    u'Q'        #  0xD8 -> LATIN CAPITAL LETTER Q
    u'R'        #  0xD9 -> LATIN CAPITAL LETTER R
    u'\xb9'     #  0xDA -> SUPERSCRIPT ONE
    u'\xfb'     #  0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc'     #  0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xf9'     #  0xDD -> LATIN SMALL LETTER U WITH GRAVE
    u'\xfa'     #  0xDE -> LATIN SMALL LETTER U WITH ACUTE
    u'\xff'     #  0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
    u'\\'       #  0xE0 -> REVERSE SOLIDUS
    u'\xf7'     #  0xE1 -> DIVISION SIGN
    u'S'        #  0xE2 -> LATIN CAPITAL LETTER S
    u'T'        #  0xE3 -> LATIN CAPITAL LETTER T
    u'U'        #  0xE4 -> LATIN CAPITAL LETTER U
    u'V'        #  0xE5 -> LATIN CAPITAL LETTER V
    u'W'        #  0xE6 -> LATIN CAPITAL LETTER W
    u'X'        #  0xE7 -> LATIN CAPITAL LETTER X
    u'Y'        #  0xE8 -> LATIN CAPITAL LETTER Y
    u'Z'        #  0xE9 -> LATIN CAPITAL LETTER Z
    u'\xb2'     #  0xEA -> SUPERSCRIPT TWO
    u'\xd4'     #  0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd6'     #  0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xd2'     #  0xED -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xd3'     #  0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd5'     #  0xEF -> LATIN CAPITAL LETTER O WITH TILDE
    u'0'        #  0xF0 -> DIGIT ZERO
    u'1'        #  0xF1 -> DIGIT ONE
    u'2'        #  0xF2 -> DIGIT TWO
    u'3'        #  0xF3 -> DIGIT THREE
    u'4'        #  0xF4 -> DIGIT FOUR
    u'5'        #  0xF5 -> DIGIT FIVE
    u'6'        #  0xF6 -> DIGIT SIX
    u'7'        #  0xF7 -> DIGIT SEVEN
    u'8'        #  0xF8 -> DIGIT EIGHT
    u'9'        #  0xF9 -> DIGIT NINE
    u'\xb3'     #  0xFA -> SUPERSCRIPT THREE
    u'\xdb'     #  0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc'     #  0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xd9'     #  0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xda'     #  0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\x9f'     #  0xFF -> CONTROL
)

### Encoding table

# Inverse map (unicode ordinal -> byte) derived from the decoding table.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
tobias47n9e/mplstereonet | mplstereonet/stereonet_math.py | 2 | 21876 | """
Utilities to convert between strike/dip, etc and points/lines in lat, long
space.
A stereonet in <long,lat> coordinates:
<0,90>
***
* *
<-90,0> * *<90,0>
* *
* *
***
<0,-90>
If strike=0, plotting lines, rakes, planes or poles to planes is simple. For a
plane, it's a line of constant longitude at long=90-dip. For a line, it's a
point at long=0,lat=90-dip. For a rake, it's a point at long=90-dip,
lat=90-rake. These points can then be rotated to the proper strike. (A
rotation matrix around the X-axis is much simpler than the trig otherwise
necessary!)
All of these assume that strikes and dips follow the "right-hand-rule".
In other words, if we're facing in the direction given for the strike, the plane
dips to our right.
"""
import numpy as np
def sph2cart(lon, lat):
    """
    Convert longitude and latitude (in radians, scalar or array-like) into
    cartesian ``x``, ``y``, ``z`` components on the unit sphere, where
    (0, 0, 0) is the center of the globe.

    Parameters
    ----------
    lon : array-like
        Longitude in radians
    lat : array-like
        Latitude in radians

    Returns
    -------
    x, y, z : arrays
        Cartesian coordinates of each input point.
    """
    cos_lat = np.cos(lat)
    return cos_lat * np.cos(lon), cos_lat * np.sin(lon), np.sin(lat)
def cart2sph(x, y, z):
    """
    Convert cartesian coordinates (with the origin at the center of the
    globe) back into spherical coordinates.

    Parameters
    ----------
    x, y, z : array-like
        Cartesian components of one or more vectors.

    Returns
    -------
    lon, lat : arrays
        Longitude and latitude in radians.
    """
    radius = np.sqrt(x * x + y * y + z * z)
    return np.arctan2(y, x), np.arcsin(z / radius)
def _rotate(lon, lat, theta, axis='x'):
    """
    Rotate ``lon``/``lat`` coordinates (given in *degrees*) about the chosen
    cartesian axis by ``theta`` degrees, mimicking the physical rotation of a
    stereonet.  Returns the rotated lon, lat in *radians*.
    """
    lon, lat = np.atleast_1d(lon, lat)
    lon = np.radians(lon)
    lat = np.radians(lat)

    # Do the rotation in cartesian space, then convert back (radius is
    # ignored -- everything lives on the unit sphere).
    x, y, z = sph2cart(lon, lat)
    rotators = {'x': _rotate_x, 'y': _rotate_y, 'z': _rotate_z}
    x, y, z = rotators[axis](x, y, z, np.radians(theta))
    return cart2sph(x, y, z)
def _rotate_x(x, y, z, theta):
X = x
Y = y*np.cos(theta) + z*np.sin(theta)
Z = -y*np.sin(theta) + z*np.cos(theta)
return X, Y, Z
def _rotate_y(x, y, z, theta):
X = x*np.cos(theta) + -z*np.sin(theta)
Y = y
Z = x*np.sin(theta) + z*np.cos(theta)
return X, Y, Z
def _rotate_z(x, y, z, theta):
X = x*np.cos(theta) + -y*np.sin(theta)
Y = x*np.sin(theta) + y*np.cos(theta)
Z = z
return X, Y, Z
def antipode(lon, lat):
    """
    Return the antipode (diametrically opposite point on the globe) of the
    given point or points.

    Parameters
    ----------
    lon : number or sequence of numbers
        Longitude in radians
    lat : number or sequence of numbers
        Latitude in radians

    Returns
    -------
    lon, lat : arrays
        Longitude and latitude of the antipodal point(s), in radians
        (always returned as sequences, even for scalar input).
    """
    # Negating the cartesian vector flips the point through the origin.
    cartesian = sph2cart(lon, lat)
    negated = [-component for component in cartesian]
    return cart2sph(*negated)
def plane(strike, dip, segments=100):
    """
    Calculates the longitude and latitude of `segments` points along the
    stereonet projection of each plane with a given `strike` and `dip` in
    degrees.

    Parameters
    ----------
    strike : number or sequence of numbers
        The strike of the plane(s) in degrees, with dip direction indicated by
        the azimuth (e.g. 315 vs. 135) specified following the "right hand
        rule".
    dip : number or sequence of numbers
        The dip of the plane(s) in degrees.
    segments : number or sequence of numbers
        The number of points in the returned `lon` and `lat` arrays. Defaults
        to 100 segments.

    Returns
    -------
    lon, lat : arrays
        `num_segments` x `num_strikes` arrays of longitude and latitude in
        radians.
    """
    strikes, dips = np.atleast_1d(strike, dip)
    # Use the builtin ``float`` here: the ``np.float`` alias was deprecated
    # in NumPy 1.20 and removed in 1.24 (it was always just ``float``).
    lons = np.zeros((segments, strikes.size), dtype=float)
    lats = lons.copy()
    for i, (strike, dip) in enumerate(zip(strikes, dips)):
        # For strike 0, a plane is a line of constant longitude at
        # lon = 90 - dip; rotate that line into the requested strike.
        dip = 90 - dip
        lon = dip * np.ones(segments)
        lat = np.linspace(-90, 90, segments)
        lon, lat = _rotate(lon, lat, strike)
        lons[:, i] = lon
        lats[:, i] = lat
    return lons, lats
def pole(strike, dip):
    """
    Calculates the longitude and latitude of the pole(s) to the plane(s)
    specified by `strike` and `dip`, given in degrees.

    Parameters
    ----------
    strike : number or sequence of numbers
        The strike of the plane(s) in degrees, with dip direction indicated by
        the azimuth (e.g. 315 vs. 135) specified following the "right hand
        rule".
    dip : number or sequence of numbers
        The dip of the plane(s) in degrees.

    Returns
    -------
    lon, lat : Arrays of longitude and latitude in radians.
    """
    strike, dip = np.atleast_1d(strike, dip)
    # Work on copies: when the caller passes in ndarrays, ``np.atleast_1d``
    # returns the caller's own arrays, and the overturned-dip fixup below
    # would otherwise mutate the caller's data in place.
    strike, dip = strike.copy(), dip.copy()
    # Normalize overturned planes (dip > 90) to an equivalent
    # right-hand-rule orientation.
    mask = dip > 90
    dip[mask] = 180 - dip[mask]
    strike[mask] += 180
    # Plot the approriate point for a strike of 0 and rotate it
    lon, lat = -dip, 0.0
    lon, lat = _rotate(lon, lat, strike)
    return lon, lat
def rake(strike, dip, rake_angle):
    """
    Return the stereonet longitude and latitude of the linear feature(s)
    described by ``strike``, ``dip``, and ``rake_angle``.

    Parameters
    ----------
    strike : number or sequence of numbers
        Strike of the plane(s) in degrees, right-hand-rule.
    dip : number or sequence of numbers
        Dip of the plane(s) in degrees.
    rake_angle : number or sequence of numbers
        Angle of the lineation on the plane, measured in degrees downward
        from horizontal. Zero is the "right-hand" strike direction; 180
        degrees or a negative angle is the opposite direction.

    Returns
    -------
    lon, lat : Arrays of longitude and latitude in radians.
    """
    strike, dip, rake_angle = np.atleast_1d(strike, dip, rake_angle)
    # Place the feature as if the strike were zero, then rotate into place.
    lon = 90 - dip
    # Map negative rakes onto the equivalent 0-180 range without touching
    # the caller's array.
    adjusted = np.where(rake_angle < 0, rake_angle + 180, rake_angle)
    lat = 90 - adjusted
    return _rotate(lon, lat, strike)
def line(plunge, bearing):
    """
    Return the stereonet longitude and latitude of the linear feature(s)
    given by ``plunge`` and ``bearing``.

    Parameters
    ----------
    plunge : number or sequence of numbers
        Plunge of the line(s) in degrees, measured downward from the end
        of the feature specified by the bearing.
    bearing : number or sequence of numbers
        Bearing (azimuth) of the line(s) in degrees.

    Returns
    -------
    lon, lat : Arrays of longitude and latitude in radians.
    """
    plunge, bearing = np.atleast_1d(plunge, bearing)
    # For a bearing of zero the line sits at lon=0, lat=90-plunge;
    # rotating by the bearing moves it into position.
    return _rotate(0, 90 - plunge, bearing)
def cone(plunge, bearing, angle, segments=100):
    """
    Calculates the longitude and latitude of the small circle (i.e. a cone)
    centered at the given *plunge* and *bearing* with an apical angle of
    *angle*, all in degrees.

    Parameters
    ----------
    plunge : number or sequence of numbers
        The plunge of the center of the cone(s) in degrees. The plunge is
        measured in degrees downward from the end of the feature specified by
        the bearing.
    bearing : number or sequence of numbers
        The bearing (azimuth) of the center of the cone(s) in degrees.
    angle : number or sequence of numbers
        The apical angle (i.e. radius) of the cone(s) in degrees.
    segments : int, optional
        The number of vertices in the small circle.

    Returns
    -------
    lon, lat : arrays
        `num_measurements` x `num_segments` arrays of longitude and latitude in
        radians.
    """
    plunges, bearings, angles = np.atleast_1d(plunge, bearing, angle)
    lons, lats = [], []
    for plunge, bearing, angle in zip(plunges, bearings, angles):
        # Start with a small circle of constant latitude around the pole
        # (lat = 90 - apical angle)...
        lat = (90 - angle) * np.ones(segments, dtype=float)
        lon = np.linspace(-180, 180, segments)
        # ...tilt it down to the requested plunge about the y-axis...
        lon, lat = _rotate(lon, lat, -plunge, axis='y')
        # ..._rotate takes degrees but returns radians, hence the
        # conversion before swinging the circle around to its bearing.
        lon, lat = _rotate(np.degrees(lon), np.degrees(lat), bearing, axis='x')
        lons.append(lon)
        lats.append(lat)
    return np.vstack(lons), np.vstack(lats)
def plunge_bearing2pole(plunge, bearing):
    """
    Convert a ``plunge``/``bearing`` (degrees) into the strike and dip of
    the plane whose pole is parallel to the given line -- i.e. the pole of
    the returned plane plots at exactly the same stereonet point as the
    input line.

    Parameters
    ----------
    plunge : number or sequence of numbers
        Plunge of the line(s) in degrees, downward from the bearing end.
    bearing : number or sequence of numbers
        Bearing (azimuth) of the line(s) in degrees.

    Returns
    -------
    strike, dip : arrays
        Strikes and dips in degrees, following the right-hand-rule.
    """
    plunge, bearing = np.atleast_1d(plunge, bearing)
    # The pole's plane strikes 90 degrees clockwise of the bearing and
    # dips the complement of the plunge; wrap strikes back into [0, 360).
    strike = bearing + 90
    strike = np.where(strike >= 360, strike - 360, strike)
    return strike, 90 - plunge
def pole2plunge_bearing(strike, dip):
    """
    Convert the ``strike`` and ``dip`` (degrees) of a plane (or planes)
    into the plunge and bearing of its pole.

    Parameters
    ----------
    strike : number or sequence of numbers
        Strike of the plane(s) in degrees, right-hand-rule.
    dip : number or sequence of numbers
        Dip of the plane(s) in degrees.

    Returns
    -------
    plunge, bearing : arrays
        Plunge and bearing of the pole(s), in degrees.
    """
    strike, dip = np.atleast_1d(strike, dip)
    # The pole points 90 degrees counter-clockwise of the strike and
    # plunges the complement of the dip; wrap bearings into [0, 360).
    bearing = strike - 90
    bearing = np.where(bearing < 0, bearing + 360, bearing)
    return 90 - dip, bearing
def mean_vector(lons, lats):
    """
    Compute the resultant (mean) vector of a set of points on the sphere.

    Parameters
    ----------
    lons : array-like
        Longitudes in radians.
    lats : array-like
        Latitudes in radians.

    Returns
    -------
    mean_vec : tuple
        (lon, lat) of the resultant direction, in radians.
    r_value : number
        Magnitude of the resultant vector, between 0 and 1; larger values
        indicate more tightly clustered data.
    """
    # Average the unit vectors in cartesian space; the length of the
    # average measures clustering.
    coords = np.vstack(sph2cart(lons, lats)).T
    resultant = coords.mean(axis=0)
    magnitude = np.linalg.norm(resultant)
    return cart2sph(*resultant), magnitude
def fisher_stats(lons, lats, conf=95):
    """
    Returns the resultant vector from a series of longitudes and latitudes. If
    a confidence is set the function additionally returns the opening angle
    of the confidence small circle (Fisher statistics) and the dispersion
    factor (kappa).

    Parameters
    ----------
    lons : array-like
        A sequence of longitudes (in radians)
    lats : array-like
        A sequence of latitudes (in radians)
    conf : confidence value
        The confidence used for the calculation (float). Defaults to 95.

    Returns
    -------
    mean vector: tuple
        The point that lies in the center of a set of vectors.
        (Longitude, Latitude) in radians.

    If 1 vector is passed to the function it returns two None-values. For
    more than one vector the following 3 values are returned as a tuple:

    r_value: float
        The magnitude of the resultant vector (between 0 and 1) This represents
        the degree of clustering in the data.
    angle: float
        The opening angle of the small circle that corresponds to confidence
        of the calculated direction.
    kappa: float
        A measure for the amount of dispersion of a group of layers. For
        one vector the factor is undefined. Approaches infinity for nearly
        parallel vectors and zero for highly dispersed vectors.
    """
    xyz = sph2cart(lons, lats)
    xyz = np.vstack(xyz).T
    mean_vec = xyz.mean(axis=0)
    r_value = np.linalg.norm(mean_vec)   # mean resultant length (0..1)
    num = xyz.shape[0]
    mean_vec = cart2sph(*mean_vec)

    if num > 1:
        # Use float literals so the formulae survive Python 2's integer
        # division as well: with ints, (100 - 95) / 100 == 0 (leading to a
        # ZeroDivisionError in the ``1 / p`` below) and 1 / (num - 1) == 0.
        p = (100.0 - conf) / 100.0
        vector_sum = xyz.sum(axis=0)
        result_vect = np.sqrt(np.sum(np.square(vector_sum)))
        fract1 = (num - result_vect) / result_vect
        fract3 = 1.0 / (num - 1)
        # Opening angle of the confidence cone about the mean direction.
        angle = np.arccos(1 - fract1 * ((1 / p) ** fract3 - 1))
        angle = np.degrees(angle)
        # Dispersion factor: large for tightly clustered vectors.
        kappa = (num - 1) / (num - result_vect)
        return mean_vec, (r_value, angle, kappa)
    else:
        return None, None
def geographic2pole(lon, lat):
    """
    Convert stereonet longitude(s) and latitude(s) into the strike and dip
    of the plane whose pole lies at that point.

    Parameters
    ----------
    lon : array-like
        Longitude(s) in radians.
    lat : array-like
        Latitude(s) in radians.

    Returns
    -------
    strike : array
        Strikes in degrees, right-hand-rule.
    dip : array
        Dips in degrees.
    """
    # Interpret the point as a line, then take the plane normal to it.
    plunge, bearing = geographic2plunge_bearing(lon, lat)
    strike = bearing + 90
    strike = np.where(strike >= 360, strike - 360, strike)
    return strike, 90 - plunge
def geographic2plunge_bearing(lon, lat):
    """
    Converts longitude and latitude in stereonet coordinates into a
    plunge/bearing.

    Parameters
    ----------
    lon, lat : numbers or sequences of numbers
        Longitudes and latitudes in radians as measured from a
        lower-hemisphere stereonet

    Returns
    -------
    plunge : array
        The plunge of the vector in degrees downward from horizontal.
    bearing : array
        The bearing of the vector in degrees clockwise from north.
    """
    lon, lat = np.atleast_1d(lon, lat)
    x, y, z = sph2cart(lon, lat)

    # Bearing will be in the y-z plane...
    bearing = np.arctan2(z, y)

    # Plunge is the angle between the line and the y-z plane
    r = np.sqrt(x*x + y*y + z*z)
    # Guard against dividing by zero for the degenerate origin point.
    r[r == 0] = 1e-15
    plunge = np.arcsin(x / r)

    # Convert back to azimuths in degrees..
    plunge, bearing = np.degrees(plunge), np.degrees(bearing)
    # Math-convention angle -> compass azimuth, wrapped into [0, 360).
    bearing = 90 - bearing
    bearing[bearing < 0] += 360

    # If the plunge angle is upwards, get the opposite end of the line
    # (flip the plunge sign and swing the bearing 180, re-wrapping).
    upwards = plunge < 0
    plunge[upwards] *= -1
    bearing[upwards] -= 180
    bearing[upwards & (bearing < 0)] += 360
    return plunge, bearing
def plane_intersection(strike1, dip1, strike2, dip2):
    """
    Find the linear intersection of two planes, returned as a
    plunge/bearing.  Sequences of strikes and dips are also accepted.

    Parameters
    ----------
    strike1, dip1 : numbers or sequences of numbers
        Strike and dip (degrees, right-hand-rule) of the first plane(s).
    strike2, dip2 : numbers or sequences of numbers
        Strike and dip (degrees, right-hand-rule) of the second plane(s).

    Returns
    -------
    plunge, bearing : arrays
        Plunge and bearing (degrees) of the intersection line(s).
    """
    # The intersection direction is perpendicular to both plane normals.
    norm1 = np.array(sph2cart(*pole(strike1, dip1)))
    norm2 = np.array(sph2cart(*pole(strike2, dip2)))
    intersection = np.cross(norm1, norm2, axis=0)
    lon, lat = cart2sph(*intersection)
    return geographic2plunge_bearing(lon, lat)
def project_onto_plane(strike, dip, plunge, bearing):
    """
    Projects a linear feature(s) onto the surface of a plane. Returns a rake
    angle(s) along the plane.

    This is also useful for finding the rake angle of a feature that already
    intersects the plane in question.

    Parameters
    ----------
    strike, dip : numbers or sequences of numbers
        The strike and dip (in degrees, following the right-hand-rule) of the
        plane(s).
    plunge, bearing : numbers or sequences of numbers
        The plunge and bearing (in degrees) or of the linear feature(s) to be
        projected onto the plane.

    Returns
    -------
    rake : array
        A sequence of rake angles measured downwards from horizontal in
        degrees. Zero degrees corresponds to the "right- hand" direction
        indicated by the strike, while a negative angle corresponds to the
        opposite direction. Rakes returned by this function will always be
        between -90 and 90 (inclusive).
    """
    # Project the line onto the plane
    norm = sph2cart(*pole(strike, dip))
    feature = sph2cart(*line(plunge, bearing))
    norm, feature = np.array(norm), np.array(feature)
    # Two successive cross products leave the in-plane component of
    # ``feature``; normalize it to a unit vector.
    perp = np.cross(norm, feature, axis=0)
    on_plane = np.cross(perp, norm, axis=0)
    on_plane /= np.sqrt(np.sum(on_plane**2, axis=0))

    # Calculate the angle between the projected feature and horizontal
    # This is just a dot product, but we need to work with multiple measurements
    # at once, so einsum is quicker than apply_along_axis.
    strike_vec = sph2cart(*line(0, strike))
    dot = np.einsum('ij,ij->j', on_plane, strike_vec)
    rake = np.degrees(np.arccos(dot))

    # Convert rakes over 90 to negative rakes...
    rake[rake > 90] -= 180
    rake[rake < -90] += 180
    return rake
def azimuth2rake(strike, dip, azimuth):
    """
    Project the azimuth of a linear feature onto a plane, yielding a rake
    angle.

    Parameters
    ----------
    strike, dip : numbers
        Strike and dip of the plane in degrees, right-hand-rule.
    azimuth : numbers
        Azimuth of the linear feature in degrees clockwise from north
        (a 0-360 azimuth).

    Returns
    -------
    rake : number
        Rake angle in degrees measured downwards from horizontal; negative
        values point toward the opposite end of the strike.
    """
    # Intersect the plane with the vertical plane striking along the
    # azimuth, then express that line as a rake on the original plane.
    plunge, bearing = plane_intersection(strike, dip, azimuth, 90)
    return project_onto_plane(strike, dip, plunge, bearing)
def xyz2stereonet(x, y, z):
    """
    Convert *world* cartesian coordinates into lower-hemisphere stereonet
    coordinates.

    Parameters
    ----------
    x, y, z : array-likes
        Sequences of world coordinates

    Returns
    -------
    lon, lat : arrays
        Sequences of longitudes and latitudes (in radians)
    """
    x, y, z = np.atleast_1d(x, y, z)
    # Axis remap between the world frame and the stereonet frame.
    snet_x, snet_y, snet_z = -z, x, y
    return cart2sph(snet_x, snet_y, snet_z)
def stereonet2xyz(lon, lat):
    """
    Convert lower-hemisphere stereonet longitudes/latitudes into *world*
    x, y, z coordinates.

    Parameters
    ----------
    lon, lat : array-likes
        Sequences of longitudes and latitudes (in radians) from a
        lower-hemisphere stereonet

    Returns
    -------
    x, y, z : arrays
        World x, y, z components of the vectors represented by the lon, lat
        coordinates on the stereonet.
    """
    lon, lat = np.atleast_1d(lon, lat)
    # Inverse of the axis remap performed by ``xyz2stereonet``.
    snet_x, snet_y, snet_z = sph2cart(lon, lat)
    return snet_y, snet_z, -snet_x
def vector2plunge_bearing(x, y, z):
    """
    Convert a vector (or vectors) given as world-coordinate x, y, z
    components into plunge/bearing pairs.

    Parameters
    ----------
    x : number or sequence of numbers
        The x-component(s) of the vector
    y : number or sequence of numbers
        The y-component(s) of the vector
    z : number or sequence of numbers
        The z-component(s) of the vector

    Returns
    -------
    plunge : array
        Plunge of the vector in degrees downward from horizontal.
    bearing : array
        Bearing of the vector in degrees clockwise from north.
    """
    # Route through stereonet coordinates, then convert to plunge/bearing.
    lon, lat = xyz2stereonet(x, y, z)
    return geographic2plunge_bearing(lon, lat)
def vector2pole(x, y, z):
    """
    Treat each world-coordinate vector as the normal to a plane and return
    the strike/dip of that plane (i.e. the plane whose pole is parallel to
    the given vector).

    Parameters
    ----------
    x : number or sequence of numbers
        The x-component(s) of the normal vector
    y : number or sequence of numbers
        The y-component(s) of the normal vector
    z : number or sequence of numbers
        The z-component(s) of the normal vector

    Returns
    -------
    strike : array
        Strike of the plane in degrees clockwise from north, with dip
        direction given by the right hand rule.
    dip : array
        Dip of the plane in degrees downward from horizontal.
    """
    # Map the normal into stereonet coordinates, then read off the plane.
    lon, lat = xyz2stereonet(x, y, z)
    return geographic2pole(lon, lat)
| mit |
nightjean/Deep-Learning | tensorflow/contrib/keras/python/keras/utils/layer_utils.py | 29 | 8220 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.conv_utils import convert_kernel
def print_summary(model, line_length=None, positions=None):
  """Prints a summary of a model.

  Arguments:
      model: Keras model instance.
      line_length: total length of printed lines.
      positions: relative or absolute positions of log elements in each line.
          If not provided, defaults to `[.33, .55, .67, 1.]`.
  """
  # A model is printed as a plain table unless some layer has more than one
  # inbound node (i.e. the graph branches); then connectivity is also shown.
  if model.__class__.__name__ == 'Sequential':
    sequential_like = True
  else:
    sequential_like = True
    for v in model.nodes_by_depth.values():
      if len(v) > 1:
        sequential_like = False

  if sequential_like:
    line_length = line_length or 65
    positions = positions or [.45, .85, 1.]
    if positions[-1] <= 1:
      # Relative positions: convert to absolute character columns.
      positions = [int(line_length * p) for p in positions]
    # Header names for the different log elements.
    to_display = ['Layer (type)', 'Output Shape', 'Param #']
  else:
    line_length = line_length or 100
    positions = positions or [.33, .55, .67, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # Header names for the different log elements.
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
    # Nodes that belong to the model's own graph; used below to filter out
    # inbound nodes created by other (shared-layer) models.
    relevant_nodes = []
    for v in model.nodes_by_depth.values():
      relevant_nodes += v

  def print_row(fields, positions):
    # Render one table row, truncating/padding each field to its column.
    line = ''
    for i in range(len(fields)):
      if i > 0:
        line = line[:-1] + ' '
      line += str(fields[i])
      line = line[:positions[i]]
      line += ' ' * (positions[i] - len(line))
    print(line)

  print('_' * line_length)
  print_row(to_display, positions)
  print('=' * line_length)

  def print_layer_summary(layer):
    """Prints a summary for a single layer (no connectivity column)."""
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    name = layer.name
    cls_name = layer.__class__.__name__
    fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
    print_row(fields, positions)

  def print_layer_summary_with_connections(layer):
    """Prints a summary for a single layer, including inbound connections.

    Arguments:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    connections = []
    for node_index, node in enumerate(layer.inbound_nodes):
      # BUG FIX: the previous code built a string key
      # (`layer.name + '_ib-' + str(node_index)`) and tested it for
      # membership in `relevant_nodes`, which holds node *objects* — the
      # test could never match, so every node was skipped and the
      # 'Connected to' column stayed empty. Compare the node itself.
      if relevant_nodes and node not in relevant_nodes:
        # Node is not part of the current network.
        continue
      for i in range(len(node.inbound_layers)):
        inbound_layer = node.inbound_layers[i].name
        inbound_node_index = node.node_indices[i]
        inbound_tensor_index = node.tensor_indices[i]
        connections.append(inbound_layer + '[' + str(inbound_node_index) +
                           '][' + str(inbound_tensor_index) + ']')

    name = layer.name
    cls_name = layer.__class__.__name__
    if not connections:
      first_connection = ''
    else:
      first_connection = connections[0]
    fields = [
        name + ' (' + cls_name + ')', output_shape, layer.count_params(),
        first_connection
    ]
    print_row(fields, positions)
    if len(connections) > 1:
      for i in range(1, len(connections)):
        fields = ['', '', '', connections[i]]
        print_row(fields, positions)

  layers = model.layers
  for i in range(len(layers)):
    if sequential_like:
      print_layer_summary(layers[i])
    else:
      print_layer_summary_with_connections(layers[i])
    # Double line after the last row, single separator otherwise.
    if i == len(layers) - 1:
      print('=' * line_length)
    else:
      print('_' * line_length)

  trainable_count, non_trainable_count = count_total_params(
      layers, layer_set=None)

  print('Total params: {:,}'.format(trainable_count + non_trainable_count))
  print('Trainable params: {:,}'.format(trainable_count))
  print('Non-trainable params: {:,}'.format(non_trainable_count))
  print('_' * line_length)
def count_total_params(layers, layer_set=None):
  """Counts the number of parameters in a list of layers.

  Arguments:
      layers: list of layers.
      layer_set: set of layers already seen
          (so that we don't count their weights twice).

  Returns:
      A tuple (count of trainable weights, count of non-trainable weights.)
  """
  seen = set() if layer_set is None else layer_set
  trainable_total = 0
  non_trainable_total = 0
  for current in layers:
    if current in seen:
      # Shared layer already counted via another reference.
      continue
    seen.add(current)
    if hasattr(current, 'layers'):
      # Nested container: recurse, sharing the seen-set for deduplication.
      sub_trainable, sub_non_trainable = count_total_params(
          current.layers, seen)
      trainable_total += sub_trainable
      non_trainable_total += sub_non_trainable
    else:
      trainable_total += np.sum(
          [K.count_params(w) for w in current.trainable_weights])
      non_trainable_total += np.sum(
          [K.count_params(w) for w in current.non_trainable_weights])
  return int(trainable_total), int(non_trainable_total)
def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  Arguments:
      model: target model for the conversion.
  """
  # Note: SeparableConvolution not included
  # since only supported by TF.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  # Collect (variable, converted value) pairs first, then assign in one batch.
  to_assign = [
      (layer.kernel, convert_kernel(K.get_value(layer.kernel)))
      for layer in model.layers
      if layer.__class__.__name__ in conv_classes
  ]
  K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
                                      previous_feature_map_shape,
                                      target_data_format='channels_first'):
  """Utility useful when changing a convnet's `data_format`.

  When porting the weights of a convnet from one data format to the other,
  if the convnet includes a `Flatten` layer
  (applied to the last convolutional feature map)
  followed by a `Dense` layer, the weights of that `Dense` layer
  should be updated to reflect the new dimension ordering.

  Arguments:
      dense: The target `Dense` layer.
      previous_feature_map_shape: A shape tuple of 3 integers,
          e.g. `(512, 7, 7)`. The shape of the convolutional
          feature map right before the `Flatten` layer that
          came before the target `Dense` layer.
      target_data_format: One of "channels_last", "channels_first".
          Set it "channels_last"
          if converting a "channels_first" model to "channels_last",
          or reciprocally.
  """
  assert target_data_format in {'channels_last', 'channels_first'}
  kernel, bias = dense.get_weights()
  # The source shape and axis permutation are the same for every output unit;
  # hoist them out of the per-column loop (they were recomputed each pass).
  if target_data_format == 'channels_first':
    c, h, w = previous_feature_map_shape
    original_fm_shape = (h, w, c)
    perm = (2, 0, 1)  # last -> first
  else:
    h, w, c = previous_feature_map_shape
    original_fm_shape = (c, h, w)
    perm = (1, 2, 0)  # first -> last
  flat_size = int(np.prod(previous_feature_map_shape))
  for i in range(kernel.shape[1]):
    # Un-flatten the column into the old feature-map layout, permute the
    # channel axis, and flatten back in the new layout.
    ki = kernel[:, i].reshape(original_fm_shape)
    ki = np.transpose(ki, perm)
    kernel[:, i] = np.reshape(ki, (flat_size,))
  dense.set_weights([kernel, bias])
| apache-2.0 |
sdickreuter/python-pistage | PIStage/Controllers/E545.py | 1 | 5348 | __author__ = 'sei'
from PIStage._base import Controller
import math
class E545(Controller):
    """Driver for a Physik Instrumente E-545 piezo controller (axes A/B/C).

    Sends GCS ASCII commands over the TCP socket provided by the
    ``Controller`` base class and mirrors the last known position in the
    shared values ``self._x`` / ``self._y`` / ``self._z``.
    """

    def __init__(self, ip=None, port=None, coordinate_mapping=None, z_correction_angle=None):
        """Connect and bring all three channels online with servo control
        and drift compensation enabled, then read the initial position."""
        super().__init__(ip=ip, port=port, coordinate_mapping=coordinate_mapping,
                         z_correction_angle=z_correction_angle)
        self._sock.send(bytes('ONL 1 1 2 1 3 1\n', 'UTF-8'))  # channels 1-3 online
        self._sock.send(bytes('SVO A 1 B 1 C 1\n', 'UTF-8'))  # servo control on
        self._sock.send(bytes('DCO A 1 B 1 C 1\n', 'UTF-8'))  # drift compensation on
        print('E545 initialized')
        print('All Channels in Online Mode, Servo Control on, Drift Compensation on')
        self.query_pos()
        print('Position: ' + str(self._x.value) + " " + str(self._y.value) + " " + str(self._z.value))

    def query_pos(self):
        """Query the controller for the current axis positions.

        Updates ``self._x/_y/_z`` and returns them as a ``(x, y, z)`` tuple.
        Raises ``RuntimeError`` when the connection is lost.
        """
        self._lock.acquire()
        try:
            self._sock.send(bytes("POS?\n", 'UTF-8'))
            pos = self._sock.recv(self._buffer_size)
        except OSError as exc:
            self._sock.close()
            raise RuntimeError('Lost Connection to Controller') from exc
        finally:
            # BUG FIX: the lock was previously leaked on the error path (the
            # raise skipped the release), deadlocking every later call.
            self._lock.release()
        lines = str(pos, 'UTF-8').split("\n")
        # Assumes replies like 'A=+012.345678' with the numeric value in
        # columns 2..11 — TODO confirm against the GCS reply format.
        self._x.value = float(lines[0][2:12])
        self._y.value = float(lines[1][2:12])
        self._z.value = float(lines[2][2:12])
        return self._x.value, self._y.value, self._z.value

    def moveabs(self, x=None, y=None, z=None):
        """Move to an absolute position.

        Implemented as a relative move from the last known position so that
        the z-angle correction in :meth:`moverel` is applied consistently.
        Axes passed as ``None`` are left untouched.
        """
        xbuf, ybuf, zbuf = self.last_pos()
        dx = None if x is None else x - xbuf
        dy = None if y is None else y - ybuf
        dz = None if z is None else z - zbuf
        self.moverel(dx, dy, dz)

    def moverel(self, dx=None, dy=None, dz=None):
        """Move by the given deltas along the requested axes.

        A z move is partially projected onto y to compensate for the tilted
        mount (``z_correction_angle``). Axes whose target would leave the
        0..200 range are silently skipped (original behaviour preserved).
        Raises ``RuntimeError`` when the connection is lost.
        """
        if dz is not None:
            # Project the z motion onto y to correct for the mounting angle.
            correction = dz * math.cos(math.pi * (90 - self.z_correction_angle) / 180)
            dy = correction if dy is None else dy + correction
        dx, dy, dz = self.map_coordinates(dx, dy, dz)
        # Absolute MOV targets are sent (not relative MVR); the position is
        # tracked locally in the shared values.
        parts = []
        for delta, axis, label in ((dx, self._x, 'A'),
                                   (dy, self._y, 'B'),
                                   (dz, self._z, 'C')):
            if delta is None:
                continue
            target = axis.value + delta
            if 0 < target < 200:
                axis.value = target
                parts.append(label + ' ' + str(round(axis.value, 4)))
        if not parts:
            return
        self._lock.acquire()
        try:
            self._sock.send(bytes('MOV ' + ' '.join(parts) + "\n", 'UTF-8'))
        except OSError as exc:
            self._sock.close()
            # BUG FIX: the RuntimeError was previously constructed but never
            # raised, silently swallowing the connection loss.
            raise RuntimeError('Lost Connection to Controller') from exc
        finally:
            self._lock.release()

    def home(self):
        """
        homes all axes of the stage
        :return: returns counter values after homing
        """
        # NOTE(review): query_pos() re-acquires self._lock while we still
        # hold it; this only works if the base class uses a reentrant lock —
        # confirm (the original code had the same nesting).
        self._lock.acquire()
        try:
            self._sock.send(bytes("GOH\n", 'UTF-8'))
            self.query_pos()
        except OSError as exc:
            self._sock.close()
            # BUG FIX: previously the error object was created but not raised.
            raise RuntimeError('Lost Connection to Controller') from exc
        finally:
            self._lock.release()
# Manual smoke test: requires a live network connection to an E545 controller.
if __name__ == '__main__':
    stage = E545()
    while True:
        stage.query_pos()
        print(stage.last_pos())
dweinstein/finsky | finsky/protos/billing_profile_protos_pb2.py | 2 | 10326 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: billing_profile_protos.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import common_pb2 as common__pb2
import common_device_pb2 as common__device__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='billing_profile_protos.proto',
package='BillingProfileProtos',
syntax='proto2',
serialized_pb=_b('\n\x1c\x62illing_profile_protos.proto\x12\x14\x42illingProfileProtos\x1a\x0c\x63ommon.proto\x1a\x13\x63ommon_device.proto\"\x84\x02\n\x0e\x42illingProfile\x12,\n\ninstrument\x18\x01 \x03(\x0b\x32\x18.CommonDevice.Instrument\x12$\n\x1cselectedExternalInstrumentId\x18\x02 \x01(\t\x12H\n\x14\x62illingProfileOption\x18\x03 \x03(\x0b\x32*.BillingProfileProtos.BillingProfileOption\x12%\n\x1dpaymentsIntegratorCommonToken\x18\x06 \x01(\x0c\x12-\n\x16remindMeLaterIconImage\x18\x07 \x01(\x0b\x32\r.Common.Image\"\x8f\x03\n\x14\x42illingProfileOption\x12\x0c\n\x04type\x18\x01 \x01(\x05\x12\x14\n\x0c\x64isplayTitle\x18\x02 \x01(\t\x12\x1c\n\x14\x65xternalInstrumentId\x18\x03 \x01(\t\x12*\n\ttopupInfo\x18\x04 \x01(\x0b\x32\x17.CommonDevice.TopupInfo\x12T\n\x1e\x63\x61rrierBillingInstrumentStatus\x18\x05 \x01(\x0b\x32,.CommonDevice.CarrierBillingInstrumentStatus\x12:\n\x11genericInstrument\x18\x06 \x01(\x0b\x32\x1f.CommonDevice.GenericInstrument\x12)\n!paymentsIntegratorInstrumentToken\x18\x07 \x01(\x0c\x12 \n\ticonImage\x18\x08 \x01(\x0b\x32\r.Common.Image\x12\x18\n\x10serverLogsCookie\x18\t \x01(\x0c\x12\x10\n\x08typeName\x18\n \x01(\tB8\n com.google.android.finsky.protosB\x14\x42illingProfileProtos')
,
dependencies=[common__pb2.DESCRIPTOR,common__device__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_BILLINGPROFILE = _descriptor.Descriptor(
name='BillingProfile',
full_name='BillingProfileProtos.BillingProfile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instrument', full_name='BillingProfileProtos.BillingProfile.instrument', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='selectedExternalInstrumentId', full_name='BillingProfileProtos.BillingProfile.selectedExternalInstrumentId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='billingProfileOption', full_name='BillingProfileProtos.BillingProfile.billingProfileOption', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paymentsIntegratorCommonToken', full_name='BillingProfileProtos.BillingProfile.paymentsIntegratorCommonToken', index=3,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='remindMeLaterIconImage', full_name='BillingProfileProtos.BillingProfile.remindMeLaterIconImage', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=90,
serialized_end=350,
)
_BILLINGPROFILEOPTION = _descriptor.Descriptor(
name='BillingProfileOption',
full_name='BillingProfileProtos.BillingProfileOption',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='BillingProfileProtos.BillingProfileOption.type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='displayTitle', full_name='BillingProfileProtos.BillingProfileOption.displayTitle', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='externalInstrumentId', full_name='BillingProfileProtos.BillingProfileOption.externalInstrumentId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topupInfo', full_name='BillingProfileProtos.BillingProfileOption.topupInfo', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='carrierBillingInstrumentStatus', full_name='BillingProfileProtos.BillingProfileOption.carrierBillingInstrumentStatus', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='genericInstrument', full_name='BillingProfileProtos.BillingProfileOption.genericInstrument', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paymentsIntegratorInstrumentToken', full_name='BillingProfileProtos.BillingProfileOption.paymentsIntegratorInstrumentToken', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='iconImage', full_name='BillingProfileProtos.BillingProfileOption.iconImage', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serverLogsCookie', full_name='BillingProfileProtos.BillingProfileOption.serverLogsCookie', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='typeName', full_name='BillingProfileProtos.BillingProfileOption.typeName', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=353,
serialized_end=752,
)
_BILLINGPROFILE.fields_by_name['instrument'].message_type = common__device__pb2._INSTRUMENT
_BILLINGPROFILE.fields_by_name['billingProfileOption'].message_type = _BILLINGPROFILEOPTION
_BILLINGPROFILE.fields_by_name['remindMeLaterIconImage'].message_type = common__pb2._IMAGE
_BILLINGPROFILEOPTION.fields_by_name['topupInfo'].message_type = common__device__pb2._TOPUPINFO
_BILLINGPROFILEOPTION.fields_by_name['carrierBillingInstrumentStatus'].message_type = common__device__pb2._CARRIERBILLINGINSTRUMENTSTATUS
_BILLINGPROFILEOPTION.fields_by_name['genericInstrument'].message_type = common__device__pb2._GENERICINSTRUMENT
_BILLINGPROFILEOPTION.fields_by_name['iconImage'].message_type = common__pb2._IMAGE
DESCRIPTOR.message_types_by_name['BillingProfile'] = _BILLINGPROFILE
DESCRIPTOR.message_types_by_name['BillingProfileOption'] = _BILLINGPROFILEOPTION
BillingProfile = _reflection.GeneratedProtocolMessageType('BillingProfile', (_message.Message,), dict(
DESCRIPTOR = _BILLINGPROFILE,
__module__ = 'billing_profile_protos_pb2'
# @@protoc_insertion_point(class_scope:BillingProfileProtos.BillingProfile)
))
_sym_db.RegisterMessage(BillingProfile)
BillingProfileOption = _reflection.GeneratedProtocolMessageType('BillingProfileOption', (_message.Message,), dict(
DESCRIPTOR = _BILLINGPROFILEOPTION,
__module__ = 'billing_profile_protos_pb2'
# @@protoc_insertion_point(class_scope:BillingProfileProtos.BillingProfileOption)
))
_sym_db.RegisterMessage(BillingProfileOption)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n com.google.android.finsky.protosB\024BillingProfileProtos'))
# @@protoc_insertion_point(module_scope)
| mit |
justinpotts/mozillians | mozillians/users/migrations/0032_auto__add_field_userprofile_date_mozillian.py | 3 | 11020 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.date_mozillian'
db.add_column('profile', 'date_mozillian', self.gf('django.db.models.fields.DateField')(default=None, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.date_mozillian'
db.delete_column('profile', 'date_mozillian')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 18, 6, 27, 26, 93145)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 18, 6, 27, 26, 93072)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.group': {
'Meta': {'object_name': 'Group', 'db_table': "'group'"},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'steward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']", 'null': 'True', 'blank': 'True'}),
'system': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wiki': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'groups.language': {
'Meta': {'object_name': 'Language'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'groups.skill': {
'Meta': {'object_name': 'Skill'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'users.usernameblacklist': {
'Meta': {'ordering': "['value']", 'object_name': 'UsernameBlacklist'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_regex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'users.userprofile': {
'Meta': {'ordering': "['full_name']", 'object_name': 'UserProfile', 'db_table': "'profile'"},
'allows_community_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allows_mozilla_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'basket_token': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'date_mozillian': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'date_vouched': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ircname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'is_vouched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Language']"}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'privacy_bio': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_city': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_country': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_email': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_full_name': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_ircname': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_languages': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_photo': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_region': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_skills': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_vouched_by': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'privacy_website': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Skill']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'vouched_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'vouchees'", 'null': 'True', 'blank': 'True', 'to': "orm['users.UserProfile']"}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['users']
| bsd-3-clause |
Huskerboy/startbootstrap-freelancer | freelancer_env/Lib/site-packages/werkzeug/wsgi.py | 85 | 42838 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2, BytesIO
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
from werkzeug.filesystem import get_filesystem_encoding
def responder(f):
    """Marks a function as responder.  Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapper(*args):
        # Call ``f`` and immediately invoke its return value as a WSGI
        # application, passing through the trailing
        # ``(environ, start_response)`` pair.
        return f(*args)(*args[-2:])
    return update_wrapper(wrapper, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """A handy helper function that recreates the full URL as IRI for the
    current request or parts of it.  Here's an example:

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    This optionally it verifies that the host is in a list of trusted hosts.
    If the host is not in there it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.

    Note that the string returned might contain unicode characters as the
    representation is an IRI not an URI.  If you need an ASCII only
    representation you can use the :func:`~werkzeug.urls.iri_to_uri`
    function:

    >>> from werkzeug.urls import iri_to_uri
    >>> iri_to_uri(get_current_url(env))
    'http://localhost/script/?param=foo'

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    # Build the URL piece by piece; binding ``tmp.append`` to ``cat``
    # avoids the repeated attribute lookup.
    tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
    cat = tmp.append
    if host_only:
        return uri_to_iri(''.join(tmp) + '/')
    # SCRIPT_NAME is quoted with its trailing slash removed so exactly one
    # slash separates the script root from the path info below.
    cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
    cat('/')
    if not root_only:
        cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                cat('?' + qs)
    return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
    """Checks if a host matches a list of trusted host names.  This also
    takes care of port normalization.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against.  If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False

    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _normalize(value):
        # Drop an optional ``:port`` suffix, then IDNA-encode so that the
        # comparison happens on the ASCII representation.
        if ':' in value:
            value = value.rsplit(':', 1)[0]
        return _encode_idna(value)

    try:
        hostname = _normalize(hostname)
    except UnicodeError:
        return False

    for ref in trusted_list:
        suffix_match = ref.startswith('.')
        if suffix_match:
            ref = ref[1:]
        try:
            ref = _normalize(ref)
        except UnicodeError:
            # An entry that cannot be encoded fails the whole check,
            # mirroring the handling of an unencodable hostname.
            return False
        if ref == hostname:
            return True
        if suffix_match and hostname.endswith('.' + ref):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.  The lookup
    order is the `X-Forwarded-Host` header, then the normal `Host` header,
    and finally the `SERVER_NAME` environment variable (using the first
    one it finds).

    Optionally it verifies that the host is in a list of trusted hosts.
    If the host is not in there it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    if 'HTTP_X_FORWARDED_HOST' in environ:
        # The header may carry a comma separated chain of hosts; the
        # first entry is the one the client originally requested.
        rv = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip()
    elif 'HTTP_HOST' in environ:
        rv = environ['HTTP_HOST']
    else:
        rv = environ['SERVER_NAME']
        # Only append the port when it is not the scheme's default.
        scheme_and_port = (environ['wsgi.url_scheme'],
                           environ['SERVER_PORT'])
        if scheme_and_port not in (('https', '443'), ('http', '80')):
            rv += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None and not host_is_trusted(rv, trusted_hosts):
        from werkzeug.exceptions import SecurityError
        raise SecurityError('Host "%s" is not trusted' % rv)
    return rv
def get_content_length(environ):
    """Returns the content length from the WSGI environment as an
    integer.  If it's not available or cannot be parsed, `None` is
    returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    content_length = environ.get('CONTENT_LENGTH')
    if content_length is None:
        return None
    try:
        # Negative values are clamped to zero.
        return max(0, int(content_length))
    except (ValueError, TypeError):
        return None
def get_input_stream(environ, safe_fallback=True):
    """Returns the input stream from the WSGI environment and wraps it
    in the most sensible way possible.  The stream returned is not the
    raw WSGI stream in most cases but one that is safe to read from
    without taking into account the content length.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: indicates whether the function should use an
                          empty stream as safe fallback or just return
                          the original WSGI input stream if it can't wrap
                          it safely.  The default is to return an empty
                          stream in those cases.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # A wsgi extension that tells us if the input is terminated.  In
    # that case we return the stream unchanged as we know we can safely
    # read it until the end.
    if environ.get('wsgi.input_terminated'):
        return stream

    # If we don't have a content length we fall back to an empty stream
    # in case of a safe fallback, otherwise we return the stream unchanged.
    # The non-safe fallback is not recommended but might be useful in
    # some situations.
    if content_length is None:
        return safe_fallback and _empty_stream or stream

    # Otherwise limit the stream to the content length so a read without
    # arguments can never block past the request body.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Returns the `QUERY_STRING` from the WSGI environment.  This also
    takes care about the WSGI decoding dance on Python 3 environments as
    a native string.  The string returned will be restricted to ASCII
    characters.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    raw_qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # QUERY_STRING really should be ascii safe but some browsers
    # will send us some unicode stuff (I am looking at you IE).
    # In that case we want to urllib quote it badly.
    quoted = url_quote(raw_qs, safe=':&%=+$!*\'(),')
    return try_coerce_native(quoted)
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance on
    Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
    return to_unicode(raw_path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance on
    Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_name = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
    return to_unicode(raw_name, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.

    If the `charset` is set to `None` a bytestring is returned.

    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None

    script_name = environ.get('SCRIPT_NAME', '')

    # Move any run of leading slashes over to SCRIPT_NAME so empty
    # segments are preserved there.
    stripped = path.lstrip('/')
    if stripped != path:
        script_name += '/' * (len(path) - len(stripped))
    path = stripped

    if '/' in path:
        segment, remainder = path.split('/', 1)
        environ['PATH_INFO'] = '/' + remainder
    else:
        segment = path
        environ['PATH_INFO'] = ''
    environ['SCRIPT_NAME'] = script_name + segment
    return to_unicode(wsgi_get_bytes(segment), charset, errors,
                      allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    # NOTE(review): ``str.split`` always yields at least one element, so
    # this condition is always true and an empty PATH_INFO yields an
    # empty string rather than the documented ``None``.  Callers appear
    # to rely on truthiness only, so the behavior is kept as-is.
    if segments:
        return to_unicode(wsgi_get_bytes(segments[0]),
                          charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path.  The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment.  The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # Strip userinfo (``user@``) and drop the port when it is the
        # scheme's default so netloc comparison is canonical.
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # The path is joined against the base so relative paths resolve too.
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        if not (base_scheme in (u'http', u'https') and
                base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None

    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):

    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.wsgi import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``.  This is pretty useful during development
    because a standalone media server is not required.  One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })

    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a single :func:`~fnmatch.fnmatch`
    rule or a list of such rules for files that are not accessible from the
    web.  If `cache` is set to `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames.  If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident.  We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module.  If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap.  If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a :func:`~fnmatch.fnmatch` rule or list of rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """

    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        for key, value in iteritems(exports):
            # A tuple means package data, a string is either a single file
            # or a directory on the filesystem.
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        if disallow is not None:
            from fnmatch import fnmatch
            # Fix: the docstring promises a *list* of fnmatch rules, but
            # ``fnmatch(x, disallow)`` only worked for a single pattern
            # string and raised TypeError for a list.  Accept both.
            if isinstance(disallow, string_types):
                disallow = [disallow]
            self.is_allowed = lambda x: not any(
                fnmatch(x, pattern) for pattern in disallow)
        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename):
        # Returns a zero-argument callable producing the open file object,
        # its mtime as a datetime and its size in bytes.
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )

    def get_file_loader(self, filename):
        """Returns a loader that always serves the single given file."""
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package, package_path):
        """Returns a loader that serves files from package data via
        `pkg_resources`.  For non-filesystem providers (e.g. zipped eggs)
        the resource bytes are served from memory with the import time as
        modification time.
        """
        from pkg_resources import DefaultProvider, ResourceManager, \
            get_provider
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        filesystem_bound = isinstance(provider, DefaultProvider)

        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            s = provider.get_resource_string(manager, path)
            return basename, lambda: (
                BytesIO(s),
                loadtime,
                len(s)
            )
        return loader

    def get_directory_loader(self, directory):
        """Returns a loader that serves files below the given directory."""
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader

    def generate_etag(self, mtime, file_size, real_filename):
        """Builds a weak content identifier from mtime, size and name."""
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(get_filesystem_encoding())
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )

    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)
        if PY2:
            cleaned_path = cleaned_path.encode(get_filesystem_encoding())
        # sanitize the path for non unix systems
        cleaned_path = cleaned_path.strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # Drop empty segments and ``..`` so exports cannot be escaped.
        path = '/' + '/'.join(x for x in cleaned_path.split('/')
                              if x and x != '..')
        file_loader = None
        for search_path, loader in iteritems(self.exports):
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        if file_loader is None or not self.is_allowed(real_filename):
            # Not one of ours (or blocked): forward to the wrapped app.
            return self.app(environ, start_response)

        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()

        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))

        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        return wrap_file(environ, f)
class DispatcherMiddleware(object):

    """Allows one to mount middlewares or applications in a WSGI application.
    This is useful if you want to combine multiple WSGI applications::

        app = DispatcherMiddleware(app, {
            '/app2':        app2,
            '/app3':        app3
        })
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        script = environ.get('PATH_INFO', '')
        path_info = ''
        # Peel segments off the right end of the path until a mount point
        # matches; whatever was peeled off becomes the new PATH_INFO.
        app = None
        while '/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                break
            script, tail = script.rsplit('/', 1)
            path_info = '/%s%s' % (tail, path_info)
        if app is None:
            # No prefix matched during the loop; fall back to an exact
            # match on the remaining script or the wrapped application.
            app = self.mounts.get(script, self.app)
        environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + script
        environ['PATH_INFO'] = path_info
        return app(environ, start_response)
@implements_iterator
class ClosingIterator(object):

    """The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of an iterator.  Because it is useful to add
    another close action to a returned iterator and adding a custom iterator
    is a boring task this class can be used for that::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    If there is just one close function it can be passed instead of the list.

    A closing iterator is not needed if the application uses response objects
    and finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        iterator = iter(iterable)
        self._next = partial(next, iterator)
        # Normalize ``callbacks`` to a list: None, a single callable or
        # any iterable of callables are all accepted.
        if callbacks is None:
            cleanups = []
        elif callable(callbacks):
            cleanups = [callbacks]
        else:
            cleanups = list(callbacks)
        # The wrapped iterator's own close must run first.
        own_close = getattr(iterator, 'close', None)
        if own_close:
            cleanups.insert(0, own_close)
        self._callbacks = cleanups

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        for callback in self._callbacks:
            callback()
def wrap_file(environ, file, buffer_size=8192):
    """Wraps a file.  This uses the WSGI server's file wrapper if available
    or otherwise the generic :class:`FileWrapper`.

    .. versionadded:: 0.5

    If the file wrapper from the WSGI server is used it's important to not
    iterate over it from inside the application but to pass it through
    unchanged.  If you want to pass out a file wrapper inside a response
    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.

    More information about file wrappers are available in :pep:`333`.

    :param environ: the WSGI environment.
    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper = environ.get('wsgi.file_wrapper', FileWrapper)
    return wrapper(file, buffer_size)
@implements_iterator
class FileWrapper(object):

    """This class can be used to convert a :class:`file`-like object into
    an iterable.  It yields `buffer_size` blocks until the file is fully
    read.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`BaseResponse` you have
    to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        if hasattr(self.file, 'close'):
            self.file.close()

    def seekable(self):
        # Prefer the file's own answer; fall back to probing for ``seek``.
        if hasattr(self.file, 'seekable'):
            return self.file.seekable()
        return hasattr(self.file, 'seek')

    def seek(self, *args):
        if hasattr(self.file, 'seek'):
            self.file.seek(*args)

    def tell(self):
        if hasattr(self.file, 'tell'):
            return self.file.tell()
        return None

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
@implements_iterator
class _RangeWrapper(object):
    # private for now, but should we make it public in the future ?

    """This class can be used to convert an iterable object into
    an iterable that will only yield a piece of the underlying content.
    It yields blocks until the underlying stream range is fully read.
    The yielded blocks will have a size that can't exceed the original
    iterator defined block size, but that can be smaller.

    If you're using this object together with a :class:`BaseResponse` you have
    to use the `direct_passthrough` mode.

    :param iterable: an iterable object with a :meth:`__next__` method.
    :param start_byte: byte from which read will start.
    :param byte_range: how many bytes to read.
    """

    def __init__(self, iterable, start_byte=0, byte_range=None):
        self.iterable = iter(iterable)
        self.byte_range = byte_range
        self.start_byte = start_byte
        self.end_byte = None
        if byte_range is not None:
            self.end_byte = self.start_byte + self.byte_range
        # Total number of bytes consumed from the underlying iterable so
        # far (absolute position, not relative to ``start_byte``).
        self.read_length = 0
        # ``seekable`` is probed on the *original* object, not the
        # iterator, so file-like objects can be fast-forwarded directly.
        self.seekable = hasattr(iterable, 'seekable') and iterable.seekable()
        self.end_reached = False

    def __iter__(self):
        return self

    def _next_chunk(self):
        # Pull one chunk and account for its size; StopIteration marks
        # the wrapper as exhausted before propagating.
        try:
            chunk = next(self.iterable)
            self.read_length += len(chunk)
            return chunk
        except StopIteration:
            self.end_reached = True
            raise

    def _first_iteration(self):
        """Skip ahead to ``start_byte``, either by seeking or by reading
        and discarding chunks.  Returns the first (possibly trimmed)
        chunk, or None when seeking was used, together with the absolute
        position the chunk starts at.
        """
        chunk = None
        if self.seekable:
            self.iterable.seek(self.start_byte)
            self.read_length = self.iterable.tell()
            contextual_read_length = self.read_length
        else:
            while self.read_length <= self.start_byte:
                chunk = self._next_chunk()
            if chunk is not None:
                # Trim the leading bytes that fall before ``start_byte``.
                chunk = chunk[self.start_byte - self.read_length:]
            contextual_read_length = self.start_byte
        return chunk, contextual_read_length

    def _next(self):
        if self.end_reached:
            raise StopIteration()
        chunk = None
        contextual_read_length = self.read_length
        if self.read_length == 0:
            chunk, contextual_read_length = self._first_iteration()
        if chunk is None:
            chunk = self._next_chunk()
        if self.end_byte is not None and self.read_length >= self.end_byte:
            # Last chunk of the range: trim trailing bytes past the end.
            self.end_reached = True
            return chunk[:self.end_byte - contextual_read_length]
        return chunk

    def __next__(self):
        chunk = self._next()
        if chunk:
            return chunk
        self.end_reached = True
        raise StopIteration()

    def close(self):
        if hasattr(self.iterable, 'close'):
            self.iterable.close()
def _make_chunk_iter(stream, limit, buffer_size):
    """Helper for the line and chunk iter functions."""
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    if not hasattr(stream, 'read'):
        # Already an iterable of chunks; just drop empty items.
        for item in stream:
            if item:
                yield item
        return
    if not isinstance(stream, LimitedStream) and limit is not None:
        stream = LimitedStream(stream, limit)
    _read = stream.read
    while True:
        chunk = _read(buffer_size)
        if not chunk:
            return
        yield chunk
def make_line_iter(stream, limit=None, buffer_size=10 * 1024,
                   cap_at_buffer=False):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    first_item = next(_iter, '')
    if not first_item:
        return

    # ``s`` wraps literals so the same code works for text and bytes
    # streams alike.
    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')

    # Put the probed first item back in front of the iterator.
    _iter = chain((first_item,), _iter)

    def _iter_basic_lines():
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            buf_size = 0
            # ``buffer`` carries the unterminated tail of the previous
            # chunk so lines spanning chunk boundaries are reassembled.
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                buf_size += len(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
                elif cap_at_buffer and buf_size >= buffer_size:
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
            buffer = new_buf
        if buffer:
            yield _join(buffer)

    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
                    cap_at_buffer=False):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    first_item = next(_iter, '')
    if not first_item:
        return

    # Put the probed first item back and pick the text or bytes flavor of
    # the splitter based on what the stream actually yields.
    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        # The capturing group makes ``_split`` keep the separators, so
        # they can be recognized and dropped below.
        chunks = _split(new_data)
        new_buf = []
        buf_size = 0
        # ``buffer`` carries the unterminated tail of the previous chunk.
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
                buf_size = 0
            else:
                buf_size += len(item)
                new_buf.append(item)

                if cap_at_buffer and buf_size >= buffer_size:
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
                    buf_size = len(rv)

        buffer = new_buf
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(object):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
.. admonition:: Note on WSGI compliance
calls to :meth:`readline` and :meth:`readlines` are not
WSGI compliant because it passes a size argument to the
readline methods. Unfortunately the WSGI PEP is not safely
implementable without a size argument to :meth:`readline`
because there is no EOF marker in the stream. As a result
of that the use of :meth:`readline` is discouraged.
For the same reason iterating over the :class:`LimitedStream`
is not portable. It internally calls :meth:`readline`.
We strongly suggest using :meth:`read` only or using the
:func:`make_line_iter` which safely iterates line-based
over a WSGI input stream.
:param stream: the stream to wrap.
:param limit: the limit for the stream, must not be longer than
what the string can provide if the stream does not
end with `EOF` (like `wsgi.input`)
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from werkzeug.exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
:param chunk_size: the size for a chunk. It will read the chunk
until the stream is exhausted and throw away
the results.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
def read(self, size=None):
    """Read up to *size* bytes, never reading past the limit.

    :param size: number of bytes to read; ``None`` (or ``-1``, for
                 file-object compatibility) means everything up to
                 the limit.
    """
    if self._pos >= self.limit:
        return self.on_exhausted()
    if size is None or size == -1:  # -1 mirrors the file protocol
        size = self.limit
    want = min(self.limit - self._pos, size)
    try:
        data = self._read(want)
    except (IOError, ValueError):
        return self.on_disconnect()
    if want and len(data) != want:
        # Short read: the client stopped sending before the limit.
        return self.on_disconnect()
    self._pos += len(data)
    return data
def readline(self, size=None):
    """Read and return one line, bounded by *size* and the limit."""
    if self._pos >= self.limit:
        return self.on_exhausted()
    remaining = self.limit - self._pos
    size = remaining if size is None else min(size, remaining)
    try:
        line = self._readline(size)
    except (ValueError, IOError):
        return self.on_disconnect()
    if size and not line:
        # Bytes were expected but nothing arrived: client went away.
        return self.on_disconnect()
    self._pos += len(line)
    return line
def readlines(self, size=None):
    """Reads a file into a list of strings.  It calls :meth:`readline`
    until the file is read to the end.  It does support the optional
    `size` argument if the underlying stream supports it for
    `readline`.
    """
    # Track the position before each readline so the remaining budget
    # can (in principle) be adjusted between iterations.
    last_pos = self._pos
    result = []
    if size is not None:
        # Never read beyond the stream limit, even if *size* asks for more.
        end = min(self.limit, last_pos + size)
    else:
        end = self.limit
    while 1:
        if size is not None:
            # NOTE(review): ``last_pos`` is refreshed to ``self._pos`` at
            # the bottom of every iteration, so this delta is always 0 and
            # ``size`` is effectively never reduced here.  Looks suspicious
            # (sign possibly inverted) but is preserved as-is; confirm
            # against upstream history before changing.
            size -= last_pos - self._pos
        if self._pos >= end:
            break
        result.append(self.readline(size))
        if size is not None:
            last_pos = self._pos
    return result
def tell(self):
    """Return the current read position of the limited stream.

    .. versionadded:: 0.9
    """
    return self._pos
def __next__(self):
    """Return the next line, raising ``StopIteration`` at the limit."""
    line = self.readline()
    if line:
        return line
    raise StopIteration()
| mit |
IronLanguages/ironpython2 | Src/StdLib/Lib/test/test_pyexpat.py | 2 | 30922 | # XXX TypeErrors on calling handlers, or on bad return values from a
# handler, are obscure and unhelpful.
import StringIO, sys
import unittest
from xml.parsers import expat
from test import test_support
from test.test_support import sortdict, run_unittest
class SetAttributeTest(unittest.TestCase):
def setUp(self):
self.parser = expat.ParserCreate(namespace_separator='!')
def test_buffer_text(self):
self.assertIs(self.parser.buffer_text, False)
for x in 0, 1, 2, 0:
self.parser.buffer_text = x
self.assertIs(self.parser.buffer_text, bool(x))
def test_namespace_prefixes(self):
self.assertIs(self.parser.namespace_prefixes, False)
for x in 0, 1, 2, 0:
self.parser.namespace_prefixes = x
self.assertIs(self.parser.namespace_prefixes, bool(x))
def test_returns_unicode(self):
self.assertIs(self.parser.returns_unicode, test_support.have_unicode)
for x in 0, 1, 2, 0:
self.parser.returns_unicode = x
self.assertIs(self.parser.returns_unicode, bool(x))
def test_ordered_attributes(self):
self.assertIs(self.parser.ordered_attributes, False)
for x in 0, 1, 2, 0:
self.parser.ordered_attributes = x
self.assertIs(self.parser.ordered_attributes, bool(x))
def test_specified_attributes(self):
self.assertIs(self.parser.specified_attributes, False)
for x in 0, 1, 2, 0:
self.parser.specified_attributes = x
self.assertIs(self.parser.specified_attributes, bool(x))
def test_invalid_attributes(self):
with self.assertRaises(AttributeError):
self.parser.foo = 1
with self.assertRaises(AttributeError):
self.parser.foo
# Sample XML document exercising a processing instruction, a comment, a DTD
# with a notation and parsed/unparsed entities, namespaces, a CDATA section
# and an external entity reference.  (The literal is runtime test data and
# must stay byte-identical.)
data = '''\
<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<?xml-stylesheet href="stylesheet.css"?>
<!-- comment data -->
<!DOCTYPE quotations SYSTEM "quotations.dtd" [
<!ELEMENT root ANY>
<!NOTATION notation SYSTEM "notation.jpeg">
<!ENTITY acirc "â">
<!ENTITY external_entity SYSTEM "entity.file">
<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
%unparsed_entity;
]>
<root attr1="value1" attr2="value2ὀ">
<myns:subelement xmlns:myns="http://www.python.org/namespace">
Contents of subelements
</myns:subelement>
<sub2><![CDATA[contents of CDATA section]]></sub2>
&external_entity;
</root>
'''
# Produce UTF-8 output
class ParseTest(unittest.TestCase):
    # End-to-end parse of the module-level ``data`` document, comparing the
    # exact sequence of handler callbacks (UTF-8 mode, unicode mode, and
    # file-based parsing).  The IronPython branches drop the DTD-related
    # events; see https://github.com/IronLanguages/ironpython2/issues/464.

    class Outputter:
        # Records one formatted string per handler invocation in ``out``.
        def __init__(self):
            self.out = []

        def StartElementHandler(self, name, attrs):
            self.out.append('Start element: ' + repr(name) + ' ' +
                            sortdict(attrs))

        def EndElementHandler(self, name):
            self.out.append('End element: ' + repr(name))

        def CharacterDataHandler(self, data):
            data = data.strip()
            if data:
                self.out.append('Character data: ' + repr(data))

        def ProcessingInstructionHandler(self, target, data):
            self.out.append('PI: ' + repr(target) + ' ' + repr(data))

        def StartNamespaceDeclHandler(self, prefix, uri):
            self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri))

        def EndNamespaceDeclHandler(self, prefix):
            self.out.append('End of NS decl: ' + repr(prefix))

        def StartCdataSectionHandler(self):
            self.out.append('Start of CDATA section')

        def EndCdataSectionHandler(self):
            self.out.append('End of CDATA section')

        def CommentHandler(self, text):
            self.out.append('Comment: ' + repr(text))

        def NotationDeclHandler(self, *args):
            name, base, sysid, pubid = args
            self.out.append('Notation declared: %s' %(args,))

        def UnparsedEntityDeclHandler(self, *args):
            entityName, base, systemId, publicId, notationName = args
            self.out.append('Unparsed entity decl: %s' %(args,))

        def NotStandaloneHandler(self, userData):
            self.out.append('Not standalone')
            return 1

        def ExternalEntityRefHandler(self, *args):
            context, base, sysId, pubId = args
            self.out.append('External entity ref: %s' %(args[1:],))
            return 1

        def DefaultHandler(self, userData):
            pass

        def DefaultHandlerExpand(self, userData):
            pass

    # Handlers wired onto the parser for every test in this class.
    handler_names = [
        'StartElementHandler', 'EndElementHandler',
        'CharacterDataHandler', 'ProcessingInstructionHandler',
        'UnparsedEntityDeclHandler', 'NotationDeclHandler',
        'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler',
        'CommentHandler', 'StartCdataSectionHandler',
        'EndCdataSectionHandler',
        'DefaultHandler', 'DefaultHandlerExpand',
        #'NotStandaloneHandler',
        'ExternalEntityRefHandler'
        ]

    def test_utf8(self):
        # Parse with returns_unicode off: callbacks receive UTF-8 byte strings.
        out = self.Outputter()
        parser = expat.ParserCreate(namespace_separator='!')
        for name in self.handler_names:
            setattr(parser, name, getattr(out, name))
        parser.returns_unicode = 0
        parser.Parse(data, 1)

        # Verify output
        op = out.out
        # https://github.com/IronLanguages/ironpython2/issues/464
        if sys.platform == 'cli':
            self.assertEqual(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'')
            self.assertEqual(op[1], "Comment: ' comment data '")
            #self.assertEqual(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)")
            #self.assertEqual(op[3], "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')")
            self.assertEqual(op[2], "Start element: 'root' {'attr1': 'value1', 'attr2': u'value2\u1f40'}")
            self.assertEqual(op[3], "NS decl: 'myns' 'http://www.python.org/namespace'")
            self.assertEqual(op[4], "Start element: 'http://www.python.org/namespace!subelement' {}")
            self.assertEqual(op[5], "Character data: 'Contents of subelements'")
            self.assertEqual(op[6], "End element: 'http://www.python.org/namespace!subelement'")
            self.assertEqual(op[7], "End of NS decl: 'myns'")
            self.assertEqual(op[8], "Start element: 'sub2' {}")
            self.assertEqual(op[9], 'Start of CDATA section')
            self.assertEqual(op[10], "Character data: 'contents of CDATA section'")
            self.assertEqual(op[11], 'End of CDATA section')
            self.assertEqual(op[12], "End element: 'sub2'")
            #self.assertEqual(op[15], "External entity ref: (None, 'entity.file', None)")
            self.assertEqual(op[13], "End element: 'root'")
        else:
            self.assertEqual(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'')
            self.assertEqual(op[1], "Comment: ' comment data '")
            self.assertEqual(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)")
            self.assertEqual(op[3], "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')")
            self.assertEqual(op[4], "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}")
            self.assertEqual(op[5], "NS decl: 'myns' 'http://www.python.org/namespace'")
            self.assertEqual(op[6], "Start element: 'http://www.python.org/namespace!subelement' {}")
            self.assertEqual(op[7], "Character data: 'Contents of subelements'")
            self.assertEqual(op[8], "End element: 'http://www.python.org/namespace!subelement'")
            self.assertEqual(op[9], "End of NS decl: 'myns'")
            self.assertEqual(op[10], "Start element: 'sub2' {}")
            self.assertEqual(op[11], 'Start of CDATA section')
            self.assertEqual(op[12], "Character data: 'contents of CDATA section'")
            self.assertEqual(op[13], 'End of CDATA section')
            self.assertEqual(op[14], "End element: 'sub2'")
            self.assertEqual(op[15], "External entity ref: (None, 'entity.file', None)")
            self.assertEqual(op[16], "End element: 'root'")

    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test_unicode(self):
        # Try the parse again, this time producing Unicode output
        out = self.Outputter()
        parser = expat.ParserCreate(namespace_separator='!')
        parser.returns_unicode = 1
        for name in self.handler_names:
            setattr(parser, name, getattr(out, name))

        parser.Parse(data, 1)

        op = out.out
        self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'')
        self.assertEqual(op[1], "Comment: u' comment data '")
        self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)")
        self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')")
        self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}")
        self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'")
        self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}")
        self.assertEqual(op[7], "Character data: u'Contents of subelements'")
        self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'")
        self.assertEqual(op[9], "End of NS decl: u'myns'")
        self.assertEqual(op[10], "Start element: u'sub2' {}")
        self.assertEqual(op[11], 'Start of CDATA section')
        self.assertEqual(op[12], "Character data: u'contents of CDATA section'")
        self.assertEqual(op[13], 'End of CDATA section')
        self.assertEqual(op[14], "End element: u'sub2'")
        self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)")
        self.assertEqual(op[16], "End element: u'root'")

    def test_parse_file(self):
        # Try parsing a file
        out = self.Outputter()
        parser = expat.ParserCreate(namespace_separator='!')
        parser.returns_unicode = 1
        for name in self.handler_names:
            setattr(parser, name, getattr(out, name))
        file = StringIO.StringIO(data)

        parser.ParseFile(file)

        op = out.out
        # https://github.com/IronLanguages/ironpython2/issues/464
        if sys.platform == 'cli':
            self.assertEqual(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'')
            self.assertEqual(op[1], "Comment: ' comment data '")
            #self.assertEqual(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)")
            #self.assertEqual(op[3], "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')")
            self.assertEqual(op[2], "Start element: 'root' {'attr1': 'value1', 'attr2': u'value2\\u1f40'}")
            self.assertEqual(op[3], "NS decl: 'myns' 'http://www.python.org/namespace'")
            self.assertEqual(op[4], "Start element: 'http://www.python.org/namespace!subelement' {}")
            self.assertEqual(op[5], "Character data: 'Contents of subelements'")
            self.assertEqual(op[6], "End element: 'http://www.python.org/namespace!subelement'")
            self.assertEqual(op[7], "End of NS decl: 'myns'")
            self.assertEqual(op[8], "Start element: 'sub2' {}")
            self.assertEqual(op[9], 'Start of CDATA section')
            self.assertEqual(op[10], "Character data: 'contents of CDATA section'")
            self.assertEqual(op[11], 'End of CDATA section')
            self.assertEqual(op[12], "End element: 'sub2'")
            #self.assertEqual(op[13], "External entity ref: (None, 'entity.file', None)")
            self.assertEqual(op[13], "End element: 'root'")
        else:
            self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'')
            self.assertEqual(op[1], "Comment: u' comment data '")
            self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)")
            self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')")
            self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}")
            self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'")
            self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}")
            self.assertEqual(op[7], "Character data: u'Contents of subelements'")
            self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'")
            self.assertEqual(op[9], "End of NS decl: u'myns'")
            self.assertEqual(op[10], "Start element: u'sub2' {}")
            self.assertEqual(op[11], 'Start of CDATA section')
            self.assertEqual(op[12], "Character data: u'contents of CDATA section'")
            self.assertEqual(op[13], 'End of CDATA section')
            self.assertEqual(op[14], "End element: u'sub2'")
            self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)")
            self.assertEqual(op[16], "End element: u'root'")

        # Issue 4877: expat.ParseFile causes segfault on a closed file.
        fp = open(test_support.TESTFN, 'wb')
        try:
            fp.close()
            parser = expat.ParserCreate()
            with self.assertRaises(ValueError):
                parser.ParseFile(fp)
        finally:
            test_support.unlink(test_support.TESTFN)

    def test_parse_again(self):
        parser = expat.ParserCreate()
        file = StringIO.StringIO(data)
        parser.ParseFile(file)
        # Issue 6676: ensure a meaningful exception is raised when attempting
        # to parse more than one XML document per xmlparser instance,
        # a limitation of the Expat library.
        with self.assertRaises(expat.error) as cm:
            parser.ParseFile(file)
        self.assertEqual(expat.ErrorString(cm.exception.code),
                          expat.errors.XML_ERROR_FINISHED)
class NamespaceSeparatorTest(unittest.TestCase):
    # Validation of the ``namespace_separator`` argument to ParserCreate.
    # (Python 2 ``except X, e`` syntax preserved: this file targets py2.)

    def test_legal(self):
        # Tests that make sure we get errors when the namespace_separator value
        # is illegal, and that we don't for good values:
        expat.ParserCreate()
        expat.ParserCreate(namespace_separator=None)
        expat.ParserCreate(namespace_separator=' ')

    def test_illegal(self):
        try:
            expat.ParserCreate(namespace_separator=42)
            self.fail()
        except TypeError, e:
            self.assertEqual(str(e),
                'ParserCreate() argument 2 must be string or None, not int')

        try:
            expat.ParserCreate(namespace_separator='too long')
            self.fail()
        except ValueError, e:
            self.assertEqual(str(e),
                'namespace_separator must be at most one character, omitted, or None')

    def test_zero_length(self):
        # ParserCreate() needs to accept a namespace_separator of zero length
        # to satisfy the requirements of RDF applications that are required
        # to simply glue together the namespace URI and the localname.  Though
        # considered a wart of the RDF specifications, it needs to be supported.
        #
        # See XML-SIG mailing list thread starting with
        # http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
        #
        expat.ParserCreate(namespace_separator='') # too short
class InterningTest(unittest.TestCase):
    """Element names reported by expat must be interned (shared) objects."""

    def test(self):
        # Parse a document that repeats the same tag and record every name
        # handed to the element handlers.
        parser = expat.ParserCreate()
        seen = []
        def collector(name, *args):
            seen.append(name)
        parser.StartElementHandler = collector
        parser.EndElementHandler = collector
        parser.Parse("<e> <e/> <e></e> </e>", 1)
        self.assertEqual(len(seen), 6)
        first = seen[0]
        for entry in seen:
            # Identity, not mere equality: interning must reuse one object.
            self.assertTrue(first is entry)
class BufferTextTest(unittest.TestCase):
    """Tests for expat's ``buffer_text`` feature.

    With buffering on, consecutive character-data chunks are collapsed
    into a single ``CharacterDataHandler`` call; handlers below can also
    toggle buffering mid-parse via a ``buffer-text`` attribute.
    """

    def setUp(self):
        self.stuff = []
        self.parser = expat.ParserCreate()
        self.parser.buffer_text = 1
        self.parser.CharacterDataHandler = self.CharacterDataHandler

    def check(self, expected, label):
        self.assertEqual(self.stuff, expected,
                         "%s\nstuff    = %r\nexpected = %r"
                         % (label, self.stuff, map(unicode, expected)))

    def CharacterDataHandler(self, text):
        self.stuff.append(text)

    def StartElementHandler(self, name, attrs):
        self.stuff.append("<%s>" % name)
        # An element may switch buffering on/off for its following text.
        bt = attrs.get("buffer-text")
        if bt == "yes":
            self.parser.buffer_text = 1
        elif bt == "no":
            self.parser.buffer_text = 0

    def EndElementHandler(self, name):
        self.stuff.append("</%s>" % name)

    def CommentHandler(self, data):
        self.stuff.append("<!--%s-->" % data)

    def setHandlers(self, handlers=()):
        # Fixed: the default used to be a mutable list (``handlers=[]``),
        # a classic shared-mutable-default hazard; an immutable empty
        # tuple is behaviorally identical for iteration.
        for name in handlers:
            setattr(self.parser, name, getattr(self, name))

    def test_default_to_disabled(self):
        parser = expat.ParserCreate()
        self.assertFalse(parser.buffer_text)

    def test_buffering_enabled(self):
        # Make sure buffering is turned on
        self.assertTrue(self.parser.buffer_text)
        self.parser.Parse("<a>1<b/>2<c/>3</a>", 1)
        self.assertEqual(self.stuff, ['123'],
                         "buffered text not properly collapsed")

    def test1(self):
        # XXX This test exposes more detail of Expat's text chunking than we
        # XXX like, but it tests what we need to concisely.
        self.setHandlers(["StartElementHandler"])
        self.parser.Parse("<a>1<b buffer-text='no'/>2\n3<c buffer-text='yes'/>4\n5</a>", 1)
        self.assertEqual(self.stuff,
                         ["<a>", "1", "<b>", "2", "\n", "3", "<c>", "4\n5"],
                         "buffering control not reacting as expected")

    def test2(self):
        self.parser.Parse("<a>1<b/>&lt;2&gt;<c/>&#32;\n&#x20;3</a>", 1)
        self.assertEqual(self.stuff, ["1<2> \n 3"],
                         "buffered text not properly collapsed")

    def test3(self):
        self.setHandlers(["StartElementHandler"])
        self.parser.Parse("<a>1<b/>2<c/>3</a>", 1)
        self.assertEqual(self.stuff, ["<a>", "1", "<b>", "2", "<c>", "3"],
                         "buffered text not properly split")

    def test4(self):
        self.setHandlers(["StartElementHandler", "EndElementHandler"])
        self.parser.CharacterDataHandler = None
        self.parser.Parse("<a>1<b/>2<c/>3</a>", 1)
        self.assertEqual(self.stuff,
                         ["<a>", "<b>", "</b>", "<c>", "</c>", "</a>"])

    def test5(self):
        self.setHandlers(["StartElementHandler", "EndElementHandler"])
        self.parser.Parse("<a>1<b></b>2<c/>3</a>", 1)
        self.assertEqual(self.stuff,
            ["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "</a>"])

    def test6(self):
        self.setHandlers(["CommentHandler", "EndElementHandler",
                          "StartElementHandler"])
        self.parser.Parse("<a>1<b/>2<c></c>345</a> ", 1)
        self.assertEqual(self.stuff,
            ["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "345", "</a>"],
            "buffered text not properly split")

    def test7(self):
        self.setHandlers(["CommentHandler", "EndElementHandler",
                          "StartElementHandler"])
        self.parser.Parse("<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", 1)
        self.assertEqual(self.stuff,
                         ["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3",
                          "<!--abc-->", "4", "<!--def-->", "5", "</a>"],
                         "buffered text not properly split")
# Test handling of exception from callback:
# Test handling of exception from callback:
class HandlerExceptionTest(unittest.TestCase):
    # An exception raised inside a handler must abort the parse and
    # propagate out of Parse() unchanged.
    def StartElementHandler(self, name, attrs):
        raise RuntimeError(name)

    def test(self):
        parser = expat.ParserCreate()
        parser.StartElementHandler = self.StartElementHandler
        try:
            parser.Parse("<a><b><c/></b></a>", 1)
            self.fail()
        except RuntimeError, e:
            # The outermost element triggers the handler first, so the
            # propagated exception must carry its name.
            self.assertEqual(e.args[0], 'a',
                             "Expected RuntimeError for element 'a', but" + \
                             " found %r" % e.args[0])
# Test Current* members:
@unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
class PositionTest(unittest.TestCase):
    """Check CurrentByteIndex/CurrentLineNumber/CurrentColumnNumber against
    a fixed document with known token positions."""

    def StartElementHandler(self, name, attrs):
        self.check_pos('s')

    def EndElementHandler(self, name):
        self.check_pos('e')

    def check_pos(self, event):
        pos = (event,
               self.parser.CurrentByteIndex,
               self.parser.CurrentLineNumber,
               self.parser.CurrentColumnNumber)
        self.assertTrue(self.upto < len(self.expected_list),
                        'too many parser events')
        expected = self.expected_list[self.upto]
        # Fixed: the failure message previously interpolated (pos, expected),
        # reporting the observed value as "Expected" and vice versa.
        self.assertEqual(pos, expected,
                'Expected position %s, got position %s' %(expected, pos))
        self.upto += 1

    def test(self):
        self.parser = expat.ParserCreate()
        self.parser.StartElementHandler = self.StartElementHandler
        self.parser.EndElementHandler = self.EndElementHandler
        self.upto = 0
        self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2),
                              ('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)]

        # Fixed: '<c/>' must be indented by TWO spaces -- expected_list
        # asserts it starts at byte 11 / column 2; a single space (as the
        # corrupted literal had) would put it at byte 10 / column 1.
        xml = '<a>\n <b>\n  <c/>\n </b>\n</a>'
        self.parser.Parse(xml, 1)
class sf1296433Test(unittest.TestCase):
    # Regression test: an exception raised from CharacterDataHandler while
    # parsing a >1024-byte text run must propagate instead of crashing.
    def test_parse_only_xml_data(self):
        # http://python.org/sf/1296433
        #
        xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * 1025)
        # this one doesn't crash
        #xml = "<?xml version='1.0'?><s>%s</s>" % ('a' * 10000)

        class SpecificException(Exception):
            pass

        def handler(text):
            raise SpecificException

        parser = expat.ParserCreate()
        parser.CharacterDataHandler = handler

        # https://github.com/IronLanguages/ironpython2/issues/464
        if sys.platform == 'cli':
            self.assertRaises(Exception, parser.Parse, xml, True)
        else:
            self.assertRaises(Exception, parser.Parse, xml)
class ChardataBufferTest(unittest.TestCase):
    """
    test setting of chardata buffer size
    """
    # ``counting_handler`` tallies CharacterDataHandler invocations in
    # ``self.n``; the tests assert how many flushes a given buffer_size
    # produces for inputs of known length.

    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test_1025_bytes(self):
        # One byte over the 1024-byte buffer forces a second flush.
        self.assertEqual(self.small_buffer_test(1025), 2)

    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test_1000_bytes(self):
        self.assertEqual(self.small_buffer_test(1000), 1)

    def test_wrong_size(self):
        parser = expat.ParserCreate()
        parser.buffer_text = 1
        with self.assertRaises(ValueError):
            parser.buffer_size = -1
        with self.assertRaises(ValueError):
            parser.buffer_size = 0
        with self.assertRaises(TypeError):
            parser.buffer_size = 512.0
        with self.assertRaises(TypeError):
            # sys.maxint is Python-2 only (this file targets py2).
            parser.buffer_size = sys.maxint+1

    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test_unchanged_size(self):
        xml1 = ("<?xml version='1.0' encoding='iso8859'?><s>%s" % ('a' * 512))
        xml2 = 'a'*512 + '</s>'
        parser = expat.ParserCreate()
        parser.CharacterDataHandler = self.counting_handler
        parser.buffer_size = 512
        parser.buffer_text = 1

        # Feed 512 bytes of character data: the handler should be called
        # once.
        self.n = 0
        parser.Parse(xml1)
        self.assertEqual(self.n, 1)

        # Reassign to buffer_size, but assign the same size.
        parser.buffer_size = parser.buffer_size
        self.assertEqual(self.n, 1)

        # Try parsing rest of the document
        parser.Parse(xml2)
        self.assertEqual(self.n, 2)

    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test_disabling_buffer(self):
        xml1 = "<?xml version='1.0' encoding='iso8859'?><a>%s" % ('a' * 512)
        xml2 = ('b' * 1024)
        xml3 = "%s</a>" % ('c' * 1024)
        parser = expat.ParserCreate()
        parser.CharacterDataHandler = self.counting_handler
        parser.buffer_text = 1
        parser.buffer_size = 1024
        self.assertEqual(parser.buffer_size, 1024)

        # Parse one chunk of XML
        self.n = 0
        parser.Parse(xml1, 0)
        self.assertEqual(parser.buffer_size, 1024)
        self.assertEqual(self.n, 1)

        # Turn off buffering and parse the next chunk.
        parser.buffer_text = 0
        self.assertFalse(parser.buffer_text)
        self.assertEqual(parser.buffer_size, 1024)
        for i in range(10):
            parser.Parse(xml2, 0)
        self.assertEqual(self.n, 11)

        parser.buffer_text = 1
        self.assertTrue(parser.buffer_text)
        self.assertEqual(parser.buffer_size, 1024)
        parser.Parse(xml3, 1)
        self.assertEqual(self.n, 12)

    def make_document(self, bytes):
        return ("<?xml version='1.0'?><tag>" + bytes * 'a' + '</tag>')

    def counting_handler(self, text):
        self.n += 1

    def small_buffer_test(self, buffer_len):
        # Parse a document with *buffer_len* chars of text through a
        # 1024-byte buffer and return the number of handler calls.
        xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * buffer_len)
        parser = expat.ParserCreate()
        parser.CharacterDataHandler = self.counting_handler
        parser.buffer_size = 1024
        parser.buffer_text = 1

        self.n = 0
        parser.Parse(xml)
        return self.n

    def test_change_size_1(self):
        if sys.platform=='cli': # https://github.com/IronLanguages/ironpython2/issues/513
            xml1 = "<?xml version='1.0' encoding='iso8859-1'?><a><s>%s" % ('a' * 1024)
        else:
            xml1 = "<?xml version='1.0' encoding='iso8859'?><a><s>%s" % ('a' * 1024)
        xml2 = "aaa</s><s>%s</s></a>" % ('a' * 1025)
        parser = expat.ParserCreate()
        parser.CharacterDataHandler = self.counting_handler
        parser.buffer_text = 1
        parser.buffer_size = 1024
        self.assertEqual(parser.buffer_size, 1024)

        self.n = 0
        parser.Parse(xml1, 0)
        # Growing the buffer mid-parse must take effect immediately.
        parser.buffer_size *= 2
        self.assertEqual(parser.buffer_size, 2048)
        parser.Parse(xml2, 1)
        self.assertEqual(self.n, 2)

    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test_change_size_2(self):
        xml1 = "<?xml version='1.0' encoding='iso8859'?><a>a<s>%s" % ('a' * 1023)
        xml2 = "aaa</s><s>%s</s></a>" % ('a' * 1025)
        parser = expat.ParserCreate()
        parser.CharacterDataHandler = self.counting_handler
        parser.buffer_text = 1
        parser.buffer_size = 2048
        self.assertEqual(parser.buffer_size, 2048)

        self.n=0
        parser.Parse(xml1, 0)
        # Shrinking the buffer mid-parse forces extra flushes.
        parser.buffer_size //= 2
        self.assertEqual(parser.buffer_size, 1024)
        parser.Parse(xml2, 1)
        self.assertEqual(self.n, 4)
class MalformedInputText(unittest.TestCase):
    # Malformed byte sequences must raise ExpatError with a precise
    # line/column message rather than crashing the parser.
    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test1(self):
        xml = "\0\r\n"
        parser = expat.ParserCreate()
        try:
            parser.Parse(xml, True)
            self.fail()
        except expat.ExpatError as e:
            self.assertEqual(str(e), 'unclosed token: line 2, column 0')

    def test2(self):
        # \xc2\x85 is U+0085 NEL encoded in UTF-8 -- not a valid XML
        # name character inside the declaration.
        xml = "<?xml version\xc2\x85='1.0'?>\r\n"
        parser = expat.ParserCreate()
        err_pattern = r'XML declaration not well-formed: line 1, column \d+'
        with self.assertRaisesRegexp(expat.ExpatError, err_pattern):
            parser.Parse(xml, True)
class ForeignDTDTests(unittest.TestCase):
    """
    Tests for the UseForeignDTD method of expat parser objects.
    """
    @unittest.skipIf(sys.platform=='cli', 'https://github.com/IronLanguages/ironpython2/issues/464')
    def test_use_foreign_dtd(self):
        """
        If UseForeignDTD is passed True and a document without an external
        entity reference is parsed, ExternalEntityRefHandler is first called
        with None for the public and system ids.
        """
        handler_call_args = []
        def resolve_entity(context, base, system_id, public_id):
            handler_call_args.append((public_id, system_id))
            return 1

        parser = expat.ParserCreate()
        parser.UseForeignDTD(True)
        parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
        parser.ExternalEntityRefHandler = resolve_entity
        parser.Parse("<?xml version='1.0'?><element/>")
        self.assertEqual(handler_call_args, [(None, None)])

        # test UseForeignDTD() is equal to UseForeignDTD(True)
        handler_call_args[:] = []

        parser = expat.ParserCreate()
        parser.UseForeignDTD()
        parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
        parser.ExternalEntityRefHandler = resolve_entity
        parser.Parse("<?xml version='1.0'?><element/>")
        self.assertEqual(handler_call_args, [(None, None)])

    @unittest.skipIf(sys.platform=='cli', 'Currently failing on IronPython because SetParamEntityParsing is not implemented')
    def test_ignore_use_foreign_dtd(self):
        """
        If UseForeignDTD is passed True and a document with an external
        entity reference is parsed, ExternalEntityRefHandler is called with
        the public and system ids from the document.
        """
        handler_call_args = []
        def resolve_entity(context, base, system_id, public_id):
            handler_call_args.append((public_id, system_id))
            return 1

        parser = expat.ParserCreate()
        parser.UseForeignDTD(True)
        parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
        parser.ExternalEntityRefHandler = resolve_entity
        parser.Parse(
            "<?xml version='1.0'?><!DOCTYPE foo PUBLIC 'bar' 'baz'><element/>")
        # A DOCTYPE in the document overrides the foreign DTD.
        self.assertEqual(handler_call_args, [("bar", "baz")])
def test_main():
    # Aggregate entry point used by the regrtest driver: run every test
    # class defined in this module.
    run_unittest(SetAttributeTest,
                 ParseTest,
                 NamespaceSeparatorTest,
                 InterningTest,
                 BufferTextTest,
                 HandlerExceptionTest,
                 PositionTest,
                 sf1296433Test,
                 ChardataBufferTest,
                 MalformedInputText,
                 ForeignDTDTests)
# Allow running this test module directly (outside the regrtest driver).
if __name__ == "__main__":
    test_main()
| apache-2.0 |
kik01/tpb | tpb/schema/user.py | 1 | 1133 | import graphene
class UserField(graphene.ObjectType):
    # GraphQL object type describing a user record.
    # NOTE(review): the ``graphene.String().NonNull`` attribute access is
    # specific to old graphene releases; modern graphene spells this
    # ``graphene.String(required=True)`` or ``graphene.NonNull(...)`` --
    # confirm against the pinned graphene version.
    id = graphene.String().NonNull
    email = graphene.String().NonNull
    name = graphene.String().NonNull
    username = graphene.String().NonNull
    # Forward reference by name; resolved when the schema is assembled.
    groups = graphene.List('GroupField')
class CreateUser(graphene.Mutation):
    # Mutation creating a user; the payload exposes the created record.
    user = graphene.Field(UserField)

    class Input(object):
        # All fields are mandatory when creating a user.
        # NOTE(review): ``.NonNull`` attribute access is old-graphene style;
        # confirm against the pinned graphene version.
        email = graphene.String().NonNull
        name = graphene.String().NonNull
        username = graphene.String().NonNull
        password = graphene.String().NonNull

    @classmethod
    def mutate(cls, instance, args, info):
        # TODO: persistence not implemented yet; currently a no-op stub.
        pass
class UpdateUser(graphene.Mutation):
    # Mutation updating a user; the payload exposes the updated record.
    user = graphene.Field(UserField)

    class Input(object):
        # All fields optional: only supplied values are meant to change.
        email = graphene.String()
        name = graphene.String()
        username = graphene.String()
        password = graphene.String()

    @classmethod
    def mutate(cls, instance, args, info):
        # TODO: persistence not implemented yet; currently a no-op stub.
        pass
class DeleteUser(graphene.Mutation):
    # Mutation deleting a user by id; the payload echoes the deleted record.
    user = graphene.Field(UserField)

    class Input(object):
        # NOTE(review): ``.NonNull`` attribute access is old-graphene style;
        # confirm against the pinned graphene version.
        id = graphene.String().NonNull

    @classmethod
    def mutate(cls, instance, args, info):
        # TODO: persistence not implemented yet; currently a no-op stub.
        pass
| mit |
tboyce021/home-assistant | tests/components/heos/test_init.py | 13 | 7342 | """Tests for the init module."""
import asyncio
from pyheos import CommandFailedError, HeosError, const
import pytest
from homeassistant.components.heos import (
ControllerManager,
async_setup_entry,
async_unload_entry,
)
from homeassistant.components.heos.const import (
DATA_CONTROLLER_MANAGER,
DATA_SOURCE_MANAGER,
DOMAIN,
)
from homeassistant.components.media_player.const import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.const import CONF_HOST
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
async def test_async_setup_creates_entry(hass, config):
    """Test component setup creates entry from config."""
    assert await async_setup_component(hass, DOMAIN, config)
    await hass.async_block_till_done()
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    entry = entries[0]
    # The entry title embeds the configured host.
    assert entry.title == "Controller (127.0.0.1)"
    assert entry.data == {CONF_HOST: "127.0.0.1"}
    assert entry.unique_id == DOMAIN
async def test_async_setup_updates_entry(hass, config_entry, config, controller):
    """Test component setup updates entry from config."""
    # Change the YAML host; the existing entry must be updated, not duplicated.
    config[DOMAIN][CONF_HOST] = "127.0.0.2"
    config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, DOMAIN, config)
    await hass.async_block_till_done()
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    entry = entries[0]
    assert entry.title == "Controller (127.0.0.2)"
    assert entry.data == {CONF_HOST: "127.0.0.2"}
    assert entry.unique_id == DOMAIN
async def test_async_setup_returns_true(hass, config_entry, config):
    """Test component setup from config."""
    config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, DOMAIN, config)
    await hass.async_block_till_done()
    entries = hass.config_entries.async_entries(DOMAIN)
    # The pre-existing entry is reused as-is.
    assert len(entries) == 1
    assert entries[0] == config_entry
async def test_async_setup_no_config_returns_true(hass, config_entry):
    """Test component setup from entry only."""
    # No YAML config at all: setup still succeeds from the config entry.
    config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, DOMAIN, {})
    await hass.async_block_till_done()
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    assert entries[0] == config_entry
async def test_async_setup_entry_loads_platforms(
    hass, config_entry, controller, input_sources, favorites
):
    """Test load connects to heos, retrieves players, and loads platforms."""
    config_entry.add_to_hass(hass)
    with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
        assert await async_setup_entry(hass, config_entry)
        # Assert platforms loaded
        await hass.async_block_till_done()
        assert forward_mock.call_count == 1
    assert controller.connect.call_count == 1
    assert controller.get_players.call_count == 1
    assert controller.get_favorites.call_count == 1
    assert controller.get_input_sources.call_count == 1
    # On success the connection is kept open.
    controller.disconnect.assert_not_called()
    assert hass.data[DOMAIN][DATA_CONTROLLER_MANAGER].controller == controller
    assert hass.data[DOMAIN][MEDIA_PLAYER_DOMAIN] == controller.players
    assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].favorites == favorites
    assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].inputs == input_sources
async def test_async_setup_entry_not_signed_in_loads_platforms(
    hass, config_entry, controller, input_sources, caplog
):
    """Test setup does not retrieve favorites when not logged in."""
    config_entry.add_to_hass(hass)
    # Simulate an unauthenticated controller.
    controller.is_signed_in = False
    controller.signed_in_username = None
    with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
        assert await async_setup_entry(hass, config_entry)
        # Assert platforms loaded
        await hass.async_block_till_done()
        assert forward_mock.call_count == 1
    assert controller.connect.call_count == 1
    assert controller.get_players.call_count == 1
    # Favorites require a signed-in account and must be skipped.
    assert controller.get_favorites.call_count == 0
    assert controller.get_input_sources.call_count == 1
    controller.disconnect.assert_not_called()
    assert hass.data[DOMAIN][DATA_CONTROLLER_MANAGER].controller == controller
    assert hass.data[DOMAIN][MEDIA_PLAYER_DOMAIN] == controller.players
    assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].favorites == {}
    assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].inputs == input_sources
    assert (
        "127.0.0.1 is not logged in to a HEOS account and will be unable to retrieve "
        "HEOS favorites: Use the 'heos.sign_in' service to sign-in to a HEOS account"
        in caplog.text
    )
async def test_async_setup_entry_connect_failure(hass, config_entry, controller):
    """Connection failure raises ConfigEntryNotReady."""
    config_entry.add_to_hass(hass)
    controller.connect.side_effect = HeosError()
    with pytest.raises(ConfigEntryNotReady):
        await async_setup_entry(hass, config_entry)
        await hass.async_block_till_done()
    # Setup must attempt to connect once and then clean up the connection.
    assert controller.connect.call_count == 1
    assert controller.disconnect.call_count == 1
    controller.connect.reset_mock()
    controller.disconnect.reset_mock()
async def test_async_setup_entry_player_failure(hass, config_entry, controller):
    """Failure to retrieve players/sources raises ConfigEntryNotReady."""
    config_entry.add_to_hass(hass)
    controller.get_players.side_effect = HeosError()
    with pytest.raises(ConfigEntryNotReady):
        await async_setup_entry(hass, config_entry)
        await hass.async_block_till_done()
    # Even though connect succeeded, a failed player fetch must disconnect.
    assert controller.connect.call_count == 1
    assert controller.disconnect.call_count == 1
    controller.connect.reset_mock()
    controller.disconnect.reset_mock()
async def test_unload_entry(hass, config_entry, controller):
    """Test entries are unloaded correctly."""
    controller_manager = Mock(ControllerManager)
    hass.data[DOMAIN] = {DATA_CONTROLLER_MANAGER: controller_manager}
    with patch.object(
        hass.config_entries, "async_forward_entry_unload", return_value=True
    ) as unload:
        assert await async_unload_entry(hass, config_entry)
        await hass.async_block_till_done()
        # Unload must disconnect the manager and forward the unload once.
        assert controller_manager.disconnect.call_count == 1
        assert unload.call_count == 1
    # All integration data must be removed from hass.data.
    assert DOMAIN not in hass.data
async def test_update_sources_retry(hass, config_entry, config, controller, caplog):
    """Test update sources retries on failures to max attempts."""
    config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, DOMAIN, config)
    controller.get_favorites.reset_mock()
    controller.get_input_sources.reset_mock()
    source_manager = hass.data[DOMAIN][DATA_SOURCE_MANAGER]
    # Make retries immediate and cap at one retry so the test stays fast.
    source_manager.retry_delay = 0
    source_manager.max_retry_attempts = 1
    controller.get_favorites.side_effect = CommandFailedError("Test", "test", 0)
    controller.dispatcher.send(
        const.SIGNAL_CONTROLLER_EVENT, const.EVENT_SOURCES_CHANGED, {}
    )
    # Wait until it's finished.  The wait is bounded (~10 s) so a regression
    # in the retry logic fails the test instead of hanging the suite forever.
    for _ in range(100):
        if "Unable to update sources" in caplog.text:
            break
        await asyncio.sleep(0.1)
    else:
        pytest.fail("Timed out waiting for source update to give up")
    # Initial attempt + one retry.
    assert controller.get_favorites.call_count == 2
| apache-2.0 |
tboyce021/home-assistant | tests/components/geo_location/test_trigger.py | 3 | 9380 | """The tests for the geolocation trigger."""
import pytest
from homeassistant.components import automation, zone
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service, mock_component
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Registers a mock 'test.automation' service and returns the list that
    # accumulates every service call made to it.
    return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Initialize components.

    Creates the 'zone.test' zone (250 m radius) that every test below
    uses as the geofence for enter/leave triggers.
    """
    mock_component(hass, "group")
    hass.loop.run_until_complete(
        async_setup_component(
            hass,
            zone.DOMAIN,
            {
                "zone": {
                    "name": "test",
                    "latitude": 32.880837,
                    "longitude": -117.237561,
                    "radius": 250,
                }
            },
        )
    )
async def test_if_fires_on_zone_enter(hass, calls):
    """Test for firing on zone enter."""
    context = Context()
    # Start outside the zone.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.881011, "longitude": -117.234758, "source": "test_source"},
    )
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "geo_location",
                    "source": "test_source",
                    "zone": "zone.test",
                    "event": "enter",
                },
                "action": {
                    "service": "test.automation",
                    "data_template": {
                        # Renders "{{ trigger.platform }} - {{ trigger.entity_id }} - ..."
                        "some": "{{ trigger.%s }}"
                        % "}} - {{ trigger.".join(
                            (
                                "platform",
                                "entity_id",
                                "from_state.state",
                                "to_state.state",
                                "zone.name",
                            )
                        )
                    },
                },
            }
        },
    )
    # Move inside the zone -> trigger fires.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564},
        context=context,
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].context.parent_id == context.id
    assert (
        calls[0].data["some"]
        == "geo_location - geo_location.entity - hello - hello - test"
    )
    # Set out of zone again so we can trigger call
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.881011, "longitude": -117.234758},
    )
    await hass.async_block_till_done()
    # Turn the automation off; re-entering must no longer fire.
    await hass.services.async_call(
        automation.DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
        blocking=True,
    )
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564},
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_not_fires_for_enter_on_zone_leave(hass, calls):
    """Test for not firing on zone leave."""
    # Start inside the zone.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
    )
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "geo_location",
                    "source": "test_source",
                    "zone": "zone.test",
                    "event": "enter",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # Move outside the zone: an 'enter' trigger must not fire on leave.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.881011, "longitude": -117.234758},
    )
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_if_fires_on_zone_leave(hass, calls):
    """Test for firing on zone leave."""
    # Start inside the zone.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
    )
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "geo_location",
                    "source": "test_source",
                    "zone": "zone.test",
                    "event": "leave",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # Move outside the zone -> 'leave' trigger fires once.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.881011, "longitude": -117.234758, "source": "test_source"},
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_not_fires_for_leave_on_zone_enter(hass, calls):
    """Test for not firing on zone enter."""
    # Start outside the zone.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.881011, "longitude": -117.234758, "source": "test_source"},
    )
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "geo_location",
                    "source": "test_source",
                    "zone": "zone.test",
                    "event": "leave",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # Move inside the zone: a 'leave' trigger must not fire on enter.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564},
    )
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_if_fires_on_zone_appear(hass, calls):
    """Test for firing if entity appears in zone."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "geo_location",
                    "source": "test_source",
                    "zone": "zone.test",
                    "event": "enter",
                },
                "action": {
                    "service": "test.automation",
                    "data_template": {
                        # Renders "{{ trigger.platform }} - {{ trigger.entity_id }} - ..."
                        "some": "{{ trigger.%s }}"
                        % "}} - {{ trigger.".join(
                            (
                                "platform",
                                "entity_id",
                                "from_state.state",
                                "to_state.state",
                                "zone.name",
                            )
                        )
                    },
                },
            }
        },
    )
    # Entity appears in zone without previously existing outside the zone.
    context = Context()
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
        context=context,
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].context.parent_id == context.id
    # from_state is empty because the entity did not exist before.
    assert (
        calls[0].data["some"] == "geo_location - geo_location.entity - - hello - test"
    )
async def test_if_fires_on_zone_disappear(hass, calls):
    """Test for firing if entity disappears from zone."""
    # Start inside the zone.
    hass.states.async_set(
        "geo_location.entity",
        "hello",
        {"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
    )
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "geo_location",
                    "source": "test_source",
                    "zone": "zone.test",
                    "event": "leave",
                },
                "action": {
                    "service": "test.automation",
                    "data_template": {
                        # Renders "{{ trigger.platform }} - {{ trigger.entity_id }} - ..."
                        "some": "{{ trigger.%s }}"
                        % "}} - {{ trigger.".join(
                            (
                                "platform",
                                "entity_id",
                                "from_state.state",
                                "to_state.state",
                                "zone.name",
                            )
                        )
                    },
                },
            }
        },
    )
    # Entity disappears from zone without new coordinates outside the zone.
    hass.states.async_remove("geo_location.entity")
    await hass.async_block_till_done()
    assert len(calls) == 1
    # to_state is empty because the entity was removed.
    assert (
        calls[0].data["some"] == "geo_location - geo_location.entity - hello - - test"
    )
| apache-2.0 |
GustavoRD78/78Kernel-MOB31E.Z1.3657 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested sub-dicts on access."""
    return defaultdict(autodict)
# Per-event registries populated by the define_* callbacks below; the
# autodict nesting means lookups never raise KeyError for unseen
# event/field names.
flag_fields = autodict()
symbolic_fields = autodict()


def define_flag_field(event_name, field_name, delim):
    """Register a flag-typed field and the delimiter used when printing it."""
    flag_fields[event_name][field_name]['delim'] = delim


def define_flag_value(event_name, field_name, value, field_str):
    """Register the printable name for one bit value of a flag-typed field."""
    flag_fields[event_name][field_name]['values'][value] = field_str


def define_symbolic_field(event_name, field_name):
    """Register a symbolic field (no bookkeeping is needed yet)."""
    # nothing to do, really
    pass


def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the printable name for one value of a symbolic field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a flag-typed field value as its symbolic names.

    Looks up names registered via define_flag_field()/define_flag_value()
    and concatenates the names of all set bits, separated by the field's
    delimiter.  Unregistered fields yield "" and unknown bits are dropped.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() instead of keys()+.sort(): dict.keys() is a list in
        # Python 2 but a view (with no .sort()) in Python 3, so this form
        # behaves identically on both.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                # value 0 maps directly to the name registered for 0.
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the symbolic name registered for *value*, or "" if none matches."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() instead of keys()+.sort(): works on Python 3 dict views
        # as well as Python 2 key lists.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bit -> printable name for the common trace flags word.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}


def trace_flag_str(value):
    """Return the names of all flags set in value, joined by ' | '."""
    names = []
    remaining = value
    for bit in trace_flags:
        if not remaining and not bit:
            # A zero word renders as the dedicated "NONE" entry.
            names.append(trace_flags[bit])
            break
        if bit and (remaining & bit) == bit:
            names.append(trace_flags[bit])
            remaining &= ~bit
    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state value to its single-letter code.

    Returns "Unknown" for any value without a registered code.
    """
    codes = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return codes.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields shared by every traced event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Return the event timestamp as nanoseconds since boot."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Return the timestamp formatted as 'seconds.microseconds'."""
        usecs = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, usecs)
| gpl-2.0 |
albertliangcode/DiceRoll | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 1004 | 9544 | # The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3.  A sentinel (rather than None) is needed because None is a
# meaningful timeout value ("never time out").
_Default = object()


def current_time():
    """
    Retrieve the current time. This function is mocked out in unit testing.
    """
    return time.time()
class Timeout(object):
    """ Timeout configuration.

    Timeouts can be defined as a default for a pool::

        timeout = Timeout(connect=2.0, read=7.0)
        http = PoolManager(timeout=timeout)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))

    Timeouts can be disabled by setting all the parameters to ``None``::

        no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)

    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.

        Defaults to None.

    :type total: integer, float, or None

    :param connect:
        The maximum amount of time to wait for a connection attempt to a server
        to succeed. Omitting the parameter will default the connect timeout to
        the system default. None will set an infinite timeout for connection
        attempts.

    :type connect: integer, float, or None

    :param read:
        The maximum amount of time to wait between consecutive read operations
        for a response from the server. Omitting the parameter will default the
        read timeout to the system default. None will set an infinite timeout.

    :type read: integer, float, or None

    .. note::

        These timeouts only bound individual socket operations, not the total
        wall-clock time of a request: DNS resolution, high CPU load, or a
        server that streams one byte every few seconds can make a request take
        far longer than ``read`` without ever triggering it. Use a separate
        watcher thread to enforce a wall-clock cutoff.
    """

    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        # Monotonic-ish start marker for the connect phase; see start_connect().
        self._start_connect = None

    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)

    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If the type is not an integer or a float, or if it
            is a numeric value less than zero.
        """
        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError:  # Python 3
            # Comparing a non-numeric type to 0 raises TypeError on Python 3.
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        return value

    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.

        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.

        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)

    def clone(self):
        """ Create a copy of the timeout object

        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)

    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect

    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.

        :return: Elapsed time.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect

    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect

        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total

        # Both set: the shorter of connect and total wins.
        return min(self._connect, self.total)

    @property
    def read_timeout(self):
        """ Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.

        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (self.total is not None and
                self.total is not self.DEFAULT_TIMEOUT and
                self._read is not None and
                self._read is not self.DEFAULT_TIMEOUT):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Remaining budget after connect, capped by the read timeout,
            # floored at 0.
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
| mit |
Jorginho211/Gali-eiro | galiñeiro.py | 1 | 10846 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO # importamos libreria entradas-salidas
import time # importamos libreria temporizadores
from flask import Flask, jsonify, render_template # servicio WEB
#import pygame, sys # Chamadas do sistema
#from pygame.locals import * # Aplicación para as fotos da camara
#import pygame.camera # Funcions para manejar webcam
import threading # Libreria de fios (semaforo)
import subprocess
import os
from datetime import datetime
#modulos propios
import auth
#establecemos sistema de numeracion
# Use Broadcom (BCM) pin numbering.
GPIO.setmode(GPIO.BCM)

# Outputs (active-low relays: True = off, False = on).
GPIO.setup(7, GPIO.OUT)    # Incandescent lamp in the coop
GPIO.output(7, True)
GPIO.setup(15, GPIO.OUT)   # 24 V DC transformer supply
GPIO.output(15, True)
GPIO.setup(18, GPIO.OUT)   # Door motor, -0 V DC rail
GPIO.output(18, True)
GPIO.setup(23, GPIO.OUT)   # Door motor +24 V DC (direction 1 = close)
GPIO.output(23, True)
GPIO.setup(24, GPIO.OUT)   # Door motor +24 V DC (direction 2 = open)
GPIO.output(24, True)
GPIO.setup(25, GPIO.OUT)   # MAN/AUTO push-button lamp
GPIO.output(25, True)

# Inputs with internal pull-ups (switches pull the pin to ground).
GPIO.setup(20, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # Push button
GPIO.setup(16, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # Night photocell
GPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # Day photocell

# Global state shared by the REST handlers and the control loop.
porta = 0                # door position: 0 = closed, 1 = open
Incandescente = 0        # lamp forced on by the panel button
IncandescenteMovil = 0   # lamp state as reported to the mobile app
Pulsador = 0             # panel push-button manual-mode latch
manAuto = 0              # 0: automatic, 1: manual (set from mobile app)
cerreManual = 0          # skip the 20-minute pre-close wait if already done
CandadoAbrirCerrarPorta = threading.Lock()  # serializes door motion
CandadoCamara = threading.Lock()            # serializes webcam access
CandadoPrograma = threading.Lock()          # guards the main control loop

# Flask application serving the REST endpoints.
app = Flask(__name__)
# RESTART endpoint
@app.route('/galinheiro/reiniciar', methods=['GET'])
def Reiniciar():
    """Reboot the Raspberry Pi via an external shell script."""
    os.system("/home/pi/reiniciar.sh")
    return jsonify({"boolean": 1})
# WEBCAM endpoint
@app.route('/galinheiro/snapshot', methods=['GET'])
def Camara():
    """Grab one webcam snapshot to /var/www/html/snapshot.jpg.

    Access is serialized with a lock so concurrent requests never run two
    fswebcam processes against the same device/file.
    """
    CandadoCamara.acquire()
    cameraProcess = subprocess.Popen("fswebcam -d /dev/video0 -i gspca_zc3xx -r 320x232 -S 10 --jpeg 80 --no-banner --save /var/www/html/snapshot.jpg".split());
    cameraProcess.wait()
    CandadoCamara.release()
    # The string below is a disabled pygame-based capture alternative.
    """
    pygame.init()
    pygame.camera.init()
    cam = pygame.camera.Camera("/dev/video0",(320,240),"RGB")
    cam.start()
    image = cam.get_image()
    pygame.image.save(image, "/var/www/html/snapshot.jpg")
    cam.stop()
    """
    return jsonify({"boolean":1})
# TURN ON INCANDESCENT LAMP
def Encender_Incandescente(dispositivo):
    """Switch the lamp on (active-low output) and record the state.

    dispositivo: caller id used only for logging (0 = panel/automatic,
    1 = mobile app).
    """
    global Incandescente
    global IncandescenteMovil
    GPIO.output(7, False)  # lamp on (relay is active-low)
    IncandescenteMovil = 1
    print "Encender", dispositivo
@app.route('/galinheiro/encender_incandescente/', methods=['GET'])
def Encender_Incandescente_Movil():
    """REST endpoint: turn the lamp on unless the panel button has control."""
    global Incandescente
    global IncandescenteMovil
    if Incandescente == 0:  # ignore the request while panel-forced on
        Encender_Incandescente(1)
    return jsonify({"incandescente": IncandescenteMovil})
# TURN OFF INCANDESCENT LAMP
def Apagar_Incandescente(dispositivo):
    """Switch the lamp off (active-low output) and record the state.

    dispositivo: caller id used only for logging (0 = panel/automatic,
    1 = mobile app).
    """
    global Incandescente
    global IncandescenteMovil
    GPIO.output(7, True)  # lamp off (relay is active-low)
    IncandescenteMovil = 0
    print "Apagar", dispositivo
@app.route('/galinheiro/apagar_incandescente/', methods=['GET'])
def Apagar_Incandescente_Movil():
    """REST endpoint: turn the lamp off unless the panel button has control."""
    global Incandescente
    global IncandescenteMovil
    if Incandescente == 0:  # ignore the request while panel-forced on
        Apagar_Incandescente(1)
    return jsonify({"incandescente": IncandescenteMovil})
# OPEN DOOR
def Abrir_Porta(dispositivo):
    """Open the coop door: power the 24 V supply, run the motor ~22 s, stop.

    Door motion is serialized by CandadoAbrirCerrarPorta.  dispositivo is a
    caller id used only for logging.
    """
    global porta
    CandadoAbrirCerrarPorta.acquire()
    porta = 1
    print "Abrir", dispositivo
    GPIO.output(15, False)  # power the 24 V supply
    time.sleep(1)
    GPIO.output(24, False)  # drive motor (direction 2 = open)
    time.sleep(22)          # travel time of the door
    # In coop-manual mode the supply stays on so the button lamp stays lit.
    # NOTE(review): nesting reconstructed from a whitespace-stripped source;
    # only the supply line is assumed conditional — the motor must always stop.
    if Pulsador == 0:
        GPIO.output(15, True)  # supply off
    GPIO.output(24, True)      # motor off
    CandadoAbrirCerrarPorta.release()
@app.route('/galinheiro/abrir_porta/', methods=['GET'])
def Abrir_Porta_Movil():
    """REST endpoint: open the door when in mobile-manual mode.

    Returns codigo=True when the door was opened, False when it was already
    open.  NOTE(review): nesting reconstructed from a whitespace-stripped
    source; when not in manual mode no response is returned, which Flask
    treats as an error — confirm against the original file.
    """
    global Pulsador
    global manAuto
    global porta
    if Pulsador == 0 and manAuto == 1:
        if porta == 0:
            Abrir_Porta(1)
            return jsonify({"codigo": True})
        else:
            return jsonify({"codigo": False})
def Cerrar_Porta(dispositivo):
    """Close the coop door: power the supply and -0 V rail, run the motor
    ~22 s in direction 1, then power everything down.

    Door motion is serialized by CandadoAbrirCerrarPorta.  dispositivo is a
    caller id used only for logging.
    """
    global porta
    CandadoAbrirCerrarPorta.acquire()
    porta = 0
    print "Cerrar", dispositivo
    GPIO.output(15, False)  # power the 24 V supply
    time.sleep(1)
    GPIO.output(18, False)  # connect the -0 V DC rail
    time.sleep(1)
    GPIO.output(23, False)  # drive motor (direction 1 = close)
    time.sleep(22)          # travel time of the door
    # In coop-manual mode the supply stays on so the button lamp stays lit.
    # NOTE(review): nesting reconstructed from a whitespace-stripped source;
    # only the supply line is assumed conditional — the motor must always stop.
    if Pulsador == 0:
        GPIO.output(15, True)  # supply off
    GPIO.output(23, True)      # motor off
    time.sleep(1)
    GPIO.output(18, True)      # disconnect the -0 V DC rail
    CandadoAbrirCerrarPorta.release()
@app.route('/galinheiro/cerrar_porta/', methods=['GET'])
def Cerrar_Porta_Movil():
    """REST endpoint: close the door when in mobile-manual mode.

    Returns codigo=True when the door was closed, False when it was already
    closed.  NOTE(review): nesting reconstructed from a whitespace-stripped
    source; when not in manual mode no response is returned — confirm
    against the original file.
    """
    global Pulsador
    global manAuto
    global porta
    if Pulsador == 0 and manAuto == 1:
        if porta == 1:
            Cerrar_Porta(1)
            return jsonify({"codigo": True})
        else:
            return jsonify({"codigo": False})
def Encender_Luz_Estado_Pulsador():
    """Light the MAN/AUTO button lamp (requires the 24 V supply to be on)."""
    GPIO.output(15, False)  # power the 24 V supply first
    time.sleep(1)
    GPIO.output(25, False)  # button lamp on (active-low)
def Apagar_Luz_Estado_Pulsador():
    """Turn off the MAN/AUTO button lamp and the 24 V supply."""
    GPIO.output(15, True)
    GPIO.output(25, True)
@app.route('/galinheiro/parametros', methods=['GET'])
def Parametros():
    """REST endpoint: report door position, mode and lamp state."""
    global porta
    global manAuto
    global IncandescenteMovil
    return jsonify({"porta": porta, "manAuto": manAuto, "incandescente": IncandescenteMovil})
@app.route('/galinheiro/automatico_manual/<int:estado>', methods=['GET'])
def Automatico_Manual(estado):
    """REST endpoint: set the mode (0 = automatic, 1 = manual).

    Ignored while the physical panel button holds manual mode.
    """
    global Pulsador
    global manAuto
    if Pulsador == 0:
        manAuto = estado
    return jsonify({"manAuto": manAuto})
def fioManAuto():
    """Watchdog thread: drop mobile-manual mode back to automatic.

    Polls every 10 s while automatic; once manual mode is entered it counts
    up to 600 s and then forces manAuto back to 0.
    """
    global manAuto
    while True:
        tempo = 0
        while manAuto == 0:
            time.sleep(10)
        while manAuto == 1 and tempo < 600:
            time.sleep(1)
            tempo += 1
        manAuto = 0
@app.route("/")
@auth.require
def hello():
    """Serve the authenticated web UI."""
    return render_template('index.html')
@app.route('/galinheiro/', methods=['GET'])
def Programa():
    """Main control loop: panel button handling plus day/night door logic.

    Started once via an HTTP request; never returns (infinite loop), which
    is why CandadoPrograma is taken and the final release is unreachable.
    NOTE(review): the nesting below was reconstructed from a
    whitespace-stripped source — verify against the original file.
    """
    CandadoPrograma.acquire()
    global Pulsador
    global manAuto
    global Incandescente
    global cerreManual
    fioEstadoManAuto = threading.Thread(target=fioManAuto)
    fioEstadoManAuto.start()
    # Close the door first so its position is known.
    Cerrar_Porta(0)
    horaApertura = datetime.strptime("11:00:00", "%X").time()
    horaPeche = datetime.strptime("22:30:00", "%X").time()
    abriuFalloFotocelula = False
    while True:  # program main loop
        # MANUAL CYCLE: panel button toggles manual mode (debounced 0.2 s).
        if GPIO.input(20) == False:
            time.sleep(0.2)
            if GPIO.input(20) == False:
                if Pulsador == 0:
                    print "Manual 1"
                    Pulsador = 1
                    manAuto = 0  # the panel button overrides the mobile app
                    Encender_Luz_Estado_Pulsador()
                    Incandescente = 1
                    Encender_Incandescente(0)
                    if porta == 0:
                        Abrir_Porta(0)
                elif Pulsador == 1:
                    print "Automatico"
                    Apagar_Incandescente(0)
                    Incandescente = 0
                    Apagar_Luz_Estado_Pulsador()
                    Pulsador = 0
                # Busy-wait until the button is released.
                while GPIO.input(20) == False:
                    continue
        if Pulsador == 0 and manAuto == 0:  # AUTOMATIC CYCLE
            horaActual = datetime.now().time()
            if GPIO.input(12) == False:  # day photocell active again
                abriuFalloFotocelula = False
            # Close at night (photocell) or past closing time.
            if (GPIO.input(16) == False or horaActual > horaPeche) and porta == 1:
                if abriuFalloFotocelula and horaActual <= horaPeche:
                    continue
                Incandescente = 1
                if cerreManual == 1:
                    # Light the lamp and wait 20 minutes so the hens go in.
                    Encender_Incandescente(0)
                    time.sleep(1200)
                cerreManual = 0
                Cerrar_Porta(0)
                Apagar_Incandescente(0)
                Incandescente = 0
            #if horaActual > horaApertura and horaActual <= horaPeche:
            # Open during the day (photocell or time window).
            if (GPIO.input(12) == False or (horaActual > horaApertura and horaActual <= horaPeche)) and porta == 0:
                if GPIO.input(12):
                    # Opened on schedule despite the day photocell not firing.
                    abriuFalloFotocelula = True
                Abrir_Porta(0)
                cerreManual = 1
    CandadoPrograma.release()  # unreachable: the loop above never exits
#-------------------------------------------------------------------------
# Video surveillance
#-------------------------------------------------------------------------
@app.route('/videovixilancia/activar_grabacion', methods=['GET'])
def Activar_Videovixilancia():
    """REST endpoint: enable motion-triggered recording (script arg '0')."""
    process = subprocess.Popen(['/usr/local/etc/motion/scriptMotion.sh', '0'], stdout=subprocess.PIPE)
    process.wait()
    return jsonify({"codigo": True})
@app.route('/videovixilancia/desactivar_grabacion', methods=['GET'])
def Desactivar_Videovixilancia():
    """REST endpoint: disable motion-triggered recording (script arg '1')."""
    process = subprocess.Popen(['/usr/local/etc/motion/scriptMotion.sh', '1'], stdout=subprocess.PIPE)
    process.wait()
    return jsonify({"codigo": True})
@app.route('/videovixilancia/activar_mostrar', methods=['GET'])
def Activar_Videovixilancia_Mostrar():
    """REST endpoint: enable live view without recording (script arg '0')."""
    process = subprocess.Popen(['/usr/local/etc/motion/scriptMotionOnlyShow.sh', '0'], stdout=subprocess.PIPE)
    process.wait()
    return jsonify({"codigo": True})
@app.route('/videovixilancia/desactivar_mostrar', methods=['GET'])
def Desactivar_Videovixilancia_Mostrar():
    """REST endpoint: disable live view (script arg '1')."""
    process = subprocess.Popen(['/usr/local/etc/motion/scriptMotionOnlyShow.sh', '1'], stdout=subprocess.PIPE)
    process.wait()
    return jsonify({"codigo": True})
@app.route('/videovixilancia/parametros', methods=['GET'])
def Parametros_Videovixilancia():
    """REST endpoint: query both motion scripts (arg '3' = report state)."""
    process = subprocess.Popen(['/usr/local/etc/motion/scriptMotionOnlyShow.sh', '3'], stdout=subprocess.PIPE)
    process.wait()
    output = process.communicate()
    motionSoloMostrar = int(output[0])
    process2 = subprocess.Popen(['/usr/local/etc/motion/scriptMotion.sh', '3'], stdout=subprocess.PIPE)
    process2.wait()
    output = process2.communicate()
    motionGrabar = int(output[0])
    return jsonify({"motionSoloMostrar": motionSoloMostrar, "motionGrabar": motionGrabar })
# Run the Flask server only when executed directly (not on import).
if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True, threaded=True)
| gpl-3.0 |
Dave667/service | script.module.html5lib/lib/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
# Character classes from the XML 1.0 specification (Appendix B).  Each string
# is a '|'-separated list of single code points (#xHHHH) and inclusive ranges
# ([#xHHHH-#xHHHH]); they are parsed elsewhere in this module to decide which
# characters are legal in XML names.
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""

# CJK ideographs permitted in names.
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""

# Combining marks permitted after the first character of a name.
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
# Without the
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
    """Parse a " | "-separated XML production (e.g. the BaseChar list)
    into sorted, merged ``[start, end]`` code point ranges.

    Each token is either ``#xNNNN``, ``[#xNNNN-#xNNNN]``, or a literal
    single character such as ``.`` or ``-``.
    """
    tokens = [token.strip() for token in chars.split(" | ")]
    ranges = []
    for token in tokens:
        matched = False
        for pattern in (reChar, reCharRange):
            m = pattern.match(token)
            if m is None:
                continue
            bounds = [hexToInt(group) for group in m.groups()]
            if len(bounds) == 1:
                # Single code point: represent as a degenerate range.
                bounds = bounds * 2
            ranges.append(bounds)
            matched = True
            break
        if not matched:
            # Literal one-character token (e.g. "." or "_").
            assert len(token) == 1
            ranges.append([ord(token)] * 2)
    return normaliseCharList(ranges)
def normaliseCharList(charList):
    """Return *charList* sorted, with overlapping or adjacent
    ``[start, end]`` ranges merged into single ranges.

    Every item must satisfy ``start <= end``. The first item of each
    merged run is reused (and mutated in place) in the returned list.
    """
    charList = sorted(charList)
    for item in charList:
        assert item[1] >= item[0]
    rv = []
    i = 0
    while i < len(charList):
        j = 1
        rv.append(charList[i])
        while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
            # Bug fix: use max() so a range fully contained in the current
            # run (e.g. [2, 3] inside [0, 10]) cannot *shrink* the merged
            # range's upper bound.
            rv[-1][1] = max(rv[-1][1], charList[i + j][1])
            j += 1
        i += j
    return rv
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList):
    """Return the ``[start, end]`` ranges in ``[0, max_unicode]`` that are
    NOT covered by *charList* (a normalised, sorted list of inclusive
    ranges, as produced by normaliseCharList).
    """
    # Robustness: an empty cover means everything is missing (the original
    # raised IndexError here).
    if not charList:
        return [[0, max_unicode]]
    rv = []
    # Bug fix: compare the first range's *start* to 0. The original wrote
    # ``charList[0] != 0``, comparing a [start, end] list to an int —
    # always true — which emitted a bogus [0, -1] range whenever coverage
    # began at code point 0.
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        # Gap between the end of this range and the start of the next.
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
def listToRegexpStr(charList):
    """Render ``[start, end]`` code point ranges as a regular-expression
    character class string, e.g. ``[a-z0-9_]``."""
    parts = []
    for start, end in charList:
        if start == end:
            # Degenerate range: emit the single escaped character.
            parts.append(escapeRegexp(chr(start)))
        else:
            parts.append("%s-%s" % (escapeRegexp(chr(start)),
                                    escapeRegexp(chr(end))))
    return "[%s]" % "".join(parts)
def hexToInt(hex_str):
    """Parse a hexadecimal string (no ``0x`` prefix required) to an int."""
    return int(hex_str, base=16)
def escapeRegexp(string):
    """Backslash-escape every regexp metacharacter in *string* so the
    result can be embedded literally in a pattern."""
    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
                         "[", "]", "|", "(", ")", "-")
    # Per-character escaping is equivalent to the chained replace()
    # approach because the escapes never introduce new special characters.
    return "".join("\\" + char if char in specialCharacters else char
                   for char in string)
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
    """Coerce arbitrary infoset names and data into values that are legal
    in XML 1.0, emitting a DataLossWarning whenever information is
    dropped or rewritten.

    Illegal characters are replaced by ``UXXXXX`` escape sequences
    (five uppercase hex digits); fromXmlName() reverses the mapping.
    """

    # Matches the "UXXXXX" escape sequences produced by escapeChar().
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self, replaceChars=None,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # NOTE(review): replaceChars is accepted but unused here —
        # presumably kept for interface compatibility; confirm with callers.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs
        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd
        self.replaceFormFeedCharacters = replaceFormFeedCharacters
        self.preventSingleQuotePubid = preventSingleQuotePubid
        # char -> "UXXXXX" memo, filled lazily by escapeChar().
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        """Return an XML-safe attribute name, or None to drop it."""
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        if (self.dropXmlnsAttrNs and
                namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        return self.toXmlName(name)

    def coerceElement(self, name, namespace=None):
        """Return an XML-safe element name."""
        return self.toXmlName(name)

    def coerceComment(self, data):
        """Rewrite "--" runs so *data* is legal inside an XML comment."""
        if self.preventDoubleDashComments:
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
        return data

    def coerceCharacters(self, data):
        """Replace form feeds (U+000C) with spaces when configured to."""
        if self.replaceFormFeedCharacters:
            # One warning per occurrence; replace() removes them all.
            for _ in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        """Escape characters that are not allowed in a PubidLiteral."""
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            dataOutput = dataOutput.replace(
                char, self.getReplacementCharacter(char))
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace(
                "'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        """Escape any characters of *name* illegal in an XML Name.

        The first character uses the stricter NameStartChar rule.
        """
        first, rest = name[0], name[1:]
        if nonXmlNameFirstBMPRegexp.match(first):
            warnings.warn("Coercing non-XML name", DataLossWarning)
            firstOutput = self.getReplacementCharacter(first)
        else:
            firstOutput = first
        restOutput = rest
        for char in set(nonXmlNameBMPRegexp.findall(rest)):
            warnings.warn("Coercing non-XML name", DataLossWarning)
            restOutput = restOutput.replace(
                char, self.getReplacementCharacter(char))
        return firstOutput + restOutput

    def getReplacementCharacter(self, char):
        """Return the memoised escape for *char*, computing on first use."""
        if char in self.replaceCache:
            return self.replaceCache[char]
        return self.escapeChar(char)

    def fromXmlName(self, name):
        """Invert toXmlName(): decode "UXXXXX" escapes back to characters."""
        for escape in set(self.replacementRegexp.findall(name)):
            name = name.replace(escape, self.unescapeChar(escape))
        return name

    def escapeChar(self, char):
        """Encode *char* as "UXXXXX" and remember the mapping."""
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        """Decode one "UXXXXX" escape back to its character."""
        return chr(int(charcode[1:], 16))
| gpl-2.0 |
samuelchong/libcloud | libcloud/common/nfsn.py | 29 | 4137 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import random
import string
import time
from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import JsonResponse
from libcloud.common.types import InvalidCredsError, ProviderError
from libcloud.utils.py3 import basestring, httplib, urlencode
SALT_CHARACTERS = string.ascii_letters + string.digits
class NFSNException(ProviderError):
    """ProviderError that also carries the NFSN API's error code."""

    def __init__(self, value, http_code, code, driver=None):
        # Keep the machine-readable API code before delegating the
        # message/status handling to ProviderError.
        self.code = code
        super(NFSNException, self).__init__(value, http_code, driver)
class NFSNResponse(JsonResponse):
    """Response parser for the NearlyFreeSpeech.NET API."""

    def parse_error(self):
        """Build a human-readable error message from the response body.

        :raises InvalidCredsError: when the API returns HTTP 401.
        :return: message string, always suffixed with the HTTP status code.
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Invalid provider credentials')

        body = self.parse_body()

        if isinstance(body, basestring):
            return body + ' (HTTP Code: %d)' % self.status

        error = body.get('error', None)
        debug = body.get('debug', None)
        # If we only have one of "error" or "debug", use the one that we have.
        # If we have both, use both, with a space character in between them.
        value = 'No message specified'
        if error is not None:
            value = error
        if debug is not None:
            value = debug
        # Bug fix: the original condition was "value is not None", which is
        # always true at this point, so an error with no debug text was
        # duplicated as "<error> <error>". Combine only when both exist.
        if error is not None and debug is not None:
            value = error + ' ' + debug

        value = value + ' (HTTP Code: %d)' % self.status
        return value
class NFSNConnection(ConnectionUserAndKey):
    """Connection to the NFSN API that signs every request with the
    X-NFSN-Authentication header.

    See https://members.nearlyfreespeech.net/wiki/API/Introduction for
    the authentication scheme.
    """
    host = 'api.nearlyfreespeech.net'
    responseCls = NFSNResponse
    allow_insecure = False

    def _header(self, action, data):
        """Build the contents of the X-NFSN-Authentication HTTP header."""
        login = self.user_id
        timestamp = self._timestamp()
        salt = self._salt()
        encoded = urlencode(data)
        body_hash = hashlib.sha1(encoded.encode('utf-8')).hexdigest()
        # Signature is SHA1 over login;timestamp;salt;key;action;body-hash.
        to_sign = ';'.join((login, timestamp, salt, self.key, action,
                            body_hash))
        signature = hashlib.sha1(to_sign.encode('utf-8')).hexdigest()
        return ';'.join((login, timestamp, salt, signature))

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        """Add the X-NFSN-Authentication header to an HTTP request."""
        headers = headers or {}
        params = params or {}
        headers['X-NFSN-Authentication'] = self._header(action, data)
        if method == 'POST':
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        return ConnectionUserAndKey.request(self, action, params, data,
                                            headers, method)

    def encode_data(self, data):
        """NFSN expects plain urlencoded key-value pairs, not JSON."""
        return urlencode(data) if data else data

    def _salt(self):
        """Return a 16-character alphanumeric string."""
        rng = random.SystemRandom()
        return ''.join(rng.choice(SALT_CHARACTERS) for _ in range(16))

    def _timestamp(self):
        """Return the current Unix time in whole seconds, as a string."""
        return str(int(time.time()))
| apache-2.0 |
calculuscowboy/cookiecutter-django | {{cookiecutter.project_slug}}/config/settings/local.py | 2 | 2686 | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
{% if cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'y' %}
- Use mailhog for emails
{% else %}
- Use console backend for emails
{% endif %}
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
{% if cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'y' %}
EMAIL_HOST = env("EMAIL_HOST", default='mailhog')
{% else %}
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
{% endif %}
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
{% if cookiecutter.use_celery == 'y' %}
########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY
{% endif %}
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| bsd-3-clause |
fubecka/f5-dashboard | flask/lib/python2.6/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Helper prober that decides between logical Hebrew (windows-1255)
    and visual Hebrew (ISO-8859-8).

    It recognizes no charset by itself; it scores the position of
    final-form letters within words and combines that with the two
    model probers' confidences in get_charset_name().
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # Seed the two-character lookbehind with spaces so the start of
        # the data behaves like a word boundary.
        self._mPrev = ' '
        self._mBeforePrev = ' '

    def set_model_probers(self, logicalProber, visualProber):
        # These probers are owned by the group prober.
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """True for the final (word-ending) form of Kaf/Mem/Nun/Pe/Tsadi."""
        return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                               FINAL_TSADI]

    def is_non_final(self, c):
        """True for normal-form letters that should not end a word.

        Normal Tsadi is deliberately excluded: apostrophe-suffixed words
        (e.g. 'lechotet') lose the apostrophe during filtering, making a
        non-final Tsadi a common legitimate word-ender. Pe and Kaf are
        kept despite rare loanwords ('Pop', 'Winamp', 'Mubarak') that
        legally end with their normal forms — the benefit outweighs the
        damage.
        """
        return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]

    def feed(self, aBuf):
        """Accumulate logical/visual evidence from final-letter positions.

        For every word longer than one letter:
          * ending with a final letter      -> +1 logical score
          * ending with a non-final letter  -> +1 visual score
          * starting with a final letter    -> +1 visual score
        Final letters in the middle of words carry no evidence either way.
        """
        if self.get_state() == eNotMe:
            # Both model probers say it's not them; no reason to continue.
            return eNotMe

        # Strip 7-bit characters (they become spaces) so that word
        # boundary detection works properly.
        aBuf = self.filter_high_bit_only(aBuf)

        for cur in aBuf:
            if cur == ' ':
                # A word just ended; one-letter words carry no evidence.
                if self._mBeforePrev != ' ':
                    if self.is_final(self._mPrev):
                        # [not space][final letter][space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # [not space][non-final letter][space]
                        self._mFinalCharVisualScore += 1
            elif ((self._mBeforePrev == ' ') and
                    self.is_final(self._mPrev) and (cur != ' ')):
                # [space][final letter][not space]: the word *starts*
                # with a final letter, indicating visual text.
                self._mFinalCharVisualScore += 1
            self._mBeforePrev = self._mPrev
            self._mPrev = cur

        # Keep detecting until the end of the data (or until both model
        # probers return eNotMe, handled above).
        return eDetecting

    def get_charset_name(self):
        """Decide Logical vs Visual Hebrew from the accumulated scores."""
        # Prefer the final-letter evidence when the margin is decisive.
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Otherwise fall back on the model probers' confidence gap.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Still inconclusive: any negative final-letter margin means
        # visual; zero or positive (or plain indecision) defaults to
        # logical.
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        """Remain active as long as either model prober is active."""
        if (self._mLogicalProber.get_state() == eNotMe and
                self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
| apache-2.0 |
luxnovalabs/enjigo_door | web_interface/django/contrib/localflavor/se/forms.py | 110 | 5659 | # -*- coding: utf-8 -*-
"""
Swedish specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.validators import EMPTY_VALUES
from django.contrib.localflavor.se.se_counties import COUNTY_CHOICES
from django.contrib.localflavor.se.utils import (id_number_checksum,
validate_id_birthday, format_personal_id_number, valid_organisation,
format_organisation_number)
__all__ = ('SECountySelect', 'SEOrganisationNumberField',
'SEPersonalIdentityNumberField', 'SEPostalCodeField')
SWEDISH_ID_NUMBER = re.compile(r'^(?P<century>\d{2})?(?P<year>\d{2})(?P<month>\d{2})(?P<day>\d{2})(?P<sign>[\-+])?(?P<serial>\d{3})(?P<checksum>\d)$')
SE_POSTAL_CODE = re.compile(r'^[1-9]\d{2} ?\d{2}$')
class SECountySelect(forms.Select):
    """
    Select widget whose choices are the Swedish counties (län).

    The submitted value is the official county code; see
    http://en.wikipedia.org/wiki/Counties_of_Sweden for the list.
    """
    def __init__(self, attrs=None):
        super(SECountySelect, self).__init__(attrs=attrs, choices=COUNTY_CHOICES)
class SEOrganisationNumberField(forms.CharField):
    """
    Validates Swedish organisation numbers (organisationsnummer).

    Sole proprietorships (enskild firma) may identify themselves with a
    personal identity number, so this field accepts the same input as
    SEPersonalIdentityNumberField -- except co-ordination numbers.
    Ordinary ten-digit organisation numbers (NNNNNNNNNN) are accepted as
    well.

    Cleaning returns YYYYMMDDXXXX for sole proprietors and NNNNNNNNNN for
    other organisations.
    """
    default_error_messages = {
        'invalid': _('Enter a valid Swedish organisation number.'),
    }
    def clean(self, value):
        value = super(SEOrganisationNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        match = SWEDISH_ID_NUMBER.match(value)
        if match is None:
            raise forms.ValidationError(self.error_messages['invalid'])
        groups = match.groupdict()
        # The check digit must agree with the computed checksum.
        if id_number_checksum(groups) != int(groups['checksum']):
            raise forms.ValidationError(self.error_messages['invalid'])
        # A genuine organisation number is formatted and returned directly.
        if valid_organisation(groups):
            return format_organisation_number(groups)
        # Otherwise this may be a sole proprietor's personal number.
        try:
            birth_day = validate_id_birthday(groups, False)
            return format_personal_id_number(birth_day, groups)
        except ValueError:
            raise forms.ValidationError(self.error_messages['invalid'])
class SEPersonalIdentityNumberField(forms.CharField):
    """
    Validates Swedish personal identity numbers (personnummer).

    Accepted formats are YYYYMMDD-XXXX, YYYYMMDDXXXX, YYMMDD-XXXX,
    YYMMDDXXXX and YYMMDD+XXXX, where '+' marks a person older than 100
    years and is taken into account when the birth date is validated.

    Both the checksum and the birth date are verified.  Co-ordination
    numbers (samordningsnummer) are accepted by default; pass
    coordination_number=False to the constructor to reject them.

    The cleaned value is always normalised to YYYYMMDDXXXX.
    """
    default_error_messages = {
        'invalid': _('Enter a valid Swedish personal identity number.'),
        'coordination_number': _('Co-ordination numbers are not allowed.'),
    }
    def __init__(self, coordination_number=True, *args, **kwargs):
        self.coordination_number = coordination_number
        super(SEPersonalIdentityNumberField, self).__init__(*args, **kwargs)
    def clean(self, value):
        value = super(SEPersonalIdentityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        match = SWEDISH_ID_NUMBER.match(value)
        if not match:
            raise forms.ValidationError(self.error_messages['invalid'])
        groups = match.groupdict()
        # Reject numbers whose check digit does not match.
        if id_number_checksum(groups) != int(groups['checksum']):
            raise forms.ValidationError(self.error_messages['invalid'])
        # Reject impossible birth dates.
        try:
            birth_day = validate_id_birthday(groups)
        except ValueError:
            raise forms.ValidationError(self.error_messages['invalid'])
        # Day numbers above 60 denote co-ordination numbers.
        if not self.coordination_number and int(groups['day']) > 60:
            raise forms.ValidationError(self.error_messages['coordination_number'])
        return format_personal_id_number(birth_day, groups)
class SEPostalCodeField(forms.RegexField):
    """
    Validates Swedish postal codes (postnummer): five digits, optionally
    written with a space after the third digit (XXX XX).

    The cleaned value never contains the space.
    """
    default_error_messages = {
        'invalid': _('Enter a Swedish postal code in the format XXXXX.'),
    }
    def __init__(self, *args, **kwargs):
        super(SEPostalCodeField, self).__init__(SE_POSTAL_CODE, *args, **kwargs)
    def clean(self, value):
        cleaned = super(SEPostalCodeField, self).clean(value)
        return cleaned.replace(' ', '')
| unlicense |
c0defreak/python-for-android | python3-alpha/python3-src/Lib/test/test_scope.py | 56 | 18755 | import unittest
from test.support import check_syntax_error, run_unittest
class ScopeTests(unittest.TestCase):
    """Exercise Python's lexical scoping rules.

    Covers closures and cell variables, ``global``/``nonlocal``
    declarations, class namespaces, and the interaction of scoping with
    locals(), eval/exec and trace functions.  Many of these tests depend
    on the exact syntactic form of the nested code, so the bodies must
    not be restructured.
    """
    def testSimpleNesting(self):
        def make_adder(x):
            def adder(y):
                return x + y
            return adder
        inc = make_adder(1)
        plus10 = make_adder(10)
        self.assertEqual(inc(1), 2)
        self.assertEqual(plus10(-2), 8)
    def testExtraNesting(self):
        def make_adder2(x):
            def extra(): # check freevars passing through non-use scopes
                def adder(y):
                    return x + y
                return adder
            return extra()
        inc = make_adder2(1)
        plus10 = make_adder2(10)
        self.assertEqual(inc(1), 2)
        self.assertEqual(plus10(-2), 8)
    def testSimpleAndRebinding(self):
        def make_adder3(x):
            def adder(y):
                return x + y
            x = x + 1 # check tracking of assignment to x in defining scope
            return adder
        inc = make_adder3(0)
        plus10 = make_adder3(9)
        self.assertEqual(inc(1), 2)
        self.assertEqual(plus10(-2), 8)
    def testNestingGlobalNoFree(self):
        def make_adder4(): # XXX add exta level of indirection
            def nest():
                def nest():
                    def adder(y):
                        return global_x + y # check that plain old globals work
                    return adder
                return nest()
            return nest()
        global_x = 1
        adder = make_adder4()
        self.assertEqual(adder(1), 2)
        global_x = 10
        self.assertEqual(adder(-2), 8)
    def testNestingThroughClass(self):
        def make_adder5(x):
            class Adder:
                def __call__(self, y):
                    return x + y
            return Adder()
        inc = make_adder5(1)
        plus10 = make_adder5(10)
        self.assertEqual(inc(1), 2)
        self.assertEqual(plus10(-2), 8)
    def testNestingPlusFreeRefToGlobal(self):
        def make_adder6(x):
            global global_nest_x
            def adder(y):
                return global_nest_x + y
            global_nest_x = x
            return adder
        inc = make_adder6(1)
        plus10 = make_adder6(10)
        self.assertEqual(inc(1), 11) # there's only one global
        self.assertEqual(plus10(-2), 8)
    def testNearestEnclosingScope(self):
        def f(x):
            def g(y):
                x = 42 # check that this masks binding in f()
                def h(z):
                    return x + z
                return h
            return g(2)
        test_func = f(10)
        self.assertEqual(test_func(5), 47)
    def testMixedFreevarsAndCellvars(self):
        def identity(x):
            return x
        def f(x, y, z):
            def g(a, b, c):
                a = a + x # 3
                def h():
                    # z * (4 + 9)
                    # 3 * 13
                    return identity(z * (b + y))
                y = c + z # 9
                return h
            return g
        g = f(1, 2, 3)
        h = g(2, 4, 6)
        self.assertEqual(h(), 39)
    def testFreeVarInMethod(self):
        def test():
            method_and_var = "var"
            class Test:
                def method_and_var(self):
                    return "method"
                def test(self):
                    return method_and_var
                def actual_global(self):
                    return str("global")
                def str(self):
                    return str(self)
            return Test()
        t = test()
        self.assertEqual(t.test(), "var")
        self.assertEqual(t.method_and_var(), "method")
        self.assertEqual(t.actual_global(), "global")
        method_and_var = "var"
        class Test:
            # this class is not nested, so the rules are different
            def method_and_var(self):
                return "method"
            def test(self):
                return method_and_var
            def actual_global(self):
                return str("global")
            def str(self):
                return str(self)
        t = Test()
        self.assertEqual(t.test(), "var")
        self.assertEqual(t.method_and_var(), "method")
        self.assertEqual(t.actual_global(), "global")
    def testCellIsKwonlyArg(self):
        # Issue 1409: Initialisation of a cell value,
        # when it comes from a keyword-only parameter
        def foo(*, a=17):
            def bar():
                return a + 5
            return bar() + 3
        self.assertEqual(foo(a=42), 50)
        self.assertEqual(foo(), 25)
    def testRecursion(self):
        def f(x):
            def fact(n):
                if n == 0:
                    return 1
                else:
                    return n * fact(n - 1)
            if x >= 0:
                return fact(x)
            else:
                raise ValueError("x must be >= 0")
        self.assertEqual(f(6), 720)
    # 'from x import *' inside a function that also has free or nested
    # names is a compile-time error; each string must fail to compile.
    def testUnoptimizedNamespaces(self):
        check_syntax_error(self, """if 1:
            def unoptimized_clash1(strip):
                def f(s):
                    from sys import *
                    return getrefcount(s) # ambiguity: free or local
                return f
            """)
        check_syntax_error(self, """if 1:
            def unoptimized_clash2():
                from sys import *
                def f(s):
                    return getrefcount(s) # ambiguity: global or local
                return f
            """)
        check_syntax_error(self, """if 1:
            def unoptimized_clash2():
                from sys import *
                def g():
                    def f(s):
                        return getrefcount(s) # ambiguity: global or local
                    return f
            """)
        check_syntax_error(self, """if 1:
            def f():
                def g():
                    from sys import *
                    return getrefcount # global or local?
            """)
    def testLambdas(self):
        f1 = lambda x: lambda y: x + y
        inc = f1(1)
        plus10 = f1(10)
        self.assertEqual(inc(1), 2)
        self.assertEqual(plus10(5), 15)
        f2 = lambda x: (lambda : lambda y: x + y)()
        inc = f2(1)
        plus10 = f2(10)
        self.assertEqual(inc(1), 2)
        self.assertEqual(plus10(5), 15)
        f3 = lambda x: lambda y: global_x + y
        global_x = 1
        inc = f3(None)
        self.assertEqual(inc(2), 3)
        f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
        g = f8(1, 2, 3)
        h = g(2, 4, 6)
        self.assertEqual(h(), 18)
    def testUnboundLocal(self):
        def errorInOuter():
            print(y)
            def inner():
                return y
            y = 1
        def errorInInner():
            def inner():
                return y
            inner()
            y = 1
        self.assertRaises(UnboundLocalError, errorInOuter)
        self.assertRaises(NameError, errorInInner)
    def testUnboundLocal_AfterDel(self):
        # #4617: It is now legal to delete a cell variable.
        # The following functions must obviously compile,
        # and give the correct error when accessing the deleted name.
        def errorInOuter():
            y = 1
            del y
            print(y)
            def inner():
                return y
        def errorInInner():
            def inner():
                return y
            y = 1
            del y
            inner()
        self.assertRaises(UnboundLocalError, errorInOuter)
        self.assertRaises(NameError, errorInInner)
    def testUnboundLocal_AugAssign(self):
        # test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
        exec("""if 1:
        global_x = 1
        def f():
            global_x += 1
        try:
            f()
        except UnboundLocalError:
            pass
        else:
            fail('scope of global_x not correctly determined')
        """, {'fail': self.fail})
    def testComplexDefinitions(self):
        def makeReturner(*lst):
            def returner():
                return lst
            return returner
        self.assertEqual(makeReturner(1,2,3)(), (1,2,3))
        def makeReturner2(**kwargs):
            def returner():
                return kwargs
            return returner
        self.assertEqual(makeReturner2(a=11)()['a'], 11)
    def testScopeOfGlobalStmt(self):
        # Examples posted by Samuele Pedroni to python-dev on 3/1/2001
        exec("""if 1:
        # I
        x = 7
        def f():
            x = 1
            def g():
                global x
                def i():
                    def h():
                        return x
                    return h()
                return i()
            return g()
        self.assertEqual(f(), 7)
        self.assertEqual(x, 7)
        # II
        x = 7
        def f():
            x = 1
            def g():
                x = 2
                def i():
                    def h():
                        return x
                    return h()
                return i()
            return g()
        self.assertEqual(f(), 2)
        self.assertEqual(x, 7)
        # III
        x = 7
        def f():
            x = 1
            def g():
                global x
                x = 2
                def i():
                    def h():
                        return x
                    return h()
                return i()
            return g()
        self.assertEqual(f(), 2)
        self.assertEqual(x, 2)
        # IV
        x = 7
        def f():
            x = 3
            def g():
                global x
                x = 2
                def i():
                    def h():
                        return x
                    return h()
                return i()
            return g()
        self.assertEqual(f(), 2)
        self.assertEqual(x, 2)
        # XXX what about global statements in class blocks?
        # do they affect methods?
        x = 12
        class Global:
            global x
            x = 13
            def set(self, val):
                x = val
            def get(self):
                return x
        g = Global()
        self.assertEqual(g.get(), 13)
        g.set(15)
        self.assertEqual(g.get(), 13)
        """)
    def testLeaks(self):
        class Foo:
            count = 0
            def __init__(self):
                Foo.count += 1
            def __del__(self):
                Foo.count -= 1
        def f1():
            x = Foo()
            def f2():
                return x
            f2()
        for i in range(100):
            f1()
        self.assertEqual(Foo.count, 0)
    def testClassAndGlobal(self):
        exec("""if 1:
        def test(x):
            class Foo:
                global x
                def __call__(self, y):
                    return x + y
            return Foo()
        x = 0
        self.assertEqual(test(6)(2), 8)
        x = -1
        self.assertEqual(test(3)(2), 5)
        looked_up_by_load_name = False
        class X:
            # Implicit globals inside classes are be looked up by LOAD_NAME, not
            # LOAD_GLOBAL.
            locals()['looked_up_by_load_name'] = True
            passed = looked_up_by_load_name
        self.assertTrue(X.passed)
        """)
    def testLocalsFunction(self):
        def f(x):
            def g(y):
                def h(z):
                    return y + z
                w = x + y
                y += 3
                return locals()
            return g
        d = f(2)(4)
        self.assertIn('h', d)
        del d['h']
        self.assertEqual(d, {'x': 2, 'y': 7, 'w': 6})
    def testLocalsClass(self):
        # This test verifies that calling locals() does not pollute
        # the local namespace of the class with free variables.  Old
        # versions of Python had a bug, where a free variable being
        # passed through a class namespace would be inserted into
        # locals() by locals() or exec or a trace function.
        #
        # The real bug lies in frame code that copies variables
        # between fast locals and the locals dict, e.g. when executing
        # a trace function.
        def f(x):
            class C:
                x = 12
                def m(self):
                    return x
                locals()
            return C
        self.assertEqual(f(1).x, 12)
        def f(x):
            class C:
                y = x
                def m(self):
                    return x
                z = list(locals())
            return C
        varnames = f(1).z
        self.assertNotIn("x", varnames)
        self.assertIn("y", varnames)
    def testLocalsClass_WithTrace(self):
        # Issue23728: after the trace function returns, the locals()
        # dictionary is used to update all variables, this used to
        # include free variables. But in class statements, free
        # variables are not inserted...
        import sys
        sys.settrace(lambda a,b,c:None)
        try:
            x = 12
            class C:
                def f(self):
                    return x
            self.assertEqual(x, 12) # Used to raise UnboundLocalError
        finally:
            sys.settrace(None)
    def testBoundAndFree(self):
        # var is bound and free in class
        def f(x):
            class C:
                def m(self):
                    return x
                a = x
            return C
        inst = f(3)()
        self.assertEqual(inst.a, inst.m())
    def testInteractionWithTraceFunc(self):
        import sys
        def tracer(a,b,c):
            return tracer
        def adaptgetter(name, klass, getter):
            kind, des = getter
            if kind == 1: # AV happens when stepping from this line to next
                if des == "":
                    des = "_%s__%s" % (klass.__name__, name)
                return lambda obj: getattr(obj, des)
        class TestClass:
            pass
        sys.settrace(tracer)
        adaptgetter("foo", TestClass, (1, ""))
        sys.settrace(None)
        self.assertRaises(TypeError, sys.settrace)
    def testEvalExecFreeVars(self):
        def f(x):
            return lambda: x + 1
        g = f(3)
        self.assertRaises(TypeError, eval, g.__code__)
        try:
            exec(g.__code__, {})
        except TypeError:
            pass
        else:
            self.fail("exec should have failed, because code contained free vars")
    def testListCompLocalVars(self):
        try:
            print(bad)
        except NameError:
            pass
        else:
            print("bad should not be defined")
        def x():
            [bad for s in 'a b' for bad in s.split()]
        x()
        try:
            print(bad)
        except NameError:
            pass
    def testEvalFreeVars(self):
        def f(x):
            def g():
                x
                eval("x + 1")
            return g
        f(4)()
    def testFreeingCell(self):
        # Test what happens when a finalizer accesses
        # the cell where the object was stored.
        # NOTE(review): nestedcell_get is not defined anywhere in this
        # chunk -- presumably provided elsewhere; verify before relying
        # on this test doing anything.
        class Special:
            def __del__(self):
                nestedcell_get()
    def testNonLocalFunction(self):
        def f(x):
            def inc():
                nonlocal x
                x += 1
                return x
            def dec():
                nonlocal x
                x -= 1
                return x
            return inc, dec
        inc, dec = f(0)
        self.assertEqual(inc(), 1)
        self.assertEqual(inc(), 2)
        self.assertEqual(dec(), 1)
        self.assertEqual(dec(), 0)
    def testNonLocalMethod(self):
        def f(x):
            class c:
                def inc(self):
                    nonlocal x
                    x += 1
                    return x
                def dec(self):
                    nonlocal x
                    x -= 1
                    return x
            return c()
        c = f(0)
        self.assertEqual(c.inc(), 1)
        self.assertEqual(c.inc(), 2)
        self.assertEqual(c.dec(), 1)
        self.assertEqual(c.dec(), 0)
    def testGlobalInParallelNestedFunctions(self):
        # A symbol table bug leaked the global statement from one
        # function to other nested functions in the same block.
        # This test verifies that a global statement in the first
        # function does not affect the second function.
        local_ns = {}
        global_ns = {}
        exec("""if 1:
            def f():
                y = 1
                def g():
                    global y
                    return y
                def h():
                    return y + 1
                return g, h
            y = 9
            g, h = f()
            result9 = g()
            result2 = h()
            """, local_ns, global_ns)
        self.assertEqual(2, global_ns["result2"])
        self.assertEqual(9, global_ns["result9"])
    def testNonLocalClass(self):
        def f(x):
            class c:
                nonlocal x
                x += 1
                def get(self):
                    return x
            return c()
        c = f(0)
        self.assertEqual(c.get(), 1)
        self.assertNotIn("x", c.__class__.__dict__)
    def testNonLocalGenerator(self):
        def f(x):
            def g(y):
                nonlocal x
                for i in range(y):
                    x += 1
                    yield x
            return g
        g = f(0)
        self.assertEqual(list(g(5)), [1, 2, 3, 4, 5])
    def testNestedNonLocal(self):
        def f(x):
            def g():
                nonlocal x
                x -= 2
                def h():
                    nonlocal x
                    x += 4
                    return x
                return h
            return g
        g = f(1)
        h = g()
        self.assertEqual(h(), 3)
    def testTopIsNotSignificant(self):
        # See #9997.
        def top(a):
            pass
        def b():
            global a
def test_main():
    """Run the ScopeTests suite via test.support.run_unittest."""
    run_unittest(ScopeTests)
# Allow running this test file directly.
if __name__ == '__main__':
    test_main()
| apache-2.0 |
zeratul2099/home_observe | home_observe.py | 1 | 6529 | #!/usr/bin/env python3
import os
import time
import socket
from datetime import datetime, timedelta
import argparse
import pickle
import random
import requests
from sqlalchemy import asc
from settings import network, last_seen_delta, notify_offline, notify_blacklist, pa_app_token, pa_user_key
from common import get_host_shortname, get_database, get_homedump, get_active_hosts, get_status
offline_notified = set()
def get_addresses(host):
    """Resolve *host* and return an (ipv4, ipv6) pair of strings.

    The IPv4 element is the first address found; the IPv6 element is a
    comma-separated list of the distinct addresses found.  Either element
    is the empty string when resolution for that family fails.
    """
    try:
        v4 = socket.getaddrinfo(host, None, socket.AF_INET)[0][4][0]
    except socket.gaierror:
        v4 = ''
    try:
        # Deduplicate the IPv6 addresses before joining them.
        v6_addrs = {info[4][0] for info in socket.getaddrinfo(host, None, socket.AF_INET6)}
        v6 = ','.join(v6_addrs)
    except socket.gaierror:
        v6 = ''
    return v4, v6
def show_database_log(hostname=None):
    """Print every log row in chronological order, colouring the status.

    :param hostname: optional substring; when given, only rows whose
        hostname contains it are shown.
    """
    log = get_database()
    query = log.select().order_by(asc(log.c.timestamp))
    if hostname:
        query = query.where(log.c.hostname.contains(hostname))
    for row in query.execute().fetchall():
        # Green for online (status == 1), red for offline.
        status_label = '\033[92mOnline\033[0m' if row.status == 1 else '\033[91mOffline\033[0m'
        entry = {
            'shortname': get_host_shortname(row.hostname),
            'status': status_label,
            'timestamp': row.timestamp.strftime('%d.%m.%Y %H:%M:%S'),
            'ipv4': row.ipv4,
            'ipv6': row.ipv6,
            'mac': row.mac,
        }
        print('{shortname:20s}\t{status}\t{timestamp}\t{ipv4:15}\t{ipv6}\t{mac}'.format(**entry))
def send_message_retry(header, message, retries=3):
    """POST *message* to the Pushover API, retrying on name-resolution errors.

    :param header: currently unused (kept for call-site compatibility).
    :param message: notification text to send.
    :param retries: number of attempts before giving up.
    """
    for attempt in range(retries):
        try:
            response = requests.post(
                'https://api.pushover.net/1/messages.json',
                data={'token': pa_app_token, 'user': pa_user_key, 'message': message},
            )
            print(response.text)
        except socket.gaierror:
            # DNS failure: wait briefly and try again.
            print('retry')
            time.sleep(1)
        else:
            break
def home(log):
    """Run one nmap scan of the local network and record host transitions.

    Newly-seen (or long-unseen) hosts are logged to the database as online;
    hosts unseen for last_seen_delta minutes are logged as offline.  Online
    and offline notifications are pushed via send_message_retry, and the
    last-seen timestamps are persisted to homedump.pkl.

    :param log: the database log table (as returned by get_database()).
    """
    now = datetime.utcnow()
    global offline_notified
    print('offline notified', offline_notified)
    # Last-seen timestamps per host from the previous runs.
    homedump = get_homedump()
    # Hosts already known to be active are excluded from the scan.
    excluded_hosts = get_active_hosts(homedump)
    if excluded_hosts:
        nmap_command = 'nmap -sP -PR --exclude %s %s' % (','.join(excluded_hosts), network)
    else:
        nmap_command = 'nmap -sP -PR %s' % network
    print(nmap_command)
    result = os.popen(nmap_command)
    notify_list = []
    seen_hosts = []
    lines = result.readlines()
    for idx, line in enumerate(lines):
        if 'done' in line:
            print(line)
        if 'scan report' in line:
            print(line)
            # Fifth whitespace-separated field of the report line is the host.
            host = line.split(' ')[4].strip()
            last_seen = homedump.get(host, datetime(1970, 1, 1, 0, 0))
            seen_hosts.append(host)
            ago = now - last_seen
            print('%s last seen %s ago' % (host, now - last_seen))
            # The MAC address, if reported, is two lines below the report line.
            try:
                mac_line = lines[idx+2]
                if 'MAC Address' in mac_line:
                    mac = mac_line.split(' ')[2]
                else:
                    mac = None
            except IndexError:
                mac = None
            if ago > timedelta(minutes=last_seen_delta):
                # Host reappeared after being away: log it as online.
                print('NOTIFY', host)
                ipv4, ipv6 = get_addresses(host)
                insert = log.insert()
                insert.execute(hostname=host, status=1, timestamp=now, ipv4=ipv4, ipv6=ipv6, mac=mac)
                # It is online again, so it may be offline-notified later anew.
                try:
                    offline_notified.remove(host)
                except KeyError:
                    pass
                if host.lower() not in notify_blacklist:
                    notify_list.append('%s (%s)' % (get_host_shortname(host), ago))
            homedump[host] = now
    notify_offline_list = []
    # Detect hosts that just crossed the offline threshold (within a
    # one-minute window, so each disappearance is reported only once).
    for host, last_seen in homedump.items():
        ago = now - last_seen
        if ago > timedelta(minutes=last_seen_delta) and ago < timedelta(minutes=last_seen_delta + 1):
            if host not in offline_notified:
                print('NOTIFY OFFLINE', host)
                ipv4, ipv6 = get_addresses(host)
                insert = log.insert()
                insert.execute(hostname=host, status=0, timestamp=now, ipv4=ipv4, ipv6=ipv6)
                if host.lower() not in notify_blacklist:
                    notify_offline_list.append(get_host_shortname(host))
                offline_notified.add(host)
    # Push notifications only when Pushover credentials are configured.
    if pa_app_token and pa_user_key and (len(notify_list) > 0 or len(notify_offline_list) > 0):
        if len(notify_list) > 0:
            send_message_retry('New devices online', ', '.join(notify_list))
        if len(notify_offline_list) > 0 and notify_offline is True:
            # Randomly vary the offline message wording.
            if random.randint(0, 1) == 0:
                send_message_retry('Devices offline', ', '.join(notify_offline_list) + ' is off the grid')
            else:
                if len(notify_offline_list) == 1:
                    send_message_retry('Devices offline', ', '.join(notify_offline_list) + ' has left the building')
                else:
                    send_message_retry('Devices offline', ', '.join(notify_offline_list) + ' have left the building')
    # Persist the updated last-seen map for the next run.
    with open('homedump.pkl', 'wb') as dumpfile:
        pickle.dump(homedump, dumpfile)
    # NOTE(review): this local assignment has no effect outside the
    # function -- looks like dead code; verify before removing.
    last_excluded_hosts = excluded_hosts
def main():
    """Command-line entry point: print status/logs, or run the scanner."""
    parser = argparse.ArgumentParser(description='HomeObserve, a local network observer')
    parser.add_argument('-d', '--daemon', default=False, action='store_true', help='daemon mode')
    parser.add_argument('-s', '--status', default=False, action='store_true', help='print status and exit')
    parser.add_argument('-a', '--active', default=False, action='store_true', help='print active hosts and exit')
    parser.add_argument('-l', '--log', default=False, action='store_true', help='print database log')
    parser.add_argument('-o', '--host', default=None, help='show only logs of host')
    parser.add_argument('--sleep', default=1, type=int, help='sleep after every scan (in seconds)')
    args = parser.parse_args()
    # Informational modes print and return without touching the scanner.
    if args.active:
        print('\n'.join(get_active_hosts(get_homedump())))
        return
    if args.status:
        status_lines = ['%s:\n\t%s\n' % (host, delta) for host, delta in get_status().items()]
        print('\n'.join(status_lines))
        return
    if args.log:
        show_database_log(hostname=args.host)
        return
    # Scanning modes need the database connection.
    log = get_database()
    if args.daemon:
        while True:
            home(log=log)
            time.sleep(args.sleep)
    else:
        home(log=log)
# Script entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
mattHawthorn/sk-torch | sktorch/seqdata.py | 1 | 17152 | #coding:utf-8
from itertools import repeat, chain
from itertools import takewhile
from operator import itemgetter
from random import sample
from typing import Iterable, List, Dict, Tuple, Iterator, Optional as Opt, Sequence as Seq, Mapping as Map
from torch import stack
from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import Dataset
# from torch.utils.data.dataloader import default_collate
from .data import H, T1, str_to_int_tensor_type, IntTensorType, FloatTensorType, NumericTensorTypes
#####################################################################
# Sequence data utils #
#####################################################################
class SpecialToken(str):
    """Marker subclass of ``str`` for special vocabulary tokens (OOV/EOS/null).

    Instances behave exactly like the underlying string; ``repr`` shows the
    subclass name so that special tokens are distinguishable from ordinary
    strings when debugging.
    """
    def __init__(self, token):
        # The string value is fixed by str.__new__; here we only record the
        # subclass name for __repr__.  (The original also contained a no-op
        # ``self = token`` rebinding of the local name, which has been removed.)
        self.name = self.__class__.__name__
    def __str__(self):
        # A SpecialToken IS its string value.
        return self
    def __repr__(self):
        return '%s("%s")' % (self.name, self)
# Concrete special-token marker classes.
class OOV(SpecialToken):
    pass
class EOS(SpecialToken):
    pass
class Null(SpecialToken):
    pass
# Default surface forms for the special tokens.
DEFAULT_OOV = "<oov>"
DEFAULT_EOS = "<eos>"
DEFAULT_NULL = "<null>"
class Vocabulary:
    """
    Bidirectional mapping between hashable tokens and dense integer ids.

    Maintains a token->id dict, the inverse id->token dict, and the highest
    id assigned so far.  An out-of-vocabulary (OOV) token can be registered
    so that unknown tokens encode to a well-defined id.
    """
    def __init__(self, oov_token: H=DEFAULT_OOV):
        # token -> int id
        self.token2id = {}
        # int id -> token
        self.id2token = {}
        # Highest id handed out so far; -1 means the vocabulary is empty.
        self.maxID = -1
        if oov_token is not None:
            self.oov_token = oov_token
    @property
    def size(self):
        """Number of distinct tokens, special tokens included."""
        return len(self.token2id)
    @property
    def oov_token(self):
        return self._oov_token
    @oov_token.setter
    def oov_token(self, oov_token: H):
        # Wrap in the OOV marker type, register it, and cache its id.
        wrapped = OOV(oov_token)
        self._oov_token = wrapped
        self.add(wrapped)
        self._oov_id = self.token2id[wrapped]
    @property
    def oov_id(self):
        return self._oov_id
    def add(self, token: H):
        """Register a single token (no-op if already present)."""
        self.add_many((token,))
    def add_many(self, tokens: Iterable[H]):
        """Register each token, assigning fresh consecutive ids to new ones."""
        for tok in tokens:
            if tok in self.token2id:
                continue
            self.maxID += 1
            self.token2id[tok] = self.maxID
            self.id2token[self.maxID] = tok
    def __len__(self):
        return len(self.token2id)
    def get_ids(self, tokens: Seq[H]) -> List[int]:
        """Encode tokens to ids; unknown tokens map to the OOV id."""
        return [self.token2id.get(tok, self._oov_id) for tok in tokens]
    def get_tokens(self, ids: Seq[int]) -> List[H]:
        """Decode ids to a list of tokens; unknown ids become the OOV token."""
        return list(self.get_tokens_iter(ids))
    def get_tokens_iter(self, ids: Seq[int]) -> Iterator[H]:
        """Lazily decode ids to tokens; unknown ids become the OOV token."""
        lookup, unk = self.id2token.get, self._oov_token
        return (lookup(i, unk) for i in ids)
    @classmethod
    def from_token2id(cls, token2id: Dict[H, int], oov_token: H=DEFAULT_OOV):
        """Build a vocabulary from an existing token -> id mapping."""
        return cls.from_token_id_tuples(token2id.items(), oov_token)
    @classmethod
    def from_id2token(cls, id2token: Dict[int, T1], oov_token: H=DEFAULT_OOV):
        """Build a vocabulary from an existing id -> token mapping."""
        pairs = ((tok, i) for i, tok in id2token.items())
        return cls.from_token_id_tuples(pairs, oov_token)
    @classmethod
    def from_token_id_tuples(cls, token_id_tuples: Iterable[Tuple[T1, int]], oov_token: H=DEFAULT_OOV):
        """Build a vocabulary from (token, id) pairs, then register the OOV token.

        Raises ValueError if any token or id occurs more than once.
        """
        # Start without an OOV token so the OOV id does not claim an id
        # the caller's mapping may be using.
        vocab = Vocabulary(oov_token=None)
        for tok, i in token_id_tuples:
            if tok in vocab.token2id:
                raise ValueError("Multiple ids for token {}".format(tok))
            if i in vocab.id2token:
                raise ValueError("Multiple tokens for id {}".format(i))
            vocab.token2id[tok] = i
            vocab.id2token[i] = tok
            vocab.maxID = max(vocab.maxID, i)
        vocab.oov_token = oov_token
        return vocab
class SequenceTensorEncoder:
    """Encoder/decoder between token sequences and integer-id tensors."""
    def __init__(self, vocab: Vocabulary, append_eos: bool = True, eos_token: Opt[H] = DEFAULT_EOS,
                 batch_first: bool = True,
                 pack_sequences: bool=False, null_token: H = DEFAULT_NULL, int_id_type: str = 'long'):
        """Encoder/decoder for going from sequences to tensors and back. The encoder() and decoder() methods
        can be passed to a TorchModel as the input_encoder and output_decoder kwargs. Additionally, the
        collate_batch method can be passed as the collate_fn arg to a DataLoader instance for wrapping up sequences
        as tensors.
        :param vocab: instance of Vocabulary() to use for encoding/decoding tokens.
        :param int_id_type: string indicating the type of int ids to use. Must be a key of data.str_to_int_tensor_type.
        :param pack_sequences: bool indicating whether to return batches as PackedSequences or simply padded tensors.
        :param null_token: Optional hashable to use for padding sequences. Added to the vocab, unless none is passed
            and none is built, in which case this is considered to be an int id.
            Numpy aliases for integer types are valid, as well as 'long', 'short', 'byte', 'char'.
            The default 'long' is recommended, as only LongTensors can be used to index Embeddings in pytorch.
        """
        self.vocab = vocab
        self.tensor_type = str_to_int_tensor_type[int_id_type.lower()]
        # These property setters also register the EOS/null tokens in the vocab.
        self.eos_token = eos_token
        self.null_token = null_token
        self.pack_sequences = pack_sequences
        self.batch_first = batch_first
        self.append_eos = append_eos
    @property
    def eos_token(self):
        return self._eos_token
    @eos_token.setter
    def eos_token(self, eos_token: H):
        # Register the EOS marker in the vocab and cache its id; a None
        # eos_token leaves EOS handling disabled.
        if eos_token is not None:
            self._eos_token = EOS(eos_token)
            self.vocab.add(self._eos_token)
            self._eos_id = self.vocab.token2id[self._eos_token]
        else:
            self._eos_token = None
            self._eos_id = None
    @property
    def eos_id(self):
        return self._eos_id
    @property
    def null_token(self):
        return self._null_token
    @null_token.setter
    def null_token(self, null_token: H):
        # Register the padding marker in the vocab and cache its id.
        self._null_token = Null(null_token)
        self.vocab.add(self._null_token)
        self._null_id = self.vocab.token2id[self._null_token]
    @property
    def null_id(self):
        return self._null_id
    @property
    def vocab_size(self):
        return len(self.vocab)
    def encode(self, tokens: Seq[H]) -> List[int]:
        """Encode tokens to int ids, appending the EOS id if configured."""
        ids = self.vocab.get_ids(tokens)
        if self.append_eos:
            ids.append(self._eos_id)
        return ids
    def encode_tensor(self, tokens: Seq[H]) -> IntTensorType:
        """Encode tokens to a 1-D int tensor."""
        return self.tensor_type(self.encode(tokens))
    def pad_encode(self, tokens: Seq[H], length: int) -> List[int]:
        """Encode tokens and right-pad with the null id up to *length* tokens
        (the EOS id, when appended, is in addition to *length*)."""
        ids = self.vocab.get_ids(tokens)
        if self.append_eos:
            ids.append(self._eos_id)
        padding = repeat(self.null_id, max(0,length - len(tokens)))
        return list(chain(ids, padding))
    def pad_encode_tensor(self, tokens: Seq[H], length: int) -> IntTensorType:
        """Like pad_encode but returning an int tensor."""
        return self.tensor_type(self.pad_encode(tokens, length))
    def pad_tensor(self, tensor: IntTensorType, length: int) -> IntTensorType:
        """Right-pad an id tensor with the null id up to *length*.
        NOTE: resize_ mutates *tensor* in place when padding is needed."""
        l = len(tensor)
        if l >= length:
            return tensor
        else:
            _tensor = tensor.resize_(length)
            _tensor[l:] = self._null_id
            return _tensor
    def decode(self, tensor: IntTensorType) -> List[H]:
        """Decode a sequence of int ids back to a list of tokens.
        (Return annotation corrected: this returns a list, not a tensor.)"""
        tokens = self.vocab.get_tokens_iter(tensor)
        return list(tokens)
    def pad_decode(self, tensor: IntTensorType) -> List[H]:
        """Decode int ids to tokens, stopping at the first null (padding) token.
        (Return annotation corrected: this returns a list, not a tensor.)"""
        null = self.null_token
        tokens = takewhile(lambda i: i != null, self.vocab.get_tokens_iter(tensor))
        return list(tokens)
    def decode_preds(self, tensor: FloatTensorType) -> List[H]:
        """Decode a (seq_len, vocab_size) score tensor to the argmax tokens.
        NOTE(review): .squeeze() of a length-1 prediction yields a 0-dim
        tensor, which may not iterate -- verify for single-step outputs."""
        vals, ixs = tensor.max(1)
        ids = list(ixs.squeeze())
        return self.vocab.get_tokens(ids)
    def collate_batch(self, batch):
        """Collate a DataLoader batch: a list of sequences, or a list of
        (x, y) tuples, into padded tensors or PackedSequences."""
        encode = self.encode_batch if not self.pack_sequences else self.encode_batch_packed
        if isinstance(batch[0], tuple):
            # for the x and y case
            batch = zip(*batch)
            return [encode(x) for x in batch]
        elif isinstance(batch, list):
            return encode(batch)
        else:
            raise TypeError("Unsure how to collate batch; data must be a list of tuples or a list of lists of tokens,"
                            "not: \n{}".format(batch))
    def encode_batch(self, batch: List[Seq[H]]):
        """Encode a batch of sequences into one padded tensor."""
        seq_lens = zip(batch, map(len, batch))
        seqs, lens = zip(*seq_lens)
        # BUG FIX: was max(*lens), which raises TypeError for a batch of
        # size one (max(5) is invalid); max(lens) handles any batch size.
        max_len = max(lens)
        tensor = self.package_tensor(seqs, max_len)
        return tensor if self.batch_first else tensor.transpose_(0,1)
    def encode_batch_packed(self, batch: List[Seq[H]]):
        """Encode a batch into a PackedSequence (sorted by decreasing length,
        as required by pack_padded_sequence)."""
        seq_lens = sorted(zip(batch, map(len, batch)), key=itemgetter(1), reverse=True)
        seqs, lens = zip(*seq_lens)
        max_len = lens[0]
        tensor = self.package_tensor(seqs, max_len)
        return pack_padded_sequence(tensor, lengths=lens, batch_first=self.batch_first)
    def package_tensor(self, seqs, max_len):
        """Stack already-encoded tensors, or pad-encode raw token sequences,
        into a single batch tensor of width *max_len* (+ EOS if configured)."""
        if isinstance(seqs[0], NumericTensorTypes):
            return stack([self.pad_tensor(t, max_len) for t in seqs])
        else:
            return self.tensor_type([self.pad_encode(tokens, max_len) for tokens in seqs])
class RNNSequencePredictorDataset(Dataset):
"""Subclass of torch.utils.data.Dataset - can be safely passed to a torch.utils.data.DataLoader for multithreaded
data loading. Assumes a sequence prediction (rather than classification) task; dependent variable is simply the
independent variable sequence offset by 1."""
def __init__(self, sequences: Map[int, Seq[H]], encoder: SequenceTensorEncoder, max_len: Opt[int]=None,
null_token: H=DEFAULT_NULL, shuffle: bool=True):
"""
:param sequences: Iterable of sequences of tokens or any other discrete entity
:param max_len: maximum length of sequences to return in training batches
:param encoder: instance of SequenceTensorEncoder
:param shuffle: bool; whether to shuffle examples when iterating over this dataset directly (as opposed to
using a DataLoader to load batches).
"""
self.encoder = encoder
self.encode = encoder.encode
self.decode = encoder.decode
self.collate_fn = encoder.collate_batch
self.null_token = null_token
self.sequences = sequences
self.shuffle = shuffle
self.max_len = max_len
if self.max_len is not None:
def ix_pairs(tup):
i, seq = tup
addon = 1 if not encoder.append_eos else 2
return zip(repeat(i), range(max(len(seq) - max_len + addon, 1)))
self.seq_idxs = list(chain.from_iterable(map(ix_pairs, enumerate(self.sequences))))
else:
self.seq_idxs = None
@property
def vocab_size(self):
return self.encoder.vocab_size
def __len__(self):
return len(self.seq_idxs) if self.max_len is not None else len(self.sequences)
def __getitem__(self, idx) -> IntTensorType:
encode = self.encoder.encode_tensor
if self.max_len is not None:
seq_idx, position = self.seq_idxs[idx]
seq = encode(self.sequences[seq_idx])
x = seq[position:position + self.max_len]
y = seq[(position + 1):(position + self.max_len + 1)]
else:
seq = encode(self.sequences[idx])
x, y = seq[0:-1], seq[1:]
return x, y
def __iter__(self):
idxs = range(len(self))
if self.shuffle:
idxs = sample(list(idxs), len(idxs))
getsample = self.__getitem__
return (getsample(i) for i in idxs)
@classmethod
def from_vocab(cls, sequences: Map[int, Seq[H]], vocab: Vocabulary, max_len: int, pack_sequences: bool=False,
append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS, null_token: H=DEFAULT_NULL,
int_id_type: str='long', shuffle: bool=True):
"""
:param vocab: instance of Vocabulary to use for encoding/decoding tokens
:param max_len: maximum length of sequences to sample
:param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
:param int_id_type: string indicating the type of int ids to use. Must be a key of data.str_to_int_tensor_type.
:param eos_token: string or hashable to append to mark end-of-sequence in encoding
:param null_token: Optional hashable to use for padding sequences. Added to the vocab, unless none is passed
and none is built, in which case this is considered to be an int id.
Numpy aliases for integer types are valid, as well as 'long', 'short', 'byte', 'char'.
The default 'long' is recommended, as only LongTensors can be used to index Embeddings in pytorch.
"""
encoder = SequenceTensorEncoder(vocab, append_eos=append_eos, eos_token=eos_token, null_token=null_token,
int_id_type=int_id_type)
return cls(sequences=sequences, encoder=encoder, max_len=max_len, pack_sequences=pack_sequences,
null_token=null_token, shuffle=shuffle)
@classmethod
def from_token2id(cls, sequences: Map[int, Seq[H]], token2id: Dict[H, int],
                  max_len: int, pack_sequences: bool=False,
                  append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS,
                  null_token: H=DEFAULT_NULL, oov_token: H=DEFAULT_OOV,
                  int_id_type: str='long', shuffle: bool=True):
    """Construct a dataset from a token -> int-id mapping.

    :param token2id: mapping of tokens to int ids
    :param max_len: maximum length of sequences to sample
    :param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
    :param int_id_type: string indicating the type of int ids to use. Must be a key of data.str_to_int_tensor_type.
        Numpy aliases for integer types are valid, as well as 'long', 'short', 'byte', 'char'.
        The default 'long' is recommended, as only LongTensors can be used to index Embeddings in pytorch.
    :param oov_token: hashable to insert for out-of-vocab tokens when encoding
    :param eos_token: string or hashable to append to mark end-of-sequence in encoding
    :param null_token: Optional hashable to use for padding sequences. Added to the vocab, unless none is passed
        and none is built, in which case this is considered to be an int id.
    """
    vocabulary = Vocabulary.from_token2id(token2id, oov_token=oov_token)
    enc = SequenceTensorEncoder(vocabulary, append_eos=append_eos, eos_token=eos_token,
                                null_token=null_token, int_id_type=int_id_type)
    return cls(sequences=sequences, encoder=enc, max_len=max_len,
               pack_sequences=pack_sequences, null_token=null_token, shuffle=shuffle)
@classmethod
def from_id2token(cls, sequences: Map[int, Seq[H]], id2token: Dict[int, H],
                  max_len: int, pack_sequences: bool=False,
                  append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS,
                  null_token: H=DEFAULT_NULL, oov_token: H=DEFAULT_OOV,
                  int_id_type: str='long', shuffle: bool=True):
    """Construct a dataset from an int-id -> token mapping.

    :param id2token: mapping of int ids to tokens
        (annotation fixed: this is Dict[int, H], not Dict[H, int] -- the old
        hint had key and value types swapped relative to the docstring and to
        Vocabulary.from_id2token's contract)
    :param max_len: maximum length of sequences to sample
    :param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
    :param int_id_type: string indicating the type of int ids to use. Must be a key of data.str_to_int_tensor_type.
        Numpy aliases for integer types are valid, as well as 'long', 'short', 'byte', 'char'.
        The default 'long' is recommended, as only LongTensors can be used to index Embeddings in pytorch.
    :param oov_token: hashable to insert for out-of-vocab tokens when encoding
    :param eos_token: hashable to append to mark end-of-sequence in encoding
    :param null_token: hashable to use for padding sequences. Added to the vocab, unless none is passed
        and none is built, in which case this is considered to be an int id.
    """
    vocab = Vocabulary.from_id2token(id2token, oov_token=oov_token)
    encoder = SequenceTensorEncoder(vocab, append_eos=append_eos, eos_token=eos_token,
                                    null_token=null_token, int_id_type=int_id_type)
    return cls(sequences=sequences, encoder=encoder, max_len=max_len, pack_sequences=pack_sequences,
               null_token=null_token, shuffle=shuffle)
mark-ignacio/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/svnrevision.py | 143 | 1735 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
class SVNRevision(db.Model):
    """Datastore record for one SVN revision and the bots it is known to break."""
    # Numeric SVN revision id.
    number = db.IntegerProperty()
    # Names of builder bots considered broken at this revision.
    broken_bots = db.StringListProperty(default=[])
    # Timestamp set automatically when the entity is first stored.
    date = db.DateTimeProperty(auto_now_add=True)
| bsd-3-clause |
lfcnassif/MultiContentViewer | release/modules/ext/libreoffice/program/python-core-3.3.0/lib/encodings/koi8_u.py | 272 | 13762 | """ Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless KOI8-U codec backed by the module-level charmap tables.

    Auto-generated by gencodec.py; encoding_table/decoding_table are defined
    at module level below.
    """
    def encode(self,input,errors='strict'):
        # Returns (encoded_bytes, length_consumed) per the stateless codec API.
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental KOI8-U encoder (charmap codecs need no cross-call state)."""
    def encode(self, input, final=False):
        # [0] drops the length-consumed element of charmap_encode's result.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental KOI8-U decoder (charmap codecs need no cross-call state)."""
    def decode(self, input, final=False):
        # [0] drops the length-consumed element of charmap_decode's result.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer; encode() is inherited from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader; decode() is inherited from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'koi8-u'."""
    return codecs.CodecInfo(
        name='koi8-u',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u2580' # 0x8B -> UPPER HALF BLOCK
'\u2584' # 0x8C -> LOWER HALF BLOCK
'\u2588' # 0x8D -> FULL BLOCK
'\u258c' # 0x8E -> LEFT HALF BLOCK
'\u2590' # 0x8F -> RIGHT HALF BLOCK
'\u2591' # 0x90 -> LIGHT SHADE
'\u2592' # 0x91 -> MEDIUM SHADE
'\u2593' # 0x92 -> DARK SHADE
'\u2320' # 0x93 -> TOP HALF INTEGRAL
'\u25a0' # 0x94 -> BLACK SQUARE
'\u2219' # 0x95 -> BULLET OPERATOR
'\u221a' # 0x96 -> SQUARE ROOT
'\u2248' # 0x97 -> ALMOST EQUAL TO
'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
'\xa0' # 0x9A -> NO-BREAK SPACE
'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
'\xb0' # 0x9C -> DEGREE SIGN
'\xb2' # 0x9D -> SUPERSCRIPT TWO
'\xb7' # 0x9E -> MIDDLE DOT
'\xf7' # 0x9F -> DIVISION SIGN
'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa9' # 0xBF -> COPYRIGHT SIGN
'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
# Inverse of decoding_table: maps each unicode character back to its KOI8-U byte.
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
ryokochang/Slab-GCS | Scripts/TAKEOFF.py | 60 | 3583 | launch_altitude = 100
cruise_altitude = 200  # altitude (m, relative) for the final cruise waypoint
takeoff_azimuth = 270  # initial climb-out heading in degrees
turn_direction = 0 #1 = right, 0 = left
#DONT MESS WITH ANYTHING BELOW THIS LINE
import sys
import math
import clr
import time
import System
from System import Byte
clr.AddReference("MissionPlanner")
import MissionPlanner
clr.AddReference("MissionPlanner.Utilities") # includes the Utilities class
from MissionPlanner.Utilities import Locationwp
clr.AddReference("MAVLink") # includes the Utilities class
import MAVLink
# MAVLink command id used for the plain waypoints built below.
idmavcmd = MAVLink.MAV_CMD.WAYPOINT
id = int(idmavcmd)  # NOTE(review): shadows the `id` builtin; harmless in this script
pitch = 10  # takeoff pitch (deg), passed as param 1 of the TAKEOFF item
climb_ratio = 1.0/15.0 #1 meter descent for every 15 meters traveled
turn_dist = 200  # horizontal leg length (m) between post-takeoff turn waypoints
# Home is first read from mission item 0; it is overwritten from the live GPS
# fix (cs.lat/cs.lng) inside the mission-building block below.
lathome = MAV.getWP(0).lat;
lnghome = MAV.getWP(0).lng;
climb_delta = cruise_altitude - launch_altitude
# Ground distance needed to gain 2/3 of climb_delta at the configured climb ratio.
takeoff_dist = (2.0/3.0)*(climb_delta/climb_ratio)
# Intermediate altitude used for the three turn waypoints.
alt_mid = launch_altitude+(2.0/3.0)*(cruise_altitude-launch_altitude)
satellites = cs.satcount  # current GPS satellite count
hdop = cs.gpshdop  # current GPS horizontal dilution of precision
# GPS quality gate: wants >= 6 satellites and hdop <= 4.
# NOTE(review): these branches only print a warning -- despite the
# "NO MISSION CREATED" message, the mission-building block below still
# runs regardless of GPS quality. Confirm whether this is intended.
if satellites < 6:
    print 'GPS FAILED. NO MISSION CREATED. WAIT FOR BETTER GPS'
    print ''
elif hdop > 4:
    print 'GPS FAILED. NO MISSION CREATED. WAIT FOR BETTER GPS.'
    print ''
else:
    print 'GPS PASSED.'
    print ''
# Validate altitude configuration, then build and upload a 6-item takeoff
# mission: TAKEOFF, three turn waypoints at alt_mid, and a final waypoint
# back over home at cruise_altitude.
if launch_altitude < 50:
    print 'LAUNCH ALTITUDE LESS THAN 50 METERS'
    print 'NO MISSION CREATED'
elif launch_altitude >100:
    print 'LAUNCH ALTITUDE GREATER THAN 100 METERS'
    print 'NO MISSION CREATED'
elif cruise_altitude < launch_altitude:
    print 'CRUISE ALTITUDE LESS THAN LAUNCH ALTITUDE'
    print 'NO MISSION CREATED'
else:
    # Take the current GPS fix as home (overrides the WP0 values read earlier).
    lathome = cs.lat
    lnghome = cs.lng
    # Degrees-per-meter conversion: constant for latitude, latitude-dependent
    # for longitude (small circle radius shrinks with cos(lat)).
    rad_earth = 6378100
    circ_earth = rad_earth*2*math.pi
    deglatpermeter = 360/circ_earth
    rad_lat = math.cos(lathome*2*math.pi/360)*rad_earth
    circ_lat = rad_lat*2*math.pi
    deglngpermeter = 360/circ_lat
    # WP1: project takeoff_dist meters along takeoff_azimuth from home.
    lat_delta1 = math.cos(takeoff_azimuth*2*math.pi/360)*takeoff_dist*deglatpermeter
    lng_delta1 = math.sin(takeoff_azimuth*2*math.pi/360)*takeoff_dist*deglngpermeter
    lat1 = lathome + lat_delta1
    lng1 = lnghome + lng_delta1
    # Choose the two turn headings: 135 deg off the climb-out heading, then a
    # further 90 deg, turning right (1) or left (0) per configuration.
    if turn_direction == 1:
        turn1_azimuth = takeoff_azimuth + 135
        turn2_azimuth = turn1_azimuth + 90
    else:
        turn1_azimuth = takeoff_azimuth - 135
        turn2_azimuth = turn1_azimuth - 90
    # WP2 and WP3: successive turn_dist legs along the turn headings.
    lat_delta2 = math.cos(turn1_azimuth*2*math.pi/360)*turn_dist*deglatpermeter
    lng_delta2 = math.sin(turn1_azimuth*2*math.pi/360)*turn_dist*deglngpermeter
    lat2 = lat1 + lat_delta2
    lng2 = lng1 + lng_delta2
    lat_delta3 = math.cos(turn2_azimuth*2*math.pi/360)*turn_dist*deglatpermeter
    lng_delta3 = math.sin(turn2_azimuth*2*math.pi/360)*turn_dist*deglngpermeter
    lat3 = lat2 + lat_delta3
    lng3 = lng2 + lng_delta3
    # Build the TAKEOFF item (pitch in param 1, target altitude in alt).
    takeoff = Locationwp()
    Locationwp.id.SetValue(takeoff, int(MAVLink.MAV_CMD.TAKEOFF))
    Locationwp.p1.SetValue(takeoff, pitch)
    Locationwp.alt.SetValue(takeoff, launch_altitude)
    # Plain waypoints: three turns at alt_mid, then home at cruise_altitude.
    wp1 = Locationwp().Set(lat1,lng1,alt_mid, id)
    wp2 = Locationwp().Set(lat2,lng2,alt_mid, id)
    wp3 = Locationwp().Set(lat3,lng3,alt_mid, id)
    wp4 = Locationwp().Set(lathome,lnghome,cruise_altitude, id)
    # Upload all six mission items using relative-altitude frames.
    MAV.setWPTotal(6) #set wp total
    MAV.setWP(MAV.getWP(0),0,MAVLink.MAV_FRAME.GLOBAL_RELATIVE_ALT); #upload home - reset on arm
    MAV.setWP(takeoff,1,MAVLink.MAV_FRAME.GLOBAL_RELATIVE_ALT); #upload takeoff
    MAV.setWP(wp1,2,MAVLink.MAV_FRAME.GLOBAL_RELATIVE_ALT); #upload wp1
    MAV.setWP(wp2,3,MAVLink.MAV_FRAME.GLOBAL_RELATIVE_ALT); #upload wp2
    MAV.setWP(wp3,4,MAVLink.MAV_FRAME.GLOBAL_RELATIVE_ALT); #upload wp3
    MAV.setWP(wp4,5,MAVLink.MAV_FRAME.GLOBAL_RELATIVE_ALT); #upload wp4
    MAV.setWPCurrent(1); #restart mission to waypoint 0
    MAV.setWPACK(); #final ack
    print 'AUTOGENERATED TAKEOFF MISSION'
rdezavalia/ansible | lib/ansible/playbook/role/definition.py | 22 | 8278 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import iteritems, string_types
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
from ansible.utils.path import unfrackpath
__all__ = ['RoleDefinition']
class RoleDefinition(Base, Become, Conditional, Taggable):
    """Parsed representation of a single role reference within a play.

    Accepts either the short form (a bare role-name string) or the dict form
    (``role:``/``name:`` plus arbitrary role params), resolves the role name
    to an on-disk path, and separates role params from playbook field
    attributes.
    """

    _role = FieldAttribute(isa='string')

    def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
        self._play = play
        self._variable_manager = variable_manager
        self._loader = loader
        # Filled in by preprocess_data once the role name is resolved.
        self._role_path = None
        self._role_basedir = role_basedir
        self._role_params = dict()
        super(RoleDefinition, self).__init__()

    #def __repr__(self):
    #    return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')

    @staticmethod
    def load(data, variable_manager=None, loader=None):
        # Role definitions are built via preprocess_data, not the generic
        # Base.load entry point.
        raise AnsibleError("not implemented")

    def preprocess_data(self, ds):
        """Normalize the raw datastructure for a role reference.

        Returns a new mapping containing the resolved role name plus any
        recognized field attributes; role params and the resolved on-disk
        path are stashed on the instance as side effects.
        """
        # role names that are simply numbers can be parsed by PyYAML
        # as integers even when quoted, so turn it into a string type
        if isinstance(ds, int):
            ds = "%s" % ds

        # NOTE(review): assert is stripped under `python -O`, so this input
        # validation disappears in optimized runs -- confirm callers
        # guarantee these types.
        assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject)

        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)

        # save the original ds for use later
        self._ds = ds

        # we create a new data structure here, using the same
        # object used internally by the YAML parsing code so we
        # can preserve file:line:column information if it exists
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)

        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds.update(new_role_def)
            self._role_params = role_params

        # set the role name in the new ds
        new_ds['role'] = role_name

        # we store the role path internally
        self._role_path = role_path

        # and return the cleaned-up data structure
        return new_ds

    def _load_role_name(self, ds):
        '''
        Returns the role name (either the role: or name: field) from
        the role definition, or (when the role definition is a simple
        string), just that string
        '''
        if isinstance(ds, string_types):
            return ds

        role_name = ds.get('role', ds.get('name'))
        if not role_name or not isinstance(role_name, string_types):
            raise AnsibleError('role definitions must contain a role name', obj=ds)

        # if we have the required datastructures, and if the role_name
        # contains a variable, try and template it now
        if self._variable_manager:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
            templar = Templar(loader=self._loader, variables=all_vars)
            if templar._contains_vars(role_name):
                role_name = templar.template(role_name)

        return role_name

    def _load_role_path(self, role_name):
        '''
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        '''
        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
            self._loader.get_basedir(),
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            role_search_paths.extend(C.DEFAULT_ROLES_PATH)

        # finally, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
        else:
            all_vars = dict()
        templar = Templar(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)

        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)

        # if not found elsewhere try to extract path from name
        role_path = unfrackpath(role_name)
        if self._loader.path_exists(role_path):
            role_name = os.path.basename(role_name)
            return (role_name, role_path)

        raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)

    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        '''
        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self._get_base_attributes().keys())
        for (key, value) in iteritems(ds):
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            # FIXME: hard-coded list of exception key names here corresponds to the
            #        connection fields in the Base class. There may need to be some
            #        other mechanism where we exclude certain kinds of field attributes,
            #        or make this list more automatic in some way so we don't have to
            #        remember to update it manually.
            if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value
        return (role_def, role_params)

    def get_role_params(self):
        """Return a copy of the extra role params split off during preprocessing."""
        return self._role_params.copy()

    def get_role_path(self):
        """Return the resolved on-disk path of the role (set by preprocess_data)."""
        return self._role_path
| gpl-3.0 |
yeming233/rally | rally/plugins/openstack/cfg/opts.py | 1 | 2210 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.plugins.openstack.cfg import cinder
from rally.plugins.openstack.cfg import ec2
from rally.plugins.openstack.cfg import glance
from rally.plugins.openstack.cfg import heat
from rally.plugins.openstack.cfg import ironic
from rally.plugins.openstack.cfg import magnum
from rally.plugins.openstack.cfg import manila
from rally.plugins.openstack.cfg import mistral
from rally.plugins.openstack.cfg import monasca
from rally.plugins.openstack.cfg import murano
from rally.plugins.openstack.cfg import neutron
from rally.plugins.openstack.cfg import nova
from rally.plugins.openstack.cfg import profiler
from rally.plugins.openstack.cfg import sahara
from rally.plugins.openstack.cfg import senlin
from rally.plugins.openstack.cfg import vm
from rally.plugins.openstack.cfg import watcher
from rally.plugins.openstack.cfg import tempest
from rally.plugins.openstack.cfg import keystone_roles
from rally.plugins.openstack.cfg import keystone_users
from rally.plugins.openstack.cfg import cleanup
def list_opts():
    """Aggregate the config options of every OpenStack plugin module.

    Returns a dict mapping option-group category name to the combined list
    of options contributed under that category by each plugin's OPTS dict.
    """
    merged = {}
    option_groups = (
        cinder.OPTS, ec2.OPTS, heat.OPTS, ironic.OPTS, magnum.OPTS,
        manila.OPTS, mistral.OPTS, monasca.OPTS, murano.OPTS,
        nova.OPTS, profiler.OPTS, sahara.OPTS, vm.OPTS, glance.OPTS,
        watcher.OPTS, tempest.OPTS, keystone_roles.OPTS,
        keystone_users.OPTS, cleanup.OPTS, senlin.OPTS, neutron.OPTS,
    )
    for group in option_groups:
        for category, category_opts in group.items():
            merged.setdefault(category, []).extend(category_opts)
    return merged
| apache-2.0 |
hashamali/pyScss | scss/tests/test_types.py | 3 | 4336 | """Tests for the type system."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from scss.types import Color, List, Null, Number, String
import pytest
# Operators: arithmetic (+ - * / %), unary (+ -), comparison (== != < > <= >=), boolean
# Types: numbers, colors, strings, booleans, lists
# Test them all!
def test_addition():
    """Exercise the + operator across Number units, String quoting, and Color."""
    # Numbers are a little complicated, what with all the units
    # Simple case
    assert Number(123) + Number(456) == Number(579)
    # Simple equal units
    assert Number(1, "px") + Number(2, "px") == Number(3, "px")
    # Unitless values inherit units of the other operand
    assert Number(5) + Number(6, "px") == Number(11, "px")
    # Zero values can cast to any units
    assert Number(0, "in") + Number(24, "deg") == Number(24, "deg")
    # With different units, the left operand wins
    assert Number(10, "cm") + Number(100, "mm") == Number(20, "cm")
    assert Number(100, "mm") + Number(10, "cm") == Number(200, "mm")
    # Unconvertible units raise an error
    with pytest.raises(ValueError):
        Number(1, "px") + Number(1, "em")
    # Adding anything to a string makes a string
    assert Number(123) + String('abc') == String('123abc')
    assert String('abc') + Number(123) == String('abc123')
    # Quote style of the result follows the LEFT operand.
    ret = String('abc', quotes=None) + String('def', quotes=None)
    assert ret == String('abcdef')
    assert ret.quotes is None
    ret = String('abc', quotes='"') + String('def', quotes=None)
    assert ret == String('abcdef')
    assert ret.quotes == '"'
    ret = String('abc', quotes=None) + String('def', quotes='"')
    assert ret == String('abcdef')
    assert ret.quotes is None
    # Colors add channel-wise, clamped at white.
    assert Color.from_hex('#010305') + Color.from_hex('#050301') == Color.from_hex('#060606')
    assert Color.from_name('white') + Color.from_name('white') == Color.from_name('white')
def test_subtraction():
    """Exercise the - operator on Numbers and channel-wise on Colors."""
    assert Number(123) - Number(456) == Number(-333)
    assert Number(456) - Number(123) == Number(333)
    # TODO test that subtracting e.g. strings doesn't work
    assert Color.from_hex('#0f0f0f') - Color.from_hex('#050505') == Color.from_hex('#0a0a0a')
def test_division():
    """Division cancels like units and converts compatible ones (1in == 72pt)."""
    assert Number(5, "px") / Number(5, "px") == Number(1)
    assert Number(1, "in") / Number(6, "pt") == Number(12)
def test_comparison_numeric():
    """Comparison operators on Numbers, including unit casting rules."""
    lo = Number(123)
    hi = Number(456)
    assert lo < hi
    assert lo <= hi
    assert lo <= lo
    assert hi > lo
    assert hi >= lo
    assert hi >= hi
    assert lo == lo
    assert lo != hi
    # Same tests, negated
    assert not lo > hi
    assert not lo >= hi
    assert not hi < lo
    assert not hi <= lo
    assert not lo != lo
    assert not lo == hi
    # Numbers with units should also auto-cast numbers with units
    units = Number(123, "px")
    plain = Number(123)
    assert units == plain
    assert units <= plain
    assert units >= plain
    assert not units != plain
    assert not units < plain
    assert not units > plain
    # Incompatible units have... rules.
    # Ordering comparisons raise; equality is simply False.
    ems = Number(100, "em")
    pxs = Number(100, "px")
    with pytest.raises(ValueError):
        ems < pxs
    with pytest.raises(ValueError):
        ems > pxs
    with pytest.raises(ValueError):
        ems <= pxs
    with pytest.raises(ValueError):
        ems >= pxs
    assert not ems == pxs
    assert ems != pxs
def test_comparison_stringerific():
    """Strings support equality only; ordering raises, and mixed types never equal."""
    abc = String('abc')
    xyz = String('xyz')

    assert abc == abc
    assert abc != xyz
    assert not abc == xyz
    assert not abc != abc

    # Interaction with other types
    assert Number(123) != String('123')
    assert String('123') != Number(123)

    # Sass strings don't support ordering
    with pytest.raises(TypeError):
        abc < xyz
    with pytest.raises(TypeError):
        abc <= xyz
    with pytest.raises(TypeError):
        abc > xyz
    with pytest.raises(TypeError):
        abc >= xyz
    with pytest.raises(TypeError):
        Number(123) < String('123')
def test_comparison_null():
    """Null equals itself, differs from everything else, and has no ordering."""
    null = Null()

    assert null == null
    assert null != Number(0)

    with pytest.raises(TypeError):
        null < null
def test_unrenderable():
    """Values with no CSS representation refuse to render."""
    # Empty lists can't be rendered as CSS
    with pytest.raises(ValueError):
        List([]).render()

    # TODO write more!  i'm lazy.
| mit |
cloudbau/glance | glance/api/middleware/cache.py | 1 | 10596 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Transparent image file caching middleware, designed to live on
Glance API nodes. When images are requested from the API node,
this middleware caches the returned image file to local filesystem.
When subsequent requests for the same image file are received,
the local cached copy of the image file is returned.
"""
import re
import webob
from glance.api.common import size_checked_iter
from glance.api import policy
from glance.api.v1 import images
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
from glance import image_cache
import glance.openstack.common.log as logging
from glance import notifier
import glance.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
# Cacheable routes, keyed by (API version, HTTP method).  Each regex
# captures the image id from the request path; v2 GETs only match the
# .../file data endpoint, not the metadata endpoint.
PATTERNS = {
    ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
    ('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'),
    ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
    ('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$')
}
class CacheFilter(wsgi.Middleware):
    """WSGI middleware that serves image files from a local cache.

    On the request side, GET requests for already-cached images are
    answered directly from the cache (subject to policy enforcement).
    On the response side, successful GET responses are teed into the
    cache as they stream out, and successful DELETE responses evict
    the cached copy.
    """

    def __init__(self, app):
        # One cache backend, v1 serializer and policy enforcer per
        # middleware instance.
        self.cache = image_cache.ImageCache()
        self.serializer = images.ImageSerializer()
        self.policy = policy.Enforcer()
        LOG.info(_("Initialized image cache middleware"))
        super(CacheFilter, self).__init__(app)

    def _verify_metadata(self, image_meta):
        """
        Sanity check the 'deleted' and 'size' metadata values.

        Raises exception.NotFound for deleted images and backfills a
        missing (falsy) 'size' from the cached file's real size.
        """
        # NOTE: admins can see image metadata in the v1 API, but shouldn't
        # be able to download the actual image data.
        if image_meta['deleted']:
            raise exception.NotFound()

        if not image_meta['size']:
            # override image size metadata with the actual cached
            # file size, see LP Bug #900959
            image_meta['size'] = self.cache.get_image_size(image_meta['id'])

    @staticmethod
    def _match_request(request):
        """Determine the version of the url and extract the image id

        :returns tuple of version and image id if the url is a cacheable,
                 otherwise None
        """
        for ((version, method), pattern) in PATTERNS.items():
            match = pattern.match(request.path_info)
            try:
                # AttributeError below covers match being None (no regex hit).
                assert request.method == method
                image_id = match.group(1)
                # Ensure the image id we got looks like an image id to filter
                # out a URI like /images/detail. See LP Bug #879136
                assert image_id != 'detail'
            except (AttributeError, AssertionError):
                continue
            else:
                return (version, method, image_id)

    def _enforce(self, req, action):
        """Authorize an action against our policies"""
        try:
            self.policy.enforce(req.context, action, {})
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=unicode(e), request=req)

    def process_request(self, request):
        """
        For requests for an image file, we check the local image
        cache. If present, we return the image file, appending
        the image metadata in headers. If not present, we pass
        the request on to the next application in the pipeline.
        """
        match = self._match_request(request)
        try:
            (version, method, image_id) = match
        except TypeError:
            # Trying to unpack None raises this exception
            return None

        # Stash id/method in the WSGI environ so process_response can
        # recover them after the downstream app has handled the request.
        self._stash_request_info(request, image_id, method)

        if request.method != 'GET' or not self.cache.is_cached(image_id):
            return None

        # A 403 here means "not allowed to download"; fall through to the
        # rest of the pipeline rather than failing the whole request.
        try:
            self._enforce(request, 'download_image')
        except webob.exc.HTTPForbidden:
            return None

        LOG.debug(_("Cache hit for image '%s'"), image_id)
        image_iterator = self.get_from_cache(image_id)
        # Dispatch to _process_v1_request / _process_v2_request.
        method = getattr(self, '_process_%s_request' % version)

        try:
            return method(request, image_id, image_iterator)
        except exception.NotFound:
            # Cache/registry disagreement: drop the stale cache entry and
            # let the request continue down the pipeline.
            msg = _("Image cache contained image file for image '%s', "
                    "however the registry did not contain metadata for "
                    "that image!") % image_id
            LOG.error(msg)
            self.cache.delete_cached_image(image_id)

    @staticmethod
    def _stash_request_info(request, image_id, method):
        """
        Preserve the image id and request method for later retrieval
        """
        request.environ['api.cache.image_id'] = image_id
        request.environ['api.cache.method'] = method

    @staticmethod
    def _fetch_request_info(request):
        """
        Preserve the cached image id for consumption by the
        process_response method of this middleware

        Returns (image_id, method) or None when nothing was stashed.
        """
        try:
            image_id = request.environ['api.cache.image_id']
            method = request.environ['api.cache.method']
        except KeyError:
            return None
        else:
            return (image_id, method)

    def _process_v1_request(self, request, image_id, image_iterator):
        """Build a v1 response for a cached image via the v1 serializer."""
        image_meta = registry.get_image_metadata(request.context, image_id)
        # Don't display location
        if 'location' in image_meta:
            del image_meta['location']
        image_meta.pop('location_data', None)
        self._verify_metadata(image_meta)

        response = webob.Response(request=request)
        raw_response = {
            'image_iterator': image_iterator,
            'image_meta': image_meta,
        }
        return self.serializer.show(response, raw_response)

    def _process_v2_request(self, request, image_id, image_iterator):
        """Build a v2 response for a cached image, emitting a notification."""
        # We do some contortions to get the image_metadata so
        # that we can provide it to 'size_checked_iter' which
        # will generate a notification.
        # TODO(mclaren): Make notification happen more
        # naturally once caching is part of the domain model.
        db_api = glance.db.get_api()
        image_repo = glance.db.ImageRepo(request.context, db_api)
        image = image_repo.get(image_id)
        image_meta = glance.notifier.format_image_notification(image)
        self._verify_metadata(image_meta)
        response = webob.Response(request=request)
        response.app_iter = size_checked_iter(response, image_meta,
                                              image_meta['size'],
                                              image_iterator,
                                              notifier.Notifier())
        # NOTE (flwang): Set the content-type, content-md5 and content-length
        # explicitly to be consistent with the non-cache scenario.
        # Besides, it's not worth the candle to invoke the "download" method
        # of ResponseSerializer under image_data. Because method "download"
        # will reset the app_iter. Then we have to call method
        # "size_checked_iter" to avoid missing any notification. But after
        # call "size_checked_iter", we will lose the content-md5 and
        # content-length got by the method "download" because of this issue:
        # https://github.com/Pylons/webob/issues/86
        response.headers['Content-Type'] = 'application/octet-stream'
        response.headers['Content-MD5'] = image.checksum
        response.headers['Content-Length'] = str(image.size)
        return response

    def process_response(self, resp):
        """
        We intercept the response coming back from the main
        images Resource, removing image file from the cache
        if necessary
        """
        if not 200 <= self.get_status_code(resp) < 300:
            return resp

        try:
            (image_id, method) = self._fetch_request_info(resp.request)
        except TypeError:
            # _fetch_request_info returned None: not a cacheable route.
            return resp

        method_str = '_process_%s_response' % method
        try:
            process_response_method = getattr(self, method_str)
        except AttributeError:
            LOG.error(_('could not find %s') % method_str)
            # Nothing to do here, move along
            return resp
        else:
            return process_response_method(resp, image_id)

    def _process_DELETE_response(self, resp, image_id):
        """Evict the cached copy of an image that was just deleted."""
        if self.cache.is_cached(image_id):
            LOG.debug(_("Removing image %s from cache"), image_id)
            self.cache.delete_cached_image(image_id)
        return resp

    def _process_GET_response(self, resp, image_id):
        """Tee a successful GET body into the cache as it streams out."""
        image_checksum = resp.headers.get('Content-MD5', None)

        if not image_checksum:
            # API V1 stores the checksum in a different header:
            image_checksum = resp.headers.get('x-image-meta-checksum', None)

        if not image_checksum:
            LOG.error(_("Checksum header is missing."))

        # NOTE(zhiyan): image_cache return a generator object and set to
        # response.app_iter, it will be called by eventlet.wsgi later.
        # So we need enforce policy firstly but do it by application
        # since eventlet.wsgi could not catch webob.exc.HTTPForbidden and
        # return 403 error to client then.
        self._enforce(resp.request, 'download_image')

        resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum,
                                                    resp.app_iter)
        return resp

    def get_status_code(self, response):
        """
        Returns the integer status code from the response, which
        can be either a Webob.Response (used in testing) or httplib.Response
        """
        if hasattr(response, 'status_int'):
            return response.status_int
        return response.status

    def get_from_cache(self, image_id):
        """Called if cache hit"""
        with self.cache.open_for_read(image_id) as cache_file:
            chunks = utils.chunkiter(cache_file)
            for chunk in chunks:
                yield chunk
| apache-2.0 |
Polymer/lit-element | docs/main.py | 2 | 1481 | import os
import jinja2
import webapp2
# Jinja environment rooted at this file's directory, with autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)

# Skip old landing page, go direct to guides.
HOME_PAGE = '/guide'
# Match HTML pages from path; similar to behavior of Jekyll on GitHub Pages.
def find_template(path):
    """Resolve a request path to a Jinja template.

    '/'-terminated paths map to their directory index, explicit '.html'
    paths load verbatim, and bare paths fall back from 'path.html' to
    'path/index.html'.  Raises jinja2.exceptions.TemplateNotFound when
    nothing matches.
    """
    get_template = JINJA_ENVIRONMENT.get_template
    if path.endswith('/'):
        # / -> /index.html, /try/ -> /try/index.html
        return get_template(path + 'index.html')
    if path.endswith('.html'):
        # /index.html, /try/create.html
        return get_template(path)
    try:
        # /try/create -> /try/create.html
        return get_template(path + '.html')
    except jinja2.exceptions.TemplateNotFound:
        pass
    # /try -> /try/index.html
    return get_template(path + '/index.html')
class MainPage(webapp2.RequestHandler):
    """Serves documentation pages using Jekyll-like path resolution."""

    def get(self):
        path = self.request.path
        if path == '/':
            # The old landing page is retired; send visitors to the guide.
            self.redirect(HOME_PAGE, permanent=True)
            return
        try:
            template = find_template(path)
            # Only successful lookups are marked cacheable.
            self.response.headers['Cache-Control'] = 'public, max-age=60'
        except jinja2.exceptions.TemplateNotFound:
            template = find_template('/404.html')
            self.response.set_status(404)
        except Exception:
            template = find_template('/500.html')
            self.response.set_status(500)
        self.response.write(template.render({}))
# WSGI entry point: route every path to MainPage.
app = webapp2.WSGIApplication([
    ('/.*', MainPage),
])
| bsd-3-clause |
sajuptpm/neutron-ipam | neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py | 20 | 1819 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""remove status from HealthMonitor
Revision ID: 35c7c198ddea
Revises: 11c6e18605c8
Create Date: 2013-08-02 23:14:54.037976
"""
# revision identifiers, used by Alembic.
revision = '35c7c198ddea'
down_revision = '11c6e18605c8'

# Change to ['*'] if this migration applies to all plugins
# (only run when the LBaaS plugin is active).
migration_for_plugins = [
    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Drop the 'status' and 'status_description' columns from healthmonitors."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('healthmonitors', 'status')
    op.drop_column('healthmonitors', 'status_description')
def downgrade(active_plugins=None, options=None):
    """Re-add the 'status' and 'status_description' columns to healthmonitors."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # NOTE(review): 'status' is recreated NOT NULL with no server default;
    # presumably downgrading a table that already contains rows would fail
    # on most backends -- confirm intended behavior.
    op.add_column('healthmonitors', sa.Column('status',
                                              sa.String(16),
                                              nullable=False))
    op.add_column('healthmonitors', sa.Column('status_description',
                                              sa.String(255)))
| apache-2.0 |
adelomana/cassandra | conditionedFitness/figureClonal/clonal.3.3.py | 2 | 3270 | import matplotlib,numpy,sys,scipy,pickle
import matplotlib.pyplot
sys.path.append('../lib')
import calculateStatistics
### MAIN
# Conditioned-fitness trajectory for clonal lineage 3.3: compute mean,
# standard deviation and p-value per generation, plot the trajectory with
# significance markers, and pickle the processed data.
matplotlib.rcParams.update({'font.size': 36, 'font.family': 'Times New Roman',
                            'xtick.labelsize': 28, 'ytick.labelsize': 28})
thePointSize = 12
jarDir = '/Users/adriandelomana/scratch/'

# One entry per assayed generation: (generation, signal trials, no-signal
# trials).  Each trial set is a 2 x 5 array of paired counts passed
# straight to calculateStatistics.main.  Collapsing the seven previously
# copy-pasted blocks into data removes the risk of editing one copy and
# forgetting the others.
experiments = [
    (0,
     numpy.array([[175, 153, 186, 189, 157], [37, 59, 46, 67, 70]]),
     numpy.array([[200, 202, 224, 194, 193], [71, 66, 71, 87, 60]])),
    (50,
     numpy.array([[25, 28, 19, 18, 16], [0, 9, 4, 9, 1]]),
     numpy.array([[24, 16, 29, 17, 23], [4, 7, 5, 3, 4]])),
    (100,
     numpy.array([[96, 97, 94, 127, 80], [32, 36, 36, 42, 36]]),
     numpy.array([[104, 137, 110, 128, 113], [52, 36, 32, 50, 41]])),
    (150,
     numpy.array([[204, 223, 199, 249, 193], [141, 131, 125, 154, 139]]),
     numpy.array([[171, 217, 240, 200, 168], [166, 192, 163, 196, 170]])),
    (200,
     numpy.array([[197, 216, 224, 219, 208], [181, 182, 186, 179, 116]]),
     numpy.array([[261, 227, 229, 188, 236], [179, 169, 174, 183, 164]])),
    (250,
     numpy.array([[226, 214, 222, 224, 211], [235, 199, 177, 199, 184]]),
     numpy.array([[223, 230, 215, 273, 245], [204, 199, 247, 220, 204]])),
    (300,
     numpy.array([[222, 235, 253, 234, 189], [175, 160, 194, 156, 178]]),
     numpy.array([[212, 222, 246, 228, 220], [191, 192, 198, 217, 199]])),
]

# x: generations; y: conditioned fitness means; z: standard deviations;
# w: p-values.
x = []
y = []
z = []
w = []
for generation, xSignal, xNoSignal in experiments:
    cf_mu, cf_sd, pvalue = calculateStatistics.main(xSignal, xNoSignal)
    x.append(generation)
    y.append(cf_mu)
    z.append(cf_sd)
    w.append(pvalue)

matplotlib.pyplot.errorbar(x, y, yerr=z, fmt=':o', color='green', ecolor='green',
                           markeredgecolor='green', capsize=0, ms=thePointSize, mew=0)

for i in range(len(w)):
    # Place the significance marker just beyond the end of the error bar.
    if y[i] > 0.:
        sp = y[i] + z[i] + 0.02
    else:
        sp = y[i] - z[i] - 0.02
    # One star for p < 0.05, two stars for p < 0.01.
    if w[i] < 0.05 and w[i] >= 0.01:
        matplotlib.pyplot.scatter(x[i], sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
    if w[i] < 0.01:
        matplotlib.pyplot.scatter(x[i] - 3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
        matplotlib.pyplot.scatter(x[i] + 3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')

matplotlib.pyplot.plot([0, 300], [0, 0], '--', color='black')
matplotlib.pyplot.xlim([-25, 325])
matplotlib.pyplot.ylim([-0.4, 0.4])
matplotlib.pyplot.yticks([-0.4, -0.2, 0, 0.2, 0.4])
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.clonal.3.3.pdf')

# save processed data alternative plotting
trajectory = [x, y, z]
jarFile = jarDir + 'clonal.3.3.pickle'
with open(jarFile, 'wb') as f:
    pickle.dump(trajectory, f)
| gpl-3.0 |
ceridwen/combinators | prototypes/spf.py | 1 | 19322 | #!/usr/bin/python3
from __future__ import print_function
import collections
import itertools
import weakref
import pprint
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
class Tree(collections.Sized, collections.Iterable, collections.Container):
    """A dict-of-nodes digraph representation of a tree.

    _nodes is a weak dictionary mapping labels to node objects, a weak
    dictionary so that combinators can add nodes to the dictionary in
    parse branches that eventually fail and have those nodes
    garbage-collected.  Node objects contain strong references to
    other node objects, and self.root is a strong reference to the
    root node to anchor the entire tree.  Trees shouldn't contain
    packed nodes or nodes with more than one parent (more than one
    strong reference from another node), but shared packed forests
    can.  The references to packed nodes in Tree methods are intended
    for TreeViews, which inherit the methods.
    """
    __slots__ = ('root', '_nodes')

    def __init__(self, root=None, nodes=None):
        self.root = root
        self._nodes = weakref.WeakValueDictionary() if nodes is None else nodes

    def __getitem__(self, key):
        # Index into the root node: leaves unwrap to their payload, inner
        # nodes come back wrapped in a TreeView sharing this node table.
        item = self.root[key]
        if isinstance(item, Leaf):
            return item.value
        else:
            return TreeView(item, self._nodes)

    def __len__(self):
        # Leaf count: every non-Node value currently alive in the table.
        return sum(1 for n in self._nodes.values() if not isinstance(n, Node))

    def __contains__(self, item):
        # NOTE(review): the table holds Leaf/Node wrappers and Leaf defines
        # no __eq__, so a raw payload test like ('a' in tree) compares a
        # value against Leaf objects and is presumably always False --
        # confirm whether payload membership was intended.
        return any(item == n for n in self._nodes.values())

    def __iter__(self):
        """Iterates over subtrees."""
        for i in self.root:
            if isinstance(i, PackedNode):
                # NOTE(review): _unpacked_nodes is only defined by TreeView,
                # not by Tree itself, so this branch would raise
                # AttributeError on a plain Tree.  Trees are documented not
                # to contain packed nodes -- confirm.
                yield TreeView(i, self._nodes, self._unpacked_nodes)
            elif isinstance(i, Node):
                yield TreeView(i, self._nodes)
            else:
                yield i.value

    def leaves(self):
        """Iterator over this tree's leaves.

        Returns the objects the tree contains as a flat iterator.
        Preorder and postorder traversal will return the leaves in the
        same order, and preorder is simpler, so I use it here.
        """
        to_visit = list(reversed(self.root))
        while to_visit:
            node = to_visit.pop()
            if isinstance(node, PackedNode):
                # NOTE(review): same _unpacked_nodes caveat as __iter__.
                to_visit.append(self._unpacked_nodes[node])
            elif isinstance(node, Node):
                to_visit.extend(reversed(node))
            else:
                yield node.value

    def __str__(self):
        """Using WeakValueDictionary's __repr__ creates infinite recursion in
        pprint, so I still have to use .items() to avoid it.
        """
        return pprint.pformat(dict(self._nodes.items()), width=80)

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, pprint.pformat(dict(self._nodes.items()), width=80))
class SharedPackedForest(Tree):
    """The only differences between the digraph representation of a tree
    and of an SPF is that a tree iterates over its leaves while an
    SPF iterates over its trees, and SPFs can contain packed nodes
    and nodes with more than one parent.
    """
    __slots__ = ()

    def __iter__(self):
        # Iterating an SPF yields its constituent trees.
        return self.trees(self.root)

    def trees(self, root):
        """This algorithm uses two stacks, to_visit to keep track of ordinary
        nodes and packed_nodes to keep track of places where the SPF
        branches into multiple trees.  Over ordinary nodes, the
        algorithm is identical to preorder tree traversal.  When it
        encounters a packed node, it saves the current state of the
        traversal by pushing the packed node, an iterator over the
        nodes in the packed node, a copy of the current state of the
        stack of ordinary nodes (to_visit), and a record of the
        choices made at previous packed nodes onto the packed_nodes
        stack.  It then proceeds along the first possible choice in
        the packed node.  This record, called unpacked_nodes, is a
        dictionary that's designed to shadow all the packed nodes in
        the SPF that occur in one particular tree.  After it's
        completed a full ordinary tree traversal (to_visit is empty),
        it yields a TreeView for that tree and then backtracks to the
        last place it made a choice at a packed node, reading the
        state off the top of the packed_nodes stack.  Once it's
        checked all possible choices at all packed nodes (packed_nodes
        is empty), it terminates.  Looking at the stack of stacks, it
        should be obvious why in the case of highly-ambiguous grammars
        it will become unbounded polynomial, and I'm concerned it may
        actually be exponential in the number of packed nodes.  It
        also won't terminate in the case of an SPF with cycles.
        """
        packed_nodes = []
        unpacked_nodes = weakref.WeakValueDictionary()
        to_visit = [root]
        while True:
            # Ordinary preorder traversal, recording a choice at every
            # packed node encountered along the way.
            while to_visit:
                node = to_visit.pop()
                while isinstance(node, PackedNode):
                    nodes = iter(node)
                    packed_node = node
                    packed_nodes.append((packed_node, nodes, to_visit[:], unpacked_nodes.copy()))
                    node = next(nodes)
                    unpacked_nodes[packed_node] = node
                if isinstance(node, Node):
                    to_visit.extend(reversed(node))
            # Note to self: the problem is here.  What happens is that
            # root is a PackedNode with a Leaf inside it.  The
            # iterator creates TreeView objects with the PackedNode as
            # their root, and the constructor then turns that into a
            # TreeView with only a Leaf as its root.
            node = root
            while isinstance(node, PackedNode):
                node = unpacked_nodes[node]
            if isinstance(node, Leaf):
                yield node
            else:
                yield TreeView(root, self._nodes, unpacked_nodes)
            # Backtrack to the most recent packed node with an untried
            # alternative; terminate when every choice is exhausted.
            while packed_nodes:
                packed_node, nodes, to_visit, unpacked_nodes = packed_nodes[-1]
                try:
                    node = next(nodes)
                    unpacked_nodes[packed_node] = node
                    to_visit.append(node)
                    break
                except StopIteration:
                    packed_nodes.pop()
            else:
                return

    def leaves(self):
        """Traversal of this SPF's leaves.

        Yields each distinct leaf's value once, across all packed
        alternatives.
        """
        to_visit = [self.root]
        visited = set()
        while to_visit:
            node = to_visit.pop()
            if isinstance(node, PackedNode):
                to_visit.extend(node)
            elif isinstance(node, Node):
                to_visit.extend(reversed(node))
            else:
                # Shared leaves are reachable through several parents;
                # dedupe by identity/equality of the Leaf wrapper.
                if node not in visited:
                    yield node.value
                    visited.add(node)

    def istree(self):
        """If this SPF is a tree (has no packed nodes), returns True."""
        if any(isinstance(n, PackedNode) for n in self._nodes.values()):
            return False
        else:
            return True
class TreeView(Tree):
    """For __len__ and __contains__, we have to traverse the tree because
    there may be nodes unreachable from the root in _nodes.  The root
    node itself should never be a packed node.

    A TreeView is one concrete tree inside a shared packed forest:
    _unpacked_nodes maps each packed node to the alternative chosen for
    this particular tree.
    """
    __slots__ = ('_unpacked_nodes')

    def __init__(self, root, nodes, unpacked_nodes=None):
        self._unpacked_nodes = weakref.WeakValueDictionary() if unpacked_nodes is None else unpacked_nodes
        # Chase packed nodes down to the concrete choice for this tree.
        # NOTE(review): this loop reads the unpacked_nodes *parameter*, not
        # self._unpacked_nodes, so a packed root combined with the default
        # None would raise TypeError -- confirm whether that case can occur.
        while isinstance(root, PackedNode):
            root = unpacked_nodes[root]
        self.root = root
        self._nodes = nodes

    def __getitem__(self, key):
        item = self.root[key]
        while isinstance(item, PackedNode):
            item = self._unpacked_nodes[item]
        if isinstance(item, Leaf):
            return item.value
        else:
            return TreeView(item, self._nodes, self._unpacked_nodes)

    def __len__(self):
        # Must traverse: _nodes may hold nodes unreachable from this root.
        return sum(1 for n in self.leaves())

    def __contains__(self, item):
        return any(item == n for n in self.leaves())

    def __str__(self):
        """Because this is for debugging/output, efficiency doesn't matter so
        I use the much simpler recursive implementation for postorder
        traversing the tree and building a nested representation.
        """
        def nested(root):
            if isinstance(root, PackedNode):
                return nested(self._unpacked_nodes[root])
            elif isinstance(root, SeqNode):
                return [nested(n) for n in root]
            elif isinstance(root, MapNode):
                return {k: nested(v) for k, v in root.items()}
            else:
                return root.value
        return pprint.pformat(nested(self.root))
class Visitor(object):
    """Iterative visitor over trees/SPFs, dispatching on node type.

    Subclasses override ``visit`` for leaves (and may register extra
    overloads on it); the default node handlers just schedule children.
    """

    def __init__(self):
        # Wrap visit in a singledispatch table so per-type handlers can be
        # registered without touching the traversal loop.
        dispatcher = singledispatch(self.visit)
        dispatcher.register(Node, self.visit_node)
        dispatcher.register(PackedNode, self.visit_packed_node)
        self.visit = dispatcher

    def __call__(self, tree, node=None):
        # Start from an explicit node when given, else the tree's root.
        self.to_visit = [node] if node else [tree.root]
        outcome = None
        while self.to_visit:
            current = self.to_visit.pop()
            outcome = self.visit(current, outcome)
        return outcome

    def visit(self, node, result):
        # Default handler (leaves / unregistered types): no-op.
        pass

    def visit_node(self, node, result):
        # Push children reversed so they pop in left-to-right order.
        self.to_visit.extend(reversed(node))

    def visit_packed_node(self, node, result):
        self.to_visit.extend(node)
class Node(object):
    """Abstract parent class for nodes.

    All nodes have to be weak-referencable, and PackedNodes must be
    both weak-referencable and hashable---and since PackedNodes
    contain other nodes, other nodes also have to be hashable.  As the
    only built-in Python type with both properties is frozenset and I
    need types with intrinsic order, I have to make my own.
    Unfortunately, it's impossible to set __hash and __weakref__ on
    Node, for some reason, they don't inherit correctly.
    """
    # Empty on purpose: each concrete subclass declares its own storage
    # (including __weakref__) in its own __slots__.
    __slots__ = ()
class SeqNode(list, Node):
    """Immutable, hashable list subclass holding nodes and other objects.

    Mutating list methods are disabled so instances can safely serve as
    hashable members of PackedNode frozensets.
    """
    __slots__ = ('__hash', '__weakref__')

    def __hash__(self):
        # Cache the hash on first use.  The previous implementation tested
        # hasattr(self, '__hash'), but that literal string is never mangled
        # the way the attribute itself is (_SeqNode__hash), so the check was
        # always False and the hash was recomputed on every call.  EAFP on
        # the slot attribute cooperates with name mangling.
        try:
            return self.__hash
        except AttributeError:
            self.__hash = hash(tuple(self))
            return self.__hash

    def __setitem__(self, *args, **kws):
        raise TypeError("'%s' object does not support item assignment" % type(self))

    def __delitem__(self, *args, **kws):
        raise TypeError("'%s' object does not support item deletion" % type(self))

    def __immutable(self, *args, **kws):
        raise TypeError("'%s' object is immutable" % type(self))

    # Every in-place list operation is routed to the immutability error.
    append = __immutable
    clear = __immutable
    copy = __immutable
    extend = __immutable
    insert = __immutable
    pop = __immutable
    remove = __immutable
    reverse = __immutable
class PackedNode(frozenset, Node):
    """PackedNodes hold other nodes, including in some cases PackedNodes.

    Inheriting from frozenset prevents PackedNodes from duplicating
    nodes but costs memory and means that nodes are returned in an
    arbitrary order.  It may be better to use a list instead.
    (Alternately, I should check and see if Hettinger ever actually
    added memory-efficient hash tables to set.)  Note that frozensets
    can be weak-referenced so I don't need to add __weakref__ to
    slots.
    """
    __slots__ = ()
class MapNode(dict, Node):
    """Immutable node mapping names to nodes; iteration is over VALUES.

    Unlike a standard mapping, __iter__, __reversed__ and __contains__
    operate on the values in insertion order, because tree traversal
    cares about child nodes rather than their labels.  Unlike
    collections.OrderedDict, this doesn't need a linked list because it
    doesn't need to handle insertions or deletions.
    """
    __slots__ = ('__values', '__hash', '__weakref__')

    def __init__(self, other):
        if isinstance(other, collections.Mapping):
            self.__values = tuple(other.values())
            super(MapNode, self).__init__(other)
        else:
            # An iterable of (key, value) pairs; remember value order.
            super(MapNode, self).__init__()
            values = []
            for k, v in other:
                values.append(v)
                super(MapNode, self).__setitem__(k, v)
            self.__values = tuple(values)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Alternate constructor mapping every key to the same value.

        The previous version omitted the cls parameter (so the iterable
        was silently bound to the class) and referenced an undefined
        ``self``; it could never have worked.
        """
        return cls((k, value) for k in iterable)

    def __iter__(self):
        return iter(self.__values)

    def __reversed__(self):
        return reversed(self.__values)

    def copy(self):
        return MapNode(self)

    def __hash__(self):
        # Cache lazily.  The old check ``hasattr(self, self.__hash)`` both
        # passed the (still unset) attribute where a name string belongs
        # and raised AttributeError on first use, so hashing always failed.
        try:
            return self.__hash
        except AttributeError:
            self.__hash = hash(frozenset(self.items()))
            return self.__hash

    def __contains__(self, value):
        # Membership tests values, mirroring __iter__.
        return (value in self.__values)

    def __setitem__(self, *args, **kws):
        raise TypeError("'%s' object does not support item assignment" % type(self))

    def __delitem__(self, *args, **kws):
        raise TypeError("'%s' object does not support item deletion" % type(self))

    def __immutable(self, *args, **kws):
        raise TypeError("'%s' object is immutable" % type(self))

    # Every in-place dict operation is routed to the immutability error.
    clear = __immutable
    update = __immutable
    setdefault = __immutable
    pop = __immutable
    popitem = __immutable
class Leaf(object):
    """A minimal weak-referencable container for arbitrary Python objects.

    Trees store Leaf wrappers rather than raw values because the node
    table is a WeakValueDictionary and most built-in types cannot be
    weak-referenced.  (Commented-out draft __hash__/__eq__ code removed;
    leaves intentionally compare by identity.)
    """
    __slots__ = ('value', '__weakref__')

    def __init__(self, value):
        # The wrapped payload, exposed directly as a public attribute.
        self.value = value

    def __str__(self):
        return str(self.value)

    # repr delegates to the payload as well, which keeps pprint output of
    # whole node tables readable.
    __repr__ = __str__
if __name__ == '__main__':
    # Ad-hoc smoke tests and memory measurements for the classes above.
    # PyPy doesn't have sys.getsizeof().
    import platform
    pypy = True if platform.python_implementation() == 'PyPy' else False
    if not pypy:
        import hettinger_total_size
        import functools
        total_size = functools.partial(hettinger_total_size.total_size, handlers = {Tree: lambda t: itertools.chain((t.root,), itertools.chain.from_iterable(t._nodes.items()))})

    #                      (0, 21)
    #                     /       \
    #                (0, 3)        (3, 21)
    #               /      \      /    |     \
    #          (0, 1)  (1, 3) (3, 15) (15, 17) (17, 21)
    #            |    /      \    |       |        |
    #           'a' (1, 2) (2, 3) 'd'    'e'      'f'
    #                 |      |
    #                'b'    'c'
    def make_tree(tree, seqnode, mapnode):
        # Build the example tree diagrammed above.  The node classes are
        # parameters so alternative implementations can be swapped in.
        a = Leaf('a')
        b = Leaf('b')
        c = Leaf('c')
        d = Leaf('d')
        e = Leaf('e')
        f = Leaf('f')
        t0 = seqnode([b, c])
        t1 = seqnode([a, t0])
        t2 = mapnode([('eeny', d), ('meeny', e), ('miny', f)])
        t3 = seqnode([t1, t2])
        nodes = {(0, 1): a,
                 (1, 2): b,
                 (2, 3): c,
                 (3, 15): d,
                 (15, 17): e,
                 (17, 21): f,
                 (0, 21): t3,
                 (0, 3): t1,
                 (1, 3): t0,
                 (3, 21): t2}
        return Tree(t3, weakref.WeakValueDictionary(nodes))

    tree = make_tree(Tree, SeqNode, MapNode)
    print('Tree tests.')
    print(tree)
    print('Root %s, length %s' % (tree.root, len(tree)))
    print('Iterate over subtrees.')
    for i in tree:
        print(i)
    print('Leaves.')
    for i in tree.leaves():
        print(i)
    print('Contains: "a", %s; 0, %s' % ('a' in tree, 0 in tree))
    tree_view = tree[1]
    print('tree[1]: %s; tree[1]["eeny"]: %s' % (tree_view, tree_view['eeny']))
    print('Tree view tests.')
    print('Root %s, length %s' % (tree_view.root, len(tree_view)))
    print('Iterate over subtrees.')
    for i in tree_view:
        print(i)
    print('Leaves.')
    for i in tree_view.leaves():
        print(i)
    print('Contains: "a", %s; "f", %s' % ('a' in tree_view, 'f' in tree_view))
    if not pypy:
        print('Input, memory in bytes:', total_size('abcdef'))
        print('Tree, memory:', total_size(tree))

    def make_ambiguous_trees(tree, node):
        """Grammar: S -> AB, S -> SC, B -> BC, A -> a, B -> b, C -> c

        S -> aB | Sc
        B -> Bc

        Input: 'abcc'

        These trees already share a subtree.
        """
        a = Leaf('a')
        b = Leaf('b')
        c0 = Leaf('c')
        # NOTE(review): c1 wraps 'd' although the documented input is
        # 'abcc' -- presumably this should be Leaf('c'); confirm.
        c1 = Leaf('d')
        bc = node([b, c0])
        ab = node([a, b])
        bbc = node([bc, c1])
        n0 = node([a, bbc])
        abc0 = node([a, bc])
        n1 = node([abc0, c1])
        abc1 = node([ab, c0])
        n2 = node([abc1, c1])
        leaves = {(0, 1): a,
                  (1, 2): b,
                  (2, 3): c0,
                  (3, 4): c1}
        t0 = {(0, 4): n0,
              (1, 4): bbc,
              (1, 3): bc}
        t0.update(leaves)
        t1 = {(0, 4): n1,
              (0, 3): abc0,
              (1, 3): bc}
        t1.update(leaves)
        t2 = {(0, 4): n2,
              (0, 3): abc1,
              (0, 2): ab}
        t2.update(leaves)
        return [tree(n0, weakref.WeakValueDictionary(t0)),
                tree(n1, weakref.WeakValueDictionary(t1)),
                tree(n2, weakref.WeakValueDictionary(t2))]

    ambiguous_trees = make_ambiguous_trees(Tree, SeqNode)
    pprint.pprint(ambiguous_trees)
    if not pypy:
        print('Ambiguous trees, memory:', total_size(ambiguous_trees))

    def make_spf(node):
        # Same ambiguous parse as above, expressed as a single shared
        # packed forest.  (n0 and n1 are built but unused here.)
        a = Leaf('a')
        b = Leaf('b')
        c0 = Leaf('c')
        c1 = Leaf('c')
        bc = node([b, c0])
        ab = node([a, b])
        bbc = node([bc, c1])
        n0 = node([a, bbc])
        abc0 = node([a, bc])
        n1 = node([abc0, c1])
        abc1 = node([ab, c0])
        abc = PackedNode([abc0, abc1])
        abcc = PackedNode([node([a, bbc]), node([abc, c1])])
        nodes = {(0, 1): a,
                 (1, 2): b,
                 (2, 3): c0,
                 (3, 4): c1,
                 (0, 2): ab,
                 (1, 3): bc,
                 (0, 3): abc,
                 (1, 4): bbc,
                 (0, 4): abcc}
        return SharedPackedForest(abcc, weakref.WeakValueDictionary(nodes))

    spf = make_spf(SeqNode)
    print('SPF tests.')
    print(spf)
    print('Leaves.')
    for i in spf.leaves():
        print(i)
    print('Is this a tree?', spf.istree())
    for t in spf:
        print('Tree:', t)
        for s in t:
            print('Subtree:', s)
        for i in t.leaves():
            print('Leaf:', i)
    visitor = Visitor()
    visitor(spf)
    if not pypy:
        print('Input, memory in bytes:', total_size('abcc'))
        print('SPF, memory:', total_size(spf))
| mit |
adlius/osf.io | addons/box/models.py | 6 | 9169 | from rest_framework import status as http_status
import logging
import os
import requests
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from boxsdk import Client, OAuth2
from boxsdk.exception import BoxAPIException
from django.db import models
from framework.auth import Auth
from framework.exceptions import HTTPError
from oauthlib.oauth2 import InvalidGrantError
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from osf.utils.fields import ensure_str
from urllib3.exceptions import MaxRetryError
from addons.base import exceptions
from addons.box import settings
from addons.box.serializer import BoxSerializer
from website.util import api_v2_url
logger = logging.getLogger(__name__)
class BoxFileNode(BaseFileNode):
    """Base file-tree node for the Box addon."""
    # Routes BaseFileNode's provider-based lookups to the 'box' backend.
    _provider = 'box'
class BoxFolder(BoxFileNode, Folder):
    """Folder stored on Box; all behavior comes from the mixins."""
    pass
class BoxFile(BoxFileNode, File):
    """File stored on Box."""

    @property
    def _hashes(self):
        # Surface the SHA-1 recorded for the most recent version in this
        # node's history; absent history or missing keys yield None rather
        # than raising.
        try:
            return {'sha1': self._history[-1]['extra']['hashes']['sha1']}
        except (IndexError, KeyError):
            return None
class Provider(ExternalProvider):
    """OAuth2 provider definition for Box."""
    name = 'Box'
    short_name = 'box'

    client_id = settings.BOX_KEY
    client_secret = settings.BOX_SECRET

    # OAuth endpoints; Box refreshes tokens at the same URL that issues them.
    auth_url_base = settings.BOX_OAUTH_AUTH_ENDPOINT
    callback_url = settings.BOX_OAUTH_TOKEN_ENDPOINT
    auto_refresh_url = callback_url
    refresh_time = settings.REFRESH_TIME
    expiry_time = settings.EXPIRY_TIME

    default_scopes = ['root_readwrite']

    def handle_callback(self, response):
        """View called when the Oauth flow is completed. Adds a new UserSettings
        record to the user and saves the user's access token and account info.
        """
        # Build a short-lived client from the freshly issued tokens to fetch
        # the account owner's identity.
        client = Client(OAuth2(
            access_token=response['access_token'],
            refresh_token=response['refresh_token'],
            client_id=settings.BOX_KEY,
            client_secret=settings.BOX_SECRET,
        ))
        about = client.user().get()
        return {
            'provider_id': about['id'],
            'display_name': about['name'],
            'profile_url': 'https://app.box.com/profile/{0}'.format(about['id'])
        }
class UserSettings(BaseOAuthUserSettings):
    """Stores user-specific box information
    """
    oauth_provider = Provider
    serializer = BoxSerializer

    def revoke_remote_oauth_access(self, external_account):
        # Best-effort revocation of the token on Box's side; local cleanup is
        # handled by the base class regardless of the remote outcome.
        try:
            # TODO: write client for box, stop using third-party lib
            requests.request(
                'POST',
                settings.BOX_OAUTH_REVOKE_ENDPOINT,
                params={
                    'client_id': settings.BOX_KEY,
                    'client_secret': settings.BOX_SECRET,
                    'token': external_account.oauth_key,
                }
            )
        except requests.HTTPError:
            # Deliberately swallowed: a failed remote revoke must not block
            # disconnecting the account locally.
            pass
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
    """Per-node (project) configuration for the Box addon."""
    oauth_provider = Provider
    serializer = BoxSerializer

    # Selected Box folder; all three are None until a folder is chosen.
    folder_id = models.TextField(null=True, blank=True)
    folder_name = models.TextField(null=True, blank=True)
    folder_path = models.TextField(null=True, blank=True)
    user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)

    _api = None

    @property
    def api(self):
        """authenticated ExternalProvider instance"""
        # Lazily constructed and memoized per model instance.
        if self._api is None:
            self._api = Provider(self.external_account)
        return self._api

    @property
    def display_name(self):
        return '{0}: {1}'.format(self.config.full_name, self.folder_id)

    def fetch_full_folder_path(self):
        return self.folder_path

    def get_folders(self, **kwargs):
        """List child folders of ``folder_id`` (or the synthetic root).

        Returns a list of dicts shaped for the addon folder-picker UI.
        Raises HTTPError 403/404/400 on auth, lookup, or connection failure.
        """
        folder_id = kwargs.get('folder_id')
        if folder_id is None:
            # No folder requested: present the virtual "Full Box" root.
            return [{
                'id': '0',
                'path': '/',
                'addon': 'box',
                'kind': 'folder',
                'name': '/ (Full Box)',
                'urls': {
                    # 'folders': node.api_url_for('box_folder_list', folderId=0),
                    'folders': api_v2_url('nodes/{}/addons/box/folders/'.format(self.owner._id),
                        params={'id': '0'}
                    )
                }
            }]

        try:
            # Refresh before building the client so the access token is live.
            Provider(self.external_account).refresh_oauth_key()
            oauth = OAuth2(client_id=settings.BOX_KEY, client_secret=settings.BOX_SECRET, access_token=ensure_str(self.external_account.oauth_key))
            client = Client(oauth)
        except BoxAPIException:
            raise HTTPError(http_status.HTTP_403_FORBIDDEN)

        try:
            metadata = client.folder(folder_id).get()
        except BoxAPIException:
            raise HTTPError(http_status.HTTP_404_NOT_FOUND)
        except MaxRetryError:
            raise HTTPError(http_status.HTTP_400_BAD_REQUEST)

        # Human-readable path built from the folder's ancestor chain.
        folder_path = '/'.join(
            [
                x['name']
                for x in metadata['path_collection']['entries']
            ] + [metadata['name']]
        )

        return [
            {
                'addon': 'box',
                'kind': 'folder',
                'id': item['id'],
                'name': item['name'],
                # Box names its root "All Files"; hide that from users.
                'path': os.path.join(folder_path, item['name']).replace('All Files', ''),
                'urls': {
                    'folders': api_v2_url('nodes/{}/addons/box/folders/'.format(self.owner._id),
                        params={'id': item['id']}
                    )
                }
            }
            for item in metadata['item_collection']['entries']
            if item['type'] == 'folder'
        ]

    def set_folder(self, folder_id, auth):
        self.folder_id = str(folder_id)
        # NOTE(review): _folder_data reads self.folder_id (set just above),
        # not its folder_id argument — confirm the parameter is vestigial.
        self.folder_name, self.folder_path = self._folder_data(folder_id)
        self.nodelogger.log(action='folder_selected', save=True)

    def _folder_data(self, folder_id):
        # Split out from set_folder for ease of testing, due to
        # outgoing requests. Should only be called by set_folder
        try:
            Provider(self.external_account).refresh_oauth_key(force=True)
        except InvalidGrantError:
            raise exceptions.InvalidAuthError()

        try:
            oauth = OAuth2(client_id=settings.BOX_KEY, client_secret=settings.BOX_SECRET, access_token=ensure_str(self.external_account.oauth_key))
            client = Client(oauth)
            folder_data = client.folder(self.folder_id).get()
        except BoxAPIException:
            raise exceptions.InvalidFolderError()

        # Strip Box's "All Files" root label; fall back to friendly defaults.
        folder_name = folder_data['name'].replace('All Files', '') or '/ (Full Box)'
        folder_path = '/'.join(
            [x['name'] for x in folder_data['path_collection']['entries'] if x['name']] +
            [folder_data['name']]
        ).replace('All Files', '') or '/'
        return folder_name, folder_path

    def clear_settings(self):
        # Forget the selected folder; does not touch auth.
        self.folder_id = None
        self.folder_name = None
        self.folder_path = None

    def deauthorize(self, auth=None, add_log=True):
        """Remove user authorization from this node and log the event."""
        folder_id = self.folder_id
        self.clear_settings()

        if add_log:
            extra = {'folder_id': folder_id}
            self.nodelogger.log(action='node_deauthorized', extra=extra, save=True)

        self.clear_auth()

    def serialize_waterbutler_credentials(self):
        # Credentials handed to WaterButler for storage operations.
        if not self.has_auth:
            raise exceptions.AddonError('Addon is not authorized')
        try:
            Provider(self.external_account).refresh_oauth_key()
            return {'token': self.external_account.oauth_key}
        except BoxAPIException as error:
            raise HTTPError(error.status_code, data={'message_long': error.message})

    def serialize_waterbutler_settings(self):
        if self.folder_id is None:
            raise exceptions.AddonError('Folder is not configured')
        return {'folder': self.folder_id}

    def create_waterbutler_log(self, auth, action, metadata):
        # Record a file action (e.g. upload/delete) in the node's log with
        # view/download links for the affected path.
        self.owner.add_log(
            'box_{0}'.format(action),
            auth=auth,
            params={
                'path': metadata['materialized'],
                'project': self.owner.parent_id,
                'node': self.owner._id,
                'folder': self.folder_id,
                'urls': {
                    'view': self.owner.web_url_for('addon_view_or_download_file',
                        provider='box',
                        action='view',
                        path=metadata['path']
                    ),
                    'download': self.owner.web_url_for('addon_view_or_download_file',
                        provider='box',
                        action='download',
                        path=metadata['path']
                    ),
                },
            },
        )

    ##### Callback overrides #####

    def after_delete(self, user=None):
        self.deauthorize(Auth(user=user), add_log=True)
        self.save()

    def on_delete(self):
        self.deauthorize(add_log=False)
        self.save()
| apache-2.0 |
goltermann/kubernetes | examples/cluster-dns/images/frontend/client.py | 468 | 1227 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
import socket
from urlparse import urlparse
def CheckServiceAddress(address):
    # Resolve the service URL's hostname through DNS and print the
    # resulting IP address, verifying cluster DNS works. (Python 2 file.)
    hostname = urlparse(address).hostname
    service_address = socket.gethostbyname(hostname)
    print service_address
def GetServerResponse(address):
    # Issue an HTTP GET against the service and print the response object
    # and its body for manual inspection.
    print 'Send request to:', address
    response = requests.get(address)
    print response
    print response.content
def Main():
    # Take the service URL as the single positional argument, check DNS
    # resolution, then fetch the URL.
    parser = argparse.ArgumentParser()
    parser.add_argument('address')
    args = parser.parse_args()
    CheckServiceAddress(args.address)
    GetServerResponse(args.address)


if __name__ == "__main__":
    Main()
| apache-2.0 |
nttks/edx-platform | common/lib/xmodule/xmodule/modulestore/draft_and_published.py | 71 | 5876 | """
This module provides an abstraction for Module Stores that support Draft and Published branches.
"""
import threading
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from . import ModuleStoreEnum, BulkOperationsMixin
# Things w/ these categories should never be marked as version=DRAFT
# (they publish directly; see unpublish(), which rejects them).
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
class BranchSettingMixin(object):
    """
    Mixin that manages which branch a module store operates on.

    The effective branch is resolved in this order (highest precedence
    first):

    1. a thread-specific value installed temporarily through the
       ``branch_setting`` context manager
    2. the value produced by the ``branch_setting_func`` callable passed
       to ``__init__``
    3. ``ModuleStoreEnum.Branch.published_only``
    """

    def __init__(self, *args, **kwargs):
        """
        :param branch_setting_func: a function that returns the default branch
            setting for this object. When omitted,
            ModuleStoreEnum.Branch.published_only is used.
        """
        if 'branch_setting_func' in kwargs:
            self.default_branch_setting_func = kwargs.pop('branch_setting_func')
        else:
            self.default_branch_setting_func = lambda: ModuleStoreEnum.Branch.published_only
        super(BranchSettingMixin, self).__init__(*args, **kwargs)

        # Thread-local slot so concurrent threads can each hold their own
        # temporary branch override without interfering.
        self.thread_cache = threading.local()

    @contextmanager
    def branch_setting(self, branch_setting, course_id=None):  # pylint: disable=unused-argument
        """
        Context manager that pins the current thread's branch setting to
        ``branch_setting`` for the duration of the ``with`` block, then
        restores whatever was there before.
        """
        saved_setting = getattr(self.thread_cache, 'branch_setting', None)
        try:
            self.thread_cache.branch_setting = branch_setting
            yield
        finally:
            self.thread_cache.branch_setting = saved_setting

    def get_branch_setting(self, course_id=None):  # pylint: disable=unused-argument
        """
        Return the branch setting currently in effect for this store:
        the thread-local override when one is set, otherwise the default
        produced by the function supplied at construction time.
        """
        override = getattr(self.thread_cache, 'branch_setting', None)
        if override:
            return override
        return self.default_branch_setting_func()
class ModuleStoreDraftAndPublished(BranchSettingMixin, BulkOperationsMixin):
    """
    A mixin for a read-write database backend that supports two branches, Draft and Published, with
    options to prefer Draft and fallback to Published.
    """
    # Python-2 style metaclass declaration (this file targets Python 2).
    __metaclass__ = ABCMeta

    @abstractmethod
    def delete_item(self, location, user_id, revision=None, **kwargs):
        """Delete the block at ``location`` on the given revision/branch."""
        raise NotImplementedError

    @abstractmethod
    def get_parent_location(self, location, revision=None, **kwargs):
        """Return the location of the block's parent, if any."""
        raise NotImplementedError

    @abstractmethod
    def has_changes(self, xblock):
        """Return whether the draft differs from the published version."""
        raise NotImplementedError

    @abstractmethod
    def publish(self, location, user_id):
        """Copy the draft at ``location`` to the published branch."""
        raise NotImplementedError

    @abstractmethod
    def unpublish(self, location, user_id):
        """
        Turn the published version into a draft, removing the published version.

        Raises: InvalidVersionError if called on a DIRECT_ONLY_CATEGORY
        """
        raise NotImplementedError

    @abstractmethod
    def revert_to_published(self, location, user_id):
        """Discard draft changes, restoring the published version."""
        raise NotImplementedError

    @abstractmethod
    def has_published_version(self, xblock):
        """Return whether a published version of ``xblock`` exists."""
        raise NotImplementedError

    @abstractmethod
    def convert_to_draft(self, location, user_id):
        """Create a draft copy of the published block at ``location``."""
        raise NotImplementedError

    @abstractmethod
    def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
        """
        Import the given xblock into the current branch setting: import completely overwrites any
        existing block of the same id.

        In ModuleStoreDraftAndPublished, importing a published block ensures that access from the draft
        will get a block (either the one imported or a preexisting one). See xml_importer
        """
        raise NotImplementedError

    def _flag_publish_event(self, course_key):
        """
        Wrapper around calls to fire the course_published signal
        Unless we're nested in an active bulk operation, this simply fires the signal
        otherwise a publish will be signalled at the end of the bulk operation

        Arguments:
            course_key - course_key to which the signal applies
        """
        if self.signal_handler:
            bulk_record = self._get_bulk_ops_record(course_key) if isinstance(self, BulkOperationsMixin) else None
            if bulk_record and bulk_record.active:
                # Defer: the bulk-operation teardown fires the signal once.
                bulk_record.has_publish_item = True
            else:
                # We remove the branch, because publishing always means copying from draft to published
                self.signal_handler.send("course_published", course_key=course_key.for_branch(None))
class UnsupportedRevisionError(ValueError):
    """
    Raised when a method receives a ``revision`` argument outside the set
    it supports.
    """

    def __init__(self, allowed_revisions=None):
        # Fall back to the standard trio of supported revision options when
        # the caller does not spell out its own set.
        if not allowed_revisions:
            allowed_revisions = [
                None,
                ModuleStoreEnum.RevisionOption.published_only,
                ModuleStoreEnum.RevisionOption.draft_only,
            ]
        message = 'revision not one of {}'.format(allowed_revisions)
        super(UnsupportedRevisionError, self).__init__(message)
| agpl-3.0 |
grancier/linux-3.10.37-chromeos | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and emit them as the
# space-separated "index=value" pairs expected by the sysfs adsl_config
# attribute (index rendered in hex).
i = 0
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Truncated trailing record — terminate output and fail loudly.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
nysan/yocto-autobuilder | lib/python2.6/site-packages/Twisted-11.0.0-py2.6-linux-x86_64.egg/twisted/words/im/basechat.py | 18 | 11844 | # -*- test-case-name: twisted.words.test.test_basechat -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Base classes for Instance Messenger clients.
"""
from twisted.words.im.locals import OFFLINE, ONLINE, AWAY
class ContactsList:
    """A GUI object that displays a contacts list.

    Tracks every contact seen this session (``contacts``), the subset that
    is currently online or away (``onlineContacts``), and the account
    clients that are signed on (``clients``).
    """

    def __init__(self, chatui):
        """
        @param chatui: the chat UI this contacts list belongs to.
        @type chatui: L{ChatUI}
        """
        self.chatui = chatui
        self.contacts = {}
        self.onlineContacts = {}
        self.clients = []

    def setContactStatus(self, person):
        """Inform the user that a person's status has changed.

        @type person: L{Person<interfaces.IPerson>}
        """
        # `in` replaces the Python-2-only dict.has_key(): same semantics,
        # also forward-compatible with Python 3.
        if person.name not in self.contacts:
            self.contacts[person.name] = person
        if person.name not in self.onlineContacts and \
           (person.status == ONLINE or person.status == AWAY):
            self.onlineContacts[person.name] = person
        if person.name in self.onlineContacts and \
           person.status == OFFLINE:
            del self.onlineContacts[person.name]

    def registerAccountClient(self, client):
        """Notify the user that an account client has been signed on to.

        @type client: L{Client<interfaces.IClient>}
        """
        if client not in self.clients:
            self.clients.append(client)

    def unregisterAccountClient(self, client):
        """Notify the user that an account client has been signed off
        or disconnected from.

        @type client: L{Client<interfaces.IClient>}
        """
        if client in self.clients:
            self.clients.remove(client)

    def contactChangedNick(self, person, newnick):
        """Re-key ``person`` under ``newnick`` in both contact maps and
        update the person object's name. No-op if the old nick is unknown.
        """
        oldname = person.name
        if oldname in self.contacts:
            del self.contacts[oldname]
            person.name = newnick
            self.contacts[newnick] = person
            if oldname in self.onlineContacts:
                del self.onlineContacts[oldname]
                self.onlineContacts[newnick] = person
class Conversation:
    """A GUI window holding a one-on-one conversation with a person."""

    def __init__(self, person, chatui):
        """
        @type person: L{Person<interfaces.IPerson>}
        @type chatui: L{ChatUI}
        """
        self.person = person
        self.chatui = chatui

    def show(self):
        """Displays the ConversationWindow"""
        raise NotImplementedError("Subclasses must implement this method")

    def hide(self):
        """Hides the ConversationWindow"""
        raise NotImplementedError("Subclasses must implement this method")

    def sendText(self, text):
        """Relay ``text`` to the person on the other end of this window.

        @returntype: L{Deferred<twisted.internet.defer.Deferred>}
        """
        self.person.sendMessage(text, None)

    def showMessage(self, text, metadata=None):
        """Display a message sent from the person with whom she is conversing

        @type text: string
        @type metadata: dict
        """
        raise NotImplementedError("Subclasses must implement this method")

    def contactChangedNick(self, person, newnick):
        """Record the conversation partner's new nickname.

        @type person: L{Person<interfaces.IPerson>}
        @type newnick: string
        """
        self.person.name = newnick
class GroupConversation:
    """A GUI window holding a conversation with a group of people."""

    def __init__(self, group, chatui):
        """
        @type group: L{Group<interfaces.IGroup>}
        @param chatui: the chat UI this window belongs to.
        @type chatui: L{ChatUI}
        """
        self.group = group
        self.chatui = chatui
        self.members = []

    def show(self):
        """Displays the GroupConversationWindow."""
        raise NotImplementedError("Subclasses must implement this method")

    def hide(self):
        """Hides the GroupConversationWindow."""
        raise NotImplementedError("Subclasses must implement this method")

    def sendText(self, text):
        """Broadcast ``text`` to the whole group.

        @type text: string
        @returntype: L{Deferred<twisted.internet.defer.Deferred>}
        """
        self.group.sendGroupMessage(text, None)

    def showGroupMessage(self, sender, text, metadata=None):
        """Displays to the user a message sent to this group from the given sender

        @type sender: string (XXX: Not Person?)
        @type text: string
        @type metadata: dict
        """
        raise NotImplementedError("Subclasses must implement this method")

    def setGroupMembers(self, members):
        """Sets the list of members in the group and displays it to the user
        """
        self.members = members

    def setTopic(self, topic, author):
        """Displays the topic (from the server) for the group conversation window

        @type topic: string
        @type author: string (XXX: Not Person?)
        """
        raise NotImplementedError("Subclasses must implement this method")

    def memberJoined(self, member):
        """Record ``member`` joining the group, ignoring duplicates.

        @type member: string (XXX: Not Person?)
        """
        if member not in self.members:
            self.members.append(member)

    def memberChangedNick(self, oldnick, newnick):
        """Swap ``oldnick`` for ``newnick`` in the member roster.

        @type oldnick: string
        @type newnick: string
        """
        if oldnick in self.members:
            self.members.remove(oldnick)
            self.members.append(newnick)

    def memberLeft(self, member):
        """Drop ``member`` from the roster if present.

        @type member: string
        """
        if member in self.members:
            self.members.remove(member)
class ChatUI:
    """
    A GUI chat client.

    @type conversations: C{dict} of L{Conversation}.
    @ivar conversations: A cache of all the direct windows.

    @type groupConversations: C{dict} of L{GroupConversation}.
    @ivar groupConversations: A cache of all the group windows.

    @type persons: C{dict} with keys that are a C{tuple} of (C{str},
       L{basesupport.AbstractAccount}) and values that are
       L{Person<interfaces.IPerson>}.
    @ivar persons: A cache of all the users associated with this client.

    @type groups: C{dict} with keys that are a C{tuple} of (C{str},
        L{basesupport.AbstractAccount}) and values that are
        L{Group<interfaces.IGroup>}
    @ivar groups: A cache of all the user groups associated with this client.

    @type onlineClients: C{list} of L{Client<interfaces.IClient>}
    @ivar onlineClients: A list of message sources currently online.

    @type contactsList: L{ContactsList}
    @ivar contactsList: A contacts list.
    """
    def __init__(self):
        self.conversations = {}
        self.groupConversations = {}
        self.persons = {}
        self.groups = {}
        self.onlineClients = []
        self.contactsList = ContactsList(self)

    def registerAccountClient(self, client):
        """
        Notifies user that an account has been signed on to.

        @type client: L{Client<interfaces.IClient>}
        @returns: client, so that I may be used in a callback chain
        """
        # Python-2 print statement (this module targets Python 2).
        print "signing onto", client.accountName
        self.onlineClients.append(client)
        self.contactsList.registerAccountClient(client)
        return client

    def unregisterAccountClient(self, client):
        """
        Notifies user that an account has been signed off or disconnected

        @type client: L{Client<interfaces.IClient>}
        """
        print "signing off from", client.accountName
        self.onlineClients.remove(client)
        self.contactsList.unregisterAccountClient(client)

    def getContactsList(self):
        """
        @returntype: L{ContactsList}
        """
        return self.contactsList

    def getConversation(self, person, Class=Conversation, stayHidden=0):
        """
        For the given person object, returns the conversation window
        or creates and returns a new conversation window if one does not exist.

        @type person: L{Person<interfaces.IPerson>}
        @type Class: L{Conversation<interfaces.IConversation>} class
        @type stayHidden: boolean

        @returntype: L{Conversation<interfaces.IConversation>}
        """
        conv = self.conversations.get(person)
        if not conv:
            conv = Class(person, self)
            self.conversations[person] = conv
        # Existing windows are also re-shown (or hidden) on lookup.
        if stayHidden:
            conv.hide()
        else:
            conv.show()
        return conv

    def getGroupConversation(self,group,Class=GroupConversation,stayHidden=0):
        """
        For the given group object, returns the group conversation window or
        creates and returns a new group conversation window if it doesn't exist

        @type group: L{Group<interfaces.IGroup>}
        @type Class: L{Conversation<interfaces.IConversation>} class
        @type stayHidden: boolean

        @returntype: L{GroupConversation<interfaces.IGroupConversation>}
        """
        conv = self.groupConversations.get(group)
        if not conv:
            conv = Class(group, self)
            self.groupConversations[group] = conv
        if stayHidden:
            conv.hide()
        else:
            conv.show()
        return conv

    def getPerson(self, name, client):
        """
        For the given name and account client, returns the instance of the
        AbstractPerson subclass, or creates and returns a new AbstractPerson
        subclass of the type Class

        @type name: string
        @type client: L{Client<interfaces.IClient>}

        @returntype: L{Person<interfaces.IPerson>}
        """
        account = client.account
        p = self.persons.get((name, account))
        if not p:
            p = account.getPerson(name)
            self.persons[name, account] = p
        return p

    def getGroup(self, name, client):
        """
        For the given name and account client, returns the instance of the
        AbstractGroup subclass, or creates and returns a new AbstractGroup
        subclass of the type Class

        @type name: string
        @type client: L{Client<interfaces.IClient>}

        @returntype: L{Group<interfaces.IGroup>}
        """
        # I accept 'client' instead of 'account' in my signature for
        # backwards compatibility. (Groups changed to be Account-oriented
        # in CVS revision 1.8.)
        account = client.account
        g = self.groups.get((name, account))
        if not g:
            g = account.getGroup(name)
            self.groups[name, account] = g
        return g

    def contactChangedNick(self, person, newnick):
        """
        For the given C{person}, change the C{person}'s C{name} to C{newnick}
        and tell the contact list and any conversation windows with that
        C{person} to change as well.

        @type person: L{Person<interfaces.IPerson>}
        @param person: The person whose nickname will get changed.

        @type newnick: C{str}
        @param newnick: The new C{name} C{person} will take.
        """
        oldnick = person.name
        # has_key is Python-2-only, matching the rest of this module.
        if self.persons.has_key((oldnick, person.account)):
            conv = self.conversations.get(person)
            if conv:
                conv.contactChangedNick(person, newnick)
            self.contactsList.contactChangedNick(person, newnick)
            # Re-key the person cache under the new nickname.
            del self.persons[oldnick, person.account]
            person.name = newnick
            self.persons[person.name, person.account] = person
tangentlabs/django-oscar-fancypages | tests/functional/test_dashboard.py | 1 | 1717 | import os
import shutil
import tempfile
from PIL import Image
from django.db.models import get_model
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from fancypages import test
PageType = get_model('fancypages', 'PageType')
FancyPage = get_model('fancypages', 'FancyPage')
TEMP_IMAGE_DIR = tempfile.mkdtemp(suffix='_page_tests_images')
TEMP_MEDIA_ROOT = tempfile.mkdtemp(suffix='_page_tests_media')
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class TestAnImageForAFancyPage(test.FancyPagesWebTest):
    """Dashboard test: uploading an image through the page-update form."""
    is_staff = True

    def tearDown(self):
        super(TestAnImageForAFancyPage, self).tearDown()
        # Remove the per-run temp dirs so repeated runs start clean.
        if os.path.exists(TEMP_MEDIA_ROOT):
            shutil.rmtree(TEMP_MEDIA_ROOT)
        if os.path.exists(TEMP_IMAGE_DIR):
            shutil.rmtree(TEMP_IMAGE_DIR)

    def test_can_be_added_in_the_dashboard(self):
        fancy_page = FancyPage.add_root(name='Sample Page')
        self.assertEquals(fancy_page.image, None)

        # Create a small real JPEG to upload via the form.
        im = Image.new("RGB", (320, 240), "red")
        __, filename = tempfile.mkstemp(suffix='.jpg', dir=TEMP_IMAGE_DIR)
        im.save(filename, "JPEG")

        page = self.get(
            reverse('fp-dashboard:page-update', args=(fancy_page.id,))
        )
        settings_form = page.form
        settings_form['image'] = (filename,)
        list_page = settings_form.submit()
        self.assertRedirects(list_page, reverse('fp-dashboard:page-list'))

        # The upload lands under MEDIA_ROOT/categories with its basename.
        pages_path = os.path.join(TEMP_MEDIA_ROOT, 'categories')
        fancy_page = FancyPage.objects.get(id=fancy_page.id)
        self.assertEquals(
            fancy_page.image.path,
            os.path.join(pages_path, filename.rsplit('/')[-1])
        )
| bsd-3-clause |
chenjun0210/tensorflow | tensorflow/contrib/framework/python/framework/tensor_util.py | 48 | 10771 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Public API of this module.
__all__ = [
    'assert_same_float_dtype',
    'assert_scalar',
    'assert_scalar_int',
    'convert_to_tensor_or_sparse_tensor',
    'is_tensor',
    'reduce_sum_n',
    'remove_squeezable_dimensions',
    'with_shape',
    'with_same_shape']

# Temporary for backwards compatibility
# (these moved to core TF; re-exported here so old imports keep working)
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar

convert_to_tensor_or_sparse_tensor = (
    sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.

  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.

  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.

  Returns:
    Total loss tensor, or None if no losses have been configured.

  Raises:
    ValueError: if `losses` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
    # Each per-tensor sum is named after its producing op for debuggability.
    tensors = [
        math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
    # Skip add_n for the single-tensor case.
    if len(tensors) == 1:
      return tensors[0]
    return math_ops.add_n(tensors, name=name_scope)
def remove_squeezable_dimensions(predictions, labels, name=None):
  """Squeeze last dim if ranks of `predictions` and `labels` differ by 1.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    name: Name of the op.

  Returns:
    Tuple of `predictions` and `labels`, possibly with last dim squeezed.
  """
  with ops.name_scope(name, 'remove_squeezable_dimensions',
                      [predictions, labels]):
    predictions = ops.convert_to_tensor(predictions)
    labels = ops.convert_to_tensor(labels)
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    labels_shape = labels.get_shape()
    labels_rank = labels_shape.ndims
    if (labels_rank is not None) and (predictions_rank is not None):
      # Use static rank.
      rank_diff = predictions_rank - labels_rank
      if rank_diff == -1:
        labels = array_ops.squeeze(labels, [-1])
      elif rank_diff == 1:
        predictions = array_ops.squeeze(predictions, [-1])
      return predictions, labels

    # Use dynamic rank.
    rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
    # Only guard with a cond when the static shape leaves the squeeze
    # possibility open (last dim unknown or compatible with 1).
    if (predictions_rank is None) or (
        predictions_shape.dims[-1].is_compatible_with(1)):
      predictions = control_flow_ops.cond(
          math_ops.equal(1, rank_diff),
          lambda: array_ops.squeeze(predictions, [-1]),
          lambda: predictions)
    if (labels_rank is None) or (
        labels_shape.dims[-1].is_compatible_with(1)):
      labels = control_flow_ops.cond(
          math_ops.equal(-1, rank_diff),
          lambda: array_ops.squeeze(labels, [-1]),
          lambda: labels)
    return predictions, labels
def _all_equal(tensor0, tensor1):
  # Scalar bool tensor: True iff every element of tensor0 equals tensor1.
  with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
    return math_ops.reduce_all(
        math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
  """Returns whether actual_tensor's rank is expected_rank.

  Args:
    expected_rank: Integer defining the expected rank, or tensor of same.
    actual_tensor: Tensor to test.
  Returns:
    New tensor.
  """
  with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
    expected = ops.convert_to_tensor(expected_rank, name='expected')
    # Rank is evaluated dynamically, not from static shape.
    actual = array_ops.rank(actual_tensor, name='actual')
    return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    New tensor.
  """
  with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
    # Check rank first; element-wise comparison only makes sense when the
    # ranks agree.
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_equal = _all_equal(
        ops.convert_to_tensor(expected_shape, name='expected'),
        actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
  """Asserts actual_tensor's shape is expected_shape.

  Args:
    expected_shape: List of integers defining the expected shape, or tensor of
        same.
    actual_tensor: Tensor to test.
  Returns:
    New assert tensor.
  """
  with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
    actual_shape = array_ops.shape(actual_tensor, name='actual')
    is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
    # The Assert op fires at runtime with both shapes in its message.
    return control_flow_ops.Assert(
        is_shape, [
            'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
            expected_shape,
            actual_shape
        ], name=scope)
def with_same_shape(expected_tensor, tensor):
  """Assert tensors are the same shape, from the same graph.

  Args:
    expected_tensor: Tensor with expected shape.
    tensor: Tensor of actual values.
  Returns:
    `tensor`, possibly with a dependent shape-assert op added
    (see `with_shape`).
  """
  with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
    expected_tensor_shape = expected_tensor.get_shape()
    # Prefer the statically known shape; fall back to a runtime shape tensor.
    if expected_tensor_shape.is_fully_defined():
      expected_shape = expected_tensor_shape.as_list()
    else:
      expected_shape = array_ops.shape(expected_tensor, name='expected_shape')
    return with_shape(expected_shape, tensor)
def with_shape(expected_shape, tensor):
  """Asserts tensor has expected shape.

  If tensor shape and expected_shape, are fully defined, assert they match.
  Otherwise, add assert op that will validate the shape when tensor is
  evaluated, and set shape on tensor.

  Args:
    expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
        of same.
    tensor: Tensor whose shape we're validating.
  Returns:
    tensor, perhaps with a dependent assert operation.
  Raises:
    ValueError: if tensor has an invalid shape.
  """
  if isinstance(tensor, sparse_tensor.SparseTensor):
    raise ValueError('SparseTensor not supported.')

  # Shape type must be 1D int32.
  if tensor_util.is_tensor(expected_shape):
    if expected_shape.dtype.base_dtype != dtypes.int32:
      raise ValueError(
          'Invalid dtype %s for shape %s expected of tensor %s.' % (
              expected_shape.dtype, expected_shape, tensor.name))
  if isinstance(expected_shape, (list, tuple)):
    if not expected_shape:
      # Empty list/tuple means a scalar shape.
      expected_shape = np.asarray([], dtype=np.int32)
    else:
      np_expected_shape = np.asarray(expected_shape)
      # Coerce int64 shape lists down to int32 so dtype checks below pass.
      expected_shape = (
          np.asarray(expected_shape, dtype=np.int32)
          if np_expected_shape.dtype == np.int64 else np_expected_shape)
  if isinstance(expected_shape, np.ndarray):
    if expected_shape.ndim > 1:
      raise ValueError(
          'Invalid rank %s for shape %s expected of tensor %s.' % (
              expected_shape.ndim, expected_shape, tensor.name))
    if expected_shape.dtype != np.int32:
      raise ValueError(
          'Invalid dtype %s for shape %s expected of tensor %s.' % (
              expected_shape.dtype, expected_shape, tensor.name))

  actual_shape = tensor.get_shape()

  # Shape not statically known (or expected shape only known at runtime):
  # fall back to a graph-time assert that fires when the tensor is evaluated.
  if (not actual_shape.is_fully_defined()
      or tensor_util.is_tensor(expected_shape)):
    with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
      if (not tensor_util.is_tensor(expected_shape)
          and (len(expected_shape) < 1)):
        # TODO(irving): Remove scalar special case
        return array_ops.reshape(tensor, [])
      with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
        result = array_ops.identity(tensor)
      if not tensor_util.is_tensor(expected_shape):
        result.set_shape(expected_shape)
      return result

  # Both shapes statically known: check compatibility now, at graph build.
  if (not tensor_util.is_tensor(expected_shape) and
      not actual_shape.is_compatible_with(expected_shape)):
    if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
      # TODO(irving): Remove scalar special case.
      with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
        return array_ops.reshape(tensor, [])
    raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
        tensor.name, expected_shape, actual_shape))

  return tensor
def assert_scalar_int(tensor, name=None):
  """Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.

  Args:
    tensor: `Tensor` to test.
    name: Name of the op and of the new `Tensor` if one is created.
  Returns:
    `tensor`, for chaining.
  Raises:
    ValueError: if `tensor` is not 0-D, of integer type.
  """
  with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
    tensor = ops.convert_to_tensor(tensor)
    dtype = tensor.dtype
    # Reject non-integer dtypes up front; scalar-ness is checked by the op.
    if not dtype.base_dtype.is_integer:
      raise ValueError('Expected integer type for %s, received type: %s.'
                       % (tensor.name, dtype))
    return check_ops.assert_scalar(tensor, name=name_scope)
| apache-2.0 |
cloudnull/ansible-modules-extras | monitoring/airbrake_deployment.py | 51 | 3918 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: airbrake_deployment
version_added: "1.2"
author: Bruce Pennypacker
short_description: Notify airbrake about app deployments
description:
   - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
options:
  token:
    description:
      - API token.
    required: true
  environment:
    description:
      - The airbrake environment name, typically 'production', 'staging', etc.
    required: true
  user:
    description:
      - The username of the person doing the deployment
    required: false
  repo:
    description:
      - URL of the project repository
    required: false
  revision:
    description:
      - A hash, number, tag, or other identifier showing what revision was deployed
    required: false
  url:
    description:
      - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
    required: false
    default: "https://api.airbrake.io/deploys.txt"
    version_added: "1.5"
  validate_certs:
    description:
      - If C(no), SSL certificates for the target url will not be validated. This should only be used
        on personally controlled sites using self-signed certificates.
    required: false
    default: 'yes'
    choices: ['yes', 'no']

# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
'''

EXAMPLES = '''
- airbrake_deployment: token=AAAAAA
                       environment='staging'
                       user='ansible'
                       revision=4.2
'''
# ===========================================
# Module execution.
#

def main():
    """Entry point: post a deploy-tracking notification to Airbrake."""
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True),
            environment=dict(required=True),
            user=dict(required=False),
            repo=dict(required=False),
            revision=dict(required=False),
            url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # Map module options onto the form fields of Airbrake's deploy API.
    field_for_option = (
        ("environment", "deploy[rails_env]"),
        ("user", "deploy[local_username]"),
        ("repo", "deploy[scm_repository]"),
        ("revision", "deploy[scm_revision]"),
    )
    params = {}
    for option, field in field_for_option:
        if module.params[option]:
            params[field] = module.params[option]
    params["api_key"] = module.params["token"]

    url = module.params.get('url')

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    # Send the data to airbrake
    data = urllib.urlencode(params)
    response, info = fetch_url(module, url, data=data)
    if info['status'] == 200:
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
bepitulaz/huntingdimana | env/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 1730 | 1142 | from __future__ import absolute_import, division, unicode_literals
import re
from . import _base
from ..constants import rcdataElements, spaceCharacters
# Flatten the set of HTML space characters into one string for the regex.
spaceCharacters = "".join(spaceCharacters)
# Matches a run of one or more consecutive whitespace characters.
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(_base.Filter):
    """Collapses runs of whitespace in the token stream, leaving content
    inside whitespace-preserving elements (pre, textarea, rcdata) intact.
    """

    # Elements whose subtree must keep its whitespace exactly as written.
    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Nesting depth inside whitespace-preserving elements.
        depth = 0
        for token in _base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type == "StartTag" and (
                    depth or token["name"] in self.spacePreserveElements):
                depth += 1
            elif token_type == "EndTag" and depth:
                depth -= 1
            elif not depth and token_type == "SpaceCharacters" and token["data"]:
                # Only rewrite non-empty data so we never introduce a space
                # where there was none.
                token["data"] = " "
            elif not depth and token_type == "Characters":
                token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Replace each run of whitespace characters in *text* with one space."""
    return SPACES_REGEX.sub(" ", text)
| gpl-3.0 |
shannara/subuser | logic/subuserlib/builtInCommands/repository.py | 1 | 1800 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#external imports
import sys
import optparse
#internal imports
from subuserlib.classes.user import User
import subuserlib.resolve
import subuserlib.repository
import subuserlib.commandLineArguments
import subuserlib.profile
def parseCliArgs(sysargs):
  """
  Parse the command line arguments of ``subuser repository``.

  Returns the ``(options, args)`` pair produced by optparse.
  """
  usage = "usage: subuser repository [options] [add|remove] NAME <URL>"
  description = """Add or remove a new named repository.
 - EXAMPLE
    Add a new repository named foo with the URI http://www.example.com/repo.git.
    $ subuser repository add foo http://www.example.com/repo.git
    $ #You can also add a local repository:
    $ subuser repository add local-foo file:///home/timothy/my-local-repo/
 - EXAMPLE
    Remove the repository named foo.
    $subuser repository remove foo
  """
  # Custom formatter keeps the hand-wrapped description above intact.
  parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
  return parser.parse_args(args=sysargs)
@subuserlib.profile.do_cprofile
def runCommand(sysargs):
  """
  Manage named subuser repositories.
  """
  options, args = parseCliArgs(sysargs)
  user = User()
  if not args:
    sys.exit("Use subuser repository --help for help.")
  action = args[0]
  if action == "add":
    # "add" requires exactly: add NAME URL
    if len(args) != 3:
      sys.exit("Use subuser repository --help for help.")
    name, url = args[1], args[2]
    with user.getRegistry().getLock():
      subuserlib.repository.add(user, name, url)
  elif action == "remove":
    # "remove" requires exactly: remove NAME
    if len(args) != 2:
      sys.exit("Use subuser repository --help for help.")
    name = args[1]
    with user.getRegistry().getLock():
      subuserlib.repository.remove(user, name)
  else:
    sys.exit("Action "+args[0]+" not supported. Please see:\n subuser repository --help")
| lgpl-3.0 |
bemineni/eldam | test6/test/remove.py | 1 | 1179 |
import os
import sys
import yaml
import traceback
import transaction
import json
from elasticsearch import Elasticsearch
if __name__ == "__main__":
    # This script exercises document *removal*; the label used to say "Add"
    # (copy-paste from the companion add test).
    test_name = "Remove"
    # Require the config file next to the working directory; bail out otherwise.
    configpath = os.path.abspath('./edm.yml') if os.path.exists('./edm.yml') else sys.exit(1)
    config = None
    with open(configpath, 'r') as stream:
        try:
            # safe_load: the config is plain data; never execute YAML tags.
            config = yaml.safe_load(stream)
        except yaml.YAMLError as e:
            # Chain the parse error with a message instead of a bare Exception.
            raise Exception("Failed to parse config file %s" % configpath) from e
    ret = 0
    try:
        connection = Elasticsearch(config['elasticsearch_hosts'],
                                   # sniff before doing anything
                                   sniff_on_start=True,
                                   # refresh nodes after a node fails to respond
                                   sniff_on_connection_fail=True,
                                   # and also every 60 seconds
                                   sniffer_timeout=60)
        try:
            out = connection.delete(index=config['default_index'], doc_type='group', id='2')
            # get_source on the just-deleted id is expected to fail; the print
            # only happens if the document somehow still exists.
            data = connection.get_source(index=config['default_index'], doc_type="group", id='2')
            print(json.dumps(data, indent=4, sort_keys=True))
        except Exception:
            # Best effort: a missing document after deletion is the expected case.
            pass
    except Exception:
        print("Failed to remove item")
        print("Test failed")
        traceback.print_exc()
        ret = 1
    finally:
        print(test_name + " Test complete")
        sys.exit(ret)
hep-mirrors/herwig | Models/Feynrules/exampleInputs/write_param_card.py | 1 | 2234 |
__date__ = "3 june 2010"
__author__ = 'olivier.mattelaer@uclouvain.be'
class ParamCardWriter(object):
    """Writes a model's external parameters out as a valid param_card.dat."""

    header = \
    """######################################################################\n""" + \
    """## PARAM_CARD AUTOMATICALY GENERATED BY THE UFO #####################\n""" + \
    """######################################################################\n"""

    def __init__(self, filename, list_of_parameters=None):
        """write a valid param_card.dat"""

        if not list_of_parameters:
            # Default to every external parameter declared by the UFO model.
            from parameters import all_parameters
            list_of_parameters = [param for param in all_parameters if \
                                  param.nature=='external']

        self.fsock = open(filename, 'w')
        self.fsock.write(self.header)
        self.write_card(list_of_parameters)

    def write_card(self, all_ext_param):
        """Write all parameters, grouped by LHA block, blocks in
        alphabetical order."""
        # list all lhablock
        all_lhablock = set([param.lhablock for param in all_ext_param])

        # Bug fix: the original did ``list(all_lhablock).sort()`` which
        # sorted a throw-away temporary and left the set's iteration order
        # unspecified. Iterate a sorted copy instead.
        for lhablock in sorted(all_lhablock):
            self.write_block(lhablock)
            [self.write_param(param, lhablock) for param in all_ext_param if \
             param.lhablock == lhablock]

    def write_block(self, name):
        """ write a comment for a block"""
        self.fsock.writelines(
        """\n###################################""" + \
        """\n## INFORMATION FOR %s""" % name.upper() +\
        """\n###################################\n"""
        )
        # DECAY entries carry their own "DECAY" prefix instead of a Block line.
        if name!='DECAY':
            self.fsock.write("""Block %s \n""" % name)

    def write_param(self, param, lhablock):
        """Write one 'lhacode value # name' line (or the DECAY form)."""
        lhacode=' '.join(['%3s' % key for key in param.lhacode])
        if lhablock != 'DECAY':
            text = """ %s %e # %s \n""" % (lhacode, param.value, param.name )
        else:
            text = '''DECAY %s %e \n''' % (lhacode, param.value)
        self.fsock.write(text)
if '__main__' == __name__:
    # Running as a script: write the default card to the working directory.
    ParamCardWriter('./param_card.dat')
    # print() works identically on Python 2 and 3; the old
    # ``print 'done'`` statement is a SyntaxError under Python 3.
    print('done')
| gpl-3.0 |
cosmoharrigan/pylearn2 | pylearn2/models/dbm/inference_procedure.py | 44 | 58299 | """
Various InferenceProcedures for use with the DBM class.
"""
__authors__ = ["Ian Goodfellow", "Vincent Dumoulin"]
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import functools
import logging
from theano.compat.six.moves import xrange
from theano import gof
import theano.tensor as T
import theano
from theano.gof.op import get_debug_values
from pylearn2.models.dbm import block, flatten
from pylearn2.models.dbm.layer import Softmax
from pylearn2.utils import safe_izip, block_gradient, safe_zip
logger = logging.getLogger(__name__)
class InferenceProcedure(object):
    """
    Strategy object describing how to run mean field inference in a DBM.

    Concrete subclasses may differ in the order in which layers are
    updated or in how the mean field expectations are initialized.
    """

    def set_dbm(self, dbm):
        """
        Associates the InferenceProcedure with a specific DBM.

        Parameters
        ----------
        dbm : pylearn2.models.dbm.DBM instance
            The model to perform inference in.
        """
        self.dbm = dbm

    def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):
        """
        Perform mean field inference. Subclasses must implement.

        Parameters
        ----------
        V : Input space batch
            The values of the input features modeled by the DBM.
        Y : (Optional) Target space batch
            The values of the labels modeled by the DBM. Must be omitted
            if the DBM does not model labels; if it does, Y may be
            supplied so inference conditions on the observed labels.
        return_history : (Optional) bool
            Default: False. If True, returns the full sequence of mean
            field updates instead of just the final state.
        niter : (Optional) int
            Number of mean field iterations to run.
        block_grad : (Optional) int
            Default: None. If not None, blocks the gradient after
            `block_grad` iterations, so only the last
            `niter` - `block_grad` iterations need to be stored for
            backpropagation.

        Returns
        -------
        result : list
            One element per inferred layer containing its full mean field
            state; when `return_history` is True, a list of such lists
            with one entry per step of inference.
        """
        raise NotImplementedError(str(type(self)) + " does not implement mf.")

    def set_batch_size(self, batch_size):
        """
        Make any internal configuration needed for the given batch size.

        Parameters
        ----------
        batch_size : int
            The number of examples in the batch
        """
        # Intentionally a no-op here: the default procedure does not
        # depend on the batch size. Subclasses may override.

    def multi_infer(self, V, return_history=False, niter=None,
                    block_grad=None):
        """
        Inference using "the multi-inference trick." See
        "Multi-prediction deep Boltzmann machines", Goodfellow et al 2013.

        Optional for subclasses: no training algorithm needs it, and it is
        only expected to work at evaluation time when the model was
        trained with multi-prediction training.

        Parameters
        ----------
        V : input space batch
        return_history : bool
            If True, returns the complete history of the mean field
            iterations rather than just the final values.
        niter : int
            The number of mean field iterations to run.
        block_grad : int
            If not None, block the gradient after this number of
            iterations.

        Returns
        -------
        result : list
            A list of mean field states, or, when return_history is True,
            a list of such lists with one element per iteration.
        """
        raise NotImplementedError(str(type(self)) +
                                  " does not implement multi_infer.")

    def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None,
                      return_history=False, noise=False, niter=None,
                      block_grad=None):
        """
        Inference for multi-prediction training: gives the mean field
        expression for the units masked out by drop_mask, using self.niter
        mean field updates.

        If you use this method in your research work, please cite:

        Multi-prediction deep Boltzmann machines. Ian J. Goodfellow,
        Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013.

        Unsupervised variant: Y and drop_mask_Y are omitted and the method
        produces V_hat, an inpainted version of V. Supervised variant:
        Y and drop_mask_Y are given and the method produces V_hat and
        Y_hat.

        Parameters
        ----------
        V : tensor_like
            Theano batch in `model.input_space`
        Y : tensor_like
            Theano batch in `model.output_space` (the output space of the
            last hidden layer). Y is always assumed to be a matrix of
            one-hot category labels.
        drop_mask : tensor_like
            Binary Theano batch in `model.input_space`; 1s mark elements
            of V that are hidden from the algorithm and filled in by the
            inpainting process.
        drop_mask_Y : tensor_like
            Binary Theano vector. Since each row of Y is one categorical
            variable, this masks whole *rows* of Y.
        return_history : bool, optional
            WRITEME
        noise : bool, optional
            WRITEME
        niter : int, optional
            WRITEME
        block_grad : WRITEME

        Returns
        -------
        WRITEME
        """
        raise NotImplementedError(str(type(self)) +
                                  " does not implement do_inpainting.")
class WeightDoubling(InferenceProcedure):
    """
    An inference procedure that initializes all states to zero and
    doubles the bottom-up weights on the first pass of mean field
    inference. The weight doubling helps to compensate for the
    lack of top-down input on the first pass. This approach is
    described in "Deep Boltzmann Machines", Salakhutdinov and
    Hinton, 2008.
    """

    @functools.wraps(InferenceProcedure.mf)
    def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):
        dbm = self.dbm

        # Y must be a batch of labels (or None), never a flag value.
        assert Y not in [True, False, 0, 1]
        assert return_history in [True, False, 0, 1]

        if Y is not None:
            dbm.hidden_layers[-1].get_output_space().validate(Y)

        if niter is None:
            niter = dbm.niter

        # First pass: bottom-up sweep with doubled weights (no top-down
        # input exists yet, so doubling compensates for the missing term).
        H_hat = []
        for i in xrange(0, len(dbm.hidden_layers) - 1):
            # do double weights update for_layer_i
            if i == 0:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.visible_layer.upward_state(V),
                    iter_name='0'))
            else:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.hidden_layers[
                        i - 1].upward_state(H_hat[i - 1]),
                    iter_name='0'))

        # last layer does not need its weights doubled, even on the first pass
        if len(dbm.hidden_layers) > 1:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                state_below=dbm.hidden_layers[-2].upward_state(H_hat[-1])))
        else:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                state_below=dbm.visible_layer.upward_state(V)))

        # Make corrections for if we're also running inference on Y
        if Y is not None:
            state_above = dbm.hidden_layers[-1].downward_state(Y)
            layer_above = dbm.hidden_layers[-1]
            assert len(dbm.hidden_layers) > 1

            # Last layer before Y does not need its weights doubled
            # because it already has top down input
            if len(dbm.hidden_layers) > 2:
                state_below = dbm.hidden_layers[-3].upward_state(H_hat[-3])
            else:
                state_below = dbm.visible_layer.upward_state(V)

            H_hat[-2] = dbm.hidden_layers[-2].mf_update(
                state_below=state_below,
                state_above=state_above,
                layer_above=layer_above)

            # Last layer is clamped to Y
            H_hat[-1] = Y

        if block_grad == 1:
            H_hat = block(H_hat)

        history = [list(H_hat)]

        # we only need recurrent inference if there are multiple layers
        if len(H_hat) > 1:
            for i in xrange(1, niter):
                # Alternate even/odd half-sweeps: alternate layers are
                # conditionally independent given their neighbors.
                for j in xrange(0, len(H_hat), 2):
                    if j == 0:
                        state_below = dbm.visible_layer.upward_state(V)
                    else:
                        state_below = dbm.hidden_layers[
                            j - 1].upward_state(H_hat[j - 1])
                    if j == len(H_hat) - 1:
                        state_above = None
                        layer_above = None
                    else:
                        state_above = dbm.hidden_layers[
                            j + 1].downward_state(H_hat[j + 1])
                        layer_above = dbm.hidden_layers[j + 1]
                    H_hat[j] = dbm.hidden_layers[j].mf_update(
                        state_below=state_below,
                        state_above=state_above,
                        layer_above=layer_above)
                    if Y is not None:
                        H_hat[-1] = Y

                for j in xrange(1, len(H_hat), 2):
                    state_below = dbm.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                    if j == len(H_hat) - 1:
                        state_above = None
                        # Bug fix: this branch used to assign
                        # ``state_above = None`` twice and never reset
                        # ``layer_above``, leaving it holding a stale layer
                        # from the even-phase loop above. The top layer has
                        # nothing above it, matching the even-phase branch.
                        layer_above = None
                    else:
                        state_above = dbm.hidden_layers[
                            j + 1].downward_state(H_hat[j + 1])
                        layer_above = dbm.hidden_layers[j + 1]
                    H_hat[j] = dbm.hidden_layers[j].mf_update(
                        state_below=state_below,
                        state_above=state_above,
                        layer_above=layer_above)
                # end ifelse
                # end for odd layer

                if Y is not None:
                    H_hat[-1] = Y

                if block_grad == i:
                    H_hat = block(H_hat)

                history.append(list(H_hat))
        # end for mf iter
        # end if recurrent

        # Run some checks on the output
        for layer, state in safe_izip(dbm.hidden_layers, H_hat):
            upward_state = layer.upward_state(state)
            layer.get_output_space().validate(upward_state)
        if Y is not None:
            inferred = H_hat[:-1]
        else:
            inferred = H_hat
        for elem in flatten(inferred):
            # This check doesn't work with ('c', 0, 1, 'b')
            # because 'b' is no longer axis 0
            # for value in get_debug_values(elem):
            #    assert value.shape[0] == dbm.batch_size
            assert V in gof.graph.ancestors([elem])
            if Y is not None:
                assert Y in gof.graph.ancestors([elem])
        if Y is not None:
            assert all([elem[-1] is Y for elem in history])
            assert H_hat[-1] is Y

        if return_history:
            return history
        else:
            return H_hat

    @functools.wraps(InferenceProcedure.multi_infer)
    def multi_infer(self, V, return_history=False, niter=None,
                    block_grad=None):
        dbm = self.dbm

        assert return_history in [True, False, 0, 1]

        if niter is None:
            niter = dbm.niter

        # Blend the observed V with its inpainting initialization; the
        # blend is refreshed after every visible-layer update below.
        new_V = 0.5 * V + 0.5 * dbm.visible_layer.init_inpainting_state(
            V, drop_mask=None, noise=False, return_unmasked=False)

        H_hat = []
        for i in xrange(0, len(dbm.hidden_layers) - 1):
            # do double weights update for_layer_i
            if i == 0:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.visible_layer.upward_state(new_V),
                    iter_name='0'))
            else:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.hidden_layers[i - 1].upward_state(
                        H_hat[i - 1]),
                    iter_name='0'))

        # last layer does not need its weights doubled, even on the first pass
        if len(dbm.hidden_layers) > 1:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                state_below=dbm.hidden_layers[-2].upward_state(H_hat[-1])))
        else:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                state_below=dbm.visible_layer.upward_state(V)))

        if block_grad == 1:
            H_hat = block(H_hat)

        history = [(new_V, list(H_hat))]

        # we only need recurrent inference if there are multiple layers
        if len(H_hat) > 1:
            for i in xrange(1, niter):
                for j in xrange(0, len(H_hat), 2):
                    if j == 0:
                        state_below = dbm.visible_layer.upward_state(new_V)
                    else:
                        state_below = dbm.hidden_layers[
                            j - 1].upward_state(H_hat[j - 1])
                    if j == len(H_hat) - 1:
                        state_above = None
                        layer_above = None
                    else:
                        state_above = dbm.hidden_layers[
                            j + 1].downward_state(H_hat[j + 1])
                        layer_above = dbm.hidden_layers[j + 1]
                    H_hat[j] = dbm.hidden_layers[j].mf_update(
                        state_below=state_below,
                        state_above=state_above,
                        layer_above=layer_above)

                V_hat = dbm.visible_layer.inpaint_update(
                    state_above=dbm.hidden_layers[0].downward_state(H_hat[0]),
                    layer_above=dbm.hidden_layers[0],
                    V=V,
                    drop_mask=None)
                new_V = 0.5 * V_hat + 0.5 * V

                for j in xrange(1, len(H_hat), 2):
                    state_below = dbm.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                    if j == len(H_hat) - 1:
                        state_above = None
                        # Bug fix: same stale-``layer_above`` issue as in
                        # ``mf`` above -- the second assignment used to be a
                        # duplicate ``state_above = None``.
                        layer_above = None
                    else:
                        state_above = dbm.hidden_layers[
                            j + 1].downward_state(H_hat[j + 1])
                        layer_above = dbm.hidden_layers[j + 1]
                    H_hat[j] = dbm.hidden_layers[j].mf_update(
                        state_below=state_below,
                        state_above=state_above,
                        layer_above=layer_above)
                # end ifelse
                # end for odd layer

                if block_grad == i:
                    H_hat = block(H_hat)
                    V_hat = block_gradient(V_hat)

                history.append((new_V, list(H_hat)))
        # end for mf iter
        # end if recurrent

        # Run some checks on the output
        for layer, state in safe_izip(dbm.hidden_layers, H_hat):
            upward_state = layer.upward_state(state)
            layer.get_output_space().validate(upward_state)

        inferred = H_hat
        for elem in flatten(inferred):
            for value in get_debug_values(elem):
                assert value.shape[0] == dbm.batch_size
            assert V in gof.graph.ancestors([elem])

        if return_history:
            return history
        else:
            return H_hat[-1]

    def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None,
                      return_history=False, noise=False, niter=None,
                      block_grad=None):
        """
        .. todo::

            WRITEME properly

        Gives the mean field expression for units masked out by drop_mask.
        Uses self.niter mean field updates.

        If you use this method in your research work, please cite:

            Multi-prediction deep Boltzmann machines. Ian J. Goodfellow,
            Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013.

        Comes in two variants, unsupervised and supervised:

        * unsupervised: Y and drop_mask_Y are not passed to the method. The
          method produces V_hat, an inpainted version of V
        * supervised: Y and drop_mask_Y are passed to the method. The method
          produces V_hat and Y_hat

        Parameters
        ----------
        V : tensor_like
            Theano batch in `model.input_space`
        Y : tensor_like
            Theano batch in `model.output_space`, i.e. in the output space of
            the last hidden layer. (It's not really a hidden layer anymore,
            but oh well. It's convenient to code it this way because the
            labels are sort of "on top" of everything else.) *** Y is always
            assumed to be a matrix of one-hot category labels. ***
        drop_mask : tensor_like
            Theano batch in `model.input_space`. Should be all binary, with
            1s indicating that the corresponding element of X should be
            "dropped", i.e. hidden from the algorithm and filled in as part
            of the inpainting process
        drop_mask_Y : tensor_like
            Theano vector. Since we assume Y is a one-hot matrix, each row is
            a single categorical variable. `drop_mask_Y` is a binary mask
            specifying which *rows* to drop.
        return_history : bool, optional
            WRITEME
        noise : bool, optional
            WRITEME
        niter : int, optional
            WRITEME
        block_grad : WRITEME

        Returns
        -------
        WRITEME
        """
        dbm = self.dbm

        # TODO: Should add unit test that calling this with a batch of
        # different inputs should yield the same output for each
        # if noise is False and drop_mask is all 1s

        if niter is None:
            niter = dbm.niter

        assert drop_mask is not None
        assert return_history in [True, False]
        assert noise in [True, False]
        if Y is None:
            if drop_mask_Y is not None:
                raise ValueError("do_inpainting got drop_mask_Y but not Y.")
        else:
            if drop_mask_Y is None:
                raise ValueError("do_inpainting got Y but not drop_mask_Y.")

        if Y is not None:
            assert isinstance(dbm.hidden_layers[-1], Softmax)
            if drop_mask_Y.ndim != 1:
                raise ValueError("do_inpainting assumes Y is a matrix of "
                                 "one-hot labels,"
                                 "so each example is only one variable. "
                                 "drop_mask_Y should "
                                 "therefore be a vector, but we got something"
                                 "with ndim " +
                                 str(drop_mask_Y.ndim))
            drop_mask_Y = drop_mask_Y.dimshuffle(0, 'x')

        orig_V = V
        orig_drop_mask = drop_mask

        history = []

        V_hat, V_hat_unmasked = dbm.visible_layer.init_inpainting_state(
            V, drop_mask, noise, return_unmasked=True)
        assert V_hat_unmasked.ndim > 1

        H_hat = []
        for i in xrange(0, len(dbm.hidden_layers) - 1):
            # do double weights update for_layer_i
            if i == 0:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.visible_layer.upward_state(V_hat),
                    iter_name='0'))
            else:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.hidden_layers[
                        i - 1].upward_state(H_hat[i - 1]),
                    iter_name='0'))

        # Last layer does not need its weights doubled, even on the first pass
        if len(dbm.hidden_layers) > 1:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                # layer_above = None,
                state_below=dbm.hidden_layers[-2].upward_state(H_hat[-1])))
        else:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                state_below=dbm.visible_layer.upward_state(V_hat)))

        if Y is not None:
            # Observed rows of Y stay clamped; dropped rows get an
            # inpainting initialization.
            Y_hat_unmasked = dbm.hidden_layers[
                -1].init_inpainting_state(Y, noise)
            dirty_term = drop_mask_Y * Y_hat_unmasked
            clean_term = (1 - drop_mask_Y) * Y
            Y_hat = dirty_term + clean_term
            H_hat[-1] = Y_hat
            if len(dbm.hidden_layers) > 1:
                i = len(dbm.hidden_layers) - 2
                if i == 0:
                    H_hat[i] = dbm.hidden_layers[i].mf_update(
                        state_above=Y_hat,
                        layer_above=dbm.hidden_layers[-1],
                        state_below=dbm.visible_layer.upward_state(V_hat),
                        iter_name='0')
                else:
                    H_hat[i] = dbm.hidden_layers[i].mf_update(
                        state_above=Y_hat,
                        layer_above=dbm.hidden_layers[-1],
                        state_below=dbm.hidden_layers[
                            i - 1].upward_state(H_hat[i - 1]),
                        iter_name='0')

        def update_history():
            # Snapshot the current inference state into `history`.
            assert V_hat_unmasked.ndim > 1
            d = {'V_hat': V_hat, 'H_hat': list(
                H_hat), 'V_hat_unmasked': V_hat_unmasked}
            if Y is not None:
                d['Y_hat_unmasked'] = Y_hat_unmasked
                d['Y_hat'] = H_hat[-1]
            history.append(d)

        if block_grad == 1:
            V_hat = block_gradient(V_hat)
            V_hat_unmasked = block_gradient(V_hat_unmasked)
            H_hat = block(H_hat)
        update_history()

        for i in xrange(niter - 1):
            for j in xrange(0, len(H_hat), 2):
                if j == 0:
                    state_below = dbm.visible_layer.upward_state(V_hat)
                else:
                    state_below = dbm.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                if Y is not None and j == len(dbm.hidden_layers) - 1:
                    Y_hat_unmasked = H_hat[j]
                    H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y

            V_hat, V_hat_unmasked = dbm.visible_layer.inpaint_update(
                state_above=dbm.hidden_layers[0].downward_state(H_hat[0]),
                layer_above=dbm.hidden_layers[0],
                V=V,
                drop_mask=drop_mask, return_unmasked=True)
            V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)

            for j in xrange(1, len(H_hat), 2):
                state_below = dbm.hidden_layers[
                    j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                # end if j
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                if Y is not None and j == len(dbm.hidden_layers) - 1:
                    Y_hat_unmasked = H_hat[j]
                    H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
                # end if y
            # end for j

            if block_grad == i:
                V_hat = block_gradient(V_hat)
                V_hat_unmasked = block_gradient(V_hat_unmasked)
                H_hat = block(H_hat)
            update_history()
        # end for i

        # debugging, make sure V didn't get changed in this function
        assert V is orig_V
        assert drop_mask is orig_drop_mask

        Y_hat = H_hat[-1]

        assert V in theano.gof.graph.ancestors([V_hat])
        if Y is not None:
            assert V in theano.gof.graph.ancestors([Y_hat])

        if return_history:
            return history
        else:
            if Y is not None:
                return V_hat, Y_hat
            return V_hat

# Originally WeightDoubling did not support multi-prediction training,
# while a separate class called SuperWeightDoubling did. Now they are
# the same class, but we maintain the SuperWeightDoubling class for
# backwards compatibility. May be removed on or after 2015-04-20.
SuperWeightDoubling = WeightDoubling
class MoreConsistent(WeightDoubling):
    """
    There's an oddity in WeightDoubling where during the inpainting, we
    initialize Y_hat to sigmoid(biases) if a clean Y is passed in and
    2 * weights otherwise. I believe but ought to check that mf always
    does weight doubling.
    This class makes the two more consistent by just implementing mf as
    calling inpainting with Y masked out.
    """

    @functools.wraps(InferenceProcedure.mf)
    def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):
        # A drop mask of all zeros means the entire visible batch is
        # treated as observed by do_inpainting (nothing to inpaint).
        drop_mask = T.zeros_like(V)
        if Y is not None:
            # Y is observed, specify that it's fully observed
            drop_mask_Y = T.zeros_like(Y)
        else:
            # Y is not observed
            last_layer = self.dbm.hidden_layers[-1]
            if isinstance(last_layer, Softmax):
                # Y is not observed, the model has a Y variable, fill in a
                # dummy one
                # and specify that no element of it is observed
                batch_size = self.dbm.get_input_space().batch_size(V)
                num_classes = self.dbm.hidden_layers[-1].n_classes
                assert isinstance(num_classes, int)
                Y = T.alloc(1., batch_size, num_classes)
                # All-ones mask: every (dummy) label row is "dropped",
                # i.e. inferred by the inpainting procedure.
                drop_mask_Y = T.alloc(1., batch_size)
            else:
                # Y is not observed because the model has no Y variable
                drop_mask_Y = None
        # Delegate the actual mean field computation to do_inpainting
        # with everything in V observed.
        history = self.do_inpainting(V=V,
                                     Y=Y,
                                     return_history=True,
                                     drop_mask=drop_mask,
                                     drop_mask_Y=drop_mask_Y,
                                     noise=False,
                                     niter=niter,
                                     block_grad=block_grad)
        # Debugging check (the "# rm" tag suggests it was meant to be
        # temporary): consecutive iterations must yield distinct nodes.
        assert history[-1]['H_hat'][0] is not history[-2]['H_hat'][0] # rm
        if return_history:
            return [elem['H_hat'] for elem in history]
        rval = history[-1]['H_hat']
        if 'Y_hat_unmasked' in history[-1]:
            # NOTE(review): this mutates the list stored inside
            # history[-1] in place; harmless here because history is
            # discarded, but verify if history is ever reused.
            rval[-1] = history[-1]['Y_hat_unmasked']
        return rval
class MoreConsistent2(WeightDoubling):
    """
    Makes `do_inpainting` even more consistent with `mf` than in the
    `MoreConsistent` class. TODO-- look up exactly which inconsistency
    was removed.
    """

    @functools.wraps(InferenceProcedure.do_inpainting)
    def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None,
                      return_history=False, noise=False, niter=None,
                      block_grad=None):
        dbm = self.dbm
        """TODO: Should add unit test that calling this with a batch of
        different inputs should yield the same output for each
        if noise is False and drop_mask is all 1s"""
        if niter is None:
            niter = dbm.niter
        assert drop_mask is not None
        assert return_history in [True, False]
        assert noise in [True, False]
        # Y and drop_mask_Y must be supplied together (supervised mode)
        # or both omitted (unsupervised mode).
        if Y is None:
            if drop_mask_Y is not None:
                raise ValueError("do_inpainting got drop_mask_Y but not Y.")
        else:
            if drop_mask_Y is None:
                raise ValueError("do_inpainting got Y but not drop_mask_Y.")
        if Y is not None:
            assert isinstance(dbm.hidden_layers[-1], Softmax)
            if drop_mask_Y.ndim != 1:
                raise ValueError("do_inpainting assumes Y is a matrix of"
                                 " one-hot labels,"
                                 "so each example is only one variable. "
                                 "drop_mask_Y should "
                                 "therefore be a vector, but we got "
                                 "something with ndim " +
                                 str(drop_mask_Y.ndim))
            # One mask bit per example; broadcast across label columns.
            drop_mask_Y = drop_mask_Y.dimshuffle(0, 'x')
        # Keep references so we can verify at the end that the inputs
        # were not accidentally rebound inside this function.
        orig_V = V
        orig_drop_mask = drop_mask
        history = []
        V_hat, V_hat_unmasked = dbm.visible_layer.init_inpainting_state(
            V, drop_mask, noise, return_unmasked=True)
        assert V_hat_unmasked.ndim > 1
        H_hat = []
        # First pass: bottom-up sweep with doubled weights to compensate
        # for the not-yet-initialized layer above each layer.
        for i in xrange(0, len(dbm.hidden_layers) - 1):
            # do double weights update for_layer_i
            if i == 0:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.visible_layer.upward_state(V_hat),
                    iter_name='0'))
            else:
                H_hat.append(dbm.hidden_layers[i].mf_update(
                    state_above=None,
                    double_weights=True,
                    state_below=dbm.hidden_layers[
                        i - 1].upward_state(H_hat[i - 1]),
                    iter_name='0'))
        # Last layer does not need its weights doubled, even on the first pass
        if len(dbm.hidden_layers) > 1:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                # layer_above = None,
                state_below=dbm.hidden_layers[-2].upward_state(H_hat[-1])))
        else:
            H_hat.append(dbm.hidden_layers[-1].mf_update(
                state_above=None,
                state_below=dbm.visible_layer.upward_state(V_hat)))
        if Y is not None:
            # Mix the model's guess (where dropped) with the clean
            # labels (where observed).
            Y_hat_unmasked = H_hat[-1]
            dirty_term = drop_mask_Y * Y_hat_unmasked
            clean_term = (1 - drop_mask_Y) * Y
            Y_hat = dirty_term + clean_term
            H_hat[-1] = Y_hat

        def update_history():
            # Snapshot the current mean field state (list(H_hat) copies
            # the list, so later in-place updates don't alias).
            assert V_hat_unmasked.ndim > 1
            d = {'V_hat': V_hat, 'H_hat': list(
                H_hat), 'V_hat_unmasked': V_hat_unmasked}
            if Y is not None:
                d['Y_hat_unmasked'] = Y_hat_unmasked
                d['Y_hat'] = H_hat[-1]
            history.append(d)
        if block_grad == 1:
            V_hat = block_gradient(V_hat)
            V_hat_unmasked = block_gradient(V_hat_unmasked)
            H_hat = block(H_hat)
        update_history()
        for i in xrange(niter - 1):
            # Even-indexed layers, then the visible layer, then
            # odd-indexed layers (block mean field updates).
            for j in xrange(0, len(H_hat), 2):
                if j == 0:
                    state_below = dbm.visible_layer.upward_state(V_hat)
                else:
                    state_below = dbm.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                if Y is not None and j == len(dbm.hidden_layers) - 1:
                    Y_hat_unmasked = H_hat[j]
                    H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
            V_hat, V_hat_unmasked = dbm.visible_layer.inpaint_update(
                state_above=dbm.hidden_layers[0].downward_state(H_hat[0]),
                layer_above=dbm.hidden_layers[0],
                V=V,
                drop_mask=drop_mask, return_unmasked=True)
            V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
            for j in xrange(1, len(H_hat), 2):
                state_below = dbm.hidden_layers[
                    j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                # end if j
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                if Y is not None and j == len(dbm.hidden_layers) - 1:
                    Y_hat_unmasked = H_hat[j]
                    H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
                # end if y
            # end for j
            # NOTE(review): the pre-loop check above uses
            # ``block_grad == 1`` while this one uses ``block_grad == i``
            # with i starting at 0; BiasInit/UpDown use ``i + 1``.
            # Verify the intended off-by-one convention.
            if block_grad == i:
                V_hat = block_gradient(V_hat)
                V_hat_unmasked = block_gradient(V_hat_unmasked)
                H_hat = block(H_hat)
            update_history()
        # end for i
        # debugging, make sure V didn't get changed in this function
        assert V is orig_V
        assert drop_mask is orig_drop_mask
        Y_hat = H_hat[-1]
        assert V in theano.gof.graph.ancestors([V_hat])
        if Y is not None:
            assert V in theano.gof.graph.ancestors([Y_hat])
        if return_history:
            return history
        else:
            if Y is not None:
                return V_hat, Y_hat
            return V_hat
class BiasInit(InferenceProcedure):
    """
    An InferenceProcedure that initializes the mean field parameters
    based on the biases in the model. This InferenceProcedure uses
    the same weights at every iteration, rather than doubling the
    weights on the first pass.
    """

    @functools.wraps(InferenceProcedure.mf)
    def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):
        dbm = self.dbm
        # Guard against flags accidentally being passed in place of Y.
        assert Y not in [True, False, 0, 1]
        assert return_history in [True, False, 0, 1]
        if Y is not None:
            dbm.hidden_layers[-1].get_output_space().validate(Y)
        if niter is None:
            niter = dbm.niter
        # Bias-based initialization: every hidden layer except the first
        # starts from init_mf_state(); H_hat[0] stays None until the
        # first even-indexed sweep computes it from V.
        H_hat = [None] + [layer.init_mf_state()
                          for layer in dbm.hidden_layers[1:]]
        # Make corrections for if we're also running inference on Y
        if Y is not None:
            # Last layer is clamped to Y
            H_hat[-1] = Y
        history = [list(H_hat)]
        # we only need recurrent inference if there are multiple layers
        assert (niter > 1) == (len(dbm.hidden_layers) > 1)
        for i in xrange(niter):
            # Alternating block mean field sweeps: even-indexed layers
            # first, then odd-indexed layers.
            for j in xrange(0, len(H_hat), 2):
                if j == 0:
                    state_below = dbm.visible_layer.upward_state(V)
                else:
                    state_below = dbm.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
            # Re-clamp the top layer to the observed labels.
            if Y is not None:
                H_hat[-1] = Y
            for j in xrange(1, len(H_hat), 2):
                state_below = dbm.hidden_layers[
                    j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    # Bug fix: this branch previously assigned
                    # ``state_above = None`` twice, leaving
                    # ``layer_above`` stale from the even-indexed sweep.
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
            # end ifelse
            # end for odd layer
            if Y is not None:
                H_hat[-1] = Y
            # Sanity check: every state other than the clamped Y must be
            # a fresh graph node this iteration.  Bug fix: the loop
            # variable used to be ``i``, shadowing the mean field
            # iteration index and corrupting the block_grad test below.
            for idx, elem in enumerate(H_hat):
                if elem is Y:
                    assert idx == len(H_hat) - 1
                    continue
                else:
                    assert elem not in history[-1]
            if block_grad == i + 1:
                H_hat = block(H_hat)
            history.append(list(H_hat))
        # end for mf iter
        # Run some checks on the output
        for layer, state in safe_izip(dbm.hidden_layers, H_hat):
            upward_state = layer.upward_state(state)
            layer.get_output_space().validate(upward_state)
        if Y is not None:
            assert H_hat[-1] is Y
            inferred = H_hat[:-1]
        else:
            inferred = H_hat
        for elem in flatten(inferred):
            for value in get_debug_values(elem):
                assert value.shape[0] == dbm.batch_size
            if V not in theano.gof.graph.ancestors([elem]):
                logger.error("{0} "
                             "does not have V as an ancestor!".format(elem))
                logger.error(theano.printing.min_informative_str(V))
                if elem is V:
                    logger.error("this variational parameter *is* V")
                else:
                    logger.error("this variational parameter "
                                 "is not the same as V")
                logger.error("V is {0}".format(V))
                assert False
            if Y is not None:
                assert Y in theano.gof.graph.ancestors([elem])
        if Y is not None:
            assert all([elem[-1] is Y for elem in history])
            assert H_hat[-1] is Y
        for elem in history:
            assert len(elem) == len(dbm.hidden_layers)
        if return_history:
            for hist_elem, H_elem in safe_zip(history[-1], H_hat):
                assert hist_elem is H_elem
            return history
        else:
            return H_hat

    def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None,
                      return_history=False, noise=False, niter=None,
                      block_grad=None):
        """
        Gives the mean field expression for units masked out by drop_mask.
        Uses self.niter mean field updates.
        Comes in two variants, unsupervised and supervised:
        * unsupervised: Y and drop_mask_Y are not passed to the method. The
          method produces V_hat, an inpainted version of V.
        * supervised: Y and drop_mask_Y are passed to the method. The method
          produces V_hat and Y_hat.
        If you use this method in your research work, please cite:
        Multi-prediction deep Boltzmann machines. Ian J. Goodfellow,
        Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013.
        Parameters
        ----------
        V : tensor_like
            Theano batch in `model.input_space`
        Y : tensor_like
            Theano batch in model.output_space, ie, in the output space of
            the last hidden layer (it's not really a hidden layer anymore,
            but oh well. It's convenient to code it this way because the
            labels are sort of "on top" of everything else). *** Y is always
            assumed to be a matrix of one-hot category labels. ***
        drop_mask : tensor_like
            A theano batch in `model.input_space`. Should be all binary, with
            1s indicating that the corresponding element of X should be
            "dropped", ie, hidden from the algorithm and filled in as part of
            the inpainting process
        drop_mask_Y : tensor_like
            Theano vector. Since we assume Y is a one-hot matrix, each row is
            a single categorical variable. `drop_mask_Y` is a binary mask
            specifying which *rows* to drop.
        """
        dbm = self.dbm
        """TODO: Should add unit test that calling this with a batch of
        different inputs should yield the same output for each
        if noise is False and drop_mask is all 1s"""
        if niter is None:
            niter = dbm.niter
        assert drop_mask is not None
        assert return_history in [True, False]
        assert noise in [True, False]
        # Y and drop_mask_Y must be supplied together or not at all.
        if Y is None:
            if drop_mask_Y is not None:
                raise ValueError("do_inpainting got drop_mask_Y but not Y.")
        else:
            if drop_mask_Y is None:
                raise ValueError("do_inpainting got Y but not drop_mask_Y.")
        if Y is not None:
            assert isinstance(dbm.hidden_layers[-1], Softmax)
            if drop_mask_Y.ndim != 1:
                raise ValueError("do_inpainting assumes Y is a matrix of "
                                 "one-hot labels,"
                                 "so each example is only one variable. "
                                 "drop_mask_Y should therefore be a vector,"
                                 "but we got something with ndim " +
                                 str(drop_mask_Y.ndim))
            # One mask bit per example; broadcast across label columns.
            drop_mask_Y = drop_mask_Y.dimshuffle(0, 'x')
        orig_V = V
        orig_drop_mask = drop_mask
        history = []
        V_hat, V_hat_unmasked = dbm.visible_layer.init_inpainting_state(
            V, drop_mask, noise, return_unmasked=True)
        assert V_hat_unmasked.ndim > 1
        # Same bias-based initialization as mf(); H_hat[0] stays None
        # until the first sweep below fills it in.
        H_hat = [None] + [layer.init_mf_state()
                          for layer in dbm.hidden_layers[1:]]
        if Y is not None:
            Y_hat_unmasked = dbm.hidden_layers[
                -1].init_inpainting_state(Y, noise)
            Y_hat = drop_mask_Y * Y_hat_unmasked + (1 - drop_mask_Y) * Y
            H_hat[-1] = Y_hat

        def update_history():
            assert V_hat_unmasked.ndim > 1
            # Bug fix: snapshot H_hat with list() instead of storing the
            # live list; previously every history entry aliased the same
            # list object, so all entries ended up identical.
            # WeightDoubling's update_history already snapshots.
            d = {'V_hat': V_hat, 'H_hat': list(H_hat),
                 'V_hat_unmasked': V_hat_unmasked}
            if Y is not None:
                d['Y_hat_unmasked'] = Y_hat_unmasked
                d['Y_hat'] = H_hat[-1]
            history.append(d)
        update_history()
        for i in xrange(niter):
            # Even-indexed layers, then the visible layer, then
            # odd-indexed layers.
            for j in xrange(0, len(H_hat), 2):
                if j == 0:
                    state_below = dbm.visible_layer.upward_state(V_hat)
                else:
                    state_below = dbm.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                if Y is not None and j == len(dbm.hidden_layers) - 1:
                    Y_hat_unmasked = H_hat[j]
                    H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
            V_hat, V_hat_unmasked = dbm.visible_layer.inpaint_update(
                state_above=dbm.hidden_layers[0].downward_state(
                    H_hat[0]),
                layer_above=dbm.hidden_layers[0],
                V=V,
                drop_mask=drop_mask, return_unmasked=True)
            V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
            for j in xrange(1, len(H_hat), 2):
                state_below = dbm.hidden_layers[
                    j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                # end if j
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                if Y is not None and j == len(dbm.hidden_layers) - 1:
                    Y_hat_unmasked = H_hat[j]
                    H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
                # end if y
            # end for j
            if block_grad == i + 1:
                V_hat = block_gradient(V_hat)
                V_hat_unmasked = block_gradient(V_hat_unmasked)
                H_hat = block(H_hat)
            update_history()
        # end for i
        # debugging, make sure V didn't get changed in this function
        assert V is orig_V
        assert drop_mask is orig_drop_mask
        Y_hat = H_hat[-1]
        assert V in theano.gof.graph.ancestors([V_hat])
        if Y is not None:
            assert V in theano.gof.graph.ancestors([Y_hat])
        if return_history:
            return history
        else:
            if Y is not None:
                return V_hat, Y_hat
            return V_hat
class UpDown(InferenceProcedure):
    """
    An InferenceProcedure that initializes the mean field parameters
    based on the biases in the model, then alternates between updating
    each of the layers bottom-to-top
    and updating each of the layers top-to-bottom.
    """

    @functools.wraps(InferenceProcedure.mf)
    def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):
        """
        .. todo::
            WRITEME
        """
        dbm = self.dbm
        # Guard against flags accidentally being passed in place of Y.
        assert Y not in [True, False, 0, 1]
        assert return_history in [True, False, 0, 1]
        if Y is not None:
            dbm.hidden_layers[-1].get_output_space().validate(Y)
        if niter is None:
            niter = dbm.niter
        # Bias-based initialization; H_hat[0] stays None until the first
        # sweep computes it from V.
        H_hat = [None] + [layer.init_mf_state()
                          for layer in dbm.hidden_layers[1:]]
        # Make corrections for if we're also running inference on Y
        if Y is not None:
            # Last layer is clamped to Y
            H_hat[-1] = Y
        history = [list(H_hat)]
        # we only need recurrent inference if there are multiple layers
        assert (niter > 1) == (len(dbm.hidden_layers) > 1)
        for i in xrange(niter):
            # Determine whether to go up or down on this iteration
            if i % 2 == 0:
                start = 0
                stop = len(H_hat)
                inc = 1
            else:
                start = len(H_hat) - 1
                stop = -1
                inc = -1
            # Do the mean field updates
            for j in xrange(start, stop, inc):
                if j == 0:
                    state_below = dbm.visible_layer.upward_state(V)
                else:
                    state_below = dbm.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = dbm.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = dbm.hidden_layers[j + 1]
                H_hat[j] = dbm.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                # Keep the top layer clamped throughout the sweep so
                # lower layers always see the observed labels.
                if Y is not None:
                    H_hat[-1] = Y
            if Y is not None:
                H_hat[-1] = Y
            if block_grad == i + 1:
                H_hat = block(H_hat)
            history.append(list(H_hat))
        # end for mf iter
        # Run some checks on the output
        for layer, state in safe_izip(dbm.hidden_layers, H_hat):
            upward_state = layer.upward_state(state)
            layer.get_output_space().validate(upward_state)
        if Y is not None:
            assert all([elem[-1] is Y for elem in history])
            assert H_hat[-1] is Y
        if return_history:
            return history
        else:
            return H_hat

    def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None,
                      return_history=False, noise=False, niter=None,
                      block_grad=None):
        """
        .. todo::
            WRITEME properly
        Gives the mean field expression for units masked out by drop_mask.
        Uses self.niter mean field updates.
        Comes in two variants, unsupervised and supervised:
        * unsupervised: Y and drop_mask_Y are not passed to the method. The
          method produces V_hat, an inpainted version of V.
        * supervised: Y and drop_mask_Y are passed to the method. The method
          produces V_hat and Y_hat.
        If you use this method in your research work, please cite:
        Multi-prediction deep Boltzmann machines. Ian J. Goodfellow,
        Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013.
        Parameters
        ----------
        V : tensor_like
            Theano batch in `model.input_space`
        Y : tensor_like
            Theano batch in model.output_space, ie, in the output space of
            the last hidden layer (it's not really a hidden layer anymore,
            but oh well. It's convenient to code it this way because the
            labels are sort of "on top" of everything else). *** Y is always
            assumed to be a matrix of one-hot category labels. ***
        drop_mask : tensor_like
            A theano batch in `model.input_space`. Should be all binary, with
            1s indicating that the corresponding element of X should be
            "dropped", ie, hidden from the algorithm and filled in as part of
            the inpainting process
        drop_mask_Y : tensor_like
            Theano vector. Since we assume Y is a one-hot matrix, each row is
            a single categorical variable. `drop_mask_Y` is a binary mask
            specifying which *rows* to drop.
        """
        if Y is not None:
            # Bug fix: this previously read ``self.hidden_layers`` --
            # InferenceProcedure has no such attribute (the layers live
            # on the model), so the supervised path raised
            # AttributeError before inference even started.
            assert isinstance(self.dbm.hidden_layers[-1], Softmax)
        model = self.dbm
        """TODO: Should add unit test that calling this with a batch of
        different inputs should yield the same output for each
        if noise is False and drop_mask is all 1s"""
        if niter is None:
            niter = model.niter
        assert drop_mask is not None
        assert return_history in [True, False]
        assert noise in [True, False]
        # Y and drop_mask_Y must be supplied together or not at all.
        if Y is None:
            if drop_mask_Y is not None:
                raise ValueError("do_inpainting got drop_mask_Y but not Y.")
        else:
            if drop_mask_Y is None:
                raise ValueError("do_inpainting got Y but not drop_mask_Y.")
        if Y is not None:
            assert isinstance(model.hidden_layers[-1], Softmax)
            if drop_mask_Y.ndim != 1:
                raise ValueError("do_inpainting assumes Y is a matrix of"
                                 "one-hot labels,"
                                 "so each example is only one variable. "
                                 "drop_mask_Y should "
                                 "therefore be a vector, but we got "
                                 "something with ndim " +
                                 str(drop_mask_Y.ndim))
            # One mask bit per example; broadcast across label columns.
            drop_mask_Y = drop_mask_Y.dimshuffle(0, 'x')
        orig_V = V
        orig_drop_mask = drop_mask
        history = []
        V_hat, V_hat_unmasked = model.visible_layer.init_inpainting_state(
            V, drop_mask, noise, return_unmasked=True)
        assert V_hat_unmasked.ndim > 1
        H_hat = [None] + [layer.init_mf_state()
                          for layer in model.hidden_layers[1:]]
        if Y is not None:
            Y_hat_unmasked = model.hidden_layers[
                -1].init_inpainting_state(Y, noise)
            Y_hat = drop_mask_Y * Y_hat_unmasked + (1 - drop_mask_Y) * Y
            H_hat[-1] = Y_hat

        def update_history():
            assert V_hat_unmasked.ndim > 1
            # Bug fix: snapshot H_hat with list() instead of storing the
            # live list; previously every history entry aliased the same
            # list object, so all entries ended up identical.
            # WeightDoubling's update_history already snapshots.
            d = {'V_hat': V_hat, 'H_hat': list(H_hat),
                 'V_hat_unmasked': V_hat_unmasked}
            if Y is not None:
                d['Y_hat_unmasked'] = Y_hat_unmasked
                d['Y_hat'] = H_hat[-1]
            history.append(d)
        update_history()
        for i in xrange(niter):
            # Alternate sweep direction: bottom-to-top on even
            # iterations, top-to-bottom on odd ones.
            if i % 2 == 0:
                start = 0
                stop = len(H_hat)
                inc = 1
                if i > 0:
                    # Don't start by updating V_hat on iteration 0 or
                    # this will throw out the noise
                    V_hat, V_hat_unmasked = model.visible_layer.inpaint_update(
                        state_above=model.hidden_layers[0].downward_state(
                            H_hat[0]),
                        layer_above=model.hidden_layers[0],
                        V=V,
                        drop_mask=drop_mask, return_unmasked=True)
                    V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
            else:
                start = len(H_hat) - 1
                stop = -1
                inc = -1
            for j in xrange(start, stop, inc):
                if j == 0:
                    state_below = model.visible_layer.upward_state(V_hat)
                else:
                    state_below = model.hidden_layers[
                        j - 1].upward_state(H_hat[j - 1])
                if j == len(H_hat) - 1:
                    state_above = None
                    layer_above = None
                else:
                    state_above = model.hidden_layers[
                        j + 1].downward_state(H_hat[j + 1])
                    layer_above = model.hidden_layers[j + 1]
                H_hat[j] = model.hidden_layers[j].mf_update(
                    state_below=state_below,
                    state_above=state_above,
                    layer_above=layer_above)
                if Y is not None and j == len(model.hidden_layers) - 1:
                    Y_hat_unmasked = H_hat[j]
                    H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
            if i % 2 == 1:
                # Downward sweeps end at the visible layer.
                V_hat, V_hat_unmasked = model.visible_layer.inpaint_update(
                    state_above=model.hidden_layers[0].downward_state(
                        H_hat[0]),
                    layer_above=model.hidden_layers[0],
                    V=V,
                    drop_mask=drop_mask, return_unmasked=True)
                V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
            if block_grad == i + 1:
                V_hat = block_gradient(V_hat)
                V_hat_unmasked = block_gradient(V_hat_unmasked)
                H_hat = block(H_hat)
            update_history()
        # end for i
        # debugging, make sure V didn't get changed in this function
        assert V is orig_V
        assert drop_mask is orig_drop_mask
        Y_hat = H_hat[-1]
        assert V in theano.gof.graph.ancestors([V_hat])
        if Y is not None:
            assert V in theano.gof.graph.ancestors([Y_hat])
        if return_history:
            return history
        else:
            if Y is not None:
                return V_hat, Y_hat
            return V_hat
| bsd-3-clause |
chjacobsen/mystery-murder-generator | mmgen/models/mystery.py | 1 | 2176 | from __future__ import absolute_import
import random
import datetime
import logging
from mmgen.data import murder
from mmgen.models import person
from mmgen.models import relationship
from mmgen.util.randomize import weighted_roll
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Mystery:
    """
    A full mystery: the cast of characters, the chosen murder method and
    motive, and the relationships generated between all characters.
    """
    # Class-level defaults kept for backwards compatibility with code
    # that reads them off the class; each instance gets its own copies
    # in __init__ below.
    timeline = None
    people = []
    num_characters = 0
    start_date = None
    current_date = None

    def __init__(self):
        # Bug fix: ``people`` used to exist only as a class attribute,
        # so every Mystery instance appended into the *same* shared
        # list.  Give each instance its own state instead.
        self.timeline = None
        self.people = []
        self.num_characters = 0
        self.start_date = None
        self.current_date = None

    def populate(self):
        """Generate the cast, pick a murder method/motive, and create a
        relationship for every pair of characters.

        Expects ``num_characters``, ``start_date`` and ``current_date``
        (unix timestamps) to be set before calling.
        """
        logger.info("Generating {0} characters".format(self.num_characters))
        # Generate some basic information for each person
        for n in range(self.num_characters):
            pers = person.Person()
            logger.info("Created person: {0} {1}".format(pers.first_name, pers.last_name))
            # Generate a birth date. Each person has to be at least 18 years old
            # NOTE(review): assumes current_date - 18 years >= start_date;
            # otherwise random.randint raises ValueError.
            pers.birth_date = random.randint(self.start_date, self.current_date - (3600 * 24 * 365 * 18))
            logger.info("Born: {0}".format(datetime.datetime.fromtimestamp(pers.birth_date).strftime("%Y-%m-%d")))
            self.people.append(pers)
        self.method = weighted_roll(murder.MURDER_METHOD)
        self.motive = weighted_roll(murder.MURDER_MOTIVE)
        # By convention, the first character is the victim and the
        # second one the murderer.
        self.people[0].is_victim = True
        self.people[1].is_murderer = True
        # Generate a relationship for every unordered pair.
        for pa, pers_a in enumerate(self.people):
            for pers_b in self.people[pa + 1:]:
                relationship.generate(pers_a, pers_b)

    def encode(self):
        """
        Encodes the object as a data structure
        Useful when writing output
        """
        out = {
            "mystery_name": "Notorious crime",
            "start_date": datetime.datetime.fromtimestamp(self.start_date).strftime("%Y-%m-%d"),
            "current_date": datetime.datetime.fromtimestamp(self.current_date).strftime("%Y-%m-%d"),
            "murder_method": self.method,
            "murder_motive": self.motive,
            "characters": [p.encode() for p in self.people]
        }
        return out
| lgpl-3.0 |
JingZhou0404/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/python.py | 120 | 5538 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in Python files."""
import re
from StringIO import StringIO
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.thirdparty.autoinstalled import pep8
from webkitpy.thirdparty.autoinstalled.pylint import lint
from webkitpy.thirdparty.autoinstalled.pylint.reporters.text import ParseableTextReporter
class PythonChecker(object):
    """Runs pep8 and pylint over a Python file and routes each of their
    complaints through the provided style-error handler."""

    def __init__(self, file_path, handle_style_error):
        self._file_path = file_path
        self._handle_style_error = handle_style_error

    def check(self, lines):
        self._check_pep8(lines)
        self._check_pylint(lines)

    def _check_pep8(self, lines):
        # pep8 requires its module-level options to be initialized
        # before Checker.check_all() will execute.
        pep8.process_options(arglist=[self._file_path])
        checker = pep8.Checker(self._file_path)

        def _report(line_number, offset, text, check):
            # pep8 reports "CODE message"; split the two apart.
            # FIXME: Incorporate the character offset into the error
            # output.  This will require updating the error handler
            # __call__ signature to include an optional "offset"
            # parameter.
            code = text[:4]
            message = text[5:]
            self._handle_style_error(line_number, "pep8/" + code, 5, message)

        checker.report_error = _report
        checker.check_all()

    def _check_pylint(self, lines):
        # FIXME: for now, we only report pylint errors, but we should be
        # catching and filtering warnings using the rules in
        # style/checker.py instead.
        report = Pylinter().run(['-E', self._file_path])
        pattern = re.compile('([^:]+):([^:]+): \[([^]]+)\] (.*)')
        for line in report.getvalue().splitlines():
            parsed = pattern.match(line)
            assert parsed
            line_number = int(parsed.group(2))
            parts = parsed.group(3).split(', ')
            category = 'pylint/' + (parts[0])
            if len(parts) > 1:
                message = '[%s] %s' % (parts[1], parsed.group(4))
            else:
                message = parsed.group(4)
            self._handle_style_error(line_number, category, 5, message)
class Pylinter(object):
    """Thin wrapper that runs pylint with WebKit's checked-in pylintrc
    and captures its output through a filtering buffer."""

    # We filter out these messages because they are bugs in pylint that produce false positives.
    # FIXME: Does it make sense to combine these rules with the rules in style/checker.py somehow?
    FALSE_POSITIVES = [
        # possibly http://www.logilab.org/ticket/98613 ?
        "Instance of 'Popen' has no 'poll' member",
        "Instance of 'Popen' has no 'returncode' member",
        "Instance of 'Popen' has no 'stdin' member",
        "Instance of 'Popen' has no 'stdout' member",
        "Instance of 'Popen' has no 'stderr' member",
        "Instance of 'Popen' has no 'wait' member",
        "Instance of 'Popen' has no 'pid' member",
    ]

    def __init__(self):
        # Absolute path to the pylint configuration shipped with WebKit.
        self._pylintrc = WebKitFinder(FileSystem()).path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'pylintrc')

    def run(self, argv):
        """Run pylint with ``argv`` appended after the rcfile option and
        return the captured (false-positive-filtered) output buffer."""
        output = _FilteredStringIO(self.FALSE_POSITIVES)
        lint.Run(['--rcfile', self._pylintrc] + argv, reporter=ParseableTextReporter(output=output), exit=False)
        return output
class _FilteredStringIO(StringIO):
def __init__(self, bad_messages):
StringIO.__init__(self)
self.dropped_last_msg = False
self.bad_messages = bad_messages
def write(self, msg=''):
if not self._filter(msg):
StringIO.write(self, msg)
def _filter(self, msg):
if any(bad_message in msg for bad_message in self.bad_messages):
self.dropped_last_msg = True
return True
if self.dropped_last_msg:
# We drop the newline after a dropped message as well.
self.dropped_last_msg = False
if msg == '\n':
return True
return False
| bsd-3-clause |
hskang9/WebVR | inventory/views.py | 1 | 1123 | from django.shortcuts import render
from django.http import Http404
from django.http import HttpResponse
from inventory.models import *
# Mobile Web UI
def index(request):
    """Render the mobile-web login page, passing every House record to
    the template as ``houses``."""
    houses = House.objects.all()
    return render(request, 'mobile_web/login/login.html', {
        'houses': houses,
    })
def map(request):
    """Render the mobile-web map page (static template, no context)."""
    # NOTE: shadows the builtin ``map`` at module scope; harmless here
    # since this module only defines view callables.
    return render(request, 'mobile_web/map/map.html')
def list(request):
    """Render the mobile-web list page (static template, no context)."""
    # NOTE: shadows the builtin ``list`` at module scope.
    return render(request, 'mobile_web/list/list.html')
def house(request):
    """Render the mobile-web house page (static template, no context)."""
    return render(request, 'mobile_web/house/house.html')
# VR scene views: each one renders a static VR template with no context.
def panorama(request):
    """Render the VR panorama scene."""
    return render(request, 'VR/panorama.html')
def hackingroom1(request):
    """Render the first VR hacking-room scene."""
    return render(request, 'VR/hackingroom1.html')
def hackingroom2(request):
    """Render the second VR hacking-room scene."""
    return render(request, 'VR/hackingroom2.html')
def hallway(request):
    """Render the VR hallway scene."""
    return render(request, 'VR/hallway.html')
def lobby(request):
    """Render the VR lobby scene."""
    return render(request, 'VR/lobby.html')
def office(request):
    """Render the VR office scene."""
    return render(request, 'VR/office.html')
def toilet4man(request):
    """Render the VR men's toilet scene."""
    return render(request, 'VR/toilet4man.html')
def customer_center(request):
    """Render the VR customer-center scene."""
    return render(request, 'VR/customer_center.html')
guewen/account-financial-tools | __unported__/account_credit_control/__init__.py | 10 | 1159 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import mail
from . import run
from . import line
from . import account
from . import partner
from . import policy
from . import company
from . import wizard
from . import report
from . import invoice
| agpl-3.0 |
ema/conpaas | conpaas-services/contrib/libcloud/loadbalancer/base.py | 4 | 8899 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.base import ConnectionKey, BaseDriver
from libcloud.common.types import LibcloudError
__all__ = [
"Member",
"LoadBalancer",
"Driver",
"Algorithm"
]
class Member(object):
    """A single backend node (ip:port) that can be attached to a
    load balancer, optionally carrying driver-specific extra data."""

    def __init__(self, id, ip, port, balancer=None, extra=None):
        # Normalize the id to a string; falsy ids become None.
        if id:
            self.id = str(id)
        else:
            self.id = None
        self.ip = ip
        self.port = port
        self.balancer = balancer
        self.extra = extra if extra else {}

    def __repr__(self):
        return '<Member: id=%s, address=%s:%s>' % (self.id, self.ip,
                                                   self.port)
class Algorithm(object):
    """Enumeration of the provider-neutral load-balancing strategies."""

    RANDOM = 0
    ROUND_ROBIN = 1
    LEAST_CONNECTIONS = 2
    WEIGHTED_ROUND_ROBIN = 3
    WEIGHTED_LEAST_CONNECTIONS = 4


# Drivers fall back to round-robin unless told otherwise.
DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN
class LoadBalancer(object):
    """Common representation of a provider load balancer.

    All operations are delegated to the driver that produced this object.
    """

    name = None
    website = None

    def __init__(self, id, name, state, ip, port, driver, extra=None):
        # Falsy ids collapse to None, everything else becomes a string.
        self.id = None if not id else str(id)
        self.name = name
        self.state = state
        self.ip = ip
        self.port = port
        self.driver = driver
        self.extra = extra if extra else {}

    def attach_compute_node(self, node):
        # The driver knows how to turn a compute node into a member.
        return self.driver.balancer_attach_compute_node(balancer=self,
                                                        node=node)

    def attach_member(self, member):
        return self.driver.balancer_attach_member(balancer=self,
                                                  member=member)

    def detach_member(self, member):
        return self.driver.balancer_detach_member(balancer=self,
                                                  member=member)

    def list_members(self):
        return self.driver.balancer_list_members(balancer=self)

    def destroy(self):
        return self.driver.destroy_balancer(balancer=self)

    def __repr__(self):
        return '<LoadBalancer: id=%s, name=%s, state=%s>' % (
            self.id, self.name, self.state)
class Driver(BaseDriver):
    """
    A base LBDriver class to derive from

    This class is always subclassed by a specific driver.
    """

    connectionCls = ConnectionKey
    # Subclasses populate these two maps to translate between the
    # provider-neutral Algorithm constants and the provider's own values.
    _ALGORITHM_TO_VALUE_MAP = {}
    _VALUE_TO_ALGORITHM_MAP = {}

    def __init__(self, key, secret=None, secure=True, host=None,
                 port=None, **kwargs):
        # No LB-specific setup; simply forward the credentials/endpoint
        # configuration to BaseDriver.
        super(Driver, self).__init__(key=key, secret=secret, secure=secure,
                                     host=host, port=port, **kwargs)

    def list_protocols(self):
        """
        Return a list of supported protocols.

        @rtype: C{list} of C{str}
        """
        raise NotImplementedError(
            'list_protocols not implemented for this driver')

    def list_balancers(self):
        """
        List all loadbalancers

        @rtype: C{list} of L{LoadBalancer}
        """
        raise NotImplementedError(
            'list_balancers not implemented for this driver')

    def create_balancer(self, name, port, protocol, algorithm, members):
        """
        Create a new load balancer instance

        @param name: Name of the new load balancer (required)
        @type  name: C{str}

        @param port: Port the load balancer should listen on, defaults to 80
        @type  port: C{str}

        @param protocol: Loadbalancer protocol, defaults to http.
        @type  protocol: C{str}

        @param members: list of Members to attach to balancer
        @type  members: C{list} of L{Member}

        @param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN
        @type  algorithm: L{Algorithm}

        @rtype: L{LoadBalancer}
        """
        raise NotImplementedError(
            'create_balancer not implemented for this driver')

    def destroy_balancer(self, balancer):
        """Destroy a load balancer

        @param balancer: LoadBalancer which should be used
        @type  balancer: L{LoadBalancer}

        @return: True if the destroy was successful, otherwise False
        @rtype: C{bool}
        """
        raise NotImplementedError(
            'destroy_balancer not implemented for this driver')

    def get_balancer(self, balancer_id):
        """
        Return a L{LoadBalancer} object.

        @param balancer_id: id of a load balancer you want to fetch
        @type  balancer_id: C{str}

        @rtype: L{LoadBalancer}
        """
        raise NotImplementedError(
            'get_balancer not implemented for this driver')

    def update_balancer(self, balancer, **kwargs):
        """
        Sets the name, algorithm, protocol, or port on a load balancer.

        @param balancer: LoadBalancer which should be used
        @type  balancer: L{LoadBalancer}

        @keyword name: New load balancer name
        @type    name: C{str}

        @keyword algorithm: New load balancer algorithm
        @type    algorithm: L{Algorithm}

        @keyword protocol: New load balancer protocol
        @type    protocol: C{str}

        @keyword port: New load balancer port
        @type    port: C{int}

        @rtype: L{LoadBalancer}
        """
        raise NotImplementedError(
            'update_balancer not implemented for this driver')

    def balancer_attach_compute_node(self, balancer, node):
        """
        Attach a compute node as a member to the load balancer.

        @param balancer: LoadBalancer which should be used
        @type  balancer: L{LoadBalancer}

        @param node: Node to join to the balancer
        @type  node: L{Node}

        @return: Member after joining the balancer.
        @rtype: L{Member}
        """
        # Default implementation: wrap the node's first public IP in a
        # Member on the balancer's port and delegate to attach_member.
        return self.balancer_attach_member(balancer, Member(id=None,
                                           ip=node.public_ips[0],
                                           port=balancer.port))

    def balancer_attach_member(self, balancer, member):
        """
        Attach a member to balancer

        @param balancer: LoadBalancer which should be used
        @type  balancer: L{LoadBalancer}

        @param member: Member to join to the balancer
        @type  member: L{Member}

        @return: Member after joining the balancer.
        @rtype: L{Member}
        """
        raise NotImplementedError(
            'balancer_attach_member not implemented for this driver')

    def balancer_detach_member(self, balancer, member):
        """
        Detach member from balancer

        @param balancer: LoadBalancer which should be used
        @type  balancer: L{LoadBalancer}

        @param member: Member which should be used
        @type  member: L{Member}

        @return: True if member detach was successful, otherwise False
        @rtype: C{bool}
        """
        raise NotImplementedError(
            'balancer_detach_member not implemented for this driver')

    def balancer_list_members(self, balancer):
        """
        Return list of members attached to balancer

        @param balancer: LoadBalancer which should be used
        @type  balancer: L{LoadBalancer}

        @rtype: C{list} of L{Member}
        """
        raise NotImplementedError(
            'balancer_list_members not implemented for this driver')

    def _value_to_algorithm(self, value):
        """
        Return C{LBAlgorithm} based on the value.
        """
        # Unknown provider values surface as a LibcloudError rather than a
        # bare KeyError so callers get a driver-aware message.
        try:
            return self._VALUE_TO_ALGORITHM_MAP[value]
        except KeyError:
            raise LibcloudError(value='Invalid value: %s' % (value),
                                driver=self)

    def _algorithm_to_value(self, algorithm):
        """
        Return value based in the algorithm (C{LBAlgorithm}).
        """
        try:
            return self._ALGORITHM_TO_VALUE_MAP[algorithm]
        except KeyError:
            raise LibcloudError(value='Invalid algorithm: %s' % (algorithm),
                                driver=self)

    def list_supported_algorithms(self):
        """
        Return algorithms supported by this driver.

        @rtype: C{list} of C{str}
        """
        return list(self._ALGORITHM_TO_VALUE_MAP.keys())
| bsd-3-clause |
fjbatresv/odoo | addons/account_followup/tests/__init__.py | 261 | 1088 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_account_followup
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dfunckt/django | django/contrib/redirects/migrations/0001_initial.py | 308 | 1561 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the redirects app: creates the Redirect model."""

    # Redirect has a ForeignKey to sites.Site, so the sites app's initial
    # migration must be applied first.
    dependencies = [
        ('sites', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Redirect',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('site', models.ForeignKey(
                    to='sites.Site',
                    to_field='id',
                    on_delete=models.CASCADE,
                    verbose_name='site',
                )),
                ('old_path', models.CharField(
                    help_text=(
                        "This should be an absolute path, excluding the domain name. Example: '/events/search/'."
                    ), max_length=200, verbose_name='redirect from', db_index=True
                )),
                ('new_path', models.CharField(
                    help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.",
                    max_length=200, verbose_name='redirect to', blank=True
                )),
            ],
            options={
                'ordering': ('old_path',),
                # A given source path may redirect only once per site.
                'unique_together': set([('site', 'old_path')]),
                'db_table': 'django_redirect',
                'verbose_name': 'redirect',
                'verbose_name_plural': 'redirects',
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause |
mozilla/captain | vendor/lib/python/django/contrib/admin/templatetags/log.py | 104 | 2124 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
    """Template node that stores recent ``LogEntry`` rows in the context."""

    def __init__(self, limit, varname, user):
        self.limit, self.varname, self.user = limit, varname, user

    def __repr__(self):
        return "<GetAdminLog Node>"

    def render(self, context):
        # ``limit`` arrives as the raw token string; normalise it once so
        # both branches slice with an integer.  Previously the
        # ``user is None`` branch sliced with the string itself
        # (``[:self.limit]``), which fails on a queryset.
        limit = int(self.limit)
        if self.user is None:
            entries = LogEntry.objects.all()
        else:
            user_id = self.user
            if not user_id.isdigit():
                # Not a literal id: resolve the context variable to a user
                # object and take its id.
                user_id = context[self.user].id
            entries = LogEntry.objects.filter(user__id__exact=user_id)
        context[self.varname] = entries.select_related(
            'content_type', 'user')[:limit]
        return ''
@register.tag
def get_admin_log(parser, token):
    """
    Populates a template variable with the admin log for the given criteria.

    Usage::

        {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}

    Examples::

        {% get_admin_log 10 as admin_log for_user 23 %}
        {% get_admin_log 10 as admin_log for_user user %}
        {% get_admin_log 10 as admin_log %}

    Note that ``context_var_containing_user_obj`` can be a hard-coded integer
    (user ID) or the name of a template context variable containing the user
    object whose ID you want.
    """
    tokens = token.contents.split()
    if len(tokens) < 4:
        raise template.TemplateSyntaxError(
            "'get_admin_log' statements require two arguments")
    if not tokens[1].isdigit():
        raise template.TemplateSyntaxError(
            "First argument to 'get_admin_log' must be an integer")
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError(
            "Second argument to 'get_admin_log' must be 'as'")
    if len(tokens) > 4:
        if tokens[4] != 'for_user':
            raise template.TemplateSyntaxError(
                "Fourth argument to 'get_admin_log' must be 'for_user'")
        if len(tokens) < 6:
            # Previously a trailing 'for_user' with no value was silently
            # ignored; report the mistake instead.
            raise template.TemplateSyntaxError(
                "'for_user' in 'get_admin_log' requires a user argument")
    return AdminLogNode(limit=tokens[1], varname=tokens[3],
                        user=(tokens[5] if len(tokens) > 5 else None))
| mpl-2.0 |
apache-spark-on-k8s/spark | examples/src/main/python/ml/string_indexer_example.py | 123 | 1402 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Build (or reuse) a SparkSession; the appName shows up in the Spark UI.
    spark = SparkSession\
        .builder\
        .appName("StringIndexerExample")\
        .getOrCreate()

    # The $example$ markers delimit the snippet included in the Spark ML
    # documentation build -- keep the code between them self-contained.
    # $example on$
    df = spark.createDataFrame(
        [(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
        ["id", "category"])

    indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
    indexed = indexer.fit(df).transform(df)
    indexed.show()
    # $example off$

    spark.stop()
| apache-2.0 |
renndieG/androguard | androguard/core/data/data.py | 38 | 12662 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkx import DiGraph
import os
from xml.sax.saxutils import escape
from androguard.core.analysis import analysis
try :
    # Prefer the fast native implementation when the libsign extension
    # is available.
    from androguard.core.analysis.libsign.libsign import entropy
except ImportError :
    import math

    def entropy(data):
        """Return the Shannon entropy (bits per symbol) of *data*.

        Pure-python fallback used when the native libsign extension is
        unavailable.  Counts every distinct symbol in a single O(n) pass
        instead of scanning the buffer once per possible byte value, and
        therefore also handles symbols outside chr(0)..chr(255).
        """
        if len(data) == 0:
            # Match the native behaviour for empty input.
            return 0
        total = float(len(data))
        counts = {}
        for symbol in data:
            counts[symbol] = counts.get(symbol, 0) + 1
        result = 0.0
        for occurrences in counts.values():
            p_x = occurrences / total
            result -= p_x * math.log(p_x, 2)
        return result
DEFAULT_SIGNATURE = analysis.SIGNATURE_L0_4


def create_entropies(vmx, m):
    """Return [default signature, 4 entropy values] for the method *m*.

    The entropies are computed over the Android-API, Java-API, hex and
    exception ("L2") signature strings respectively.  Methods for which
    no signature data is available yield neutral placeholders.
    """
    try:
        default_signature = vmx.get_method_signature(
            m, predef_sign=DEFAULT_SIGNATURE).get_string()
        android_sig = vmx.get_method_signature(
            m, "L4", {"L4": {"arguments": ["Landroid"]}}).get_string()
        java_sig = vmx.get_method_signature(
            m, "L4", {"L4": {"arguments": ["Ljava"]}}).get_string()
        hex_sig = vmx.get_method_signature(m, "hex").get_string()
        exception_sig = vmx.get_method_signature(m, "L2").get_string()

        return [default_signature,
                entropy(android_sig),
                entropy(java_sig),
                entropy(hex_sig),
                entropy(exception_sig)]
    except KeyError:
        return ["", 0.0, 0.0, 0.0, 0.0]
def create_info(vmx, m):
    """Build the attribute dictionary (signature + entropies) for method *m*."""
    E = create_entropies(vmx, m)
    return {
        "signature": E[0],
        "signature_entropy": entropy(E[0]),
        "android_api_entropy": E[1],
        "java_api_entropy": E[2],
        "hex_entropy": E[3],
        "exceptions_entropy": E[4],
    }
class Data :
def __init__(self, vm, vmx, gvmx, a=None) :
self.vm = vm
self.vmx = vmx
self.gvmx = gvmx
self.a = a
self.apk_data = None
self.dex_data = None
if self.a != None :
self.apk_data = ApkViewer( self.a )
self.dex_data = DexViewer( vm, vmx, gvmx )
self.gvmx.set_new_attributes( create_info )
self.export_methods_to_gml()
def export_methodcalls_to_gml(self) :
return self.gvmx.export_to_gml()
def export_methods_to_gml(self) :
print self.gvmx.G
for node in self.gvmx.G.nodes() :
print self.gvmx.nodes_id[ node ].method_name, self.gvmx.nodes_id[ node ].get_attributes()
def export_apk_to_gml(self) :
if self.apk_data != None :
return self.apk_data.export_to_gml()
def export_dex_to_gml(self) :
if self.dex_data != None :
return self.dex_data.export_to_gml()
class DexViewer :
    """Renders each class of a DEX file as a yEd GraphML document.

    export_to_gml() returns one XML string per class; the other methods
    build the individual <node>/<edge> fragments.
    """

    def __init__(self, vm, vmx, gvmx) :
        self.vm = vm      # dex object (provides get_classes/dotbuff)
        self.vmx = vmx    # its analysis object (per-method basic blocks)
        self.gvmx = gvmx  # method-call graph; unused here -- kept for parity

    def _create_node(self, id, height, width, color, label) :
        # Build one GraphML <node> with a sized, coloured shape; height and
        # width are character counts scaled to pixels below.
        buff = "<node id=\"%d\">\n" % id
        buff += "<data key=\"d6\">\n"
        buff += "<y:ShapeNode>\n"
        buff += "<y:Geometry height=\"%f\" width=\"%f\"/>\n" % (16 * height, 7.5 * width)
        buff += "<y:Fill color=\"#%s\" transparent=\"false\"/>\n" % color
        buff += "<y:NodeLabel alignment=\"left\" autoSizePolicy=\"content\" fontFamily=\"Dialog\" fontSize=\"13\" fontStyle=\"plain\" hasBackgroundColor=\"false\" hasLineColor=\"false\" modelName=\"internal\" modelPosition=\"c\" textColor=\"#000000\" visible=\"true\">\n"
        buff += escape(label)
        buff += "</y:NodeLabel>\n"
        buff += "</y:ShapeNode>\n"
        buff += "</data>\n"
        buff += "</node>\n"

        return buff

    def add_exception_node(self, exception, id_i) :
        # Purple node describing an exception handler range.
        buff = ""  # NOTE(review): unused -- _create_node builds the XML.
        # 9933FF
        # NOTE(review): the colour actually passed below is "9333FF", not
        # the "9933FF" in the comment above -- possible typo; confirm.
        height = 2
        width = 0
        label = ""

        label += "%x:%x\n" % (exception.start, exception.end)
        for i in exception.exceptions :
            c_label = "\t(%s -> %x %s)\n" % (i[0], i[1], i[2].get_name())
            label += c_label

            width = max(len(c_label), width)
            height += 1

        return self._create_node( id_i, height, width, "9333FF", label )

    def add_method_node(self, i, id_i) :
        # Red header node showing the method name and descriptor.
        height = 0
        width = 0
        label = ""

        label += i.get_name() + "\n"
        label += i.get_descriptor()

        height = 3
        width = len(label)

        return self._create_node( id_i, height, width, "FF0000", label )

    def add_node(self, i, id_i) :
        # Yellow node listing the disassembled instructions of basic block i.
        height = 0
        width = 0
        label = ""

        idx = i.start
        for ins in i.get_instructions() :
            c_label = "%x %s\n" % (idx, self.vm.dotbuff(ins, idx))
            idx += ins.get_length()
            label += c_label
            width = max(width, len(c_label))
            height += 1

        # Pad very small blocks so the label stays readable.
        if height < 10 :
            height += 3

        return self._create_node( id_i, height, width, "FFCC00", label )

    def add_edge(self, i, id_i, j, id_j, l_eid, val) :
        # Build one <edge>; val selects the colour: 0 green (fallthrough
        # after a branch), 1 red (taken branch), otherwise blue.
        buff = "<edge id=\"%d\" source=\"%d\" target=\"%d\">\n" % (len(l_eid), id_i, id_j)

        buff += "<data key=\"d9\">\n"
        buff += "<y:PolyLineEdge>\n"
        buff += "<y:Arrows source=\"none\" target=\"standard\"/>\n"

        if val == 0 :
            buff += "<y:LineStyle color=\"#00FF00\" type=\"line\" width=\"1.0\"/>\n"
        elif val == 1 :
            buff += "<y:LineStyle color=\"#FF0000\" type=\"line\" width=\"1.0\"/>\n"
        else :
            buff += "<y:LineStyle color=\"#0000FF\" type=\"line\" width=\"1.0\"/>\n"

        buff += "</y:PolyLineEdge>\n"
        buff += "</data>\n"
        buff += "</edge>\n"

        # Remember the edge id so later edges get increasing numbers.
        l_eid[ "%d+%d" % (id_i, id_j) ] = len(l_eid)
        return buff

    def new_id(self, i, l) :
        # Assign (or look up) a stable numeric GraphML id for object i.
        try :
            return l[i]
        except KeyError :
            l[i] = len(l)
            return l[i]

    def export_to_gml(self) :
        # Returns {class_name: graphml_string} for every class in the DEX.
        H = {}

        for _class in self.vm.get_classes() :
            name = _class.get_name()
            # Strip the leading 'L' and trailing ';' of the type descriptor.
            name = name[1:-1]

            buff = ""
            buff += "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
            buff += "<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xmlns:yed=\"http://www.yworks.com/xml/yed/3\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd\">\n"
            buff += "<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"d5\"/>\n"
            buff += "<key for=\"node\" id=\"d6\" yfiles.type=\"nodegraphics\"/>\n"
            buff += "<key for=\"edge\" id=\"d9\" yfiles.type=\"edgegraphics\"/>\n"
            buff += "<graph edgedefault=\"directed\" id=\"G\">\n"

            print name

            buff_nodes = ""
            buff_edges = ""

            # Per-class id maps for nodes and edges.
            l_id = {}
            l_eid = {}

            for method in _class.get_methods() :
                mx = self.vmx.get_method( method )
                exceptions = mx.exceptions  # NOTE(review): assigned, never used.

                id_method = self.new_id(method, l_id)
                buff_nodes += self.add_method_node(method, id_method)

                for i in mx.basic_blocks.get() :
                    id_i = self.new_id(i, l_id)
                    print i, id_i, i.exception_analysis

                    buff_nodes += self.add_node( i, id_i )

                    # add childs nodes
                    val = 0
                    if len(i.childs) > 1 :
                        val = 1
                    elif len(i.childs) == 1 :
                        val = 2

                    for j in i.childs :
                        print "\t", j
                        id_j = self.new_id(j[-1], l_id)
                        buff_edges += self.add_edge(i, id_i, j[-1], id_j, l_eid, val)
                        # Only the first child of a branch is drawn green.
                        if val == 1 :
                            val = 0

                    # add exceptions node
                    if i.exception_analysis != None :
                        id_exceptions = self.new_id(i.exception_analysis, l_id)
                        buff_nodes += self.add_exception_node(i.exception_analysis, id_exceptions)
                        buff_edges += self.add_edge(None, id_exceptions, None, id_i, l_eid, 2)

                # Link the method header to its first basic block; relies on
                # the block receiving id_method+1 in new_id -- TODO confirm.
                buff_edges += self.add_edge(None, id_method, None, id_method+1, l_eid, 2)

            buff += buff_nodes
            buff += buff_edges

            buff += "</graph>\n"
            buff += "</graphml>\n"

            H[ name ] = buff

        return H
class Directory:
    """Graph node representing a directory inside the APK archive."""

    def __init__(self, name):
        self.name = name
        self.basename = os.path.basename(name)
        # Directories render red unless recoloured via set_color().
        self.color = "FF0000"
        self.width = len(self.name)

    def set_color(self, color):
        self.color = color
class File:
    """Graph node representing a single file entry of the APK archive."""

    def __init__(self, name, file_type, file_crc):
        self.name = name
        self.basename = os.path.basename(name)
        self.file_type = file_type
        self.file_crc = file_crc
        # Files render yellow.
        self.color = "FFCC00"
        # Wide enough for the longer of the path and the type description.
        self.width = max(len(self.name), len(self.file_type))
def splitall(path, z):
    """Append every successive parent of *path* to the list *z*.

    Recurses on the head of os.path.split until the path is exhausted;
    the last element appended is always the empty string.
    """
    if not path:
        return
    parent = os.path.split(path)[0]
    z.append(parent)
    return splitall(parent, z)
class ApkViewer :
    """Builds a directed graph of the APK's directory/file hierarchy and
    exports it as a yEd GraphML document."""

    def __init__(self, a) :
        self.a = a

        self.G = DiGraph()
        self.all_files = {}   # directory path -> Directory node (dedup)
        self.ids = {}         # node object -> numeric GraphML id

        # Synthetic green root for the whole archive.
        root = Directory( "APK" )
        root.set_color( "00FF00" )

        self.ids[ root ] = len(self.ids)
        self.G.add_node( root )

        for x, y, z in self.a.get_files_information() :
            # x = path, y = file type description, z = CRC.
            print x, y, z, os.path.basename(x)

            l = []
            splitall( x, l )
            l.reverse()
            # Drop the leading empty-string component produced by splitall.
            l.pop(0)

            # Walk (creating as needed) the directory chain down from root.
            last = root
            for i in l :
                if i not in self.all_files :
                    tmp = Directory( i )
                    self.ids[ tmp ] = len(self.ids)
                    self.all_files[ i ] = tmp
                else :
                    tmp = self.all_files[ i ]

                self.G.add_edge(last, tmp)
                last = tmp

            # Attach the file itself to its innermost directory.
            n1 = last
            n2 = File( x, y, z )
            self.G.add_edge(n1, n2)

            self.ids[ n2 ] = len(self.ids)

    def export_to_gml(self) :
        # Serialise the graph to a yEd GraphML string.
        buff = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
        buff += "<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xmlns:yed=\"http://www.yworks.com/xml/yed/3\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd\">\n"
        buff += "<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"d5\"/>\n"
        buff += "<key for=\"node\" id=\"d6\" yfiles.type=\"nodegraphics\"/>\n"

        buff += "<graph edgedefault=\"directed\" id=\"G\">\n"

        for node in self.G.nodes() :
            print node

            buff += "<node id=\"%d\">\n" % self.ids[node]
            buff += "<data key=\"d6\">\n"
            buff += "<y:ShapeNode>\n"
            buff += "<y:Geometry height=\"%f\" width=\"%f\"/>\n" % (60.0, 7 * node.width)
            buff += "<y:Fill color=\"#%s\" transparent=\"false\"/>\n" % node.color
            buff += "<y:NodeLabel>\n"
            buff += "%s\n" % node.basename

            # Files additionally show their type and CRC in the label.
            if isinstance(node, File) :
                buff += "%s\n" % node.file_type
                buff += "%s\n" % hex(node.file_crc)

            buff += "</y:NodeLabel>\n"
            buff += "</y:ShapeNode>\n"
            buff += "</data>\n"
            buff += "</node>\n"

        nb = 0
        for edge in self.G.edges() :
            buff += "<edge id=\"%d\" source=\"%d\" target=\"%d\">\n" % (nb, self.ids[edge[0]], self.ids[edge[1]])
            buff += "</edge>\n"
            nb += 1

        buff += "</graph>\n"
        buff += "</graphml>\n"

        return buff
| apache-2.0 |
jmaris/keepsafe-server | db.py | 1 | 2816 | from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# Origins permitted to make cross-site requests against this API
# (consumed by CorsMiddleware below).
ALLOWED_ORIGINS = ['http://localhost:4200']

Base = declarative_base()

# NOTE(review): database credentials are hard-coded in the DSN and thus in
# source control -- consider loading them from configuration/environment.
engine = create_engine('mysql+pymysql://keepsafe:7k77xJuGeqZYu97gpFfS8xsheXYHGHjB@localhost/keepsafe_db')
class User(Base):
    """A registered user, identified by a 32-byte public key."""
    __tablename__ = 'user'

    id = Column(Integer, primary_key = True)
    # Raw 32-byte public key material -- key algorithm not visible here;
    # confirm against the code that writes it.
    public_key = Column(Binary(32))
class Captcha(Base):
    """A served captcha, keyed by UUID, awaiting its answer."""
    __tablename__ = 'captcha'

    uuid = Column(String(36), primary_key = True)
    # 32-byte hash of the expected answer (hash algorithm not visible here).
    answer_hash = Column(Binary(32))
    created = Column(DateTime, default = func.now())
    # Hashed client fingerprint (32 bytes each) rather than raw IP/UA.
    ip_address_hash = Column(Binary(32))
    user_agent_hash = Column(Binary(32))
class Challenge(Base):
    """An authentication challenge issued to a specific user."""
    __tablename__ = 'challenge'

    uuid = Column(String(36), primary_key = True)
    user_id = Column(Integer, ForeignKey("user.id"))
    # 32-byte hash of the expected challenge response.
    answer_hash = Column(Binary(32))
    created = Column(DateTime, default = func.now())

    user = relationship("User")
class UserSession(Base):
    """A logged-in session (note: the table is named 'session')."""
    __tablename__ = 'session'

    uuid = Column(String(36), primary_key = True)
    user_id = Column(Integer, ForeignKey("user.id"))
    created = Column(DateTime, default = func.now())
    # Hashed client fingerprint, mirroring the Captcha columns.
    ip_address_hash = Column(Binary(32))
    user_agent_hash = Column(Binary(32))

    user = relationship("User")
class Devices(Base):
    """A device pairing record.

    NOTE(review): unlike the other models this one uses camelCase
    ``userID`` with no ForeignKey, stores ``answer`` in clear text rather
    than hashed, and has no ``created`` default -- confirm whether these
    divergences are intentional before changing the schema.
    """
    __tablename__ = 'devices'

    uuid = Column(String(36), primary_key = True)
    userID = Column(Integer)
    answer = Column(String(6))
    # 44 characters -- matches base64 of 32 bytes; confirm encoding.
    public_key = Column(String(44))
    expires = Column(DateTime)
    permissions = Column(String(4))
# Create any missing tables at import time, then open the module-wide
# session shared by DatabaseSessionMiddleware.
Base.metadata.create_all(engine)

Session = sessionmaker(bind = engine)
session = Session()
class CorsMiddleware(object):
    """Falcon-style middleware adding CORS headers for whitelisted origins."""

    def process_request(self, request, response):
        origin = request.get_header('Origin')
        # Only echo the origin back when it is explicitly whitelisted.
        if origin not in ALLOWED_ORIGINS:
            return
        response.set_header('Access-Control-Allow-Origin', origin)
        response.set_header('Access-Control-Allow-Headers',
                            'Origin, Content-Type, Accept')
class DatabaseSessionMiddleware(object):
    """Falcon-style middleware exposing a shared DB session to handlers."""

    def __init__(self, db_session):
        self._db_session = db_session

    def process_request(self, req, res, resource=None):
        # Make the session reachable from every resource handler.
        req.context['db_session'] = self._db_session

    def process_response(self, req, res, resource=None):
        # Release the session once the response has been produced.
        req.context['db_session'].close()
class ConfigurationMiddleware(object):
    """Falcon-style middleware injecting server-wide config into requests."""

    def __init__(self, server_key_pair):
        self._configuration = {'server_key_pair': server_key_pair}

    def process_request(self, req, res, resource=None):
        req.context['configuration'] = self._configuration
| gpl-3.0 |
jkettleb/iris | lib/iris/tests/unit/analysis/regrid/test_RectilinearRegridder.py | 1 | 49795 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :class:`iris.analysis._regrid.RectilinearRegridder`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import mock
import numpy as np
from iris.analysis._regrid import RectilinearRegridder as Regridder
from iris.aux_factory import HybridHeightFactory
from iris.coord_systems import GeogCS, OSGB
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube
from iris.tests.stock import global_pp, lat_lon_cube, realistic_4d
# Location of this test's reference results, relative to the test-data root.
RESULT_DIR = ('analysis', 'regrid')

# Convenience to access Regridder static method.
regrid = Regridder._regrid
class Test__regrid__linear(tests.IrisTest):
    """Tests for RectilinearRegridder._regrid with the bilinear method."""

    def setUp(self):
        self.x = DimCoord(np.linspace(-2, 57, 60))
        self.y = DimCoord(np.linspace(0, 49, 50))
        self.xs, self.ys = np.meshgrid(self.x.points, self.y.points)

        def transformation(x, y):
            # Simple analytic surface used to generate the source data.
            return x + y ** 2

        def dim_extender(arr):
            # Add dimensions to the 2D data array so that we can test
            # higher dimensional functionality.
            return arr[np.newaxis, ..., np.newaxis] * [1, 2]

        self.data = dim_extender(transformation(self.xs, self.ys))

        target_x = np.linspace(-3, 60, 4)
        target_y = np.linspace(0.5, 51, 3)
        self.target_x, self.target_y = np.meshgrid(target_x, target_y)

        #: Expected values, which not quite the analytical value, but
        #: representative of the bilinear interpolation scheme.
        self.expected = np.array([[[[np.nan, np.nan],
                                    [18.5, 37.],
                                    [39.5, 79.],
                                    [np.nan, np.nan]],
                                   [[np.nan, np.nan],
                                    [681.25, 1362.5],
                                    [702.25, 1404.5],
                                    [np.nan, np.nan]],
                                   [[np.nan, np.nan],
                                    [np.nan, np.nan],
                                    [np.nan, np.nan],
                                    [np.nan, np.nan]]]])

        self.x_dim = 2
        self.y_dim = 1

    def assert_values(self, values):
        # values is a list of [x, y, [val1, val2]]
        xs, ys, expecteds = zip(*values)
        expecteds = np.array(expecteds)[None, None, ...]
        result = regrid(self.data, self.x_dim, self.y_dim,
                        self.x, self.y,
                        np.array([xs]), np.array([ys]))
        self.assertArrayAllClose(result, expecteds, rtol=1e-04)

        # Check that transposing the input data results in the same values.
        ndim = self.data.ndim
        result2 = regrid(self.data.T, ndim - self.x_dim - 1,
                         ndim - self.y_dim - 1,
                         self.x, self.y,
                         np.array([xs]), np.array([ys]))
        self.assertArrayEqual(result.T, result2)

    def test_single_values(self):
        # Check that the values are sensible e.g. (3 + 4**2 == 19)
        self.assert_values([[3, 4, [19, 38]],
                            [-2, 0, [-2, -4]],
                            [-2.01, 0, [np.nan, np.nan]],
                            [2, -0.01, [np.nan, np.nan]],
                            [57, 0, [57, 114]],
                            [57.01, 0, [np.nan, np.nan]],
                            [57, 49, [2458, 4916]],
                            [57, 49.01, [np.nan, np.nan]]])

    def test_simple_result(self):
        result = regrid(self.data, self.x_dim, self.y_dim,
                        self.x, self.y,
                        self.target_x, self.target_y)
        self.assertArrayEqual(result, self.expected)

    def test_simple_masked(self):
        data = np.ma.MaskedArray(self.data, mask=True)
        data.mask[:, 1:30, 1:30] = False
        result = regrid(data, self.x_dim, self.y_dim,
                        self.x, self.y,
                        self.target_x, self.target_y)
        expected_mask = np.array([[[[True, True], [True, True],
                                    [True, True], [True, True]],
                                   [[True, True], [False, False],
                                    [True, True], [True, True]],
                                   [[True, True], [True, True],
                                    [True, True], [True, True]]]], dtype=bool)
        expected = np.ma.MaskedArray(self.expected,
                                     mask=expected_mask)
        self.assertMaskedArrayEqual(result, expected)

    def test_simple_masked_no_mask(self):
        data = np.ma.MaskedArray(self.data, mask=False)
        result = regrid(data, self.x_dim, self.y_dim,
                        self.x, self.y,
                        self.target_x, self.target_y)
        self.assertIsInstance(result, np.ma.MaskedArray)

    def test_result_transpose_shape(self):
        ndim = self.data.ndim
        result = regrid(self.data.T, ndim - self.x_dim - 1,
                        ndim - self.y_dim - 1, self.x, self.y,
                        self.target_x, self.target_y)
        self.assertArrayEqual(result, self.expected.T)

    def test_reverse_x_coord(self):
        index = [slice(None)] * self.data.ndim
        index[self.x_dim] = slice(None, None, -1)
        # Index with a tuple: indexing an ndarray with a *list* of slices
        # is deprecated (and later removed) in numpy.
        result = regrid(self.data[tuple(index)], self.x_dim,
                        self.y_dim, self.x[::-1], self.y,
                        self.target_x, self.target_y)
        self.assertArrayEqual(result, self.expected)

    def test_circular_x_coord(self):
        # Check that interpolation of a circular src coordinate doesn't
        # result in an out of bounds value.
        self.x.circular = True
        self.x.units = 'degree'
        result = regrid(self.data, self.x_dim, self.y_dim,
                        self.x, self.y, np.array([[58]]),
                        np.array([[0]]))
        self.assertArrayAlmostEqual(result,
                                    np.array([56.80398671, 113.60797342],
                                             ndmin=self.data.ndim))
class Test__regrid__extrapolation_modes(tests.IrisTest):
values_by_method = {'linear': [[np.nan, np.nan, 2, 3, np.nan],
[np.nan, np.nan, 6, 7, np.nan],
[8, 9, 10, 11, np.nan]],
'nearest': [[np.nan, 1, 2, 3, np.nan],
[4, 5, 6, 7, np.nan],
[8, 9, 10, 11, np.nan]]}
extrapolate_values_by_method = {'linear': [[np.nan, np.nan, 2, 3, 4],
[np.nan, np.nan, 6, 7, 8],
[8, 9, 10, 11, 12]],
'nearest': [[np.nan, 1, 2, 3, 3],
[4, 5, 6, 7, 7],
[8, 9, 10, 11, 11]]}
def setUp(self):
self.methods = ('linear', 'nearest')
def _regrid(self, data, method, extrapolation_mode=None):
x = np.arange(4)
y = np.arange(3)
x_coord = DimCoord(x)
y_coord = DimCoord(y)
x_dim, y_dim = 1, 0
grid_x, grid_y = np.meshgrid(np.arange(5), y)
kwargs = dict(method=method)
if extrapolation_mode is not None:
kwargs['extrapolation_mode'] = extrapolation_mode
result = regrid(data, x_dim, y_dim, x_coord, y_coord,
grid_x, grid_y, **kwargs)
return result
def test_default_ndarray(self):
# NaN -> NaN
# Extrapolated -> NaN
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method)
self.assertNotIsInstance(result, np.ma.MaskedArray)
expected = self.values_by_method[method]
self.assertArrayEqual(result, expected)
def test_default_maskedarray(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> Masked
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
data[2, 3] = np.ma.masked
for method in self.methods:
result = self._regrid(data, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_default_maskedarray_none_masked(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> N/A
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_default_maskedarray_none_masked_expanded(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> N/A
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
# Make sure the mask has been expanded
data.mask = False
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_method_ndarray(self):
# NaN -> NaN
# Extrapolated -> linear
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method, 'extrapolate')
self.assertNotIsInstance(result, np.ma.MaskedArray)
expected = self.extrapolate_values_by_method[method]
self.assertArrayEqual(result, expected)
def test_method_maskedarray(self):
    """'extrapolate' mode on a masked array: only masked points stay masked."""
    # NaN -> NaN
    # Extrapolated -> linear
    # Masked -> Masked
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.ma.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    data[2, 3] = np.ma.masked
    for method in self.methods:
        result = self._regrid(data, method, 'extrapolate')
        self.assertIsInstance(result, np.ma.MaskedArray)
        mask = [[0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 1, 1]]
        values = self.extrapolate_values_by_method[method]
        expected = np.ma.MaskedArray(values, mask)
        self.assertMaskedArrayEqual(result, expected)
def test_nan_ndarray(self):
    """'nan' mode on a plain ndarray: extrapolated points become NaN."""
    # NaN -> NaN
    # Extrapolated -> NaN
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    for method in self.methods:
        result = self._regrid(data, method, 'nan')
        self.assertNotIsInstance(result, np.ma.MaskedArray)
        expected = self.values_by_method[method]
        self.assertArrayEqual(result, expected)
def test_nan_maskedarray(self):
    """'nan' mode on a masked array: masked points survive, others NaN."""
    # NaN -> NaN
    # Extrapolated -> NaN
    # Masked -> Masked
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.ma.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    data[2, 3] = np.ma.masked
    for method in self.methods:
        result = self._regrid(data, method, 'nan')
        self.assertIsInstance(result, np.ma.MaskedArray)
        mask = [[0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0]]
        values = self.values_by_method[method]
        expected = np.ma.MaskedArray(values, mask)
        self.assertMaskedArrayEqual(result, expected)
def test_error_ndarray(self):
    """'error' mode on a plain ndarray: extrapolation raises ValueError."""
    # Values irrelevant - the function raises an error.
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    for method in self.methods:
        with self.assertRaisesRegexp(ValueError, 'out of bounds'):
            self._regrid(data, method, 'error')
def test_error_maskedarray(self):
    """'error' mode on a masked array: extrapolation raises ValueError."""
    # Values irrelevant - the function raises an error.
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.ma.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    data[2, 3] = np.ma.masked
    for method in self.methods:
        with self.assertRaisesRegexp(ValueError, 'out of bounds'):
            self._regrid(data, method, 'error')
def test_mask_ndarray(self):
    """'mask' mode on a plain ndarray: result is promoted to a masked array."""
    # NaN -> NaN
    # Extrapolated -> Masked (this is different from all the other
    # modes)
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    for method in self.methods:
        result = self._regrid(data, method, 'mask')
        self.assertIsInstance(result, np.ma.MaskedArray)
        mask = [[0, 0, 0, 0, 1],
                [0, 0, 0, 0, 1],
                [0, 0, 0, 0, 1]]
        values = self.values_by_method[method]
        expected = np.ma.MaskedArray(values, mask)
        self.assertMaskedArrayEqual(result, expected)
def test_mask_maskedarray(self):
    """'mask' mode on a masked array: extrapolated and masked both masked."""
    # NaN -> NaN
    # Extrapolated -> Masked
    # Masked -> Masked
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.ma.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    data[2, 3] = np.ma.masked
    for method in self.methods:
        result = self._regrid(data, method, 'mask')
        self.assertIsInstance(result, np.ma.MaskedArray)
        mask = [[0, 0, 0, 0, 1],
                [0, 0, 0, 0, 1],
                [0, 0, 0, 1, 1]]
        values = self.values_by_method[method]
        expected = np.ma.MaskedArray(values, mask)
        self.assertMaskedArrayEqual(result, expected)
def test_nanmask_ndarray(self):
    """'nanmask' mode on a plain ndarray behaves like 'nan'."""
    # NaN -> NaN
    # Extrapolated -> NaN
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    for method in self.methods:
        result = self._regrid(data, method, 'nanmask')
        self.assertNotIsInstance(result, np.ma.MaskedArray)
        expected = self.values_by_method[method]
        self.assertArrayEqual(result, expected)
def test_nanmask_maskedarray(self):
    """'nanmask' mode on a masked array behaves like 'mask'."""
    # NaN -> NaN
    # Extrapolated -> Masked
    # Masked -> Masked
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.ma.arange(12, dtype=float).reshape(3, 4)
    data[0, 0] = np.nan
    data[2, 3] = np.ma.masked
    for method in self.methods:
        result = self._regrid(data, method, 'nanmask')
        self.assertIsInstance(result, np.ma.MaskedArray)
        mask = [[0, 0, 0, 0, 1],
                [0, 0, 0, 0, 1],
                [0, 0, 0, 1, 1]]
        values = self.values_by_method[method]
        expected = np.ma.MaskedArray(values, mask)
        self.assertMaskedArrayEqual(result, expected)
def test_invalid(self):
    """An unrecognised extrapolation mode raises ValueError."""
    # ``float`` replaces the removed ``np.float`` alias (numpy >= 1.24).
    data = np.arange(12, dtype=float).reshape(3, 4)
    emsg = 'Invalid extrapolation mode'
    for method in self.methods:
        with self.assertRaisesRegexp(ValueError, emsg):
            self._regrid(data, method, 'BOGUS')
class Test___call____invalid_types(tests.IrisTest):
    """Regridder construction and call must reject non-cube arguments."""

    def setUp(self):
        self.cube = lat_lon_cube()
        # Regridder method and extrapolation-mode.
        self.args = ('linear', 'mask')
        self.regridder = Regridder(self.cube, self.cube, *self.args)

    def test_src_as_array(self):
        # A bare ndarray is not an acceptable source.
        bad_src = np.zeros((3, 4))
        with self.assertRaises(TypeError):
            Regridder(bad_src, self.cube, *self.args)
        with self.assertRaises(TypeError):
            self.regridder(bad_src)

    def test_grid_as_array(self):
        # A bare ndarray is not an acceptable target grid.
        bad_grid = np.zeros((3, 4))
        with self.assertRaises(TypeError):
            Regridder(self.cube, bad_grid, *self.args)

    def test_src_as_int(self):
        # A scalar is not an acceptable source.
        with self.assertRaises(TypeError):
            Regridder(42, self.cube, *self.args)
        with self.assertRaises(TypeError):
            self.regridder(42)

    def test_grid_as_int(self):
        # A scalar is not an acceptable target grid.
        with self.assertRaises(TypeError):
            Regridder(self.cube, 42, *self.args)
class Test___call____missing_coords(tests.IrisTest):
    """A cube lacking a latitude and/or longitude coord raises ValueError,
    both at Regridder construction time and when calling the regridder."""

    def setUp(self):
        # Regridder method and extrapolation-mode shared by every test.
        self.args = ('linear', 'mask')

    def ok_bad(self, coord_names):
        # Deletes the named coords from `bad`.
        ok = lat_lon_cube()
        bad = lat_lon_cube()
        for name in coord_names:
            bad.remove_coord(name)
        return ok, bad

    def test_src_missing_lat(self):
        ok, bad = self.ok_bad(['latitude'])
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)
        regridder = Regridder(ok, ok, *self.args)
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_missing_lat(self):
        ok, bad = self.ok_bad(['latitude'])
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)

    def test_src_missing_lon(self):
        ok, bad = self.ok_bad(['longitude'])
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)
        regridder = Regridder(ok, ok, *self.args)
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_missing_lon(self):
        ok, bad = self.ok_bad(['longitude'])
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)

    def test_src_missing_lat_lon(self):
        ok, bad = self.ok_bad(['latitude', 'longitude'])
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)
        regridder = Regridder(ok, ok, *self.args)
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_missing_lat_lon(self):
        ok, bad = self.ok_bad(['latitude', 'longitude'])
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)
class Test___call____not_dim_coord(tests.IrisTest):
    """Latitude/longitude provided only as AuxCoords raise ValueError."""

    def setUp(self):
        # Regridder method and extrapolation-mode shared by every test.
        self.args = ('linear', 'mask')

    def ok_bad(self, coord_name):
        # Demotes the named DimCoord on `bad` to an AuxCoord.
        ok = lat_lon_cube()
        bad = lat_lon_cube()
        coord = bad.coord(coord_name)
        dims = bad.coord_dims(coord)
        bad.remove_coord(coord_name)
        aux_coord = AuxCoord.from_coord(coord)
        bad.add_aux_coord(aux_coord, dims)
        return ok, bad

    def test_src_with_aux_lat(self):
        ok, bad = self.ok_bad('latitude')
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)
        regridder = Regridder(ok, ok, *self.args)
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_with_aux_lat(self):
        ok, bad = self.ok_bad('latitude')
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)

    def test_src_with_aux_lon(self):
        ok, bad = self.ok_bad('longitude')
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)
        regridder = Regridder(ok, ok, *self.args)
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_with_aux_lon(self):
        ok, bad = self.ok_bad('longitude')
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)
class Test___call____not_dim_coord_share(tests.IrisTest):
    """Lat/lon sharing a single data dimension raises ValueError."""

    def setUp(self):
        # Regridder method and extrapolation-mode shared by every test.
        self.args = ('linear', 'mask')

    def ok_bad(self):
        # Make lat/lon share a single dimension on `bad`.
        ok = lat_lon_cube()
        bad = lat_lon_cube()
        lat = bad.coord('latitude')
        bad = bad[0, :lat.shape[0]]
        bad.remove_coord('latitude')
        bad.add_aux_coord(lat, 0)
        return ok, bad

    def test_src_shares_dim(self):
        ok, bad = self.ok_bad()
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)
        regridder = Regridder(ok, ok, *self.args)
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_shares_dim(self):
        ok, bad = self.ok_bad()
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)
class Test___call____bad_georeference(tests.IrisTest):
    """Missing or inconsistent coordinate systems raise ValueError."""

    def setUp(self):
        # Regridder method and extrapolation-mode shared by every test.
        self.args = ('linear', 'mask')

    def ok_bad(self, lat_cs, lon_cs):
        # Updates `bad` to use the given coordinate systems.
        ok = lat_lon_cube()
        bad = lat_lon_cube()
        bad.coord('latitude').coord_system = lat_cs
        bad.coord('longitude').coord_system = lon_cs
        return ok, bad

    def test_src_no_cs(self):
        # NB. construction succeeds; the error surfaces on the call.
        ok, bad = self.ok_bad(None, None)
        regridder = Regridder(bad, ok, *self.args)
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_no_cs(self):
        # NB. construction succeeds; the error surfaces on the call.
        ok, bad = self.ok_bad(None, None)
        regridder = Regridder(ok, bad, *self.args)
        with self.assertRaises(ValueError):
            regridder(ok)

    def test_src_one_cs(self):
        # Only one of the two coords georeferenced - rejected outright.
        ok, bad = self.ok_bad(None, GeogCS(6371000))
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)

    def test_grid_one_cs(self):
        ok, bad = self.ok_bad(None, GeogCS(6371000))
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)

    def test_src_inconsistent_cs(self):
        # Two different coordinate systems on one cube - rejected outright.
        ok, bad = self.ok_bad(GeogCS(6370000), GeogCS(6371000))
        with self.assertRaises(ValueError):
            Regridder(bad, ok, *self.args)

    def test_grid_inconsistent_cs(self):
        ok, bad = self.ok_bad(GeogCS(6370000), GeogCS(6371000))
        with self.assertRaises(ValueError):
            Regridder(ok, bad, *self.args)
class Test___call____bad_angular_units(tests.IrisTest):
    """Angular coordinates not measured in degrees raise ValueError."""

    def ok_bad(self):
        # Changes the longitude coord to radians on `bad`.
        ok = lat_lon_cube()
        bad = lat_lon_cube()
        bad.coord('longitude').units = 'radians'
        return ok, bad

    def test_src_radians(self):
        # NB. construction succeeds; the error surfaces on the call.
        ok, bad = self.ok_bad()
        regridder = Regridder(bad, ok, 'linear', 'mask')
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_radians(self):
        ok, bad = self.ok_bad()
        with self.assertRaises(ValueError):
            Regridder(ok, bad, 'linear', 'mask')
def uk_cube():
    """Return a 3x4 OSGB-projected test cube with a hybrid-height factory.

    The cube carries projection_y/x DimCoords in metres, a
    'surface_altitude' AuxCoord (data * 10) spanning both dimensions,
    and a HybridHeightFactory built from that orography.
    """
    payload = np.arange(12, dtype=np.float32).reshape(3, 4)
    cube = Cube(payload)
    crs = OSGB()
    # Attach the two projected dimension coordinates.
    axes = (('projection_y_coordinate', np.arange(3)),
            ('projection_x_coordinate', np.arange(4)))
    for dim, (name, points) in enumerate(axes):
        coord = DimCoord(points, name, units='m', coord_system=crs)
        cube.add_dim_coord(coord, dim)
    altitude = AuxCoord(payload * 10, 'surface_altitude', units='m')
    cube.add_aux_coord(altitude, (0, 1))
    cube.add_aux_factory(HybridHeightFactory(orography=altitude))
    return cube
class Test___call____bad_linear_units(tests.IrisTest):
    """Linear (projected) coordinates not in metres raise ValueError."""

    def ok_bad(self):
        # Defines `bad` with an x coordinate in km.
        ok = lat_lon_cube()
        bad = uk_cube()
        bad.coord(axis='x').units = 'km'
        return ok, bad

    def test_src_km(self):
        # NB. construction succeeds; the error surfaces on the call.
        ok, bad = self.ok_bad()
        regridder = Regridder(bad, ok, 'linear', 'mask')
        with self.assertRaises(ValueError):
            regridder(bad)

    def test_grid_km(self):
        ok, bad = self.ok_bad()
        with self.assertRaises(ValueError):
            Regridder(ok, bad, 'linear', 'mask')
class Test___call____no_coord_systems(tests.IrisTest):
    # Test behaviour in the absence of any coordinate systems.

    def setUp(self):
        self.mode = 'mask'
        self.methods = ('linear', 'nearest')

    def remove_coord_systems(self, cube):
        # Strip the coordinate system from every coordinate on `cube`.
        for coord in cube.coords():
            coord.coord_system = None

    def test_ok(self):
        # Ensure regridding is supported when the coordinate definitions match.
        # NB. We change the coordinate *values* to ensure that does not
        # prevent the regridding operation.
        src = uk_cube()
        self.remove_coord_systems(src)
        grid = src.copy()
        for coord in grid.dim_coords:
            coord.points = coord.points + 1
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            for coord in result.dim_coords:
                self.assertEqual(coord, grid.coord(coord))
            expected = np.ma.arange(12).reshape((3, 4)) + 5
            expected[:, 3] = np.ma.masked
            expected[2, :] = np.ma.masked
            self.assertMaskedArrayEqual(result.data, expected)

    def test_matching_units(self):
        # Check we are insensitive to the units provided they match.
        # NB. We change the coordinate *values* to ensure that does not
        # prevent the regridding operation.
        src = uk_cube()
        self.remove_coord_systems(src)
        # Move to unusual units (i.e. not metres or degrees).
        for coord in src.dim_coords:
            coord.units = 'feet'
        grid = src.copy()
        for coord in grid.dim_coords:
            coord.points = coord.points + 1
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            for coord in result.dim_coords:
                self.assertEqual(coord, grid.coord(coord))
            expected = np.ma.arange(12).reshape((3, 4)) + 5
            expected[:, 3] = np.ma.masked
            expected[2, :] = np.ma.masked
            self.assertMaskedArrayEqual(result.data, expected)

    def test_different_units(self):
        # Mismatched units between src and grid must be rejected.
        src = uk_cube()
        self.remove_coord_systems(src)
        # Move to unusual units (i.e. not metres or degrees).
        for coord in src.coords():
            coord.units = 'feet'
        grid = src.copy()
        grid.coord('projection_y_coordinate').units = 'yards'
        # We change the coordinate *values* to ensure that does not
        # prevent the regridding operation.
        for coord in grid.dim_coords:
            coord.points = coord.points + 1
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            emsg = 'matching coordinate metadata'
            with self.assertRaisesRegexp(ValueError, emsg):
                regridder(src)

    def test_coord_metadata_mismatch(self):
        # Check for failure when coordinate definitions differ.
        uk = uk_cube()
        self.remove_coord_systems(uk)
        lat_lon = lat_lon_cube()
        self.remove_coord_systems(lat_lon)
        for method in self.methods:
            regridder = Regridder(uk, lat_lon, method, self.mode)
            with self.assertRaises(ValueError):
                regridder(uk)
class Test___call____extrapolation_modes(tests.IrisTest):
    """Exercise every extrapolation mode through the Regridder call
    interface using cubes (cf. the array-level tests above)."""

    # Expected payload when extrapolated points become NaN or masked.
    values = [[np.nan, 6, 7, np.nan],
              [9, 10, 11, np.nan],
              [np.nan, np.nan, np.nan, np.nan]]

    # Expected payload per method when extrapolation fills real values.
    extrapolate_values_by_method = {'linear': [[np.nan, 6, 7, 8],
                                               [9, 10, 11, 12],
                                               [13, 14, 15, 16]],
                                    'nearest': [[np.nan, 6, 7, 7],
                                                [9, 10, 11, 11],
                                                [9, 10, 11, 11]]}

    # Expected 'surface_altitude' points after regridding (always NaN
    # outside the source extent - never masked).
    surface_values = [[50, 60, 70, np.nan],
                      [90, 100, 110, np.nan],
                      [np.nan, np.nan, np.nan, np.nan]]

    def setUp(self):
        self.methods = ('linear', 'nearest')

    def _ndarray_cube(self, method):
        # Plain-ndarray source with one NaN at a method-dependent index.
        assert method in self.methods
        src = uk_cube()
        index = (0, 0) if method == 'linear' else (1, 1)
        src.data[index] = np.nan
        return src

    def _masked_cube(self, method):
        # Masked-array source with one NaN and one masked point.
        assert method in self.methods
        src = uk_cube()
        src.data = np.ma.asarray(src.data)
        nan_index = (0, 0) if method == 'linear' else (1, 1)
        mask_index = (2, 3)
        src.data[nan_index] = np.nan
        src.data[mask_index] = np.ma.masked
        return src

    def _regrid(self, src, method, extrapolation_mode='mask'):
        # Regrid `src` onto a grid offset by one point in each dimension,
        # checking the regridded surface altitude along the way.
        grid = src.copy()
        for coord in grid.dim_coords:
            coord.points = coord.points + 1
        regridder = Regridder(src, grid, method, extrapolation_mode)
        result = regridder(src)

        surface = result.coord('surface_altitude').points
        self.assertNotIsInstance(surface, np.ma.MaskedArray)
        self.assertArrayEqual(surface, self.surface_values)

        return result.data

    def test_default_ndarray(self):
        # NaN -> NaN
        # Extrapolated -> Masked
        for method in self.methods:
            src = self._ndarray_cube(method)
            result = self._regrid(src, method)
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 1],
                    [0, 0, 0, 1],
                    [1, 1, 1, 1]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_default_maskedarray(self):
        # NaN -> NaN
        # Extrapolated -> Masked
        # Masked -> Masked
        for method in self.methods:
            src = self._masked_cube(method)
            result = self._regrid(src, method)
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 1],
                    [0, 0, 1, 1],
                    [1, 1, 1, 1]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_default_maskedarray_none_masked(self):
        # NaN -> NaN
        # Extrapolated -> Masked
        # Masked -> N/A
        for method in self.methods:
            src = uk_cube()
            src.data = np.ma.asarray(src.data)
            index = (0, 0) if method == 'linear' else (1, 1)
            src.data[index] = np.nan
            result = self._regrid(src, method)
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 1],
                    [0, 0, 0, 1],
                    [1, 1, 1, 1]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_default_maskedarray_none_masked_expanded(self):
        # NaN -> NaN
        # Extrapolated -> Masked
        # Masked -> N/A
        for method in self.methods:
            src = uk_cube()
            src.data = np.ma.asarray(src.data)
            # Make sure the mask has been expanded
            src.data.mask = False
            index = (0, 0) if method == 'linear' else (1, 1)
            src.data[index] = np.nan
            result = self._regrid(src, method)
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 1],
                    [0, 0, 0, 1],
                    [1, 1, 1, 1]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_method_ndarray(self):
        # NaN -> NaN
        # Extrapolated -> linear
        for method in self.methods:
            src = self._ndarray_cube(method)
            result = self._regrid(src, method, 'extrapolate')
            self.assertNotIsInstance(result, np.ma.MaskedArray)
            expected = self.extrapolate_values_by_method[method]
            self.assertArrayEqual(result, expected)

    def test_nan_ndarray(self):
        # NaN -> NaN
        # Extrapolated -> NaN
        for method in self.methods:
            src = self._ndarray_cube(method)
            result = self._regrid(src, method, 'nan')
            self.assertNotIsInstance(result, np.ma.MaskedArray)
            self.assertArrayEqual(result, self.values)

    def test_nan_maskedarray(self):
        # NaN -> NaN
        # Extrapolated -> NaN
        # Masked -> Masked
        for method in self.methods:
            src = self._masked_cube(method)
            result = self._regrid(src, method, 'nan')
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 0]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_error_ndarray(self):
        # Values irrelevant - the function raises an error.
        for method in self.methods:
            src = self._ndarray_cube(method)
            with self.assertRaisesRegexp(ValueError, 'out of bounds'):
                self._regrid(src, method, 'error')

    def test_error_maskedarray(self):
        # Values irrelevant - the function raises an error.
        for method in self.methods:
            src = self._masked_cube(method)
            with self.assertRaisesRegexp(ValueError, 'out of bounds'):
                self._regrid(src, method, 'error')

    def test_mask_ndarray(self):
        # NaN -> NaN
        # Extrapolated -> Masked (this is different from all the other
        # modes)
        for method in self.methods:
            src = self._ndarray_cube(method)
            result = self._regrid(src, method, 'mask')
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 1],
                    [0, 0, 0, 1],
                    [1, 1, 1, 1]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_mask_maskedarray(self):
        # NaN -> NaN
        # Extrapolated -> Masked
        # Masked -> Masked
        for method in self.methods:
            src = self._masked_cube(method)
            result = self._regrid(src, method, 'mask')
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 1],
                    [0, 0, 1, 1],
                    [1, 1, 1, 1]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_nanmask_ndarray(self):
        # NaN -> NaN
        # Extrapolated -> NaN
        for method in self.methods:
            src = self._ndarray_cube(method)
            result = self._regrid(src, method, 'nanmask')
            self.assertNotIsInstance(result, np.ma.MaskedArray)
            self.assertArrayEqual(result, self.values)

    def test_nanmask_maskedarray(self):
        # NaN -> NaN
        # Extrapolated -> Masked
        # Masked -> Masked
        for method in self.methods:
            src = self._masked_cube(method)
            result = self._regrid(src, method, 'nanmask')
            self.assertIsInstance(result, np.ma.MaskedArray)
            mask = [[0, 0, 0, 1],
                    [0, 0, 1, 1],
                    [1, 1, 1, 1]]
            expected = np.ma.MaskedArray(self.values, mask)
            self.assertMaskedArrayEqual(result, expected)

    def test_invalid(self):
        src = uk_cube()
        emsg = 'Invalid extrapolation mode'
        for method in self.methods:
            with self.assertRaisesRegexp(ValueError, emsg):
                self._regrid(src, method, 'BOGUS')
@tests.skip_data
class Test___call____rotated_to_lat_lon(tests.IrisTest):
    """Regridding from a rotated-pole source onto lat/lon grids,
    checked against stored CML reference results."""

    def setUp(self):
        self.src = realistic_4d()[:5, :2, ::40, ::30]
        self.mode = 'mask'
        self.methods = ('linear', 'nearest')

    def test_single_point(self):
        src = self.src[0, 0]
        grid = global_pp()[:1, :1]
        # These coordinate values have been derived by converting the
        # rotated coordinates of src[1, 1] into lat/lon by using cs2cs.
        grid.coord('longitude').points = -3.144870
        grid.coord('latitude').points = 52.406444
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            self.assertEqual(src.data[1, 1], result.data)

    def test_transposed_src(self):
        # The source dimensions are in a non-standard order.
        src = self.src
        src.transpose([3, 1, 2, 0])
        grid = self._grid_subset()
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            # Transpose back so the result matches the stored reference.
            result.transpose([3, 1, 2, 0])
            cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def _grid_subset(self):
        # The destination grid points are entirely contained within the
        # src grid points.
        grid = global_pp()[:4, :5]
        grid.coord('longitude').points = np.linspace(-3.182, -3.06, 5)
        grid.coord('latitude').points = np.linspace(52.372, 52.44, 4)
        return grid

    def test_reversed(self):
        # Reversing either grid axis (and/or the matching source axes)
        # must give the same result, just flipped.
        src = self.src
        grid = self._grid_subset()

        for method in self.methods:
            cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
            regridder = Regridder(src, grid[::-1], method, self.mode)
            result = regridder(src)
            self.assertCMLApproxData(result[:, :, ::-1], cml)

            sample = src[:, :, ::-1]
            regridder = Regridder(sample, grid[::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, ::-1], cml)

            sample = src[:, :, :, ::-1]
            regridder = Regridder(sample, grid[::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, ::-1], cml)

            sample = src[:, :, ::-1, ::-1]
            regridder = Regridder(sample, grid[::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, ::-1], cml)

            regridder = Regridder(src, grid[:, ::-1], method, self.mode)
            result = regridder(src)
            self.assertCMLApproxData(result[:, :, :, ::-1], cml)

            sample = src[:, :, ::-1]
            regridder = Regridder(sample, grid[:, ::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, :, ::-1], cml)

            sample = src[:, :, :, ::-1]
            regridder = Regridder(sample, grid[:, ::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, :, ::-1], cml)

            sample = src[:, :, ::-1, ::-1]
            regridder = Regridder(sample, grid[:, ::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, :, ::-1], cml)

            regridder = Regridder(src, grid[::-1, ::-1], method, self.mode)
            result = regridder(src)
            self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)

            sample = src[:, :, ::-1]
            regridder = Regridder(sample, grid[::-1, ::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)

            sample = src[:, :, :, ::-1]
            regridder = Regridder(sample, grid[::-1, ::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)

            sample = src[:, :, ::-1, ::-1]
            regridder = Regridder(sample, grid[::-1, ::-1], method, self.mode)
            result = regridder(sample)
            self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)

    def test_grid_subset(self):
        # The destination grid points are entirely contained within the
        # src grid points.
        grid = self._grid_subset()
        for method in self.methods:
            regridder = Regridder(self.src, grid, method, self.mode)
            result = regridder(self.src)
            cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def _big_grid(self):
        # A grid with extra (irrelevant) leading and trailing dimensions.
        grid = self._grid_subset()
        big_grid = Cube(np.zeros((5, 10, 3, 4, 5)))
        big_grid.add_dim_coord(grid.coord('latitude'), 3)
        big_grid.add_dim_coord(grid.coord('longitude'), 4)
        return big_grid

    def test_grid_subset_big(self):
        # Add some extra dimensions to the destination Cube and
        # these should be safely ignored.
        big_grid = self._big_grid()
        for method in self.methods:
            regridder = Regridder(self.src, big_grid, method, self.mode)
            result = regridder(self.src)
            cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_grid_subset_big_transposed(self):
        # The order of the grid's dimensions (including the X and Y
        # dimensions) must not affect the result.
        big_grid = self._big_grid()
        big_grid.transpose([4, 0, 3, 1, 2])
        for method in self.methods:
            regridder = Regridder(self.src, big_grid, method, self.mode)
            result = regridder(self.src)
            cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_grid_subset_anon(self):
        # Must cope OK with anonymous source dimensions.
        src = self.src
        src.remove_coord('time')
        grid = self._grid_subset()
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            cml = RESULT_DIR + ('{}_subset_anon.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_grid_subset_missing_data_1(self):
        # The destination grid points are entirely contained within the
        # src grid points AND we have missing data.
        src = self.src
        src.data = np.ma.MaskedArray(src.data)
        src.data[:, :, 0, 0] = np.ma.masked
        grid = self._grid_subset()
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            cml = RESULT_DIR + ('{}_subset_masked_1.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_grid_subset_missing_data_2(self):
        # The destination grid points are entirely contained within the
        # src grid points AND we have missing data.
        src = self.src
        src.data = np.ma.MaskedArray(src.data)
        src.data[:, :, 1, 2] = np.ma.masked
        grid = self._grid_subset()
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            cml = RESULT_DIR + ('{}_subset_masked_2.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_grid_partial_overlap(self):
        # The destination grid points are partially contained within the
        # src grid points.
        grid = global_pp()[:4, :4]
        grid.coord('longitude').points = np.linspace(-3.3, -3.06, 4)
        grid.coord('latitude').points = np.linspace(52.377, 52.43, 4)
        for method in self.methods:
            regridder = Regridder(self.src, grid, method, self.mode)
            result = regridder(self.src)
            cml = RESULT_DIR + ('{}_partial_overlap.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_grid_no_overlap(self):
        # The destination grid points are NOT contained within the
        # src grid points.
        grid = global_pp()[:4, :4]
        grid.coord('longitude').points = np.linspace(-3.3, -3.2, 4)
        grid.coord('latitude').points = np.linspace(52.377, 52.43, 4)
        for method in self.methods:
            regridder = Regridder(self.src, grid, method, self.mode)
            result = regridder(self.src)
            self.assertCMLApproxData(result, RESULT_DIR + ('no_overlap.cml',))

    def test_grid_subset_missing_data_aux(self):
        # The destination grid points are entirely contained within the
        # src grid points AND we have missing data on the aux coordinate.
        src = self.src
        # NOTE(review): assigning np.ma.masked into `points` presumably
        # relies on the aux-coord points being a masked array here -
        # confirm against the realistic_4d fixture.
        src.coord('surface_altitude').points[1, 2] = np.ma.masked
        grid = self._grid_subset()
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            cml = RESULT_DIR + ('{}_masked_altitude.cml'.format(method),)
            self.assertCMLApproxData(result, cml)
class Test___call____NOP(tests.IrisTest):
    """Regridding onto an identical grid must return the source unchanged."""

    def setUp(self):
        # The destination grid points are exactly the same as the
        # src grid points.
        self.src = realistic_4d()[:5, :2, ::40, ::30]
        self.grid = self.src.copy()

    def _check_nop(self, method):
        # Shared assertion: the regrid is a no-op for the given method.
        regridder = Regridder(self.src, self.grid, method, 'mask')
        self.assertEqual(regridder(self.src), self.src)

    def test_nop__linear(self):
        self._check_nop('linear')

    def test_nop__nearest(self):
        self._check_nop('nearest')
@tests.skip_data
class Test___call____circular(tests.IrisTest):
    """Check the 'circular' flag propagates from grid to result and
    does not disturb the regridded values."""

    def setUp(self):
        src = global_pp()[::10, ::10]
        # Build a hybrid-height coordinate system on the source.
        level_height = AuxCoord(0, long_name='level_height', units='m',
                                attributes={'positive': 'up'})
        sigma = AuxCoord(1, long_name='sigma')
        surface_altitude = AuxCoord((src.data - src.data.min()) * 50,
                                    'surface_altitude', units='m')
        src.add_aux_coord(level_height)
        src.add_aux_coord(sigma)
        src.add_aux_coord(surface_altitude, [0, 1])
        hybrid_height = HybridHeightFactory(level_height, sigma,
                                            surface_altitude)
        src.add_aux_factory(hybrid_height)
        self.src = src

        grid = global_pp()[:4, :4]
        # Shift the grid west so it straddles the source's wrap point.
        grid.coord('longitude').points = grid.coord('longitude').points - 5
        self.grid = grid
        self.mode = 'mask'
        self.methods = ('linear', 'nearest')

    def test_non_circular(self):
        # Non-circular src -> non-circular grid
        for method in self.methods:
            regridder = Regridder(self.src, self.grid, method, self.mode)
            result = regridder(self.src)
            self.assertFalse(result.coord('longitude').circular)
            cml = RESULT_DIR + ('{}_non_circular.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_circular_src(self):
        # Circular src -> non-circular grid
        src = self.src
        src.coord('longitude').circular = True
        for method in self.methods:
            regridder = Regridder(src, self.grid, method, self.mode)
            result = regridder(src)
            self.assertFalse(result.coord('longitude').circular)
            cml = RESULT_DIR + ('{}_circular_src.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_circular_grid(self):
        # Non-circular src -> circular grid
        grid = self.grid
        grid.coord('longitude').circular = True
        for method in self.methods:
            regridder = Regridder(self.src, grid, method, self.mode)
            result = regridder(self.src)
            self.assertTrue(result.coord('longitude').circular)
            cml = RESULT_DIR + ('{}_circular_grid.cml'.format(method),)
            self.assertCMLApproxData(result, cml)

    def test_circular_src_and_grid(self):
        # Circular src -> circular grid
        src = self.src
        src.coord('longitude').circular = True
        grid = self.grid
        grid.coord('longitude').circular = True
        for method in self.methods:
            regridder = Regridder(src, grid, method, self.mode)
            result = regridder(src)
            self.assertTrue(result.coord('longitude').circular)
            cml = RESULT_DIR + ('{}_both_circular.cml'.format(method),)
            self.assertCMLApproxData(result, cml)
if __name__ == '__main__':
    # Run this test module via the Iris test runner.
    tests.main()
#!/usr/bin/python
# -*- coding: ascii -*-
# Author: @harvie Tomas Mudrunka
# Date: 7 july 2018
from __future__ import print_function
from __future__ import print_function
__author__ = "@harvie Tomas Mudrunka"
#__email__ = ""
__name__ = _("Difference")
__version__ = "0.0.1"
import math
import os.path
import re
from CNC import CNC,Block
from ToolsPage import Plugin
from math import pi, sqrt, sin, cos, asin, acos, atan2, hypot, degrees, radians, copysign, fmod
from bpath import EPS,eq,Path, Segment
from bmath import Vector
from copy import deepcopy
class Tool(Plugin):
__doc__ = _("""Difference of two shapes""") #<<< This comment will be show as tooltip for the ribbon button
def __init__(self, master):
    """Register the plugin, its ribbon appearance and its config variables."""
    Plugin.__init__(self, master,"Difference")
    #Helical_Descent: is the name of the plugin show in the tool ribbon button
    self.icon = "diff"  #<<< This is the name of file used as icon for the ribbon button. It will be search in the "icons" subfolder
    self.group = "Development"  #<<< This is the name of group that plugin belongs
    self.oneshot = True
    #Here we are creating the widgets presented to the user inside the plugin
    #Name, Type , Default value, Description
    self.variables = [  #<<< Define a list of components for the GUI
        ("name", "db", "", _("Name")),  #used to store plugin settings in the internal database
    ]
    self.buttons.append("exe")  #<<< This is the button added at bottom to call the execute method below
# ----------------------------------------------------------------------
# This method is executed when user presses the plugin execute button
# ----------------------------------------------------------------------
def execute(self, app):
    """Subtract the selected 'island' paths from the other selected paths.

    Blocks flagged with the 'island' operation are treated as holes;
    every other selected block is a base shape.  For each island, the
    base paths are clipped: segments of the base outside the island are
    kept, and segments of the island inside the base are added, then the
    result is re-joined (eulerized).  The resulting paths are inserted
    back into the editor as new "diff" blocks.
    """
    #print("go!")
    blocks = []
    paths_base = []
    paths_isl = []

    # Split the selection into base shapes and islands.
    for bid in app.editor.getSelectedBlocks():
        if app.gcode[bid].operationTest('island'):
            paths_isl.extend(app.gcode.toPath(bid))
        else:
            paths_base.extend(app.gcode.toPath(bid))

    # Clip every base path against every island in turn.
    for island in paths_isl:
        paths_newbase = []
        while len(paths_base) > 0:
            base = paths_base.pop()

            # Split both paths at their mutual intersection points.
            base.intersectPath(island)
            island.intersectPath(base)

            newbase = Path("diff")

            #Add segments from outside of islands:
            for i,seg in enumerate(base):
                if not island.isInside(seg.midPoint()):
                    newbase.append(seg)

            #Add segments from islands to base
            for i,seg in enumerate(island):
                if base.isInside(seg.midPoint()): #and base.isInside(seg.A) and base.isInside(seg.B):
                    newbase.append(seg)

            #Eulerize
            paths_newbase.extend(newbase.eulerize())
            #paths_newbase.extend(newbase.split2contours())
        paths_base = paths_newbase

    # Convert the clipped paths back into g-code blocks.
    for base in paths_base:
        print(base)
        #base = base.eulerize(True)
        block = Block("diff")
        block.extend(app.gcode.fromPath(base))
        blocks.append(block)

    #active = app.activeBlock()
    app.gcode.insBlocks(-1, blocks, "Diff")  #<<< insert blocks over active block in the editor
    app.refresh()  #<<< refresh editor
    app.setStatus(_("Generated: Diff"))  #<<< feed back result
    #app.gcode.blocks.append(block)
##############################################
def pol2car(self, r, phi, a=[0,0]):
return [round(a[0]+r*cos(phi),4),round(a[1]+r*sin(phi),4)]
#def findSegment(self, path,A,B): #FIXME: not used for now...
# for seg in path:
# if seg.A == A and seg.B == B:
# return seg
# elif seg.A == B and seg.B == A:
# seg.invert()
# return seg
# else: return Segment(1, A, B)
def findSubpath(self, path,A,B ,inside):
path = deepcopy(path)
newpath = self._findSubpath(path,A,B,inside)
if newpath is None:
path.invert()
newpath = self._findSubpath(path,A,B,inside)
return newpath
def _findSubpath(self, path,A,B, inside):
print("finding", A, B)
sub = None
for i in xrange(0,len(path)*2): #iterate twice with wrap around
j = i%len(path)
seg = path[j]
if inside.isInside(seg.midPoint()):
if eq(seg.A,A): sub = Path("subp")
print("seg", sub is None, seg)
if sub is not None: sub.append(seg)
if eq(seg.B,B): break
print("found", sub)
return sub
def pathBoolIntersection(self, basepath, islandpath):
basepath.intersectPath(islandpath)
islandpath.intersectPath(basepath)
#basepath = deepcopy(basepath)
#islandpath = deepcopy(islandpath)
#find first intersecting segment
first = None
for i,segment in enumerate(basepath):
if islandpath.isInside(segment.midPoint()): first = i
if first is None:
print("not intersecting paths")
return None
#generate intersected path
newisland = Path("new")
A = None
for i in xrange(first,2*len(basepath)+first):
j = i%len(basepath)
segment = basepath[j]
if segment.length()<EPS: continue #ignore zero length segments
if not islandpath.isInside(segment.midPoint()):
if A is None:
A = segment.A
newisland.append(segment)
else:
if A is not None:
newisland.extend(self.findSubpath(islandpath,A,segment.A,basepath))
print("new",newisland)
A = None
#newisland.append(segment)
#for i,seg in enumerate(newisland):
# newisland[i].correct();
print("new2",newisland)
return newisland
| gpl-2.0 |
bitspill/electrum-doged | lib/account.py | 10 | 13664 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bitcoin
from bitcoin import *
from i18n import _
from transaction import Transaction, is_extended_pubkey
from util import print_msg, InvalidPassword
class Account(object):
    # Base class of all Electrum account types: parallel lists of
    # receiving/change public keys plus the addresses derived from them.
    # NOTE: Python 2 module — map() returns a list throughout.
    def __init__(self, v):
        # v: dict previously produced by dump().
        self.receiving_pubkeys = v.get('receiving', [])
        self.change_pubkeys = v.get('change', [])
        # addresses will not be stored on disk
        self.receiving_addresses = map(self.pubkeys_to_address, self.receiving_pubkeys)
        self.change_addresses = map(self.pubkeys_to_address, self.change_pubkeys)

    def dump(self):
        # Serializable state; addresses are recomputed on load.
        return {'receiving':self.receiving_pubkeys, 'change':self.change_pubkeys}

    def get_pubkey(self, for_change, n):
        # n-th pubkey of the change (for_change truthy) or receiving chain.
        pubkeys_list = self.change_pubkeys if for_change else self.receiving_pubkeys
        return pubkeys_list[n]

    def get_address(self, for_change, n):
        addr_list = self.change_addresses if for_change else self.receiving_addresses
        return addr_list[n]

    def get_pubkeys(self, for_change, n):
        # Single-signer account: one pubkey per address.
        return [ self.get_pubkey(for_change, n)]

    def get_addresses(self, for_change):
        # Returns a copy so callers cannot mutate internal state.
        addr_list = self.change_addresses if for_change else self.receiving_addresses
        return addr_list[:]

    def derive_pubkeys(self, for_change, n):
        # Implemented by deterministic subclasses (OldAccount, BIP32_Account).
        pass

    def create_new_address(self, for_change):
        # Derive the next pubkey/address pair and append it to the chain.
        pubkeys_list = self.change_pubkeys if for_change else self.receiving_pubkeys
        addr_list = self.change_addresses if for_change else self.receiving_addresses
        n = len(pubkeys_list)
        pubkeys = self.derive_pubkeys(for_change, n)
        address = self.pubkeys_to_address(pubkeys)
        pubkeys_list.append(pubkeys)
        addr_list.append(address)
        print_msg(address)
        return address

    def pubkeys_to_address(self, pubkey):
        # pubkey: hex string of the public key.
        return public_key_to_bc_address(pubkey.decode('hex'))

    def has_change(self):
        return True

    def get_name(self, k):
        return _('Main account')

    def redeem_script(self, for_change, n):
        # Only multisig accounts have a redeem script.
        return None

    def synchronize_sequence(self, wallet, for_change):
        # Gap-limit synchronization: keep deriving addresses until the last
        # `limit` addresses of the chain are all unused ("not old").
        limit = wallet.gap_limit_for_change if for_change else wallet.gap_limit
        while True:
            addresses = self.get_addresses(for_change)
            if len(addresses) < limit:
                address = self.create_new_address(for_change)
                wallet.add_address(address)
                continue
            # Python 2: map() yields a list, comparable against limit*[False].
            if map( lambda a: wallet.address_is_old(a), addresses[-limit:] ) == limit*[False]:
                break
            else:
                address = self.create_new_address(for_change)
                wallet.add_address(address)

    def synchronize(self, wallet):
        # Synchronize both the receiving and the change chain.
        self.synchronize_sequence(wallet, False)
        self.synchronize_sequence(wallet, True)
class PendingAccount(Account):
    """Placeholder account holding one address whose keys are not yet
    managed deterministically; it has no change chain and nothing to
    synchronize."""

    def __init__(self, v):
        # v: dict with the pending 'address' and its 'pubkey'.
        self.pending_address = v['address']
        self.change_pubkeys = []
        self.receiving_pubkeys = [v['pubkey']]

    def synchronize(self, wallet):
        # Nothing to derive for a pending account.
        return

    def get_addresses(self, is_change):
        if is_change:
            return []
        return [self.pending_address]

    def has_change(self):
        return False

    def dump(self):
        return {
            'pending': True,
            'address': self.pending_address,
            'pubkey': self.receiving_pubkeys[0],
        }

    def get_name(self, k):
        return _('Pending account')

    def get_master_pubkeys(self):
        return []

    def get_type(self):
        return _('pending')

    def get_xpubkeys(self, for_change, n):
        # No extended keys: fall back to the plain pubkeys.
        return self.get_pubkeys(for_change, n)
class ImportedAccount(Account):
    """Account holding individually imported key pairs.

    self.keypairs maps address -> (pubkey_hex, encrypted_privkey).
    Imported keys are not deterministic, so there is no derivation and
    no change chain.
    """

    def __init__(self, d):
        self.keypairs = d['imported']

    def synchronize(self, wallet):
        # Nothing to derive for imported keys.
        return

    def get_addresses(self, for_change):
        return [] if for_change else sorted(self.keypairs.keys())

    def get_pubkey(self, *sequence):
        for_change, i = sequence
        assert for_change == 0
        addr = self.get_addresses(0)[i]
        return self.keypairs[addr][0]

    def get_xpubkeys(self, for_change, n):
        # No extended keys: fall back to the plain pubkeys.
        return self.get_pubkeys(for_change, n)

    def get_private_key(self, sequence, wallet, password):
        from wallet import pw_decode
        for_change, i = sequence
        assert for_change == 0
        address = self.get_addresses(0)[i]
        pk = pw_decode(self.keypairs[address][1], password)
        # this checks the password
        if address != address_from_private_key(pk):
            raise InvalidPassword()
        return [pk]

    def has_change(self):
        return False

    def add(self, address, pubkey, privkey, password):
        from wallet import pw_encode
        self.keypairs[address] = (pubkey, pw_encode(privkey, password ))

    def remove(self, address):
        self.keypairs.pop(address)

    def dump(self):
        return {'imported':self.keypairs}

    def get_name(self, k):
        return _('Imported keys')

    def update_password(self, old_password, new_password):
        # Re-encrypt every stored private key under the new password.
        # BUG FIX: pw_decode/pw_encode were used here without being imported
        # (the sibling methods import them locally from wallet), so changing
        # the password raised NameError.
        from wallet import pw_decode, pw_encode
        for k, v in self.keypairs.items():
            pubkey, a = v
            b = pw_decode(a, old_password)
            c = pw_encode(b, new_password)
            self.keypairs[k] = (pubkey, c)
class OldAccount(Account):
    """ Privatekey(type,n) = Master_private_key + H(n|S|type) """
    # Pre-BIP32 Electrum key scheme: a single master public key (mpk) plus a
    # per-index scalar offset derived from Hash("n:for_change:" + mpk).

    def __init__(self, v):
        Account.__init__(self, v)
        # mpk stored hex-encoded on disk; kept here as raw bytes.
        self.mpk = v['mpk'].decode('hex')

    @classmethod
    def mpk_from_seed(klass, seed):
        # Master public key = pubkey of the stretched seed exponent (hex).
        curve = SECP256k1
        secexp = klass.stretch_key(seed)
        master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
        master_public_key = master_private_key.get_verifying_key().to_string().encode('hex')
        return master_public_key

    @classmethod
    def stretch_key(self,seed):
        # Key stretching: 100000 rounds of sha256(seed + original_seed).
        oldseed = seed
        for i in range(100000):
            seed = hashlib.sha256(seed + oldseed).digest()
        return string_to_number( seed )

    @classmethod
    def get_sequence(self, mpk, for_change, n):
        # Per-index scalar offset added to the master key.
        return string_to_number( Hash( "%d:%d:"%(n,for_change) + mpk ) )

    def get_address(self, for_change, n):
        pubkey = self.get_pubkey(for_change, n)
        address = public_key_to_bc_address( pubkey.decode('hex') )
        return address

    @classmethod
    def get_pubkey_from_mpk(self, mpk, for_change, n):
        # pubkey(n) = mpk_point + sequence(n)*G, uncompressed ('04' prefix).
        curve = SECP256k1
        z = self.get_sequence(mpk, for_change, n)
        master_public_key = ecdsa.VerifyingKey.from_string( mpk, curve = SECP256k1 )
        pubkey_point = master_public_key.pubkey.point + z*curve.generator
        public_key2 = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
        return '04' + public_key2.to_string().encode('hex')

    def derive_pubkeys(self, for_change, n):
        return self.get_pubkey_from_mpk(self.mpk, for_change, n)

    def get_private_key_from_stretched_exponent(self, for_change, n, secexp):
        # privkey(n) = (master_exponent + sequence(n)) mod order, WIF-encoded.
        order = generator_secp256k1.order()
        secexp = ( secexp + self.get_sequence(self.mpk, for_change, n) ) % order
        pk = number_to_string( secexp, generator_secp256k1.order() )
        compressed = False
        return SecretToASecret( pk, compressed )

    def get_private_key(self, sequence, wallet, password):
        seed = wallet.get_seed(password)
        self.check_seed(seed)
        for_change, n = sequence
        secexp = self.stretch_key(seed)
        pk = self.get_private_key_from_stretched_exponent(for_change, n, secexp)
        return [pk]

    def check_seed(self, seed):
        # Verify the seed reproduces this account's mpk; raises otherwise.
        curve = SECP256k1
        secexp = self.stretch_key(seed)
        master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
        master_public_key = master_private_key.get_verifying_key().to_string()
        if master_public_key != self.mpk:
            # NOTE(review): print_error is not imported explicitly here —
            # presumably provided via `from bitcoin import *`; confirm.
            print_error('invalid password (mpk)', self.mpk.encode('hex'), master_public_key.encode('hex'))
            raise InvalidPassword()
        return True

    def get_master_pubkeys(self):
        return [self.mpk.encode('hex')]

    def get_type(self):
        return _('Old Electrum format')

    def get_xpubkeys(self, for_change, n):
        # Serialized as 'fe' + hex(mpk) + 2-byte little-endian indices.
        s = ''.join(map(lambda x: bitcoin.int_to_hex(x,2), (for_change, n)))
        mpk = self.mpk.encode('hex')
        x_pubkey = 'fe' + mpk + s
        return [ x_pubkey ]

    @classmethod
    def parse_xpubkey(self, x_pubkey):
        # Inverse of get_xpubkeys: returns (mpk_hex, [for_change, n]).
        assert is_extended_pubkey(x_pubkey)
        pk = x_pubkey[2:]
        mpk = pk[0:128]
        dd = pk[128:]
        s = []
        while dd:
            n = int(bitcoin.rev_hex(dd[0:4]), 16)
            dd = dd[4:]
            s.append(n)
        assert len(s) == 2
        return mpk, s
class BIP32_Account(Account):
    # Standard BIP32 account: one master xpub; leaf pubkeys are derived
    # along the non-hardened path  xpub / for_change / n.
    def __init__(self, v):
        Account.__init__(self, v)
        self.xpub = v['xpub']
        # Cached first-level derivations (xpub/0 and xpub/1).
        self.xpub_receive = None
        self.xpub_change = None

    def dump(self):
        d = Account.dump(self)
        d['xpub'] = self.xpub
        return d

    def first_address(self):
        # (address, pubkey) of the first receiving address (path 0/0).
        pubkeys = self.derive_pubkeys(0, 0)
        addr = self.pubkeys_to_address(pubkeys)
        return addr, pubkeys

    def get_master_pubkeys(self):
        return [self.xpub]

    @classmethod
    def derive_pubkey_from_xpub(self, xpub, for_change, n):
        # Two CKD_pub steps: xpub -> xpub/for_change -> xpub/for_change/n.
        _, _, _, c, cK = deserialize_xkey(xpub)
        for i in [for_change, n]:
            cK, c = CKD_pub(cK, c, i)
        return cK.encode('hex')

    def get_pubkey_from_xpub(self, xpub, for_change, n):
        # Pick the pubkey belonging to the given cosigner xpub.
        xpubs = self.get_master_pubkeys()
        i = xpubs.index(xpub)
        pubkeys = self.get_pubkeys(for_change, n)
        return pubkeys[i]

    def derive_pubkeys(self, for_change, n):
        # Derive (and cache) the branch xpub, then the leaf pubkey (hex).
        xpub = self.xpub_change if for_change else self.xpub_receive
        if xpub is None:
            xpub = bip32_public_derivation(self.xpub, "", "/%d"%for_change)
            if for_change:
                self.xpub_change = xpub
            else:
                self.xpub_receive = xpub
        _, _, _, c, cK = deserialize_xkey(xpub)
        cK, c = CKD_pub(cK, c, n)
        result = cK.encode('hex')
        return result

    def get_private_key(self, sequence, wallet, password):
        # Collect the private keys of every master key the wallet can unlock.
        out = []
        xpubs = self.get_master_pubkeys()
        roots = [k for k, v in wallet.master_public_keys.iteritems() if v in xpubs]
        for root in roots:
            xpriv = wallet.get_master_private_key(root, password)
            if not xpriv:
                continue
            _, _, _, c, k = deserialize_xkey(xpriv)
            pk = bip32_private_key( sequence, k, c )
            out.append(pk)
        return out

    def get_type(self):
        return _('Standard 1 of 1')

    def get_xpubkeys(self, for_change, n):
        # unsorted
        # Serialized as 'ff' + base58-decoded xpub (hex) + 2-byte indices.
        s = ''.join(map(lambda x: bitcoin.int_to_hex(x,2), (for_change,n)))
        xpubs = self.get_master_pubkeys()
        return map(lambda xpub: 'ff' + bitcoin.DecodeBase58Check(xpub).encode('hex') + s, xpubs)

    @classmethod
    def parse_xpubkey(self, pubkey):
        # Inverse of get_xpubkeys: returns (xkey, [for_change, n]).
        assert is_extended_pubkey(pubkey)
        pk = pubkey.decode('hex')
        pk = pk[1:]
        xkey = bitcoin.EncodeBase58Check(pk[0:78])
        dd = pk[78:]
        s = []
        while dd:
            n = int( bitcoin.rev_hex(dd[0:2].encode('hex')), 16)
            dd = dd[2:]
            s.append(n)
        assert len(s) == 2
        return xkey, s

    def get_name(self, k):
        return "Main account" if k == '0' else "Account " + k
class BIP32_Account_2of2(BIP32_Account):
    """Multisig account requiring 2 signatures from 2 master keys.

    Addresses are P2SH (version byte 5) hashes of a 2-of-2 multisig
    redeem script built from the pubkeys derived from both xpubs.
    """

    def __init__(self, v):
        BIP32_Account.__init__(self, v)
        self.xpub2 = v['xpub2']

    def dump(self):
        data = BIP32_Account.dump(self)
        data['xpub2'] = self.xpub2
        return data

    def get_pubkeys(self, for_change, n):
        # derive_pubkeys() already yields the full cosigner list here, which
        # Account stores as a single entry — so get_pubkey() returns it all.
        return self.get_pubkey(for_change, n)

    def derive_pubkeys(self, for_change, n):
        return map(lambda xpub: self.derive_pubkey_from_xpub(xpub, for_change, n),
                   self.get_master_pubkeys())

    def redeem_script(self, for_change, n):
        return Transaction.multisig_script(
            sorted(self.get_pubkeys(for_change, n)), 2)

    def pubkeys_to_address(self, pubkeys):
        script = Transaction.multisig_script(sorted(pubkeys), 2)
        return hash_160_to_bc_address(hash_160(script.decode('hex')), 5)

    def get_address(self, for_change, n):
        return self.pubkeys_to_address(self.get_pubkeys(for_change, n))

    def get_master_pubkeys(self):
        return [self.xpub, self.xpub2]

    def get_type(self):
        return _('Multisig 2 of 2')
class BIP32_Account_2of3(BIP32_Account_2of2):
    """Multisig account requiring 2 signatures out of 3 master keys.

    Everything except the extra xpub is inherited from the 2-of-2 class;
    the redeem script threshold stays at 2.
    """

    def __init__(self, v):
        BIP32_Account_2of2.__init__(self, v)
        self.xpub3 = v['xpub3']

    def dump(self):
        data = BIP32_Account_2of2.dump(self)
        data['xpub3'] = self.xpub3
        return data

    def get_master_pubkeys(self):
        return [self.xpub, self.xpub2, self.xpub3]

    def get_type(self):
        return _('Multisig 2 of 3')
| gpl-3.0 |
winksaville/gen_srcs | gen_srcs.py | 1 | 21491 | #!/usr/bin/env python3
# Generate C source files
import os.path, sys, argparse, shutil
version = '0.0.1'

# Command line: gen_srcs.py <builder> <hierarchy_path> <libs> <funcs-per-lib>
# All positional arguments use nargs=1, so main() reads them as 1-item lists.
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version',
                    action='store_true',
                    dest='print_version',
                    default=False,
                    help='Print version.')
parser.add_argument('builder', help='<builder: cmake meson>', nargs=1)
parser.add_argument('hierarchy_path', help='<file path>', nargs=1)
parser.add_argument('library_count', help='<Library count>', nargs=1)
parser.add_argument('function_count_per_library',
                    help='<function count per library>',
                    nargs=1)
class Header:
    '''
    Generate a C header file: include guard, includes, type and function
    declarations.
    '''

    # Public fields (class-level defaults; real values are set in __init__).
    file_path = ''
    comments = None
    includes = None
    sys_includes = None
    func_declarations = None

    def __init__(self,
                 file_path='',
                 comments=None,
                 includes=None,
                 sys_includes=None,
                 type_declarations=None,
                 func_declarations=None):
        '''
        file_path: path of the header file to generate.
        comments: comment lines emitted at the top of the file.
        includes: local headers (#include "...").
        sys_includes: system headers (#include <...>).
        type_declarations / func_declarations: C declarations without ';'.
        '''
        self.file_path = file_path
        self.comments = comments if comments else []
        self.includes = includes if includes else []
        self.sys_includes = sys_includes if sys_includes else []
        self.type_declarations = type_declarations if type_declarations else []
        self.func_declarations = func_declarations if func_declarations else []

    def get_name(self):
        '''Return the file-name portion of file_path.'''
        return os.path.basename(self.file_path)

    def append_func_declaration(self, func_sig):
        '''Add a function declaration (signature string, no ';').'''
        self.func_declarations.append(func_sig)

    def append_type_declaration(self, type_declaration):
        '''Add a type declaration (no ';').

        BUG FIX: this used to append the undefined name `func`, raising
        NameError whenever it was called; it now appends its argument.
        '''
        self.type_declarations.append(type_declaration)

    def write(self, f):
        '''Write the complete header to the open file object f.'''
        # Build the include-guard macro from every component of the absolute
        # path, e.g. /a/b/x.h -> __A_B_X_H__ (dots become underscores).
        path = os.path.abspath(self.file_path)
        parts = []
        while True:
            head, tail = os.path.split(path)
            if tail == '':
                break
            parts.append(tail)
            path = head
        conditional_name = '__'
        for part in reversed(parts):
            conditional_name += part.upper()
            conditional_name += '_'
        conditional_name += '_'
        conditional_name = conditional_name.replace('.', '_')

        for line in self.comments:
            print('// {0}'.format(line), file=f)
        print('', file=f)
        print('#ifndef {0}'.format(conditional_name), file=f)
        print('#define {0}'.format(conditional_name), file=f)
        print('', file=f)
        if self.includes:
            for line in self.includes:
                print('#include "{0}"'.format(line), file=f)
            print('', file=f)
        if self.sys_includes:
            for line in self.sys_includes:
                print('#include <{0}>'.format(line), file=f)
            print('', file=f)
        if self.type_declarations:
            for line in self.type_declarations:
                print('{0};'.format(line), file=f)
            print('', file=f)
        if self.func_declarations:
            for line in self.func_declarations:
                print('{0};'.format(line), file=f)
            print('', file=f)
        print('#endif // {0}'.format(conditional_name), file=f)
class Function:
    '''
    Generate a C function definition.
    '''

    # Public fields (class-level defaults; set per instance in __init__).
    name = None
    rettype = None
    params = None
    comments = None
    local_declarations = None
    body = None

    def __init__(self,
                 name='',
                 rettype='void',
                 params=None,
                 comments=None,
                 local_declarations=None,
                 body=None):
        '''
        name: function name.
        rettype: C return type.
        params: parameter declarations, e.g. ['int a', 'int b'].
        comments: comment lines written above the definition.
        local_declarations / body: C statements without trailing ';'.
        '''
        self.name = name
        self.rettype = rettype
        self.params = params if params else []
        self.comments = comments if comments else []
        self.local_declarations = local_declarations if local_declarations else []
        self.body = body if body else []

    def getName(self):
        return self.name

    def getParams(self):
        return self.params

    def func_sig(self):
        '''Return the C signature, e.g. "int foo(int a, int b)".'''
        s = '{0} {1}('.format(self.rettype, self.name)
        if self.params:
            s += ', '.join('{0}'.format(param) for param in self.params)
        else:
            s += 'void'  # explicit empty C parameter list
        s += ')'
        return s

    def write(self, f):
        '''Write the full function definition to the open file object f.'''
        for line in self.comments:
            print('// {0}'.format(line), file=f)
        print('{0} {{'.format(self.func_sig()), file=f)
        if self.local_declarations:
            for line in self.local_declarations:
                print(' {0};'.format(line), file=f)
            # BUG FIX: this separating blank line used to be written to
            # stdout (print('') without file=f); it now goes to the file.
            print('', file=f)
        if self.body:
            for line in self.body:
                print(' {0};'.format(line), file=f)
        print('}', file=f)
class LibrarySrc:
    '''
    Generate the C source file of one library and register every generated
    function's declaration in the companion Header.
    '''

    # Public fields (class-level defaults; set per instance in __init__).
    file_path = ''
    comments = None
    includes = None
    sys_includes = None
    type_declarations = None
    header = None
    func_range = range(0, 1)
    functions = None

    def __init__(self,
                 file_path='',
                 func_range=range(0, 1),
                 comments=None,
                 includes=None,
                 sys_includes=None,
                 type_declarations=None,
                 header=None):
        '''
        file_path: path of the .c file to generate.
        func_range: indices of the funcN functions to synthesize.
        header: Header instance that receives the declarations.
        '''
        self.file_path = file_path
        self.func_range = func_range
        self.comments = comments or []
        self.includes = includes or []
        self.sys_includes = sys_includes or []
        self.type_declarations = type_declarations or []
        self.header = header
        self.functions = []

    def __append_func(self, func):
        # Declare in the header, keep the definition for the source file.
        self.header.append_func_declaration(func.func_sig())
        self.functions.append(func)

    def getFunctions(self):
        return self.functions

    def write(self, f):
        '''Synthesize the functions and write the whole source file to f.'''
        # One trivial printf function per index in func_range.
        for idx in self.func_range:
            func_name = 'func{0}'.format(idx)
            self.__append_func(Function(
                comments=['{0}'.format(func_name)],
                rettype='void',
                name=func_name,
                params=[],
                body=['printf("{0}\\n")'.format(func_name)]))

        for text in self.comments:
            print('// {0}'.format(text), file=f)
        print('', file=f)
        if self.includes:
            for text in self.includes:
                print('#include "{0}"'.format(text), file=f)
            print('', file=f)
        if self.sys_includes:
            for text in self.sys_includes:
                print('#include <{0}>'.format(text), file=f)
            print('', file=f)
        if self.type_declarations:
            for text in self.type_declarations:
                print('{0};'.format(text), file=f)
            print('', file=f)
        print('', file=f)
        if self.functions:
            for func in self.functions:
                func.write(f)
                print('', file=f)
            print('', file=f)
class Library:
    '''
    Generate one C library: <lib_path>/include/<name>.h and
    <lib_path>/src/<name>.c, where <name> is basename(lib_path).
    '''

    # Public fields
    lib_path = None
    func_range = None
    __lib_header = None
    __lib_source = None

    def __init__(self, path='', func_range=range(0, 1)):
        '''
        path: directory of the library; its basename is the library name.
        func_range: indices of the generated funcN functions.
        '''
        self.lib_path = path
        self.func_range = func_range
        self.__lib_header = None
        self.__lib_source = None

    def getLibPath(self):
        return self.lib_path

    def getLibName(self):
        return os.path.basename(self.lib_path)

    def getLibHeaderName(self):
        # Only valid after create() has built the header.
        return os.path.basename(self.__lib_header.get_name())

    def getFunctions(self):
        # Only valid after create() has written the source file.
        return self.__lib_source.getFunctions()

    def create(self):
        '''
        Create a library with the name defined by basename(lib_path);
        it will include a src/ and an include/ directory.
        '''
        lib_name = self.getLibName()
        os.makedirs(self.lib_path, exist_ok=True)

        header_path = self.lib_path + '/include/' + lib_name + '.h'
        os.makedirs(os.path.dirname(header_path), exist_ok=True)
        src_path = self.lib_path + '/src/' + lib_name + '.c'
        os.makedirs(os.path.dirname(src_path), exist_ok=True)

        self.__lib_header = Header(
            file_path=header_path,
            comments=['header....'],
            sys_includes=['stdio.h'],
            type_declarations=['typedef int {0}_status'.format(lib_name)])
        self.__lib_source = LibrarySrc(file_path=src_path,
                                       func_range=self.func_range,
                                       comments=['Test library 1'],
                                       includes=[self.__lib_header.get_name()],
                                       header=self.__lib_header)

        # IMPROVEMENT: use context managers so both files are closed even if
        # write() raises (they were previously opened/closed manually).
        with open(src_path, 'w') as f, open(header_path, 'w') as h:
            self.__lib_source.write(f)
            self.__lib_header.write(h)
class Application:
    '''
    Generate the C test application that links against every library and
    calls every generated function.
    '''

    __app_path = None
    __libraries = None

    def __init__(self, path='', libraries=None):
        '''
        path: directory of the application.
        libraries: Library instances the app depends on (already created).
        '''
        self.__app_path = path
        self.__libraries = libraries if libraries else []

    def getAppName(self):
        return os.path.basename(self.__app_path)

    def getLibraries(self):
        return self.__libraries

    def getAppPath(self):
        return self.__app_path

    def create(self):
        '''
        Create a test application with a src/main.c file that is dependent
        upon all of the libraries and invokes every library function.

        Raises Exception if any library function takes parameters, since
        main() can only call zero-argument functions blindly.
        '''
        # Collect the #include lines and the call statements for main().
        includes = []
        includes.append('<stdio.h>')
        body = []
        for lib in self.__libraries:
            includes.append('"{0}"'.format(lib.getLibHeaderName()))
            for func in lib.getFunctions():
                if len(func.params) != 0:
                    raise Exception(
                        'Only handles functions with no parameters: {0}:{1}'.format(
                            lib.getLibName(), func.func_sig()))
                body.append('{0}();'.format(func.getName()))

        # Write the test app.
        src_path = self.__app_path + '/src/main.c'
        os.makedirs(os.path.dirname(src_path), exist_ok=True)
        # IMPROVEMENT: context manager instead of manual open/close so the
        # file is closed even if a write fails.
        with open(src_path, 'w') as f:
            for inc in includes:
                print('#include {0}'.format(inc), file=f)
            print('int main(void) {', file=f)
            for statement in body:
                print(' {0}'.format(statement), file=f)
            print(' return 0; // ok', file=f)
            print('}', file=f)
class MesonBuilder:
    # Emits meson.build files for the generated hierarchy.  The beg*/end*
    # pairs bracket the aggregate files that list the per-app/per-lib
    # subdirectories.
    __libraries_file = None  # open libs/meson.build between begLibBuilder/endLibBuilder
    __apps_file = None       # open apps/meson.build between begAppBuilder/endAppBuilder

    def __init__(self):
        pass

    def begRoot(self, root_path, applications_path, libraries_path):
        # Top-level meson.build: project setup plus the two subdir() lines.
        apps_rel_path = os.path.relpath(applications_path, root_path)
        libs_rel_path = os.path.relpath(libraries_path, root_path)
        r = open(root_path + '/meson.build', 'w')
        print("project('hierarchy', 'c')\n"
              "add_global_arguments('-std=c99', language : 'c')\n"
              "\n"
              "subdir(\'{0}\')\n"
              "subdir(\'{1}\')\n".format(libs_rel_path, apps_rel_path),
              file=r)
        r.close()

    def endRoot(self):
        pass

    def begAppBuilder(self, app_path):
        apps_path = app_path + '/meson.build'
        self.__apps_file = open(apps_path, 'w')

    def endAppBuilder(self):
        self.__apps_file.close()

    def addAppToAppBuilder(self, app):
        # Per-app meson.build declaring the executable and its lib deps.
        builder_path = app.getAppPath() + '/meson.build'
        os.makedirs(os.path.dirname(builder_path), exist_ok=True)
        b = open(builder_path, 'w')
        print("executable('{0}',\n"
              " 'src/main.c',\n"
              " install : true,".format(app.getAppName()),
              file=b)
        print(" dependencies : [", file=b)
        for lib in app.getLibraries():
            print(' lib{0}_dep,'.format(lib.getLibName()), file=b)
        print(" ])", file=b)
        # Add a line for this library in the parent directory
        print('subdir(\'{0}\')'.format(app.getAppName()), file=self.__apps_file)
        b.close()

    def begLibBuilder(self, libraries_path):
        libraries_path = libraries_path + '/meson.build'
        self.__libraries_file = open(libraries_path, 'w')

    def endLibBuilder(self):
        self.__libraries_file.close()

    def addLibToLibBuilder(self, library):
        # Per-lib meson.build: static_library plus a declare_dependency so
        # apps can link with lib<name>_dep.
        builder_path = library.getLibPath() + '/meson.build'
        os.makedirs(os.path.dirname(builder_path), exist_ok=True)
        b = open(builder_path, 'w')
        print(
            "incs = include_directories('include')\n"
            "lib{0} = static_library('{0}', 'src/{0}.c', include_directories: incs)\n"
            "lib{0}_dep = declare_dependency(include_directories : incs, link_with : lib{0})\n".format(
                library.getLibName()),
            file=b)
        # Add a line for this library in the parent directory
        print('subdir(\'{0}\')'.format(library.getLibName()),
              file=self.__libraries_file)
        b.close()
class CMakeBuilder:
    # Emits CMakeLists.txt files for the generated hierarchy; mirrors
    # MesonBuilder's beg*/end* protocol.
    __libraries_file = None  # open libs/CMakeLists.txt between begLibBuilder/endLibBuilder
    __apps_file = None       # open apps/CMakeLists.txt between begAppBuilder/endAppBuilder

    def __init__(self):
        pass

    def begRoot(self, root_path, applications_path, libraries_path):
        # Top-level CMakeLists.txt: project setup, optional ccache launcher,
        # then the libs and apps subdirectories.
        apps_rel_path = os.path.relpath(applications_path, root_path)
        libs_rel_path = os.path.relpath(libraries_path, root_path)
        r = open(root_path + '/CMakeLists.txt', 'w')
        print('cmake_minimum_required (VERSION 3.2)\n'
              'project("hierarchy")\n'
              'enable_language(C)\n'
              '\n'
              'find_program(CCACHE_FOUND ccache)\n'
              'if(CCACHE_FOUND)\n'
              ' set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)\n'
              ' set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)\n'
              ' endif(CCACHE_FOUND)\n'
              '\n'
              'add_subdirectory("{0}")\n'
              'add_subdirectory("{1}")\n'.format(libs_rel_path, apps_rel_path),
              file=r)
        r.close()

    def endRoot(self):
        pass

    def begAppBuilder(self, app_path):
        apps_path = app_path + '/CMakeLists.txt'
        self.__apps_file = open(apps_path, 'w')

    def endAppBuilder(self):
        self.__apps_file.close()

    def addAppToAppBuilder(self, app):
        # Per-app CMakeLists.txt declaring the executable and its lib deps.
        builder_path = app.getAppPath() + '/CMakeLists.txt'
        os.makedirs(os.path.dirname(builder_path), exist_ok=True)
        b = open(builder_path, 'w')
        print('add_executable({0} src/main.c)\n'
              'target_link_libraries({0}'.format(app.getAppName()),
              file=b)
        for lib in app.getLibraries():
            print(' {0}'.format(lib.getLibName()), file=b)
        print(")", file=b)
        # Add a line for this library in the parent directory
        print('add_subdirectory("{0}")'.format(app.getAppName()),
              file=self.__apps_file)
        b.close()

    def begLibBuilder(self, libraries_path):
        libraries_path = libraries_path + '/CMakeLists.txt'
        self.__libraries_file = open(libraries_path, 'w')

    def endLibBuilder(self):
        self.__libraries_file.close()

    def addLibToLibBuilder(self, library):
        # Per-lib CMakeLists.txt: static library with a public include dir.
        builder_path = library.getLibPath() + '/CMakeLists.txt'
        os.makedirs(os.path.dirname(builder_path), exist_ok=True)
        b = open(builder_path, 'w')
        print('add_library({0} STATIC\n'
              ' src/{0}.c\n'
              ')\n'
              'target_include_directories({0} PUBLIC "include")\n'.format(
                  library.getLibName()),
              file=b)
        # Add a line for this library in the parent directory
        print('add_subdirectory("{0}")'.format(library.getLibName()),
              file=self.__libraries_file)
        b.close()
class CraftrBuilder(object):
    # Emits Craftfile build files; the shared logic lives in static template
    # files copied from templates/craftr into the hierarchy root.
    def begRoot(self, root_path, applications_path, libraries_path):
        from os.path import join, relpath, isdir
        apps_path = relpath(applications_path, root_path)
        libs_path = relpath(libraries_path, root_path)
        # NOTE(review): apps_path/libs_path are computed but unused —
        # presumably the copied templates hard-code the layout; confirm.
        shutil.copy(template('craftr/hierarchy.craftr'), join(root_path, 'Craftfile'))
        tmpdir = join(root_path, '.craftr')
        if not isdir(tmpdir):
            os.mkdir(tmpdir)
        shutil.copy(template('craftr/libs.template.craftr'), tmpdir)
        shutil.copy(template('craftr/apps.template.craftr'), tmpdir)
        shutil.copy(template('craftr/utils.ccache.craftr'), tmpdir)

    def endRoot(self):
        pass

    def begAppBuilder(self, app_path):
        # Aggregate Craftfile listing the app modules; closed in endAppBuilder.
        self._apps_file = open(os.path.join(app_path, 'Craftfile'), 'w')
        self._apps_file.write('# craftr_module(apps)\n')

    def endAppBuilder(self):
        self._apps_file.close()
        del self._apps_file

    def addAppToAppBuilder(self, app):
        # Register the app in the aggregate file and emit its own Craftfile,
        # which extends the shared apps.template module.
        self._apps_file.write("load_module('apps.{0}')\n".format(app.getAppName()))
        with open(os.path.join(app.getAppPath(), 'Craftfile'), 'w') as fp:
            requires = []
            for lib in app.getLibraries():
                requires.append('libs.' + lib.getLibName())
            fp.write('# craftr_module(apps.{0})\n'.format(app.getAppName()))
            fp.write('requires = {0!r}\n'.format(requires))
            fp.write("extends('apps.template')\n")

    def begLibBuilder(self, libraries_path):
        # Aggregate Craftfile listing the lib modules; closed in endLibBuilder.
        self._libs_file = open(os.path.join(libraries_path, 'Craftfile'), 'w')
        self._libs_file.write('# craftr_module(libs)\n')

    def endLibBuilder(self):
        self._libs_file.close()
        del self._libs_file

    def addLibToLibBuilder(self, lib):
        # Register the library and emit its Craftfile extending libs.template.
        self._libs_file.write("load_module('libs.{0}')\n".format(lib.getLibName()))
        with open(os.path.join(lib.getLibPath(), 'Craftfile'), 'w') as fp:
            fp.write('# craftr_module(libs.{0})\n'.format(lib.getLibName()))
            fp.write("extends('libs.template')\n")
class Hierarchy:
    '''
    Generate a hierarchy of C code: `lib_count` static libraries with
    `func_count_per_lib` functions each, plus one test application that
    invokes every generated function, wired together with build files
    emitted by the given builder object.
    '''

    hierarchy_path = None
    lib_count = None
    func_count_per_lib = None
    __builder = None

    def __init__(self, hierarchy_path, lib_count, func_count_per_lib, builder):
        '''
        hierarchy_path: root directory to generate into.
        lib_count / func_count_per_lib: sizes (accepted as str or int).
        builder: object implementing the beg*/add*/end* builder protocol.
        '''
        self.hierarchy_path = hierarchy_path
        self.lib_count = int(lib_count)
        self.func_count_per_lib = int(func_count_per_lib)
        self.__builder = builder

    def create(self):
        '''
        Create all source files and build-system files.
        '''
        # Create root
        os.makedirs(self.hierarchy_path, exist_ok=True)

        # The apps and libs directories
        apps_path = self.hierarchy_path + '/apps'
        libraries_path = self.hierarchy_path + '/libs'

        # Create the libraries; library LNNN holds funcN+1..funcN+count so
        # function names are globally unique.
        libraries = []
        for i in range(0, self.lib_count):
            base = i * self.func_count_per_lib
            lib_path = libraries_path + '/L{:03d}'.format(base)
            lib = Library(
                path=lib_path,
                func_range=range(base + 1, base + self.func_count_per_lib + 1))
            lib.create()
            libraries.append(lib)

        # Create a test app that invokes all of the library functions
        apps = []
        app_path = apps_path + '/testapp'
        app = Application(app_path, libraries=libraries)
        app.create()
        apps.append(app)

        # Emit the build files for the chosen builder.
        self.__builder.begRoot(self.hierarchy_path, apps_path, libraries_path)

        self.__builder.begAppBuilder(apps_path)
        for app in apps:
            self.__builder.addAppToAppBuilder(app)
        # BUG FIX: endAppBuilder was referenced without parentheses, so it
        # was never called and the apps build file was never closed/flushed.
        self.__builder.endAppBuilder()

        self.__builder.begLibBuilder(libraries_path)
        for lib in libraries:
            self.__builder.addLibToLibBuilder(lib)
        self.__builder.endLibBuilder()

        self.__builder.endRoot()
def template(path):
    ''' Returns the path for the template at the specified *path*.
    The *path* must be relative to the `templates/` directory in this
    project; a ValueError is raised for absolute paths. '''
    if os.path.isabs(path):
        raise ValueError('expected relative path')
    templates_dir = os.path.join(os.path.dirname(__file__), 'templates')
    return os.path.join(templates_dir, path)
def main(args):
    '''
    Main program: parse the command line, pick the builder, and generate
    the hierarchy.  Returns the process exit code (0 on success).

    args: full argv including the program name (args[0] is skipped).
    '''
    if sys.version_info < (3, 4):
        print('Need python 3.4+ current version is %s' % sys.version)
        sys.exit(1)

    options = parser.parse_args(args[1:])
    # All positionals are declared with nargs=1, so each is a 1-item list.
    hierarchy_path = options.hierarchy_path
    library_count = options.library_count
    function_count_per_library = options.function_count_per_library

    if options.print_version:
        print('Version %s' % version)
        return 0

    builders = {'cmake': CMakeBuilder(), 'meson': MesonBuilder(),
                'craftr': CraftrBuilder()}
    # IMPROVEMENT: catch only the failed dict lookup; the previous bare
    # `except:` also swallowed KeyboardInterrupt and genuine bugs.
    try:
        builder = builders[options.builder[0]]
    except KeyError:
        print("option builder is '{0}' must be 'cmake', "
              "'craftr' or 'meson'".format(options.builder))
        return 1

    hierarchy = Hierarchy(hierarchy_path[0], library_count[0],
                          function_count_per_library[0], builder)
    hierarchy.create()
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[:]))
| bsd-2-clause |
smurn/cprojecttemplate | gmock-1.7.0/scripts/generator/cpp/keywords.py | 1157 | 2004 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
    # Python 3.x
    import builtins
except ImportError:
    # Python 2.x
    import __builtin__ as builtins


if not hasattr(builtins, 'set'):
    # Nominal support for Python 2.3.
    from sets import Set as set


# C++ keywords grouped by category; ALL (below) is the union of every group.
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())

CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())

OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())

CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())

ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
  """Returns True if `token` is any recognized C++ keyword."""
  is_keyword = token in ALL
  return is_keyword
def IsBuiltinType(token):
  """Returns True if `token` names a built-in type or a type modifier."""
  # 'virtual' and 'inline' only qualify methods; they never act as types.
  method_only_modifiers = ('virtual', 'inline')
  if token in method_only_modifiers:
    return False
  if token in TYPES:
    return True
  return token in TYPE_MODIFIERS
| bsd-3-clause |
apark263/tensorflow | tensorflow/python/tools/optimize_for_inference_test.py | 12 | 13544 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
class OptimizeForInferenceTest(test.TestCase):
  """Tests for the optimize_for_inference graph-rewriting library."""

  def create_node_def(self, op, name, inputs):
    """Builds a NodeDef with the given op type, node name and input names."""
    new_node = node_def_pb2.NodeDef()
    new_node.op = op
    new_node.name = name
    for input_name in inputs:
      new_node.input.extend([input_name])
    return new_node

  def create_constant_node_def(self, name, value, dtype, shape=None):
    """Builds a Const NodeDef holding `value` with the given dtype/shape."""
    node = self.create_node_def("Const", name, [])
    self.set_attr_dtype(node, "dtype", dtype)
    self.set_attr_tensor(node, "value", value, dtype, shape)
    return node

  def set_attr_dtype(self, node, key, value):
    """Sets the attr `key` of `node` to the DataType `value`."""
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(type=value.as_datatype_enum))

  def set_attr_tensor(self, node, key, value, dtype, shape=None):
    """Sets the attr `key` of `node` to a TensorProto built from `value`."""
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            value, dtype=dtype, shape=shape)))

  def testOptimizeForInference(self):
    """Checks that unused nodes and CheckNumerics/Identity pairs are
    stripped, leaving only the constants and the Add reachable from the
    requested output."""
    self.maxDiff = 1000
    unused_constant_name = "unused_constant"
    unconnected_add_name = "unconnected_add"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    a_check_name = "a_check"
    b_check_name = "b_check"
    a_identity_name = "a_identity"
    b_identity_name = "b_identity"
    add_name = "add"
    unused_output_add_name = "unused_output_add"
    graph_def = graph_pb2.GraphDef()
    # Nodes below are unreachable from "add" and should be pruned.
    unused_constant = self.create_constant_node_def(
        unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([unused_constant])
    unconnected_add_node = self.create_node_def(
        "Add", unconnected_add_name,
        [unused_constant_name, unused_constant_name])
    self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
    graph_def.node.extend([unconnected_add_node])
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant])
    # CheckNumerics + Identity (with a control edge) should be folded away.
    a_check_node = self.create_node_def("CheckNumerics", a_check_name,
                                        [a_constant_name])
    graph_def.node.extend([a_check_node])
    a_identity_node = self.create_node_def(
        "Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
    graph_def.node.extend([a_identity_node])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant])
    b_check_node = self.create_node_def("CheckNumerics", b_check_name,
                                        [b_constant_name])
    graph_def.node.extend([b_check_node])
    b_identity_node = self.create_node_def(
        "Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
    graph_def.node.extend([b_identity_node])
    add_node = self.create_node_def("Add", add_name,
                                    [a_identity_name, b_identity_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    graph_def.node.extend([add_node])
    # Consumes "add" but is not a requested output, so it should be pruned.
    unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
                                                  [add_name, b_constant_name])
    self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
    graph_def.node.extend([unused_output_add_node])
    # Expected result: just the two constants feeding the Add directly.
    expected_output = graph_pb2.GraphDef()
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant])
    add_node = self.create_node_def("Add", add_name,
                                    [a_constant_name, b_constant_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    expected_output.node.extend([add_node])
    output = optimize_for_inference_lib.optimize_for_inference(
        graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
    self.assertProtoEquals(expected_output, output)

  @test_util.run_deprecated_v1
  def testFoldBatchNorms(self):
    """fold_batch_norms removes BatchNormWithGlobalNormalization nodes
    while preserving the computed values."""
    with self.cached_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      conv_op = nn_ops.conv2d(
          input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
      mean_op = constant_op.constant(
          np.array([10, 20]), shape=[2], dtype=dtypes.float32)
      variance_op = constant_op.constant(
          np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
      beta_op = constant_op.constant(
          np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
      gamma_op = constant_op.constant(
          np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
      # The non-fused batch-norm op requires an old producer version.
      test_util.set_producer_version(ops.get_default_graph(), 8)
      gen_nn_ops._batch_norm_with_global_normalization(
          conv_op,
          mean_op,
          variance_op,
          beta_op,
          gamma_op,
          0.00001,
          False,
          name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
        original_graph_def)
    with self.cached_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)

  @test_util.run_deprecated_v1
  def testFoldFusedBatchNorms(self):
    """fold_batch_norms removes FusedBatchNorm nodes for both data formats
    while preserving the computed values (within tolerance)."""
    for data_format, use_gpu in [("NHWC", False), ("NCHW", True)]:
      with self.cached_session(use_gpu=use_gpu) as sess:
        inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
        input_op = constant_op.constant(
            np.array(inputs),
            shape=[1, 1, 6, 2] if data_format == "NHWC" else [1, 2, 1, 6],
            dtype=dtypes.float32)
        weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
        weights_op = constant_op.constant(
            np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
        conv_op = nn_ops.conv2d(
            input_op,
            weights_op, [1, 1, 1, 1],
            padding="SAME",
            data_format=data_format,
            name="conv_op")
        mean_op = constant_op.constant(
            np.array([10, 20]), shape=[2], dtype=dtypes.float32)
        variance_op = constant_op.constant(
            np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
        beta_op = constant_op.constant(
            np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
        gamma_op = constant_op.constant(
            np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
        ops.get_default_graph().graph_def_versions.producer = 9
        gen_nn_ops._fused_batch_norm(
            conv_op,
            gamma_op,
            beta_op,
            mean_op,
            variance_op,
            0.00001,
            is_training=False,
            data_format=data_format,
            name="output")
        original_graph_def = sess.graph_def
        original_result = sess.run(["output:0"])
        optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
            original_graph_def)
        _ = importer.import_graph_def(
            optimized_graph_def, input_map={}, name="optimized")
        optimized_result = sess.run(["optimized/output:0"])
        self.assertAllClose(
            original_result, optimized_result, rtol=1e-04, atol=1e-06)
        for node in optimized_graph_def.node:
          self.assertNotEqual("FusedBatchNorm", node.op)

  @test_util.run_deprecated_v1
  def testFuseResizePadAndConv(self):
    """Resize + MirrorPad + Conv2D collapse into one fused op."""
    with self.cached_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])
    with self.cached_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("Conv2D", node.op)
        self.assertNotEqual("MirrorPad", node.op)
        self.assertNotEqual("ResizeBilinear", node.op)

  @test_util.run_deprecated_v1
  def testFuseResizeAndConv(self):
    """Resize + Conv2D (no pad) collapse into one fused op."""
    with self.cached_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])
    with self.cached_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("Conv2D", node.op)
        self.assertNotEqual("MirrorPad", node.op)

  @test_util.run_deprecated_v1
  def testFusePadAndConv(self):
    """MirrorPad + Conv2D (no resize) collapse into one fused op."""
    with self.cached_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])
    with self.cached_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("Conv2D", node.op)
        self.assertNotEqual("ResizeBilinear", node.op)
# Run all tests when this file is executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
andrewcmyers/tensorflow | tensorflow/python/framework/op_def_library_test.py | 92 | 65609 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.op_def_library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework.op_def_library import OpDefLibrary
from tensorflow.python.platform import googletest
def _unknown_shape(op):
  """Shape function for use with ops whose output shapes are unknown."""
  shapes = []
  for _ in op.outputs:
    shapes.append(tensor_shape.unknown_shape())
  return shapes
# NOTE(mrry): Dummy shape registrations for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
# Every test op gets the same "unknown shape" function.
ops.RegisterShape("Attr")(_unknown_shape)
ops.RegisterShape("AttrBool")(_unknown_shape)
ops.RegisterShape("AttrBoolList")(_unknown_shape)
ops.RegisterShape("AttrDefault")(_unknown_shape)
ops.RegisterShape("AttrEmptyListDefault")(_unknown_shape)
ops.RegisterShape("AttrEnum")(_unknown_shape)
ops.RegisterShape("AttrEnumList")(_unknown_shape)
ops.RegisterShape("AttrFloat")(_unknown_shape)
ops.RegisterShape("AttrListDefault")(_unknown_shape)
ops.RegisterShape("AttrListMin")(_unknown_shape)
ops.RegisterShape("AttrMin")(_unknown_shape)
ops.RegisterShape("AttrShape")(_unknown_shape)
ops.RegisterShape("AttrShapeList")(_unknown_shape)
ops.RegisterShape("AttrPartialShape")(_unknown_shape)
ops.RegisterShape("AttrPartialShapeList")(_unknown_shape)
ops.RegisterShape("AttrTypeDefault")(_unknown_shape)
ops.RegisterShape("AttrListTypeDefault")(_unknown_shape)
ops.RegisterShape("Binary")(_unknown_shape)
ops.RegisterShape("ComplexStruct")(_unknown_shape)
ops.RegisterShape("InPolymorphicTwice")(_unknown_shape)
ops.RegisterShape("MixedStruct")(_unknown_shape)
ops.RegisterShape("NInPolymorphicTwice")(_unknown_shape)
ops.RegisterShape("NInTwice")(_unknown_shape)
ops.RegisterShape("NInTwoTypeVariables")(_unknown_shape)
ops.RegisterShape("NIntsIn")(_unknown_shape)
ops.RegisterShape("NIntsOut")(_unknown_shape)
ops.RegisterShape("NIntsOutDefault")(_unknown_shape)
ops.RegisterShape("NPolymorphicIn")(_unknown_shape)
ops.RegisterShape("NPolymorphicOut")(_unknown_shape)
ops.RegisterShape("NPolymorphicOutDefault")(_unknown_shape)
ops.RegisterShape("NPolymorphicRestrictIn")(_unknown_shape)
ops.RegisterShape("NPolymorphicRestrictOut")(_unknown_shape)
ops.RegisterShape("OutT")(_unknown_shape)
ops.RegisterShape("OutTypeList")(_unknown_shape)
ops.RegisterShape("OutTypeListRestrict")(_unknown_shape)
ops.RegisterShape("Polymorphic")(_unknown_shape)
ops.RegisterShape("PolymorphicDefaultOut")(_unknown_shape)
ops.RegisterShape("PolymorphicOut")(_unknown_shape)
ops.RegisterShape("RefIn")(_unknown_shape)
ops.RegisterShape("RefOut")(_unknown_shape)
ops.RegisterShape("ReservedAttr")(_unknown_shape)
ops.RegisterShape("ReservedInput")(_unknown_shape)
ops.RegisterShape("Restrict")(_unknown_shape)
ops.RegisterShape("Simple")(_unknown_shape)
ops.RegisterShape("SimpleStruct")(_unknown_shape)
ops.RegisterShape("TwoRefsIn")(_unknown_shape)
ops.RegisterShape("TypeList")(_unknown_shape)
ops.RegisterShape("TypeListRestrict")(_unknown_shape)
ops.RegisterShape("TypeListTwice")(_unknown_shape)
class OpDefLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
  """Creates a fresh OpDefLibrary and Graph and registers two basic ops."""
  self._lib = OpDefLibrary()
  self._g = ops.Graph()
  # Enter the graph context manually so tearDown can exit it symmetrically.
  self._default_graph_controller = self._g.as_default()
  self._default_graph_controller.__enter__()
  self._add_op("name: 'Simple' input_arg { name: 'a' type: DT_INT32 } "
               "output_arg { name: 'out' type: DT_FLOAT }")
  self._add_op("name: 'OutT' output_arg { name: 'a' type_attr: 'T' } "
               "attr { name: 'T' type: 'type' }")
def tearDown(self):
  """Exits the default-graph context entered in setUp."""
  self._default_graph_controller.__exit__(None, None, None)
def _add_op(self, ascii):  # pylint: disable=redefined-builtin
  """Parses a text-format OpDef proto and registers it with the library."""
  op_def = op_def_pb2.OpDef()
  text_format.Merge(ascii, op_def)
  self._lib.add_op(op_def)
def Tensor(self, t, name="in"):
  """Returns an output tensor of dtype `t` via the polymorphic 'OutT' op."""
  return self._lib.apply_op("OutT", T=t, name=name)
def testNoRegisteredOpFails(self):
  """Applying an unregistered op name raises RuntimeError."""
  with self.assertRaises(RuntimeError) as cm:
    self._lib.apply_op("unknown")
  self.assertEqual(str(cm.exception), "Unrecognized Op name unknown")
def testAddOpValidation(self):
  """add_op rejects OpDefs whose args reference missing or mistyped attrs,
  or carry the wrong number of type fields."""
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'MissingTypeAttr' "
                 "input_arg { name: 'a' type_attr: 'T' } ")
  self.assertEqual(str(cm.exception),
                   "Inconsistent OpDef for 'MissingTypeAttr', "
                   "missing attr 'T'")
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'BadTypeAttr' "
                 "output_arg { name: 'a' type_attr: 'T' } "
                 "attr { name: 'T' type: 'int' }")
  self.assertEqual(
      str(cm.exception),
      "Attr 'T' of 'BadTypeAttr' used as a type_attr but has type int")
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'MissingNumberAttr' "
                 "input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } ")
  self.assertEqual(str(cm.exception),
                   "Inconsistent OpDef for 'MissingNumberAttr', "
                   "missing attr 'N'")
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'BadNumberAttr' "
                 "output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
                 "attr { name: 'N' type: 'type' }")
  self.assertEqual(
      str(cm.exception),
      "Attr 'N' of 'BadNumberAttr' used as a number_attr but has type type")
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'TwoTypesA' "
                 "input_arg { name: 'a' type: DT_INT32 type_attr: 'T' } "
                 "attr { name: 'T' type: 'type' }")
  self.assertEqual(str(cm.exception),
                   "Arg 'a' of 'TwoTypesA' must have one type field not 2")
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'TwoTypesB' "
                 "input_arg { name: 'a' type: DT_INT32 type_list_attr: 'T' } "
                 "attr { name: 'T' type: 'list(type)' }")
  self.assertEqual(str(cm.exception),
                   "Arg 'a' of 'TwoTypesB' must have one type field not 2")
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'ThreeTypes' "
                 "input_arg { name: 'a' type: DT_INT32 type_attr: 'T' "
                 "type_list_attr: 'U' } "
                 "attr { name: 'T' type: 'type' } "
                 "attr { name: 'U' type: 'list(type)' }")
  self.assertEqual(str(cm.exception),
                   "Arg 'a' of 'ThreeTypes' must have one type field not 3")
  with self.assertRaises(TypeError) as cm:
    self._add_op("name: 'NoTypes' output_arg { name: 'a' } ")
  self.assertEqual(str(cm.exception),
                   "Arg 'a' of 'NoTypes' must have one type field not 0")
def testSimple(self):
  """A fixed-type op infers dtype, uniquifies names, and accepts lists."""
  out = self._lib.apply_op("Simple", a=3)
  self.assertEqual(dtypes.float32, out.dtype)
  self.assertProtoEquals("""
      name: 'Simple' op: 'Simple' input: 'Simple/a'
      """, out.op.node_def)
  out = self._lib.apply_op("Simple", a=4)
  self.assertProtoEquals("""
      name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'
      """, out.op.node_def)
  out = self._lib.apply_op("Simple", a=5, name="named")
  self.assertProtoEquals("""
      name: 'named' op: 'Simple' input: 'named/a'
      """, out.op.node_def)
  out = self._lib.apply_op("Simple", a=[[1, 2, 3], [4, 5, 6]], name="two_d")
  self.assertProtoEquals("""
      name: 'two_d' op: 'Simple' input: 'two_d/a'
      """, out.op.node_def)
def testSimpleFailures(self):
  """Bad values, wrong dtypes, extra/missing kwargs all raise TypeError
  with precise messages."""
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Simple", a="Bad string")
  self.assertEqual(str(cm.exception),
                   "Expected int32 passed to parameter 'a' of op 'Simple', "
                   "got 'Bad string' of type 'str' instead.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Simple", a=self.Tensor(dtypes.string))
  self.assertEqual(str(cm.exception),
                   "Input 'a' of 'Simple' Op has type string "
                   "that does not match expected type of int32.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Simple", a=6, extra="bogus")
  self.assertEqual(str(cm.exception),
                   "apply_op() got unexpected keyword arguments: extra")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Simple", a=6, extra1="bogus", extra2="also_bogus")
  self.assertEqual(str(cm.exception),
                   "apply_op() got unexpected keyword arguments: extra1, "
                   "extra2")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Simple")
  self.assertEqual(str(cm.exception), "No argument for input a")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Simple", wrong=7)
  self.assertEqual(str(cm.exception), "No argument for input a")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Simple", a={"label": 1})
  self.assertEqual(str(cm.exception),
                   "Expected int32 passed to parameter 'a' of op 'Simple', "
                   "got {'label': 1} of type 'dict' instead.")
def testReservedInput(self):
  """An input named like a Python keyword is passed with a trailing '_'."""
  self._add_op("name: 'ReservedInput' "
               "input_arg { name: 'input' type: DT_INT32 } ")
  op = self._lib.apply_op("ReservedInput", input_=7, name="x")
  self.assertProtoEquals("""
      name: 'x' op: 'ReservedInput' input: 'x/input'
      """, op.node_def)
def testPolymorphic(self):
  """A type_attr 'T' is inferred from the input and cannot be passed
  explicitly alongside it."""
  self._add_op("name: 'Polymorphic' "
               "input_arg { name: 'a' type_attr: 'T' } "
               "output_arg { name: 'out' type_attr: 'T' } "
               "attr { name: 'T' type: 'type' }")
  out = self._lib.apply_op("Polymorphic", a=7, name="p")
  self.assertEqual(dtypes.int32, out.dtype)
  self.assertProtoEquals("""
      name: 'p' op: 'Polymorphic' input: 'p/a'
      attr { key: 'T' value { type: DT_INT32 } }
      """, out.op.node_def)
  out = self._lib.apply_op("Polymorphic", a="s", name="q")
  self.assertEqual(dtypes.string, out.dtype)
  self.assertProtoEquals("""
      name: 'q' op: 'Polymorphic' input: 'q/a'
      attr { key: 'T' value { type: DT_STRING } }
      """, out.op.node_def)
  out = self._lib.apply_op("Polymorphic", a=["s", "t", "u"], name="r")
  self.assertEqual(dtypes.string, out.dtype)
  self.assertProtoEquals("""
      name: 'r' op: 'Polymorphic' input: 'r/a'
      attr { key: 'T' value { type: DT_STRING } }
      """, out.op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Polymorphic", a="s", T=dtypes.string)
  self.assertEqual(str(cm.exception),
                   "Should not specify value for inferred attr 'T'.")
def testPolymorphicOut(self):
  """With no input to infer from, the output type attr must be given."""
  self._add_op("name: 'PolymorphicOut' "
               "output_arg { name: 'out' type_attr: 'T' } "
               "attr { name: 'T' type: 'type' }")
  out = self._lib.apply_op("PolymorphicOut", T=dtypes.int32, name="p")
  self.assertEqual(dtypes.int32, out.dtype)
  self.assertProtoEquals("""
      name: 'p' op: 'PolymorphicOut'
      attr { key: 'T' value { type: DT_INT32 } }
      """, out.op.node_def)
  out = self._lib.apply_op("PolymorphicOut", T=dtypes.bool, name="q")
  self.assertEqual(dtypes.bool, out.dtype)
  self.assertProtoEquals("""
      name: 'q' op: 'PolymorphicOut'
      attr { key: 'T' value { type: DT_BOOL } }
      """, out.op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("PolymorphicOut")
  self.assertEqual(str(cm.exception),
                   "No argument for attr T")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("PolymorphicOut", T=None)
  self.assertEqual(str(cm.exception),
                   "Expected DataType for argument 'T' not None.")
def testPolymorphicDefaultOut(self):
  """T=None falls back to the attr's declared default (DT_STRING)."""
  self._add_op("name: 'PolymorphicDefaultOut' "
               "output_arg { name: 'out' type_attr: 'T' } "
               "attr { name: 'T' type: 'type' "
               "  default_value { type: DT_STRING } }")
  out = self._lib.apply_op("PolymorphicDefaultOut", T=None, name="p")
  self.assertEqual(dtypes.string, out.dtype)
  self.assertProtoEquals("""
      name: 'p' op: 'PolymorphicDefaultOut'
      attr { key: 'T' value { type: DT_STRING } }
      """, out.op.node_def)
  out = self._lib.apply_op("PolymorphicDefaultOut", T=dtypes.bool, name="q")
  self.assertEqual(dtypes.bool, out.dtype)
  self.assertProtoEquals("""
      name: 'q' op: 'PolymorphicDefaultOut'
      attr { key: 'T' value { type: DT_BOOL } }
      """, out.op.node_def)
def testBinary(self):
  """Two inputs sharing a type attr must agree on their inferred type."""
  self._add_op("name: 'Binary' "
               "input_arg { name: 'a' type_attr: 'T' } "
               "input_arg { name: 'b' type_attr: 'T' } "
               "output_arg { name: 'out' type_attr: 'T' } "
               "attr { name: 'T' type: 'type' }")
  out = self._lib.apply_op("Binary", a=8, b=9, name="b")
  self.assertEqual(dtypes.int32, out.dtype)
  self.assertProtoEquals("""
      name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'
      attr { key: 'T' value { type: DT_INT32 } }
      """, out.op.node_def)
  out = self._lib.apply_op("Binary", a="left", b="right", name="c")
  self.assertEqual(dtypes.string, out.dtype)
  self.assertProtoEquals("""
      name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'
      attr { key: 'T' value { type: DT_STRING } }
      """, out.op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Binary", a="left", b=12)
  self.assertEqual(str(cm.exception),
                   "Expected string passed to parameter 'b' of op 'Binary', "
                   "got 12 of type 'int' instead.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Binary",
                       a=self.Tensor(dtypes.string),
                       b=self.Tensor(dtypes.int32))
  self.assertEqual(str(cm.exception),
                   "Input 'b' of 'Binary' Op has type int32 "
                   "that does not match type string of argument 'a'.")
def testRestrict(self):
  """A type attr with allowed_values rejects types outside the list."""
  self._add_op("name: 'Restrict' "
               "input_arg { name: 'a' type_attr: 'T' } "
               "output_arg { name: 'out' type_attr: 'T' } "
               "attr { name: 'T' type: 'type' allowed_values { list { "
               "  type: DT_STRING type: DT_BOOL } } }")
  out = self._lib.apply_op("Restrict", a="foo", name="g")
  self.assertEqual(dtypes.string, out.dtype)
  self.assertProtoEquals("""
      name: 'g' op: 'Restrict' input: 'g/a'
      attr { key: 'T' value { type: DT_STRING } }
      """, out.op.node_def)
  out = self._lib.apply_op("Restrict", a=True, name="h")
  self.assertEqual(dtypes.bool, out.dtype)
  self.assertProtoEquals("""
      name: 'h' op: 'Restrict' input: 'h/a'
      attr { key: 'T' value { type: DT_BOOL } }
      """, out.op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Restrict", a=17)
  self.assertEqual(str(cm.exception),
                   "Value passed to parameter 'a' has DataType int32 "
                   "not in list of allowed values: string, bool")
def testTypeList(self):
  """A type_list_attr input accepts heterogeneous (and empty) lists and
  rejects non-lists and unconvertible elements."""
  self._add_op("name: 'TypeList' "
               "input_arg { name: 'a' type_list_attr: 'T' } "
               "attr { name: 'T' type: 'list(type)' }")
  op = self._lib.apply_op("TypeList", a=["foo"], name="z")
  self.assertProtoEquals("""
      name: 'z' op: 'TypeList' input: 'z/a_0'
      attr { key: 'T' value { list { type: DT_STRING } } }
      """, op.node_def)
  op = self._lib.apply_op("TypeList", a=[True, 12], name="y")
  self.assertProtoEquals("""
      name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'
      attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }
      """, op.node_def)
  op = self._lib.apply_op("TypeList", a=[], name="empty")
  self.assertProtoEquals("""
      name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }
      """, op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("TypeList", a=17)
  self.assertStartsWith(str(cm.exception),
                        "Expected list for 'a' "
                        "argument to 'TypeList' Op, not ")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("TypeList", a=[self.Tensor(dtypes.int32), None])
  self.assertStartsWith(str(cm.exception),
                        "Tensors in list passed to 'a' of 'TypeList' Op "
                        "have types [int32, <NOT CONVERTIBLE TO TENSOR>]")
def testTypeListTwice(self):
  """Two inputs sharing one type_list_attr must have matching type lists."""
  self._add_op("name: 'TypeListTwice' "
               "input_arg { name: 'a' type_list_attr: 'T' } "
               "input_arg { name: 'b' type_list_attr: 'T' } "
               "attr { name: 'T' type: 'list(type)' }")
  op = self._lib.apply_op("TypeListTwice",
                          a=["foo", True],
                          b=["bar", False],
                          name="z")
  self.assertProtoEquals("""
      name: 'z' op: 'TypeListTwice'
      input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'
      attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
      """, op.node_def)
  op = self._lib.apply_op("TypeListTwice", a=[], b=[], name="empty")
  self.assertProtoEquals("""
      name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }
      """, op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("TypeListTwice", a=["foo", True], b=["bar", 6])
  self.assertEqual(str(cm.exception),
                   "Input 'b' of 'TypeListTwice' Op has type list of "
                   "string, int32 that does not match type list "
                   "string, bool of argument 'a'.")
def testOutTypeList(self):
  """A type_list_attr output yields one tensor per requested type."""
  self._add_op("name: 'OutTypeList' "
               "output_arg { name: 'out' type_list_attr: 'T' } "
               "attr { name: 'T' type: 'list(type)' }")
  out, = self._lib.apply_op("OutTypeList", T=[dtypes.float32], name="x")
  self.assertEqual(dtypes.float32, out.dtype)
  self.assertProtoEquals("""
      name: 'x' op: 'OutTypeList'
      attr { key: 'T' value { list { type: DT_FLOAT } } }
      """, out.op.node_def)
  out1, out2 = self._lib.apply_op("OutTypeList",
                                  T=[dtypes.int32, dtypes.bool],
                                  name="w")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.bool, out2.dtype)
  self.assertProtoEquals("""
      name: 'w' op: 'OutTypeList'
      attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }
      """, out1.op.node_def)
  out = self._lib.apply_op("OutTypeList", T=[], name="empty")
  self.assertEqual([], out)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("OutTypeList", T=dtypes.int32)
  self.assertEqual(str(cm.exception), "Expected list for attr T")
def testTypeListRestrict(self):
  """A restricted type_list_attr rejects element types outside the list."""
  self._add_op("name: 'TypeListRestrict' "
               "input_arg { name: 'a' type_list_attr: 'T' } "
               "attr { name: 'T' type: 'list(type)' allowed_values { list { "
               "  type: DT_STRING type: DT_BOOL } } }")
  op = self._lib.apply_op("TypeListRestrict", a=["foo", False], name="v")
  self.assertProtoEquals("""
      name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'
      attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
      """, op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("TypeListRestrict", a=[True, 12])
  self.assertEqual(str(cm.exception),
                   "Value passed to parameter 'a' has DataType int32 "
                   "not in list of allowed values: string, bool")
def testOutTypeListRestrict(self):
  """A restricted output type_list_attr rejects disallowed types."""
  self._add_op("name: 'OutTypeListRestrict' "
               "output_arg { name: 'out' type_list_attr: 't' } "
               "attr { name: 't' type: 'list(type)' allowed_values { list { "
               "  type: DT_STRING type: DT_BOOL } } }")
  out1, out2 = self._lib.apply_op("OutTypeListRestrict",
                                  t=[dtypes.bool, dtypes.string],
                                  name="u")
  self.assertEqual(dtypes.bool, out1.dtype)
  self.assertEqual(dtypes.string, out2.dtype)
  self.assertProtoEquals("""
      name: 'u' op: 'OutTypeListRestrict'
      attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }
      """, out1.op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("OutTypeListRestrict", t=[dtypes.string, dtypes.int32])
  self.assertEqual(str(cm.exception),
                   "Value passed to parameter 't' has DataType int32 "
                   "not in list of allowed values: string, bool")
def testAttr(self):
  """An int attr accepts ints and Dimensions; rejects other types."""
  self._add_op("name: 'Attr' attr { name: 'a' type: 'int' }")
  op = self._lib.apply_op("Attr", a=12, name="t")
  self.assertProtoEquals("""
      name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }
      """, op.node_def)
  # A tensor_shape.Dimension is coerced to its int value.
  op = self._lib.apply_op("Attr", a=tensor_shape.Dimension(13), name="u")
  self.assertProtoEquals("""
      name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }
      """, op.node_def)
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Attr", a="bad")
  self.assertEqual(str(cm.exception),
                   "Expected int for argument 'a' not 'bad'.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Attr", a=[12])
  self.assertEqual(str(cm.exception),
                   "Expected int for argument 'a' not [12].")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Attr", a=None)
  self.assertEqual(str(cm.exception),
                   "Expected int for argument 'a' not None.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("Attr")
  self.assertEqual(str(cm.exception), "No argument for attr a")
  def testAttrFloat(self):
    """Float attrs accept floats and ints; non-numeric values raise TypeError."""
    self._add_op("name: 'AttrFloat' attr { name: 'a' type: 'float' }")
    op = self._lib.apply_op("AttrFloat", a=1.2, name="t")
    self.assertProtoEquals("""
      name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }
      """, op.node_def)
    # An int is accepted and stored as a float.
    op = self._lib.apply_op("AttrFloat", a=12, name="u")
    self.assertProtoEquals("""
      name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }
      """, op.node_def)
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("AttrFloat", a="bad")
    self.assertEqual(str(cm.exception),
                     "Expected float for argument 'a' not 'bad'.")
  def testAttrBool(self):
    """Bool attrs require actual booleans; truthy/falsy non-bools are rejected."""
    self._add_op("name: 'AttrBool' attr { name: 'a' type: 'bool' }")
    op = self._lib.apply_op("AttrBool", a=True, name="t")
    self.assertProtoEquals("""
      name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }
      """, op.node_def)
    op = self._lib.apply_op("AttrBool", a=False, name="u")
    self.assertProtoEquals("""
      name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }
      """, op.node_def)
    # Ints 0/1 and other truthy/falsy values are NOT coerced to bool.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("AttrBool", a=0)
    self.assertEqual(str(cm.exception),
                     "Expected bool for argument 'a' not 0.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("AttrBool", a=1)
    self.assertEqual(str(cm.exception),
                     "Expected bool for argument 'a' not 1.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("AttrBool", a=[])
    self.assertEqual(str(cm.exception),
                     "Expected bool for argument 'a' not [].")
  def testAttrBoolList(self):
    """list(bool) attrs accept bool lists (including empty); ints are rejected."""
    self._add_op("name: 'AttrBoolList' attr { name: 'a' type: 'list(bool)' }")
    op = self._lib.apply_op("AttrBoolList", a=[True, False, True], name="t")
    self.assertProtoEquals("""
      name: 't' op: 'AttrBoolList'
      attr { key: 'a' value { list { b: true b: false b:true } } }
      """, op.node_def)
    # An empty list is a valid value for a list attr.
    op = self._lib.apply_op("AttrBoolList", a=[], name="u")
    self.assertProtoEquals("""
      name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }
      """, op.node_def)
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("AttrBoolList", a=[0])
    self.assertEqual(str(cm.exception),
                     "Expected bool for argument 'a' not 0.")
  def testAttrMin(self):
    """Int attrs with has_minimum reject values below the minimum."""
    self._add_op("name: 'AttrMin' attr { name: 'a' type: 'int' "
                 "has_minimum: true minimum: 5 }")
    op = self._lib.apply_op("AttrMin", a=12, name="s")
    self.assertProtoEquals("""
      name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }
      """, op.node_def)
    # 2 < minimum 5 -> ValueError with a descriptive message.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("AttrMin", a=2)
    self.assertEqual(str(cm.exception),
                     "Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.")
  def testAttrListMin(self):
    """For list attrs, `minimum` constrains the list LENGTH, not element values."""
    self._add_op("name: 'AttrListMin' attr { name: 'a' type: 'list(int)' "
                 "has_minimum: true minimum: 2 }")
    op = self._lib.apply_op("AttrListMin", a=[1, 2], name="r")
    self.assertProtoEquals("""
      name: 'r' op: 'AttrListMin'
      attr { key: 'a' value { list { i: 1 i: 2 } } }
      """, op.node_def)
    # A single-element list violates the minimum length of 2.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("AttrListMin", a=[17])
    self.assertEqual(str(cm.exception),
                     "Attr 'a' of 'AttrListMin' Op "
                     "passed list of length 1 less than minimum 2.")
  def testAttrEnum(self):
    """String attrs with allowed_values act as enums; other strings raise."""
    self._add_op("name: 'AttrEnum' "
                 "attr { name: 'a' type: 'string' "
                 "  allowed_values { list { s: 'apples' s: 'oranges' } } }")
    op = self._lib.apply_op("AttrEnum", a="oranges", name="e")
    self.assertProtoEquals("""
      name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }
      """, op.node_def)
    # A string outside the allowed set produces a ValueError.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("AttrEnum", a="invalid")
    self.assertEqual(str(cm.exception),
                     'Attr \'a\' of \'AttrEnum\' Op '
                     'passed string \'invalid\' not in: '
                     '"apples", "oranges".')
  def testAttrEnumList(self):
    """Each element of a list(string) enum attr must be in allowed_values."""
    self._add_op("name: 'AttrEnumList' "
                 "attr { name: 'a' type: 'list(string)' "
                 "  allowed_values { list { s: 'apples' s: 'oranges' } } }")
    op = self._lib.apply_op("AttrEnumList", a=["oranges", "apples"], name="f")
    self.assertProtoEquals("""
      name: 'f' op: 'AttrEnumList'
      attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }
      """, op.node_def)
    # One invalid element anywhere in the list invalidates the whole call.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("AttrEnumList", a=["apples", "invalid", "oranges"])
    self.assertEqual(str(cm.exception),
                     'Attr \'a\' of \'AttrEnumList\' Op '
                     'passed string \'invalid\' not '
                     'in: "apples", "oranges".')
  def testAttrShape(self):
    """Shape attrs accept lists, tuples, TensorShape, and TensorShapeProto."""
    self._add_op("name: 'AttrShape' attr { name: 'a' type: 'shape' }")
    # A plain Python list of dims.
    op = self._lib.apply_op("AttrShape", a=[5], name="s1")
    self.assertProtoEquals("""
      name: 's1' op: 'AttrShape'
      attr { key: 'a' value { shape { dim { size: 5 } } } }
      """, op.node_def)
    # A tuple of dims.
    op = self._lib.apply_op("AttrShape", a=(4, 3, 2), name="s2")
    self.assertProtoEquals("""
      name: 's2' op: 'AttrShape'
      attr { key: 'a' value {
        shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }
      """, op.node_def)
    # A tensor_shape.TensorShape object.
    op = self._lib.apply_op(
        "AttrShape", a=tensor_shape.TensorShape([3, 2]), name="s3")
    self.assertProtoEquals("""
      name: 's3' op: 'AttrShape'
      attr { key: 'a' value {
        shape { dim { size: 3 } dim { size: 2 } } } }
      """, op.node_def)
    # An empty list means a scalar (rank-0) shape.
    op = self._lib.apply_op("AttrShape", a=[], name="s4")
    self.assertProtoEquals("""
      name: 's4' op: 'AttrShape' attr { key: 'a' value { shape { } } }
      """, op.node_def)
    # A raw TensorShapeProto is passed through as-is.
    shape = tensor_shape_pb2.TensorShapeProto()
    shape.dim.add().size = 6
    shape.dim.add().size = 3
    op = self._lib.apply_op("AttrShape", a=shape, name="s5")
    self.assertProtoEquals("""
      name: 's5' op: 'AttrShape'
      attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }
      """, op.node_def)
    # TODO(josh11b): Re-enable this test once we stop promoting scalars to shapes.
    # with self.assertRaises(TypeError) as cm:
    #   self._lib.apply_op("AttrShape", a=5)
    # self.assertEqual(str(cm.exception),
    #                  "Don't know how to convert 5 to a TensorShapeProto for "
    #                  "argument 'a'")
    with self.assertRaises(TypeError):
      self._lib.apply_op("AttrShape", a="ABC")
  def testAttrShapeList(self):
    """list(shape) attrs accept lists of shapes, including the empty list."""
    self._add_op("name: 'AttrShapeList' attr { name: 'a' type: 'list(shape)' }")
    op = self._lib.apply_op("AttrShapeList", a=[[3, 2], [6, 5, 4]], name="sl")
    self.assertProtoEquals("""
      name: 'sl' op: 'AttrShapeList'
      attr { key: 'a' value { list {
        shape { dim { size: 3 } dim { size: 2 } }
        shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }
      """, op.node_def)
    op = self._lib.apply_op("AttrShapeList", a=[], name="esl")
    self.assertProtoEquals("""
      name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }
      """, op.node_def)
  def testAttrPartialShape(self):
    """Unknown dims (None / size -1) are preserved in shape attrs."""
    self._add_op(
        "name: 'AttrPartialShape' attr { name: 'a' type: 'shape' }")
    op = self._lib.apply_op("AttrPartialShape", a=[5], name="s1")
    self.assertProtoEquals("""
      name: 's1' op: 'AttrPartialShape'
      attr { key: 'a' value { shape { dim { size: 5 } } } }
      """, op.node_def)
    # A None dim in a tuple is serialized as size: -1.
    op = self._lib.apply_op("AttrPartialShape", a=(4, None, 2), name="s2")
    self.assertProtoEquals("""
      name: 's2' op: 'AttrPartialShape'
      attr { key: 'a' value {
        shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }
      """, op.node_def)
    # Same for a TensorShape with an unknown dimension.
    op = self._lib.apply_op(
        "AttrPartialShape", a=tensor_shape.TensorShape([3, None]), name="s3")
    self.assertProtoEquals("""
      name: 's3' op: 'AttrPartialShape'
      attr { key: 'a' value {
        shape { dim { size: 3 } dim { size: -1 } } } }
      """, op.node_def)
    op = self._lib.apply_op("AttrPartialShape", a=[], name="s4")
    self.assertProtoEquals("""
      name: 's4' op: 'AttrPartialShape'
      attr { key: 'a' value { shape { } } }
      """, op.node_def)
    # A TensorShapeProto with size -1 passes through unchanged.
    shape = tensor_shape_pb2.TensorShapeProto()
    shape.dim.add().size = -1
    shape.dim.add().size = 3
    op = self._lib.apply_op("AttrPartialShape", a=shape, name="s5")
    self.assertProtoEquals("""
      name: 's5' op: 'AttrPartialShape'
      attr { key: 'a' value {
        shape { dim { size: -1 } dim { size: 3 } } } }
      """, op.node_def)
    # TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.
    # with self.assertRaises(TypeError) as cm:
    #   self._lib.apply_op("AttrPartialShape", a=5)
    # self.assertEqual(str(cm.exception),
    #                  "Don't know how to convert 5 to a TensorShapeProto for "
    #                  "argument 'a'")
    with self.assertRaises(TypeError):
      self._lib.apply_op("AttrPartialShape", a="ABC")
  def testAttrPartialShapeList(self):
    """list(shape) attrs preserve unknown dims (None -> size -1) per element."""
    self._add_op("""
      name: 'AttrPartialShapeList'
      attr { name: 'a' type: 'list(shape)' }
    """)
    op = self._lib.apply_op(
        "AttrPartialShapeList", a=[[3, 2], [6, None, 4]], name="sl")
    self.assertProtoEquals("""
      name: 'sl' op: 'AttrPartialShapeList'
      attr { key: 'a' value { list {
        shape { dim { size: 3 } dim { size: 2 } }
        shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }
      """, op.node_def)
    op = self._lib.apply_op("AttrPartialShapeList", a=[], name="esl")
    self.assertProtoEquals("""
      name: 'esl' op: 'AttrPartialShapeList' attr {
        key: 'a' value { list { } } }
      """, op.node_def)
def testAttrDefault(self):
self._add_op("name: 'AttrDefault' "
"attr { name: 'a' type: 'string' "
" default_value { s: 'banana' } }")
op = self._lib.apply_op("AttrDefault", a=None, name="d")
self.assertProtoEquals("""
name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }
""", op.node_def)
op = self._lib.apply_op("AttrDefault", a="kiwi", name="c")
self.assertProtoEquals("""
name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }
""", op.node_def)
  def testAttrListDefault(self):
    """list(int) attrs with a default: None selects it; [] is a real value."""
    self._add_op("name: 'AttrListDefault' "
                 "attr { name: 'a' type: 'list(int)' "
                 "  default_value { list { i: 5 i: 15 } } }")
    # None -> the declared default [5, 15].
    op = self._lib.apply_op("AttrListDefault", a=None, name="b")
    self.assertProtoEquals("""
      name: 'b' op: 'AttrListDefault'
      attr { key: 'a' value { list { i: 5 i: 15 } } }
      """, op.node_def)
    op = self._lib.apply_op("AttrListDefault", a=[3], name="a")
    self.assertProtoEquals("""
      name: 'a' op: 'AttrListDefault'
      attr { key: 'a' value { list { i: 3 } } }
      """, op.node_def)
    # An explicit empty list overrides the default rather than selecting it.
    op = self._lib.apply_op("AttrListDefault", a=[], name="empty")
    self.assertProtoEquals("""
      name: 'empty' op: 'AttrListDefault'
      attr { key: 'a' value { list { } } }
      """, op.node_def)
  def testAttrEmptyListDefault(self):
    """A list attr whose default is the empty list behaves consistently."""
    self._add_op("name: 'AttrEmptyListDefault' "
                 "attr { name: 'a' type: 'list(float)' "
                 "  default_value { list { } } }")
    # None selects the (empty) default.
    op = self._lib.apply_op("AttrEmptyListDefault", a=None, name="b")
    self.assertProtoEquals("""
      name: 'b' op: 'AttrEmptyListDefault'
      attr { key: 'a' value { list { } } }
      """, op.node_def)
    op = self._lib.apply_op("AttrEmptyListDefault", a=[3], name="a")
    self.assertProtoEquals("""
      name: 'a' op: 'AttrEmptyListDefault'
      attr { key: 'a' value { list { f: 3 } } }
      """, op.node_def)
    # An explicit [] serializes the same as the default here.
    op = self._lib.apply_op("AttrEmptyListDefault", a=[], name="empty")
    self.assertProtoEquals("""
      name: 'empty' op: 'AttrEmptyListDefault'
      attr { key: 'a' value { list { } } }
      """, op.node_def)
def testReservedAttr(self):
self._add_op("name: 'ReservedAttr' "
"attr { name: 'range' type: 'int' } ")
op = self._lib.apply_op("ReservedAttr", range_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }
""", op.node_def)
  def testDefaultAttrType(self):
    """A type attr default is used when the input's dtype cannot be inferred."""
    self._add_op("name: 'AttrTypeDefault' "
                 "input_arg { name: 'a' type_attr: 'T' } "
                 "attr { name: 'T' type: 'type' "
                 "  default_value { type: DT_INT32 } }")
    # Give an input whose type has no obvious output type.
    op = self._lib.apply_op("AttrTypeDefault", a=[], name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'AttrTypeDefault' input: 'n/a'
      attr { key: 'T' value { type: DT_INT32 } }
      """, op.node_def)
    # Give an input whose type can be inferred as different
    # than the default.
    op = self._lib.apply_op("AttrTypeDefault", a=[1.0], name="f")
    self.assertProtoEquals("""
      name: 'f' op: 'AttrTypeDefault' input: 'f/a'
      attr { key: 'T' value { type: DT_FLOAT } }
      """, op.node_def)
  def testDefaultListAttrType(self):
    """Type inference from list inputs can override a type attr's default."""
    self._add_op("name: 'AttrListTypeDefault' "
                 "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'T' type: 'type' "
                 "  default_value { type: DT_INT32 } }"
                 "attr { name: 'N' type: 'int' }")
    # Give an input whose type can be inferred as different
    # than the default.
    op = self._lib.apply_op("AttrListTypeDefault", a=[1.0], b=[2.0], name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'
      attr { key: 'T' value { type: DT_FLOAT } }
      attr { key: 'N' value { i: 1 } }
      """, op.node_def)
  def testNIntsIn(self):
    """A number_attr input takes a list of int32 tensors; N is inferred."""
    self._add_op("name: 'NIntsIn' "
                 "input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
    # N is inferred from the list length and recorded as an attr.
    op = self._lib.apply_op("NIntsIn", a=[1, 2], name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    op = self._lib.apply_op("NIntsIn", a=[5, 4, 3, 2, 1], name="o")
    self.assertProtoEquals("""
      name: 'o' op: 'NIntsIn'
      input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
      attr { key: 'N' value { i: 5 } }
      """, op.node_def)
    # Wrong dtype for every element (Python values).
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NIntsIn", a=["foo", "bar"])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NIntsIn' Op have types "
                     "[string, string] that do not match expected type int32.")
    # Wrong dtype for every element (Tensor values).
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NIntsIn",
                         a=[self.Tensor(dtypes.string),
                            self.Tensor(dtypes.string)])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NIntsIn' Op have "
                     "types [string, string] that do not match expected type "
                     "int32.")
    # List shorter than the declared minimum of 2.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("NIntsIn", a=[99])
    self.assertEqual(str(cm.exception),
                     "List argument 'a' to 'NIntsIn' Op "
                     "with length 1 shorter than "
                     "minimum length 2.")
    # Mixed dtypes within the list.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NIntsIn", a=[38, "bar"])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NIntsIn' Op have types "
                     "[int32, string] that do not match expected type int32.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NIntsIn",
                         a=[self.Tensor(dtypes.int32),
                            self.Tensor(dtypes.string)])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NIntsIn' Op "
                     "have types [int32, string] that do not match expected "
                     "type int32.")
    # A non-list value is rejected outright.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NIntsIn", a=17)
    self.assertStartsWith(str(cm.exception),
                          "Expected list for 'a' argument "
                          "to 'NIntsIn' Op, not ")
  def testNPolymorphicIn(self):
    """A polymorphic number_attr input infers T from the list's elements."""
    self._add_op("name: 'NPolymorphicIn' "
                 "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'T' type: 'type' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
    op = self._lib.apply_op("NPolymorphicIn", a=[1, 2], name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    op = self._lib.apply_op("NPolymorphicIn", a=[5, 4, 3, 2, 1], name="o")
    self.assertProtoEquals("""
      name: 'o' op: 'NPolymorphicIn'
      input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 5 } }
      """, op.node_def)
    op = self._lib.apply_op("NPolymorphicIn", a=["foo", "bar"], name="p")
    self.assertProtoEquals("""
      name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    # A Tensor in the list pins T; the Python 1 is converted to float.
    op = self._lib.apply_op("NPolymorphicIn",
                            a=[1, self.Tensor(dtypes.float32, name="x")],
                            name="q")
    self.assertProtoEquals("""
      name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'
      attr { key: 'T' value { type: DT_FLOAT } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    # A ref dtype (float32_ref) is compatible with its base dtype.
    op = self._lib.apply_op("NPolymorphicIn",
                            a=[self.Tensor(dtypes.float32, name="y"),
                               self.Tensor(dtypes.float32_ref, name="z")],
                            name="r")
    self.assertProtoEquals("""
      name: 'r' op: 'NPolymorphicIn' input: 'y' input: 'z'
      attr { key: 'T' value { type: DT_FLOAT } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("NPolymorphicIn", a=[99])
    self.assertEqual(str(cm.exception),
                     "List argument 'a' to 'NPolymorphicIn' Op with length 1 "
                     "shorter than minimum length 2.")
    # All elements must share a single inferred dtype.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicIn", a=[38, "bar"])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                     "have types [int32, string] that don't all match.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicIn", a=[38, self.Tensor(dtypes.string)])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                     "have types [int32, string] that don't all match.")
    # None is not convertible to a tensor at all.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicIn", a=[38, None])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                     "have types [int32, <NOT CONVERTIBLE TO TENSOR>] that "
                     "don't all match.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicIn",
                         a=["abcd", self.Tensor(dtypes.int32)])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                     "have types [string, int32] that don't all match.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicIn", a=17)
    self.assertStartsWith(str(cm.exception),
                          "Expected list for 'a' argument "
                          "to 'NPolymorphicIn' Op, not ")
  def testNPolymorphicRestrictIn(self):
    """Polymorphic list inputs respect the T attr's allowed_values."""
    self._add_op("name: 'NPolymorphicRestrictIn' "
                 "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'T' type: 'type' allowed_values { "
                 "  list { type: DT_STRING type: DT_BOOL } } } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
    op = self._lib.apply_op("NPolymorphicRestrictIn", a=["foo", "bar"],
                            name="p")
    self.assertProtoEquals("""
      name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    op = self._lib.apply_op("NPolymorphicRestrictIn",
                            a=[False, True, False],
                            name="b")
    self.assertProtoEquals("""
      name: 'b' op: 'NPolymorphicRestrictIn'
      input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 3 } }
      """, op.node_def)
    # int32 is inferred but falls outside the allowed (string, bool) set.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicRestrictIn", a=[1, 2])
    self.assertEqual(str(cm.exception),
                     "Value passed to parameter 'a' has DataType int32 not in "
                     "list of allowed values: string, bool")
  def testNInTwice(self):
    """Two inputs sharing number_attr 'N' must have equal list lengths."""
    self._add_op("name: 'NInTwice' "
                 "input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
                 "input_arg { name: 'b' type: DT_STRING number_attr: 'N' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
    op = self._lib.apply_op("NInTwice", a=[1, 2], b=["one", "two"], name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'NInTwice'
      input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    # minimum is 0, so empty lists are accepted for both inputs.
    op = self._lib.apply_op("NInTwice", a=[], b=[], name="o")
    self.assertProtoEquals("""
      name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }
      """, op.node_def)
    # Mismatched lengths between the two N-sharing inputs is an error.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("NInTwice", a=[1, 2, 3], b=["too short"])
    self.assertEqual(str(cm.exception),
                     "List argument 'b' to 'NInTwice' Op "
                     "with length 1 must match "
                     "length 3 of argument 'a'.")
  def testNInPolymorphicTwice(self):
    """Two inputs sharing both T and N must agree in dtype and length."""
    self._add_op("name: 'NInPolymorphicTwice' "
                 "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'T' type: 'type' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
    op = self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=[3, 4], name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'NInPolymorphicTwice'
      input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    # Length mismatch between the two N-sharing inputs.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("NInPolymorphicTwice", a=[1, 2, 3], b=[5])
    self.assertEqual(str(cm.exception),
                     "List argument 'b' to 'NInPolymorphicTwice' Op "
                     "with length 1 "
                     "must match length 3 of argument 'a'.")
    # T is inferred from 'a' first; 'b' must then match it.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=["one", "two"])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'b' of 'NInPolymorphicTwice' "
                     "Op have types [string, string] that do not match type "
                     "int32 inferred from earlier arguments.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NInPolymorphicTwice",
                         a=[self.Tensor(dtypes.int32)],
                         b=[self.Tensor(dtypes.string)])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'b' of "
                     "'NInPolymorphicTwice' Op have types [string] that do not "
                     "match type int32 inferred from earlier arguments.")
  def testNInTwoTypeVariables(self):
    """Inputs sharing N but with distinct type attrs (S, T) infer separately."""
    self._add_op("name: 'NInTwoTypeVariables' "
                 "input_arg { name: 'a' type_attr: 'S' number_attr: 'N' } "
                 "input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'S' type: 'type' } "
                 "attr { name: 'T' type: 'type' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
    # S and T can differ because they are independent type variables.
    op = self._lib.apply_op("NInTwoTypeVariables",
                            a=[1, 2],
                            b=[True, False],
                            name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'NInTwoTypeVariables'
      input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
      attr { key: 'S' value { type: DT_INT32 } }
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    op = self._lib.apply_op("NInTwoTypeVariables", a=[1, 2], b=[3, 4], name="o")
    self.assertProtoEquals("""
      name: 'o' op: 'NInTwoTypeVariables'
      input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'
      attr { key: 'S' value { type: DT_INT32 } }
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
    op = self._lib.apply_op("NInTwoTypeVariables",
                            a=[self.Tensor(dtypes.int32, name="q")],
                            b=[self.Tensor(dtypes.string, name="r")],
                            name="p")
    self.assertProtoEquals("""
      name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'
      attr { key: 'S' value { type: DT_INT32 } }
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 1 } }
      """, op.node_def)
    # The shared N still requires matching list lengths.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("NInTwoTypeVariables", a=[1, 2, 3], b=["5"])
    self.assertEqual(str(cm.exception),
                     "List argument 'b' to 'NInTwoTypeVariables' Op "
                     "with length 1 "
                     "must match length 3 of argument 'a'.")
  def testInPolymorphicTwice(self):
    """Inputs sharing T but with distinct lengths (N, M); empty 'a' can't infer T."""
    self._add_op("name: 'InPolymorphicTwice' "
                 "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "input_arg { name: 'b' type_attr: 'T' number_attr: 'M' } "
                 "attr { name: 'T' type: 'type' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 } "
                 "attr { name: 'M' type: 'int' has_minimum: true minimum: 0 } ")
    op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[3, 4, 5], name="n")
    self.assertProtoEquals("""
      name: 'n' op: 'InPolymorphicTwice'
      input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 1 } }
      attr { key: 'M' value { i: 3 } }
      """, op.node_def)
    # 'b' may be empty once 'a' has fixed T.
    op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[], name="o")
    self.assertProtoEquals("""
      name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 1 } }
      attr { key: 'M' value { i: 0 } }
      """, op.node_def)
    # But an empty FIRST input leaves T undetermined -> TypeError.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("InPolymorphicTwice", a=[], b=[3, 4, 5])
    self.assertEqual(str(cm.exception),
                     "Don't know how to infer type variable from empty input "
                     "list passed to input 'a' of 'InPolymorphicTwice' Op.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("InPolymorphicTwice", a=[1, 2], b=["one", "two"])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'b' of 'InPolymorphicTwice' Op "
                     "have types [string, string] that do not match type int32 "
                     "inferred from earlier arguments.")
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("InPolymorphicTwice",
                         a=[self.Tensor(dtypes.int32)],
                         b=[self.Tensor(dtypes.string)])
    self.assertEqual(str(cm.exception),
                     "Tensors in list passed to 'b' of 'InPolymorphicTwice' "
                     "Op have types [string] that do not match type int32 "
                     "inferred from earlier arguments.")
  def testNIntsOut(self):
    """A number_attr output yields N int32 tensors; N must respect its minimum."""
    self._add_op("name: 'NIntsOut' "
                 "output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
    out1, out2 = self._lib.apply_op("NIntsOut", N=2, name="n")
    self.assertEqual(dtypes.int32, out1.dtype)
    self.assertEqual(dtypes.int32, out2.dtype)
    self.assertProtoEquals("""
      name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
    out1, out2, out3, out4, out5 = self._lib.apply_op(
        "NIntsOut", N=5, name="o")
    self.assertEqual(dtypes.int32, out1.dtype)
    self.assertEqual(dtypes.int32, out2.dtype)
    self.assertEqual(dtypes.int32, out3.dtype)
    self.assertEqual(dtypes.int32, out4.dtype)
    self.assertEqual(dtypes.int32, out5.dtype)
    self.assertProtoEquals("""
      name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }
      """, out5.op.node_def)
    # N below the declared minimum of 2.
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("NIntsOut", N=1)
    self.assertEqual(str(cm.exception),
                     "Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.")
    # N must be a plain int, not a list.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NIntsOut", N=[3])
    self.assertEqual(str(cm.exception),
                     "Expected int for argument 'N' not [3].")
  def testNIntsOutDefault(self):
    """N=None selects the attr's default (3); an explicit N overrides it."""
    self._add_op("name: 'NIntsOutDefault' "
                 "output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2"
                 "  default_value { i:3 } }")
    # Default N=3 produces three outputs.
    out1, out2, out3 = self._lib.apply_op(
        "NIntsOutDefault", N=None, name="z")
    self.assertEqual(dtypes.int32, out1.dtype)
    self.assertEqual(dtypes.int32, out2.dtype)
    self.assertEqual(dtypes.int32, out3.dtype)
    self.assertProtoEquals("""
      name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
    out1, out2 = self._lib.apply_op("NIntsOutDefault", N=2, name="y")
    self.assertEqual(dtypes.int32, out1.dtype)
    self.assertEqual(dtypes.int32, out2.dtype)
    self.assertProtoEquals("""
      name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }
      """, out2.op.node_def)
  def testNPolymorphicOut(self):
    """Polymorphic outputs need explicit T and N; both are validated."""
    self._add_op("name: 'NPolymorphicOut' "
                 "output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'T' type: 'type' } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
    out1, out2 = self._lib.apply_op("NPolymorphicOut",
                                    N=2,
                                    T=dtypes.int32,
                                    name="n")
    self.assertEqual(dtypes.int32, out1.dtype)
    self.assertEqual(dtypes.int32, out2.dtype)
    self.assertProtoEquals("""
      name: 'n' op: 'NPolymorphicOut'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
    out1, out2, out3 = self._lib.apply_op(
        "NPolymorphicOut", T=dtypes.string, N=3, name="o")
    self.assertEqual(dtypes.string, out1.dtype)
    self.assertEqual(dtypes.string, out2.dtype)
    self.assertEqual(dtypes.string, out3.dtype)
    self.assertProtoEquals("""
      name: 'o' op: 'NPolymorphicOut'
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 3 } }
      """, out3.op.node_def)
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("NPolymorphicOut", N=1, T=dtypes.string)
    self.assertEqual(str(cm.exception),
                     "Attr 'N' of 'NPolymorphicOut' Op "
                     "passed 1 less than minimum 2.")
    # T must be a single DataType, not a list of them.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicOut", N=3, T=[dtypes.string])
    self.assertEqual(
        str(cm.exception),
        "Expected DataType for argument 'T' not [tf.string].")
  def testNPolymorphicOutDefault(self):
    """Defaults for both T (bool) and N (2) apply independently when None."""
    self._add_op("name: 'NPolymorphicOutDefault' "
                 "output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'T' type: 'type'"
                 "  default_value { type: DT_BOOL } } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 "
                 "  default_value { i: 2 } }")
    # Both defaulted: two bool outputs.
    out1, out2 = self._lib.apply_op(
        "NPolymorphicOutDefault", N=None, T=None, name="r")
    self.assertEqual(dtypes.bool, out1.dtype)
    self.assertEqual(dtypes.bool, out2.dtype)
    self.assertProtoEquals("""
      name: 'r' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
    # Explicit N, defaulted T.
    out1, out2, out3 = self._lib.apply_op(
        "NPolymorphicOutDefault", N=3, T=None, name="s")
    self.assertEqual(dtypes.bool, out1.dtype)
    self.assertEqual(dtypes.bool, out2.dtype)
    self.assertEqual(dtypes.bool, out3.dtype)
    self.assertProtoEquals("""
      name: 's' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
    # Defaulted N, explicit T.
    out1, out2 = self._lib.apply_op(
        "NPolymorphicOutDefault", N=None, T=dtypes.int32, name="t")
    self.assertEqual(dtypes.int32, out1.dtype)
    self.assertEqual(dtypes.int32, out2.dtype)
    self.assertProtoEquals("""
      name: 't' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
    # Both explicit.
    out1, out2, out3 = self._lib.apply_op(
        "NPolymorphicOutDefault", N=3, T=dtypes.int32, name="u")
    self.assertEqual(dtypes.int32, out1.dtype)
    self.assertEqual(dtypes.int32, out2.dtype)
    self.assertEqual(dtypes.int32, out3.dtype)
    self.assertProtoEquals("""
      name: 'u' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
  def testNPolymorphicRestrictOut(self):
    """Polymorphic outputs with allowed_values reject disallowed T."""
    self._add_op("name: 'NPolymorphicRestrictOut' "
                 "output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
                 "attr { name: 'T' type: 'type' allowed_values { "
                 "  list { type: DT_STRING type: DT_BOOL } } } "
                 "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
    out1, out2, out3 = self._lib.apply_op(
        "NPolymorphicRestrictOut", N=3, T=dtypes.bool, name="u")
    self.assertEqual(dtypes.bool, out1.dtype)
    self.assertEqual(dtypes.bool, out2.dtype)
    self.assertEqual(dtypes.bool, out3.dtype)
    self.assertProtoEquals("""
      name: 'u' op: 'NPolymorphicRestrictOut'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
    # int32 is outside the allowed (string, bool) set.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("NPolymorphicRestrictOut", N=2, T=dtypes.int32)
    self.assertEqual(str(cm.exception),
                     "Value passed to parameter 'T' has DataType int32 "
                     "not in list of allowed values: string, bool")
  def testRef(self):
    """Ref input/output semantics: ref->non-ref ok, non-ref->ref rejected,
    and ref inputs add colocation (_class) constraints."""
    self._add_op("name: 'RefIn' "
                 "input_arg { name: 'a' type_attr: 'T' is_ref: true } "
                 "attr { name: 'T' type: 'type' } ")
    self._add_op("name: 'TwoRefsIn' "
                 "input_arg { name: 'a' type_attr: 'T' is_ref: true } "
                 "input_arg { name: 'b' type_attr: 'T' is_ref: true } "
                 "attr { name: 'T' type: 'type' } ")
    self._add_op("name: 'RefOut' "
                 "output_arg { name: 'a' type_attr: 'T' is_ref: true } "
                 "attr { name: 'T' type: 'type' } ")
    # A ref output reports the _ref variant of its dtype.
    out = self._lib.apply_op("RefOut", T=dtypes.bool, name="o")
    self.assertEqual(dtypes.bool_ref, out.dtype)
    self.assertProtoEquals("""
      name: 'o' op: 'RefOut'
      attr { key: 'T' value { type: DT_BOOL } }
      """, out.op.node_def)
    # Feeding a ref into a ref input records a colocation constraint.
    op = self._lib.apply_op("RefIn", a=out, name="i")
    self.assertProtoEquals("""
      name: 'i' op: 'RefIn' input: 'o'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: "_class" value { list { s: "loc:@o" } } }
      """, op.node_def)
    # Can pass ref to non-ref input.
    out = self._lib.apply_op("RefOut", T=dtypes.int32, name="r")
    out = self._lib.apply_op("Simple", a=out, name="s")
    self.assertProtoEquals("""
      name: 's' op: 'Simple' input: 'r'
      """, out.op.node_def)
    # Can't pass non-ref to ref input.
    with self.assertRaises(TypeError) as cm:
      self._lib.apply_op("RefIn", a=2)
    self.assertEqual(str(cm.exception),
                     "'RefIn' Op requires that input 'a' be a mutable tensor " +
                     "(e.g.: a tf.Variable)")
    input_a = self._lib.apply_op("RefOut", T=dtypes.int32, name="t")
    input_b = self._lib.apply_op("RefOut", T=dtypes.int32, name="u")
    op = self._lib.apply_op("TwoRefsIn", a=input_a, b=input_b, name="v")
    # NOTE(mrry): The order of colocation constraints is an implementation
    # detail.
    self.assertProtoEquals("""
      name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: "_class" value { list { s: "loc:@t" s: "loc:@u" } } }
      """, op.node_def)
  def testSpecifyDevice(self):
    """A device() scope applies to the op and its implicitly created Consts."""
    with self._g.device("/job:ADevice"):
      self._lib.apply_op("Simple", a=3)
    # We look at the whole graph here to make sure the Const op is also given
    # the specified device.
    graph_def = self._g.as_graph_def()
    self.assertEqual(len(graph_def.node), 2)
    for node in graph_def.node:
      self.assertDeviceEqual(node.device, "/job:ADevice")
  def testStructuredOutputSingleList(self):
    """An op whose only output has a number_attr returns a Python list."""
    self._add_op("name: 'SimpleStruct' "
                 "output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
                 "attr { name: 'n_a' type: 'int' }")
    # The list length follows n_a, including the empty case.
    for n_a in [0, 1, 3]:
      a = self._lib.apply_op("SimpleStruct", n_a=n_a)
      self.assertTrue(isinstance(a, list))
      self.assertEqual(n_a, len(a))
  def testStructuredOutputListAndSingle(self):
    """Mixed outputs: a list output plus a single tensor output."""
    self._add_op("name: 'MixedStruct' "
                 "output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
                 "output_arg { name: 'b' type: DT_FLOAT } "
                 "attr { name: 'n_a' type: 'int' }")
    for n_a in [0, 1, 3]:
      a, b = self._lib.apply_op("MixedStruct", n_a=n_a)
      # 'a' is a list sized by n_a; 'b' is a plain Tensor.
      self.assertTrue(isinstance(a, list))
      self.assertEqual(n_a, len(a))
      self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
      self.assertTrue(isinstance(b, ops.Tensor))
      self.assertEqual(dtypes.float32, b.dtype)
  def testStructuredOutputMultipleLists(self):
    """Ops with several list outputs return one list per output arg."""
    self._add_op("name: 'ComplexStruct' "
                 "output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
                 "output_arg { name: 'b' type: DT_INT64 number_attr: 'n_b' } "
                 "output_arg { name: 'c' type_list_attr: 't_c' } "
                 "attr { name: 'n_a' type: 'int' } "
                 "attr { name: 'n_b' type: 'int' } "
                 "attr { name: 't_c' type: 'list(type)' }")
    # Cross product of list lengths / type lists, including all-empty.
    for n_a in [0, 1, 3]:
      for n_b in [0, 1, 3]:
        for t_c in [[],
                    [dtypes.int32],
                    [dtypes.int32, dtypes.float32]]:
          a, b, c = self._lib.apply_op("ComplexStruct",
                                       n_a=n_a,
                                       n_b=n_b,
                                       t_c=t_c)

          # Lengths track n_a / n_b; 'c' dtypes echo t_c exactly.
          self.assertEqual(n_a, len(a))
          self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
          self.assertEqual(n_b, len(b))
          self.assertTrue(all(x.dtype == dtypes.int64 for x in b))
          self.assertEqual(t_c, [x.dtype for x in c])
class OpDefLibraryGraphTest(test_util.TensorFlowTestCase):
  """Tests for which graph an OpDefLibrary-applied op lands in."""

  def setUp(self):
    # Fresh library and a private graph per test.
    self._lib = OpDefLibrary()
    self._g = ops.Graph()
    self._add_op("name: 'Simple' input_arg { name: 'a' type: DT_INT32 } "
                 "output_arg { name: 'out' type: DT_FLOAT }")
    self._add_op("name: 'Binary' "
                 "input_arg { name: 'a' type_attr: 'T' } "
                 "input_arg { name: 'b' type_attr: 'T' } "
                 "output_arg { name: 'out' type_attr: 'T' } "
                 "attr { name: 'T' type: 'type' }")

  def _add_op(self, ascii):
    # Register an OpDef given in text proto format.
    op_def = op_def_pb2.OpDef()
    text_format.Merge(ascii, op_def)
    self._lib.add_op(op_def)

  def testNoGraph(self):
    # Without an explicit graph scope, ops go to the default graph.
    out = self._lib.apply_op("Simple", a=3)
    self.assertEqual(out.graph, ops.get_default_graph())

  def testDefaultGraph(self):
    # Inside as_default(), ops go to that graph.
    with self._g.as_default():
      out = self._lib.apply_op("Simple", a=3)
      self.assertEqual(out.graph, self._g)

  def testDifferentGraphFails(self):
    # Mixing tensors from two graphs in one op is an error.
    with self._g.as_default():
      a = self._lib.apply_op("Simple", a=3)
    other_g = ops.Graph()
    with other_g.as_default():
      b = self._lib.apply_op("Simple", a=4)
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("Binary", a=a, b=b)
    self.assertTrue("must be from the same graph" in str(cm.exception))
if __name__ == "__main__":
  # Run all tests in this module under the TensorFlow test runner.
  googletest.main()
| apache-2.0 |
nkgilley/home-assistant | homeassistant/components/template/__init__.py | 2 | 1888 | """The template component."""
from itertools import chain
import logging
from homeassistant.const import MATCH_ALL
_LOGGER = logging.getLogger(__name__)
def initialise_templates(hass, templates, attribute_templates=None):
    """Attach the *hass* object to every template and attribute template.

    ``None`` entries in either mapping are skipped; a missing
    *attribute_templates* mapping is treated as empty.
    """
    attr_templates = {} if attribute_templates is None else attribute_templates
    for tpl in chain(templates.values(), attr_templates.values()):
        if tpl is not None:
            tpl.hass = hass
def extract_entities(
    device_name, device_type, manual_entity_ids, templates, attribute_templates=None
):
    """Determine which entity ids a device's templates should track.

    Explicitly configured *manual_entity_ids* win outright.  Otherwise each
    template (and attribute template) is asked for the entities it
    references; any template that cannot be introspected (reports
    ``MATCH_ALL``) forces the result back to ``MATCH_ALL`` and a warning is
    logged naming the offending templates.
    """
    if manual_entity_ids is not None:
        return manual_entity_ids

    attr_templates = {} if attribute_templates is None else attribute_templates

    tracked = set()
    untrackable = []
    for tpl_name, tpl in chain(templates.items(), attr_templates.items()):
        if tpl is None:
            continue
        extracted = tpl.extract_entities()
        if extracted == MATCH_ALL:
            untrackable.append(tpl_name.replace("_template", ""))
        else:
            tracked |= set(extracted)

    if not untrackable:
        return list(tracked)

    _LOGGER.warning(
        "Template %s '%s' has no entity ids configured to track nor"
        " were we able to extract the entities to track from the %s "
        "template(s). This entity will only be able to be updated "
        "manually.",
        device_type,
        device_name,
        ", ".join(untrackable),
    )
    return MATCH_ALL
| apache-2.0 |
RydrDojo/Ridr | pylotVenv/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py | 43 | 34130 | # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
"""
from .interfaces import MapperOption, PropComparator
from .. import util
from ..sql.base import _generative, Generative
from .. import exc as sa_exc, inspect
from .base import _is_aliased_class, _class_to_mapper
from . import util as orm_util
from .path_registry import PathRegistry, TokenRegistry, \
_WILDCARD_TOKEN, _DEFAULT_TOKEN
class Load(Generative, MapperOption):
    """Represents loader options which modify the state of a
    :class:`.Query` in order to affect how various mapped attributes are
    loaded.

    A :class:`.Load` can be used directly, instantiated against the lead
    entity, e.g. ``Load(MyClass).joinedload("widgets")``, or indirectly
    through the standalone loader functions (:func:`.orm.joinedload`,
    :func:`.orm.defer`, etc.), which produce "unbound" variants that are
    bound to an entity when passed to :meth:`.Query.options`.

    Each instance represents a "path" along the mapped attributes of a
    query's entities; loader strategies are recorded against positions on
    that path via method chaining, e.g.
    ``subqueryload("orders").joinedload("items")``.
    """

    def __init__(self, entity):
        insp = inspect(entity)
        self.path = insp._path_registry
        # (token, serialized-path) -> Load mapping.  _generate() does not
        # replace this dict, so it is shared by all generative copies.
        self.context = {}
        # strategy-specific options (e.g. "innerjoin"); reset on each copy.
        self.local_opts = {}

    def _generate(self):
        # Generative copy: shares everything except local_opts.
        cloned = super(Load, self)._generate()
        cloned.local_opts = {}
        return cloned

    # Default strategy (None = leave the attribute's configured strategy
    # alone) and whether the option carries into secondary lazy loads.
    strategy = None
    propagate_to_loaders = False

    def process_query(self, query):
        # MapperOption hook: apply unconditionally.
        self._process(query, True)

    def process_query_conditionally(self, query):
        # MapperOption hook for secondary (e.g. lazy-load) queries.
        self._process(query, False)

    def _process(self, query, raiseerr):
        # Copy this option's recorded loaders into query._attributes,
        # re-anchoring stored paths when the query is a secondary load.
        current_path = query._current_path
        if current_path:
            for (token, start_path), loader in self.context.items():
                chopped_start_path = self._chop_path(start_path, current_path)
                if chopped_start_path is not None:
                    query._attributes[(token, chopped_start_path)] = loader
        else:
            query._attributes.update(self.context)

    def _generate_path(self, path, attr, wildcard_key, raiseerr=True):
        """Extend *path* by *attr* — a wildcard/default token, a string
        attribute name, or an instrumented attribute.

        Returns the new path, or ``None`` when *attr* cannot be resolved
        and *raiseerr* is False.
        """
        if raiseerr and not path.has_entity:
            if isinstance(path, TokenRegistry):
                raise sa_exc.ArgumentError(
                    "Wildcard token cannot be followed by another entity")
            else:
                raise sa_exc.ArgumentError(
                    "Attribute '%s' of entity '%s' does not "
                    "refer to a mapped entity" %
                    (path.prop.key, path.parent.entity)
                )
        if isinstance(attr, util.string_types):
            default_token = attr.endswith(_DEFAULT_TOKEN)
            if attr.endswith(_WILDCARD_TOKEN) or default_token:
                if default_token:
                    self.propagate_to_loaders = False
                if wildcard_key:
                    attr = "%s:%s" % (wildcard_key, attr)
                return path.token(attr)

            try:
                # use getattr on the class to work around
                # synonyms, hybrids, etc.
                attr = getattr(path.entity.class_, attr)
            except AttributeError:
                if raiseerr:
                    raise sa_exc.ArgumentError(
                        "Can't find property named '%s' on the "
                        "mapped entity %s in this Query. " % (
                            attr, path.entity)
                    )
                else:
                    return None
            else:
                attr = attr.property

            path = path[attr]
        else:
            prop = attr.property

            if not prop.parent.common_parent(path.mapper):
                if raiseerr:
                    raise sa_exc.ArgumentError(
                        "Attribute '%s' does not "
                        "link from element '%s'" % (attr, path.entity))
                else:
                    return None

            if getattr(attr, '_of_type', None):
                # attribute was qualified with of_type(); record a
                # with_polymorphic alias along the path, reusing any
                # alias already stored for this path in context.
                ac = attr._of_type
                ext_info = inspect(ac)

                path_element = ext_info.mapper
                existing = path.entity_path[prop].get(
                    self.context, "path_with_polymorphic")
                if not ext_info.is_aliased_class:
                    ac = orm_util.with_polymorphic(
                        ext_info.mapper.base_mapper,
                        ext_info.mapper, aliased=True,
                        _use_mapper_path=True,
                        _existing_alias=existing)
                path.entity_path[prop].set(
                    self.context, "path_with_polymorphic", inspect(ac))
                path = path[prop][path_element]
            else:
                path = path[prop]

        if path.has_entity:
            path = path.entity_path
        return path

    def __str__(self):
        return "Load(strategy=%r)" % self.strategy

    def _coerce_strat(self, strategy):
        # Normalize a strategy dict into a hashable, deterministically
        # ordered tuple usable as a lookup key.
        if strategy is not None:
            strategy = tuple(sorted(strategy.items()))
        return strategy

    @_generative
    def set_relationship_strategy(
            self, attr, strategy, propagate_to_loaders=True):
        """Record *strategy* for the relationship attribute *attr*
        (generative; mutates the new copy in place)."""
        strategy = self._coerce_strat(strategy)

        self.propagate_to_loaders = propagate_to_loaders
        # if the path is a wildcard, this will set propagate_to_loaders=False
        self.path = self._generate_path(self.path, attr, "relationship")
        self.strategy = strategy
        if strategy is not None:
            self._set_path_strategy()

    @_generative
    def set_column_strategy(self, attrs, strategy, opts=None):
        """Record *strategy* (plus optional local *opts*) for each of the
        column attributes in *attrs* (generative)."""
        strategy = self._coerce_strat(strategy)

        for attr in attrs:
            # one sub-clone per attribute; each registers itself into the
            # shared context via _set_path_strategy().
            path = self._generate_path(self.path, attr, "column")
            cloned = self._generate()
            cloned.strategy = strategy
            cloned.path = path
            cloned.propagate_to_loaders = True
            if opts:
                cloned.local_opts.update(opts)
            cloned._set_path_strategy()

    def _set_path_strategy(self):
        # Register this option as the "loader" for its path in context.
        if self.path.has_entity:
            self.path.parent.set(self.context, "loader", self)
        else:
            self.path.set(self.context, "loader", self)

    def __getstate__(self):
        # Pickle support: paths are serialized to a portable form.
        d = self.__dict__.copy()
        d["path"] = self.path.serialize()
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.path = PathRegistry.deserialize(self.path)

    def _chop_path(self, to_chop, path):
        # Return the remainder of to_chop after removing the leading
        # portion matching *path*, or None if the two diverge.
        i = -1
        for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)):
            if isinstance(c_token, util.string_types):
                # TODO: this is approximated from the _UnboundLoad
                # version and probably has issues, not fully covered.
                if i == 0 and c_token.endswith(':' + _DEFAULT_TOKEN):
                    return to_chop
                elif c_token != 'relationship:%s' % (_WILDCARD_TOKEN,) and \
                        c_token != p_token.key:
                    return None

            if c_token is p_token:
                continue
            else:
                return None

        return to_chop[i + 1:]
class _UnboundLoad(Load):
    """Represent a loader option that isn't tied to a root entity.

    The loader option will produce an entity-linked :class:`.Load`
    object when it is passed :meth:`.Query.options`.

    This provides compatibility with the traditional system
    of freestanding options, e.g. ``joinedload('x.y.z')``.
    """

    def __init__(self):
        # path is a plain tuple of tokens/attributes until bound.
        self.path = ()
        # set of options that need binding when applied to a Query;
        # shared by generative copies (see Load._generate).
        self._to_bind = set()
        self.local_opts = {}

    # True for intermediate links produced by dotted/chained paths;
    # such links only setdefault() their loader rather than overwrite.
    _is_chain_link = False

    def _set_path_strategy(self):
        # Defer registration: remember this option for _bind_loader().
        self._to_bind.add(self)

    def _generate_path(self, path, attr, wildcard_key):
        # Unbound variant: just append the token; wildcard/default tokens
        # are prefixed with their key, and the default token disables
        # propagation to secondary loaders.
        if wildcard_key and isinstance(attr, util.string_types) and \
                attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN):
            if attr == _DEFAULT_TOKEN:
                self.propagate_to_loaders = False
            attr = "%s:%s" % (wildcard_key, attr)

        return path + (attr, )

    def __getstate__(self):
        # Pickle support: instrumented attributes are reduced to
        # (class, key) pairs; plain string tokens pass through.
        d = self.__dict__.copy()
        d['path'] = ret = []
        for token in util.to_list(self.path):
            if isinstance(token, PropComparator):
                ret.append((token._parentmapper.class_, token.key))
            else:
                ret.append(token)
        return d

    def __setstate__(self, state):
        # Inverse of __getstate__: re-resolve (class, key) pairs.
        ret = []
        for key in state['path']:
            if isinstance(key, tuple):
                cls, propkey = key
                ret.append(getattr(cls, propkey))
            else:
                ret.append(key)
        state['path'] = tuple(ret)
        self.__dict__ = state

    def _process(self, query, raiseerr):
        # Bind every accumulated option against the query's entities.
        for val in self._to_bind:
            val._bind_loader(query, query._attributes, raiseerr)

    @classmethod
    def _from_keys(self, meth, keys, chained, kw):
        # NOTE: classmethod whose first argument is named "self" (it is
        # actually the class).  Builds an option chain from dotted-string
        # and/or attribute keys; *chained* applies *meth* to every link,
        # otherwise intermediate links use defaultload().
        opt = _UnboundLoad()

        def _split_key(key):
            if isinstance(key, util.string_types):
                # coerce fooload('*') into "default loader strategy"
                if key == _WILDCARD_TOKEN:
                    return (_DEFAULT_TOKEN, )
                # coerce fooload(".*") into "wildcard on default entity"
                elif key.startswith("." + _WILDCARD_TOKEN):
                    key = key[1:]
                return key.split(".")
            else:
                return (key,)
        all_tokens = [token for key in keys for token in _split_key(key)]

        for token in all_tokens[0:-1]:
            if chained:
                opt = meth(opt, token, **kw)
            else:
                opt = opt.defaultload(token)
            opt._is_chain_link = True

        opt = meth(opt, all_tokens[-1], **kw)
        opt._is_chain_link = False

        return opt

    def _chop_path(self, to_chop, path):
        # Unbound variant of Load._chop_path: compares tokens against
        # (mapper, property) pairs.  Uses for/else: i advances past the
        # final matched element only when the loop runs to completion.
        i = -1
        for i, (c_token, (p_mapper, p_prop)) in enumerate(
                zip(to_chop, path.pairs())):
            if isinstance(c_token, util.string_types):
                if i == 0 and c_token.endswith(':' + _DEFAULT_TOKEN):
                    return to_chop
                elif c_token != 'relationship:%s' % (
                        _WILDCARD_TOKEN,) and c_token != p_prop.key:
                    return None
            elif isinstance(c_token, PropComparator):
                if c_token.property is not p_prop:
                    return None
        else:
            i += 1

        return to_chop[i:]

    def _bind_loader(self, query, context, raiseerr):
        """Convert this unbound option into a bound :class:`.Load` and
        register it into *context* for the given *query*."""
        start_path = self.path

        # _current_path implies we're in a
        # secondary load with an existing path
        current_path = query._current_path
        if current_path:
            start_path = self._chop_path(start_path, current_path)
        if not start_path:
            return None

        # Locate the query entity the first token refers to.
        token = start_path[0]
        if isinstance(token, util.string_types):
            entity = self._find_entity_basestring(query, token, raiseerr)
        elif isinstance(token, PropComparator):
            prop = token.property
            entity = self._find_entity_prop_comparator(
                query,
                prop.key,
                token._parententity,
                raiseerr)
        else:
            raise sa_exc.ArgumentError(
                "mapper option expects "
                "string key or list of attributes")

        if not entity:
            return

        path_element = entity.entity_zero

        # transfer our entity-less state into a Load() object
        # with a real entity path.
        loader = Load(path_element)
        loader.context = context
        loader.strategy = self.strategy

        path = loader.path
        for token in start_path:
            loader.path = path = loader._generate_path(
                loader.path, token, None, raiseerr)
            if path is None:
                return

        loader.local_opts.update(self.local_opts)

        if loader.path.has_entity:
            effective_path = loader.path.parent
        else:
            effective_path = loader.path

        # prioritize "first class" options over those
        # that were "links in the chain", e.g. "x" and "y" in
        # someload("x.y.z") versus someload("x") / someload("x.y")
        if effective_path.is_token:
            for path in effective_path.generate_for_superclasses():
                if self._is_chain_link:
                    path.setdefault(context, "loader", loader)
                else:
                    path.set(context, "loader", loader)
        else:
            if self._is_chain_link:
                effective_path.setdefault(context, "loader", loader)
            else:
                effective_path.set(context, "loader", loader)

    def _find_entity_prop_comparator(self, query, token, mapper, raiseerr):
        # Find the query entity corresponding to an attribute's parent
        # mapper / aliased class; for/else raises when nothing matches.
        if _is_aliased_class(mapper):
            searchfor = mapper
        else:
            searchfor = _class_to_mapper(mapper)
        for ent in query._mapper_entities:
            if ent.corresponds_to(searchfor):
                return ent
        else:
            if raiseerr:
                if not list(query._mapper_entities):
                    raise sa_exc.ArgumentError(
                        "Query has only expression-based entities - "
                        "can't find property named '%s'."
                        % (token, )
                    )
                else:
                    raise sa_exc.ArgumentError(
                        "Can't find property '%s' on any entity "
                        "specified in this Query.  Note the full path "
                        "from root (%s) to target entity must be specified."
                        % (token, ",".join(str(x) for
                                           x in query._mapper_entities))
                    )
            else:
                return None

    def _find_entity_basestring(self, query, token, raiseerr):
        # String tokens can only target a single-entity query when they
        # are wildcards; default tokens suppress errors entirely.
        if token.endswith(':' + _WILDCARD_TOKEN):
            if len(list(query._mapper_entities)) != 1:
                if raiseerr:
                    raise sa_exc.ArgumentError(
                        "Wildcard loader can only be used with exactly "
                        "one entity.  Use Load(ent) to specify "
                        "specific entities.")
        elif token.endswith(_DEFAULT_TOKEN):
            raiseerr = False

        for ent in query._mapper_entities:
            # return only the first _MapperEntity when searching
            # based on string prop name.   Ideally object
            # attributes are used to specify more exactly.
            return ent
        else:
            if raiseerr:
                raise sa_exc.ArgumentError(
                    "Query has only expression-based entities - "
                    "can't find property named '%s'."
                    % (token, )
                )
            else:
                return None
class loader_option(object):
    """Decorator that installs a loader function as a :class:`.Load` method
    and wires up its standalone ("unbound") counterparts."""

    def __init__(self):
        pass

    def __call__(self, fn):
        # Install fn as a method on Load under its own name.
        self.name = name = fn.__name__
        self.fn = fn
        if hasattr(Load, name):
            raise TypeError("Load class already has a %s method." % (name))
        setattr(Load, name, fn)
        return self

    def _add_unbound_fn(self, fn):
        """Register the standalone function; swap docstrings so the
        standalone carries the full documentation and the bound method
        cross-references it."""
        self._unbound_fn = fn
        fn_doc = self.fn.__doc__
        self.fn.__doc__ = """Produce a new :class:`.Load` object with the
:func:`.orm.%(name)s` option applied.

See :func:`.orm.%(name)s` for usage examples.

""" % {"name": self.name}

        fn.__doc__ = fn_doc
        return self

    def _add_unbound_all_fn(self, fn):
        """Register the deprecated standalone "_all()" variant with a
        generated deprecation docstring."""
        self._unbound_all_fn = fn
        fn.__doc__ = """Produce a standalone "all" option for :func:`.orm.%(name)s`.

.. deprecated:: 0.9.0

    The "_all()" style is replaced by method chaining, e.g.::

        session.query(MyClass).options(
            %(name)s("someattribute").%(name)s("anotherattribute")
        )

""" % {"name": self.name}
        return self
@loader_option()
def contains_eager(loadopt, attr, alias=None):
    """Eagerly populate *attr* from columns already present in the query.

    Used together with an explicit join that loads the desired rows::

        sess.query(Order).\\
            join(Order.user).\\
            options(contains_eager(Order.user))

    :param attr: the relationship attribute to populate.
    :param alias: optional alias name string,
     :func:`~sqlalchemy.sql.expression.alias` construct or
     :func:`~sqlalchemy.orm.aliased` construct, when the eagerly loaded
     rows come from an aliased table.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.
    """
    resolved = alias
    if resolved is not None and not isinstance(resolved, str):
        # accept aliased()/alias() constructs; reduce to their selectable
        resolved = inspect(resolved).selectable

    opt = loadopt.set_relationship_strategy(
        attr, {"lazy": "joined"}, propagate_to_loaders=False)
    opt.local_opts['eager_from_alias'] = resolved
    return opt


@contains_eager._add_unbound_fn
def contains_eager(*keys, **kw):
    # Standalone form; dotted keys chain contains_eager down the path.
    return _UnboundLoad()._from_keys(
        _UnboundLoad.contains_eager, keys, True, kw)
@loader_option()
def load_only(loadopt, *attrs):
    """Load only the named column attributes of an entity, deferring all
    others.

    e.g. ``session.query(User).options(load_only("name", "fullname"))``;
    can be chained from relationship options, e.g.
    ``subqueryload("addresses").load_only("email_address")``, or anchored
    with ``Load(User).load_only(...)`` in multi-entity queries.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.

    .. versionadded:: 0.9.0
    """
    # Undefer + instrument the named columns on a generative copy...
    undeferred = loadopt.set_column_strategy(
        attrs, {"deferred": False, "instrument": True})
    # ...then defer the wildcard, passing the "undefer_pks" local option.
    # The return value is discarded on purpose: set_column_strategy
    # records its loaders via state shared with `undeferred`
    # (Load.context / _UnboundLoad._to_bind survive _generate()).
    undeferred.set_column_strategy(
        "*", {"deferred": True, "instrument": True}, {"undefer_pks": True})
    return undeferred


@load_only._add_unbound_fn
def load_only(*attrs):
    # Standalone form.
    return _UnboundLoad().load_only(*attrs)
@loader_option()
def joinedload(loadopt, attr, innerjoin=None):
    """Load *attr* using joined eager loading.

    e.g.::

        query(User).options(joinedload(User.orders))
        query(Order).options(
            joinedload(Order.items).joinedload(Item.keywords))

    :param attr: the relationship attribute to load.
    :param innerjoin: when ``True``, use an INNER JOIN instead of the
     default LEFT OUTER JOIN; chained mixed inner/outer joins render as
     right-nested joins.  The value ``"unnested"`` instead links an inner
     eagerload to an outer one without right-nesting.

     .. versionchanged:: 1.0.0 ``innerjoin=True`` now implies
        ``innerjoin="nested"``; use ``innerjoin="unnested"`` for the
        pre-1.0 behavior.

    .. note:: the joins produced here are anonymously aliased and cannot
       be referenced or ordered by the :class:`.Query`; use
       :meth:`.Query.join` plus :func:`.orm.contains_eager` for explicit
       JOINs.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.
    """
    opt = loadopt.set_relationship_strategy(attr, {"lazy": "joined"})
    if innerjoin is not None:
        opt.local_opts['innerjoin'] = innerjoin
    return opt


@joinedload._add_unbound_fn
def joinedload(*keys, **kw):
    # Standalone form; intermediate path links use defaultload().
    return _UnboundLoad._from_keys(
        _UnboundLoad.joinedload, keys, False, kw)


@joinedload._add_unbound_all_fn
def joinedload_all(*keys, **kw):
    # Deprecated "_all" form: joins every link along the dotted path.
    return _UnboundLoad._from_keys(
        _UnboundLoad.joinedload, keys, True, kw)
@loader_option()
def subqueryload(loadopt, attr):
    """Load *attr* using subquery eager loading.

    e.g.::

        query(User).options(subqueryload(User.orders))
        query(Order).options(
            subqueryload(Order.items).subqueryload(Item.keywords))

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.
    """
    strategy = {"lazy": "subquery"}
    return loadopt.set_relationship_strategy(attr, strategy)


@subqueryload._add_unbound_fn
def subqueryload(*keys):
    # Standalone form; intermediate path links use defaultload().
    return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {})


@subqueryload._add_unbound_all_fn
def subqueryload_all(*keys):
    # Deprecated "_all" form: applies the strategy to every path link.
    return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, True, {})
@loader_option()
def lazyload(loadopt, attr):
    """Load *attr* using "lazy" (on-access SELECT) loading.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.

    .. seealso:: :paramref:`.relationship.lazy`
    """
    strategy = {"lazy": "select"}
    return loadopt.set_relationship_strategy(attr, strategy)


@lazyload._add_unbound_fn
def lazyload(*keys):
    # Standalone form; intermediate path links use defaultload().
    return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {})


@lazyload._add_unbound_all_fn
def lazyload_all(*keys):
    # Deprecated "_all" form: applies the strategy to every path link.
    return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, True, {})
@loader_option()
def immediateload(loadopt, attr):
    """Load *attr* immediately after the parent, via a per-attribute
    SELECT statement.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.

    .. seealso:: :func:`.orm.joinedload`, :func:`.orm.lazyload`,
       :paramref:`.relationship.lazy`
    """
    strategy = {"lazy": "immediate"}
    return loadopt.set_relationship_strategy(attr, strategy)


@immediateload._add_unbound_fn
def immediateload(*keys):
    # Standalone form; intermediate path links use defaultload().
    return _UnboundLoad._from_keys(
        _UnboundLoad.immediateload, keys, False, {})
@loader_option()
def noload(loadopt, attr):
    """Leave the relationship attribute *attr* unloaded.

    Applies to :func:`.relationship` attributes only; for column-based
    attributes use :func:`.orm.defer`.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.
    """
    strategy = {"lazy": "noload"}
    return loadopt.set_relationship_strategy(attr, strategy)


@noload._add_unbound_fn
def noload(*keys):
    # Standalone form; intermediate path links use defaultload().
    return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {})
@loader_option()
def defaultload(loadopt, attr):
    """Traverse *attr* using its configured loader style, without changing
    it.

    Useful purely for navigating a path so further options can be applied
    to a related entity, e.g.::

        session.query(MyClass).options(
            defaultload("someattr").defer("some_column"))

    Part of the :class:`.Load` interface.

    .. seealso:: :func:`.orm.defer`, :func:`.orm.undefer`
    """
    # strategy None = leave the relationship's own loading style alone
    return loadopt.set_relationship_strategy(attr, None)


@defaultload._add_unbound_fn
def defaultload(*keys):
    # Standalone form.
    return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {})
@loader_option()
def defer(loadopt, key):
    """Mark the column attribute *key* as deferred: not loaded until first
    accessed.

    e.g.::

        session.query(MyClass).options(
            defer("attribute_one"),
            defer(MyClass.attribute_two))

    To defer a column on a related class, spell the path one token at a
    time, using :func:`.orm.defaultload` for links whose loading style
    should stay unchanged::

        session.query(MyClass).options(
            defaultload("someattr").defer("some_column"))

    :param key: attribute name or class-bound attribute to defer.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.

    .. seealso:: :ref:`deferred`, :func:`.orm.undefer`
    """
    deferred = {"deferred": True, "instrument": True}
    return loadopt.set_column_strategy((key, ), deferred)


@defer._add_unbound_fn
def defer(key, *addl_attrs):
    # Standalone form; *addl_attrs is the deprecated 0.8 path style.
    return _UnboundLoad._from_keys(
        _UnboundLoad.defer, (key, ) + addl_attrs, False, {})
@loader_option()
def undefer(loadopt, key):
    """Undefer the column attribute *key*, including it in the entity's
    main SELECT statement.

    The column is typically mapped with :func:`.deferred`.  Examples::

        session.query(MyClass).options(undefer("col1"), undefer("col2"))

        # undefer all columns of one class using Load + *
        session.query(MyClass, MyOtherClass).options(
            Load(MyClass).undefer("*"))

    :param key: attribute name or class-bound attribute to undefer.

    Part of the :class:`.Load` interface; usable method-chained or
    standalone.

    .. seealso:: :ref:`deferred`, :func:`.orm.defer`,
       :func:`.orm.undefer_group`
    """
    undeferred = {"deferred": False, "instrument": True}
    return loadopt.set_column_strategy((key, ), undeferred)


@undefer._add_unbound_fn
def undefer(key, *addl_attrs):
    # Standalone form; *addl_attrs is the deprecated 0.8 path style.
    return _UnboundLoad._from_keys(
        _UnboundLoad.undefer, (key, ) + addl_attrs, False, {})
@loader_option()
def undefer_group(loadopt, name):
    """Undefer all columns within the deferred group *name*.

    The columns are mapped with :func:`.deferred` and share a "group"
    name, e.g.::

        session.query(MyClass).options(undefer_group("large_attrs"))

    May be chained from relationship options such as
    :func:`.orm.defaultload` to target a related entity.

    .. versionchanged:: 0.9.0 now specific to a particular entity load
       path.

    .. seealso:: :ref:`deferred`, :func:`.orm.defer`, :func:`.orm.undefer`
    """
    # strategy None: only the "undefer_group" local option is recorded
    # against the wildcard path.
    return loadopt.set_column_strategy(
        "*", None, {"undefer_group": name})


@undefer_group._add_unbound_fn
def undefer_group(name):
    # Standalone form.
    return _UnboundLoad().undefer_group(name)
| mit |
hoosteeno/mozillians | vendor-local/lib/python/unidecode/x0a0.py | 253 | 4428 | data = (
'it', # 0x00
'ix', # 0x01
'i', # 0x02
'ip', # 0x03
'iet', # 0x04
'iex', # 0x05
'ie', # 0x06
'iep', # 0x07
'at', # 0x08
'ax', # 0x09
'a', # 0x0a
'ap', # 0x0b
'uox', # 0x0c
'uo', # 0x0d
'uop', # 0x0e
'ot', # 0x0f
'ox', # 0x10
'o', # 0x11
'op', # 0x12
'ex', # 0x13
'e', # 0x14
'wu', # 0x15
'bit', # 0x16
'bix', # 0x17
'bi', # 0x18
'bip', # 0x19
'biet', # 0x1a
'biex', # 0x1b
'bie', # 0x1c
'biep', # 0x1d
'bat', # 0x1e
'bax', # 0x1f
'ba', # 0x20
'bap', # 0x21
'buox', # 0x22
'buo', # 0x23
'buop', # 0x24
'bot', # 0x25
'box', # 0x26
'bo', # 0x27
'bop', # 0x28
'bex', # 0x29
'be', # 0x2a
'bep', # 0x2b
'but', # 0x2c
'bux', # 0x2d
'bu', # 0x2e
'bup', # 0x2f
'burx', # 0x30
'bur', # 0x31
'byt', # 0x32
'byx', # 0x33
'by', # 0x34
'byp', # 0x35
'byrx', # 0x36
'byr', # 0x37
'pit', # 0x38
'pix', # 0x39
'pi', # 0x3a
'pip', # 0x3b
'piex', # 0x3c
'pie', # 0x3d
'piep', # 0x3e
'pat', # 0x3f
'pax', # 0x40
'pa', # 0x41
'pap', # 0x42
'puox', # 0x43
'puo', # 0x44
'puop', # 0x45
'pot', # 0x46
'pox', # 0x47
'po', # 0x48
'pop', # 0x49
'put', # 0x4a
'pux', # 0x4b
'pu', # 0x4c
'pup', # 0x4d
'purx', # 0x4e
'pur', # 0x4f
'pyt', # 0x50
'pyx', # 0x51
'py', # 0x52
'pyp', # 0x53
'pyrx', # 0x54
'pyr', # 0x55
'bbit', # 0x56
'bbix', # 0x57
'bbi', # 0x58
'bbip', # 0x59
'bbiet', # 0x5a
'bbiex', # 0x5b
'bbie', # 0x5c
'bbiep', # 0x5d
'bbat', # 0x5e
'bbax', # 0x5f
'bba', # 0x60
'bbap', # 0x61
'bbuox', # 0x62
'bbuo', # 0x63
'bbuop', # 0x64
'bbot', # 0x65
'bbox', # 0x66
'bbo', # 0x67
'bbop', # 0x68
'bbex', # 0x69
'bbe', # 0x6a
'bbep', # 0x6b
'bbut', # 0x6c
'bbux', # 0x6d
'bbu', # 0x6e
'bbup', # 0x6f
'bburx', # 0x70
'bbur', # 0x71
'bbyt', # 0x72
'bbyx', # 0x73
'bby', # 0x74
'bbyp', # 0x75
'nbit', # 0x76
'nbix', # 0x77
'nbi', # 0x78
'nbip', # 0x79
'nbiex', # 0x7a
'nbie', # 0x7b
'nbiep', # 0x7c
'nbat', # 0x7d
'nbax', # 0x7e
'nba', # 0x7f
'nbap', # 0x80
'nbot', # 0x81
'nbox', # 0x82
'nbo', # 0x83
'nbop', # 0x84
'nbut', # 0x85
'nbux', # 0x86
'nbu', # 0x87
'nbup', # 0x88
'nburx', # 0x89
'nbur', # 0x8a
'nbyt', # 0x8b
'nbyx', # 0x8c
'nby', # 0x8d
'nbyp', # 0x8e
'nbyrx', # 0x8f
'nbyr', # 0x90
'hmit', # 0x91
'hmix', # 0x92
'hmi', # 0x93
'hmip', # 0x94
'hmiex', # 0x95
'hmie', # 0x96
'hmiep', # 0x97
'hmat', # 0x98
'hmax', # 0x99
'hma', # 0x9a
'hmap', # 0x9b
'hmuox', # 0x9c
'hmuo', # 0x9d
'hmuop', # 0x9e
'hmot', # 0x9f
'hmox', # 0xa0
'hmo', # 0xa1
'hmop', # 0xa2
'hmut', # 0xa3
'hmux', # 0xa4
'hmu', # 0xa5
'hmup', # 0xa6
'hmurx', # 0xa7
'hmur', # 0xa8
'hmyx', # 0xa9
'hmy', # 0xaa
'hmyp', # 0xab
'hmyrx', # 0xac
'hmyr', # 0xad
'mit', # 0xae
'mix', # 0xaf
'mi', # 0xb0
'mip', # 0xb1
'miex', # 0xb2
'mie', # 0xb3
'miep', # 0xb4
'mat', # 0xb5
'max', # 0xb6
'ma', # 0xb7
'map', # 0xb8
'muot', # 0xb9
'muox', # 0xba
'muo', # 0xbb
'muop', # 0xbc
'mot', # 0xbd
'mox', # 0xbe
'mo', # 0xbf
'mop', # 0xc0
'mex', # 0xc1
'me', # 0xc2
'mut', # 0xc3
'mux', # 0xc4
'mu', # 0xc5
'mup', # 0xc6
'murx', # 0xc7
'mur', # 0xc8
'myt', # 0xc9
'myx', # 0xca
'my', # 0xcb
'myp', # 0xcc
'fit', # 0xcd
'fix', # 0xce
'fi', # 0xcf
'fip', # 0xd0
'fat', # 0xd1
'fax', # 0xd2
'fa', # 0xd3
'fap', # 0xd4
'fox', # 0xd5
'fo', # 0xd6
'fop', # 0xd7
'fut', # 0xd8
'fux', # 0xd9
'fu', # 0xda
'fup', # 0xdb
'furx', # 0xdc
'fur', # 0xdd
'fyt', # 0xde
'fyx', # 0xdf
'fy', # 0xe0
'fyp', # 0xe1
'vit', # 0xe2
'vix', # 0xe3
'vi', # 0xe4
'vip', # 0xe5
'viet', # 0xe6
'viex', # 0xe7
'vie', # 0xe8
'viep', # 0xe9
'vat', # 0xea
'vax', # 0xeb
'va', # 0xec
'vap', # 0xed
'vot', # 0xee
'vox', # 0xef
'vo', # 0xf0
'vop', # 0xf1
'vex', # 0xf2
'vep', # 0xf3
'vut', # 0xf4
'vux', # 0xf5
'vu', # 0xf6
'vup', # 0xf7
'vurx', # 0xf8
'vur', # 0xf9
'vyt', # 0xfa
'vyx', # 0xfb
'vy', # 0xfc
'vyp', # 0xfd
'vyrx', # 0xfe
'vyr', # 0xff
)
| bsd-3-clause |
chbrown/geo | geo/shapefile/reader.py | 1 | 13651 | from struct import unpack, calcsize
import os
from geo.shapefile import ShapefileException, Array
from geo.shapefile.six import u, b, is_string
from geo.shapefile.shape import Shape
class Reader(object):
    """Reads the three files of a shapefile as a unit or
    separately. If one of the three files (.shp, .shx,
    .dbf) is missing no exception is thrown until you try
    to call a method that depends on that particular file.
    The .shx index file is used if available for efficiency
    but is not required to read the geometry from the .shp
    file. The "shapefile" argument in the constructor is the
    name of the file you want to open.

    You can instantiate a Reader without specifying a shapefile
    and then specify one later with the load() method.

    Only the shapefile headers are read upon loading. Content
    within each file is only accessed when required and as
    efficiently as possible. Shapefiles are usually not large
    but they can be.
    """

    def __init__(self, *args, **kwargs):
        # File objects for the three shapefile components; any may stay None.
        self.shp = None
        self.shx = None
        self.dbf = None
        self.shapeName = "Not specified"
        # Byte offsets of each shape record, lazily read from the .shx index.
        self._offsets = []
        self.shpLength = None
        self.numRecords = None
        # dbf field descriptors: (name, type, size, decimal) tuples.
        self.fields = []
        self.__dbfHdrLength = 0
        # See if a shapefile name was passed as an argument
        if len(args) > 0:
            if is_string(args[0]):
                self.load(args[0])
                return
        # Alternatively, accept already-open file-like objects via keywords.
        # Note .shx is only honored when .shp was also supplied.
        if "shp" in kwargs.keys():
            if hasattr(kwargs["shp"], "read"):
                self.shp = kwargs["shp"]
                if hasattr(self.shp, "seek"):
                    self.shp.seek(0)
            if "shx" in kwargs.keys():
                if hasattr(kwargs["shx"], "read"):
                    self.shx = kwargs["shx"]
                    if hasattr(self.shx, "seek"):
                        self.shx.seek(0)
        if "dbf" in kwargs.keys():
            if hasattr(kwargs["dbf"], "read"):
                self.dbf = kwargs["dbf"]
                if hasattr(self.dbf, "seek"):
                    self.dbf.seek(0)
        if self.shp or self.dbf:
            self.load()
        else:
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")

    def load(self, shapefile=None):
        """Opens a shapefile from a filename or file-like
        object. Normally this method would be called by the
        constructor with the file object or file name as an
        argument."""
        if shapefile:
            shapeName, ext = os.path.splitext(shapefile)
            self.shapeName = shapeName
            # Unlike upstream pyshp, this fork raises if any of the three
            # component files cannot be opened.
            try:
                self.shp = open("%s.shp" % shapeName, "rb")
            except IOError:
                raise ShapefileException("Unable to open %s.shp" % shapeName)
            try:
                self.shx = open("%s.shx" % shapeName, "rb")
            except IOError:
                raise ShapefileException("Unable to open %s.shx" % shapeName)
            try:
                self.dbf = open("%s.dbf" % shapeName, "rb")
            except IOError:
                raise ShapefileException("Unable to open %s.dbf" % shapeName)
        # Only headers are parsed eagerly; record content is read on demand.
        if self.shp:
            self.__shpHeader()
        if self.dbf:
            self.__dbfHeader()

    def __getFileObj(self, f):
        """Checks to see if the requested shapefile file object is
        available. If not a ShapefileException is raised."""
        if not f:
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
        # Re-load lazily if headers were never parsed for this component.
        if self.shp and self.shpLength is None:
            self.load()
        if self.dbf and len(self.fields) == 0:
            self.load()
        return f

    def __restrictIndex(self, i):
        """Provides list-like handling of a record index with a clearer
        error message if the index is out of bounds."""
        if self.numRecords:
            rmax = self.numRecords - 1
            if abs(i) > rmax:
                raise IndexError("Shape or Record index out of range.")
            # Normalize negative indices to their positive equivalent.
            if i < 0:
                i = range(self.numRecords)[i]
        return i

    def assertFile(self, attr):
        # attr should be 'shp', 'dbf', or 'shx'
        if not getattr(self, attr, None):
            # NOTE(review): the two literals concatenate without a space
            # before "(no ...)"; left as-is since this is runtime text.
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object."
                                     "(no %s file found)" % attr)

    def __shpHeader(self):
        """Reads the header information from a .shp or .shx file."""
        if not self.shp:
            # NOTE(review): message is missing its closing parenthesis.
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
        shp = self.shp
        # File length (16-bit word * 2 = bytes)
        shp.seek(24)
        self.shpLength = unpack(">i", shp.read(4))[0] * 2
        # Shape type
        shp.seek(32)
        self.shapeType = unpack("<i", shp.read(4))[0]
        # The shapefile's bounding box (lower left, upper right)
        self.bbox = Array('d', unpack("<4d", shp.read(32)))
        # Elevation
        self.elevation = Array('d', unpack("<2d", shp.read(16)))
        # Measure
        self.measure = Array('d', unpack("<2d", shp.read(16)))

    def __shape(self):
        """Returns the header info and geometry for a single shape.

        Assumes the .shp file pointer is positioned at the start of a
        record header; leaves it at the start of the next record.
        """
        f = self.__getFileObj(self.shp)
        record = Shape()
        nParts = nPoints = zmin = zmax = mmin = mmax = None
        # Record header: record number and content length in 16-bit words.
        (recNum, recLength) = unpack(">2i", f.read(8))
        # Determine the start of the next record
        next = f.tell() + (2 * recLength)
        shapeType = unpack("<i", f.read(4))[0]
        record.shapeType = shapeType
        # For Null shapes create an empty points list for consistency
        if shapeType == 0:
            record.points = []
        # All shape types capable of having a bounding box
        elif shapeType in (3, 5, 8, 13, 15, 18, 23, 25, 28, 31):
            record.bbox = Array('d', unpack("<4d", f.read(32)))
        # Shape types with parts
        if shapeType in (3, 5, 13, 15, 23, 25, 31):
            nParts = unpack("<i", f.read(4))[0]
        # Shape types with points
        if shapeType in (3, 5, 8, 13, 15, 23, 25, 31):
            nPoints = unpack("<i", f.read(4))[0]
        # Read parts
        if nParts:
            record.parts = Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
        # Read part types for Multipatch - 31
        if shapeType == 31:
            record.partTypes = Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
        # Read points - produces a list of [x,y] values
        if nPoints:
            record.points = [Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
        # Read z extremes and values
        if shapeType in (13, 15, 18, 31):
            (zmin, zmax) = unpack("<2d", f.read(16))
            record.z = Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
        # Read m extremes and values if header m values do not equal 0.0
        if shapeType in (13, 15, 18, 23, 25, 28, 31) and not 0.0 in self.measure:
            (mmin, mmax) = unpack("<2d", f.read(16))
            # Measure values less than -10e38 are nodata values according to the spec
            record.m = []
            for m in Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8))):
                if m > -10e38:
                    record.m.append(m)
                else:
                    record.m.append(None)
        # Read a single point
        if shapeType in (1, 11, 21):
            record.points = [Array('d', unpack("<2d", f.read(16)))]
        # Read a single Z value
        if shapeType == 11:
            record.z = unpack("<d", f.read(8))
        # Read a single M value
        if shapeType in (11, 21):
            record.m = unpack("<d", f.read(8))
        # Seek to the end of this record as defined by the record header because
        # the shapefile spec doesn't require the actual content to meet the header
        # definition. Probably allowed for lazy feature deletion.
        f.seek(next)
        return record

    def __shapeIndex(self, i=None):
        """Returns the offset in a .shp file for a shape based on information
        in the .shx index file."""
        shx = self.shx
        if not shx:
            return None
        # Populate the offset cache on first use.
        if not self._offsets:
            # File length (16-bit word * 2 = bytes) - header length
            shx.seek(24)
            shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
            numRecords = shxRecordLength // 8
            # Jump to the first record.
            shx.seek(100)
            for r in range(numRecords):
                # Offsets are 16-bit words just like the file length
                self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
                # Skip the 4-byte content-length field of each index record.
                shx.seek(shx.tell() + 4)
        if not i == None:
            return self._offsets[i]

    def shape(self, i=0):
        """Returns a shape object for a shape in the the geometry
        record file."""
        shp = self.__getFileObj(self.shp)
        i = self.__restrictIndex(i)
        offset = self.__shapeIndex(i)
        if not offset:
            # Shx index not available so iterate the full list.
            # NOTE(review): ``iterShapes`` is not defined in this class
            # (only ``shapes``); this fallback path would raise
            # AttributeError, and if the loop does not return, the
            # subsequent seek(None) would fail — confirm against callers.
            for j, k in enumerate(self.iterShapes()):
                if j == i:
                    return k
        shp.seek(offset)
        return self.__shape()

    def shapes(self):
        """
        Yield all shapes in the shapefile.
        """
        shp = self.__getFileObj(self.shp)
        # Found shapefiles which report incorrect
        # shp file length in the header. Can't trust
        # that so we seek to the end of the file
        # and figure it out.
        shp.seek(0, 2)
        self.shpLength = shp.tell()
        shp.seek(100)
        while shp.tell() < self.shpLength:
            yield self.__shape()

    def __dbfHeaderLength(self):
        """Retrieves the header length of a dbf file header."""
        if not self.__dbfHdrLength:
            self.assertFile('dbf')
            # Also captures the record count from the same 32-byte header.
            self.numRecords, self.__dbfHdrLength = unpack("<xxxxLH22x", self.dbf.read(32))
        return self.__dbfHdrLength

    def __dbfHeader(self):
        """Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
        self.assertFile('dbf')
        headerLength = self.__dbfHeaderLength()
        # Each field descriptor is 32 bytes; 33 accounts for the fixed
        # header plus the terminator byte.
        numFields = (headerLength - 33) // 32
        for field in range(numFields):
            fieldDesc = list(unpack("<11sc4xBB14x", self.dbf.read(32)))
            name = 0
            idx = 0
            # Field names are NUL-padded to 11 bytes; trim at the first NUL.
            if b("\x00") in fieldDesc[name]:
                idx = fieldDesc[name].index(b("\x00"))
            else:
                idx = len(fieldDesc[name]) - 1
            fieldDesc[name] = fieldDesc[name][:idx]
            fieldDesc[name] = u(fieldDesc[name])
            fieldDesc[name] = fieldDesc[name].lstrip()
            fieldDesc[1] = u(fieldDesc[1])
            self.fields.append(fieldDesc)
        terminator = self.dbf.read(1)
        assert terminator == b("\r")
        # Prepend the implicit 1-byte deletion-flag pseudo-field.
        self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))

    def __recordFmt(self):
        """Calculates the struct format and size of a .dbf data record."""
        if not self.numRecords:
            self.__dbfHeader()
        fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
        fmtSize = calcsize(fmt)
        return (fmt, fmtSize)

    def __record(self):
        """Reads and returns a dbf record row as a list of values."""
        f = self.__getFileObj(self.dbf)
        recFmt = self.__recordFmt()
        recordContents = unpack(recFmt[0], f.read(recFmt[1]))
        if recordContents[0] != b(' '):
            # deleted record
            return None
        record = []
        for (name, typ, size, deci), value in zip(self.fields, recordContents):
            if name == 'DeletionFlag':
                continue
            elif not value.strip():
                # Blank value: keep as-is without type conversion.
                record.append(value)
                continue
            elif typ == "N":
                # Numeric: int or float depending on the decimal count.
                value = value.replace(b('\0'), b('')).strip()
                if value == b(''):
                    value = 0
                elif deci:
                    value = float(value)
                else:
                    value = int(value)
            elif typ == b('D'):
                # Date stored as YYYYMMDD; fall back to the raw string.
                try:
                    y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
                    value = [y, m, d]
                except:
                    value = value.strip()
            elif typ == b('L'):
                # Logical: normalize to b'T', b'F' or b'?'.
                value = (value in b('YyTt') and b('T')) or (value in b('NnFf') and b('F')) or b('?')
            else:
                value = u(value)
                value = value.strip()
            record.append(value)
        return record

    def record(self, i=0):
        """Returns a specific dbf record based on the supplied index."""
        f = self.__getFileObj(self.dbf)
        if not self.numRecords:
            self.__dbfHeader()
        i = self.__restrictIndex(i)
        recSize = self.__recordFmt()[1]
        f.seek(0)
        f.seek(self.__dbfHeaderLength() + (i * recSize))
        return self.__record()

    def records(self):
        """
        Yield all records in the dbf file.
        Wrap with list() if you want a list.
        """
        if not self.numRecords:
            self.__dbfHeader()
        f = self.__getFileObj(self.dbf)
        f.seek(self.__dbfHeaderLength())
        # Deleted records (__record() -> None) are silently skipped.
        # NOTE: xrange means this module targets Python 2.
        for i in xrange(self.numRecords):
            r = self.__record()
            if r:
                yield r

    @property
    def field_names(self):
        # Field names only, excluding the leading DeletionFlag pseudo-field.
        return [field_name for field_name, _, _, _ in self.fields[1:]]
| mit |
Epirex/android_external_chromium_org | tools/telemetry/telemetry/page/page_unittest.py | 26 | 5528 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.page import page
from telemetry.page import page_set
class TestPage(unittest.TestCase):
    """Tests for telemetry.page.Page path resolution and display names."""

    def assertPathEqual(self, path1, path2):
        """Assert that two paths match after platform normalization."""
        self.assertEqual(os.path.normpath(path1), os.path.normpath(path2))

    @staticmethod
    def _make_page_set(urls):
        """Build and load the standard test PageSet with one page per URL."""
        return page_set.PageSet.FromDict(
            {
                'description': 'hello',
                'archive_path': 'foo.wpr',
                'pages': [{'url': url} for url in urls],
            },
            os.path.dirname(__file__))

    def testFilePathRelative(self):
        pg = page.Page('file://somedir/otherdir/file.html',
                       None, base_dir='basedir')
        self.assertPathEqual(pg.file_path, 'basedir/somedir/otherdir/file.html')

    def testFilePathAbsolute(self):
        pg = page.Page('file:///somedir/otherdir/file.html',
                       None, base_dir='basedir')
        self.assertPathEqual(pg.file_path, '/somedir/otherdir/file.html')

    def testFilePathQueryString(self):
        # The query string must be stripped from file_path.
        pg = page.Page('file://somedir/otherdir/file.html?key=val',
                       None, base_dir='basedir')
        self.assertPathEqual(pg.file_path, 'basedir/somedir/otherdir/file.html')

    def testFilePathUrlQueryString(self):
        # ...but preserved in file_path_url.
        pg = page.Page('file://somedir/file.html?key=val',
                       None, base_dir='basedir')
        self.assertPathEqual(pg.file_path_url,
                             'basedir/somedir/file.html?key=val')

    def testFilePathUrlTrailingSeparator(self):
        pg = page.Page('file://somedir/otherdir/',
                       None, base_dir='basedir')
        self.assertPathEqual(pg.file_path_url, 'basedir/somedir/otherdir/')
        ends_with_sep = (pg.file_path_url.endswith(os.sep) or
                         (os.altsep and pg.file_path_url.endswith(os.altsep)))
        self.assertTrue(ends_with_sep)

    def testSort(self):
        ps = self._make_page_set(['http://www.foo.com/', 'http://www.bar.com/'])
        shuffled = [ps.pages[0], ps.pages[1]]
        shuffled.sort()
        self.assertEquals([ps.pages[1], ps.pages[0]],
                          shuffled)

    def testGetUrlBaseDirAndFileForUrlBaseDir(self):
        # Needs serving_dirs and a custom base dir, so not via the helper.
        ps = page_set.PageSet.FromDict(
            {
                'description': 'hello',
                'archive_path': 'foo.wpr',
                'serving_dirs': ['../somedir/'],
                'pages': [{'url': 'file://../otherdir/file.html'}],
            },
            'basedir/')
        self.assertPathEqual(ps[0].file_path, 'otherdir/file.html')

    def testDisplayUrlForHttp(self):
        ps = self._make_page_set(['http://www.foo.com/', 'http://www.bar.com/'])
        self.assertEquals(ps[0].display_name, 'http://www.foo.com/')
        self.assertEquals(ps[1].display_name, 'http://www.bar.com/')

    def testDisplayUrlForHttps(self):
        ps = self._make_page_set(['http://www.foo.com/', 'https://www.bar.com/'])
        self.assertEquals(ps[0].display_name, 'http://www.foo.com/')
        self.assertEquals(ps[1].display_name, 'https://www.bar.com/')

    def testDisplayUrlForFile(self):
        ps = self._make_page_set(['file://../../otherdir/foo.html',
                                  'file://../../otherdir/bar.html'])
        self.assertEquals(ps[0].display_name, 'foo.html')
        self.assertEquals(ps[1].display_name, 'bar.html')

    def testDisplayUrlForFilesDifferingBySuffix(self):
        ps = self._make_page_set(['file://../../otherdir/foo.html',
                                  'file://../../otherdir/foo1.html'])
        self.assertEquals(ps[0].display_name, 'foo.html')
        self.assertEquals(ps[1].display_name, 'foo1.html')

    def testDisplayUrlForFileOfDifferentPaths(self):
        # Same basename in different dirs: parent dir is kept to disambiguate.
        ps = self._make_page_set(['file://../../somedir/foo.html',
                                  'file://../../otherdir/bar.html'])
        self.assertEquals(ps[0].display_name, 'somedir/foo.html')
        self.assertEquals(ps[1].display_name, 'otherdir/bar.html')

    def testDisplayUrlForFileDirectories(self):
        ps = self._make_page_set(['file://../../otherdir/foo/',
                                  'file://../../otherdir/bar/'])
        self.assertEquals(ps[0].display_name, 'foo')
        self.assertEquals(ps[1].display_name, 'bar')

    def testDisplayUrlForSingleFile(self):
        ps = self._make_page_set(['file://../../otherdir/foo.html'])
        self.assertEquals(ps[0].display_name, 'foo.html')

    def testDisplayUrlForSingleDirectory(self):
        ps = self._make_page_set(['file://../../otherdir/foo/'])
        self.assertEquals(ps[0].display_name, 'foo')
| bsd-3-clause |
NMGRL/pychron | pychron/entry/export/xml_irradiation_exporter.py | 2 | 1261 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.entry.export.base_irradiation_exporter import BaseIrradiationExporter
class XMLIrradiationExporter(BaseIrradiationExporter):
    """
    export irradiations from pychron database to an XML file
    """
    # NOTE(review): no XML-specific behavior is implemented here; everything
    # is inherited unchanged from BaseIrradiationExporter — confirm whether
    # an export/serialization override is still pending.
# ============= EOF =============================================
| apache-2.0 |
kohnle-lernmodule/KITexe201based | twisted/protocols/finger.py | 81 | 1246 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""The Finger User Information Protocol (RFC 1288)"""
from twisted.protocols import basic
import string
class Finger(basic.LineReceiver):
    """Line-based handler for the Finger protocol (RFC 1288).

    Parses a single query line of the form ``[/W] [user][@host]`` and
    dispatches it to getUser/getDomain/forwardQuery.  The default
    implementations refuse every request; subclasses override them to
    provide real behavior.
    """

    def lineReceived(self, line):
        """Parse one finger query line and dispatch it.

        A leading token (e.g. ``/W``) sets the verbose flag ``slash_w``;
        the last token is the ``user[@host]`` target.
        """
        parts = string.split(line)
        if not parts:
            parts = ['']
        # More than one token means a /W (verbose) prefix was present.
        if len(parts) == 1:
            slash_w = 0
        else:
            slash_w = 1
        user = parts[-1]
        if '@' in user:
            host_place = string.rfind(user, '@')
            # BUG FIX: the host must be sliced out *before* user is
            # truncated.  Previously ``host`` was taken from the already
            # truncated user, so forwarded queries always got an empty host.
            host = user[host_place + 1:]
            user = user[:host_place]
            return self.forwardQuery(slash_w, user, host)
        if user:
            return self.getUser(slash_w, user)
        else:
            return self.getDomain(slash_w)

    def _refuseMessage(self, message):
        # Send a single refusal line and drop the connection.
        self.transport.write(message + "\n")
        self.transport.loseConnection()

    def forwardQuery(self, slash_w, user, host):
        """Handle a ``user@host`` query; refused by default."""
        self._refuseMessage('Finger forwarding service denied')

    def getDomain(self, slash_w):
        """Handle an empty query (list online users); refused by default."""
        self._refuseMessage('Finger online list denied')

    def getUser(self, slash_w, user):
        """Handle a single-user query; echoes the login then refuses."""
        self.transport.write('Login: ' + user + '\n')
        self._refuseMessage('No such user')
| gpl-2.0 |
117111302/PyGithub | github/RepositoryKey.py | 72 | 5483 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Srijan Choudhary <srijan4@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class RepositoryKey(github.GithubObject.CompletableGithubObject):
    """
    This class represents RepositoryKeys. The reference can be found here http://developer.github.com/v3/repos/keys/
    """

    def __init__(self, requester, headers, attributes, completed, repoUrl):
        # repoUrl is kept so key-specific endpoints can be rebuilt later:
        # deploy keys are addressed relative to their repository.
        github.GithubObject.CompletableGithubObject.__init__(self, requester, headers, attributes, completed)
        self.__repoUrl = repoUrl

    @property
    def __customUrl(self):
        # .../repos/:owner/:repo/keys/:id — the endpoint for this key.
        return self.__repoUrl + "/keys/" + str(self.id)

    @property
    def id(self):
        """
        :type: integer
        """
        # _completeIfNotSet lazily fetches the full object on first access.
        self._completeIfNotSet(self._id)
        return self._id.value

    @property
    def key(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._key)
        return self._key.value

    @property
    def title(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._title)
        return self._title.value

    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    @property
    def verified(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._verified)
        return self._verified.value

    def delete(self):
        """
        :calls: `DELETE /repos/:owner/:repo/keys/:id <http://developer.github.com/v3/repos/keys>`_
        :rtype: None
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.__customUrl
        )

    def edit(self, title=github.GithubObject.NotSet, key=github.GithubObject.NotSet):
        """
        :calls: `PATCH /repos/:owner/:repo/keys/:id <http://developer.github.com/v3/repos/keys>`_
        :param title: string
        :param key: string
        :rtype: None
        """
        # NotSet sentinels distinguish "leave unchanged" from explicit values.
        assert title is github.GithubObject.NotSet or isinstance(title, (str, unicode)), title
        assert key is github.GithubObject.NotSet or isinstance(key, (str, unicode)), key
        post_parameters = dict()
        if title is not github.GithubObject.NotSet:
            post_parameters["title"] = title
        if key is not github.GithubObject.NotSet:
            post_parameters["key"] = key
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.__customUrl,
            input=post_parameters
        )
        # Refresh local attributes from the server's response.
        self._useAttributes(data)

    def _initAttributes(self):
        # All attributes start as NotSet; they are filled by _useAttributes.
        self._id = github.GithubObject.NotSet
        self._key = github.GithubObject.NotSet
        self._title = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
        self._verified = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        # Copy known keys from the raw API payload into typed attributes.
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "key" in attributes:  # pragma no branch
            self._key = self._makeStringAttribute(attributes["key"])
        if "title" in attributes:  # pragma no branch
            self._title = self._makeStringAttribute(attributes["title"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
        if "verified" in attributes:  # pragma no branch
            self._verified = self._makeBoolAttribute(attributes["verified"])
poljeff/odoo | addons/website_event_track/models/event.py | 300 | 8344 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
import pytz
class event_track_tag(osv.osv):
    # Free-form tag attachable to event tracks (talks); sorted by name.
    _name = "event.track.tag"
    _order = 'name'
    _columns = {
        'name': fields.char('Event Track Tag', translate=True)
    }
class event_tag(osv.osv):
    # Free-form tag attachable to events themselves; sorted by name.
    _name = "event.tag"
    _order = 'name'
    _columns = {
        'name': fields.char('Event Tag', translate=True)
    }
#
# Tracks: conferences
#
class event_track_stage(osv.osv):
    # Workflow/kanban stage for event tracks; ordered by explicit sequence.
    _name = "event.track.stage"
    _order = 'sequence'
    _columns = {
        'name': fields.char('Track Stage', translate=True),
        'sequence': fields.integer('Sequence')
    }
    _defaults = {
        'sequence': 0
    }
class event_track_location(osv.osv):
    # Physical room/location where a track takes place.
    _name = "event.track.location"
    _columns = {
        'name': fields.char('Track Rooms')
    }
class event_track(osv.osv):
    """A track (talk/session), proposed or scheduled, within an event."""
    _name = "event.track"
    _description = 'Event Tracks'
    _order = 'priority, date'
    _inherit = ['mail.thread', 'ir.needaction_mixin', 'website.seo.metadata']

    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the public website URL of each track."""
        res = dict.fromkeys(ids, '')
        for track in self.browse(cr, uid, ids, context=context):
            res[track.id] = "/event/%s/track/%s" % (slug(track.event_id), slug(track))
        return res

    _columns = {
        'name': fields.char('Track Title', required=True, translate=True),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'speaker_ids': fields.many2many('res.partner', string='Speakers'),
        'tag_ids': fields.many2many('event.track.tag', string='Tags'),
        'stage_id': fields.many2one('event.track.stage', 'Stage'),
        'description': fields.html('Track Description', translate=True),
        'date': fields.datetime('Track Date'),
        'duration': fields.float('Duration', digits=(16, 2)),
        'location_id': fields.many2one('event.track.location', 'Location'),
        'event_id': fields.many2one('event.event', 'Event', required=True),
        'color': fields.integer('Color Index'),
        'priority': fields.selection([('3', 'Low'), ('2', 'Medium (*)'), ('1', 'High (**)'), ('0', 'Highest (***)')], 'Priority', required=True),
        'website_published': fields.boolean('Available in the website', copy=False),
        'website_url': fields.function(_website_url, string="Website url", type="char"),
        'image': fields.related('speaker_ids', 'image', type='binary', readonly=True)
    }

    def set_priority(self, cr, uid, ids, priority, context=None):
        """Set the same priority on all given tracks.

        BUG FIX: the signature used ``context={}``, a mutable default
        argument shared across calls; replaced with None (context is not
        used in this method).
        """
        return self.write(cr, uid, ids, {'priority': priority})

    def _default_stage_id(self, cr, uid, context=None):
        """Return the first (lowest-sequence) track stage, or False.

        BUG FIX: ``context={}`` mutable default replaced with None, which
        ``search`` accepts directly.
        """
        stage_obj = self.pool.get('event.track.stage')
        ids = stage_obj.search(cr, uid, [], context=context)
        return ids and ids[0] or False

    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'website_published': lambda self, cr, uid, ctx: False,
        'duration': lambda *args: 1.5,
        'stage_id': _default_stage_id,
        'priority': '2'
    }

    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        """List every stage so grouped (kanban) views show empty columns too."""
        stage_obj = self.pool.get('event.track.stage')
        result = stage_obj.name_search(cr, uid, '', context=context)
        return result, {}

    _group_by_full = {
        'stage_id': _read_group_stage_ids,
    }
#
# Events
#
class event_event(osv.osv):
    """Website extensions of event.event: tracks, sponsors, blog, timezone."""
    _inherit = "event.event"

    def _list_tz(self, cr, uid, context=None):
        # put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
        return [(tz, tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]

    def _count_tracks(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the number of tracks of each event."""
        return {
            event.id: len(event.track_ids)
            for event in self.browse(cr, uid, ids, context=context)
        }

    def _get_tracks_tag_ids(self, cr, uid, ids, field_names, arg=None, context=None):
        """Collect the distinct track-tag ids used across each event's tracks."""
        res = dict((res_id, []) for res_id in ids)
        for event in self.browse(cr, uid, ids, context=context):
            for track in event.track_ids:
                res[event.id] += [tag.id for tag in track.tag_ids]
            res[event.id] = list(set(res[event.id]))
        return res

    _columns = {
        'tag_ids': fields.many2many('event.tag', string='Tags'),
        'track_ids': fields.one2many('event.track', 'event_id', 'Tracks', copy=True),
        'sponsor_ids': fields.one2many('event.sponsor', 'event_id', 'Sponsorships', copy=True),
        'blog_id': fields.many2one('blog.blog', 'Event Blog'),
        'show_track_proposal': fields.boolean('Talks Proposals'),
        'show_tracks': fields.boolean('Multiple Tracks'),
        'show_blog': fields.boolean('News'),
        'count_tracks': fields.function(_count_tracks, type='integer', string='Tracks'),
        'tracks_tag_ids': fields.function(_get_tracks_tag_ids, type='one2many', relation='event.track.tag', string='Tags of Tracks'),
        'allowed_track_tag_ids': fields.many2many('event.track.tag', string='Accepted Tags', help="List of available tags for track proposals."),
        'timezone_of_event': fields.selection(_list_tz, 'Event Timezone', size=64),
    }
    _defaults = {
        'show_track_proposal': False,
        'show_tracks': False,
        'show_blog': False,
        'timezone_of_event': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).tz,
    }

    def _get_new_menu_pages(self, cr, uid, event, context=None):
        """Extend the website menu entries generated for *event*."""
        context = context or {}
        result = super(event_event, self)._get_new_menu_pages(cr, uid, event, context=context)
        if event.show_tracks:
            result.append((_('Talks'), '/event/%s/track' % slug(event)))
            result.append((_('Agenda'), '/event/%s/agenda' % slug(event)))
        if event.blog_id:
            # BUG FIX: this previously read ``event.blog_ig`` (a typo that
            # raised AttributeError at runtime) and concatenated the slug to
            # '/blogpost' without a path separator.
            result.append((_('News'), '/blogpost/%s' % slug(event.blog_id)))
        if event.show_track_proposal:
            result.append((_('Talk Proposals'), '/event/%s/track_proposal' % slug(event)))
        return result
#
# Sponsors
#
class event_sponsors_type(osv.osv):
    """Sponsorship level (e.g. gold/silver/bronze) used to rank sponsors."""
    _name = "event.sponsor.type"
    # Types are listed in their explicit sequence order.
    _order = "sequence"
    _columns = {
        "name": fields.char('Sponsor Type', required=True, translate=True),
        "sequence": fields.integer('Sequence')
    }
class event_sponsors(osv.osv):
    """Sponsorship link between an event and a res.partner."""
    _name = "event.sponsor"
    _order = "sequence"
    _columns = {
        'event_id': fields.many2one('event.event', 'Event', required=True),
        'sponsor_type_id': fields.many2one('event.sponsor.type', 'Sponsoring Type', required=True),
        'partner_id': fields.many2one('res.partner', 'Sponsor/Customer', required=True),
        'url': fields.text('Sponsor Website'),
        # Mirrors the type's sequence (stored) so sponsors sort by level.
        'sequence': fields.related('sponsor_type_id', 'sequence', string='Sequence', store=True),
        'image_medium': fields.related('partner_id', 'image_medium', string='Logo', type='binary')
    }
    def has_access_to_partner(self, cr, uid, ids, context=None):
        # True when the current user can see every sponsor's partner record:
        # compares the number of linked partners against a record-rule-filtered
        # count on res.partner.
        # NOTE(review): if two sponsor records share the same partner, the
        # left-hand len() counts duplicates while the search count does not —
        # verify whether that case can occur for the given ids.
        partner_ids = [sponsor.partner_id.id for sponsor in self.browse(cr, uid, ids, context=context)]
        return len(partner_ids) == self.pool.get("res.partner").search(cr, uid, [("id", "in", partner_ids)], count=True, context=context)
| agpl-3.0 |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/django/db/migrations/writer.py | 42 | 20479 | from __future__ import unicode_literals
import collections
import datetime
import decimal
import inspect
import math
import os
import re
import sys
import types
from importlib import import_module
from django.apps import apps
from django.db import migrations, models
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.operations.base import Operation
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
# re.compile() returns an instance of a private pattern type; capture it once
# so MigrationWriter.serialize() can detect compiled regexes via isinstance().
COMPILED_REGEX_TYPE = type(re.compile(''))
class SettingsReference(str):
    """
    A string subclass that remembers which settings attribute it came from.

    At runtime it compares and behaves exactly like its underlying string
    value, but the migration writer serializes it back out as a
    ``settings.<setting_name>`` attribute reference instead of a literal.
    """
    def __new__(cls, value, setting_name):
        # str is immutable, so the value must be fixed in __new__.
        return str.__new__(cls, value)
    def __init__(self, value, setting_name):
        self.setting_name = setting_name
class OperationWriter(object):
    """Renders a single migration Operation as indented Python source.

    serialize() returns ``(rendered_source, imports)`` where ``imports`` is
    the set of import statements the rendered code needs.
    """
    def __init__(self, operation, indentation=2):
        self.operation = operation
        self.buff = []  # accumulated output lines
        self.indentation = indentation  # indent level; 4 spaces per level
    def serialize(self):
        """Serialize self.operation into source lines plus needed imports."""
        def _write(_arg_name, _arg_value):
            # ``imports`` below is the set created later in serialize(); the
            # closure is resolved at call time, after it exists.
            # Arguments listed in serialization_expand_args are expanded one
            # element per line instead of being emitted as a single repr.
            if (_arg_name in self.operation.serialization_expand_args and
                    isinstance(_arg_value, (list, tuple, dict))):
                if isinstance(_arg_value, dict):
                    self.feed('%s={' % _arg_name)
                    self.indent()
                    for key, value in _arg_value.items():
                        key_string, key_imports = MigrationWriter.serialize(key)
                        arg_string, arg_imports = MigrationWriter.serialize(value)
                        args = arg_string.splitlines()
                        if len(args) > 1:
                            # Multi-line value: first line joins the key, the
                            # last line gets the trailing comma.
                            self.feed('%s: %s' % (key_string, args[0]))
                            for arg in args[1:-1]:
                                self.feed(arg)
                            self.feed('%s,' % args[-1])
                        else:
                            self.feed('%s: %s,' % (key_string, arg_string))
                        imports.update(key_imports)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('},')
                else:
                    self.feed('%s=[' % _arg_name)
                    self.indent()
                    for item in _arg_value:
                        arg_string, arg_imports = MigrationWriter.serialize(item)
                        args = arg_string.splitlines()
                        if len(args) > 1:
                            for arg in args[:-1]:
                                self.feed(arg)
                            self.feed('%s,' % args[-1])
                        else:
                            self.feed('%s,' % arg_string)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('],')
            else:
                # Plain keyword argument: emit as ``name=value,``.
                arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
                args = arg_string.splitlines()
                if len(args) > 1:
                    self.feed('%s=%s' % (_arg_name, args[0]))
                    for arg in args[1:-1]:
                        self.feed(arg)
                    self.feed('%s,' % args[-1])
                else:
                    self.feed('%s=%s,' % (_arg_name, arg_string))
                imports.update(arg_imports)
        imports = set()
        name, args, kwargs = self.operation.deconstruct()
        # Map deconstructed positional args back to parameter names.
        argspec = inspect.getargspec(self.operation.__init__)
        # See if this operation is in django.db.migrations. If it is,
        # We can just use the fact we already have that imported,
        # otherwise, we need to add an import for the operation class.
        if getattr(migrations, name, None) == self.operation.__class__:
            self.feed('migrations.%s(' % name)
        else:
            imports.add('import %s' % (self.operation.__class__.__module__))
            self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
        self.indent()
        # Start at one because argspec includes "self"
        for i, arg in enumerate(args, 1):
            arg_value = arg
            arg_name = argspec.args[i]
            _write(arg_name, arg_value)
        i = len(args)
        # Only iterate over remaining arguments
        for arg_name in argspec.args[i + 1:]:
            if arg_name in kwargs:
                arg_value = kwargs[arg_name]
                _write(arg_name, arg_value)
        self.unindent()
        self.feed('),')
        return self.render(), imports
    def indent(self):
        self.indentation += 1
    def unindent(self):
        self.indentation -= 1
    def feed(self, line):
        # Append one output line at the current indent level.
        self.buff.append(' ' * (self.indentation * 4) + line)
    def render(self):
        return '\n'.join(self.buff)
class MigrationWriter(object):
    """
    Takes a Migration instance and is able to produce the contents
    of the migration file from it.
    """
    def __init__(self, migration):
        self.migration = migration
        # Set to True by as_string() when functions imported from other
        # migration modules must be copied over by hand.
        self.needs_manual_porting = False
    def as_string(self):
        """
        Returns a string of the file contents.
        """
        # NOTE: despite the docstring, the return value is UTF-8 encoded
        # bytes (see the .encode("utf8") at the bottom).
        items = {
            "replaces_str": "",
        }
        imports = set()
        # Deconstruct operations
        operations = []
        for operation in self.migration.operations:
            operation_string, operation_imports = OperationWriter(operation).serialize()
            imports.update(operation_imports)
            operations.append(operation_string)
        items["operations"] = "\n".join(operations) + "\n" if operations else ""
        # Format dependencies and write out swappable dependencies right
        dependencies = []
        for dependency in self.migration.dependencies:
            if dependency[0] == "__setting__":
                dependencies.append("        migrations.swappable_dependency(settings.%s)," % dependency[1])
                imports.add("from django.conf import settings")
            else:
                # No need to output bytestrings for dependencies
                dependency = tuple(force_text(s) for s in dependency)
                dependencies.append("        %s," % self.serialize(dependency)[0])
        items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
        # Format imports nicely, swapping imports of functions from migration files
        # for comments
        migration_imports = set()
        for line in list(imports):
            # Imports that target a migration module (``import app.0002_x``)
            # cannot be kept; flag them for manual porting instead.
            if re.match("^import (.*)\.\d+[^\s]*$", line):
                migration_imports.add(line.split("import")[1].strip())
                imports.remove(line)
                self.needs_manual_porting = True
        # The template already hardcodes the models import.
        imports.discard("from django.db import models")
        items["imports"] = "\n".join(imports) + "\n" if imports else ""
        if migration_imports:
            items["imports"] += (
                "\n\n# Functions from the following migrations need manual "
                "copying.\n# Move them and any dependencies into this file, "
                "then update the\n# RunPython operations to refer to the local "
                "versions:\n# %s"
            ) % "\n# ".join(migration_imports)
        # If there's a replaces, make a string for it
        if self.migration.replaces:
            items['replaces_str'] = "\n    replaces = %s\n" % self.serialize(self.migration.replaces)[0]
        return (MIGRATION_TEMPLATE % items).encode("utf8")
    @staticmethod
    def serialize_datetime(value):
        """
        Returns a serialized version of a datetime object that is valid,
        executable python code. It converts timezone-aware values to utc with
        an 'executable' utc representation of tzinfo.
        """
        if value.tzinfo is not None and value.tzinfo != utc:
            value = value.astimezone(utc)
        value_repr = repr(value).replace("<UTC>", "utc")
        if isinstance(value, datetime_safe.datetime):
            value_repr = "datetime.%s" % value_repr
        return value_repr
    @property
    def filename(self):
        """File name of the migration module, e.g. ``0001_initial.py``."""
        return "%s.py" % self.migration.name
    @property
    def path(self):
        """Absolute filesystem path the migration file should be written to."""
        migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)
        # See if we can import the migrations module directly
        try:
            migrations_module = import_module(migrations_package_name)
            # Python 3 fails when the migrations directory does not have a
            # __init__.py file
            if not hasattr(migrations_module, '__file__'):
                raise ImportError
            basedir = os.path.dirname(upath(migrations_module.__file__))
        except ImportError:
            app_config = apps.get_app_config(self.migration.app_label)
            migrations_package_basename = migrations_package_name.split(".")[-1]
            # Alright, see if it's a direct submodule of the app
            if '%s.%s' % (app_config.name, migrations_package_basename) == migrations_package_name:
                basedir = os.path.join(app_config.path, migrations_package_basename)
            else:
                # In case of using MIGRATION_MODULES setting and the custom
                # package doesn't exist, create one.
                package_dirs = migrations_package_name.split(".")
                create_path = os.path.join(upath(sys.path[0]), *package_dirs)
                if not os.path.isdir(create_path):
                    os.makedirs(create_path)
                # Create every missing __init__.py along the package path so
                # the new package is importable.
                for i in range(1, len(package_dirs) + 1):
                    init_dir = os.path.join(upath(sys.path[0]), *package_dirs[:i])
                    init_path = os.path.join(init_dir, "__init__.py")
                    if not os.path.isfile(init_path):
                        open(init_path, "w").close()
                return os.path.join(create_path, self.filename)
        return os.path.join(basedir, self.filename)
    @classmethod
    def serialize_deconstructed(cls, path, args, kwargs):
        """Render a deconstructed object as a ``name(arg, kw=val)`` call
        string plus the imports that call needs."""
        name, imports = cls._serialize_path(path)
        strings = []
        for arg in args:
            arg_string, arg_imports = cls.serialize(arg)
            strings.append(arg_string)
            imports.update(arg_imports)
        for kw, arg in kwargs.items():
            arg_string, arg_imports = cls.serialize(arg)
            imports.update(arg_imports)
            strings.append("%s=%s" % (kw, arg_string))
        return "%s(%s)" % (name, ", ".join(strings)), imports
    @classmethod
    def _serialize_path(cls, path):
        """Turn a dotted path into (reference_name, imports), shortening
        django.db.models paths to ``models.X``."""
        module, name = path.rsplit(".", 1)
        if module == "django.db.models":
            imports = {"from django.db import models"}
            name = "models.%s" % name
        else:
            imports = {"import %s" % module}
            name = path
        return name, imports
    @classmethod
    def serialize(cls, value):
        """
        Serializes the value to a string that's parsable by Python, along
        with any needed imports to make that string work.
        More advanced than repr() as it can encode things
        like datetime.datetime.now.
        """
        # FIXME: Ideally Promise would be reconstructible, but for now we
        # use force_text on them and defer to the normal string serialization
        # process.
        if isinstance(value, Promise):
            value = force_text(value)
        # Sequences
        if isinstance(value, (list, set, tuple)):
            imports = set()
            strings = []
            for item in value:
                item_string, item_imports = cls.serialize(item)
                imports.update(item_imports)
                strings.append(item_string)
            if isinstance(value, set):
                # Don't use the literal "{%s}" as it doesn't support empty set
                format = "set([%s])"
            elif isinstance(value, tuple):
                # When len(value)==0, the empty tuple should be serialized as
                # "()", not "(,)" because (,) is invalid Python syntax.
                format = "(%s)" if len(value) != 1 else "(%s,)"
            else:
                format = "[%s]"
            return format % (", ".join(strings)), imports
        # Dictionaries
        elif isinstance(value, dict):
            imports = set()
            strings = []
            for k, v in value.items():
                k_string, k_imports = cls.serialize(k)
                v_string, v_imports = cls.serialize(v)
                imports.update(k_imports)
                imports.update(v_imports)
                strings.append((k_string, v_string))
            return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
        # Datetimes
        elif isinstance(value, datetime.datetime):
            value_repr = cls.serialize_datetime(value)
            imports = ["import datetime"]
            if value.tzinfo is not None:
                imports.append("from django.utils.timezone import utc")
            return value_repr, set(imports)
        # Dates
        elif isinstance(value, datetime.date):
            value_repr = repr(value)
            if isinstance(value, datetime_safe.date):
                # datetime_safe reprs lack the "datetime." module prefix.
                value_repr = "datetime.%s" % value_repr
            return value_repr, {"import datetime"}
        # Times
        elif isinstance(value, datetime.time):
            value_repr = repr(value)
            if isinstance(value, datetime_safe.time):
                value_repr = "datetime.%s" % value_repr
            return value_repr, {"import datetime"}
        # Timedeltas
        elif isinstance(value, datetime.timedelta):
            return repr(value), {"import datetime"}
        # Settings references
        elif isinstance(value, SettingsReference):
            return "settings.%s" % value.setting_name, {"from django.conf import settings"}
        # Simple types
        elif isinstance(value, float):
            if math.isnan(value) or math.isinf(value):
                # nan/inf have no literal form; reconstruct via float("...").
                return 'float("{}")'.format(value), set()
            return repr(value), set()
        elif isinstance(value, six.integer_types + (bool, type(None))):
            return repr(value), set()
        elif isinstance(value, six.binary_type):
            value_repr = repr(value)
            if six.PY2:
                # Prepend the `b` prefix since we're importing unicode_literals
                value_repr = 'b' + value_repr
            return value_repr, set()
        elif isinstance(value, six.text_type):
            value_repr = repr(value)
            if six.PY2:
                # Strip the `u` prefix since we're importing unicode_literals
                value_repr = value_repr[1:]
            return value_repr, set()
        # Decimal
        elif isinstance(value, decimal.Decimal):
            return repr(value), {"from decimal import Decimal"}
        # Django fields
        elif isinstance(value, models.Field):
            attr_name, path, args, kwargs = value.deconstruct()
            return cls.serialize_deconstructed(path, args, kwargs)
        # Classes
        elif isinstance(value, type):
            special_cases = [
                (models.Model, "models.Model", []),
            ]
            for case, string, imports in special_cases:
                if case is value:
                    return string, set(imports)
            if hasattr(value, "__module__"):
                module = value.__module__
                if module == six.moves.builtins.__name__:
                    # Builtins (int, str, ...) need no import.
                    return value.__name__, set()
                else:
                    return "%s.%s" % (module, value.__name__), {"import %s" % module}
        elif isinstance(value, models.manager.BaseManager):
            as_manager, manager_path, qs_path, args, kwargs = value.deconstruct()
            if as_manager:
                name, imports = cls._serialize_path(qs_path)
                return "%s.as_manager()" % name, imports
            else:
                return cls.serialize_deconstructed(manager_path, args, kwargs)
        elif isinstance(value, Operation):
            string, imports = OperationWriter(value, indentation=0).serialize()
            # Nested operation, trailing comma is handled in upper OperationWriter._write()
            return string.rstrip(','), imports
        # Anything that knows how to deconstruct itself.
        elif hasattr(value, 'deconstruct'):
            return cls.serialize_deconstructed(*value.deconstruct())
        # Functions
        elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
            # @classmethod?
            if getattr(value, "__self__", None) and isinstance(value.__self__, type):
                klass = value.__self__
                module = klass.__module__
                return "%s.%s.%s" % (module, klass.__name__, value.__name__), {"import %s" % module}
            # Further error checking
            if value.__name__ == '<lambda>':
                raise ValueError("Cannot serialize function: lambda")
            if value.__module__ is None:
                raise ValueError("Cannot serialize function %r: No module" % value)
            # Python 3 is a lot easier, and only uses this branch if it's not local.
            if getattr(value, "__qualname__", None) and getattr(value, "__module__", None):
                if "<" not in value.__qualname__:  # Qualname can include <locals>
                    return "%s.%s" % (value.__module__, value.__qualname__), {"import %s" % value.__module__}
            # Python 2/fallback version
            module_name = value.__module__
            # Make sure it's actually there and not an unbound method
            module = import_module(module_name)
            if not hasattr(module, value.__name__):
                raise ValueError(
                    "Could not find function %s in %s.\n"
                    "Please note that due to Python 2 limitations, you cannot "
                    "serialize unbound method functions (e.g. a method "
                    "declared and used in the same class body). Please move "
                    "the function into the main module body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                    % (value.__name__, module_name, get_docs_version()))
            return "%s.%s" % (module_name, value.__name__), {"import %s" % module_name}
        # Other iterables
        elif isinstance(value, collections.Iterable):
            # NOTE(review): collections.Iterable was removed from the top-level
            # collections module in Python 3.10; this code targets older
            # Python/Django versions.
            imports = set()
            strings = []
            for item in value:
                item_string, item_imports = cls.serialize(item)
                imports.update(item_imports)
                strings.append(item_string)
            # When len(strings)==0, the empty iterable should be serialized as
            # "()", not "(,)" because (,) is invalid Python syntax.
            format = "(%s)" if len(strings) != 1 else "(%s,)"
            return format % (", ".join(strings)), imports
        # Compiled regex
        elif isinstance(value, COMPILED_REGEX_TYPE):
            imports = {"import re"}
            regex_pattern, pattern_imports = cls.serialize(value.pattern)
            regex_flags, flag_imports = cls.serialize(value.flags)
            imports.update(pattern_imports)
            imports.update(flag_imports)
            args = [regex_pattern]
            if value.flags:
                args.append(regex_flags)
            return "re.compile(%s)" % ', '.join(args), imports
        # Uh oh.
        else:
            raise ValueError(
                "Cannot serialize: %r\nThere are some values Django cannot serialize into "
                "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
                "topics/migrations/#migration-serializing" % (value, get_docs_version())
            )
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| gpl-2.0 |
agry/NGECore2 | scripts/object/tangible/wearables/armor/mandalorian_imperial/armor_mandalorian_imperial_bracer_r.py | 14 | 1049 | import sys
def setup(core, object):
object.setStringAttribute('faction_restriction', 'Imperial')
object.setIntAttribute('required_combat_level', 75)
object.setStringAttribute('armor_category', '@obj_attr_n:armor_battle')
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:constitution_modified', 18)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:luck_modified', 6)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:precision_modified', 12)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:strength_modified', 12)
object.setIntAttribute('cat_armor_standard_protection.kinetic', 5496)
object.setIntAttribute('cat_armor_standard_protection.energy', 5496)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_heat', 5496)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_cold', 5496)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_acid', 5496)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_electricity', 5496)
return | lgpl-3.0 |
Karaage-Cluster/karaage-debian | karaage/legacy/machines/south_migrations/0011_rename_user_to_person.py | 3 | 13854 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration: rename ``user_account.user_id`` to ``person_id``."""
    def forwards(self, orm):
        # Forward: user_id -> person_id.
        db.rename_column('user_account', 'user_id', 'person_id')
    def backwards(self, orm):
        # Reverse: person_id -> user_id.
        db.rename_column('user_account', 'person_id', 'user_id')
    # Frozen ORM state captured by South when this migration was generated.
    # It describes the schema as of this migration, not the current models;
    # do not hand-edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'institutes.institute': {
            'Meta': {'ordering': "['name']", 'object_name': 'Institute', 'db_table': "'institute'"},
            'delegates': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'delegate'", 'to': "orm['people.Person']", 'through': "orm['institutes.InstituteDelegate']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'saml_entityid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'institutes.institutedelegate': {
            'Meta': {'object_name': 'InstituteDelegate', 'db_table': "'institutedelegate'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
            'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'machines.machine': {
            'Meta': {'object_name': 'Machine', 'db_table': "'machine'"},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['machines.MachineCategory']"}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mem_per_core': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'no_cpus': ('django.db.models.fields.IntegerField', [], {}),
            'no_nodes': ('django.db.models.fields.IntegerField', [], {}),
            'pbs_server_host': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'scaling_factor': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'machines.machinecategory': {
            'Meta': {'object_name': 'MachineCategory', 'db_table': "'machine_category'"},
            'datastore': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'machines.useraccount': {
            'Meta': {'ordering': "['person']", 'object_name': 'UserAccount', 'db_table': "'user_account'"},
            'date_created': ('django.db.models.fields.DateField', [], {}),
            'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'default_project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'}),
            'disk_quota': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'login_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'machine_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['machines.MachineCategory']"}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
            'shell': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'people.group': {
            'Meta': {'ordering': "['name']", 'object_name': 'Group'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['people.Person']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'people.person': {
            'Meta': {'ordering': "['first_name', 'last_name']", 'object_name': 'Person', 'db_table': "'person'"},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
            'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
            'is_systemuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'legacy_ldap_password': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'login_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'saml_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'projects.project': {
            'Meta': {'ordering': "['pid']", 'object_name': 'Project', 'db_table': "'project'"},
            'additional_req': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
            'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Group']"}),
            'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'leaders': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'leaders'", 'symmetrical': 'False', 'to': "orm['people.Person']"}),
            'machine_categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'projects'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['machines.MachineCategory']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'pid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 7, 0, 0)'})
        }
    }
    complete_apps = ['machines']
| gpl-3.0 |
asm-products/cloudroutes-service | src/monitors/broker.py | 6 | 2951 | #!/usr/bin/python
#####################################################################
# Cloud Routes Availability Manager: Broker
# ------------------------------------------------------------------
# Description:
# ------------------------------------------------------------------
# This is a message broker for the availability manager.
# This process will send messages from the various control processes
# and send them to the worker processes via zMQ.
# ------------------------------------------------------------------
# Version: Alpha.20140308
# Original Author: Benjamin J. Cane - madflojo@cloudrout.es
# Contributors:
# - your name here
#####################################################################
# Imports
# ------------------------------------------------------------------
import sys
import zmq
import time
import yaml
import signal
import logconfig
# Load Configuration
# ------------------------------------------------------------------
# Expect exactly one argument: the path to the YAML config file.
if len(sys.argv) != 2:
    print("Hey, thats not how you launch this...")
    # Fixed: this previously read ``print("%s <config file>") % sys.argv[0]``,
    # which only worked because of Python 2 print-statement parsing and is a
    # TypeError (None % str) on Python 3. Format the string first.
    print("%s <config file>" % sys.argv[0])
    sys.exit(1)
# Open Config File and Parse Config Data
configfile = sys.argv[1]
# Use a context manager so the handle is closed even if parsing raises.
with open(configfile, "r") as cfh:
    config = yaml.safe_load(cfh)
# Make Connections
# ------------------------------------------------------------------
# Init logger
logger = logconfig.getLogger('monitors.broker', config['use_syslog'])
logger.info("Using config %s" % configfile)
# Start ZeroMQ listener for control
# PULL socket: receives messages from the control processes.
context = zmq.Context()
zrecv = context.socket(zmq.PULL)
bindaddress_pull = "tcp://%s:%d" % (
    config['broker_ip'], config['broker_control_port'])
zrecv.bind(bindaddress_pull)
logger.info("Attempting to bind to %s for pulling" % bindaddress_pull)
# Start ZeroMQ listener for workers
# PUSH socket: fans messages out to the worker processes.
context2 = zmq.Context()
zsend = context2.socket(zmq.PUSH)
bindaddress_push = "tcp://%s:%d" % (
    config['broker_ip'], config['broker_worker_port'])
zsend.bind(bindaddress_push)
logger.info("Attempting to bind to %s for pushing" % bindaddress_push)
# Handle Kill Signals Cleanly
# ------------------------------------------------------------------
def killhandle(signum, frame):
    ''' This will close connections cleanly '''
    # Closes the module-level PUSH/PULL sockets before exiting so zMQ can
    # tear down cleanly.
    logger.info("SIGTERM detected, shutting down")
    zsend.close()
    zrecv.close()
    sys.exit(0)
# Register the handler for SIGTERM only; SIGINT keeps default behaviour.
signal.signal(signal.SIGTERM, killhandle)
# Run For Loop
# ------------------------------------------------------------------

# Let the workers get started
# NOTE(review): a fixed 20s sleep assumes workers connect within that
# window; a readiness handshake would be more robust -- confirm worker
# startup timing before changing.
time.sleep(20)

# Infinite forwarding loop: every message pulled from a control process
# is pushed to the worker pool (PUSH distributes round-robin).
while True:
    # Get list of members to check from queue; recv() blocks until a
    # message arrives on the PULL socket.
    msg = zrecv.recv()
    logger.debug("Got message from %s, sending it to %s, %s" % (
        bindaddress_pull, bindaddress_push, msg))
    zsend.send(msg)
    # The following should be disabled unless it is times of distress
    #import json
    #jdata = json.loads(msg)
    #logger.debug("Sent health check %s to workers" % jdata['cid'])
| agpl-3.0 |
wukan1986/kquant_data | demo_stock/A_1day_000016/E03_merge_000016.py | 1 | 2779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
分为两种
1. 全市场数据,将部分标记上权重
2. 只对历史上成为成份股的,进行处理,由于前面已经转换了数据,这里只要跳选数据并处理即可
"""
import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_STK_WEIGHT_DIR__, __CONFIG_H5_STK_DIR__, __CONFIG_TDX_STK_DIR__, \
__CONFIG_H5_STK_DIVIDEND_DIR__
from kquant_data.processing.merge import merge_weight, load_index_weight, merge_weight_internal
from kquant_data.stock.symbol import get_symbols_from_wind_code_df
from kquant_data.api import get_datetime
from kquant_data.processing.utils import filter_dataframe, split_into_group
from kquant_data.processing.MergeBar import MergeBar
from kquant_data.stock.stock import read_h5_tdx
class MergeDataStock_1day_000016(MergeBar):
    """Merge daily (1-day) bars for the constituents of index 000016.SH.

    Overrides the MergeBar hooks so that the symbol universe is derived
    from the stored index-weight files instead of an exported list.
    """

    def __init__(self, folder):
        super(MergeDataStock_1day_000016, self).__init__(folder)
        # One bar per trading day, expressed in seconds.
        self.bar_size = 86400

    def init_symbols(self):
        """Build the instrument list from the index-weight directory.

        Rather than reading an exported symbol list, infer the universe
        from the columns of the weight files: every stock that has ever
        been a constituent appears as a column there.
        """
        wind_code = '000016.SH'
        path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
        df = load_index_weight(path)
        wind_codes = pd.DataFrame(list(df.columns), columns=['wind_code'])
        df = get_symbols_from_wind_code_df(wind_codes)
        self.instruments = df
        # Persist the resolved symbol list next to the merged output.
        path = os.path.join(self.folder, 'Symbol.csv')
        self.instruments.to_csv(path, index=False)
        self.instruments_group = split_into_group(self.instruments, self.group_len)

    def init_datetime(self):
        """Use the index's own daily bars as the master datetime axis."""
        # self.bar_size (set in __init__) replaces the former hard-coded
        # 86400 so the bar size is defined in exactly one place.
        df = read_h5_tdx("sh", "000016", self.bar_size, __CONFIG_H5_STK_DIR__, __CONFIG_TDX_STK_DIR__,
                         __CONFIG_H5_STK_DIVIDEND_DIR__)
        df = filter_dataframe(df, 'DateTime', None, None, fields=['DateTime'])
        # Keeping the frame in memory only; saving it to disk is optional.
        self.datetime = df
        super(MergeDataStock_1day_000016, self).init_datetime()

    def init_fields(self):
        """Columns merged for every instrument."""
        self.fields = ['Open', 'High', 'Low', 'Close', 'Volume', 'Amount', 'backward_factor', 'forward_factor']

    def read_data(self, market, code, bar_size):
        """Load one instrument's daily bars; empty frame when unavailable.

        `bar_size` is part of the MergeBar interface but unused here:
        the path always points at the 1-day store.
        """
        h5_path = os.path.join(__CONFIG_H5_STK_DIR__, '1day', market, '%s%s.h5' % (market, code))
        try:
            df = pd.read_hdf(h5_path)
            df = filter_dataframe(df, 'DateTime', None, None, None)
        except Exception:
            # Was a bare "except:"; Exception keeps the same best-effort
            # fallback without swallowing SystemExit/KeyboardInterrupt.
            return pd.DataFrame(columns=self.fields + ['DateTime'])
        return df
if __name__ == '__main__':
    # Merge the OHLCV / adjustment-factor bars for the SSE 50 (000016.SH)
    # constituent universe into the per-field matrix layout.
    path = os.path.join(__CONFIG_H5_STK_DIR__, "1day_000016.SH")
    mdf = MergeDataStock_1day_000016(path)
    mdf.merge()
    mdf.hmerge()
    mdf.clear()
    pass
| bsd-2-clause |
apache/bloodhound | trac/tracopt/mimeview/silvercity.py | 2 | 6153 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
"""Syntax highlighting module, based on the SilverCity module.
Get it at: http://silvercity.sourceforge.net/
"""
import re
from StringIO import StringIO
from genshi.core import Markup
from trac.core import *
from trac.config import ListOption
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.util import get_pkginfo
try:
import SilverCity
have_silvercity = True
except ImportError:
have_silvercity = False
__all__ = ['SilverCityRenderer']
types = {
'text/css': ('CSS', 3),
'text/html': ('HyperText', 3, {'asp.default.language':1}),
'application/xml': ('XML', 3),
'application/xhtml+xml': ('HyperText', 3, {'asp.default.language':1}),
'application/rss+xml': ('HyperText', 3, {'asp.default.language':1}),
'application/x-yaml': ('YAML', 3),
'text/x-yaml': ('YAML', 3),
'application/x-javascript': ('CPP', 3), # Kludgy.
'text/x-asp': ('HyperText', 3, {'asp.default.language':2}),
'text/x-c++hdr': ('CPP', 3),
'text/x-c++src': ('CPP', 3),
'text/x-chdr': ('CPP', 3),
'text/x-csrc': ('CPP', 3),
'text/x-perl': ('Perl', 3),
'text/x-php': ('HyperText', 3, {'asp.default.language': 4}),
'application/x-httpd-php': ('HyperText', 3, {'asp.default.language': 4}),
'application/x-httpd-php4': ('HyperText', 3, {'asp.default.language': 4}),
'application/x-httpd-php3': ('HyperText', 3, {'asp.default.language': 4}),
'text/x-java': ('Java', 3),
'text/x-javascript': ('CPP', 3), # Kludgy.
'text/x-psp': ('HyperText', 3, {'asp.default.language': 3}),
'text/x-python': ('Python', 3),
'text/x-ruby': ('Ruby', 3),
'text/x-sql': ('SQL', 3),
'text/x-verilog': ('Verilog', 3),
'text/xml': ('XML', 3),
'text/xslt': ('XSLT', 3),
'image/svg+xml': ('XML', 3)
}
CRLF_RE = re.compile('\r$', re.MULTILINE)
class SilverCityRenderer(Component):
    """Syntax highlighting based on SilverCity."""

    implements(ISystemInfoProvider, IHTMLPreviewRenderer)

    silvercity_modes = ListOption('mimeviewer', 'silvercity_modes',
        '', doc=
        """List of additional MIME types known by SilverCity.
        For each, a tuple `mimetype:mode:quality` has to be
        specified, where `mimetype` is the MIME type,
        `mode` is the corresponding SilverCity mode to be used
        for the conversion and `quality` is the quality ratio
        associated to this conversion.
        That can also be used to override the default
        quality ratio used by the SilverCity render, which is 3
        (''since 0.10'').""")

    # Tabs are expanded before rendering; render() returns annotated
    # source lines, not a complete document.
    expand_tabs = True
    returns_source = True

    def __init__(self):
        # MIME-type -> (mode, quality[, properties]) map, built lazily
        # on the first get_quality_ratio() call.
        self._types = None

    # ISystemInfoProvider methods

    def get_system_info(self):
        """Yield a (name, version) pair for the optional SilverCity package."""
        if have_silvercity:
            yield 'SilverCity', get_pkginfo(SilverCity).get('version', '?')
            # TODO: the above works only if setuptools was used to build
            # SilverCity, which is not yet the case by default for 0.9.7.
            # I've not been able to find an alternative way to get version.

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        """Return the quality ratio for `mimetype` (0 when unsupported)."""
        # Extend default MIME type to mode mappings with configured ones
        if not have_silvercity:
            return 0
        if not self._types:
            self._types = {}
            self._types.update(types)
            self._types.update(
                Mimeview(self.env).configured_modes_mapping('silvercity'))
        return self._types.get(mimetype, (None, 0))[1]

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Render `content` as a list of Markup-highlighted source lines."""
        try:
            mimetype = mimetype.split(';', 1)[0]
            typelang = self._types[mimetype]
            lang = typelang[0]
            module = getattr(SilverCity, lang)
            generator = getattr(module, lang + "HTMLGenerator")
            try:
                # An optional third tuple element carries extra lexer
                # properties (e.g. asp.default.language for PHP/ASP).
                allprops = typelang[2]
                propset = SilverCity.PropertySet()
                for p in allprops.keys():
                    propset[p] = allprops[p]
            except IndexError:
                pass
        except (KeyError, AttributeError):
            err = "No SilverCity lexer found for mime-type '%s'." % mimetype
            # Fix: parenthesized raise form, valid in both Python 2 and 3
            # (was the Python-2-only "raise Exception, err").
            raise Exception(err)

        # SilverCity does not like unicode strings
        content = content.encode('utf-8')

        # SilverCity generates extra empty line against some types of
        # the line such as comment or #include with CRLF. So we
        # standardize to LF end-of-line style before call.
        content = CRLF_RE.sub('', content)

        buf = StringIO()
        generator().generate_html(buf, content)

        br_re = re.compile(r'<br\s*/?>$', re.MULTILINE)
        span_default_re = re.compile(r'<span class="\w+_default">(.*?)</span>',
                                     re.DOTALL)
        html = span_default_re.sub(r'\1', br_re.sub('', buf.getvalue()))

        # Convert the output back to a unicode string
        html = html.decode('utf-8')

        # SilverCity generates _way_ too many non-breaking spaces...
        # We don't need them anyway, so replace them by normal spaces.
        # Fix: target the '&nbsp;' entity SilverCity emits; the previous
        # literal had been mangled into a space-for-space no-op replace.
        return [Markup(line)
                for line in html.replace('&nbsp;', ' ').splitlines()]
| apache-2.0 |
arch1tect0r/root | interpreter/llvm/src/utils/lit/lit/ShUtil.py | 83 | 12179 | from __future__ import absolute_import
import itertools
import lit.util
from lit.ShCommands import Command, Pipeline, Seq
class ShLexer:
    """Lexer for a small POSIX-shell-like command syntax.

    lex() yields either plain argument strings or tuples for operators:
    1-tuples such as ('|',) or ('>>',), and 2-tuples such as ('>&', 2)
    for fd-prefixed redirections.
    """

    def __init__(self, data, win32Escapes = False):
        self.data = data
        self.pos = 0
        self.end = len(data)
        # When True, '\\' is treated as a literal path separator instead
        # of an escape character (Windows-style command lines).
        self.win32Escapes = win32Escapes

    def eat(self):
        # Consume and return the next character.
        c = self.data[self.pos]
        self.pos += 1
        return c

    def look(self):
        # Peek at the next character without consuming it.
        return self.data[self.pos]

    def maybe_eat(self, c):
        """
        maybe_eat(c) - Consume the character c if it is the next character,
        returning True if a character was consumed. """
        if self.data[self.pos] == c:
            self.pos += 1
            return True
        return False

    def lex_arg_fast(self, c):
        """Fast path: lex a plain argument containing no special characters.

        Returns None when the chunk needs quoting/escape/redirection
        handling, in which case the caller falls back to lex_arg_slow().
        """
        # Get the leading whitespace free section.
        chunk = self.data[self.pos - 1:].split(None, 1)[0]

        # If it has special characters, the fast path failed.
        if ('|' in chunk or '&' in chunk or
            '<' in chunk or '>' in chunk or
            "'" in chunk or '"' in chunk or
            ';' in chunk or '\\' in chunk):
            return None

        self.pos = self.pos - 1 + len(chunk)
        return chunk

    def lex_arg_slow(self, c):
        """Full argument lexer: quotes, escapes, and fd-redirection prefixes.

        (The local name `str` shadows the builtin -- kept as-is.)
        """
        if c in "'\"":
            str = self.lex_arg_quoted(c)
        else:
            str = c
        while self.pos != self.end:
            c = self.look()
            if c.isspace() or c in "|&;":
                break
            elif c in '><':
                # This is an annoying case; we treat '2>' as a single token so
                # we don't have to track whitespace tokens.

                # If the parse string isn't an integer, do the usual thing.
                if not str.isdigit():
                    break

                # Otherwise, lex the operator and convert to a redirection
                # token.
                num = int(str)
                tok = self.lex_one_token()
                assert isinstance(tok, tuple) and len(tok) == 1
                return (tok[0], num)
            elif c == '"':
                self.eat()
                str += self.lex_arg_quoted('"')
            elif c == "'":
                self.eat()
                str += self.lex_arg_quoted("'")
            elif not self.win32Escapes and c == '\\':
                # Outside of a string, '\\' escapes everything.
                self.eat()
                if self.pos == self.end:
                    lit.util.warning(
                        "escape at end of quoted argument in: %r" % self.data)
                    return str
                str += self.eat()
            else:
                str += self.eat()
        return str

    def lex_arg_quoted(self, delim):
        """Lex the rest of a quoted argument; the opening `delim` is already
        consumed. Warns (but still returns) on an unterminated quote."""
        str = ''
        while self.pos != self.end:
            c = self.eat()
            if c == delim:
                return str
            elif c == '\\' and delim == '"':
                # Inside a '"' quoted string, '\\' only escapes the quote
                # character and backslash, otherwise it is preserved.
                if self.pos == self.end:
                    lit.util.warning(
                        "escape at end of quoted argument in: %r" % self.data)
                    return str
                c = self.eat()
                if c == '"': #
                    str += '"'
                elif c == '\\':
                    str += '\\'
                else:
                    str += '\\' + c
            else:
                str += c
        lit.util.warning("missing quote character in %r" % self.data)
        return str

    def lex_arg_checked(self, c):
        """Debug helper: run both the fast and slow paths and verify that
        they agree on result and final position."""
        pos = self.pos
        res = self.lex_arg_fast(c)
        end = self.pos
        self.pos = pos

        reference = self.lex_arg_slow(c)
        if res is not None:
            if res != reference:
                raise ValueError("Fast path failure: %r != %r" % (
                    res, reference))
            if self.pos != end:
                raise ValueError("Fast path failure: %r != %r" % (
                    self.pos, end))
        return reference

    def lex_arg(self, c):
        # Try the fast path first; fall back to the slow path on None.
        return self.lex_arg_fast(c) or self.lex_arg_slow(c)

    def lex_one_token(self):
        """
        lex_one_token - Lex a single 'sh' token. """
        c = self.eat()
        if c == ';':
            return (c,)
        if c == '|':
            if self.maybe_eat('|'):
                return ('||',)
            return (c,)
        if c == '&':
            if self.maybe_eat('&'):
                return ('&&',)
            if self.maybe_eat('>'):
                return ('&>',)
            return (c,)
        if c == '>':
            if self.maybe_eat('&'):
                return ('>&',)
            if self.maybe_eat('>'):
                return ('>>',)
            return (c,)
        if c == '<':
            if self.maybe_eat('&'):
                return ('<&',)
            # NOTE(review): '<' followed by '>' yields the '<<' token here,
            # so "<>" lexes the same as "<<"; one would expect
            # maybe_eat('<') -- confirm against the intended shell subset
            # before changing.
            if self.maybe_eat('>'):
                return ('<<',)
            return (c,)

        return self.lex_arg(c)

    def lex(self):
        # Token generator: skip whitespace, yield one token at a time.
        while self.pos != self.end:
            if self.look().isspace():
                self.eat()
            else:
                yield self.lex_one_token()
###
class ShParser:
    """Recursive-descent parser over ShLexer tokens, producing a tree of
    Command / Pipeline / Seq nodes (from lit.ShCommands)."""

    def __init__(self, data, win32Escapes = False, pipefail = False):
        self.data = data
        self.pipefail = pipefail
        self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()

    def lex(self):
        # Pull the next token from the generator, or None at end of input.
        for item in self.tokens:
            return item
        return None

    def look(self):
        # One-token lookahead: take the next token and push it back by
        # chaining it in front of the remaining token stream.
        token = self.lex()
        if token is not None:
            self.tokens = itertools.chain([token], self.tokens)
        return token

    def parse_command(self):
        """Parse a single command: its argument list plus redirections."""
        tok = self.lex()
        if not tok:
            raise ValueError("empty command!")
        if isinstance(tok, tuple):
            raise ValueError("syntax error near unexpected token %r" % tok[0])

        args = [tok]
        redirects = []
        while 1:
            tok = self.look()

            # EOF?
            if tok is None:
                break

            # If this is an argument, just add it to the current command.
            if isinstance(tok, str):
                args.append(self.lex())
                continue

            # Otherwise see if it is a terminator.
            assert isinstance(tok, tuple)
            if tok[0] in ('|',';','&','||','&&'):
                break

            # Otherwise it must be a redirection.
            op = self.lex()
            arg = self.lex()
            if not arg:
                raise ValueError("syntax error near token %r" % op[0])
            redirects.append((op, arg))

        return Command(args, redirects)

    def parse_pipeline(self):
        """Parse one or more '|'-joined commands into a Pipeline."""
        negate = False
        commands = [self.parse_command()]
        while self.look() == ('|',):
            self.lex()
            commands.append(self.parse_command())
        return Pipeline(commands, negate, self.pipefail)

    def parse(self):
        """Parse the whole input: pipelines joined by ; & && ||.

        Sequences are folded left-associatively with no operator
        precedence (see the FIXME below).
        """
        lhs = self.parse_pipeline()

        while self.look():
            operator = self.lex()
            assert isinstance(operator, tuple) and len(operator) == 1

            if not self.look():
                raise ValueError(
                    "missing argument to operator %r" % operator[0])

            # FIXME: Operator precedence!!
            lhs = Seq(lhs, operator[0], self.parse_pipeline())

        return lhs
###
import unittest
class TestShLexer(unittest.TestCase):
    """Unit tests for ShLexer tokenization."""

    def lex(self, str, *args, **kwargs):
        # Helper: fully tokenize `str` into a list.
        return list(ShLexer(str, *args, **kwargs).lex())

    def test_basic(self):
        # Operators and bare words interleave into separate tokens.
        self.assertEqual(self.lex('a|b>c&d<e;f'),
                         ['a', ('|',), 'b', ('>',), 'c', ('&',), 'd',
                          ('<',), 'e', (';',), 'f'])

    def test_redirection_tokens(self):
        # 'a2>c': the digit is part of the word; 'a 2>c': fd-redirection.
        self.assertEqual(self.lex('a2>c'),
                         ['a2', ('>',), 'c'])
        self.assertEqual(self.lex('a 2>c'),
                         ['a', ('>',2), 'c'])

    def test_quoting(self):
        # Single/double quotes, escapes, mid-word quoting and the
        # win32Escapes mode (backslash not treated as an escape).
        self.assertEqual(self.lex(""" 'a' """),
                         ['a'])
        self.assertEqual(self.lex(""" "hello\\"world" """),
                         ['hello"world'])
        self.assertEqual(self.lex(""" "hello\\'world" """),
                         ["hello\\'world"])
        self.assertEqual(self.lex(""" "hello\\\\world" """),
                         ["hello\\world"])
        self.assertEqual(self.lex(""" he"llo wo"rld """),
                         ["hello world"])
        self.assertEqual(self.lex(""" a\\ b a\\\\b """),
                         ["a b", "a\\b"])
        self.assertEqual(self.lex(""" "" "" """),
                         ["", ""])
        self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True),
                         ['a\\', 'b'])
class TestShParse(unittest.TestCase):
    """Unit tests for ShParser AST construction."""

    def parse(self, str):
        # Helper: parse `str` into a Command/Pipeline/Seq tree.
        return ShParser(str).parse()

    def test_basic(self):
        # Plain commands; quoting is resolved before parsing.
        self.assertEqual(self.parse('echo hello'),
                         Pipeline([Command(['echo', 'hello'], [])], False))
        self.assertEqual(self.parse('echo ""'),
                         Pipeline([Command(['echo', ''], [])], False))
        self.assertEqual(self.parse("""echo -DFOO='a'"""),
                         Pipeline([Command(['echo', '-DFOO=a'], [])], False))
        self.assertEqual(self.parse('echo -DFOO="a"'),
                         Pipeline([Command(['echo', '-DFOO=a'], [])], False))

    def test_redirection(self):
        # Redirections accumulate as (op-tuple, target) pairs.
        self.assertEqual(self.parse('echo hello > c'),
                         Pipeline([Command(['echo', 'hello'],
                                           [((('>'),), 'c')])], False))
        self.assertEqual(self.parse('echo hello > c >> d'),
                         Pipeline([Command(['echo', 'hello'], [(('>',), 'c'),
                                                               (('>>',), 'd')])], False))
        self.assertEqual(self.parse('a 2>&1'),
                         Pipeline([Command(['a'], [(('>&',2), '1')])], False))

    def test_pipeline(self):
        self.assertEqual(self.parse('a | b'),
                         Pipeline([Command(['a'], []),
                                   Command(['b'], [])],
                                  False))

        self.assertEqual(self.parse('a | b | c'),
                         Pipeline([Command(['a'], []),
                                   Command(['b'], []),
                                   Command(['c'], [])],
                                  False))

    def test_list(self):
        # Sequence operators fold left-associatively into Seq nodes.
        self.assertEqual(self.parse('a ; b'),
                         Seq(Pipeline([Command(['a'], [])], False),
                             ';',
                             Pipeline([Command(['b'], [])], False)))

        self.assertEqual(self.parse('a & b'),
                         Seq(Pipeline([Command(['a'], [])], False),
                             '&',
                             Pipeline([Command(['b'], [])], False)))

        self.assertEqual(self.parse('a && b'),
                         Seq(Pipeline([Command(['a'], [])], False),
                             '&&',
                             Pipeline([Command(['b'], [])], False)))

        self.assertEqual(self.parse('a || b'),
                         Seq(Pipeline([Command(['a'], [])], False),
                             '||',
                             Pipeline([Command(['b'], [])], False)))

        self.assertEqual(self.parse('a && b || c'),
                         Seq(Seq(Pipeline([Command(['a'], [])], False),
                                 '&&',
                                 Pipeline([Command(['b'], [])], False)),
                             '||',
                             Pipeline([Command(['c'], [])], False)))

        self.assertEqual(self.parse('a; b'),
                         Seq(Pipeline([Command(['a'], [])], False),
                             ';',
                             Pipeline([Command(['b'], [])], False)))
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
thundernet8/WRGameVideos-Server | venv/lib/python2.7/site-packages/werkzeug/debug/console.py | 256 | 5599 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import code
from types import CodeType
from werkzeug.utils import escape
from werkzeug.local import Local
from werkzeug.debug.repr import debug_repr, dump, helper
_local = Local()
class HTMLStringO(object):
    """File-like string buffer that HTML-escapes everything written to it."""

    def __init__(self):
        # Written chunks are collected here and joined lazily by reset().
        self._buffer = []

    def isatty(self):
        # Never attached to a terminal.
        return False

    def close(self):
        # File-API no-op: nothing to release.
        pass

    def flush(self):
        # File-API no-op: everything already lives in memory.
        pass

    def seek(self, n, mode=0):
        # File-API no-op: the buffer is append-only.
        pass

    def readline(self):
        """Pop and return the oldest written chunk, or '' when empty."""
        if not self._buffer:
            return ''
        return self._buffer.pop(0)

    def reset(self):
        """Return everything written so far and clear the buffer."""
        joined = ''.join(self._buffer)
        self._buffer = []
        return joined

    def _write(self, x):
        # Raw (unescaped) append; bytes are decoded leniently so broken
        # UTF-8 cannot blow up the debugger output.
        if isinstance(x, bytes):
            x = x.decode('utf-8', 'replace')
        self._buffer.append(x)

    def write(self, x):
        self._write(escape(x))

    def writelines(self, x):
        self._write(escape(''.join(x)))
class ThreadedStream(object):
    """Thread-local wrapper for sys.stdout for the interactive console."""

    def push():
        # Install the proxy as sys.stdout (only once) and give the
        # current thread a fresh HTML output buffer.
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = ThreadedStream()
        _local.stream = HTMLStringO()
    push = staticmethod(push)

    def fetch():
        # Return and clear this thread's buffered output, or '' if the
        # thread never pushed a stream.
        try:
            stream = _local.stream
        except AttributeError:
            return ''
        return stream.reset()
    fetch = staticmethod(fetch)

    def displayhook(obj):
        # Replacement for sys.displayhook: threads without a console
        # stream fall through to the original hook.
        try:
            stream = _local.stream
        except AttributeError:
            return _displayhook(obj)
        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            _local._current_ipy.locals['_'] = obj
        stream._write(debug_repr(obj))
    displayhook = staticmethod(displayhook)

    def __setattr__(self, name, value):
        # The proxy itself is immutable; all state lives on _local.
        raise AttributeError('read only attribute %s' % name)

    def __dir__(self):
        return dir(sys.__stdout__)

    def __getattribute__(self, name):
        # Proxy every attribute access to the per-thread stream, falling
        # back to the real stdout when this thread has none.
        if name == '__members__':
            return dir(sys.__stdout__)
        try:
            stream = _local.stream
        except AttributeError:
            stream = sys.__stdout__
        return getattr(stream, name)

    def __repr__(self):
        return repr(sys.__stdout__)
# add the threaded stream as display hook
# (keep a reference to the original hook so ThreadedStream.displayhook
# can delegate to it for threads that are not running a console)
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
    """Maps code objects (keyed by id) to the console source that
    produced them, so tracebacks can show the typed-in source."""

    def __init__(self):
        self._storage = {}

    def register(self, code, source):
        """Remember `source` for `code` and for every nested code constant."""
        storage = self._storage
        storage[id(code)] = source
        # Functions defined inside the snippet carry their own code
        # objects in co_consts; register those under the same source too.
        nested = (const for const in code.co_consts
                  if isinstance(const, CodeType))
        for const in nested:
            storage[id(const)] = source

    def get_source_by_code(self, code):
        """Return the registered source for `code`, or None if unknown."""
        return self._storage.get(id(code))
def _wrap_compiler(console):
    """Patch `console.compile` so that every compiled snippet is
    registered with the console's loader (for traceback source lookup)."""
    original_compile = console.compile

    def compile_and_register(source, filename, symbol):
        code = original_compile(source, filename, symbol)
        console.loader.register(code, source)
        return code

    console.compile = compile_and_register
class _InteractiveConsole(code.InteractiveInterpreter):
    """code.InteractiveInterpreter subclass whose output is captured
    per-thread (via ThreadedStream) and returned as HTML-ready text."""

    def __init__(self, globals, locals):
        code.InteractiveInterpreter.__init__(self, locals)
        self.globals = dict(globals)
        # Convenience helpers available inside the debugger console.
        self.globals['dump'] = dump
        self.globals['help'] = helper
        self.globals['__loader__'] = self.loader = _ConsoleLoader()
        # Multi-line statement state: True while more input is expected.
        self.more = False
        self.buffer = []
        _wrap_compiler(self)

    def runsource(self, source):
        """Run one line of input; return prompt + echoed source + output."""
        source = source.rstrip() + '\n'
        ThreadedStream.push()
        prompt = self.more and '... ' or '>>> '
        try:
            source_to_eval = ''.join(self.buffer + [source])
            # InteractiveInterpreter.runsource returns True when the
            # statement is incomplete and more input is required.
            if code.InteractiveInterpreter.runsource(self,
                    source_to_eval, '<debugger>', 'single'):
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            # Always collect whatever was printed, even on error.
            output = ThreadedStream.fetch()
        return prompt + source + output

    def runcode(self, code):
        # NOTE: the parameter shadows the imported `code` module here.
        try:
            eval(code, self.globals, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self):
        # Render the runtime traceback as an HTML summary.
        from werkzeug.debug.tbtools import get_current_traceback
        tb = get_current_traceback(skip=1)
        sys.stdout._write(tb.render_summary())

    def showsyntaxerror(self, filename=None):
        # skip=4 hides the interpreter's own compile frames.
        from werkzeug.debug.tbtools import get_current_traceback
        tb = get_current_traceback(skip=4)
        sys.stdout._write(tb.render_summary())

    def write(self, data):
        sys.stdout.write(data)
class Console(object):
    """An interactive console."""

    def __init__(self, globals=None, locals=None):
        # Default both namespaces to fresh, independent dicts.
        self._ipy = _InteractiveConsole(
            {} if globals is None else globals,
            {} if locals is None else locals)

    def eval(self, code):
        """Evaluate one console line, restoring sys.stdout afterwards."""
        _local._current_ipy = self._ipy
        saved_stdout = sys.stdout
        try:
            return self._ipy.runsource(code)
        finally:
            # runsource swaps in a ThreadedStream; always restore the
            # real stdout, even if evaluation raised.
            sys.stdout = saved_stdout
| gpl-2.0 |
norayr/unisubs | apps/teams/migrations/0050_merge_models.py | 5 | 21086 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'follow_new_video': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.invite': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Invite'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"})
},
'teams.task': {
'Meta': {'object_name': 'Task'},
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '16'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.teamvideolanguage': {
'Meta': {'unique_together': "(('team_video', 'subtitle_language'),)", 'object_name': 'TeamVideoLanguage'},
'forked': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_lingua_franca': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'languages'", 'to': "orm['teams.TeamVideo']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.teamvideolanguagepair': {
'Meta': {'object_name': 'TeamVideoLanguagePair'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_0': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'language_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'language_pair': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'percent_complete': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'subtitle_language_0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_video_language_pairs_0'", 'to': "orm['videos.SubtitleLanguage']"}),
'subtitle_language_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_video_language_pairs_1'", 'null': 'True', 'to': "orm['videos.SubtitleLanguage']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['teams']
| agpl-3.0 |
Athrun29/horizon | openstack_dashboard/dashboards/settings/password/views.py | 59 | 1226 | # Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from openstack_dashboard.dashboards.settings.password \
import forms as pass_forms
class PasswordView(forms.ModalFormView):
    """Modal form view letting the logged-in user change their password.

    All behavior comes from ``forms.ModalFormView``; this class only
    supplies the configuration attributes the base view reads.
    """
    form_class = pass_forms.PasswordForm
    form_id = "change_password_modal"
    modal_header = _("Change Password")
    modal_id = "change_password_modal"
    page_title = _("Change Password")
    submit_label = _("Change")
    # On success the form posts back to this same panel's index URL.
    submit_url = reverse_lazy("horizon:settings:password:index")
    template_name = 'settings/password/change.html'
| apache-2.0 |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/redis/connection.py | 1 | 54565 | from __future__ import unicode_literals
from distutils.version import StrictVersion
from itertools import chain
from time import time
import errno
import io
import os
import socket
import threading
import warnings
from redis._compat import (xrange, imap, unicode, long,
nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs,
recv, recv_into, unquote, BlockingIOError,
sendall, shutdown, ssl_wrap_socket)
from redis.exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
DataError,
ExecAbortError,
InvalidResponse,
NoPermissionError,
NoScriptError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
)
from redis.utils import HIREDIS_AVAILABLE
# TLS support is optional; record availability so SSL-dependent code can
# refuse to run when the interpreter was built without the ssl module.
try:
    import ssl
    ssl_available = True
except ImportError:
    ssl_available = False

# Exception classes that mean "no data available yet" on a non-blocking
# socket, mapped to the errno value that identifies that situation.
NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
    BlockingIOError: errno.EWOULDBLOCK,
}

if ssl_available:
    if hasattr(ssl, 'SSLWantReadError'):
        NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2
        NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2
    else:
        NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2

# In Python 2.7 a socket.error is raised for a nonblocking read.
# The _compat module aliases BlockingIOError to socket.error to be
# Python 2/3 compatible.
# However this means that all socket.error exceptions need to be handled
# properly within these exception handlers.
# We need to make sure socket.error is included in these handlers and
# provide a dummy error number that will never match a real exception.
if socket.error not in NONBLOCKING_EXCEPTION_ERROR_NUMBERS:
    NONBLOCKING_EXCEPTION_ERROR_NUMBERS[socket.error] = -999999

NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())

# Probe the installed hiredis version (if any) for the optional features
# the parsers below rely on.
if HIREDIS_AVAILABLE:
    import hiredis

    hiredis_version = StrictVersion(hiredis.__version__)
    HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
        hiredis_version >= StrictVersion('0.1.3')
    HIREDIS_SUPPORTS_BYTE_BUFFER = \
        hiredis_version >= StrictVersion('0.1.4')
    HIREDIS_SUPPORTS_ENCODING_ERRORS = \
        hiredis_version >= StrictVersion('1.0.0')

    if not HIREDIS_SUPPORTS_BYTE_BUFFER:
        msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
               "hiredis %s. Please consider upgrading." % hiredis.__version__)
        warnings.warn(msg)

    HIREDIS_USE_BYTE_BUFFER = True
    # only use byte buffer if hiredis supports it
    if not HIREDIS_SUPPORTS_BYTE_BUFFER:
        HIREDIS_USE_BYTE_BUFFER = False

# RESP protocol framing bytes.
SYM_STAR = b'*'
SYM_DOLLAR = b'$'
SYM_CRLF = b'\r\n'
SYM_EMPTY = b''

SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."

# Unique default marker that distinguishes "argument not given" from None.
SENTINEL = object()
class Encoder(object):
    """Translate values into wire bytes and wire bytes back into strings."""

    def __init__(self, encoding, encoding_errors, decode_responses):
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self.decode_responses = decode_responses

    def encode(self, value):
        "Return a bytestring or bytes-like representation of the value"
        # Bytes-like inputs pass through untouched.
        if isinstance(value, (bytes, memoryview)):
            return value
        # bool is a subclass of int, so it must be rejected before the
        # numeric branches below would silently accept it.
        if isinstance(value, bool):
            raise DataError("Invalid input of type: 'bool'. Convert to a "
                            "bytes, string, int or float first.")
        if isinstance(value, float):
            value = repr(value).encode()
        elif isinstance(value, (int, long)):
            # python 2 repr() on longs is '123L', so use str() instead
            value = str(value).encode()
        elif not isinstance(value, basestring):
            # a value we don't know how to deal with. throw an error
            type_name = type(value).__name__
            raise DataError("Invalid input of type: '%s'. Convert to a "
                            "bytes, string, int or float first." % type_name)
        # Text still needs to be encoded with the configured codec.
        if isinstance(value, unicode):
            value = value.encode(self.encoding, self.encoding_errors)
        return value

    def decode(self, value, force=False):
        "Return a unicode string from the bytes-like representation"
        if not (self.decode_responses or force):
            return value
        if isinstance(value, memoryview):
            value = value.tobytes()
        if isinstance(value, bytes):
            value = value.decode(self.encoding, self.encoding_errors)
        return value
class BaseParser(object):
    """Shared machinery for mapping RESP error replies onto exceptions."""

    # Maps an error-code prefix either directly to an exception class or,
    # for 'ERR', to a nested {exact message: exception class} table.
    EXCEPTION_CLASSES = {
        'ERR': {
            'max number of clients reached': ConnectionError,
            'Client sent AUTH, but no password is set': AuthenticationError,
            'invalid password': AuthenticationError,
            # some Redis server versions report invalid command syntax
            # in lowercase
            'wrong number of arguments for \'auth\' command':
                AuthenticationWrongNumberOfArgsError,
            # some Redis server versions report invalid command syntax
            # in uppercase
            'wrong number of arguments for \'AUTH\' command':
                AuthenticationWrongNumberOfArgsError,
        },
        'EXECABORT': ExecAbortError,
        'LOADING': BusyLoadingError,
        'NOSCRIPT': NoScriptError,
        'READONLY': ReadOnlyError,
        'NOAUTH': AuthenticationError,
        'NOPERM': NoPermissionError,
    }

    def parse_error(self, response):
        "Parse an error response"
        # The error code is everything up to the first space; the rest is
        # the human-readable detail.
        code, _, detail = response.partition(' ')
        try:
            exception_class = self.EXCEPTION_CLASSES[code]
        except KeyError:
            # Unknown code: hand back the entire reply as a generic error.
            return ResponseError(response)
        if isinstance(exception_class, dict):
            exception_class = exception_class.get(detail, ResponseError)
        return exception_class(detail)
class SocketBuffer(object):
    """Buffered reader over a raw socket.

    Bytes received from the socket are accumulated in an ``io.BytesIO``
    so callers can consume exact byte counts (``read``) or whole
    CRLF-terminated lines (``readline``) independently of how the data
    arrived on the wire.
    """

    def __init__(self, socket, socket_read_size, socket_timeout):
        self._sock = socket
        # how many bytes to request from the socket per recv() call
        self.socket_read_size = socket_read_size
        # the connection's default timeout, restored after custom reads
        self.socket_timeout = socket_timeout
        self._buffer = io.BytesIO()
        # number of bytes written to the buffer from the socket
        self.bytes_written = 0
        # number of bytes read from the buffer
        self.bytes_read = 0

    @property
    def length(self):
        # bytes currently buffered but not yet consumed
        return self.bytes_written - self.bytes_read

    def _read_from_socket(self, length=None, timeout=SENTINEL,
                          raise_on_timeout=True):
        """Pull data from the socket into the buffer.

        Loops until at least ``length`` new bytes have arrived (a single
        recv() when ``length`` is None). ``timeout`` temporarily overrides
        the socket timeout; SENTINEL means "leave it unchanged". Returns
        True when data was read, False on a tolerated timeout/non-blocking
        miss.
        """
        sock = self._sock
        socket_read_size = self.socket_read_size
        buf = self._buffer
        buf.seek(self.bytes_written)
        marker = 0
        custom_timeout = timeout is not SENTINEL
        try:
            if custom_timeout:
                sock.settimeout(timeout)
            while True:
                data = recv(self._sock, socket_read_size)
                # an empty string indicates the server shutdown the socket
                if isinstance(data, bytes) and len(data) == 0:
                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
                buf.write(data)
                data_length = len(data)
                self.bytes_written += data_length
                marker += data_length
                if length is not None and length > marker:
                    continue
                return True
        except socket.timeout:
            if raise_on_timeout:
                raise TimeoutError("Timeout reading from socket")
            return False
        except NONBLOCKING_EXCEPTIONS as ex:
            # if we're in nonblocking mode and the recv raises a
            # blocking error, simply return False indicating that
            # there's no data to be read. otherwise raise the
            # original exception.
            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
            if not raise_on_timeout and ex.errno == allowed:
                return False
            raise ConnectionError("Error while reading from socket: %s" %
                                  (ex.args,))
        finally:
            # always restore the connection's configured timeout
            if custom_timeout:
                sock.settimeout(self.socket_timeout)

    def can_read(self, timeout):
        # True if data is already buffered, or a non-raising poll of the
        # socket produces some
        return bool(self.length) or \
            self._read_from_socket(timeout=timeout,
                                   raise_on_timeout=False)

    def read(self, length):
        """Return exactly ``length`` payload bytes, stripping the CRLF."""
        length = length + 2  # make sure to read the \r\n terminator
        # make sure we've read enough data from the socket
        if length > self.length:
            self._read_from_socket(length - self.length)

        self._buffer.seek(self.bytes_read)
        data = self._buffer.read(length)
        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def readline(self):
        """Return the next CRLF-terminated line, without the CRLF."""
        buf = self._buffer
        buf.seek(self.bytes_read)
        data = buf.readline()
        while not data.endswith(SYM_CRLF):
            # there's more data in the socket that we need
            self._read_from_socket()
            buf.seek(self.bytes_read)
            data = buf.readline()

        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def purge(self):
        """Discard all buffered data and reset both counters."""
        self._buffer.seek(0)
        self._buffer.truncate()
        self.bytes_written = 0
        self.bytes_read = 0

    def close(self):
        """Release the buffer and drop the socket reference."""
        try:
            self.purge()
            self._buffer.close()
        except Exception:
            # issue #633 suggests the purge/close somehow raised a
            # BadFileDescriptor error. Perhaps the client ran out of
            # memory or something else? It's probably OK to ignore
            # any error being raised from purge/close since we're
            # removing the reference to the instance below.
            pass
        self._buffer = None
        self._sock = None
class PythonParser(BaseParser):
    "Plain Python parsing class"

    def __init__(self, socket_read_size):
        self.socket_read_size = socket_read_size
        self.encoder = None
        self._sock = None
        self._buffer = None

    def __del__(self):
        self.on_disconnect()

    def on_connect(self, connection):
        "Called when the socket connects"
        self._sock = connection._sock
        self._buffer = SocketBuffer(self._sock,
                                    self.socket_read_size,
                                    connection.socket_timeout)
        self.encoder = connection.encoder

    def on_disconnect(self):
        "Called when the socket disconnects"
        self._sock = None
        if self._buffer is not None:
            self._buffer.close()
            self._buffer = None
        self.encoder = None

    def can_read(self, timeout):
        # a connected buffer that reports readable data
        return self._buffer and self._buffer.can_read(timeout)

    def read_response(self):
        """Read and decode one RESP reply from the buffered socket.

        Returns a simple value, None for null replies, a (possibly nested)
        list for multi-bulk replies, or a ResponseError instance for error
        replies that may belong inside a pipeline.
        """
        raw = self._buffer.readline()
        if not raw:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # the first byte of the line is the RESP type marker
        byte, response = raw[:1], raw[1:]

        if byte not in (b'-', b'+', b':', b'$', b'*'):
            raise InvalidResponse("Protocol Error: %r" % raw)

        # server returned an error
        if byte == b'-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == b'+':
            pass
        # int value
        elif byte == b':':
            response = long(response)
        # bulk response
        elif byte == b'$':
            length = int(response)
            if length == -1:
                # $-1 is the RESP null bulk string
                return None
            response = self._buffer.read(length)
        # multi-bulk response
        elif byte == b'*':
            length = int(response)
            if length == -1:
                # *-1 is the RESP null array
                return None
            # recurse once per element of the array
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes):
            response = self.encoder.decode(response)
        return response
class HiredisParser(BaseParser):
    "Parser class for connections using Hiredis"

    def __init__(self, socket_read_size):
        if not HIREDIS_AVAILABLE:
            raise RedisError("Hiredis is not installed")
        self.socket_read_size = socket_read_size
        if HIREDIS_USE_BYTE_BUFFER:
            # reusable recv_into() target to avoid a fresh allocation
            # on every socket read
            self._buffer = bytearray(socket_read_size)

    def __del__(self):
        self.on_disconnect()

    def on_connect(self, connection):
        # build a fresh hiredis.Reader configured for this connection
        self._sock = connection._sock
        self._socket_timeout = connection.socket_timeout
        kwargs = {
            'protocolError': InvalidResponse,
            'replyError': self.parse_error,
        }

        # hiredis < 0.1.3 doesn't support functions that create exceptions
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            kwargs['replyError'] = ResponseError

        if connection.encoder.decode_responses:
            kwargs['encoding'] = connection.encoder.encoding
            if HIREDIS_SUPPORTS_ENCODING_ERRORS:
                kwargs['errors'] = connection.encoder.encoding_errors
        self._reader = hiredis.Reader(**kwargs)
        # False is hiredis's "no complete reply yet" marker, so it also
        # serves here as "nothing cached"
        self._next_response = False

    def on_disconnect(self):
        self._sock = None
        self._reader = None
        self._next_response = False

    def can_read(self, timeout):
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        if self._next_response is False:
            # cache any fully-parsed reply so read_response() can return
            # it without touching the socket again
            self._next_response = self._reader.gets()
            if self._next_response is False:
                return self.read_from_socket(timeout=timeout,
                                             raise_on_timeout=False)
        return True

    def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):
        """Feed one recv()'s worth of data into the hiredis reader.

        ``timeout`` temporarily overrides the socket timeout. Returns True
        when data was read, False on a tolerated timeout/non-blocking miss.
        """
        sock = self._sock
        custom_timeout = timeout is not SENTINEL
        try:
            if custom_timeout:
                sock.settimeout(timeout)
            if HIREDIS_USE_BYTE_BUFFER:
                bufflen = recv_into(self._sock, self._buffer)
                if bufflen == 0:
                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
                self._reader.feed(self._buffer, 0, bufflen)
            else:
                buffer = recv(self._sock, self.socket_read_size)
                # an empty string indicates the server shutdown the socket
                if not isinstance(buffer, bytes) or len(buffer) == 0:
                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
                self._reader.feed(buffer)
            # data was read from the socket and added to the buffer.
            # return True to indicate that data was read.
            return True
        except socket.timeout:
            if raise_on_timeout:
                raise TimeoutError("Timeout reading from socket")
            return False
        except NONBLOCKING_EXCEPTIONS as ex:
            # if we're in nonblocking mode and the recv raises a
            # blocking error, simply return False indicating that
            # there's no data to be read. otherwise raise the
            # original exception.
            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
            if not raise_on_timeout and ex.errno == allowed:
                return False
            raise ConnectionError("Error while reading from socket: %s" %
                                  (ex.args,))
        finally:
            # always restore the connection's configured timeout
            if custom_timeout:
                sock.settimeout(self._socket_timeout)

    def read_response(self):
        """Return the next parsed reply, reading the socket as needed."""
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # _next_response might be cached from a can_read() call
        if self._next_response is not False:
            response = self._next_response
            self._next_response = False
            return response

        response = self._reader.gets()
        while response is False:
            self.read_from_socket()
            response = self._reader.gets()
        # if an older version of hiredis is installed, we need to attempt
        # to convert ResponseErrors to their appropriate types.
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            if isinstance(response, ResponseError):
                response = self.parse_error(response.args[0])
            elif isinstance(response, list) and response and \
                    isinstance(response[0], ResponseError):
                response[0] = self.parse_error(response[0].args[0])
        # if the response is a ConnectionError or the response is a list and
        # the first item is a ConnectionError, raise it as something bad
        # happened
        if isinstance(response, ConnectionError):
            raise response
        elif isinstance(response, list) and response and \
                isinstance(response[0], ConnectionError):
            raise response[0]
        return response
# Prefer the C-accelerated hiredis parser when the module is importable;
# fall back to the pure-Python implementation otherwise.
DefaultParser = HiredisParser if HIREDIS_AVAILABLE else PythonParser
class Connection(object):
    "Manages TCP communication to and from a Redis server"

    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, socket_connect_timeout=None,
                 socket_keepalive=False, socket_keepalive_options=None,
                 socket_type=0, retry_on_timeout=False, encoding='utf-8',
                 encoding_errors='strict', decode_responses=False,
                 parser_class=DefaultParser, socket_read_size=65536,
                 health_check_interval=0, client_name=None, username=None):
        # remember the creating pid; disconnect() uses it to avoid
        # shutting down a socket inherited by a forked child
        self.pid = os.getpid()
        self.host = host
        self.port = int(port)
        self.db = db
        self.username = username
        self.client_name = client_name
        self.password = password
        self.socket_timeout = socket_timeout
        # connect timeout falls back to the general socket timeout
        self.socket_connect_timeout = socket_connect_timeout or socket_timeout
        self.socket_keepalive = socket_keepalive
        self.socket_keepalive_options = socket_keepalive_options or {}
        self.socket_type = socket_type
        self.retry_on_timeout = retry_on_timeout
        self.health_check_interval = health_check_interval
        self.next_health_check = 0
        self.encoder = Encoder(encoding, encoding_errors, decode_responses)
        self._sock = None
        self._parser = parser_class(socket_read_size=socket_read_size)
        self._connect_callbacks = []
        # commands larger than this are chunked rather than joined into
        # one big bytestring (see pack_command/pack_commands)
        self._buffer_cutoff = 6000

    def __repr__(self):
        repr_args = ','.join(['%s=%s' % (k, v) for k, v in self.repr_pieces()])
        return '%s<%s>' % (self.__class__.__name__, repr_args)

    def repr_pieces(self):
        # (name, value) pairs rendered by __repr__
        pieces = [
            ('host', self.host),
            ('port', self.port),
            ('db', self.db)
        ]
        if self.client_name:
            pieces.append(('client_name', self.client_name))
        return pieces

    def __del__(self):
        self.disconnect()

    def register_connect_callback(self, callback):
        # callback(self) runs after every successful (re)connect
        self._connect_callbacks.append(callback)

    def clear_connect_callbacks(self):
        self._connect_callbacks = []

    def connect(self):
        "Connects to the Redis server if not already connected"
        if self._sock:
            return
        try:
            sock = self._connect()
        except socket.timeout:
            raise TimeoutError("Timeout connecting to server")
        except socket.error as e:
            raise ConnectionError(self._error_message(e))

        self._sock = sock
        try:
            self.on_connect()
        except RedisError:
            # clean up after any error in on_connect
            self.disconnect()
            raise

        # run any user callbacks. right now the only internal callback
        # is for pubsub channel/pattern resubscription
        for callback in self._connect_callbacks:
            callback(self)

    def _connect(self):
        "Create a TCP socket connection"
        # we want to mimic what socket.create_connection does to support
        # ipv4/ipv6, but we want to set options prior to calling
        # socket.connect()
        err = None
        for res in socket.getaddrinfo(self.host, self.port, self.socket_type,
                                      socket.SOCK_STREAM):
            family, socktype, proto, canonname, socket_address = res
            sock = None
            try:
                sock = socket.socket(family, socktype, proto)
                # TCP_NODELAY
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

                # TCP_KEEPALIVE
                if self.socket_keepalive:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                    for k, v in iteritems(self.socket_keepalive_options):
                        sock.setsockopt(socket.IPPROTO_TCP, k, v)

                # set the socket_connect_timeout before we connect
                sock.settimeout(self.socket_connect_timeout)

                # connect
                sock.connect(socket_address)

                # set the socket_timeout now that we're connected
                sock.settimeout(self.socket_timeout)
                return sock

            except socket.error as _:
                err = _
                if sock is not None:
                    sock.close()

        if err is not None:
            raise err
        raise socket.error("socket.getaddrinfo returned an empty list")

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % \
                (self.host, self.port, exception.args[0])
        else:
            return "Error %s connecting to %s:%s. %s." % \
                (exception.args[0], self.host, self.port, exception.args[1])

    def on_connect(self):
        "Initialize the connection, authenticate and select a database"
        self._parser.on_connect(self)

        # if username and/or password are set, authenticate
        if self.username or self.password:
            if self.username:
                auth_args = (self.username, self.password or '')
            else:
                auth_args = (self.password,)
            # avoid checking health here -- PING will fail if we try
            # to check the health prior to the AUTH
            self.send_command('AUTH', *auth_args, check_health=False)

            try:
                auth_response = self.read_response()
            except AuthenticationWrongNumberOfArgsError:
                # a username and password were specified but the Redis
                # server seems to be < 6.0.0 which expects a single password
                # arg. retry auth with just the password.
                # https://github.com/andymccurdy/redis-py/issues/1274
                self.send_command('AUTH', self.password, check_health=False)
                auth_response = self.read_response()

            if nativestr(auth_response) != 'OK':
                raise AuthenticationError('Invalid Username or Password')

        # if a client_name is given, set it
        if self.client_name:
            self.send_command('CLIENT', 'SETNAME', self.client_name)
            if nativestr(self.read_response()) != 'OK':
                raise ConnectionError('Error setting client name')

        # if a database is specified, switch to it
        if self.db:
            self.send_command('SELECT', self.db)
            if nativestr(self.read_response()) != 'OK':
                raise ConnectionError('Invalid Database')

    def disconnect(self):
        "Disconnects from the Redis server"
        self._parser.on_disconnect()
        if self._sock is None:
            return
        try:
            # only shut the socket down in the process that created it
            if os.getpid() == self.pid:
                shutdown(self._sock, socket.SHUT_RDWR)
            self._sock.close()
        except socket.error:
            pass
        self._sock = None

    def check_health(self):
        "Check the health of the connection with a PING/PONG"
        if self.health_check_interval and time() > self.next_health_check:
            try:
                self.send_command('PING', check_health=False)
                if nativestr(self.read_response()) != 'PONG':
                    raise ConnectionError(
                        'Bad response from PING health check')
            except (ConnectionError, TimeoutError):
                # one retry on a fresh connection before giving up
                self.disconnect()
                self.send_command('PING', check_health=False)
                if nativestr(self.read_response()) != 'PONG':
                    raise ConnectionError(
                        'Bad response from PING health check')

    def send_packed_command(self, command, check_health=True):
        "Send an already packed command to the Redis server"
        if not self._sock:
            self.connect()
        # guard against health check recursion
        if check_health:
            self.check_health()
        try:
            if isinstance(command, str):
                command = [command]
            for item in command:
                sendall(self._sock, item)
        except socket.timeout:
            self.disconnect()
            raise TimeoutError("Timeout writing to socket")
        except socket.error as e:
            self.disconnect()
            if len(e.args) == 1:
                errno, errmsg = 'UNKNOWN', e.args[0]
            else:
                errno = e.args[0]
                errmsg = e.args[1]
            raise ConnectionError("Error %s while writing to socket. %s." %
                                  (errno, errmsg))
        except BaseException:
            # any other failure leaves the connection in an unknown state
            self.disconnect()
            raise

    def send_command(self, *args, **kwargs):
        "Pack and send a command to the Redis server"
        self.send_packed_command(self.pack_command(*args),
                                 check_health=kwargs.get('check_health', True))

    def can_read(self, timeout=0):
        "Poll the socket to see if there's data that can be read."
        sock = self._sock
        if not sock:
            self.connect()
            sock = self._sock
        return self._parser.can_read(timeout)

    def read_response(self):
        "Read the response from a previously sent command"
        try:
            response = self._parser.read_response()
        except socket.timeout:
            self.disconnect()
            raise TimeoutError("Timeout reading from %s:%s" %
                               (self.host, self.port))
        except socket.error as e:
            self.disconnect()
            raise ConnectionError("Error while reading from %s:%s : %s" %
                                  (self.host, self.port, e.args))
        except BaseException:
            self.disconnect()
            raise

        # a successful read pushes the next scheduled health check out
        if self.health_check_interval:
            self.next_health_check = time() + self.health_check_interval

        if isinstance(response, ResponseError):
            raise response
        return response

    def pack_command(self, *args):
        "Pack a series of arguments into the Redis protocol"
        output = []
        # the client might have included 1 or more literal arguments in
        # the command name, e.g., 'CONFIG GET'. The Redis server expects these
        # arguments to be sent separately, so split the first argument
        # manually. These arguments should be bytestrings so that they are
        # not encoded.
        if isinstance(args[0], unicode):
            args = tuple(args[0].encode().split()) + args[1:]
        elif b' ' in args[0]:
            args = tuple(args[0].split()) + args[1:]

        buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))

        buffer_cutoff = self._buffer_cutoff
        for arg in imap(self.encoder.encode, args):
            # to avoid large string mallocs, chunk the command into the
            # output list if we're sending large values or memoryviews
            arg_length = len(arg)
            if (len(buff) > buffer_cutoff or arg_length > buffer_cutoff
                    or isinstance(arg, memoryview)):
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF))
                output.append(buff)
                output.append(arg)
                buff = SYM_CRLF
            else:
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, str(arg_length).encode(),
                     SYM_CRLF, arg, SYM_CRLF))
        output.append(buff)
        return output

    def pack_commands(self, commands):
        "Pack multiple commands into the Redis protocol"
        output = []
        pieces = []
        buffer_length = 0
        buffer_cutoff = self._buffer_cutoff

        for cmd in commands:
            for chunk in self.pack_command(*cmd):
                chunklen = len(chunk)
                # flush the accumulated pieces when they grow past the
                # cutoff or when a chunk must be sent standalone
                if (buffer_length > buffer_cutoff or chunklen > buffer_cutoff
                        or isinstance(chunk, memoryview)):
                    output.append(SYM_EMPTY.join(pieces))
                    buffer_length = 0
                    pieces = []

                if chunklen > buffer_cutoff or isinstance(chunk, memoryview):
                    output.append(chunk)
                else:
                    pieces.append(chunk)
                    buffer_length += chunklen

        if pieces:
            output.append(SYM_EMPTY.join(pieces))
        return output
class SSLConnection(Connection):
    """A TCP connection to Redis negotiated over TLS."""

    def __init__(self, ssl_keyfile=None, ssl_certfile=None,
                 ssl_cert_reqs='required', ssl_ca_certs=None,
                 ssl_check_hostname=False, **kwargs):
        if not ssl_available:
            raise RedisError("Python wasn't built with SSL support")
        super(SSLConnection, self).__init__(**kwargs)
        self.keyfile = ssl_keyfile
        self.certfile = ssl_certfile
        if ssl_cert_reqs is None:
            cert_reqs = ssl.CERT_NONE
        elif isinstance(ssl_cert_reqs, basestring):
            # Translate a symbolic name into the ssl module's constant.
            name_to_flag = {
                'none': ssl.CERT_NONE,
                'optional': ssl.CERT_OPTIONAL,
                'required': ssl.CERT_REQUIRED,
            }
            # CERT_NONE is 0, so compare against the sentinel None explicitly.
            flag = name_to_flag.get(ssl_cert_reqs)
            if flag is None:
                raise RedisError(
                    "Invalid SSL Certificate Requirements Flag: %s" %
                    ssl_cert_reqs)
            cert_reqs = flag
        else:
            cert_reqs = ssl_cert_reqs
        self.cert_reqs = cert_reqs
        self.ca_certs = ssl_ca_certs
        self.check_hostname = ssl_check_hostname

    def _connect(self):
        """Open the TCP connection, then wrap the socket with SSL."""
        sock = super(SSLConnection, self)._connect()
        if not hasattr(ssl, "create_default_context"):
            # Pythons older than 2.7.9 lack SSLContext helpers; fall back to
            # the legacy module-level wrap call.
            return ssl_wrap_socket(ssl,
                                   sock,
                                   cert_reqs=self.cert_reqs,
                                   keyfile=self.keyfile,
                                   certfile=self.certfile,
                                   ca_certs=self.ca_certs)
        context = ssl.create_default_context()
        context.check_hostname = self.check_hostname
        context.verify_mode = self.cert_reqs
        if self.certfile and self.keyfile:
            context.load_cert_chain(certfile=self.certfile,
                                    keyfile=self.keyfile)
        if self.ca_certs:
            context.load_verify_locations(self.ca_certs)
        return ssl_wrap_socket(context, sock, server_hostname=self.host)
class UnixDomainSocketConnection(Connection):
    """Connection to a Redis server listening on a Unix domain socket."""

    def __init__(self, path='', db=0, username=None, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', decode_responses=False,
                 retry_on_timeout=False,
                 parser_class=DefaultParser, socket_read_size=65536,
                 health_check_interval=0, client_name=None):
        # identity / addressing
        self.pid = os.getpid()
        self.path = path
        self.db = db
        # authentication
        self.username = username
        self.password = password
        self.client_name = client_name
        # behavior knobs
        self.socket_timeout = socket_timeout
        self.retry_on_timeout = retry_on_timeout
        self.health_check_interval = health_check_interval
        self.next_health_check = 0
        # wire-level helpers
        self.encoder = Encoder(encoding, encoding_errors, decode_responses)
        self._sock = None
        self._parser = parser_class(socket_read_size=socket_read_size)
        self._connect_callbacks = []
        self._buffer_cutoff = 6000

    def repr_pieces(self):
        """Key/value pairs rendered by the base class's __repr__."""
        pieces = [('path', self.path), ('db', self.db)]
        if self.client_name:
            pieces.append(('client_name', self.client_name))
        return pieces

    def _connect(self):
        """Create and connect the AF_UNIX stream socket."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect(self.path)
        return sock

    def _error_message(self, exception):
        """Build a readable error string for a failed socket operation."""
        # args for socket.error can either be (errno, "message")
        # or just "message"
        args = exception.args
        if len(args) == 1:
            return "Error connecting to unix socket: %s. %s." % (
                self.path, args[0])
        return "Error %s connecting to unix socket: %s. %s." % (
            args[0], self.path, args[1])
# Querystring spellings interpreted as boolean False (case-insensitive).
FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')


def to_bool(value):
    """Interpret a URL querystring value as a boolean.

    Returns None for missing/empty values, False for the recognized
    false-spellings, and the value's truthiness otherwise.
    """
    if value is None or value == '':
        return None
    if isinstance(value, basestring):
        # Any non-empty string that isn't a false-spelling is True.
        return value.upper() not in FALSE_STRINGS
    return bool(value)
# Converters applied to recognized querystring arguments when parsing a
# redis:// URL in ConnectionPool.from_url; unrecognized arguments are kept
# as plain strings.
URL_QUERY_ARGUMENT_PARSERS = {
    'socket_timeout': float,
    'socket_connect_timeout': float,
    'socket_keepalive': to_bool,
    'retry_on_timeout': to_bool,
    'max_connections': int,
    'health_check_interval': int,
    'ssl_check_hostname': to_bool,
}
class ConnectionPool(object):
    "Generic connection pool"
    @classmethod
    def from_url(cls, url, db=None, decode_components=False, **kwargs):
        """
        Return a connection pool configured from the given URL.
        For example::
            redis://[[username]:[password]]@localhost:6379/0
            rediss://[[username]:[password]]@localhost:6379/0
            unix://[[username]:[password]]@/path/to/socket.sock?db=0
        Three URL schemes are supported:
        - ``redis://``
          <https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
          normal TCP socket connection
        - ``rediss://``
          <https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates
          a SSL wrapped TCP socket connection
        - ``unix://`` creates a Unix Domain Socket connection
        There are several ways to specify a database number. The parse function
        will return the first specified option:
        1. A ``db`` querystring option, e.g. redis://localhost?db=0
        2. If using the redis:// scheme, the path argument of the url, e.g.
           redis://localhost/0
        3. The ``db`` argument to this function.
        If none of these options are specified, db=0 is used.
        The ``decode_components`` argument allows this function to work with
        percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
        escapes will be replaced by their single-character equivalents after
        the URL has been parsed. This only applies to the ``hostname``,
        ``path``, ``username`` and ``password`` components.
        Any additional querystring arguments and keyword arguments will be
        passed along to the ConnectionPool class's initializer. The querystring
        arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied
        are parsed as float values. The arguments ``socket_keepalive`` and
        ``retry_on_timeout`` are parsed to boolean values that accept
        True/False, Yes/No values to indicate state. Invalid types cause a
        ``UserWarning`` to be raised. In the case of conflicting arguments,
        querystring arguments always win.
        """
        url = urlparse(url)
        url_options = {}
        # coerce recognized querystring options through their registered
        # parser; anything unrecognized passes through as a string
        for name, value in iteritems(parse_qs(url.query)):
            if value and len(value) > 0:
                parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
                if parser:
                    try:
                        url_options[name] = parser(value[0])
                    except (TypeError, ValueError):
                        warnings.warn(UserWarning(
                            "Invalid value for `%s` in connection URL." % name
                        ))
                else:
                    url_options[name] = value[0]
        if decode_components:
            # undo %xx escapes in the user-visible URL components
            username = unquote(url.username) if url.username else None
            password = unquote(url.password) if url.password else None
            path = unquote(url.path) if url.path else None
            hostname = unquote(url.hostname) if url.hostname else None
        else:
            username = url.username or None
            password = url.password or None
            path = url.path
            hostname = url.hostname
        # We only support redis://, rediss:// and unix:// schemes.
        if url.scheme == 'unix':
            url_options.update({
                'username': username,
                'password': password,
                'path': path,
                'connection_class': UnixDomainSocketConnection,
            })
        elif url.scheme in ('redis', 'rediss'):
            url_options.update({
                'host': hostname,
                'port': int(url.port or 6379),
                'username': username,
                'password': password,
            })
            # If there's a path argument, use it as the db argument if a
            # querystring value wasn't specified
            if 'db' not in url_options and path:
                try:
                    url_options['db'] = int(path.replace('/', ''))
                except (AttributeError, ValueError):
                    pass
            if url.scheme == 'rediss':
                url_options['connection_class'] = SSLConnection
        else:
            valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
            raise ValueError('Redis URL must specify one of the following '
                             'schemes (%s)' % valid_schemes)
        # last shot at the db value
        url_options['db'] = int(url_options.get('db', db or 0))
        # update the arguments from the URL values
        kwargs.update(url_options)
        # backwards compatability
        if 'charset' in kwargs:
            warnings.warn(DeprecationWarning(
                '"charset" is deprecated. Use "encoding" instead'))
            kwargs['encoding'] = kwargs.pop('charset')
        if 'errors' in kwargs:
            warnings.warn(DeprecationWarning(
                '"errors" is deprecated. Use "encoding_errors" instead'))
            kwargs['encoding_errors'] = kwargs.pop('errors')
        return cls(**kwargs)
    def __init__(self, connection_class=Connection, max_connections=None,
                 **connection_kwargs):
        """
        Create a connection pool. If max_connections is set, then this
        object raises redis.ConnectionError when the pool's limit is reached.
        By default, TCP connections are created unless connection_class is
        specified. Use redis.UnixDomainSocketConnection for unix sockets.
        Any additional keyword arguments are passed to the constructor of
        connection_class.
        """
        max_connections = max_connections or 2 ** 31
        if not isinstance(max_connections, (int, long)) or max_connections < 0:
            raise ValueError('"max_connections" must be a positive integer')
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections
        # a lock to protect the critical section in _checkpid().
        # this lock is acquired when the process id changes, such as
        # after a fork. during this time, multiple threads in the child
        # process could attempt to acquire this lock. the first thread
        # to acquire the lock will reset the data structures and lock
        # object of this pool. subsequent threads acquiring this lock
        # will notice the first thread already did the work and simply
        # release the lock.
        self._fork_lock = threading.Lock()
        self.reset()
    def __repr__(self):
        # NOTE(review): this instantiates a throwaway connection object just
        # to render its repr; connection __init__ should be side-effect free
        # for this to stay cheap.
        return "%s<%s>" % (
            type(self).__name__,
            repr(self.connection_class(**self.connection_kwargs)),
        )
    def reset(self):
        """Reinitialize pool state; called from __init__ and after a fork."""
        self._lock = threading.RLock()
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()
        # this must be the last operation in this method. while reset() is
        # called when holding _fork_lock, other threads in this process
        # can call _checkpid() which compares self.pid and os.getpid() without
        # holding any lock (for performance reasons). keeping this assignment
        # as the last operation ensures that those other threads will also
        # notice a pid difference and block waiting for the first thread to
        # release _fork_lock. when each of these threads eventually acquire
        # _fork_lock, they will notice that another thread already called
        # reset() and they will immediately release _fork_lock and continue on.
        self.pid = os.getpid()
    def _checkpid(self):
        # _checkpid() attempts to keep ConnectionPool fork-safe on modern
        # systems. this is called by all ConnectionPool methods that
        # manipulate the pool's state such as get_connection() and release().
        #
        # _checkpid() determines whether the process has forked by comparing
        # the current process id to the process id saved on the ConnectionPool
        # instance. if these values are the same, _checkpid() simply returns.
        #
        # when the process ids differ, _checkpid() assumes that the process
        # has forked and that we're now running in the child process. the child
        # process cannot use the parent's file descriptors (e.g., sockets).
        # therefore, when _checkpid() sees the process id change, it calls
        # reset() in order to reinitialize the child's ConnectionPool. this
        # will cause the child to make all new connection objects.
        #
        # _checkpid() is protected by self._fork_lock to ensure that multiple
        # threads in the child process do not call reset() multiple times.
        #
        # there is an extremely small chance this could fail in the following
        # scenario:
        #   1. process A calls _checkpid() for the first time and acquires
        #      self._fork_lock.
        #   2. while holding self._fork_lock, process A forks (the fork()
        #      could happen in a different thread owned by process A)
        #   3. process B (the forked child process) inherits the
        #      ConnectionPool's state from the parent. that state includes
        #      a locked _fork_lock. process B will not be notified when
        #      process A releases the _fork_lock and will thus never be
        #      able to acquire the _fork_lock.
        #
        # to mitigate this possible deadlock, _checkpid() will only wait 5
        # seconds to acquire _fork_lock. if _fork_lock cannot be acquired in
        # that time it is assumed that the child is deadlocked and a
        # redis.ChildDeadlockedError error is raised.
        if self.pid != os.getpid():
            # python 2.7 doesn't support a timeout option to lock.acquire()
            # we have to mimic lock timeouts ourselves.
            timeout_at = time() + 5
            acquired = False
            while time() < timeout_at:
                acquired = self._fork_lock.acquire(False)
                if acquired:
                    break
            if not acquired:
                raise ChildDeadlockedError
            # reset() the instance for the new process if another thread
            # hasn't already done so
            try:
                if self.pid != os.getpid():
                    self.reset()
            finally:
                self._fork_lock.release()
    def get_connection(self, command_name, *keys, **options):
        "Get a connection from the pool"
        self._checkpid()
        with self._lock:
            try:
                connection = self._available_connections.pop()
            except IndexError:
                connection = self.make_connection()
            self._in_use_connections.add(connection)
        try:
            # ensure this connection is connected to Redis
            connection.connect()
            # connections that the pool provides should be ready to send
            # a command. if not, the connection was either returned to the
            # pool before all data has been read or the socket has been
            # closed. either way, reconnect and verify everything is good.
            try:
                if connection.can_read():
                    raise ConnectionError('Connection has data')
            except ConnectionError:
                connection.disconnect()
                connection.connect()
                if connection.can_read():
                    raise ConnectionError('Connection not ready')
        except BaseException:
            # release the connection back to the pool so that we don't
            # leak it
            self.release(connection)
            raise
        return connection
    def get_encoder(self):
        "Return an encoder based on encoding settings"
        kwargs = self.connection_kwargs
        return Encoder(
            encoding=kwargs.get('encoding', 'utf-8'),
            encoding_errors=kwargs.get('encoding_errors', 'strict'),
            decode_responses=kwargs.get('decode_responses', False)
        )
    def make_connection(self):
        "Create a new connection"
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        return self.connection_class(**self.connection_kwargs)
    def release(self, connection):
        "Releases the connection back to the pool"
        self._checkpid()
        with self._lock:
            # a connection created before a fork belongs to the old process;
            # silently drop it rather than pool it in the child
            if connection.pid != self.pid:
                return
            self._in_use_connections.remove(connection)
            self._available_connections.append(connection)
    def disconnect(self):
        "Disconnects all connections in the pool"
        self._checkpid()
        with self._lock:
            all_conns = chain(self._available_connections,
                              self._in_use_connections)
            for connection in all_conns:
                connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
    """
    Thread-safe blocking connection pool::
        >>> from redis.client import Redis
        >>> client = Redis(connection_pool=BlockingConnectionPool())
    It performs the same function as the default
    ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,
    it maintains a pool of reusable connections that can be shared by
    multiple redis clients (safely across threads if required).
    The difference is that, in the event that a client tries to get a
    connection from the pool when all of connections are in use, rather than
    raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default
    ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it
    makes the client wait ("blocks") for a specified number of seconds until
    a connection becomes available.
    Use ``max_connections`` to increase / decrease the pool size::
        >>> pool = BlockingConnectionPool(max_connections=10)
    Use ``timeout`` to tell it either how many seconds to wait for a connection
    to become available, or to block forever:
        # Block forever.
        >>> pool = BlockingConnectionPool(timeout=None)
        # Raise a ``ConnectionError`` after five seconds if a connection is
        # not available.
        >>> pool = BlockingConnectionPool(timeout=5)
    """
    def __init__(self, max_connections=50, timeout=20,
                 connection_class=Connection, queue_class=LifoQueue,
                 **connection_kwargs):
        self.queue_class = queue_class
        self.timeout = timeout
        super(BlockingConnectionPool, self).__init__(
            connection_class=connection_class,
            max_connections=max_connections,
            **connection_kwargs)
    def reset(self):
        """(Re)build the queue and connection list for the current process."""
        # Create and fill up a thread safe queue with ``None`` values.
        self.pool = self.queue_class(self.max_connections)
        while True:
            try:
                self.pool.put_nowait(None)
            except Full:
                break
        # Keep a list of actual connection instances so that we can
        # disconnect them later.
        self._connections = []
        # this must be the last operation in this method. while reset() is
        # called when holding _fork_lock, other threads in this process
        # can call _checkpid() which compares self.pid and os.getpid() without
        # holding any lock (for performance reasons). keeping this assignment
        # as the last operation ensures that those other threads will also
        # notice a pid difference and block waiting for the first thread to
        # release _fork_lock. when each of these threads eventually acquire
        # _fork_lock, they will notice that another thread already called
        # reset() and they will immediately release _fork_lock and continue on.
        self.pid = os.getpid()
    def make_connection(self):
        "Make a fresh connection."
        connection = self.connection_class(**self.connection_kwargs)
        self._connections.append(connection)
        return connection
    def get_connection(self, command_name, *keys, **options):
        """
        Get a connection, blocking for ``self.timeout`` until a connection
        is available from the pool.
        If the connection returned is ``None`` then creates a new connection.
        Because we use a last-in first-out queue, the existing connections
        (having been returned to the pool after the initial ``None`` values
        were added) will be returned before ``None`` values. This means we only
        create new connections when we need to, i.e.: the actual number of
        connections will only increase in response to demand.
        """
        # Make sure we haven't changed process.
        self._checkpid()
        # Try and get a connection from the pool. If one isn't available within
        # self.timeout then raise a ``ConnectionError``.
        connection = None
        try:
            connection = self.pool.get(block=True, timeout=self.timeout)
        except Empty:
            # Note that this is not caught by the redis client and will be
            # raised unless handled by application code. If you want never to
            # see this error, pass timeout=None so the pool blocks forever.
            raise ConnectionError("No connection available.")
        # If the ``connection`` is actually ``None`` then that's a cue to make
        # a new connection to add to the pool.
        if connection is None:
            connection = self.make_connection()
        try:
            # ensure this connection is connected to Redis
            connection.connect()
            # connections that the pool provides should be ready to send
            # a command. if not, the connection was either returned to the
            # pool before all data has been read or the socket has been
            # closed. either way, reconnect and verify everything is good.
            try:
                if connection.can_read():
                    raise ConnectionError('Connection has data')
            except ConnectionError:
                connection.disconnect()
                connection.connect()
                if connection.can_read():
                    raise ConnectionError('Connection not ready')
        except BaseException:
            # release the connection back to the pool so that we don't leak it
            self.release(connection)
            raise
        return connection
    def release(self, connection):
        "Releases the connection back to the pool."
        # Make sure we haven't changed process.
        self._checkpid()
        if connection.pid != self.pid:
            # connection was created by the parent process; drop it
            return
        # Put the connection back into the pool.
        try:
            self.pool.put_nowait(connection)
        except Full:
            # perhaps the pool has been reset() after a fork? regardless,
            # we don't want this connection
            pass
    def disconnect(self):
        "Disconnects all connections in the pool."
        self._checkpid()
        for connection in self._connections:
            connection.disconnect()
| apache-2.0 |
minrk/nbgrader | nbgrader/tests/formgrader/test_auth_failures.py | 1 | 3390 | import pytest
from .base import BaseTestFormgrade
@pytest.mark.js
@pytest.mark.usefixtures("bad_formgrader")
class TestAuthFailures(BaseTestFormgrade):
    """Selenium tests for formgrader behavior when auth is misconfigured."""
    def test_start(self):
        # This is just a fake test, since starting up the browser and formgrader
        # can take a little while. So if anything goes wrong there, this test
        # will fail, rather than having it fail on some other test.
        pass
    def test_login(self):
        """Logging in against a bad formgrader should surface a 500 error page."""
        self._get(self.manager.base_formgrade_url)
        self._wait_for_element("username_input")
        # unauthenticated access bounces to the hub login page with a
        # ?next= parameter pointing back at the formgrader
        next_url = self.formgrade_url().replace("http://localhost:8000", "")
        self._check_url("http://localhost:8000/hub/login?next={}".format(next_url))
        # fill out the form
        self.browser.find_element_by_id("username_input").send_keys("foobar")
        self.browser.find_element_by_id("login_submit").click()
        # check the url
        self._wait_for_gradebook_page("")
        self._wait_for_element("error-500")
@pytest.mark.js
@pytest.mark.usefixtures("all_formgraders")
class TestInvalidGrader(BaseTestFormgrade):
    """Selenium tests: users who are not graders must be rejected."""
    def test_start(self):
        # This is just a fake test, since starting up the browser and formgrader
        # can take a little while. So if anything goes wrong there, this test
        # will fail, rather than having it fail on some other test.
        pass
    def test_invalid_login(self):
        """A non-grader user ("baz") logs in and should hit a 403 page."""
        if self.manager.jupyterhub is None:
            pytest.skip("JupyterHub is not running")
        self._get(self.manager.base_formgrade_url)
        self._wait_for_element("username_input")
        next_url = self.formgrade_url().replace("http://localhost:8000", "")
        self._check_url("http://localhost:8000/hub/login?next={}".format(next_url))
        # fill out the form
        self.browser.find_element_by_id("username_input").send_keys("baz")
        self.browser.find_element_by_id("login_submit").click()
        # check the url
        self._wait_for_gradebook_page("")
        self._wait_for_element("error-403")
        # logout
        self._get("http://localhost:8000/hub/logout")
        self._wait_for_element("username_input")
    def test_expired_cookie(self):
        """Deleting or corrupting the hub auth cookie must force a re-login."""
        if self.manager.jupyterhub is None:
            pytest.skip("JupyterHub is not running")
        self._get(self.manager.base_formgrade_url)
        self._wait_for_element("username_input")
        next_url = self.formgrade_url().replace("http://localhost:8000", "")
        self._check_url("http://localhost:8000/hub/login?next={}".format(next_url))
        # fill out the form
        self.browser.find_element_by_id("username_input").send_keys("foobar")
        self.browser.find_element_by_id("login_submit").click()
        # check the url
        self._wait_for_gradebook_page("")
        # get and delete the cookie
        cookie = self.browser.get_cookie("jupyter-hub-token")
        self.browser.delete_cookie("jupyter-hub-token")
        # check that we are redirected to the login page
        self._get(self.manager.base_formgrade_url)
        self._wait_for_element("username_input")
        # add a bad cookie
        cookie['value'] = cookie['value'][:-1] + 'a"'
        self.browser.add_cookie(cookie)
        # check that we are still redirected to the login page
        self._get(self.manager.base_formgrade_url)
        self._wait_for_element("username_input")
| bsd-3-clause |
newemailjdm/scipy | scipy/weave/size_check.py | 66 | 10170 | from __future__ import absolute_import, print_function
from numpy import ones, ndarray, array, asarray, concatenate, zeros, shape, \
alltrue, equal, divide, arccos, arcsin, arctan, cos, cosh, \
sin, sinh, exp, ceil, floor, fabs, log, log10, sqrt, argmin, \
argmax, argsort, around, absolute, sign, negative, float32
import sys
# Scalar types treated as numeric leaves in size-checked expressions
# (``long`` — this module is Python 2 code).
numericTypes = (int, long, float, complex)
def isnumeric(t):
    """Return True if *t* is a plain Python numeric scalar."""
    return isinstance(t, numericTypes)
def time_it():
    """Ad-hoc micro-benchmark: time check_expr() on an FDTD-style update."""
    import time
    expr = "ex[:,1:,1:] = ca_x[:,1:,1:] * ex[:,1:,1:]" \
           "+ cb_y_x[:,1:,1:] * (hz[:,1:,1:] - hz[:,:-1,1:])" \
           "- cb_z_x[:,1:,1:] * (hy[:,1:,1:] - hy[:,1:,:-1])"
    shape = (10, 10, 10)
    # check_expr() reads these arrays by name out of locals(), so the
    # variable names must match the names used inside ``expr``.
    ex = ones(shape, dtype=float32)
    ca_x = ones(shape, dtype=float32)
    cb_y_x = ones(shape, dtype=float32)
    cb_z_x = ones(shape, dtype=float32)
    hz = ones(shape, dtype=float32)
    hy = ones(shape, dtype=float32)
    n_calls = 1
    start = time.time()
    for _ in range(n_calls):
        passed = check_expr(expr, locals())
    elapsed = time.time() - start
    print('time per call:', elapsed / n_calls)
    print('passed:', passed)
def check_expr(expr, local_vars, global_vars=None):
    """ Currently only checks expressions (not suites).
        Doesn't check that lhs = rhs. checked by compiled func though

        Builds a namespace that maps every ndarray in the caller's
        globals/locals to a shape-only ``dummy_array`` stand-in (and copies
        plain numeric scalars through), then attempts to run ``expr``
        against it.  Returns 1 if the expression is size-compatible,
        0 if evaluating it raises.
    """
    # avoid a mutable default argument; an empty dict is built per call
    if global_vars is None:
        global_vars = {}
    values = {}
    # first handle the globals
    for var, val in global_vars.items():
        if isinstance(val, ndarray):
            values[var] = dummy_array(val, name=var)
        elif isnumeric(val):
            values[var] = val
    # now handle the locals (locals win on name clashes)
    for var, val in local_vars.items():
        if isinstance(val, ndarray):
            values[var] = dummy_array(val, name=var)
        if isnumeric(val):
            values[var] = val
    # Try the expression as a statement first (handles assignments like
    # "a[:] = b + c"); fall back to eval for anything exec rejects.  Any
    # exception means the sizes are incompatible or the expression is
    # malformed.  (The original code also ran an unguarded exec() before
    # this try block, which both doubled the evaluation and let failures
    # escape as raw exceptions instead of returning 0 — removed.)
    try:
        exec(expr, values)
    except Exception:
        try:
            eval(expr, values)
        except Exception:
            return 0
    return 1
# Shared sentinels: a zero-length array standing in for "no shape"
# (scalars), and the full slice ``[:]``.
empty = array(())
empty_slice = slice(None)
def make_same_length(x, y):
    """Left-pad the shorter of two shape sequences with 1s (broadcasting).

    Returns the pair as equal-length arrays.  Anything without a len()
    (a scalar) is treated as zero-dimensional; two scalars yield the
    shared ``empty`` sentinel pair.
    """
    # len() raises TypeError for unsized objects; the original bare
    # ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    try:
        Nx = len(x)
    except TypeError:
        Nx = 0
    try:
        Ny = len(y)
    except TypeError:
        Ny = 0
    if Nx == Ny == 0:
        return empty, empty
    if Nx == Ny:
        return asarray(x), asarray(y)
    # prepend 1s so both shapes have the same rank, per broadcasting rules
    diff = abs(Nx - Ny)
    front = ones(diff, int)
    if Nx > Ny:
        return asarray(x), concatenate((front, y))
    # only remaining case: Ny > Nx (explicit return removes the implicit
    # None fallthrough the original if/elif chain allowed)
    return concatenate((front, x)), asarray(y)
def binary_op_size(xx, yy):
    """Shape produced by broadcasting ``xx`` against ``yy``.

    Raises ValueError when the two shapes cannot be broadcast together.
    """
    x, y = make_same_length(xx, yy)
    res = zeros(len(x))
    for i, (xd, yd) in enumerate(zip(x, y)):
        # per-dimension broadcasting: equal sizes pass through, a size of
        # 1 stretches to match the other side
        if xd == yd or yd == 1:
            res[i] = xd
        elif xd == 1:
            res[i] = yd
        else:
            # offer more information here about which variables.
            raise ValueError("frames are not aligned")
    return res
class dummy_array(object):
    """Shape-only stand-in for an ndarray, used during expression size
    checking.  Arithmetic on dummy_arrays propagates only the broadcast
    shape; no element values are stored or computed.
    """
    def __init__(self,ary,ary_is_shape=0,name=None):
        # ``ary`` is either a real array-like (shape is derived from it) or,
        # when ``ary_is_shape`` is true, already a shape sequence.
        self.name = name
        if ary_is_shape:
            self.shape = ary
            # self.shape = asarray(ary)
        else:
            try:
                self.shape = shape(ary)
            except:
                # scalars and other unshaped objects are zero-dimensional
                self.shape = empty
        # self.value = ary
    def binary_op(self,other):
        """Shape result of combining self with ``other`` via any binary op."""
        try:
            x = other.shape
        except AttributeError:
            # scalar operand: broadcasts against anything
            x = empty
        new_shape = binary_op_size(self.shape,x)
        return dummy_array(new_shape,1)
    def __cmp__(self,other):
        # This isn't an exact compare, but does work for ==
        # cluge for Numeric
        if isnumeric(other):
            return 0
        if len(self.shape) == len(other.shape) == 0:
            return 0
        return not alltrue(equal(self.shape,other.shape),axis=0)
    # All binary arithmetic degenerates to shape broadcasting.
    def __add__(self,other):
        return self.binary_op(other)
    def __radd__(self,other):
        return self.binary_op(other)
    def __sub__(self,other):
        return self.binary_op(other)
    def __rsub__(self,other):
        return self.binary_op(other)
    def __mul__(self,other):
        return self.binary_op(other)
    def __rmul__(self,other):
        return self.binary_op(other)
    def __div__(self,other):
        return self.binary_op(other)
    def __rdiv__(self,other):
        return self.binary_op(other)
    def __mod__(self,other):
        return self.binary_op(other)
    def __rmod__(self,other):
        return self.binary_op(other)
    def __lshift__(self,other):
        return self.binary_op(other)
    def __rshift__(self,other):
        return self.binary_op(other)
    # unary ops
    # NOTE(review): Python's unary dunders take no ``other`` argument, so
    # these signatures are malformed — the interpreter would call them with
    # one argument and raise TypeError.  Left as-is to preserve behavior.
    def __neg__(self,other):
        return self
    def __pos__(self,other):
        return self
    def __abs__(self,other):
        return self
    def __invert__(self,other):
        return self
    # Not sure what to do with coersion ops.  Ignore for now.
    #
    # not currently supported by compiler.
    #   __divmod__
    #   __pow__
    #   __rpow__
    #   __and__
    #   __or__
    #   __xor__
    # item access and slicing
    def __setitem__(self,indices,val):
        # ignore for now
        pass
    def __len__(self):
        return self.shape[0]
    def __getslice__(self,i,j):
        # Python 2 old-style slice hook; clamp negatives and delegate to
        # the general __getitem__ path.
        i = max(i, 0)
        j = max(j, 0)
        return self.__getitem__((slice(i,j),))
    def __getitem__(self,indices):
        # Compute the shape that results from indexing/slicing, normalizing
        # start/stop/step exactly the way the compiled code will see them.
        # ayeyaya this is a mess
        # print indices, type(indices), indices.shape
        if not isinstance(indices, tuple):
            indices = (indices,)
        if Ellipsis in indices:
            raise IndexError("Ellipsis not currently supported")
        new_dims = []
        dim = 0
        for index in indices:
            try:
                dim_len = self.shape[dim]
            except IndexError:
                raise IndexError("To many indices specified")
            # if (type(index) is SliceType and index.start == index.stop == index.step):
            if (index is empty_slice):
                # full slice keeps the dimension unchanged
                slc_len = dim_len
            elif isinstance(index, slice):
                beg,end,step = index.start,index.stop,index.step
                # handle if they are dummy arrays
                # if hasattr(beg,'value') and type(beg.value) != ndarray:
                #    beg = beg.value
                # if hasattr(end,'value') and type(end.value) != ndarray:
                #    end = end.value
                # if hasattr(step,'value') and type(step.value) != ndarray:
                #    step = step.value
                if beg is None:
                    beg = 0
                # NOTE(review): sys.maxint exists only on Python 2; this
                # module predates Python 3 support.
                if end == sys.maxint or end is None:
                    end = dim_len
                if step is None:
                    step = 1
                if beg < 0:
                    beg += dim_len
                if end < 0:
                    end += dim_len
                # the following is list like behavior,
                # which isn't adhered to by arrays.
                # FIX THIS ANOMALY IN NUMERIC!
                if beg < 0:
                    beg = 0
                if beg > dim_len:
                    beg = dim_len
                if end < 0:
                    end = 0
                if end > dim_len:
                    end = dim_len
                # This is rubbish.
                if beg == end:
                    beg,end,step = 0,0,1
                elif beg >= dim_len and step > 0:
                    beg,end,step = 0,0,1
                # elif index.step > 0 and beg <= end:
                elif step > 0 and beg <= end:
                    pass  # slc_len = abs(divide(end-beg-1,step)+1)
                # handle [::-1] and [-1::-1] correctly
                # elif index.step > 0 and beg > end:
                elif step > 0 and beg > end:
                    beg,end,step = 0,0,1
                elif(step < 0 and index.start is None and index.stop is None):
                    beg,end,step = 0,dim_len,-step
                elif(step < 0 and index.start is None):
                    # +1 because negative stepping is inclusive
                    beg,end,step = end+1,dim_len,-step
                elif(step < 0 and index.stop is None):
                    beg,end,step = 0,beg+1,-step
                elif(step < 0 and beg > end):
                    beg,end,step = end,beg,-step
                elif(step < 0 and beg < end):
                    beg,end,step = 0,0,-step
                slc_len = abs(divide(end-beg-1,step)+1)
                new_dims.append(slc_len)
            else:
                # integer index: consumes (removes) this dimension
                if index < 0:
                    index += dim_len
                if index >= 0 and index < dim_len:
                    # this reduces the array dimensions by one
                    pass
                else:
                    raise IndexError("Index out of range")
            dim += 1
        # any trailing, un-indexed dimensions carry through unchanged
        new_dims.extend(self.shape[dim:])
        if 0 in new_dims:
            raise IndexError("Zero length slices not currently supported")
        return dummy_array(new_dims,1)
    def __repr__(self):
        val = str((self.name, str(self.shape)))
        return val
def unary(ary):
    """Size-check stand-in for elementwise unary ufuncs: shape is unchanged."""
    return ary
def not_implemented(ary):
    # Placeholder for ufuncs whose size semantics are presumably not yet
    # modelled; currently behaves identically to ``unary``.
    return ary
# all imported from Numeric and need to be reassigned.
unary_op = [arccos, arcsin, arctan, cos, cosh, sin, sinh,
            exp,ceil,floor,fabs,log,log10,sqrt]
unsupported = [argmin,argmax, argsort,around, absolute,sign,negative,floor]
# NOTE(review): the two loops below only rebind the loop variable ``func``;
# they do NOT replace the module-level names as the comment above intends —
# the numpy functions remain bound.  Left as-is to preserve behavior.
for func in unary_op:
    func = unary
for func in unsupported:
    func = not_implemented
def reduction(ary, axis=0):
    """Shape result of reducing ``ary`` (e.g. a sum) along ``axis``.

    Negative axes count from the end; an out-of-range axis raises
    ValueError.
    """
    ndim = len(ary.shape)
    if axis < 0:
        axis = axis + ndim
    if not 0 <= axis < ndim:
        raise ValueError("Dimension not in array")
    # drop the reduced dimension, keep the rest in order
    remaining = list(ary.shape[:axis]) + list(ary.shape[axis + 1:])
    return dummy_array(remaining, 1)
# functions currently not supported by compiler
# reductions are gonna take some array reordering for the general case,
# so this is gonna take some thought (probably some tree manipulation).
def take(ary,axis=0):
    """Size semantics for ``take`` are not implemented by the compiler."""
    raise NotImplementedError
# and all the rest
| bsd-3-clause |
shaswatsunder/aakashlabs-forum | django-taggit/build/lib.linux-i686-2.7/taggit_autocomplete_modified/tests.py | 4 | 1554 | # -*- coding: utf-8 -*-
#
# This file is part of django-taggit-autocomplete-modified.
#
# django-taggit-autocomplete-modified provides autocomplete functionality
# to the tags form field of django-taggit.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-taggit-autocomplete-modified
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-taggit-autocomplete-modified
#
# Copyright 2011 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Sanity-check unit test demonstrating the Django test runner."""
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # failUnlessEqual is a long-deprecated alias of assertEqual
        # (deprecated since Python 2.7, removed in 3.12); use the
        # canonical name.
        self.assertEqual(1 + 1, 2)
# Doctest suite picked up by Django's test runner alongside the unit tests.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| gpl-3.0 |
liangwang/m5 | src/cpu/o3lite/O3Checker.py | 3 | 2101 | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from BaseCPU import BaseCPU
class O3Checker(BaseCPU):
    """Checker CPU paired with the O3 CPU model to cross-verify its
    architectural state as instructions commit."""
    type = 'O3Checker'
    # Abort the simulation when the checker detects an error.
    exitOnError = Param.Bool(False, "Exit on an error")
    # On a mismatch, resynchronize the checker from the main CPU's state
    # instead of diverging further.
    updateOnError = Param.Bool(False,
        "Update the checker with the main CPU's state on an error")
    # Load results can legitimately differ (e.g. racing stores), so allow
    # downgrading those mismatches to warnings.
    warnOnlyOnLoadError = Param.Bool(False,
        "If a load result is incorrect, only print a warning and do not exit")
    function_trace = Param.Bool(False, "Enable function trace")
    function_trace_start = Param.Tick(0, "Cycle to start function trace")
| bsd-3-clause |
revmischa/boto | boto/sdb/domain.py | 153 | 14351 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
"""
Represents an SDB Domain
"""
from boto.sdb.queryresultset import SelectResultSet
from boto.compat import six
class Domain(object):
    """A single SimpleDB domain.

    Thin wrapper that delegates all item operations to the underlying
    `connection` object and adds XML dump/restore helpers.
    """

    def __init__(self, connection=None, name=None):
        self.connection = connection  # SDB connection used for all API calls
        self.name = name  # name of the SimpleDB domain
        self._metadata = None  # cached DomainMetaData; populated lazily

    def __repr__(self):
        return 'Domain:%s' % self.name

    def __iter__(self):
        # Iterating the domain yields every item via a full SELECT scan.
        return iter(self.select("SELECT * FROM `%s`" % self.name))

    def startElement(self, name, attrs, connection):
        # SAX response-parsing hook: no nested elements to handle here.
        return None

    def endElement(self, name, value, connection):
        # SAX response-parsing hook: capture the domain name; any other
        # element is stashed as an attribute of the same name.
        if name == 'DomainName':
            self.name = value
        else:
            setattr(self, name, value)

    def get_metadata(self):
        """Return this domain's metadata, fetching and caching it on first use."""
        if not self._metadata:
            self._metadata = self.connection.domain_metadata(self)
        return self._metadata

    def put_attributes(self, item_name, attributes,
                       replace=True, expected_value=None):
        """
        Store attributes for a given item.

        :type item_name: string
        :param item_name: The name of the item whose attributes are being stored.

        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes

        :type expected_value: list
        :param expected_value: If supplied, this is a list or tuple consisting
            of a single attribute name and expected value. The list can be
            of the form:

            * ['name', 'value']

            In which case the call will first verify that the attribute
            "name" of this item has a value of "value". If it does, the delete
            will proceed, otherwise a ConditionalCheckFailed error will be
            returned. The list can also be of the form:

            * ['name', True|False]

            which will simply check for the existence (True) or non-existence
            (False) of the attribute.

        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
            existing values or will be added as addition values.
            Defaults to True.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.put_attributes(self, item_name, attributes,
                                              replace, expected_value)

    def batch_put_attributes(self, items, replace=True):
        """
        Store attributes for multiple items.

        :type items: dict or dict-like object
        :param items: A dictionary-like object. The keys of the dictionary are
            the item names and the values are themselves dictionaries
            of attribute names/values, exactly the same as the
            attribute_names parameter of the scalar put_attributes
            call.

        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
            existing values or will be added as addition values.
            Defaults to True.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.batch_put_attributes(self, items, replace)

    def get_attributes(self, item_name, attribute_name=None,
                       consistent_read=False, item=None):
        """
        Retrieve attributes for a given item.

        :type item_name: string
        :param item_name: The name of the item whose attributes are being retrieved.

        :type attribute_name: string or list of strings
        :param attribute_name: An attribute name or list of attribute names. This
            parameter is optional. If not supplied, all attributes
            will be retrieved for the item.

        :rtype: :class:`boto.sdb.item.Item`
        :return: An Item mapping type containing the requested attribute name/values
        """
        return self.connection.get_attributes(self, item_name, attribute_name,
                                              consistent_read, item)

    def delete_attributes(self, item_name, attributes=None,
                          expected_values=None):
        """
        Delete attributes from a given item.

        :type item_name: string
        :param item_name: The name of the item whose attributes are being deleted.

        :type attributes: dict, list or :class:`boto.sdb.item.Item`
        :param attributes: Either a list containing attribute names which will cause
            all values associated with that attribute name to be deleted or
            a dict or Item containing the attribute names and keys and list
            of values to delete as the value. If no value is supplied,
            all attribute name/values for the item will be deleted.

        :type expected_values: list
        :param expected_values: If supplied, this is a list or tuple consisting
            of a single attribute name and expected value. The list can be of
            the form:

            * ['name', 'value']

            In which case the call will first verify that the attribute "name"
            of this item has a value of "value". If it does, the delete
            will proceed, otherwise a ConditionalCheckFailed error will be
            returned. The list can also be of the form:

            * ['name', True|False]

            which will simply check for the existence (True) or
            non-existence (False) of the attribute.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.delete_attributes(self, item_name, attributes,
                                                 expected_values)

    def batch_delete_attributes(self, items):
        """
        Delete multiple items in this domain.

        :type items: dict or dict-like object
        :param items: A dictionary-like object. The keys of the dictionary are
            the item names and the values are either:

            * dictionaries of attribute names/values, exactly the
              same as the attribute_names parameter of the scalar
              put_attributes call. The attribute name/value pairs
              will only be deleted if they match the name/value
              pairs passed in.
            * None which means that all attributes associated
              with the item should be deleted.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.batch_delete_attributes(self, items)

    def select(self, query='', next_token=None, consistent_read=False, max_items=None):
        """
        Returns a set of Attributes for item names within domain_name that match the query.
        The query must be expressed in using the SELECT style syntax rather than the
        original SimpleDB query language.

        :type query: string
        :param query: The SimpleDB query to be performed.

        :rtype: iter
        :return: An iterator containing the results. This is actually a generator
            function that will iterate across all search results, not just the
            first page.
        """
        return SelectResultSet(self, query, max_items=max_items, next_token=next_token,
                               consistent_read=consistent_read)

    def get_item(self, item_name, consistent_read=False):
        """
        Retrieves an item from the domain, along with all of its attributes.

        :param string item_name: The name of the item to retrieve.
        :rtype: :class:`boto.sdb.item.Item` or ``None``
        :keyword bool consistent_read: When set to true, ensures that the most
            recent data is returned.
        :return: The requested item, or ``None`` if there was no match found
        """
        item = self.get_attributes(item_name, consistent_read=consistent_read)
        if item:
            # Backlink the item to this domain so item.save()/delete() work.
            item.domain = self
            return item
        else:
            return None

    def new_item(self, item_name):
        """Create a new (empty, unsaved) item bound to this domain."""
        return self.connection.item_cls(self, item_name)

    def delete_item(self, item):
        """Delete an item (all of its attributes) from this domain."""
        self.delete_attributes(item.name)

    def to_xml(self, f=None):
        """Get this domain as an XML DOM Document

        :param f: Optional File to dump directly to
        :type f: File or Stream

        :return: File object where the XML has been dumped to
        :rtype: file
        """
        if not f:
            from tempfile import TemporaryFile
            f = TemporaryFile()
        print('<?xml version="1.0" encoding="UTF-8"?>', file=f)
        print('<Domain id="%s">' % self.name, file=f)
        for item in self:
            print('\t<Item id="%s">' % item.name, file=f)
            for k in item:
                print('\t\t<attribute id="%s">' % k, file=f)
                values = item[k]
                # Single values and value lists are normalized to a list so
                # each value gets its own <value> element.
                if not isinstance(values, list):
                    values = [values]
                for value in values:
                    print('\t\t\t<value><![CDATA[', end=' ', file=f)
                    if isinstance(value, six.text_type):
                        value = value.encode('utf-8', 'replace')
                    else:
                        # Coerce non-text values to text, then encode.
                        value = six.text_type(value, errors='replace').encode('utf-8', 'replace')
                    f.write(value)
                    print(']]></value>', file=f)
                print('\t\t</attribute>', file=f)
            print('\t</Item>', file=f)
        print('</Domain>', file=f)
        f.flush()
        # Rewind so the caller can read the dump from the start.
        f.seek(0)
        return f

    def from_xml(self, doc):
        """Load this domain based on an XML document"""
        import xml.sax
        handler = DomainDumpParser(self)
        xml.sax.parse(doc, handler)
        return handler

    def delete(self):
        """
        Delete this domain, and all items under it
        """
        return self.connection.delete_domain(self)
class DomainMetaData(object):
    """Holds the metadata returned by a SimpleDB DomainMetadata call.

    Instances are normally populated by boto's SAX response parser via the
    startElement/endElement hooks.
    """

    def __init__(self, domain=None):
        self.domain = domain
        self.item_count = None
        self.item_names_size = None
        self.attr_name_count = None
        self.attr_names_size = None
        self.attr_value_count = None
        self.attr_values_size = None
        # Fix: previously `timestamp` was only assigned inside endElement,
        # so instances that never parsed a Timestamp element raised
        # AttributeError on access. Initialize it like the other fields.
        self.timestamp = None

    def startElement(self, name, attrs, connection):
        # SAX parsing hook: no nested elements of interest.
        return None

    def endElement(self, name, value, connection):
        """Record a metadata element's value on the matching attribute.

        Numeric fields are converted to int; unknown elements are stashed
        as attributes of the same name.
        """
        if name == 'ItemCount':
            self.item_count = int(value)
        elif name == 'ItemNamesSizeBytes':
            self.item_names_size = int(value)
        elif name == 'AttributeNameCount':
            self.attr_name_count = int(value)
        elif name == 'AttributeNamesSizeBytes':
            self.attr_names_size = int(value)
        elif name == 'AttributeValueCount':
            self.attr_value_count = int(value)
        elif name == 'AttributeValuesSizeBytes':
            self.attr_values_size = int(value)
        elif name == 'Timestamp':
            self.timestamp = value
        else:
            setattr(self, name, value)
import sys
from xml.sax.handler import ContentHandler
class DomainDumpParser(ContentHandler):
    """
    SAX parser for a domain that has been dumped

    Consumes the XML produced by Domain.to_xml and re-uploads items to the
    domain in batches via UploaderThread.
    """

    def __init__(self, domain):
        self.uploader = UploaderThread(domain)  # current batch uploader
        self.item_id = None  # id of the <Item> currently being parsed
        self.attrs = {}  # attribute name -> list of values for current item
        self.attribute = None  # id of the <attribute> currently being parsed
        self.value = ""  # accumulated character data for current <value>
        self.domain = domain

    def startElement(self, name, attrs):
        if name == "Item":
            self.item_id = attrs['id']
            self.attrs = {}
        elif name == "attribute":
            self.attribute = attrs['id']
        elif name == "value":
            # Reset the accumulator; characters() may be called repeatedly.
            self.value = ""

    def characters(self, ch):
        # SAX may deliver text in chunks, so append rather than assign.
        self.value += ch

    def endElement(self, name):
        if name == "value":
            if self.value and self.attribute:
                value = self.value.strip()
                attr_name = self.attribute.strip()
                if attr_name in self.attrs:
                    self.attrs[attr_name].append(value)
                else:
                    self.attrs[attr_name] = [value]
        elif name == "Item":
            self.uploader.items[self.item_id] = self.attrs
            # Every 20 items we spawn off the uploader
            if len(self.uploader.items) >= 20:
                self.uploader.start()
                self.uploader = UploaderThread(self.domain)
        elif name == "Domain":
            # If we're done, spawn off our last Uploader Thread
            self.uploader.start()
from threading import Thread
class UploaderThread(Thread):
    """Uploader Thread

    Uploads a batch of items (name -> attribute dict) to a SimpleDB domain,
    falling back to one-by-one puts when the batch call fails.
    """

    def __init__(self, domain):
        self.db = domain  # domain to upload into
        self.items = {}  # batch of item_name -> attributes to upload
        super(UploaderThread, self).__init__()

    def run(self):
        """Upload self.items; on batch failure, retry items individually."""
        try:
            self.db.batch_put_attributes(self.items)
        except Exception:
            # Fix: narrowed the original bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed. Best-effort
            # fallback behavior is otherwise unchanged.
            print("Exception using batch put, trying regular put instead")
            for item_name in self.items:
                self.db.put_attributes(item_name, self.items[item_name])
        print(".", end=' ')
        sys.stdout.flush()
| mit |
louyihua/edx-platform | lms/djangoapps/instructor_task/tasks_helper.py | 3 | 74118 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
import re
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from survey.models import SurveyAnswer
from track.views import task_track
from util.db import outer_atomic
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xblock.runtime import KvsFieldData
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses,
GeneratedCertificate
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from lms.djangoapps.grades.course_grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import (
enrolled_students_features,
get_proctored_exam_results,
list_may_enroll,
list_problem_responses
)
from instructor_analytics.csvs import format_dictlist
from openassessment.data import OraAggregateData
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from lms.djangoapps.teams.models import CourseTeamMembership
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')

# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'

# Roles excluded from student-facing reports (course team, not learners).
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']

# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'

# The setting name used for events when "settings" (account settings, preferences, profile information) change.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.

    Permits updating information about task in corresponding InstructorTask for monitoring purposes.

    Assumes that the entry_id of the InstructorTask model is the first argument to the task.

    The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
    updates the entry on success and failure of the task it wraps. It is setting the entry's value
    for task_state based on what Celery would set it to once the task returns to Celery:
    FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and documented there.
    """
    abstract = True

    def on_success(self, task_progress, task_id, args, kwargs):
        """
        Update InstructorTask object corresponding to this task with info about success.

        Updates task_output and task_state. But it shouldn't actually do anything
        if the task is only creating subtasks to actually do the work.

        Assumes `task_progress` is a dict containing the task's result, with the following keys:

        'attempted': number of attempts made
        'succeeded': number of attempts that "succeeded"
        'skipped': number of attempts that "skipped"
        'failed': number of attempts that "failed"
        'total': number of possible subtasks to attempt
        'action_name': user-visible verb to use in status messages. Should be past-tense.
            Pass-through of input `action_name`.
        'duration_ms': how long the task has (or had) been running.

        This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
        """
        TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
        # We should be able to find the InstructorTask object to update
        # based on the task_id here, without having to dig into the
        # original args to the task. On the other hand, the entry_id
        # is the first value passed to all such args, so we'll use that.
        # And we assume that it exists, else we would already have had a failure.
        entry_id = args[0]
        entry = InstructorTask.objects.get(pk=entry_id)
        # Check to see if any subtasks had been defined as part of this task.
        # If not, then we know that we're done. (If so, let the subtasks
        # handle updating task_state themselves.)
        if len(entry.subtasks) == 0:
            entry.task_output = InstructorTask.create_output_for_success(task_progress)
            entry.task_state = SUCCESS
            entry.save_now()

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Update InstructorTask object corresponding to this task with info about failure.

        Fetches and updates exception and traceback information on failure.

        If an exception is raised internal to the task, it is caught by celery and provided here.
        The information is recorded in the InstructorTask object as a JSON-serialized dict
        stored in the task_output column. It contains the following keys:

        'exception': type of exception object
        'message': error message from exception object
        'traceback': traceback information (truncated if necessary)

        Note that there is no way to record progress made within the task (e.g. attempted,
        succeeded, etc.) when such failures occur.
        """
        TASK_LOG.debug(u'Task %s: failure returned', task_id)
        entry_id = args[0]
        try:
            entry = InstructorTask.objects.get(pk=entry_id)
        except InstructorTask.DoesNotExist:
            # if the InstructorTask object does not exist, then there's no point
            # trying to update it.
            TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
        else:
            TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
            entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
            entry.task_state = FAILURE
            entry.save_now()
class UpdateProblemModuleStateError(Exception):
    """
    Fatal error raised while updating problem modules.

    Signals that the current module could not be processed and that no
    further modules should be attempted.
    """
def _get_current_task():
    """
    Stub to make it easier to test without actually running Celery.

    This is a wrapper around celery.current_task, which provides access
    to the top of the stack of Celery's tasks. When running tests, however,
    it doesn't seem to work to mock current_task directly, so this wrapper
    is used to provide a hook to mock in tests, while providing the real
    `current_task` in production.
    """
    # Indirection exists purely as a mocking seam; do not inline at call sites.
    return current_task
class TaskProgress(object):
    """
    Tracks a task's progress counters ('attempted', 'succeeded', 'skipped',
    'failed', 'total') together with its 'action_name' and elapsed
    'duration_ms', and can push that state to the running Celery task.
    """

    def __init__(self, action_name, total, start_time):
        self.action_name = action_name
        self.total = total
        self.start_time = start_time
        # All counters begin at zero; callers increment them as work proceeds.
        self.attempted = 0
        self.succeeded = 0
        self.skipped = 0
        self.failed = 0

    def update_task_state(self, extra_meta=None):
        """
        Push this object's progress state onto the current Celery task.

        Arguments:
            extra_meta (dict): Extra metadata to merge into the progress dict

        Returns:
            dict: The current task's progress dict, also suitable for use by
            `run_main_task` and `BaseInstructorTask.on_success`.
        """
        elapsed_ms = int((time() - self.start_time) * 1000)
        progress_dict = {
            'action_name': self.action_name,
            'attempted': self.attempted,
            'succeeded': self.succeeded,
            'skipped': self.skipped,
            'failed': self.failed,
            'total': self.total,
            'duration_ms': elapsed_ms,
        }
        if extra_meta is not None:
            progress_dict.update(extra_meta)
        _get_current_task().update_state(state=PROGRESS, meta=progress_dict)
        return progress_dict
def run_main_task(entry_id, task_fcn, action_name):
    """
    Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.

    Arguments passed to `task_fcn` are:

    `entry_id` : the primary key for the InstructorTask entry representing the task.
    `course_id` : the id for the course.
    `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
    `action_name` : past-tense verb to use for constructing status messages.

    If no exceptions are raised, the `task_fcn` should return a dict containing
    the task's result with the following keys:

    'attempted': number of attempts made
    'succeeded': number of attempts that "succeeded"
    'skipped': number of attempts that "skipped"
    'failed': number of attempts that "failed"
    'total': number of possible subtasks to attempt
    'action_name': user-visible verb to use in status messages.
        Should be past-tense. Pass-through of input `action_name`.
    'duration_ms': how long the task has (or had) been running.
    """
    # Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
    # There's no point in catching it here.
    with outer_atomic():
        entry = InstructorTask.objects.get(pk=entry_id)
        entry.task_state = PROGRESS
        entry.save_now()

    # Get inputs to use in this task from the entry
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)

    # Construct log message
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
    TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)

    # Check that the task_id submitted in the InstructorTask matches the current task
    # that is running.
    request_task_id = _get_current_task().request.id
    if task_id != request_task_id:
        fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
        message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
        TASK_LOG.error(message)
        raise ValueError(message)

    # Now do the work
    with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
        task_progress = task_fcn(entry_id, course_id, task_input, action_name)

    # Release any queries that the connection has been hanging onto
    reset_queries()

    # Log and exit, returning task_progress info as task result
    TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
    return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.

    If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed three arguments: the module_descriptor for the module pointed to by the
    module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
    passed through. If the value returned by the update function evaluates to a boolean True,
    the update is successful; False indicates the update on the particular student module failed.
    A raised exception indicates a fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

    'attempted': number of attempts made
    'succeeded': number of attempts that "succeeded"
    'skipped': number of attempts that "skipped"
    'failed': number of attempts that "failed"
    'total': number of possible updates to attempt
    'action_name': user-visible verb to use in status messages. Should be past-tense.
        Pass-through of input `action_name`.
    'duration_ms': how long the task has (or had) been running.

    Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.
    """
    start_time = time()
    usage_keys = []
    problem_url = task_input.get('problem_url')
    entrance_exam_url = task_input.get('entrance_exam_url')
    student_identifier = task_input.get('student')
    problems = {}

    # if problem_url is present make a usage key from it
    if problem_url:
        usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
        usage_keys.append(usage_key)

        # find the problem descriptor:
        problem_descriptor = modulestore().get_item(usage_key)
        problems[unicode(usage_key)] = problem_descriptor

    # if entrance_exam is present grab all problems in it
    if entrance_exam_url:
        problems = get_problems_in_section(entrance_exam_url)
        usage_keys = [UsageKey.from_string(location) for location in problems.keys()]

    # find the modules in question
    modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)

    # give the option of updating an individual student. If not specified,
    # then updates all students who have responded to a problem so far
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student,
        # and let it throw an exception if none is found.
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        elif student_identifier is not None:
            student = User.objects.get(username=student_identifier)

    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
    task_progress.update_task_state()

    for module_to_update in modules_to_update:
        task_progress.attempted += 1
        module_descriptor = problems[unicode(module_to_update.module_state_key)]
        # There is no try here: if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
            update_status = update_fcn(module_descriptor, module_to_update)
            if update_status == UPDATE_STATUS_SUCCEEDED:
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                task_progress.succeeded += 1
            elif update_status == UPDATE_STATUS_FAILED:
                task_progress.failed += 1
            elif update_status == UPDATE_STATUS_SKIPPED:
                task_progress.skipped += 1
            else:
                raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))

    return task_progress.update_task_state()
def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Return the 'task_id' from `xmodule_instance_args`, falling back to
    UNKNOWN_TASK_ID when the dict is missing or has no such key."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
    """
    Make a tracking function that logs what happened.

    For insertion into ModuleSystem, and used by CapaModule, which will
    provide the event_type (as string) and event (as dict) as arguments.
    The request_info and task_info (and page) are provided here.
    """
    # Pull request-related tracking information out of the args passthrough,
    # then supplement it with task-specific information.
    if xmodule_instance_args is not None:
        request_info = xmodule_instance_args.get('request_info', {})
    else:
        request_info = {}
    task_info = {
        'student': student.username,
        'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args),
    }

    def track_function(event_type, event):
        # Delegate to the shared task_track helper, tagging the source page.
        return task_track(request_info, task_info, event_type, event, page=source_page)

    return track_function
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None, course=None):
    """
    Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
    student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))

    # get request-related tracking information from args passthrough, and supplement with task-specific
    # information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}

    def make_track_function():
        '''
        Make a tracking function that logs what happened.

        For insertion into ModuleSystem, and used by CapaModule, which will
        provide the event_type (as string) and event (as dict) as arguments.
        The request_info and task_info (and page) are provided here.
        '''
        return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')

    xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
        if xmodule_instance_args is not None else ''

    return get_module_for_descriptor_internal(
        user=student,
        descriptor=module_descriptor,
        student_data=student_data,
        course_id=course_id,
        track_function=make_track_function(),
        xqueue_callback_url_prefix=xqueue_callback_url_prefix,
        grade_bucket_type=grade_bucket_type,
        # This module isn't being used for front-end rendering
        request_token=None,
        # pass in a loaded course for override enabling
        course=course
    )
@outer_atomic
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns UPDATE_STATUS_SUCCEEDED if the problem was successfully rescored for
    the given student, and UPDATE_STATUS_FAILED if the rescore call returned an
    unexpected or unsuccessful response.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    usage_key = student_module.module_state_key

    # Use a bulk-operations context so repeated modulestore reads are batched.
    with modulestore().bulk_operations(course_id):
        course = get_course_by_id(course_id)
        # TODO: Here is a call site where we could pass in a loaded course. I
        # think we certainly need it since grading is happening here, and field
        # overrides would be important in handling that correctly
        instance = _get_module_instance_for_task(
            course_id,
            student,
            module_descriptor,
            xmodule_instance_args,
            grade_bucket_type='rescore',
            course=course
        )

        if instance is None:
            # Either permissions just changed, or someone is trying to be clever
            # and load something they shouldn't have access to.
            msg = "No module {loc} for student {student}--access denied?".format(
                loc=usage_key,
                student=student
            )
            TASK_LOG.debug(msg)
            raise UpdateProblemModuleStateError(msg)

        if not hasattr(instance, 'rescore_problem'):
            # This should also not happen, since it should be already checked in the caller,
            # but check here to be sure.
            msg = "Specified problem does not support rescoring."
            raise UpdateProblemModuleStateError(msg)

        # Perform the rescore and persist any state the module changed.
        result = instance.rescore_problem()
        instance.save()

        if 'success' not in result:
            # don't consider these fatal, but false means that the individual call didn't complete:
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: unexpected response %(msg)s",
                dict(
                    msg=result,
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        elif result['success'] not in ['correct', 'incorrect']:
            # The module responded, but with a status other than a graded outcome.
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        else:
            TASK_LOG.debug(
                u"successfully processed rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_SUCCEEDED
@outer_atomic
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Reset the problem attempt counter to zero for the given `student_module`.

    Returns UPDATE_STATUS_SUCCEEDED when the problem had a non-zero attempt
    count that was reset, and UPDATE_STATUS_SKIPPED otherwise.
    """
    raw_state = student_module.state
    state = json.loads(raw_state) if raw_state else {}
    previous_attempts = state.get('attempts', 0)
    if not previous_attempts > 0:
        # Nothing to reset: no attempts recorded, or already at zero.
        return UPDATE_STATUS_SKIPPED

    # Zero the counter, serialize back to JSON, and persist.
    state['attempts'] = 0
    student_module.state = json.dumps(state)
    student_module.save()

    # Emit a tracking event for the reset; request-related info comes from
    # the args passthrough, supplemented with task-specific information.
    track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    track_function('problem_reset_attempts', {"old_attempts": previous_attempts, "new_attempts": 0})
    return UPDATE_STATUS_SUCCEEDED
@outer_atomic
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Remove the given StudentModule row entirely.

    Always returns UPDATE_STATUS_SUCCEEDED, unless the database delete raises.
    """
    student_module.delete()
    # Emit a tracking event for the deletion; request-related info comes from
    # the args passthrough, supplemented with task-specific information.
    emit_event = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    emit_event('problem_delete_state', {})
    return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Store `rows` as a CSV file using a ReportStore.

    Arguments:
        rows: CSV data, a list of rows where each row is a list of column
            values (the first row may be a header)
        csv_name: base name of the resulting CSV file
        course_id: ID of the course the report belongs to
        timestamp: datetime used to stamp the generated file name
        config_name: settings key selecting which ReportStore backend to use
    """
    filename = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
    )
    ReportStore.from_config(config_name).store_rows(course_id, filename, rows)
    # Record that this report type was requested/produced.
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name})
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Render and store the Executive Summary HTML report using a ReportStore.

    Arguments:
        data_dict: executive report data used to fill the HTML template
        report_name: base name of the resulting HTML file
        course_id: ID of the course the report belongs to
        generated_at: datetime used to stamp the generated file name
        config_name: settings key selecting which ReportStore backend to use
    """
    # Render the HTML template into an in-memory buffer for upload.
    html = render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict)
    filename = u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
        course_prefix=course_filename_prefix_generator(course_id),
        report_name=report_name,
        timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
    )
    ReportStore.from_config(config_name).store(course_id, filename, StringIO(html))
    # Record that this report type was requested/produced.
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    # Build a log prefix identifying this task run.
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    # Work out which optional columns (cohorts, teams, experiment groups)
    # this course's configuration adds to the report.
    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                # The header row is generated lazily from the first successfully
                # graded student's section breakdown.
                header = [section['label'] for section in gradeset[u'section_breakdown']]
                rows.append(
                    ["id", "email", "username", "grade"] + header + cohorts_header +
                    group_configs_header + teams_header +
                    ['Enrollment Track', 'Verification Status'] + certificate_info_header
                )

            # Map each section label to the percent this student earned there.
            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')

            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')

            team_name = []
            if teams_enabled:
                try:
                    membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
                    team_name.append(membership.team.name)
                except CourseTeamMembership.DoesNotExist:
                    team_name.append('')

            enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
            verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
                student,
                course_id,
                enrollment_mode
            )
            certificate_info = certificate_info_for_user(
                student,
                course_id,
                gradeset['grade'],
                student.id in whitelisted_user_ids
            )

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset['percent']] +
                row_percents + cohorts_group_name + group_configs_group_names + team_name +
                [enrollment_mode] + [verification_status] + certificate_info
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and assignment that it belongs to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = OrderedDict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Generate a CSV of all student answers to a given problem in `course_id`
    and store it using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    task_progress = TaskProgress(action_name, 1, start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating students answers to problem'})

    # Compute the result table: one row per student response, with the
    # requested feature columns.
    problem_location = task_input.get('problem_location')
    student_data = list_problem_responses(course_id, problem_location)
    header, rows = format_dictlist(student_data, ['username', 'state'])
    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted
    rows.insert(0, header)

    upload_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=upload_step)

    # Sanitize the location so it is safe to embed in a file name.
    safe_location = re.sub(r'[:/]', '_', problem_location)
    upload_csv_to_report_store(rows, 'student_state_from_{}'.format(safe_location), course_id, start_date)
    return task_progress.update_task_state(extra_meta=upload_step)
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    Generate a CSV containing all students' problem grades within a given
    `course_id`.

    Each graded problem contributes an "(Earned)" and a "(Possible)" column;
    students who were never graded on a particular problem get 'N/A' in both.
    Students whose grading failed entirely go into a separate error report.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    # This struct encapsulates both the display names of each static item in the
    # header row as values as well as the django User field names of those items
    # as the keys. It is structured in this way to keep the values related.
    header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])

    try:
        course_structure = CourseStructure.objects.get(course_id=course_id)
        blocks = course_structure.ordered_blocks
        problems = _order_problems(blocks)
    except CourseStructure.DoesNotExist:
        # No cached course structure yet; bail out with a status message the UI can show.
        return task_progress.update_task_state(
            extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
        )

    # Just generate the static fields for now.
    rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
    error_rows = [list(header_row.values()) + ['error_msg']]
    current_step = {'step': 'Calculating Grades'}

    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Pull the static columns (id, email, username) off the User object.
        student_fields = [getattr(student, field_name) for field_name in header_row]
        task_progress.attempted += 1

        if 'percent' not in gradeset or 'raw_scores' not in gradeset:
            # There was an error grading this student.
            # Generally there will be a non-empty err_msg, but that is not always the case.
            if not err_msg:
                err_msg = u"Unknown error"
            error_rows.append(student_fields + [err_msg])
            task_progress.failed += 1
            continue

        final_grade = gradeset['percent']
        # Only consider graded problems
        problem_scores = {unicode(score.module_id): score for score, _ in gradeset['raw_scores'] if score.graded}
        earned_possible_values = list()
        for problem_id in problems:
            try:
                problem_score = problem_scores[problem_id]
                earned_possible_values.append([problem_score.earned, problem_score.possible])
            except KeyError:
                # The student has not been graded on this problem. For example,
                # iterate_grades_for skips problems that students have never
                # seen in order to speed up report generation. It could also be
                # the case that the student does not have access to it (e.g. A/B
                # test or cohorted courseware).
                earned_possible_values.append(['N/A', 'N/A'])
        rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))

        task_progress.succeeded += 1
        # Periodically update task status (this is a cache write).
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload if any students have been successfully graded
    if len(rows) > 1:
        upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
    # If there are any error rows, write them out as well
    if len(error_rows) > 1:
        upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)

    return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Generate a CSV of profile information for all students enrolled in
    `course_id` and store it using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating Profile Info'})

    # The task input is the list of profile features (columns) to export.
    query_features = task_input
    student_data = enrolled_students_features(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)
    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted
    rows.insert(0, header)

    upload_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=upload_step)
    upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)
    return task_progress.update_task_state(extra_meta=upload_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.

    Arguments:
        _xmodule_instance_args: dict of task metadata (only 'task_id' is read here)
        _entry_id: InstructorTask ID, used in log messages only
        course_id: ID of the course the report covers
        _task_input: task input, used in log messages only
        action_name: name under which task progress is recorded
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
    task_progress = TaskProgress(action_name, students_in_course.count(), start_time)

    # Build a log prefix identifying this task run.
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    # Display name map for the column headers.  This mapping is constant, so
    # build it once up front instead of rebuilding it for every student
    # inside the loop below.
    enrollment_report_headers = {
        'User ID': _('User ID'),
        'Username': _('Username'),
        'Full Name': _('Full Name'),
        'First Name': _('First Name'),
        'Last Name': _('Last Name'),
        'Company Name': _('Company Name'),
        'Title': _('Title'),
        'Language': _('Language'),
        'Year of Birth': _('Year of Birth'),
        'Gender': _('Gender'),
        'Level of Education': _('Level of Education'),
        'Mailing Address': _('Mailing Address'),
        'Goals': _('Goals'),
        'City': _('City'),
        'Country': _('Country'),
        'Enrollment Date': _('Enrollment Date'),
        'Currently Enrolled': _('Currently Enrolled'),
        'Enrollment Source': _('Enrollment Source'),
        'Manual (Un)Enrollment Reason': _('Manual (Un)Enrollment Reason'),
        'Enrollment Role': _('Enrollment Role'),
        'List Price': _('List Price'),
        'Payment Amount': _('Payment Amount'),
        'Coupon Codes Used': _('Coupon Codes Used'),
        'Registration Code Used': _('Registration Code Used'),
        'Payment Status': _('Payment Status'),
        'Transaction Reference Number': _('Transaction Reference Number')
    }

    # Loop over all our students and build our CSV lists in memory
    rows = []
    header = None
    current_step = {'step': 'Gathering Profile Information'}
    enrollment_report_provider = PaidCourseEnrollmentReportProvider()
    total_students = students_in_course.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_students
    )

    for student in students_in_course:
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 100 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_students
            )

        user_data = enrollment_report_provider.get_user_profile(student.id)
        course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
        payment_data = enrollment_report_provider.get_payment_info(student, course_id)

        if not header:
            # Emit the header row once, based on the first student's data keys.
            header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
            display_headers = []
            for header_element in header:
                # translate header into a localizable display string
                display_headers.append(enrollment_report_headers.get(header_element, header_element))
            rows.append(display_headers)

        rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
        task_progress.succeeded += 1

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Generate a CSV of information about students who may enroll in
    `course_id` but have not yet done so, and store it using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    task_progress = TaskProgress(action_name, 1, start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating info about students who may enroll'})

    # Compute the result table with the requested feature columns.
    query_features = task_input.get('features')
    student_data = list_may_enroll(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)
    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted
    rows.insert(0, header)

    upload_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=upload_step)
    upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)
    return task_progress.update_task_state(extra_meta=upload_step)
def get_executive_report(course_id):
    """
    Returns dict containing information about the course executive summary.

    Aggregates, for the given `course_id`: gross/paid/pending revenue, refund
    counts and amounts, seat counts split by purchase channel (self-purchased,
    bulk registration codes, invoiced), discount/registration-code usage, and
    basic course metadata (display name, start/end dates, currency).
    """
    # Gross paid revenue: single-seat purchases + bulk reg-code purchases
    # + invoices that have actually been paid.
    single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
    bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
    paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
    gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total

    all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
    # Pending revenue: invoiced amounts that have not been paid yet.
    gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
    gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)

    # Refund totals, split by self-purchased vs bulk-purchased seats.
    refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
        course_id, status='refunded'
    )
    refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
        course_id, status='refunded'
    )
    total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats

    self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
        course_id,
        status='refunded'
    )
    bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
    total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds

    # Discount-code usage statistics.
    top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
    total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)

    # Count bulk-purchased registration codes that were never redeemed.
    bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
    unused_registration_codes = 0
    for registration_code in bulk_purchased_codes:
        if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
            unused_registration_codes += 1

    # Seat counts and the percentage breakdown by purchase channel.
    self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
    bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
    total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
    total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats

    self_purchases_percentage = 0.0
    bulk_purchases_percentage = 0.0
    invoice_purchases_percentage = 0.0
    avg_price_paid = 0.0
    if total_seats != 0:
        # Guard against division by zero when no seats have been sold.
        self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
        bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
        invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
        avg_price_paid = gross_revenue / total_seats

    course = get_course_by_id(course_id, depth=0)
    currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]

    return {
        'display_name': course.display_name,
        'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
        'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
        'total_seats': total_seats,
        'currency': currency,
        'gross_revenue': float(gross_revenue),
        'gross_paid_revenue': float(gross_paid_revenue),
        'gross_pending_revenue': gross_pending_revenue,
        'total_seats_refunded': total_seats_refunded,
        'total_amount_refunded': float(total_amount_refunded),
        'average_paid_price': float(avg_price_paid),
        'discount_codes_data': top_discounted_codes,
        'total_seats_using_discount_codes': total_coupon_codes_purchases,
        'total_self_purchase_seats': self_purchased_seat_count,
        'total_bulk_purchase_seats': bulk_purchased_seat_count,
        'total_invoiced_seats': total_invoiced_seats,
        'unused_bulk_purchase_code_count': unused_registration_codes,
        'self_purchases_percentage': self_purchases_percentage,
        'bulk_purchases_percentage': bulk_purchases_percentage,
        'invoice_purchases_percentage': invoice_purchases_percentage,
    }
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    Generate an HTML executive summary report for `course_id`, providing a
    snapshot of how the course is doing, and store it using a `ReportStore`.
    """
    start_time = time()
    report_generation_date = datetime.now(UTC)
    status_interval = 100

    # Count enrollments, excluding staff and users holding filtered-out roles.
    enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
    true_enrollment_count = 0
    for user in enrolled_users:
        if not user.is_staff and not CourseAccessRole.objects.filter(
            user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
        ).exists():
            true_enrollment_count += 1

    task_progress = TaskProgress(action_name, true_enrollment_count, start_time)

    # Build a log prefix identifying this task run.
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    gather_step = {'step': 'Gathering executive summary report information'}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating executive summary report',
        task_info_string,
        action_name,
        gather_step
    )
    # NOTE(review): attempted starts at zero, so this condition always holds
    # and records exactly one progress update before the work begins.
    if task_progress.attempted % status_interval == 0:
        task_progress.update_task_state(extra_meta=gather_step)
    task_progress.attempted += 1

    # Gather the executive summary data and stamp it with generation metadata.
    data_dict = get_executive_report(course_id)
    data_dict.update(
        {
            'total_enrollments': true_enrollment_count,
            'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
        }
    )

    # By this point, we've got all the data needed to render the HTML report.
    upload_step = {'step': 'Uploading executive summary report HTML file'}
    task_progress.update_task_state(extra_meta=upload_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, upload_step)

    upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
    task_progress.succeeded += 1

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=upload_step)
def upload_course_survey_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV report containing the survey
    results for the course and upload it via the report store.

    The CSV has one row per user who answered at least one survey question,
    with columns for user id, username, email, and every distinct survey
    field name (sorted alphabetically).  Returns the final TaskProgress
    state from `update_task_state`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'Gathering course survey report information'}
    task_progress.update_task_state(extra_meta=current_step)
    # Collect the distinct survey field names for this course, sorted so the
    # column order of the generated CSV is deterministic.
    distinct_survey_fields_queryset = SurveyAnswer.objects.filter(course_key=course_id).values('field_name').distinct()
    survey_fields = sorted(
        unique_field_row['field_name'] for unique_field_row in distinct_survey_fields_queryset
    )
    # Group all answers by user.  `select_related('user')` avoids an extra
    # query per answer row when reading user attributes below.
    user_survey_answers = OrderedDict()
    survey_answers_for_course = SurveyAnswer.objects.filter(course_key=course_id).select_related('user')
    for survey_field_record in survey_answers_for_course:
        user_id = survey_field_record.user.id
        # Membership test on the dict itself is O(1); `.keys()` would build a
        # list first under Python 2 and make this loop quadratic.
        if user_id not in user_survey_answers:
            user_survey_answers[user_id] = {
                'username': survey_field_record.user.username,
                'email': survey_field_record.user.email
            }
        user_survey_answers[user_id][survey_field_record.field_name] = survey_field_record.field_value
    header = ["User ID", "User Name", "Email"]
    header.extend(survey_fields)
    csv_rows = []
    for user_id, answers in user_survey_answers.items():
        row = [user_id, answers.get('username', ''), answers.get('email', '')]
        # Leave a blank cell for any survey field this user did not answer.
        row.extend(answers.get(survey_field, '') for survey_field in survey_fields)
        csv_rows.append(row)
    task_progress.attempted = task_progress.succeeded = len(csv_rows)
    task_progress.skipped = task_progress.total - task_progress.attempted
    csv_rows.insert(0, header)
    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)
    # Perform the upload
    upload_csv_to_report_store(csv_rows, 'course_survey_results', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_proctored_exam_results_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=invalid-name
    """
    Build the proctored exam results CSV for `course_id` and push it to the
    report store, tracking progress in a TaskProgress record which is
    returned in its final state.
    """
    started_at = time()
    generated_on = datetime.now(UTC)
    progress = TaskProgress(action_name, 1, started_at)
    step = {'step': 'Calculating info about proctored exam results in a course'}
    progress.update_task_state(extra_meta=step)
    # Fetch the requested feature columns and format them as a CSV table.
    features = _task_input.get('features')
    exam_results = get_proctored_exam_results(course_id, features)
    header, data_rows = format_dictlist(exam_results, features)
    # Every formatted row counts as both attempted and succeeded; anything
    # short of the expected total is recorded as skipped.
    progress.attempted = progress.succeeded = len(data_rows)
    progress.skipped = progress.total - progress.attempted
    data_rows.insert(0, header)
    step = {'step': 'Uploading CSV'}
    progress.update_task_state(extra_meta=step)
    # Perform the upload
    upload_csv_to_report_store(data_rows, 'proctored_exam_results_report', course_id, generated_on)
    return progress.update_task_state(extra_meta=step)
def generate_students_certificates(
        _xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate certificates for only students present in 'students' key in task_input
    json column, otherwise generate certificates for all enrolled students.

    `task_input` keys read here:
      - 'student_set': 'all_whitelisted', 'whitelisted_not_generated' or
        'specific_student' narrows the enrolled-user queryset; any other
        value (including absent/None) keeps all enrolled users.
      - 'specific_student_id': only read when student_set == 'specific_student'.
      - 'statuses_to_regenerate': certificate statuses whose existing
        certificates should be invalidated and regenerated.

    Returns the final TaskProgress state from `update_task_state`.
    """
    start_time = time()
    students_to_generate_certs_for = CourseEnrollment.objects.users_enrolled_in(course_id)
    student_set = task_input.get('student_set')
    if student_set == 'all_whitelisted':
        # Generate Certificates for all white listed students.
        students_to_generate_certs_for = students_to_generate_certs_for.filter(
            certificatewhitelist__course_id=course_id,
            certificatewhitelist__whitelist=True
        )
    elif student_set == 'whitelisted_not_generated':
        # Whitelist students who did not get certificates already.
        students_to_generate_certs_for = students_to_generate_certs_for.filter(
            certificatewhitelist__course_id=course_id,
            certificatewhitelist__whitelist=True
        ).exclude(
            generatedcertificate__course_id=course_id,
            generatedcertificate__status__in=CertificateStatuses.PASSED_STATUSES
        )
    elif student_set == "specific_student":
        specific_student_id = task_input.get('specific_student_id')
        students_to_generate_certs_for = students_to_generate_certs_for.filter(id=specific_student_id)
    task_progress = TaskProgress(action_name, students_to_generate_certs_for.count(), start_time)
    current_step = {'step': 'Calculating students already have certificates'}
    task_progress.update_task_state(extra_meta=current_step)
    statuses_to_regenerate = task_input.get('statuses_to_regenerate', [])
    if student_set is not None and not statuses_to_regenerate:
        # We want to skip 'filtering students' only when students are given and statuses to regenerate are not
        students_require_certs = students_to_generate_certs_for
    else:
        students_require_certs = students_require_certificate(
            course_id, students_to_generate_certs_for, statuses_to_regenerate
        )
    if statuses_to_regenerate:
        # Mark existing generated certificates as 'unavailable' before regenerating
        # We need to call this method after "students_require_certificate" otherwise "students_require_certificate"
        # would return no results.
        invalidate_generated_certificates(course_id, students_to_generate_certs_for, statuses_to_regenerate)
    # Everyone selected but not requiring a certificate is counted as skipped.
    task_progress.skipped = task_progress.total - len(students_require_certs)
    current_step = {'step': 'Generating Certificates'}
    task_progress.update_task_state(extra_meta=current_step)
    course = modulestore().get_course(course_id, depth=0)
    # Generate certificate for each student
    for student in students_require_certs:
        task_progress.attempted += 1
        status = generate_user_certificates(
            student,
            course_id,
            course=course
        )
        # A passing certificate status counts as success; anything else fails.
        if CertificateStatuses.is_passing_status(status):
            task_progress.succeeded += 1
        else:
            task_progress.failed += 1
    return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Within a given course, cohort students in bulk, then upload the results
    using a `ReportStore`.

    `task_input['file_name']` names a CSV (opened via DefaultStorage) whose
    rows carry 'email' and/or 'username' plus 'cohort' columns.  The uploaded
    result CSV has one row per distinct cohort name seen in the input.
    Returns the final TaskProgress state from `update_task_state`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    # Iterate through rows to get total assignments for task progress
    # (this first pass over the file only counts rows; they are processed in
    # a second pass below).
    with DefaultStorage().open(task_input['file_name']) as f:
        total_assignments = 0
        for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
            total_assignments += 1
    task_progress = TaskProgress(action_name, total_assignments, start_time)
    current_step = {'step': 'Cohorting Students'}
    task_progress.update_task_state(extra_meta=current_step)
    # cohorts_status is a mapping from cohort_name to metadata about
    # that cohort. The metadata will include information about users
    # successfully added to the cohort, users not found, and a cached
    # reference to the corresponding cohort object to prevent
    # redundant cohort queries.
    cohorts_status = {}
    with DefaultStorage().open(task_input['file_name']) as f:
        for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
            # Try to use the 'email' field to identify the user. If it's not present, use 'username'.
            username_or_email = row.get('email') or row.get('username')
            cohort_name = row.get('cohort') or ''
            task_progress.attempted += 1
            # First time this cohort name is seen: initialize its status entry
            # and look the cohort up once, caching either the object or the
            # fact that it does not exist.
            if not cohorts_status.get(cohort_name):
                cohorts_status[cohort_name] = {
                    'Cohort Name': cohort_name,
                    'Students Added': 0,
                    'Students Not Found': set()
                }
                try:
                    cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
                        course_id=course_id,
                        group_type=CourseUserGroup.COHORT,
                        name=cohort_name
                    )
                    cohorts_status[cohort_name]["Exists"] = True
                except CourseUserGroup.DoesNotExist:
                    cohorts_status[cohort_name]["Exists"] = False
            # Rows naming a nonexistent cohort are counted as failures.
            if not cohorts_status[cohort_name]['Exists']:
                task_progress.failed += 1
                continue
            try:
                add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
                cohorts_status[cohort_name]['Students Added'] += 1
                task_progress.succeeded += 1
            except User.DoesNotExist:
                # No matching user: remember the identifier for the report.
                cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
                task_progress.failed += 1
            except ValueError:
                # Raised when the user is already in the given cohort
                task_progress.skipped += 1
            task_progress.update_task_state(extra_meta=current_step)
    current_step['step'] = 'Uploading CSV'
    task_progress.update_task_state(extra_meta=current_step)
    # Filter the output of `add_users_to_cohorts` in order to upload the result.
    output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
    output_rows = [
        [
            # 'Students Not Found' holds a set of identifiers; flatten it to a
            # comma-separated string for the CSV cell.
            ','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
            else status_dict[column_name]
            for column_name in output_header
        ]
        for _cohort_name, status_dict in cohorts_status.iteritems()
    ]
    output_rows.insert(0, output_header)
    upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students, statuses_to_regenerate=None):
    """
    Return the list of students that still need a certificate generated for
    `course_id`.

    With `statuses_to_regenerate` given, the result is the students whose
    existing generated certificate carries one of those statuses.  Without
    it, the result is every enrolled student who does not already hold a
    generated certificate (i.e. one with any status other than
    'unavailable').

    :param course_id:
    :param enrolled_students:
    :param statuses_to_regenerate:
    """
    if not statuses_to_regenerate:
        # Students already holding a real (non-'unavailable') certificate...
        certified_students = User.objects.filter(
            ~Q(generatedcertificate__status=CertificateStatuses.unavailable),
            generatedcertificate__course_id=course_id)
        # ...are excluded; everyone else still needs one.
        return list(set(enrolled_students) - set(certified_students))
    # Restrict to students whose existing certificate status is slated for
    # regeneration.  Materialize the queryset now so that subsequent updates
    # to the certificate table cannot change what this call returns.
    regeneration_candidates = enrolled_students.filter(
        generatedcertificate__course_id=course_id,
        generatedcertificate__status__in=statuses_to_regenerate
    )
    return list(regeneration_candidates)
def invalidate_generated_certificates(course_id, enrolled_students, certificate_statuses):  # pylint: disable=invalid-name
    """
    Invalidate generated certificates for the given students in `course_id`
    whose current certificate status is in `certificate_statuses`.

    Invalidation means resetting the status to 'unavailable' and blanking the
    verify_uuid, download_uuid, download_url and grade fields in a single
    bulk UPDATE.

    :param course_id: Course Key for the course whose generated certificates need to be removed
    :param enrolled_students: (queryset or list) students enrolled in the course
    :param certificate_statuses: certificates statuses for whom to remove generated certificate
    """
    GeneratedCertificate.objects.filter(  # pylint: disable=no-member
        user__in=enrolled_students,
        course_id=course_id,
        status__in=certificate_statuses,
    ).update(
        # Reset the certificate to an 'unavailable' state with no artifacts.
        status=CertificateStatuses.unavailable,
        verify_uuid='',
        download_uuid='',
        download_url='',
        grade='',
    )
def upload_ora2_data(
        _xmodule_instance_args, _entry_id, course_id, _task_input, action_name
):
    """
    Collect ora2 responses and upload them to S3 as a CSV.

    Returns UPDATE_STATUS_SUCCEEDED on success.  If collecting the ORA data
    raises, the exception is logged, the task is marked failed and
    UPDATE_STATUS_FAILED is returned instead.
    """
    start_date = datetime.now(UTC)
    start_time = time()
    num_attempted = 1
    num_total = 1
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    task_progress = TaskProgress(action_name, num_total, start_time)
    task_progress.attempted = num_attempted
    curr_step = {'step': "Collecting responses"}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s for all submissions',
        task_info_string,
        action_name,
        curr_step,
    )
    task_progress.update_task_state(extra_meta=curr_step)
    try:
        header, datarows = OraAggregateData.collect_ora2_data(course_id)
        # `datarows` may be any iterable; materialize it once behind the header.
        rows = [header] + list(datarows)
    except Exception:  # pylint: disable=broad-except
        # Update progress to failed regardless of error type: any failure to
        # collect the data means no report can be produced.
        TASK_LOG.exception('Failed to get ORA data.')
        task_progress.failed = 1
        curr_step = {'step': "Error while collecting data"}
        task_progress.update_task_state(extra_meta=curr_step)
        return UPDATE_STATUS_FAILED
    task_progress.succeeded = 1
    curr_step = {'step': "Uploading CSV"}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s',
        task_info_string,
        action_name,
        curr_step,
    )
    task_progress.update_task_state(extra_meta=curr_step)
    upload_csv_to_report_store(rows, 'ORA_data', course_id, start_date)
    curr_step = {'step': 'Finalizing ORA data report'}
    task_progress.update_task_state(extra_meta=curr_step)
    TASK_LOG.info(u'%s, Task type: %s, Upload complete.', task_info_string, action_name)
    return UPDATE_STATUS_SUCCEEDED
| agpl-3.0 |
hosseinmh/Django_learning | djmod/.venv/lib/python3.5/site-packages/django/contrib/sites/management.py | 242 | 1597 | """
Creates the default Site object.
"""
from django.apps import apps as global_apps
from django.conf import settings
from django.core.management.color import no_style
from django.db import DEFAULT_DB_ALIAS, connections, router
def create_default_site(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, apps=global_apps, **kwargs):
    """
    Ensure a default Site row exists in the `using` database, creating an
    "example.com" entry when the table is empty.
    """
    try:
        Site = apps.get_model('sites', 'Site')
    except LookupError:
        # The sites app's models are not available in this app registry.
        return
    if not router.allow_migrate_model(using, Site):
        return
    if Site.objects.using(using).exists():
        # A Site already exists; nothing to create.
        return

    # The default settings set SITE_ID = 1, and some tests in Django's test
    # suite rely on this value. However, if database sequences are reused
    # (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
    # the next id will be 1, so we coerce it. See #15573 and #16353. This
    # can also crop up outside of tests - see #15346.
    if verbosity >= 2:
        print("Creating example.com Site object")
    default_site = Site(pk=getattr(settings, 'SITE_ID', 1), domain="example.com", name="example.com")
    default_site.save(using=using)

    # Because an explicit pk was set instead of relying on auto-incrementation,
    # the database sequence must be reset to match. See #17415.
    reset_statements = connections[using].ops.sequence_reset_sql(no_style(), [Site])
    if not reset_statements:
        return
    if verbosity >= 2:
        print("Resetting sequence")
    with connections[using].cursor() as cursor:
        for statement in reset_statements:
            cursor.execute(statement)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.