code
stringlengths 1
199k
|
|---|
import sqlite3

# One-off bootstrap script: create the clarity.db schema.
connection = sqlite3.connect('clarity.db', check_same_thread=False)
cursor = connection.cursor()

# Accounts; username is the natural primary key.
cursor.execute('''CREATE TABLE users (gender TEXT, rank INTEGER, username TEXT PRIMARY KEY, password TEXT, name TEXT, voted TEXT, credits INTEGER)''')

# Answers, auto-numbered via aid; qid/username reference questions/users.
cursor.execute('''CREATE TABLE answers (upvotes INTEGER,
adate INTEGER,
aid INTEGER PRIMARY KEY AUTOINCREMENT,
atext TEXT,
qid INTEGER,
username TEXT)''')
# Seed rows, kept disabled:
# c.execute('''INSERT INTO answers (upvotes, adate, atext, qid, username)
# VALUES (1337, 1457822190, 'por que no los dos?', 1, 'admin')''')
# c.execute('''INSERT INTO answers (upvotes, adate, atext, qid, username)
# VALUES (-1337, 1457822193, 'asdf', 1, 'admin')''')

# Questions, auto-numbered via qid; minReq is the credit threshold to view.
cursor.execute('''CREATE TABLE questions (title TEXT,
qdate INTEGER,
qtext TEXT,
qid INTEGER PRIMARY KEY AUTOINCREMENT,
category TEXT,
username TEXT,
minReq INTEGER)''')

connection.commit()
connection.close()
|
import json
import os
from bson.objectid import ObjectId
from flask import Blueprint, Response, render_template, request
from app import mongo
def mount_web_path(folder):
    """Resolve *folder* against the project root (two directories above this file's package)."""
    package_dir = os.path.abspath(os.path.dirname(__file__))
    project_root = os.path.dirname(os.path.dirname(package_dir))
    return os.path.join(project_root, folder)
# Blueprint registered under the name 'godinez'; templates are served from
# the repo-level web/ folder and static assets from the built bundle web/dist.
main = Blueprint(
    'godinez',
    __name__,
    template_folder=mount_web_path(folder='web'),
    static_folder=mount_web_path(folder='web/dist')
)
@main.route('/')
def index():
    """Serve the application's entry page (index.html from the template folder)."""
    return render_template('index.html')
@main.route('/issues', methods=['GET'])
def list_issues():
    """Return every stored issue as a JSON list of {title, description}."""
    status = 200
    collection = mongo.db.issues
    try:
        payload = [
            {'title': doc['title'], 'description': doc['description']}
            for doc in collection.find()
        ]
        data = json.dumps(payload)
    except Exception as exc:
        status = 500
        data = json.dumps({
            'code': 500,
            'message': str(exc.args)
        })
    return Response(data, status=status, mimetype='application/json')
@main.route('/issues', methods=['POST'])
def save_issues():
    """Create a new issue from the JSON body {title, description}.

    Responds with the stored document on success; a JSON error payload with
    HTTP 500 when fields are missing/empty or the insert fails (status codes
    kept as-is for compatibility with existing clients).
    """
    status = 200
    issues = mongo.db.issues
    try:
        title = request.json['title']
        description = request.json['description']
        if title and description:
            # Collection.insert() was deprecated and removed from pymongo;
            # insert_one() is the supported API and exposes the new id.
            insert_result = issues.insert_one({'title': title, 'description': description})
            new_issue = issues.find_one({'_id': insert_result.inserted_id})
            result = {'title': new_issue['title'], 'description': new_issue['description']}
            data = json.dumps(result)
        else:
            data = json.dumps({
                'code': 500,
                'message': 'Dados da issues não informados corretamente'
            })
            status = 500
    except Exception as e:
        data = json.dumps({
            'code': 500,
            'message': str(e.args)
        })
        status = 500
    return Response(data, status=status, mimetype='application/json')
@main.route('/issues', methods=['PUT'])
def update_issues():
    """Update title/description of the issue identified by '_id' in the JSON body."""
    status = 200
    collection = mongo.db.issues
    try:
        payload = json.loads(request.data)
        if '_id' not in payload:
            status = 500
            data = json.dumps({
                'code': 500,
                'message': 'ID da Issue não informado'
            })
        else:
            collection.update_one(
                {'_id': ObjectId(payload['_id'])},
                {'$set': {'title': payload['title'],
                          'description': payload['description']}})
            data = json.dumps({'message': 'Issue atualizada com sucesso'})
    except Exception as exc:
        status = 500
        data = json.dumps({
            'code': 500,
            'message': str(exc.args)
        })
    return Response(data, status=status, mimetype='application/json')
@main.route('/issues/<id>', methods=['DELETE'])
def delete_issue(id):
    """Delete the issue whose ObjectId string is *id*.

    Returns a success message, or a 500 JSON payload when no document
    matched or the delete failed.
    """
    status = 200
    issues = mongo.db.issues
    data = {}
    try:
        if request.method == 'DELETE':
            result = issues.delete_one({'_id': ObjectId(id)})
            if result.deleted_count == 1:
                # fixed typo: 'delatada' -> 'deletada'
                data = json.dumps({'message': 'Issue deletada com sucesso'})
            else:
                data = json.dumps({
                    'code': 500,
                    # fixed mixed-language typo: 'Error' -> 'Erro'
                    'message': 'Erro ao deletar Issue'
                })
                status = 500
    except Exception as e:
        data = json.dumps({
            'code': 500,
            'message': str(e.args)
        })
        status = 500
    return Response(data, status=status, mimetype='application/json')
|
__author__ = 'James T. Dietrich'
__contact__ = 'james.t.dietrich@dartmouth.edu'
__copyright__ = '(c) James Dietrich 2016'
__license__ = 'MIT'
__date__ = 'Wed Nov 16 11:33:39 2016'
__version__ = '1.0'
__status__ = "initial release"
__url__ = "https://github.com/geojames/..."
"""
Name: Week6-3_Statistics.py
Compatibility: Python 3.5
Description: Worked examples of regression and model-fitting statistics
(scipy.stats.linregress, numpy.polyfit, pandas/statsmodels OLS,
scipy.optimize.curve_fit) with residual error metrics and plots
URL: https://github.com/geojames/...
Requires: libraries
Dev ToDo:
AUTHOR: James T. Dietrich
ORGANIZATION: Dartmouth College
Contact: james.t.dietrich@dartmouth.edu
Copyright: (c) James Dietrich 2016
"""
from scipy import stats
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# --- Linear regression with scipy.stats --------------------------------
# Noisy, roughly linear samples (20 points each).
x = np.linspace(1,15,20) + np.random.rand(20)
y = np.linspace(1,15,20) - np.random.rand(20)
slp, intercept, r_val, p_val, std_err = stats.linregress(x,y)
plt.scatter(x,y)
plt.plot(x,(slp*x+intercept))
# Annotate fitted line equation and R^2.
plt.text(8,4,r'$y = %0.3f x + %0.3f$' %(slp, intercept),fontsize=20)
plt.text(8.5,2,r'$R^2 = %0.4f$' %(r_val**2),fontsize=20)
# --- Same fit via numpy.polyfit (degree 1) -----------------------------
fit = np.polyfit(x,y,1)
fit_fn = np.poly1d(fit)
plt.plot(x,y, 'yo', x, fit_fn(x), '--k')
r = stats.pearsonr(y, fit_fn(x))
r_2 = r[0] ** 2
# Quadratic fit for comparison.
fit2 = np.polyfit(x,y,2)
plt.plot(x,y, 'yo', x, np.polyval(fit2,x), '--b')
r = stats.pearsonr(y, np.polyval(fit2,x))
r_2 = r[0] ** 2
# --- OLS on a date-indexed DataFrame -----------------------------------
dates = pd.date_range('1/1/2016', periods=20)
rows = np.vstack((np.linspace(1,15,20) + np.random.rand(20),\
np.linspace(1,15,20) - np.random.rand(20),\
np.linspace(1,15,20) / np.random.rand(20),\
np.linspace(1,15,20) * np.random.rand(20))).reshape((20,4))
data = pd.DataFrame(rows, index=dates, columns=['A', 'B', 'C', 'D'])
# NOTE(review): pandas.ols was deprecated in pandas 0.18 and removed in
# 0.20 — these calls fail on modern pandas; use statsmodels (below).
ols_model = pd.ols(y = data['A'], x = data['B'])
ols_model
ols_model.resid # residuals
ols_model.r2 # r^2 value
plt.plot(data.B, np.polyval(ols_model.beta,data.B),'r-')
plt.scatter(data.B, data.A)
predict = data.B*data.A
model = pd.ols(y=predict, x=data)
model
# --- statsmodels OLS ---------------------------------------------------
import statsmodels.api as sm
# NOTE(review): '**' binds tighter than '-', so this is linspace(...) - 25;
# if (x - 5)**2 was intended, parentheses are missing — confirm intent.
x = np.linspace(0, 10, 100) - 5 **2
X = sm.add_constant(x)
Y = x + 0.5 * np.random.normal(size=100)
model = sm.OLS(Y, X)
results = model.fit()
print(results.summary())
results.params
results.rsquared
results.resid
plt.scatter(x,Y)
plt.plot(x,results.fittedvalues,'r-',label='OLS')
# Design matrix with nonlinear terms: constant, x, cos(x), (x-5)^2.
xx = np.column_stack((np.ones(100),x, np.cos(x), (x-5)**2))
Y = np.cos(x) + 0.5 * np.random.normal(size=100)
model = sm.OLS(Y, xx)
results = model.fit()
print(results.summary())
plt.scatter(x,Y)
plt.plot(x,results.fittedvalues,'r-',label='OLS')
# Quadratic design matrix.
xx = np.column_stack((x**2, x, np.ones(100)))
Y = (x + 0.5 * np.random.normal(size=100)) ** 4
model = sm.OLS(Y, xx)
results = model.fit()
print(results.summary())
plt.scatter(x,Y)
plt.plot(x,results.fittedvalues,'r-',label='OLS')
from scipy import optimize
def expfit(x, p1, p2):
    """Exponential model p1 * e^(p2 * x), for scipy.optimize.curve_fit."""
    scale, rate = p1, p2
    return scale * np.exp(rate * x)
# Exponential curve fit; p0 seeds the optimizer near the expected scale.
popt, pcov = optimize.curve_fit(expfit, x, Y,p0=(3000,-0.2))
plt.plot(x, expfit(x,popt[0],popt[1]),'r-')
plt.scatter(x,Y)
plt.text(-22,350000,r'$%0.4f \times e^{%0.4f x}$' %(popt[0],popt[1]),fontsize=20)
# New 100-point data set with a logarithmic relationship plus noise.
x = np.linspace(2, 10, 100) * 10 + np.random.normal(size=100)
Y = np.log(x) * 5 + np.random.normal(size=100)
def logfit(x, p1, p2):
    """Logarithmic model p1 + p2 * ln(x), for scipy.optimize.curve_fit."""
    offset, slope = p1, p2
    return offset + slope * np.log(x)
popt, pcov = optimize.curve_fit(logfit, x, Y)
plt.plot(x, logfit(x,popt[0],popt[1]),'r-')
plt.scatter(x,Y)
plt.xlabel("Chipmonks per sq. meter",fontsize=15)
plt.ylabel("Cheetos required to feed them",fontsize=15)
plt.xlim(-0.5,110)
plt.tick_params(labelsize=15)
plt.grid()
plt.text(50,16,r'$%0.4f + %0.4f \times \ln{(x)}$' %(popt[0],popt[1]),fontsize=20)
# Same fit shown on log-log, semilog-x, and manually log-scaled axes.
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
ax1.loglog(x, logfit(x,popt[0],popt[1]))
ax1.loglog(x,Y,'bo')
ax1.grid(which='both')
ax1.tick_params(which='both',labelsize=15)
ax2.semilogx(x, logfit(x,popt[0],popt[1]))
ax2.semilogx(x,Y,'bo')
ax2.grid(which='both')
ax2.tick_params(which='both',labelsize=15)
# NOTE(review): the 'nonposx' keyword was deprecated/removed in newer
# matplotlib (renamed 'nonpositive') — confirm the pinned version.
ax3.set_xscale("log", nonposx='clip')
ax3.plot(x, logfit(x,popt[0],popt[1]),'b-')
ax3.scatter(x,Y,s=40, c='m', marker='*')
ax3.grid(which='both')
ax3.tick_params(which='both',labelsize=15)
# --- Residual analysis of the earlier quadratic fit --------------------
# NOTE(review): x was reassigned above to a 100-element array, while y
# still has the 20 elements from the first section — 'y_predict - y'
# raises a broadcasting ValueError as written.
y_predict = np.polyval(fit2,x)
resid = (y_predict - y)
plt.stem(x,resid)
plt.figure()
ax1 = plt.subplot2grid((4,1), (0,0), rowspan=3)
ax2 = plt.subplot2grid((4,1), (3,0))
ax1.scatter(x,y)
ax1.plot(x,np.polyval(fit2,x),'r--')
ax1.set_xlim(np.min(x)-1, np.max(x)+1)
ax1.set_xlabel([""])
ax2.stem(x,resid)
ax2.set_xlim(np.min(x)-1, np.max(x)+1)
# Summary error statistics of the residuals.
sse = np.sum(resid ** 2)
me = np.mean(resid)
mse = np.mean(resid ** 2);
rmse = np.sqrt(np.sum(resid**2)/len(resid))
|
import os
import sys

if __name__ == "__main__":
    # Default the settings module; an explicitly-set environment variable wins.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chills_pos.settings.main")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
import sys
import logging
from WordGraph import WordGraph
import networkx as nx
from networkx.readwrite import json_graph
import json
if __name__ == '__main__':
    # Build the word graph and export it in node-link JSON format.
    word_graph = WordGraph()
    graph = word_graph.get_graph()
    with open('output/generated.json', 'w') as handle:
        handle.write(json.dumps(json_graph.node_link_data(graph)))
    # Alternative GEXF export, kept disabled:
    # nx.write_gexf(graph, 'output/generated.gexf')
|
import openpnm as op
import numpy as _np
from numpy.testing import assert_approx_equal
class HydraulicConductanceTest:
    """Regression checks for openpnm hydraulic-conductance physics models.

    Uses a 5x5x5 cubic network with a single geometry/phase/physics stack;
    the 'desired' values are fixed regression numbers for this geometry.
    """
    def setup_class(self):
        # Small cubic network with uniform pore/throat sizes.
        self.net = op.network.Cubic(shape=[5, 5, 5], spacing=1.0)
        self.geo = op.geometry.GenericGeometry(network=self.net,
                                               pores=self.net.Ps,
                                               throats=self.net.Ts)
        self.geo['pore.diameter'] = 1.0
        self.geo['throat.diameter'] = 0.5
        self.geo['pore.area'] = 1.0
        self.geo['throat.area'] = 0.5
        self.phase = op.phases.GenericPhase(network=self.net)
        self.phase['pore.viscosity'] = 1e-5
        self.phys = op.physics.GenericPhysics(network=self.net,
                                              phase=self.phase,
                                              geometry=self.geo)
    def teardown_class(self):
        # Clear the shared workspace so runs stay isolated.
        mgr = op.Workspace()
        mgr.clear()
    def test_hagen_poiseuille(self):
        self.geo['throat.conduit_lengths.pore1'] = 0.25
        self.geo['throat.conduit_lengths.throat'] = 0.6
        self.geo['throat.conduit_lengths.pore2'] = 0.15
        mod = op.models.physics.hydraulic_conductance.hagen_poiseuille
        self.phys.add_model(propname='throat.hydraulic_conductance', model=mod)
        actual = self.phys['throat.hydraulic_conductance'].mean()
        assert_approx_equal(actual, desired=1421.0262776)
    def test_hagen_poiseuille_2D(self):
        self.geo['throat.conduit_lengths.pore1'] = 0.25
        self.geo['throat.conduit_lengths.throat'] = 0.6
        self.geo['throat.conduit_lengths.pore2'] = 0.15
        mod = op.models.physics.hydraulic_conductance.hagen_poiseuille_2D
        self.phys.add_model(propname='throat.hydraulic_conductance', model=mod)
        actual = self.phys['throat.hydraulic_conductance'].mean()
        assert_approx_equal(actual, desired=1602.564)
    def test_hagen_poiseuille_zero_length_throat(self):
        # Degenerate conduit: the throat contributes no length.
        self.geo['throat.conduit_lengths.pore1'] = 0.25
        self.geo['throat.conduit_lengths.throat'] = 0.0
        self.geo['throat.conduit_lengths.pore2'] = 0.15
        mod = op.models.physics.hydraulic_conductance.hagen_poiseuille
        self.phys.add_model(propname='throat.hydraulic_conductance',
                            model=mod)
        actual = self.phys['throat.hydraulic_conductance'].mean()
        assert_approx_equal(actual, desired=9947.1839)
    def test_classic_hagen_poiseuille(self):
        # Overrides the shared geometry and swaps the phase for Air.
        self.geo['pore.diameter'] = 1.0
        self.geo['throat.diameter'] = 1.0
        self.geo['throat.length'] = 1.0e-9
        self.air = op.phases.Air(network=self.net)
        self.phys = op.physics.GenericPhysics(network=self.net,
                                              phase=self.air,
                                              geometry=self.geo)
        mod = op.models.physics.hydraulic_conductance.classic_hagen_poiseuille
        self.phys.add_model(propname='throat.conductance',
                            model=mod)
        assert _np.allclose(a=self.phys['throat.conductance'][0],
                            b=1330.68207684)
    def test_valvatne_blunt(self):
        self.phase = op.phases.GenericPhase(network=self.net)
        self.phase['pore.viscosity'] = 1e-5
        self.phys = op.physics.GenericPhysics(network=self.net,
                                              phase=self.phase,
                                              geometry=self.geo)
        mod = op.models.physics.hydraulic_conductance.valvatne_blunt
        # Shape factor of an equilateral triangle cross-section.
        sf = _np.sqrt(3)/36.0
        self.geo['pore.shape_factor'] = _np.ones(self.geo.Np)*sf
        self.geo['throat.shape_factor'] = _np.ones(self.geo.Nt)*sf
        self.phys.add_model(propname='throat.valvatne_conductance', model=mod)
        actual = self.phys['throat.valvatne_conductance'].mean()
        desired = 1030.9826 # This is the old value
        desired = 7216.8783 # This is what it gets now
        assert_approx_equal(actual, desired=desired)
if __name__ == '__main__':
    # Ad-hoc runner: execute every test_* method without pytest.
    t = HydraulicConductanceTest()
    self = t  # module-level alias, handy when stepping through interactively
    t.setup_class()
    for name in t.__dir__():
        if name.startswith('test'):
            print('running test: ' + name)
            t.__getattribute__(name)()
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMSO4000 import *
class tektronixMSO4034B(tektronixMSO4000):
    "Tektronix MSO4034B IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # setdefault lets a subclass that already set _instrument_id win.
        self.__dict__.setdefault('_instrument_id', 'MSO4034B')
        super(tektronixMSO4034B, self).__init__(*args, **kwargs)
        # Model-specific capabilities.
        self._analog_channel_count = 4
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 350e6
        # Rebuild the channel list now that the counts are set.
        self._init_channels()
|
import hashlib
from typing import List, Tuple, TYPE_CHECKING, Optional, Union, Sequence
import enum
from enum import IntEnum, Enum
from .util import bfh, bh2u, BitcoinException, assert_bytes, to_bytes, inv_dict, is_hex_str
from . import version
from . import segwit_addr
from . import constants
from . import ecc
from .crypto import sha256d, sha256, hash_160, hmac_oneshot
if TYPE_CHECKING:
from .network import Network
# Confirmations required before a coinbase output is spendable.
COINBASE_MATURITY = 100
# Satoshis per whole coin.
COIN = 100000000
# Supply cap (in whole coins) used for sanity checks.
TOTAL_COIN_SUPPLY_LIMIT_IN_BTC = 105120000
# nLockTime bounds; values up to NLOCKTIME_BLOCKHEIGHT_MAX are interpreted
# as block heights, larger ones as UNIX timestamps.
NLOCKTIME_MIN = 0
NLOCKTIME_BLOCKHEIGHT_MAX = 500_000_000 - 1
NLOCKTIME_MAX = 2 ** 32 - 1
# Legacy output-type tags.
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
class opcodes(IntEnum):
    """Bitcoin script opcodes.

    Values are fixed by consensus; do not change them. Aliases (OP_FALSE,
    OP_TRUE, OP_NOP2/3) mirror Bitcoin Core's naming.
    """
    # push value
    OP_0 = 0x00
    OP_FALSE = OP_0
    OP_PUSHDATA1 = 0x4c
    OP_PUSHDATA2 = 0x4d
    OP_PUSHDATA4 = 0x4e
    OP_1NEGATE = 0x4f
    OP_RESERVED = 0x50
    OP_1 = 0x51
    OP_TRUE = OP_1
    OP_2 = 0x52
    OP_3 = 0x53
    OP_4 = 0x54
    OP_5 = 0x55
    OP_6 = 0x56
    OP_7 = 0x57
    OP_8 = 0x58
    OP_9 = 0x59
    OP_10 = 0x5a
    OP_11 = 0x5b
    OP_12 = 0x5c
    OP_13 = 0x5d
    OP_14 = 0x5e
    OP_15 = 0x5f
    OP_16 = 0x60
    # control
    OP_NOP = 0x61
    OP_VER = 0x62
    OP_IF = 0x63
    OP_NOTIF = 0x64
    OP_VERIF = 0x65
    OP_VERNOTIF = 0x66
    OP_ELSE = 0x67
    OP_ENDIF = 0x68
    OP_VERIFY = 0x69
    OP_RETURN = 0x6a
    # stack ops
    OP_TOALTSTACK = 0x6b
    OP_FROMALTSTACK = 0x6c
    OP_2DROP = 0x6d
    OP_2DUP = 0x6e
    OP_3DUP = 0x6f
    OP_2OVER = 0x70
    OP_2ROT = 0x71
    OP_2SWAP = 0x72
    OP_IFDUP = 0x73
    OP_DEPTH = 0x74
    OP_DROP = 0x75
    OP_DUP = 0x76
    OP_NIP = 0x77
    OP_OVER = 0x78
    OP_PICK = 0x79
    OP_ROLL = 0x7a
    OP_ROT = 0x7b
    OP_SWAP = 0x7c
    OP_TUCK = 0x7d
    # splice ops
    OP_CAT = 0x7e
    OP_SUBSTR = 0x7f
    OP_LEFT = 0x80
    OP_RIGHT = 0x81
    OP_SIZE = 0x82
    # bit logic
    OP_INVERT = 0x83
    OP_AND = 0x84
    OP_OR = 0x85
    OP_XOR = 0x86
    OP_EQUAL = 0x87
    OP_EQUALVERIFY = 0x88
    OP_RESERVED1 = 0x89
    OP_RESERVED2 = 0x8a
    # numeric
    OP_1ADD = 0x8b
    OP_1SUB = 0x8c
    OP_2MUL = 0x8d
    OP_2DIV = 0x8e
    OP_NEGATE = 0x8f
    OP_ABS = 0x90
    OP_NOT = 0x91
    OP_0NOTEQUAL = 0x92
    OP_ADD = 0x93
    OP_SUB = 0x94
    OP_MUL = 0x95
    OP_DIV = 0x96
    OP_MOD = 0x97
    OP_LSHIFT = 0x98
    OP_RSHIFT = 0x99
    OP_BOOLAND = 0x9a
    OP_BOOLOR = 0x9b
    OP_NUMEQUAL = 0x9c
    OP_NUMEQUALVERIFY = 0x9d
    OP_NUMNOTEQUAL = 0x9e
    OP_LESSTHAN = 0x9f
    OP_GREATERTHAN = 0xa0
    OP_LESSTHANOREQUAL = 0xa1
    OP_GREATERTHANOREQUAL = 0xa2
    OP_MIN = 0xa3
    OP_MAX = 0xa4
    OP_WITHIN = 0xa5
    # crypto
    OP_RIPEMD160 = 0xa6
    OP_SHA1 = 0xa7
    OP_SHA256 = 0xa8
    OP_HASH160 = 0xa9
    OP_HASH256 = 0xaa
    OP_CODESEPARATOR = 0xab
    OP_CHECKSIG = 0xac
    OP_CHECKSIGVERIFY = 0xad
    OP_CHECKMULTISIG = 0xae
    OP_CHECKMULTISIGVERIFY = 0xaf
    # expansion
    OP_NOP1 = 0xb0
    OP_CHECKLOCKTIMEVERIFY = 0xb1
    OP_NOP2 = OP_CHECKLOCKTIMEVERIFY
    OP_CHECKSEQUENCEVERIFY = 0xb2
    OP_NOP3 = OP_CHECKSEQUENCEVERIFY
    OP_NOP4 = 0xb3
    OP_NOP5 = 0xb4
    OP_NOP6 = 0xb5
    OP_NOP7 = 0xb6
    OP_NOP8 = 0xb7
    OP_NOP9 = 0xb8
    OP_NOP10 = 0xb9
    OP_INVALIDOPCODE = 0xff
    def hex(self) -> str:
        """Return this opcode as a two-character hex string."""
        return bytes([self]).hex()
def rev_hex(s: str) -> str:
    """Return the hex string *s* with its byte order reversed (endianness flip)."""
    flipped = bfh(s)[::-1]
    return bh2u(flipped)
def int_to_hex(i: int, length: int=1) -> str:
    """Converts int to little-endian hex string.
    `length` is the number of bytes available
    """
    if not isinstance(i, int):
        raise TypeError('{} instead of int'.format(i))
    range_size = pow(256, length)
    if i < -(range_size//2) or i >= range_size:
        raise OverflowError('cannot convert int {} to hex ({} bytes)'.format(i, length))
    if i < 0:
        # two's complement
        i = range_size + i
    # format() never appends Python 2's 'L' suffix, so the old
    # rstrip('L') workaround is no longer needed.
    s = format(i, 'x').zfill(2 * length)
    return rev_hex(s)
def script_num_to_hex(i: int) -> str:
    """See CScriptNum in Bitcoin Core.
    Encodes an integer as hex, to be used in script.
    ported from https://github.com/bitcoin/bitcoin/blob/8cbc5c4be4be22aca228074f087a374a7ec38be8/src/script/script.h#L326
    """
    if i == 0:
        # zero is encoded as the empty byte vector
        return ''
    result = bytearray()
    neg = i < 0
    absvalue = abs(i)
    # little-endian magnitude bytes
    while absvalue > 0:
        result.append(absvalue & 0xff)
        absvalue >>= 8
    if result[-1] & 0x80:
        # top bit set would read back as negative: add an explicit sign byte
        result.append(0x80 if neg else 0x00)
    elif neg:
        # fold the sign into the top bit of the most significant byte
        result[-1] |= 0x80
    return bh2u(result)
def var_int(i: int) -> str:
    """Serialize *i* as a Bitcoin "CompactSize" unsigned integer (hex).

    https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
    https://github.com/bitcoin/bitcoin/blob/efe1ee0d8d7f82150789f1f6840f139289628a2b/src/serialize.h#L247
    """
    assert i >= 0, i
    if i < 0xfd:
        return int_to_hex(i)
    if i <= 0xffff:
        return "fd" + int_to_hex(i, 2)
    if i <= 0xffffffff:
        return "fe" + int_to_hex(i, 4)
    return "ff" + int_to_hex(i, 8)
def witness_push(item: str) -> str:
    """Returns data in the form it should be present in the witness.
    hex -> hex
    """
    length_prefix = var_int(len(item) // 2)  # length in bytes; item is hex
    return length_prefix + item
def _op_push(i: int) -> str:
    """Return the hex opcode (plus operand) that pushes *i* bytes of data."""
    if i < opcodes.OP_PUSHDATA1:
        return int_to_hex(i)
    if i <= 0xff:
        return opcodes.OP_PUSHDATA1.hex() + int_to_hex(i, 1)
    if i <= 0xffff:
        return opcodes.OP_PUSHDATA2.hex() + int_to_hex(i, 2)
    return opcodes.OP_PUSHDATA4.hex() + int_to_hex(i, 4)
def push_script(data: str) -> str:
    """Returns pushed data to the script, automatically
    choosing canonical opcodes depending on the length of the data.
    hex -> hex
    ported from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptbuilder.go#L128
    """
    raw = bfh(data)
    n = len(raw)
    # "small integer" opcodes encode the value in the opcode itself
    if n == 0 or (n == 1 and raw[0] == 0):
        return opcodes.OP_0.hex()
    if n == 1 and raw[0] <= 16:
        return bh2u(bytes([opcodes.OP_1 - 1 + raw[0]]))
    if n == 1 and raw[0] == 0x81:
        return opcodes.OP_1NEGATE.hex()
    return _op_push(n) + bh2u(raw)
def make_op_return(x: bytes) -> bytes:
    """Build an OP_RETURN output script embedding the data *x*."""
    pushed = bytes.fromhex(push_script(x.hex()))
    return bytes([opcodes.OP_RETURN]) + pushed
def add_number_to_script(i: int) -> bytes:
    """Encode integer *i* as CScriptNum bytes, wrapped in a canonical push."""
    encoded = script_num_to_hex(i)
    return bfh(push_script(encoded))
def construct_witness(items: Sequence[Union[str, int, bytes]]) -> str:
    """Constructs a witness from the given stack items."""
    parts = [var_int(len(items))]
    for stack_item in items:
        if type(stack_item) is int:
            stack_item = script_num_to_hex(stack_item)
        elif isinstance(stack_item, (bytes, bytearray)):
            stack_item = bh2u(stack_item)
        else:
            assert is_hex_str(stack_item)
        parts.append(witness_push(stack_item))
    return ''.join(parts)
def construct_script(items: Sequence[Union[str, int, bytes, opcodes]]) -> str:
    """Constructs bitcoin script from given items."""
    pieces = []
    for element in items:
        # opcodes must be checked before int: an opcode IS an int (IntEnum)
        if isinstance(element, opcodes):
            pieces.append(element.hex())
        elif type(element) is int:
            pieces.append(add_number_to_script(element).hex())
        elif isinstance(element, (bytes, bytearray)):
            pieces.append(push_script(element.hex()))
        elif isinstance(element, str):
            assert is_hex_str(element)
            pieces.append(push_script(element))
        else:
            raise Exception(f'unexpected item for script: {element!r}')
    return ''.join(pieces)
def relayfee(network: 'Network' = None) -> int:
    """Returns feerate in sat/kbyte."""
    from .simple_config import FEERATE_DEFAULT_RELAY, FEERATE_MAX_RELAY
    if network and network.relay_fee is not None:
        fee = network.relay_fee
    else:
        fee = FEERATE_DEFAULT_RELAY
    # sanity safeguards, as network.relay_fee is coming from a server:
    return max(min(fee, FEERATE_MAX_RELAY), FEERATE_DEFAULT_RELAY)
# Dust limits in satoshis, per output script type: outputs at or below
# these values are treated as uneconomical.
DUST_LIMIT_P2PKH = 546
DUST_LIMIT_P2SH = 540
DUST_LIMIT_UNKNOWN_SEGWIT = 354
DUST_LIMIT_P2WSH = 330
DUST_LIMIT_P2WPKH = 294
def dust_threshold(network: 'Network' = None) -> int:
    """Returns the dust limit in satoshis."""
    # Change <= dust threshold is added to the tx fee
    dust_lim_msat = 182 * 3 * relayfee(network)  # in msat
    # convert to sat, rounding up
    sat, remainder = divmod(dust_lim_msat, 1000)
    return sat + (1 if remainder else 0)
def hash_encode(x: bytes) -> str:
    """Hex-encode *x* with byte order reversed (display convention)."""
    reversed_bytes = x[::-1]
    return bh2u(reversed_bytes)
def hash_decode(x: str) -> bytes:
    """Inverse of hash_encode: hex-decode and reverse byte order."""
    decoded = bfh(x)
    return decoded[::-1]
def hash160_to_b58_address(h160: bytes, addrtype: int) -> str:
    """Base58Check-encode a hash160 with the given version byte."""
    payload = bytes([addrtype]) + h160
    checksum = sha256d(payload)[0:4]
    return base_encode(payload + checksum, base=58)
def b58_address_to_hash160(addr: str) -> Tuple[int, bytes]:
    """Decode a base58 address into (version byte, 20-byte hash160)."""
    raw = DecodeBase58Check(to_bytes(addr, 'ascii'))
    if len(raw) != 21:
        raise Exception(f'expected 21 payload bytes in base58 address. got: {len(raw)}')
    return raw[0], raw[1:21]
def hash160_to_p2pkh(h160: bytes, *, net=None) -> str:
    """Base58 p2pkh address for a hash160."""
    if net is None:
        net = constants.net
    return hash160_to_b58_address(h160, net.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160: bytes, *, net=None) -> str:
    """Base58 p2sh address for a script hash160."""
    if net is None:
        net = constants.net
    return hash160_to_b58_address(h160, net.ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key: bytes, *, net=None) -> str:
    """p2pkh address of a serialized public key."""
    if net is None:
        net = constants.net
    return hash160_to_p2pkh(hash_160(public_key), net=net)
def hash_to_segwit_addr(h: bytes, witver: int, *, net=None) -> str:
    """Bech32 address for a witness program *h* with version *witver*."""
    if net is None:
        net = constants.net
    addr = segwit_addr.encode_segwit_address(net.SEGWIT_HRP, witver, h)
    assert addr is not None
    return addr
def public_key_to_p2wpkh(public_key: bytes, *, net=None) -> str:
    """Native segwit v0 address of a serialized public key."""
    if net is None:
        net = constants.net
    return hash_to_segwit_addr(hash_160(public_key), witver=0, net=net)
def script_to_p2wsh(script: str, *, net=None) -> str:
    """p2wsh address of a witness script (hex)."""
    if net is None:
        net = constants.net
    return hash_to_segwit_addr(sha256(bfh(script)), witver=0, net=net)
def p2wpkh_nested_script(pubkey: str) -> str:
    """Redeem script (hex) for p2wpkh wrapped in p2sh: OP_0 <pkh>."""
    pkh = hash_160(bfh(pubkey))
    return construct_script([0, pkh])
def p2wsh_nested_script(witness_script: str) -> str:
    """Redeem script (hex) for p2wsh wrapped in p2sh: OP_0 <sha256(ws)>."""
    wsh = sha256(bfh(witness_script))
    return construct_script([0, wsh])
def pubkey_to_address(txin_type: str, pubkey: str, *, net=None) -> str:
    """Derive the receiving address of *pubkey* (hex) for the given script type."""
    if net is None:
        net = constants.net
    if txin_type == 'p2pkh':
        return public_key_to_p2pkh(bfh(pubkey), net=net)
    if txin_type == 'p2wpkh':
        return public_key_to_p2wpkh(bfh(pubkey), net=net)
    if txin_type == 'p2wpkh-p2sh':
        nested = p2wpkh_nested_script(pubkey)
        return hash160_to_p2sh(hash_160(bfh(nested)), net=net)
    raise NotImplementedError(txin_type)
def redeem_script_to_address(txin_type: str, scriptcode: str, *, net=None) -> str:
    """Derive the deposit address for a redeem/witness script (hex)."""
    if net is None:
        net = constants.net
    if txin_type == 'p2sh':
        # given scriptcode is a redeem_script
        return hash160_to_p2sh(hash_160(bfh(scriptcode)), net=net)
    if txin_type == 'p2wsh':
        # given scriptcode is a witness_script
        return script_to_p2wsh(scriptcode, net=net)
    if txin_type == 'p2wsh-p2sh':
        # witness_script wrapped inside p2sh
        nested_redeem = p2wsh_nested_script(scriptcode)
        return hash160_to_p2sh(hash_160(bfh(nested_redeem)), net=net)
    raise NotImplementedError(txin_type)
def script_to_address(script: str, *, net=None) -> str:
    """Map an output script (hex) back to its address representation."""
    # local import avoids a module-level import cycle with transaction.py
    from .transaction import get_address_from_output_script
    return get_address_from_output_script(bfh(script), net=net)
def address_to_script(addr: str, *, net=None) -> str:
    """Return the scriptPubKey (hex) paying to *addr*."""
    if net is None:
        net = constants.net
    if not is_address(addr, net=net):
        raise BitcoinException(f"invalid bitcoin address: {addr}")
    witver, witprog = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, addr)
    if witprog is not None:
        # native segwit: OP_n followed by the witness program
        if not (0 <= witver <= 16):
            raise BitcoinException(f'impossible witness version: {witver}')
        return construct_script([witver, bytes(witprog)])
    addrtype, h160 = b58_address_to_hash160(addr)
    if addrtype == net.ADDRTYPE_P2PKH:
        return pubkeyhash_to_p2pkh_script(bh2u(h160))
    if addrtype in [net.ADDRTYPE_P2SH, net.ADDRTYPE_P2SH_ALT]:
        return construct_script([opcodes.OP_HASH160, h160, opcodes.OP_EQUAL])
    raise BitcoinException(f'unknown address type: {addrtype}')
class OnchainOutputType(Enum):
    """Opaque types of scriptPubKeys.
    In case of p2sh, p2wsh and similar, no knowledge of redeem script, etc.
    """
    P2PKH = enum.auto()
    P2SH = enum.auto()
    # segwit version-0 witness programs, distinguished by program length
    WITVER0_P2WPKH = enum.auto()
    WITVER0_P2WSH = enum.auto()
def address_to_hash(addr: str, *, net=None) -> Tuple[OnchainOutputType, bytes]:
    """Return (type, pubkey hash / witness program) for an address."""
    if net is None:
        net = constants.net
    if not is_address(addr, net=net):
        raise BitcoinException(f"invalid bitcoin address: {addr}")
    witver, witprog = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, addr)
    if witprog is not None:
        if witver != 0:
            raise BitcoinException(f"not implemented handling for witver={witver}")
        program = bytes(witprog)
        if len(program) == 20:
            return OnchainOutputType.WITVER0_P2WPKH, program
        if len(program) == 32:
            return OnchainOutputType.WITVER0_P2WSH, program
        raise BitcoinException(f"unexpected length for segwit witver=0 witprog: len={len(program)}")
    addrtype, h160 = b58_address_to_hash160(addr)
    if addrtype == net.ADDRTYPE_P2PKH:
        return OnchainOutputType.P2PKH, h160
    if addrtype == net.ADDRTYPE_P2SH:
        return OnchainOutputType.P2SH, h160
    raise BitcoinException(f"unknown address type: {addrtype}")
def address_to_scripthash(addr: str, *, net=None) -> str:
    """Electrum-protocol scripthash of the script paying to *addr*."""
    return script_to_scripthash(address_to_script(addr, net=net))
def script_to_scripthash(script: str) -> str:
    """sha256 of the script, byte-reversed, hex-encoded (Electrum protocol)."""
    digest = sha256(bfh(script))[0:32]
    return bh2u(digest[::-1])
def public_key_to_p2pk_script(pubkey: str) -> str:
    """Build a pay-to-pubkey script: <pubkey> OP_CHECKSIG."""
    return construct_script([pubkey, opcodes.OP_CHECKSIG])
def pubkeyhash_to_p2pkh_script(pubkey_hash160: str) -> str:
    """Build the canonical p2pkh script for a hash160 (hex)."""
    return construct_script([
        opcodes.OP_DUP, opcodes.OP_HASH160,
        pubkey_hash160,
        opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG,
    ])
# Alphabets for the two base-N encodings used here: Bitcoin's base58
# (omits 0/O/I/l to avoid visual ambiguity) and base43 (used for compact
# QR-code transaction encoding).
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
# Root of this module's decode-error hierarchy.
class BaseDecodeError(BitcoinException): pass
def base_encode(v: bytes, *, base: int) -> str:
    """Encode the byte string *v* in base58 (or base43)."""
    assert_bytes(v)
    if base not in (58, 43):
        raise ValueError('not supported base: {}'.format(base))
    chars = __b43chars if base == 43 else __b58chars
    # interpret v as a big-endian integer
    acc = 0
    place = 1
    for byte in v[::-1]:
        acc += place * byte
        place <<= 8
    # emit digits least-significant first
    encoded = bytearray()
    while acc >= base:
        acc, digit = divmod(acc, base)
        encoded.append(chars[digit])
    encoded.append(chars[acc])
    # Bitcoin's leading-zero compression: each leading 0x00 byte becomes
    # one leading first-alphabet character ('1' for base58)
    pad = 0
    for byte in v:
        if byte != 0x00:
            break
        pad += 1
    encoded.extend([chars[0]] * pad)
    encoded.reverse()
    return encoded.decode('ascii')
def base_decode(v: Union[bytes, str], *, base: int, length: int = None) -> Optional[bytes]:
    """Decode a base58/base43 string; returns None if *length* is given
    and the decoded payload has a different size.
    """
    v = to_bytes(v, 'ascii')
    if base not in (58, 43):
        raise ValueError('not supported base: {}'.format(base))
    chars = __b43chars if base == 43 else __b58chars
    # accumulate the big integer value of the digit string
    acc = 0
    place = 1
    for c in v[::-1]:
        digit = chars.find(bytes([c]))
        if digit == -1:
            raise BaseDecodeError('Forbidden character {} for base {}'.format(c, base))
        acc += digit * place
        place *= base
    # emit bytes least-significant first
    decoded = bytearray()
    while acc >= 256:
        acc, low_byte = divmod(acc, 256)
        decoded.append(low_byte)
    decoded.append(acc)
    # leading first-alphabet characters map back to leading zero bytes
    pad = 0
    for c in v:
        if c != chars[0]:
            break
        pad += 1
    decoded.extend(b'\x00' * pad)
    if length is not None and len(decoded) != length:
        return None
    decoded.reverse()
    return bytes(decoded)
class InvalidChecksum(BaseDecodeError):
    pass
def EncodeBase58Check(vchIn: bytes) -> str:
    """Append a 4-byte sha256d checksum to *vchIn* and base58-encode."""
    checksum = sha256d(vchIn)
    return base_encode(vchIn + checksum[0:4], base=58)
def DecodeBase58Check(psz: Union[bytes, str]) -> bytes:
    """Base58-decode *psz*, verify and strip its 4-byte checksum."""
    decoded = base_decode(psz, base=58)
    payload, csum_found = decoded[0:-4], decoded[-4:]
    csum_calculated = sha256d(payload)[0:4]
    if csum_calculated != csum_found:
        raise InvalidChecksum(f'calculated {bh2u(csum_calculated)}, found {bh2u(csum_found)}')
    return payload
# Script-type tag added to the WIF version byte by serialize_privkey
# (electrum's extended-WIF scheme); the inverse map recovers the type.
WIF_SCRIPT_TYPES = {
    'p2pkh':0,
    'p2wpkh':1,
    'p2wpkh-p2sh':2,
    'p2sh':5,
    'p2wsh':6,
    'p2wsh-p2sh':7
}
WIF_SCRIPT_TYPES_INV = inv_dict(WIF_SCRIPT_TYPES)
def is_segwit_script_type(txin_type: str) -> bool:
    """True iff inputs of this script type spend via the segwit witness."""
    return txin_type in {'p2wpkh', 'p2wpkh-p2sh', 'p2wsh', 'p2wsh-p2sh'}
def serialize_privkey(secret: bytes, compressed: bool, txin_type: str, *,
                      internal_use: bool = False) -> str:
    """Encode a raw secret as WIF; externally the script type is prepended
    as a "type:wif" prefix, internally it is folded into the version byte.
    """
    # we only export secrets inside curve range
    secret = ecc.ECPrivkey.normalize_secret_bytes(secret)
    if internal_use:
        prefix = bytes([(WIF_SCRIPT_TYPES[txin_type] + constants.net.WIF_PREFIX) & 255])
    else:
        prefix = bytes([constants.net.WIF_PREFIX])
    suffix = b'\01' if compressed else b''
    base58_wif = EncodeBase58Check(prefix + secret + suffix)
    if internal_use:
        return base58_wif
    return '{}:{}'.format(txin_type, base58_wif)
def deserialize_privkey(key: str) -> Tuple[str, bytes, bool]:
    """Parse a private key string; returns (txin_type, secret_bytes, compressed).

    Accepts minikeys, plain WIF, and electrum's extended "type:wif" form.
    Raises BitcoinException/BaseDecodeError on malformed input.
    """
    if is_minikey(key):
        return 'p2pkh', minikey_to_private_key(key), False
    txin_type = None
    if ':' in key:
        # electrum-style extended serialization: "<txin_type>:<wif>"
        txin_type, key = key.split(sep=':', maxsplit=1)
        if txin_type not in WIF_SCRIPT_TYPES:
            raise BitcoinException('unknown script type: {}'.format(txin_type))
    try:
        vch = DecodeBase58Check(key)
    except Exception as e:
        # avoid leaking the full secret into logs/error messages
        neutered_privkey = str(key)[:3] + '..' + str(key)[-2:]
        raise BaseDecodeError(f"cannot deserialize privkey {neutered_privkey}") from e
    if txin_type is None:
        # keys exported in version 3.0.x encoded script type in first byte
        prefix_value = vch[0] - constants.net.WIF_PREFIX
        try:
            txin_type = WIF_SCRIPT_TYPES_INV[prefix_value]
        except KeyError as e:
            raise BitcoinException('invalid prefix ({}) for WIF key (1)'.format(vch[0])) from None
    else:
        # all other keys must have a fixed first byte
        if vch[0] != constants.net.WIF_PREFIX:
            raise BitcoinException('invalid prefix ({}) for WIF key (2)'.format(vch[0]))
    # 33 = prefix + secret; 34 = prefix + secret + compressed-pubkey marker
    if len(vch) not in [33, 34]:
        raise BitcoinException('invalid vch len for WIF key: {}'.format(len(vch)))
    compressed = False
    if len(vch) == 34:
        if vch[33] == 0x01:
            compressed = True
        else:
            raise BitcoinException(f'invalid WIF key. length suggests compressed pubkey, '
                                   f'but last byte is {vch[33]} != 0x01')
    if is_segwit_script_type(txin_type) and not compressed:
        raise BitcoinException('only compressed public keys can be used in segwit scripts')
    secret_bytes = vch[1:33]
    # we accept secrets outside curve range; cast into range here:
    secret_bytes = ecc.ECPrivkey.normalize_secret_bytes(secret_bytes)
    return txin_type, secret_bytes, compressed
def is_compressed_privkey(sec: str) -> bool:
    """Whether the serialized key *sec* encodes a compressed pubkey."""
    _txin_type, _secret, compressed = deserialize_privkey(sec)
    return compressed
def address_from_private_key(sec: str) -> str:
    """Derive the receiving address for serialized private key *sec*."""
    txin_type, privkey, compressed = deserialize_privkey(sec)
    pubkey_hex = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
    return pubkey_to_address(txin_type, pubkey_hex)
def is_segwit_address(addr: str, *, net=None) -> bool:
    """True if *addr* parses as a bech32 segwit address on *net*."""
    if net is None:
        net = constants.net
    try:
        _witver, witprog = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, addr)
    except Exception:
        return False
    return witprog is not None
def is_b58_address(addr: str, *, net=None) -> bool:
    """True if *addr* is a valid base58 address for *net*."""
    if net is None:
        net = constants.net
    try:
        # checks length, checksum and encoding
        addrtype, _h160 = b58_address_to_hash160(addr)
    except Exception:
        return False
    return addrtype in [net.ADDRTYPE_P2PKH, net.ADDRTYPE_P2SH, net.ADDRTYPE_P2SH_ALT]
def is_address(addr: str, *, net=None) -> bool:
    """True if *addr* is either a segwit or a base58 address for *net*."""
    if net is None:
        net = constants.net
    return is_segwit_address(addr, net=net) or is_b58_address(addr, net=net)
def is_private_key(key: str, *, raise_on_error=False) -> bool:
    """Return whether *key* parses as a WIF private key.

    With raise_on_error=True the underlying parse error propagates instead
    of being swallowed.
    """
    try:
        deserialize_privkey(key)
        return True
    except Exception:
        # narrowed from BaseException so KeyboardInterrupt/SystemExit are
        # never swallowed by a validity check
        if raise_on_error:
            raise
        return False
def is_minikey(text: str) -> bool:
    # Minikeys are typically 22 or 30 characters, but this routine
    # permits any length of 20 or more provided the minikey is valid.
    # A valid minikey must begin with an 'S', be in base58, and when
    # suffixed with '?' have its SHA256 hash begin with a zero byte.
    # They are widely used in Casascius physical bitcoins.
    if len(text) < 20 or text[0] != 'S':
        return False
    if any(ord(c) not in __b58chars for c in text):
        return False
    return sha256(text + '?')[0] == 0x00
def minikey_to_private_key(text: str) -> bytes:
    """Return the private key bytes for minikey *text* (SHA256 of the text)."""
    digest = sha256(text)
    return digest
|
"""
Created on Sun Apr 19 19:08:49 2015
@author: Paco
"""
from signalprocessing import *
from fractal import *
from stats import *
from tqdm import tqdm
def dictionaryInitialization():
    """Create the empty feature dictionary: one empty list per feature name.

    Keys (in the original insertion order): 'fft'; wavelet coefficient
    slots 0-7 for each family prefix ('w', 'wc', 'wr', 'wb', 'ww');
    fractal/stat features; and the four band powers 'p0'-'p3'.
    Replaces a 58-line hand-written literal with generated keys.
    """
    keys = ['fft']
    for prefix in ('w', 'wc', 'wr', 'wb', 'ww'):
        keys += [prefix + str(level) for level in range(8)]
    keys += ['dfa', 'pfd', 'hfd', 'min', 'max', 'kurto', 'cwt', 'fisher',
             'autoco', 'hjorth0', 'hjorth1', 'spentro', 'svdentro',
             'p0', 'p1', 'p2', 'p3']
    return {key: [] for key in keys}
def waveletsAppend(x, d, dname='w', waveType='db8'):
    """Append the 8 wavelet-decomposition coefficients of signal *x* to the
    feature dict *d*, under keys dname+'0' ... dname+'7'. Returns *d*."""
    coeffs = waveCoeffs(x, wave=waveType, levels=8)
    for level in range(8):
        d[dname + str(level)].append(coeffs[level])
    return d
'''
Super loop
'''
def dico_construction(X, dd):
    """Fill the feature dict *dd* with one entry per signal of *X*.

    For each signal: FFT peaks, five wavelet families, CWT peak,
    autocorrelation, pyeeg fractal/entropy measures, band powers and
    basic statistics. Returns the (mutated) dict.
    """
    d = dd
    print("Building features (can take some time) ...")
    for sig in tqdm(X):
        # Signal processing
        d['fft'].append(findFreq(sig, fs=200, sel=10))
        for prefix, wave in (('w', 'db8'), ('wc', 'coif5'), ('wb', 'bior2.8'),
                             ('wr', 'rbio2.8'), ('ww', 'sym8')):
            d = waveletsAppend(sig, d, dname=prefix, waveType=wave)
        d['cwt'].append(peakCWT(sig))
        d['autoco'].append(autoCorrelation(sig))
        # Fractal
        power_r = pyeegSignal(sig)
        py_frac = pyeegFractal(sig, pr=power_r)
        d['dfa'].append(py_frac[6])
        d['pfd'].append(py_frac[0])
        for band in range(4):
            d['p' + str(band)].append(power_r[1][band])
        d['fisher'].append(py_frac[5])
        d['hjorth0'].append(py_frac[2][0])
        d['hjorth1'].append(py_frac[2][1])
        d['svdentro'].append(py_frac[4])
        d['hfd'].append(py_frac[1])
        d['spentro'].append(py_frac[3])
        # Stats
        stat = statsFeatures(sig)
        d['min'].append(stat[0])
        d['max'].append(stat[1])
        d['kurto'].append(stat[2])
    return d
'''
do = dictionaryInitialization()
do = dico_construction(dataset['X_train'],do)
do_test = dictionaryInitialization()
do_test = dico_construction(dataset['X_test'],do_test)
'''
|
# Simple weighted-grade calculator: reads three test scores and three
# homework scores, then combines their averages with user-supplied weights.
name = input("What is your name? ")
course = input("What class are you in? ")
print()

weight_test = float(input("How much are tests worth in this class (i.e. 0.40 for 40%): "))
test_scores = [
    float(input("Enter test score #1: ")),
    float(input("Enter test score #2: ")),
    float(input("Enter test score #3: ")),
]
print()
test_avg = sum(test_scores) / 3
print("Your test average is: %.2f" % (test_avg))
print()

weight_hw = float(input("How much are homework assignments worth in this class (i.e. 0.60 for 60%): "))
hw_scores = [
    float(input("Enter homework score #1: ")),
    float(input("Enter homework score #2: ")),
    float(input("Enter homework score #3: ")),
]
print()
hw_avg = sum(hw_scores) / 3
print("Your homework average is: %.1f" % (hw_avg))
print()

# weighted final score (weights are not validated to sum to 1.0)
total = hw_avg * weight_hw + test_avg * weight_test
print("Thanks, %s. Your final score in %s is %.2f" % (name, course, total))
|
import re

from django.contrib.auth.models import User
from django.core import mail
class BasicBackend:
    """Common base for the auth backends: capability flags plus user lookup."""
    supports_object_permissions = False
    supports_anonymous_user = False
    supports_inactive_user = False

    def get_user(self, user_id):
        """Return the User with primary key *user_id*, or None if absent."""
        # filter().first() returns None for a missing pk, matching the
        # original try/except User.DoesNotExist behaviour.
        return User.objects.filter(pk=user_id).first()
class EmailBackend(BasicBackend):
    """Authenticate a user by email address or username plus password."""
    # Loose "looks like an email" pattern. The original referenced an
    # undefined module-level name `email` (marked @UndefinedVariable),
    # which raised NameError on every authentication attempt.
    EMAIL_RE = re.compile(r'^[^@\s]+@[^@\s]+\.[^@\s]+$')

    def authenticate(self, username=None, password=None):
        """Return the matching active-credential User, or None."""
        if not username or not password:
            return None
        # If username is an email address, then try to pull it up
        if self.EMAIL_RE.search(username):
            try:
                user = User.objects.get(email=username)
            except User.DoesNotExist:
                return None
        else:
            # We have a non-email address username we should try username
            try:
                user = User.objects.get(username=username)
            except User.DoesNotExist:
                return None
        if user.check_password(password):
            return user
        return None
from django import forms
from django.contrib.auth.forms import AuthenticationForm
class MyAuthenticationForm(AuthenticationForm):
    """Login form whose username field is presented as an email address."""
    # max_length=75 matches Django's historical email-field length limit.
    username = forms.CharField(label="Email address", max_length=75)
|
import os
from types import GeneratorType
import pytest
from rss_scrapper.task_factory import create_task
from rss_scrapper.tasks.text import TextTask
# Folder (relative to the repo root) holding the sample input files.
TEST_DATA_FOLDER = "tests/test_data"

# BUGFIX: `params` was a set literal, so the parametrization order was
# nondeterministic between runs (pytest expects a stable sequence here).
@pytest.fixture(params=(
    "lorem_ipsum.txt",
    "utf-8.txt",
    "cp1252.txt",
))
def test_file_path(request):
    """Yield the path of each sample text file, one parametrized run each."""
    return os.path.join(TEST_DATA_FOLDER, request.param)
def test_task_text(test_file_path):
    """A text task must yield exactly the raw bytes it was initialised with."""
    with open(test_file_path, 'rb') as fh:
        expected = fh.read()
    task = create_task("text")
    assert isinstance(task, TextTask)
    task.init(text=expected)
    result = task.execute(None)
    assert isinstance(result, GeneratorType)
    items = list(result)
    assert len(items) == 1
    assert expected == items[0]
|
"""Checker for anything related to the async protocol (PEP 492)."""
import sys
import astroid
from astroid import exceptions
from pylint import checkers
from pylint.checkers import utils as checker_utils
from pylint import interfaces
from pylint import utils
class AsyncChecker(checkers.BaseChecker):
    """Checks misuses of the async protocol (PEP 492): `yield` inside
    coroutines and `async with` on objects lacking __aenter__/__aexit__."""
    __implements__ = interfaces.IAstroidChecker
    name = 'async'
    msgs = {
        'E1700': ('Yield inside async function',
                  'yield-inside-async-function',
                  'Used when an `yield` or `yield from` statement is '
                  'found inside an async function.',
                  {'minversion': (3, 5)}),
        'E1701': ("Async context manager '%s' doesn't implement __aenter__ and __aexit__.",
                  'not-async-context-manager',
                  'Used when an async context manager is used with an object '
                  'that does not implement the async context management protocol.',
                  {'minversion': (3, 5)}),
    }

    def open(self):
        self._ignore_mixin_members = utils.get_global_option(self, 'ignore-mixin-members')

    @checker_utils.check_messages('yield-inside-async-function')
    def visit_asyncfunctiondef(self, node):
        # On 3.5 any `yield` in a coroutine is illegal; later versions allow
        # plain `yield` (async generators) but never `yield from`.
        for child in node.nodes_of_class(astroid.Yield):
            if child.scope() is node and (sys.version_info[:2] == (3, 5) or
                                          isinstance(child, astroid.YieldFrom)):
                self.add_message('yield-inside-async-function', node=child)

    @checker_utils.check_messages('not-async-context-manager')
    def visit_asyncwith(self, node):
        for ctx_mgr, _ in node.items:
            infered = checker_utils.safe_infer(ctx_mgr)
            if infered is None or infered is astroid.YES:
                continue
            if isinstance(infered, astroid.Instance):
                try:
                    infered.getattr('__aenter__')
                    infered.getattr('__aexit__')
                except exceptions.NotFoundError:
                    # `infered` is already known to be an Instance here; the
                    # original redundantly re-checked isinstance.
                    # If we do not know the bases of this class,
                    # just skip it.
                    if not checker_utils.has_known_bases(infered):
                        continue
                    # Just ignore mixin classes.
                    if self._ignore_mixin_members:
                        if infered.name[-5:].lower() == 'mixin':
                            continue
                else:
                    # both dunders found: valid async context manager
                    continue
            self.add_message('not-async-context-manager',
                             node=node, args=(infered.name, ))
def register(linter):
    """required method to auto register this checker"""
    checker = AsyncChecker(linter)
    linter.register_checker(checker)
|
def simple(func):
    """Decorator that tags *func* with a fixed ``__annotation__`` of "Hello"."""
    setattr(func, '__annotation__', "Hello")
    return func
@simple
def foo():
    """Demo function: decorated by `simple`, gains __annotation__ == "Hello"."""
    pass
def complex(msg):
    """Parametrised decorator factory: the returned decorator stores *msg*
    on the decorated function as ``__annotation__``.

    NOTE: the name shadows the builtin ``complex`` (kept for compatibility
    with existing ``@complex(...)`` call sites).
    """
    def annotate(func):
        setattr(func, '__annotation__', msg)
        return func
    return annotate
@complex("Hi")
def bar():
pass
# Bare name references: no-ops at runtime, they only mark the decorated
# functions as "used" in this demo module.
foo
bar
class C(object):
    """Demo of the builtin @staticmethod and @classmethod decorators."""
    @staticmethod
    def smeth(arg0, arg1):
        # bare expressions: evaluate and discard the arguments (no-ops)
        arg0
        arg1
    @classmethod
    def cmeth(cls, arg0):
        cls
        arg0
|
try:
import Tkinter as tk
import ttk
except ImportError:
import tkinter as tk
import tkinter.ttk as ttk
class Base_Form(object):
    """Base class of all forms.

    Mixes form behaviour (an action callback plus hidden key/value data)
    into a Tk container widget. The Tk class name is forced to "Form" so
    submit buttons can locate their enclosing form by winfo_class().
    """
    def __init__(self, widget_class, master, action, hidden_input, kw):
        self.action = action
        if hidden_input is None:
            hidden_input = dict()
        elif not isinstance(hidden_input, dict):
            raise ValueError("'hidden_input' should be a dict")
        self.hidden_input = hidden_input
        kw["class"] = "Form"
        widget_class.__init__(self, master, **kw)
class Base_SubmitButton(object):
    """Base class of submit buttons.

    submit() walks up to the enclosing "Form" widget, gathers every
    descendant widget carrying a `fieldname` attribute into a dict, then
    invokes the form's action callback with that dict.
    """
    def submit(self):
        # 1) find the enclosing form (Tk class name forced to "Form")
        form_widget = self
        while True:
            form_widget = form_widget.master
            if form_widget is None:
                raise Exception("No form found")
            else:
                if form_widget.winfo_class() == "Form":
                    break
        if form_widget.action is None: return
        form_action = form_widget.action
        # 2) collect data, starting with the form's hidden values
        form_data = {}
        form_data.update(form_widget.hidden_input)
        # Applying list for python 2/3 compatibility. dict_values is a view in Python 3.
        list_of_widgets = list(form_widget.children.values())
        # depth-first traversal of all descendant widgets
        while True:
            try:
                widget = list_of_widgets.pop()
            except IndexError:
                break
            list_of_widgets.extend(list(widget.children.values()))
            if not hasattr(widget,"fieldname"): continue
            field_name = widget.fieldname
            Tk_class = widget.winfo_class()
            # 3) extract the widget's current value by Tk class
            if Tk_class == "Entry" or Tk_class == "TEntry":
                field_value = widget.get()
            elif Tk_class == "Text":
                # 'end-1c' drops the trailing newline Text always appends
                field_value = widget.get("1.0",'end-1c')
            elif Tk_class == "TCombobox":
                field_value = widget.get()
            elif Tk_class == "Listbox":
                field_value = [widget.get(idx) for idx in widget.curselection()]
            elif Tk_class == "Checkbutton" or Tk_class == "TCheckbutton":
                # read the button's backing Tcl variable directly
                variable_name = widget.cget("variable").string
                field_value = widget.tk.globalgetvar(variable_name)
            elif Tk_class == "Radiobutton" or Tk_class == "TRadiobutton":
                field_value = widget.tk.globalgetvar(widget.cget("variable").string)
            else:
                # unsupported widget types are silently ignored
                continue
            form_data[field_name] = field_value
        form_action(form_data)
class Form_Frame(tk.Frame, Base_Form):
    """Form based on a classic Tk Frame."""
    def __init__(self, master, action=None, hidden_input=None, **kw):
        Base_Form.__init__(self, tk.Frame, master, action, hidden_input, kw)
class Form_TFrame(ttk.Frame, Base_Form):
    """Form based on a themed (ttk) Frame.

    BUGFIX: the class previously inherited tk.Frame while __init__
    initialised ttk.Frame; the base class now matches the widget that is
    actually constructed.
    """
    def __init__(self, master, action=None, hidden_input=None, **kw):
        Base_Form.__init__(self, ttk.Frame, master, action, hidden_input, kw)
class Form_LabelFrame(tk.LabelFrame, Base_Form):
    """Form based on a classic Tk LabelFrame."""
    def __init__(self, master, action=None, hidden_input=None, **kw):
        Base_Form.__init__(self, tk.LabelFrame, master, action, hidden_input, kw)
class Form_TLabelFrame(ttk.LabelFrame, Base_Form):
    """Form based on a themed (ttk) LabelFrame."""
    def __init__(self, master, action=None, hidden_input=None, **kw):
        Base_Form.__init__(self, ttk.LabelFrame, master, action, hidden_input, kw)
# Default form implementation (classic Tk frame).
Form = Form_Frame
class Submit_Button(tk.Button, Base_SubmitButton):
    """Classic Tk button wired to submit its enclosing form on click."""
    def __init__(self, parent, *args, **kw):
        # the button always triggers form submission (replaces any
        # caller-supplied command)
        kw["command"] = self.submit
        super(Submit_Button, self).__init__(parent, *args, **kw)
class Submit_TButton(ttk.Button, Base_SubmitButton):
    """Themed (ttk) button wired to submit its enclosing form on click."""
    def __init__(self, parent, *args, **kw):
        # the button always triggers form submission (replaces any
        # caller-supplied command)
        kw["command"] = self.submit
        super(Submit_TButton, self).__init__(parent, *args, **kw)
if __name__== "__main__":
try:
from Tkinter import Frame, Entry, Radiobutton, Checkbutton, Text, Listbox, Tk, Label, StringVar
import tkMessageBox as messagebox
from ttk import Combobox
from Tkconstants import *
except ImportError:
from tkinter import Frame, Entry, Radiobutton, Checkbutton, Text, Listbox, Tk, Label, messagebox, StringVar
from tkinter.ttk import Combobox
from tkinter.constants import *
import pprint
pp = pprint.PrettyPrinter(indent=4)
root= Tk()
Label(root, text="Fill form and click submit button to execute action (open a popup) with all the form data.").pack(anchor=W, padx=(2,0))
form = Form(root, action =lambda data: messagebox.showinfo("form data",pp.pformat(data)))
form.pack(expand=True, fill="both", ipadx=10, ipady=10)
# It's possible to provide hidden data
form.hidden_input["hidden_var1"] = "value1"
form.hidden_input["hidden_var2"] = "value2"
Label(form, text="Entry:").grid(row=0,column=0, sticky=E, pady=(8,0))
# The fieldname attribute is necessary to provide data to action
entry = Entry(form)
entry.fieldname = "entry"
entry.grid(row=1,column=1, sticky =E+W)
Label(form, text="Checkbuttons:").grid(row=2,column=0, sticky=E, pady=(8,0))
column = Frame(form)
column.grid(row=3,column=1, sticky =E+W)
checkbutton0 = Checkbutton(column, text="Option 0")
checkbutton0.fieldname = "checkbutton0"
checkbutton0.pack(side=LEFT)
checkbutton1 = Checkbutton(column, text="Option 1")
checkbutton1.fieldname = "checkbutton1"
checkbutton1.pack(side=LEFT)
checkbutton2 = Checkbutton(column, text="Option 2")
checkbutton2.fieldname = "checkbutton2"
checkbutton2.pack(side=LEFT)
Label(form, text="Radiobuttons:").grid(row=4,column=0, sticky=E, pady=(8,0))
column = Frame(form)
column.grid(row=5,column=1, sticky =E+W)
# All radiobuttons require a variable
variable = StringVar()
radiobutton0 = Radiobutton(column, variable = variable, value="value0", text="Selection 0")
radiobutton0.fieldname = "radiobutton"
radiobutton0.pack(side=LEFT)
radiobutton1 = Radiobutton(column, variable = variable, value="value1", text="Selection 1")
radiobutton0.fieldname = "radiobutton"
radiobutton1.pack(side=LEFT)
Label(form, text="Text area:").grid(row=6,column=0, sticky=E, pady=(8,0))
text = Text(form, height=5)
text.fieldname = "text"
text.grid(row=7,column=1, sticky =E+W)
Label(form, text="Listbox:").grid(row=8,column=0, sticky=E, pady=(8,0))
listbox = Listbox(form)
listbox.fieldname = "listbox"
listbox.grid(row=9,column=1, sticky=W)
for item in ["one", "two", "three", "four"]:
listbox.insert("end", item)
Label(form, text="Combobox:").grid(row=10,column=0, sticky=E, pady=(8,0))
combobox = Combobox(form, values = ('X', 'Y', 'Z'), width=5)
combobox.fieldname = "combobox"
combobox.grid(row=11,column=1, sticky=W)
Submit_Button(form, text="Submit").grid(row=12,column=1,sticky =E)
root.mainloop()
|
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import sys, os, glob, re, math
# Nested results: results[mem][name][N] = elapsed time (ns per search).
results = {}

def AddRes(name, mem, N, eltime):
    """Record one measurement: results[mem][name][N] = eltime.

    The `global` declaration was removed: the dict is only mutated,
    never rebound, so it was unnecessary.
    """
    results.setdefault(mem, {}).setdefault(name, {})[N] = eltime
def ReadRes(fn):
    """Parse one benchmark log file and record each timing line via AddRes.

    Expects the log to contain "Arrays: <n> x <N>", "Memory: <mem>" and
    lines shaped like "<time> ns : <name>".
    """
    with open(fn, "rt") as f:
        data = f.read()
    # NOTE(review): assumes the headers always match; a malformed log would
    # raise AttributeError on match.group() -- confirm log format is stable.
    match = re.search(r"Arrays: (\d*) x (\d*)", data)
    N = int(match.group(2))
    match = re.search(r"Memory: (\d*)", data)
    mem = int(match.group(1))
    for match in re.finditer(r"\s*([0-9.]+)\s*ns\s*:\s*(\S+)", data):
        eltime = float(match.group(1))
        name = match.group(2)
        AddRes(name, mem, N, eltime)
for fn in glob.glob("res/*.log"):
ReadRes(fn)
styles = ['yx', 'rx', 'r+', 'mx', 'm+', 'k.', 'ko', 'bo', 'bs', 'yo', 'g*', 'gP', 'gd', 'm*', 'c*']
dpi = 150
for mem, graphs in results.items():
args = []
names = []
argsPE = []
argsLog = []
idx = 0
for name, graph in graphs.items():
if ('linear' in name and 'scalar' in name):
continue
X = []
Y = []
Z = []
W = []
for N, eltime in graph.items():
X.append(N)
Y.append(eltime)
Z.append(eltime / N)
W.append(eltime / math.log(N, 2.0))
args += [X, Y, styles[idx]]
argsPE += [X, Z, styles[idx]]
argsLog += [X, W, styles[idx]]
names.append(name)
idx += 1
print("%s: %s" % (name, args[-1]))
title = "(memory = %dB)" % mem
if len(sys.argv) > 1:
title = sys.argv[1] + " " + title
ax = plt.axes()
ax.set_title(title)
ax.loglog(*args, basex=2, basey=2, linestyle='-')
ax.set_xlabel("Array length (N)")
ax.set_ylabel("Time per search, ns")
ax.grid(True, which="major")
ax.grid(True, which="minor", color='0.8', linestyle=':')
ax.legend(names, loc=2, prop={'size': 6})
ax.get_yaxis().get_minor_locator().subs([1.25, 1.5, 1.75])
ax.get_yaxis().set_minor_formatter(ticker.FuncFormatter(lambda x,p: str(int(x))))
ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())
#plt.show()
plt.savefig('res/plot_search_%d.png' % mem, bbox_inches='tight', dpi=dpi)
plt.gcf().clear()
ax = plt.axes()
ax.set_title(title)
ax.semilogx(*argsPE, basex=2, linestyle='-')
ax.set_xlabel("Array length (N)")
ax.set_ylabel("Time per element, ns")
ax.grid(True, which="major")
ax.grid(True, which="minor", color='0.8', linestyle=':')
ax.legend(names, loc=1, prop={'size': 6})
ax.set_ylim(0.0, 0.5)
ax.get_yaxis().set_minor_locator(ticker.MultipleLocator(0.01))
ax.get_yaxis().tick_right()
ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())
#plt.show()
plt.savefig('res/plot_elem_%d.png' % mem, bbox_inches='tight', dpi=dpi)
plt.gcf().clear()
ax = plt.axes()
ax.set_title(title)
ax.semilogx(*argsLog, basex=2, linestyle='-')
ax.set_xlabel("Array length (N)")
ax.set_ylabel("Time per one bin.search comparison, ns")
ax.grid(True, which="major")
ax.grid(True, which="minor", color='0.8', linestyle=':')
ax.legend(names, loc=2, prop={'size': 6})
ax.set_ylim(1.0, 7.0)
ax.get_yaxis().set_minor_locator(ticker.MultipleLocator(0.5))
ax.get_yaxis().tick_right()
ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())
#plt.show()
plt.savefig('res/plot_log_%d.png' % mem, bbox_inches='tight', dpi=dpi)
plt.gcf().clear()
|
from os import listdir, rename
from os.path import isfile, isdir, join
import sys
import json
import math
# Runs longer than this (milliseconds) are classified as TIMEOUT.
TIMEOUT = 300*1000

def load_json_file(json_path):
    """Load one result-JSON file and reduce it to a (time_ms, kind) pair.

    kind is "TIMEOUT" (over TIMEOUT ms), "INVALID" (file has "remarks"),
    or "VALID" otherwise.
    """
    # `with` guarantees the handle is closed even when parsing raises
    # (the original leaked the handle on a json.loads error).
    with open(json_path) as f:
        data = json.loads(f.read())
    # "purify" the data to a form we care about: (time, kind)
    time = int(data["executionTime"])
    if time > TIMEOUT:
        kind = "TIMEOUT"
    elif "remarks" in data:
        kind = "INVALID"
    else:
        kind = "VALID"
    # TODO: Could also consider whether the test "PASSED", "FAILED"
    return (time, kind)
# Accessors for the (time, kind) result pairs. Rewritten with indexing
# because Python-2 tuple-parameter unpacking in lambdas was removed in
# Python 3 (PEP 3113); behaviour is unchanged on both versions.
timeof = lambda res: res[0]
kindof = lambda res: res[1]
def filter_for_kind(kind):
    """Return a filter: [(time, kind)] -> list keeping only *kind* entries.

    A list comprehension preserves the Python-2 behaviour (filter()
    returned a list there), works on Python 3, and avoids the removed
    tuple-parameter lambda syntax (PEP 3113).
    """
    return lambda xs: [res for res in xs if res[1] == kind]

filter_for_timeout = filter_for_kind("TIMEOUT")
filter_for_invalid = filter_for_kind("INVALID")
filter_for_valid = filter_for_kind("VALID")
def filter_for_time(time, xs):
    """Return the entries of *xs* whose elapsed time is <= *time* ms, as a list.

    List comprehension keeps Python-2 list semantics while avoiding the
    Python-3-incompatible tuple-parameter lambda (PEP 3113).
    """
    return [res for res in xs if res[0] <= time]
def load_files_in_dir(dir_path):
    """Return [(time, kind)] summaries for every file in *dir_path*."""
    # Strictly assumes everything in dir_path is a json file.
    return [load_json_file(join(dir_path, res)) for res in listdir(dir_path)]
def chart_results(ls_of_res):
    # Print summary statistics over the (time, kind) results: cumulative
    # stats per percentile cutoff and per candidate timeout value.
    # NOTE: Python 2 module (print statements, tuple-unpacking lambdas).
    sorted_data = sorted(ls_of_res)
    sorted_valid_data = filter_for_valid(sorted_data)
    sorted_invalid_data = filter_for_invalid(sorted_data)
    def percentile_idx(p):
        # 0-based index of the p-th percentile element of sorted_data
        return int(math.ceil(float(len(sorted_data) * p) / 100)) - 1
    def print_stats(xs, use_timeout = -1):
        # xs is a sorted prefix of sorted_data (the tests "covered")
        # Longest Running
        longest = timeof(xs[-1])
        print "Longest Time: %8d ms" % longest
        # Sum up till that point
        total = sum([timeof(d) for d in xs])
        print "Total Time (excl T/OUT): %8d ms" % total
        tout = max(use_timeout, longest) # use use_timeout, if given, otherwise use `longest`
        touts = total + (tout * (len(sorted_data) - len(xs)))
        print "Total Time (incl T/OUT): %8d ms" % touts
        # What % of all tests are "covered" by `xs`?
        print "Percent of All Tests: %3d%%" % (100 * float(len(xs)) / len(sorted_data))
        # How many aren't?
        print "Complement size (T/OUT): %d" % (len(sorted_data) - len(xs))
        print "Percent of VALID tests: %3d%%" % (100 * float(len(filter_for_valid(xs))) / len(sorted_valid_data))
        print "Percent of INVALID tests: %3d%%" % (100 * float(len(filter_for_invalid(xs))) / len(sorted_invalid_data))
    print "Charting Results"
    print "(%d results)" % len(sorted_data)
    print
    # percentiles 10..90
    # n.b. ostensible < 80-percentile not great, so
    for percent in range(80, 100, 5):
        print "For %d Percentile:" % percent
        pidx = percentile_idx(percent)
        xs = sorted_data[:pidx+1]
        print_stats(xs)
        print
    # Want to investigate various proposed T/OUT times.
    # The largest not-TIMEOUT-time is ~138s, which would surely cost too much.
    # for time in range(1800, 3200, 200):
    for time in range(1500, 6000, 500):
        print "For tentative timeout %d ms:" % time
        xs = filter_for_time(time, sorted_data)
        print_stats(xs, time)
        print
    # For reference, if we do *all* tests:
    print "For All Tests:"
    print_stats(sorted_data)
if __name__ == "__main__":
dirname = sys.argv[1]
res = load_files_in_dir(dirname)
chart_results(res)
print
# Find longest time, before timeout.
# i.e. if time longer than this, 100% chance it will time out.
# (if this is >> 2s, then will be expensive to run all tests..)
print "Count > 300s: %d" % len(filter_for_timeout(res))
|
import json
import os
from functools import partial
import cv2
import numpy as np
from tierpsy.analysis.compress.compressVideo import getROIMask, selectVideoReader, reduceBuffer
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QImage
from PyQt5.QtWidgets import QApplication, QMainWindow, \
QFileDialog, QMessageBox
from tierpsy.gui.GetAllParameters import GetAllParameters, ParamWidgetMapper, save_params_json
from tierpsy.gui.GetMaskParams_ui import Ui_GetMaskParams
from tierpsy.gui.HDF5VideoPlayer import LineEditDragDrop, ViewsWithZoom, setChildrenFocusPolicy
from tierpsy.analysis.compress.BackgroundSubtractor import BackgroundSubtractorVideo, BackgroundSubtractorMasked
from tierpsy.helper.params.tracker_param import TrackerParams, default_param
class twoViewsWithZoom():
    '''
    Synchronize the pan/zoom of two images, i.e. the original frame and
    its masked version, so both always show the same region.
    '''
    def __init__(self, view_full, view_mask):
        self.view_full = ViewsWithZoom(view_full)
        self.view_mask = ViewsWithZoom(view_mask)
        # route wheel events of both views through the shared zoom handler
        self.view_full._view.wheelEvent = self.zoomWheelEvent
        self.view_mask._view.wheelEvent = self.zoomWheelEvent
        # link scrollbars in the graphics view so if one changes the other
        # changes too
        self.hscroll_full = self.view_full._view.horizontalScrollBar()
        self.vscroll_full = self.view_full._view.verticalScrollBar()
        self.hscroll_mask = self.view_mask._view.horizontalScrollBar()
        self.vscroll_mask = self.view_mask._view.verticalScrollBar()
        dd = partial(self.chgScroll, self.hscroll_mask, self.hscroll_full)
        self.hscroll_mask.valueChanged.connect(dd)
        dd = partial(self.chgScroll, self.vscroll_mask, self.vscroll_full)
        self.vscroll_mask.valueChanged.connect(dd)
        dd = partial(self.chgScroll, self.hscroll_full, self.hscroll_mask)
        self.hscroll_full.valueChanged.connect(dd)
        dd = partial(self.chgScroll, self.vscroll_full, self.vscroll_mask)
        self.vscroll_full.valueChanged.connect(dd)
    def chgScroll(self, scroll_changed, scroll2change):
        # mirror one scrollbar's value onto its counterpart
        scroll2change.setValue(scroll_changed.value())
    def zoomWheelEvent(self, event):
        # a wheel event on either view zooms both
        self.view_full.zoomWheelEvent(event)
        self.view_mask.zoomWheelEvent(event)
    def zoom(self, zoom_direction):
        # programmatic zoom (e.g. from +/- keys) applied to both views
        self.view_full.zoom(zoom_direction)
        self.view_mask.zoom(zoom_direction)
    def zoomFitInView(self):
        self.view_full.zoomFitInView()
        self.view_mask.zoomFitInView()
    def setPixmap(self, qimage_full, qimage_mask):
        # for both maps to have the same size
        v_height = min(
            self.view_full._view.height(),
            self.view_mask._view.height())
        v_width = min(
            self.view_full._view.width(),
            self.view_mask._view.width())
        self.view_full._view.resize(v_width, v_height)
        self.view_mask._view.resize(v_width, v_height)
        self.view_full.setPixmap(qimage_full)
        self.view_mask.setPixmap(qimage_mask)
    def cleanCanvas(self):
        self.view_full.cleanCanvas()
        self.view_mask.cleanCanvas()
    def mousePressEvent(self, event):
        # NOTE(review): _mousePressed/_dragPos are first set here; a move
        # event arriving before any press would raise AttributeError in
        # mouseMoveEvent -- confirm event routing guarantees press-first.
        if event.button() == Qt.LeftButton:
            self._mousePressed = True
            self._dragPos = event.pos()
            event.accept()
            self.view_full._view.setCursor(Qt.ClosedHandCursor)
            self.view_mask._view.setCursor(Qt.ClosedHandCursor)
    def mouseMoveEvent(self, event):
        # drag-to-pan: scroll the full view by the mouse delta (the mask
        # view follows through the linked scrollbars)
        if self._mousePressed:
            newPos = event.pos()
            diff = newPos - self._dragPos
            self._dragPos = newPos
            dx = self.view_full._view.horizontalScrollBar().value() - diff.x()
            dy = self.view_full._view.verticalScrollBar().value() - diff.y()
            self.view_full._view.horizontalScrollBar().setValue(dx)
            self.view_full._view.verticalScrollBar().setValue(dy)
            event.accept()
    def mouseReleaseEvent(self, event):
        if event.button() == Qt.LeftButton:
            self.view_full._view.setCursor(Qt.OpenHandCursor)
            self.view_mask._view.setCursor(Qt.OpenHandCursor)
            self._mousePressed = False
class ParamsGUI(QMainWindow):
    """Window that maps tracker parameters to UI widgets and handles
    loading/saving them from/to a JSON parameters file."""
    def __init__(self, default_videos_dir='', scripts_dir=''):
        self.json_file = ''
        # start from package defaults; replaced when a file is loaded
        self.json_param = default_param.copy()
        super().__init__()
        # Set up the user interface from Designer.
        self.ui = Ui_GetMaskParams()
        self.ui.setupUi(self)
        self._link_slider_spinbox()
        # maps parameter names <-> widgets prefixed 'p_' in the UI
        self.mapper = ParamWidgetMapper(self.ui)
        self.ui.pushButton_saveParam.clicked.connect(self.saveParamFile)
        self.ui.pushButton_paramFile.clicked.connect(self.getParamFile)
        LineEditDragDrop(self.ui.lineEdit_paramFile, self.updateParamFile, os.path.isfile)
        self.ui.pushButton_moreParams.clicked.connect(self.getMoreParams)
    def _link_slider_spinbox(self):
        '''
        Link a given slider to a spinbox so when the value of one changes the other changes too.
        '''
        # NOTE(review): self.updateMask is implemented by the subclass
        # (GetMaskParams_GUI); this base class never calls it directly.
        def _single_link(slider, spinbox, connect_func):
            slider.sliderReleased.connect(connect_func)
            spinbox.editingFinished.connect(connect_func)
            slider.valueChanged.connect(spinbox.setValue)
            spinbox.valueChanged.connect(slider.setValue)
        for field in ['mask_min_area', 'mask_max_area', 'thresh_block_size', 'thresh_C', 'dilation_size']:
            slider = getattr(self.ui, 'horizontalSlider_' + field)
            spinbox = getattr(self.ui, 'p_' + field)
            _single_link(slider, spinbox, self.updateMask)
    def saveParamFile(self):
        """Validate the target path and write the current parameters to it."""
        self.json_file = self.ui.lineEdit_paramFile.text()
        if not self.json_file:
            QMessageBox.critical(
                self,
                'No parameter file name given.',
                'No parameter file name given. Please select name using the "Parameters File" button',
                QMessageBox.Ok)
            return
        if os.path.exists(self.json_file):
            reply = QMessageBox.question(
                self,
                'Message',
                '''The parameters file already exists. Do you want to overwrite it?
If No the parameters in the existing file will be used instead of the values displayed.''',
                QMessageBox.Yes | QMessageBox.No,
                QMessageBox.No)
            if reply == QMessageBox.No:
                return
        dname = os.path.dirname(self.json_file)
        if not os.path.isdir(dname):
            QMessageBox.critical(
                self,
                'Invalid directory.',
                'The directory in the file path name does not exists. Please be sure the path name is correct.',
                QMessageBox.Ok)
            return
        # save data into the json file
        #update before saving
        self._update_json_params()
        try:
            save_params_json(self.json_file, self.json_param)
        except OSError:
            QMessageBox.critical(
                self,
                'Invalid file name.',
                'I was not able to save the file. Please be sure the path name is correct.',
                QMessageBox.Ok)
            return
    def _update_json_params(self):
        '''
        update json_params field from the gui vaues.
        '''
        for param_name in self.mapper:
            assert param_name in self.json_param
            self.json_param[param_name] = self.mapper[param_name]
    def _update_gui_params(self, json_param):
        """Push the values of *json_param* into the mapped widgets."""
        # background subtraction is "on" when both of its sizes are positive
        checkit = (json_param['mask_bgnd_buff_size']>0) and (json_param['mask_bgnd_frame_gap']>0)
        self.ui.checkBox_is_bgnd_subtraction.setChecked(checkit)
        # set correct widgets to the values given in the json file
        for param_name in json_param:
            if param_name in self.mapper:
                self.mapper[param_name] = json_param[param_name]
    def getMoreParams(self):
        """Open the full-parameter dialog and sync its edits back into the GUI."""
        self._update_json_params()
        allparamGUI = GetAllParameters(self.json_param)
        #the gui must have updated the dictioary in json_param
        allparamGUI.exec_()
        self._update_gui_params(self.json_param)
    # file dialog to select the parameters (json) file
    def getParamFile(self):
        json_dir = os.path.dirname(self.ui.lineEdit_paramFile.text())
        json_file, _ = QFileDialog.getOpenFileName(self, "Find parameters file", json_dir, "JSON files (*.json);; All (*)")
        if json_file:
            self.updateParamFile(json_file)
    def updateParamFile(self, json_file):
        """Load *json_file* (falling back to defaults if missing) and update the GUI."""
        # set the widgets with the default parameters, in case the parameters are not given
        # by the json file.
        if os.path.exists(json_file):
            try:
                params = TrackerParams(json_file)
                json_param = params.p_dict
            except (OSError, UnicodeDecodeError, json.decoder.JSONDecodeError):
                QMessageBox.critical(
                    self,
                    'Cannot read parameters file.',
                    "Cannot read parameters file. Try another file",
                    QMessageBox.Ok)
                return
        else:
            json_param = default_param.copy()
        self._update_gui_params(json_param)
        self.json_file = json_file
        self.json_param = json_param
        self.ui.lineEdit_paramFile.setText(self.json_file)
class GetMaskParams_GUI(ParamsGUI):
    def __init__(self, default_videos_dir='', scripts_dir=''):
        """Window for interactively tuning mask parameters against a video."""
        super().__init__()
        self.video_file = ''
        self.Ibuff = np.zeros(0)    # current chunk of raw frames
        self.Ifull = np.zeros(0)    # first frame of the current chunk
        self.IsubtrB = np.zeros(0)  # background-subtracted image
        self.bgnd_subtractor = None
        self.vid = None             # video reader; None when no file open
        self.frame_number = 0
        #remove tabs for the moment. I need to fix this it later
        self.ui.tabWidget.setCurrentIndex(self.ui.tabWidget.indexOf(self.ui.tab_mask))
        # tab name -> tab index, used by updateROIs to pick what to draw
        self.tab_keys = dict(mask=self.ui.tabWidget.indexOf(self.ui.tab_mask),
                             bgnd=self.ui.tabWidget.indexOf(self.ui.tab_bgnd))
        self.ui.p_keep_border_data.stateChanged.connect(self.updateMask)
        self.ui.p_is_light_background.stateChanged.connect(self.updateBgnd)
        self.ui.pushButton_update_bgnd.clicked.connect(self.updateBgnd)
        self.ui.pushButton_video.clicked.connect(self.getVideoFile)
        self.ui.pushButton_next.clicked.connect(self.getNextChunk)
        #self.ui.checkBox_subtractBackground.clicked.connect(self.updateMask)
        self.ui.checkBox_is_bgnd_subtraction.stateChanged.connect(self.updateCheckedBgndSubtr)
        self.updateCheckedBgndSubtr()
        self.ui.tabWidget.currentChanged.connect(self.updateROIs)
        self.videos_dir = default_videos_dir
        if not os.path.exists(self.videos_dir):
            self.videos_dir = ''
        # setup image view as a zoom
        self.twoViews = twoViewsWithZoom(
            self.ui.graphicsView_full,
            self.ui.graphicsView_mask)
        LineEditDragDrop(self.ui.lineEdit_video, self.updateVideoFile, os.path.isfile)
        # make sure the childrenfocus policy is none in order to be able to use
        # the arrow keys
        setChildrenFocusPolicy(self, Qt.ClickFocus)
        self.is_play = False
        self.ui.pushButton_play.clicked.connect(self.playVideo)
        # drives playback: each tick loads the next chunk
        self.timer = QTimer()
        self.timer.timeout.connect(self.getNextChunk)
def keyPressEvent(self, event):
if self.vid is not None:
# break no file open, nothing to do here
return
key = event.key()
if key == Qt.Key_Minus:
self.twoViews.zoom(-1)
elif key == Qt.Key_Plus:
self.twoViews.zoom(1)
else:
QMainWindow.keyPressEvent(self, event)
def playVideo(self):
if self.vid is None:
return
if not self.is_play:
self.startPlay()
else:
self.stopPlay()
def startPlay(self):
fps = self.mapper['expected_fps']
compression_buff = self.mapper['compression_buff']
if fps> 0 and compression_buff > 0:
fps_n = fps/compression_buff
freq = round(1000 / fps_n)
else:
freq = 30
self.timer.start()
self.is_play = True
self.ui.pushButton_play.setText('Stop')
def stopPlay(self):
self.timer.stop()
self.is_play = False
self.ui.pushButton_play.setText('Play')
    def closeEvent(self, event):
        """Release the video reader before the window closes."""
        if self.vid is not None:
            self.vid.release()
        super().closeEvent(event)
    # update image if the GUI is resized
    def resizeEvent(self, event):
        """Redraw and refit the image views after a window resize."""
        self.updateROIs()
        self.twoViews.zoomFitInView()
    # file dialog to select the video file
    def getVideoFile(self):
        """Ask the user for a video file and load it."""
        video_file, _ = QFileDialog.getOpenFileName(
            self, "Find video file", self.videos_dir, "All files (*)")
        self.updateVideoFile(video_file)
def _start_video(self, video_file):
vid = selectVideoReader(video_file)
if vid.width == 0 or vid.height == 0:
raise ValueError
else:
if self.vid is not None:
self.vid.release()
self.vid, self.im_width, self.im_height = vid, vid.width, vid.height
self.bgnd_subtractor = None #make sure this get restarted when a new file is initialized
self.frame_number = 0
    def updateVideoFile(self, video_file):
        """Open *video_file*, load its sibling .json parameters (if any),
        read the first chunk and fit the views to it."""
        if video_file and os.path.exists(video_file):
            try:
                self._start_video(video_file)
            except (OSError, ValueError, IOError):
                QMessageBox.critical(
                    self,
                    'Cannot read video file.',
                    "Cannot read video file. Try another file",
                    QMessageBox.Ok)
                return
            self.video_file = video_file
            self.videos_dir = os.path.split(self.video_file)[0]
            self.ui.lineEdit_video.setText(self.video_file)
            # read json file (same basename as the video)
            json_file = self.video_file.rpartition('.')[0] + '.json'
            self.updateParamFile(json_file)
            # get next chunk
            self.getNextChunk()
            # fit the image to the canvas size
            self.twoViews.zoomFitInView()
            #set the valid limits for the block size
            max_block = min(self.Ifull.shape)
            min_block = 3
            self.ui.p_thresh_block_size.setRange(min_block, max_block)
def updateReducedBuff(self):
if self.Ibuff.size > 0:
#update the image used to create the mask
is_light_background = self.mapper['is_light_background']
if self.ui.checkBox_is_bgnd_subtraction.isChecked() and self.bgnd_subtractor is not None:
Ibuff_b = self.bgnd_subtractor.apply(self.Ibuff, self.frame_number)
else:
Ibuff_b = self.Ibuff
self.Imin = reduceBuffer(Ibuff_b, is_light_background)
self.updateMask()
    def getNextChunk(self):
        """Read the next buffer of frames into ``self.Ibuff`` and refresh derived images.

        Restarts the video from the beginning when the end of file is hit on
        the first read of a chunk; a partial chunk is trimmed to the frames
        actually read.
        """
        if self.vid is not None:
            # read the buffsize before getting the next chunk
            self.buffer_size = self.mapper['compression_buff']
            if self.buffer_size <= 0:
                self.buffer_size = 1
            self.Ibuff = np.zeros(
                (self.buffer_size,
                 self.im_height,
                 self.im_width),
                dtype=np.uint8)
            tot = 0
            for ii in range(self.buffer_size):
                # get video frame, stop program when no frame is retrieved (end
                # of file)
                ret, image = self.vid.read()
                if ret == 0:
                    if ii == 0:
                        #restart video
                        self._start_video(self.video_file)
                        ret, image = self.vid.read() #try to read again, if you cannot again just quit
                        if ret == 0:
                            break
                    else:
                        break
                if image.ndim == 3:
                    # colour frames are converted to grayscale for processing
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                self.Ibuff[ii] = image
                tot += 1
            if tot == 0:
                return
            elif tot < self.buffer_size:
                # end of file mid-chunk: trim the unused tail of the buffer
                self.Ibuff = self.Ibuff[:tot]
            self.frame_number += tot
            self.ui.spinBox_frame_number.setValue(self.frame_number)
            self.Ifull = self.Ibuff[0].copy()
            self.updateBgnd()
def _numpy2qimage(self, im_ori):
return QImage(im_ori.data, im_ori.shape[1], im_ori.shape[0],
im_ori.data.strides[0], QImage.Format_Indexed8)
    def updateROIs(self):
        """Refresh both view panes according to the currently selected tab.

        Useful for resizing events.
        """
        if self.Ifull.size == 0:
            self.twoViews.cleanCanvas()
        else:
            cur = self.ui.tabWidget.currentIndex()
            if cur == self.tab_keys['mask']:
                I1, I2 = self.Ifull, self.Imask
            elif cur == self.tab_keys['bgnd']:
                I1 = self.Ifull
                # stretch the subtracted image to the full 0-255 range for display
                I2 = np.zeros_like(self.IsubtrB)
                cv2.normalize(self.IsubtrB,I2,0,255,cv2.NORM_MINMAX)
            else:
                I1, I2 = self.Ifull, self.Ifull
            qimage_roi1 = self._numpy2qimage(I1)
            qimage_roi2 = self._numpy2qimage(I2)
            self.twoViews.setPixmap(qimage_roi1, qimage_roi2)
def updateMask(self):
if self.Ifull.size == 0:
return
# read parameters used to calculate the mask
roi_mask_params_str = ['mask_max_area', 'mask_min_area', 'thresh_block_size', 'thresh_C',
'dilation_size', 'keep_border_data', 'is_light_background']
mask_param = {x:self.mapper[x] for x in roi_mask_params_str}
mask_param['max_area'] = mask_param.pop('mask_max_area')
mask_param['min_area'] = mask_param.pop('mask_min_area')
mask = getROIMask(self.Imin.copy(), **mask_param)
self.Imask = mask * self.Ifull
self.updateROIs()
    def _updateBgnd(self):
        """(Re)build the background subtractor if needed and refresh ``self.IsubtrB``."""
        if self.vid is None:
            return
        if self.bgnd_subtractor is None:
            #try to calculate the background if the parameters are corrected
            keys = ['is_light_background', 'mask_bgnd_buff_size', 'mask_bgnd_frame_gap']
            kwargs = {x.replace('mask_bgnd_', ''):self.mapper[x] for x in keys}
            if kwargs['buff_size'] >0 and kwargs['frame_gap']>0:
                # hdf5 files use the masked reader, anything else the plain video reader
                if not self.video_file.endswith('.hdf5'):
                    self.bgnd_subtractor = BackgroundSubtractorVideo(self.video_file, **kwargs)
                else:
                    self.bgnd_subtractor = BackgroundSubtractorMasked(self.video_file, **kwargs)
        #if the background substraction is checked and it was calculated correctly update the background
        if self.ui.checkBox_is_bgnd_subtraction.isChecked() and \
        self.Ifull.size > 0 and self.bgnd_subtractor is not None:
            self.IsubtrB = self.bgnd_subtractor.subtract_bgnd(self.Ifull)
            #debug!!!
            #self.IsubtrB = self.bgnd_subtractor.bgnd.astype(np.uint8)
        else:
            self.IsubtrB = self.Ifull
        self.updateReducedBuff()
    def updateBgnd(self):
        # Discard any existing subtractor so it is rebuilt with fresh parameters.
        self.bgnd_subtractor = None
        self._updateBgnd()
def updateCheckedBgndSubtr(self):
valid = self.ui.checkBox_is_bgnd_subtraction.isChecked()
self.ui.p_mask_bgnd_frame_gap.setEnabled(valid)
self.ui.p_mask_bgnd_buff_size.setEnabled(valid)
self._updateBgnd()
    def updateParamFile(self, json_file):
        # Reload parameters from the json file, then refresh the mask preview.
        super().updateParamFile(json_file)
        self.updateMask()
if __name__ == '__main__':
    # Standalone entry point: launch the mask-parameter GUI.
    import sys
    app = QApplication(sys.argv)
    ui = GetMaskParams_GUI()
    ui.show()
    sys.exit(app.exec_())
|
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class TwitterAccount(models.Model):
    """
    Twitter account model that stores the oauth token and secret
    for a given user.
    """
    # one Twitter account per Django user; deleted together with the user
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # OAuth credentials obtained during the Twitter sign-in flow
    access_token = models.CharField(max_length=100)
    access_secret = models.CharField(max_length=100)
    class Meta:
        verbose_name = _("twitter account")
        verbose_name_plural = _("twitter accounts")
    def __str__(self):
        return "Twitter Account of user: {}".format(self.user)
|
from django.db.models import Q
from rest_framework import filters
class BaseFilter(filters.BaseFilterBackend):
    """Base filter class for django rest framework views

    The fields can be mapped to a different filter attribute
    by using the mapped fields dictionary, this way we can filter
    related models.

    Subclasses declare ``Meta.fields`` (accepted query parameters),
    optionally ``Meta.mapped_fields`` (query param -> ORM lookup) and,
    when using :meth:`_or_filter`, an ``OR_FIELDS`` iterable.
    """
    class Meta:
        fields = []
        mapped_fields = {}
    def filter_queryset(self, request, qs, view):
        """Filter *qs* by the whitelisted query parameters.

        Repeated values of the same parameter are OR-ed together;
        distinct parameters are AND-ed (one ``.filter()`` call each).
        """
        filter_dict = {}
        for field, value in request.query_params.iterlists():
            if field not in self.Meta.fields:
                continue
            # get mapped field if it exists
            # else just use the field name
            if getattr(self.Meta, 'mapped_fields', None):
                field = self.Meta.mapped_fields.get(field, field)
            if isinstance(value, list):
                for val in value:
                    if filter_dict.get(field) is None:
                        filter_dict[field] = Q(**{field: val})
                    else:
                        filter_dict[field] |= Q(**{field: val})
            else:
                filter_dict[field] = Q(**{field: value})
        for field, query in filter_dict.items():
            qs = qs.filter(query)
        return qs
    def _or_filter(self, request, qs):
        """OR together every value of every parameter listed in ``OR_FIELDS``."""
        or_queries = []
        for field, value in request.query_params.iterlists():
            if field in self.OR_FIELDS:
                if isinstance(value, list):
                    for val in value:
                        or_queries.append(
                            Q(**{field: val})
                        )
                else:
                    # BUG FIX: this branch referenced the undefined name `val`,
                    # raising NameError for any non-list value.
                    or_queries.append(
                        Q(**{field: value})
                    )
        if or_queries:
            or_query = or_queries[0]
            for query in or_queries[1:]:
                or_query |= query
            qs = qs.filter(or_query)
        return qs
|
import requests
import pytest
def test_code_changes_by_group(metrics):
    # repo-group level code-changes should report at least one commit record
    url = 'http://localhost:5000/api/unstable/repo-groups/10/code-changes'
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]['commit_count'] > 0
def test_code_changes_by_repo(metrics):
    # repo-level code-changes should report at least one commit record
    url = 'http://localhost:5000/api/unstable/repo-groups/10/repos/25430/code-changes'
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]['commit_count'] > 0
def test_code_changes_lines_by_group(metrics):
    # line-level stats at group scope: non-negative added/removed counts
    url = 'http://localhost:5000/api/unstable/repo-groups/10/code-changes-lines'
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]['added'] >= 0
    assert payload[0]['removed'] >= 0
def test_code_changes_lines_by_repo(metrics):
    # line-level stats at repo scope: non-negative added/removed counts
    url = 'http://localhost:5000/api/unstable/repo-groups/10/repos/25430/code-changes-lines'
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]['added'] >= 0
    assert payload[0]['removed'] >= 0
def test_sub_projects_by_group(metrics):
    # group scope should report at least one sub-project
    url = 'http://localhost:5000/api/unstable/repo-groups/10/sub-projects'
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]["sub_project_count"] > 0
def test_sub_projects_by_repo(metrics):
    # repo scope should report at least one sub-project
    url = 'http://localhost:5000/api/unstable/repo-groups/10/repos/25430/sub-projects'
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]["sub_project_count"] > 0
def test_cii_best_practices_badge_by_repo(metrics):
    # badge endpoint should return a non-empty JSON payload
    url = 'http://localhost:5000/api/unstable/repo-groups/10/repos/25430/cii-best-practices-badge'
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
def test_languages_by_group(metrics):
    # TODO need data: no fixture repos carry language stats yet, so the
    # endpoint cannot be asserted against meaningfully.
    pass
def test_languages_by_repo(metrics):
    # TODO need data: no fixture repos carry language stats yet, so the
    # endpoint cannot be asserted against meaningfully.
    pass
def test_annual_lines_of_code_count_ranked_by_new_repo_in_repo_group_by_repo(metrics):
    # repo scope, ranked-by-new-repo: expect a positive net line count
    url = ('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/'
           'annual-lines-of-code-count-ranked-by-new-repo-in-repo-group')
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]["net"] > 0
def test_annual_lines_of_code_count_ranked_by_new_repo_in_repo_group_by_group(metrics):
    # group scope, ranked-by-new-repo: expect a positive net line count
    url = ('http://localhost:5000/api/unstable/repo-groups/10/'
           'annual-lines-of-code-count-ranked-by-new-repo-in-repo-group')
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]["net"] > 0
def test_annual_lines_of_code_count_ranked_by_repo_in_repo_group_by_repo(metrics):
    # repo scope, ranked-by-repo: expect a positive net line count
    url = ('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/'
           'annual-lines-of-code-count-ranked-by-repo-in-repo-group')
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]["net"] > 0
def test_annual_lines_of_code_count_ranked_by_repo_in_repo_group_by_group(metrics):
    # group scope, ranked-by-repo: expect a positive net line count
    url = ('http://localhost:5000/api/unstable/repo-groups/10/'
           'annual-lines-of-code-count-ranked-by-repo-in-repo-group')
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert len(payload) >= 1
    assert payload[0]["net"] > 0
|
from django.shortcuts import get_object_or_404, render, RequestContext, render_to_response
from repo.forms import UserForm, UserEditForm
from reg.forms import MyRegistrationForm
from reg.models import UserProfile
from django.contrib.auth.models import User
from django.contrib.auth.views import password_reset
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.forms.models import inlineformset_factory
from django.core.exceptions import PermissionDenied
from django.views import generic
from hashids import Hashids
from django.contrib import messages
from django import forms as django_forms
hashids = Hashids(salt='2016-08-18 16:27:22 IiTNmll0 ATn1ViSu', alphabet='123456789abdefghijmdncklopqrstuvwxy0', min_length=7)
def home(request):
    # Landing page; no template context required.
    return render(request, 'reg/home.html')
def register(request):
    """Create a new account, log the user in and redirect to the index.

    Already-authenticated users are sent back to the home page.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('reg:home'))
    if request.method == 'POST':
        form = MyRegistrationForm(request.POST)
        if form.is_valid():
            user = form.save()
            # re-authenticate so the session backend is attached to the user
            user = authenticate(username=form.cleaned_data.get('username'), password=form.cleaned_data.get('password1'))
            login(request, user)
            messages.success(request, 'Account created successful')
            return HttpResponseRedirect(reverse('reg:index'))
        messages.error(request, 'Registration failed. Check the listed errors')
    else:
        form = MyRegistrationForm()
    return render(request, 'reg/register.html', {
        'form': form,
    })
def login_user(request):
    """Authenticate and log a user in; re-render the login form on failure."""
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('reg:home'))
    if request.method == 'POST':
        # .get() instead of __getitem__: a malformed POST without these keys
        # previously raised KeyError (HTTP 500); authenticate() just returns
        # None for missing credentials.
        user = authenticate(username=request.POST.get('username'),
                            password=request.POST.get('password'))
        if user is not None:
            login(request, user)
            messages.success(request, 'Login successful')
            return HttpResponseRedirect(reverse('reg:index'))
        messages.error(request, 'Login failed')
    return render(request, 'reg/login.html', {'form': UserForm()})
@login_required(login_url='/login')
def logout_view(request):
    # End the session and show the public home page with a confirmation.
    logout(request)
    messages.success(request, 'You have been logged out!')
    return render(request, 'reg/home.html')
@login_required(login_url='/login')
def edit_user(request, pk):
    """Let a user edit their own account and inline UserProfile.

    *pk* is a hashid-encoded user primary key; only the owner may edit
    (PermissionDenied otherwise).
    """
    try:
        pk = hashids.decode(pk)[0]
    except IndexError:
        # decode() returns an empty tuple for an invalid/forged hash
        raise Http404
    user = User.objects.get(pk=pk)
    # Prepopulate UserProfileForm with retrieved user values from above.
    user_form = UserEditForm(instance=user)
    ProfileInlineFormset = inlineformset_factory(User, UserProfile,
        fields=('website', 'bio', 'phone', 'city', 'country', 'organisation'),
        widgets={
            'website': django_forms.TextInput(attrs={'class': 'mdl-textfield__input'}),
            'bio': django_forms.TextInput(attrs={'class': 'mdl-textfield__input'}),
            'phone': django_forms.TextInput(attrs={'class': 'mdl-textfield__input'}),
            'city': django_forms.TextInput(attrs={'class': 'mdl-textfield__input'}),
            'country': django_forms.TextInput(attrs={'class': 'mdl-textfield__input'}),
            'organisation': django_forms.TextInput(attrs={'class': 'mdl-textfield__input'})
        })
    formset = ProfileInlineFormset(instance=user)
    if request.user.is_authenticated() and request.user.id == user.id:
        if request.method == 'POST':
            user_form = UserEditForm(request.POST, request.FILES, instance=user)
            formset = ProfileInlineFormset(request.POST, request.FILES, instance=user)
            if user_form.is_valid():
                created_user = user_form.save(commit=False)
                # rebind the formset to the unsaved user so FK wiring is correct
                formset = ProfileInlineFormset(request.POST, request.FILES, instance=created_user)
                if formset.is_valid():
                    created_user.save()
                    formset.save()
                    return HttpResponseRedirect(reverse('reg:profile', args=(hashids.encode(pk),)))
        # GET, or invalid form/formset: re-render with bound errors
        return render(request, "reg/account_update.html", {
            "noodle": pk,
            "noodle_form": user_form,
            "formset": formset,
        })
    else:
        raise PermissionDenied
def profile(request, pk):
    """Show the public profile page for the hashid-encoded user *pk*."""
    try:
        pk = hashids.decode(pk)[0]
    except IndexError:
        # decode() returns an empty tuple for invalid hashes; the previous
        # bare `except:` also swallowed unrelated errors.
        raise Http404
    #user_data = get_object_or_404(User, pk=pk)
    try:
        # NOTE(review): is_active='TRUE' relies on Django coercing the string
        # to a boolean filter -- confirm this matches the schema.
        user_profile = UserProfile.objects.select_related('user').get(user__pk=pk, user__is_active='TRUE')
        user_data = User.objects.get(pk=pk)
    except (UserProfile.DoesNotExist, User.DoesNotExist):
        # BUG FIX: `except A or B:` evaluates to `except A:` only; a tuple is
        # required to catch both exception types.
        raise Http404("Profile unavailable")
    return render(request, 'reg/viewprofile.html', {
        'user_profile': user_profile,
        'user_data': user_data,
    })
|
# Packaging script for the wikiquote module.
# BUG FIX: distutils' setup() silently ignores install_requires, so the lxml
# dependency was never declared/installed; setuptools honours it.
from setuptools import setup

setup(
    name = 'wikiquote',
    py_modules = ['wikiquote'],
    version = '0.1.1',
    description = 'Retrieve quotes from any Wikiquote page.',
    author = 'Federico Tedin',
    author_email = 'federicotedin@gmail.com',
    install_requires = ['lxml'],
    url = 'https://github.com/federicotdn/python-wikiquotes',
    download_url = 'https://github.com/federicotdn/python-wikiquotes/tarball/0.1.1',
    keywords = ['quotes', 'wikiquote', 'python', 'api', 'qotd'],
    license = 'MIT',
    classifiers = [],
)
|
from django.db import transaction
from django.utils import timezone
from autolims.models import (Instruction, Aliquot,Container,
AliquotEffect, Resource)
from transcriptic_tools.inventory import get_transcriptic_inventory
from transcriptic_tools.enums import Reagent
from transcriptic_tools.utils import _CONTAINER_TYPES
def get_or_create_aliquot_from_path(run_id, aliquot_path):
    """
    aliquot address is of the format "container label / index"

    Returns the existing aliquot at that address, or creates an empty
    (0 uL) one on the container when none exists yet.
    """
    container_label, idx_str = aliquot_path.split('/')
    well_idx = int(idx_str)
    container = Container.get_container_from_run_and_container_label(run_id,
                                                                     container_label)
    assert isinstance(container, Container)
    # reuse the aliquot if it already exists on this container
    existing = container.aliquots.filter(well_idx=well_idx).first()
    if existing is not None:
        return existing
    return Aliquot.objects.create(well_idx=well_idx,
                                  container=container,
                                  volume_ul='0')
def execute_oligosynthesize(instruction):
    """Record synthesized oligos on their destination aliquots.

    Stores sequence/scale/purification as aliquot properties and logs one
    AliquotEffect per oligo, then marks the instruction complete.
    """
    operation = instruction.operation
    for oligo_info in operation['oligos']:
        aliquot = get_or_create_aliquot_from_path(instruction.run_id, oligo_info['destination'])
        aliquot.properties.update({'sequence':oligo_info['sequence'],
                                   'scale':oligo_info['scale'],
                                   'purification':oligo_info['purification']
                                   })
        aliquot.save()
        AliquotEffect.objects.create(aliquot = aliquot,
                                     instruction = instruction,
                                     type = 'instruction'
                                     )
    #@TODO: make an actual api call to order the oligos
    mark_instruction_complete(instruction)
def execute_acoustic_transfer(instruction):
    # Acoustic (Echo-style) transfers are not supported by this executor yet.
    raise NotImplementedError
def execute_gel_purify(instruction):
    # Gel purification is not supported by this executor yet.
    raise NotImplementedError
def execute_gel_separate(instruction):
    # Gel separation is not supported by this executor yet.
    raise NotImplementedError
def execute_magnetic_transfer(instruction):
    # Magnetic bead transfers are not supported by this executor yet.
    raise NotImplementedError
def simplify_pipette_operations(pipette_group):
    """Flatten one autoprotocol pipette group into simple volume transfers.

    :param pipette_group: dict with exactly one key, one of 'transfer',
        'distribute', 'consolidate' or 'mix'.
    :returns: list of dicts with keys 'to_aq_path', 'from_aq_path' and
        'volume_str'; 'mix' moves no liquid and yields [].
    :raises NotImplementedError: for multi-key groups or unknown operations.
    """
    #each group can only have one key
    #check this assumption
    if len(pipette_group) != 1:
        # `raise E, msg` and `dict.keys()[0]` are Python-2-only; these forms
        # behave identically on 2 and 3.
        raise NotImplementedError("We aren't ready for groups to have multiple keys.")
    pipette_operation_type = next(iter(pipette_group))
    pipette_operation_info = pipette_group[pipette_operation_type]
    #dicts of with keys to_aq_path, from_aq_path, volume_str
    volume_transfers = []
    if pipette_operation_type == 'transfer':
        for transfer_op in pipette_operation_info:
            volume_transfers.append({
                'to_aq_path':transfer_op['to'],
                'from_aq_path':transfer_op['from'],
                'volume_str':transfer_op['volume']}
                                    )
    elif pipette_operation_type == 'distribute':
        distribute_op = pipette_operation_info
        for destination_info in distribute_op['to']:
            volume_transfers.append({
                'to_aq_path':destination_info['well'],
                'from_aq_path':distribute_op['from'],
                'volume_str':destination_info['volume']}
                                    )
    elif pipette_operation_type == 'consolidate':
        consolidate_op = pipette_operation_info
        for source_info in consolidate_op['from']:
            volume_transfers.append({
                'to_aq_path':consolidate_op['to'],
                'from_aq_path':source_info['well'],
                'volume_str':source_info['volume']}
                                    )
    elif pipette_operation_type == 'mix':
        # mixing happens in place: no liquid moves between aliquots
        pass
    else:
        raise NotImplementedError("Unknown pipette operation, %s" % pipette_operation_type)
    return volume_transfers
def execute_pipette(instruction):
    """
    transfer, distribute, consolidate, mix are all pipette operations

    Moves the stated volumes between aliquots and records a paired
    liquid_transfer_in / liquid_transfer_out AliquotEffect per transfer.
    """
    operation = instruction.operation
    for pipette_group in operation['groups']:
        volume_transfers = simplify_pipette_operations(pipette_group)
        for transfer_info in volume_transfers:
            from_aq = get_or_create_aliquot_from_path(instruction.run, transfer_info['from_aq_path'])
            to_aq = get_or_create_aliquot_from_path(instruction.run, transfer_info['to_aq_path'])
            # update both volumes before persisting either aliquot
            added_volume = to_aq.add_volume(transfer_info['volume_str'])
            from_aq.subtract_volume(transfer_info['volume_str'])
            to_aq.save()
            from_aq.save()
            AliquotEffect.objects.create(aliquot = to_aq,
                                         instruction = instruction,
                                         data = {"source":{
                                             'container_id': from_aq.container_id,
                                             'well_idx': from_aq.well_idx
                                             },
                                                 'volume_ul': str(added_volume.to('microliter').magnitude)
                                                 },
                                         type = 'liquid_transfer_in'
                                         )
            AliquotEffect.objects.create(aliquot = from_aq,
                                         instruction = instruction,
                                         data = {"destination":{
                                             'container_id': to_aq.container_id,
                                             'well_idx': to_aq.well_idx
                                             },
                                                 'volume_ul': str(added_volume.to('microliter').magnitude)
                                                 },
                                         type = 'liquid_transfer_out'
                                         )
    mark_instruction_complete(instruction)
def execute_cover(instruction):
    """Place the specified lid on the target container."""
    container = Container.get_container_from_run_and_container_label(instruction.run_id,
                                                                     instruction.operation['object'])
    container.cover = instruction.operation['lid']
    container.save()
    # CONSISTENCY FIX: every other executor records completion; without this
    # the cover instruction never gets a completed_at timestamp.
    mark_instruction_complete(instruction)
def execute_uncover(instruction):
    """Remove the lid from the target container."""
    container = Container.get_container_from_run_and_container_label(instruction.run_id,
                                                                     instruction.operation['object'])
    container.cover = None
    container.save()
    # CONSISTENCY FIX: every other executor records completion; without this
    # the uncover instruction never gets a completed_at timestamp.
    mark_instruction_complete(instruction)
def execute_provision(instruction):
    """Dispense a stock resource into the listed destination aliquots."""
    operation = instruction.operation
    resource_id = operation['resource_id']
    #strings are transcriptic id's
    if isinstance(resource_id,basestring):
        resource = Resource.objects.get(transcriptic_id=resource_id)
    else:
        resource = Resource.objects.get(id=resource_id)
    for destination_info in operation['to']:
        aliquot = get_or_create_aliquot_from_path(instruction.run, destination_info['well'])
        aliquot.properties.update({'resource_id':resource.id,
                                   'resource_name': resource.name
                                   })
        aliquot.add_volume(destination_info['volume'])
        aliquot.save()
        # NOTE(review): type is 'instructions' here but 'instruction' in
        # execute_oligosynthesize -- confirm which spelling is canonical.
        AliquotEffect.objects.create(aliquot = aliquot,
                                     instruction = instruction,
                                     type = 'instructions'
                                     )
    mark_instruction_complete(instruction)
def execute_dispense(instruction):
    """Dispense a resource column-wise into a destination container.

    Each listed column is expanded to its well indexes and every well
    receives the stated volume.
    """
    operation = instruction.operation
    resource_id = operation['resource_id']
    destination_container = Container.get_container_from_run_and_container_label(instruction.run_id,
                                                                                 operation['object'])
    #strings are transcriptic id's
    if isinstance(resource_id,basestring):
        resource = Resource.objects.get(transcriptic_id=resource_id)
    else:
        resource = Resource.objects.get(id=resource_id)
    for column_info in operation['columns']:
        column_id = column_info['column']
        #convert columns into well indexes
        well_indexes = destination_container.get_column_well_indexes(column_id)
        for well_idx in well_indexes:
            aliquot, created = Aliquot.objects.get_or_create(well_idx = well_idx,
                                                             container = destination_container)
            aliquot.properties.update({'resource_id':resource.id,
                                       'resource_name': resource.name
                                       })
            aliquot.add_volume(column_info['volume'])
            aliquot.save()
            AliquotEffect.objects.create(aliquot = aliquot,
                                         instruction = instruction,
                                         type = 'instructions'
                                         )
    mark_instruction_complete(instruction)
def execute_stamp(instruction):
    # 96/384-well stamp transfers are not supported by this executor yet.
    raise NotImplementedError
def mark_instruction_complete(instruction):
    # Timestamp the instruction as executed; also used as the no-op executor
    # fallback in get_instruction_executer.
    instruction.completed_at = timezone.now()
    instruction.save()
def get_instruction_executer(operation):
    """Look up the executor function for an autoprotocol op name.

    Falls back to mark_instruction_complete (which just timestamps the
    instruction) when no dedicated execute_<op> function exists.
    """
    candidate = globals().get('execute_%s' % operation)
    if candidate is not None:
        return candidate
    return mark_instruction_complete
@transaction.atomic
def execute_run(run):
    """
    Executes all the autoprotocol associated with a run.
    Updates the status of the run.
    Updates volumes of all inventory used by the run
    Create new samples as needed.
    Ensures that test runs can't access real inventory (and visa versa)
    update properties and names of aliquots (see outs of autoprotocol)
    Mark Samples as discarded as needed

    Runs inside a single transaction: any failure rolls back every
    instruction's effects.
    """
    #ensure that the run is accepted
    assert run.status in ['accepted','in_progress'],\
           'Run must be in accepted or in_progress state to execute. Currently %s'%run.status
    #sequence no asc
    ordered_instructions = run.instructions.all().order_by('sequence_no')
    for instruction in ordered_instructions:
        assert isinstance(instruction,Instruction)
        exec_function = get_instruction_executer(instruction.operation['op'])
        exec_function(instruction)
    #update properties and names of aliquots (see outs of autoprotocol)
    for container_label, out_info in run.protocol.get('outs',{}).items():
        for well_idx_str, well_info in out_info.items():
            aq = get_or_create_aliquot_from_path(run.id, '%s/%s'%(container_label,
                                                                  well_idx_str))
            # only save when something actually changed
            updated = False
            if 'name' in well_info:
                updated=True
                aq.name = well_info['name']
            if 'properties' in well_info:
                updated=True
                aq.properties.update(well_info['properties'])
            if updated:
                aq.save()
    #discard containers
    for container_label, ref_info in run.protocol['refs'].items():
        if ref_info.get('discard'):
            container = Container.get_container_from_run_and_container_label(run.id,
                                                                             container_label)
            container.status = 'destroyed'
            container.save()
    run.status = 'complete'
    run.completed_at = timezone.now()
    run.save()
|
"""
mixture.py: Classes for estimators for mixture distributions
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
import vampyre.common as common
from vampyre.estim.base import BaseEst
class MixEst(BaseEst):
    """ Mixture estimator class
    Given a list of estimators where :code:`est_list[i]`,
    corresponds to a penalty, :math:`f_i(z)`, this class
    creates a new estimator corresponding to the penalty,
    :math:`f(z)=-\\ln( \\sum_i w_i \\exp(-f_i(z))`,
    for a set of weights :math:`w_i`.
    :note: If :math:`\\int e^{-f_i(z)}dz=1` for all :math:`i` and
    and :math:`\\sum_i w_i=1`, then :math:`w_i` can be interpreted
    as the probability of the :math:`i`-th component.
    :note: The variables must be scalar and the estimators
    must have attributes :code:`shape` and :code:`var_axes`.
    Also, the estimator's :code:`est_init` and :code:`est`
    methods must implement a :code:`avg_var_cost` option to
    disable averaging of the averaging the variance and cost.
    :param est_list: list of estimators
    :param w: weight for the components
    """
    def __init__(self, est_list, w, name=None):
        self.est_list = est_list
        self.w = w
        # all components must agree on shape/var_axes; use the first as reference
        shape = est_list[0].shape
        var_axes = est_list[0].var_axes
        dtype = est_list[0].dtype
        # Check that all estimators have cost available
        for est in est_list:
            if est.shape != shape:
                raise common.VpException('Estimators must have the same shape')
            if est.var_axes != var_axes:
                raise common.VpException('Estimators must have the same var_axes')
            if not est.cost_avail:
                raise common.VpException(\
                    "Estimators in a mixture distribution"+\
                    "must have cost_avail==True")
        BaseEst.__init__(self,shape=shape,var_axes=var_axes,dtype=dtype,\
            name=name, type_name='Mixture', nvars=1, cost_avail=True)
    def est_init(self, return_cost=False, ind_out=None,\
        avg_var_cost=True):
        """
        Initial estimator.
        See the base class :class:`vampyre.estim.base.Estim` for
        a complete description.
        :param Boolean return_cost: Flag indicating if :code:`cost` is
           to be returned
        :returns: :code:`zmean, zvar, [cost]` which are the
            prior mean and variance
        """
        # Check parameters
        if (ind_out != [0]) and (ind_out != None):
            raise ValueError("ind_out must be either [0] or None")
        if not avg_var_cost:
            raise ValueError("disabling variance averaging not supported for MixEst")
        # Get the mean and variance of each component
        # (per-element variance/cost is needed to weight the components)
        zmean_list = []
        zvar_list = []
        cost_list = []
        for i,est in enumerate(self.est_list):
            zmeani, zvari, ci = est.est_init(return_cost=True,avg_var_cost=False)
            zmean_list.append(zmeani)
            zvar_list.append(zvari)
            cost_list.append(ci)
        return self._comp_est(zmean_list,zvar_list,cost_list,return_cost)
    def est(self,r,rvar,return_cost=False,ind_out=None,\
            avg_var_cost=True):
        """
        Estimation function
        The proximal estimation function as
        described in the base class :class:`vampyre.estim.base.Estim`
        :param r: Proximal mean
        :param rvar: Proximal variance
        :param boolean return_cost: Flag indicating if :code:`cost` is to be returned
        :returns: :code:`zhat, zhatvar, [cost]` which are the posterior
            mean, variance and optional cost.
        """
        # Check parameters
        if (ind_out != [0]) and (ind_out != None):
            raise ValueError("ind_out must be either [0] or None")
        if not avg_var_cost:
            raise ValueError("disabling variance averaging not supported for MixEst")
        # Get the mean and variance of each component
        # (per-element variance/cost is needed to weight the components)
        zmean_list = []
        zvar_list = []
        cost_list = []
        for i,est in enumerate(self.est_list):
            zmeani, zvari, ci = \
                est.est(r,rvar,return_cost=True,avg_var_cost=False)
            zmean_list.append(zmeani)
            zvar_list.append(zvari)
            cost_list.append(ci)
        return self._comp_est(zmean_list,zvar_list,cost_list,return_cost)
    def _comp_est(self,zmean_list,zvar_list,cost_list,return_cost):
        """
        Computes the estimates given mean, variance and costs on each
        component.
        :param zmean_list: list of mean values for each component.
           The values are stored as a list where each element is an
           array of shape :code:`self.shape`.
        :param zvar_list: list of variance values
        :param cost_list: list of cost values
        :param return_cost: Flag indicating if to return the cost.
        """
        # Find the minimum cost. This will be subtracted from all the costs
        # to prevent overflow when taking an exponential
        # (the standard log-sum-exp stabilization)
        ncomp = len(self.w)
        cmin = copy.deepcopy(cost_list[0])
        for i in range(1,ncomp):
            cmin = np.minimum(cmin, cost_list[i])
        # Compute p_list[i] \prop w[i]*exp(-cost_list[i]),
        # which represents the probabilities for each component.
        p_list = []
        psum = np.zeros(self.shape)
        for i in range(ncomp):
            pi = self.w[i]*np.exp(-cost_list[i] + cmin)
            psum += pi
            p_list.append(pi)
        # total cost: -log of the (shifted) mixture likelihood, shift restored
        cost = np.sum(-np.log(psum) + cmin)
        for i in range(ncomp):
            p_list[i] /= psum
        # Save the probability, and conditional means and variances
        self.prob = p_list
        self.zmean_list = zmean_list
        self.zvar_list = zvar_list
        # Compute prior mean and variance
        # (law of total variance: E[z^2] mixed per component, then recentred)
        zmean = np.zeros(self.shape)
        zsq = np.zeros(self.shape)
        for i in range(ncomp):
            zmean += p_list[i]*zmean_list[i]
            zsq += p_list[i]*(zvar_list[i] + np.abs(zmean_list[i])**2)
        zvar = zsq - np.abs(zmean)**2
        zvar = np.mean(zvar, axis=self.var_axes)
        if return_cost:
            return zmean, zvar, cost
        else:
            return zmean, zvar
|
from virtualisation.misc.jsonobject import JSONObject as JOb
from virtualisation.misc.log import Log as L
import os
import psycopg2
import datetime
import json
__author__ = 'Marten Fischer (m.fischer@hs-osnabrueck.de)'
class SQL(object):
datatype_map = {'int': 'INT', 'str': 'VARCHAR', 'float': 'FLOAT', 'datetime.datetime': 'TIMESTAMP'}
escape_string = ('str', 'datetime.datetime')
cp_observation_fields = ["sampling_time", "sensor_uuid", "observation_uuid", "data", "quality"]
SCHEMA = "observations"# + ("_dev" if os.environ['LOGNAME'] == 'mafi' else "")
    def __init__(self, gdi_config, rm):
        """Connect to the PostGIS database and (idempotently) create the
        schema, tables and indexes used for observation storage.

        :param gdi_config: config object with host/database/username/password/port
        :param rm: resource manager, used later to resolve sensor wrappers
        """
        self.rm = rm
        # page size used by get_observations
        self.PAGINATION_LIMIT = 100
        connect_str = "host='%s' dbname='%s' user='%s' password='%s' port=%d" % (
            gdi_config.host, gdi_config.database, gdi_config.username, gdi_config.password, gdi_config.port)
        self.conn = psycopg2.connect(connect_str)
        self.curs = self.conn.cursor()
        try:
            self.curs.execute("CREATE SCHEMA IF NOT EXISTS %s;" % SQL.SCHEMA)
            # self.curs.execute("CREATE TABLE IF NOT EXISTS %s.cp_sensors (sensor_uuid UUID CONSTRAINT uuid_key PRIMARY KEY, sensor_annotation_id VARCHAR, sercvice_category VARCHAR, traffic INTEGER, geom GEOMETRY(GEOMETRY, 4326) );" % (SQL.SCHEMA,))
            # NOTE(review): cp_sensors is created in 'public' while observations
            # live in SQL.SCHEMA -- confirm this split is intentional.
            self.curs.execute("CREATE TABLE IF NOT EXISTS %s.cp_sensors (sensor_uuid UUID CONSTRAINT uuid_key PRIMARY KEY, sensor_annotation_id VARCHAR, sercvice_category VARCHAR, traffic INTEGER, geom GEOMETRY(GEOMETRY, 4326) );" % ("public",))
            cols = ["sampling_time TIMESTAMP", "sensor_uuid UUID", "observation_uuid UUID", "data JSON", "quality JSON"]
            query = 'CREATE TABLE IF NOT EXISTS %s.cp_observations ( %s, PRIMARY KEY (%s), FOREIGN KEY (sensor_uuid) REFERENCES %s.cp_sensors(sensor_uuid));\n' % (SQL.SCHEMA, ', '.join(cols), ", ".join(["observation_uuid"]), "public")
            self.curs.execute(query)
            # index over sampling_time and sensor_uuid
            # since a 'IF NOT EXISTS' is not available for us (version < 9.5)
            # the error is catched in a separate try-catch
            try:
                query = 'CREATE INDEX "timeindex" ON %s.cp_observations USING btree (sampling_time);' % (SQL.SCHEMA,)
                self.curs.execute(query)
                query = 'CREATE INDEX uuidindex ON %s.cp_observations USING btree (sensor_uuid);' % (SQL.SCHEMA,)
                self.curs.execute(query)
            except:
                pass
            # primary secondary observation_uuid map
            query = 'CREATE TABLE IF NOT EXISTS %s.p_s_observation_uuid (main UUID, secondary UUID);' % (SQL.SCHEMA,)
            self.curs.execute(query)
            self.conn.commit()
            L.i("SQL: schema/tables created")
        except Exception as e:
            L.e("SQL: Could not create schema/tables", e)
            self.conn.rollback()
    def is_timestamp_field(self, fieldname, sd):
        # True when stream *sd* is timestamped and *fieldname* is the field
        # that carries that timestamp (stored in its own DB column instead).
        return sd.isTimestampedStream() and sd.timestamp.inField == fieldname
def insert_observation(self, sd, ob, q):
# L.d("Inserting observation", ob)
query = None
try:
_ob = ob.deepcopy()
_ob.fields = filter(lambda x: not self.is_timestamp_field(x, sd), ob.fields)
if sd.isTimestampedStream() and sd.timestamp.inField in _ob:
_ob.remove_item(sd.timestamp.inField)
if 'latency' in _ob:
_ob.remove_item('latency')
primary_ob_uuid = None
p_s_values = []
for _f in ob.fields:
if _f not in ob or (sd.isTimestampedStream() and sd.timestamp.inField == _f):
continue
if not query:
field = ob[_f]
primary_ob_uuid = self._escape_string(None, str(field.observationID))
v = [
"TIMESTAMP " + self._escape_string(None, field.observationSamplingTime),
self._escape_string(None, str(sd.uuid)),
primary_ob_uuid,
self._escape_string(None, _ob.dumps(), singleqoute_to_double=True),
self._escape_string(None, JOb(q).dumps())
]
query = "INSERT INTO %s.cp_observations (%s) VALUES (%s);\n" % (SQL.SCHEMA, ','.join(SQL.cp_observation_fields), ','.join(v))
del v
p_s_values.append("(%s, %s)" % (primary_ob_uuid, self._escape_string(None, str(field.observationID))))
if query:
query += "INSERT INTO %s.p_s_observation_uuid (main, secondary) VALUES %s;\n" % (SQL.SCHEMA, ','.join(p_s_values))
L.d2("Using query:", query)
if query:
self.curs.execute(query)
self.conn.commit()
del query
del _ob
return True
except Exception as e:
self.conn.rollback()
L.e(e)
L.e("SQL query used:", query)
return False
def _escape_string(self, fieldname, value, sd=None, singleqoute_to_double=False):
if singleqoute_to_double:
value = value.replace("'", "''")
if not fieldname or sd.field[fieldname].dataType in SQL.escape_string:
return "'" + value + "'"
else:
return value
    def get_observations(self, uuid, start=None, end=None, format='json', onlyLast=False, fields=None, offset=0):
        """Fetch stored observations for the sensor identified by *uuid*.

        :param uuid: sensor UUID; must resolve to a known wrapper
        :param start: optional inclusive lower bound on sampling_time
        :param end: optional inclusive upper bound on sampling_time
        :param format: 'json' for plain dicts, or an RDF serialisation format
            ('n3', 'nt', 'xml', 'turtle', 'pretty-xml', 'trix')
        :param onlyLast: if True, return only the newest observation
        :param fields: optional comma-separated list of payload keys to project
        :param offset: pagination offset (page size is PAGINATION_LIMIT)
        :return: serialised RDF string, list of dicts, error string, or None
            if the uuid is unknown
        """
        from virtualisation.resourcemanagement.resourcemanagement import ResourceManagement
        w = self.rm.getWrapperByUUID(uuid)
        if not w:
            return None
        sd = w.getSensorDescription()
        # prepare query
        # NOTE(review): uuid/start/end are interpolated directly into the SQL
        # string -- injection risk if any of them comes from user input.
        _filter = ["sensor_uuid = '%s'" % uuid]
        order = "ORDER BY sampling_time"
        limitation = ""
        if onlyLast:
            # Newest-first combined with LIMIT 1 below yields the latest row.
            order += " DESC"
        else:
            if start:
                _filter.append("sampling_time >= TIMESTAMP '%s'" % start)
            if end:
                _filter.append("sampling_time <= TIMESTAMP '%s'" % end)
        _filter = "WHERE " + " and ".join(_filter)
        if fields:
            # Project only the requested keys out of the JSON payload column,
            # always keeping the quality column as the last selected field.
            fields = fields.split(',')
            fields_ = []
            for ft in fields:
                fields_.append("data->'%s' AS %s" % (ft, ft))
            fields_.append("quality")
        else:
            fields_ = SQL.cp_observation_fields
        limitation = "LIMIT %d" % (1 if onlyLast else self.PAGINATION_LIMIT)
        query = "SELECT %s FROM %s.cp_observations %s %s %s OFFSET %d;" % (",".join(fields_), SQL.SCHEMA, _filter, order, limitation, offset)
        # query = "SELECT %s FROM %s.cp_observations %s %s;" % (",".join(fields_), SQL.SCHEMA, _filter, order)
        L.d("SQL: executing query", query)
        try:
            # need a new cursor object to no interfere with the state of the class's inserting cursor
            cursor = self.conn.cursor()
            cursor.execute(query)
            data = cursor.fetchall()
            data2 = [list(x) for x in data]
            del data
            if format in ('n3', 'nt', 'xml', 'turtle', 'pretty-xml', 'trix'):
                # RDF output: annotate observations and serialise the graph.
                if ResourceManagement.args.messagebus or ResourceManagement.args.triplestore:
                    if fields:
                        # Rebuild observation objects from the projected columns;
                        # quality is the last column of each row.
                        observations = []
                        qualities = []
                        for x in data2:
                            tmp = JOb()
                            for i in range(0, len(fields)):
                                ft = fields[i]
                                tmp[ft] = JOb(x[i])
                            tmp.fields = fields
                            observations.append(tmp)
                            qualities.append(JOb(x[-1]))
                    else:
                        # Full rows: column 3 is the payload, column 4 quality
                        # (matches SQL.cp_observation_fields ordering).
                        observations = [JOb(x[3]) for x in data2]
                        qualities = [JOb(x[4]) for x in data2]
                    g = self.rm.annotator.annotateObservation(observations, sd, None, qualities)
                    del observations
                    del qualities
                    del query
                    return g.serialize(format=format)
                else:
                    return "Error: requires messagebus or triplestore to be enabled"
            else:
                # search in all columns in each row for a datetime.datetime and parse it
                # NOTE(review): map() is lazy on Python 3; this relies on
                # Python 2 list semantics -- confirm before porting.
                for i in range(0, len(data2)):
                    data2[i] = map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S") if isinstance(x, datetime.datetime) else x, data2[i])
                json_list = []
                for x in data2:
                    if fields:
                        # y = JOb({})
                        y = {}
                        for i in range(0, len(fields)):
                            # ft = fields[i]
                            # y[ft] = JOb(x[i])
                            y[fields[i]] = x[i]
                        # y.quality = JOb(x[-1])
                        # y.fields = fields
                        y["fields"] = fields
                        y["quality"] = x[-1]
                    else:
                        # y = JOb(x[3])
                        # y.quality = JOb(x[4])
                        y = x[3]
                        y["quality"] = x[4]
                    json_list.append(y)
                del query
                del data2
                # return JOb(json_list).dumps()
                return json_list
        except Exception as e:
            L.e("SQL:", e)
            L.e("SQL query used:", query)
            return "Error: " + str(e)
def data_timeframe(self, uuid):
"""
return the minimum and maximum sampling_time as tuple (min, max) for a sensor identified by the parameter uuid.
If no sensor with the uuid is found in the database (None, None) is returned.
:param uuid:
:return:
"""
query = "SELECT min(sampling_time), max(sampling_time) FROM %s.cp_observations WHERE sensor_uuid = '%s';" % (SQL.SCHEMA, uuid)
# need a new cursor object to no interfere with the state of the class's inserting cursor
cursor = self.conn.cursor()
cursor.execute(query)
data = cursor.fetchall()
if len(data) == 1:
d1, d2 = data[0]
return d1.strftime("%Y-%m-%dT%H:%M:%S%z"), d2.strftime("%Y-%m-%dT%H:%M:%S%z")
else:
return None, None
def get_observations_service_category(self, service_category, start=None, end=None, offset=0):
cursor = self.conn.cursor()
query = "SELECT sensor_uuid from cp_sensors WHERE sercvice_category = '" + service_category + "';"
cursor.execute(query)
data = cursor.fetchall()
all_observations = []
for sensor_uuid, in data:
all_observations.append(self.get_observations(sensor_uuid, start, end, format='nt', offset=offset))
return all_observations
|
import cppy.cppy
import os
import fnmatch
import filecmp
def test_file(file):
    """Expand src/<file> with cppy into dst/ and diff it against ref/.

    Prints a pass/fail line and returns True when dst matches ref.
    """
    src = os.path.join('src', file)
    dst = os.path.join('dst', file)
    ref = os.path.join('ref', file)
    cppy.cppy.expand(src, dst, newline='\n')
    passed = filecmp.cmp(dst, ref)
    if passed:
        print('Passed: ' + file)
    else:
        print('FAILED: ' + file)
    return passed
def run():
    """Run the cppy expansion test for every matching file under src/.

    Creates dst/ on demand, tests each *.h/*.cpp/*.xml/*.html file in sorted
    order, and prints an overall pass/fail summary.
    """
    if not os.path.exists('dst'):
        os.mkdir('dst')
    num = 0
    failed = 0
    # Renamed loop variables: the originals shadowed the builtins
    # `file` and `filter`.
    for name in sorted(os.listdir('src')):
        for pattern in ('*.h', '*.cpp', '*.xml', '*.html'):
            if fnmatch.fnmatch(name, pattern):
                num += 1
                if not test_file(name):
                    failed += 1
    if failed == 0:
        print('PASSED ALL ' + str(num) + ' TESTS')
    else:
        print('FAILED ' + str(failed) + ' OF ' + str(num) + ' TESTS')


if __name__ == '__main__':
    run()
|
import sys
import os
from subprocess import Popen, PIPE, STDOUT
# ANSI colour escape codes used to highlight console output.
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
LIGHT_PURPLE = '\033[94m'
PURPLE = '\033[95m'
END = '\033[0m'
# Compare every PNG in src_dir against its counterpart in new_dir using
# ImageMagick's `compare`, writing visual diffs into the changes/ directory.
# Python 2 script (print statements).
if __name__ == '__main__':
    try:
        src_dir = sys.argv[1]
        new_dir = sys.argv[2]
    except IndexError:
        print "Usage: python do_compare.py src_dir new_idr"
        sys.exit(0)
    diff_dir = 'changes'
    for root, dirs, files in os.walk('.'):
        # Only descend into directories whose path contains src_dir.
        if src_dir in root:
            for f in files:
                if f.endswith('png'):
                    print "%(purple)sCompare%(end)s %(red)s%(file_name)s%(end)s\n" % {'purple': PURPLE, 'end': END, 'file_name': f, 'red': RED}
                    src_f = "%s/%s" % (src_dir, f)
                    new_f = "%s/%s" % (new_dir, f)
                    diff_f = "%s/%s" % (diff_dir, f)
                    # print src_f, new_f, diff_f
                    # PAE = peak absolute error metric; -extract limits the
                    # comparison to the top-left 1024x768 region.
                    # stderr is merged into stdout (stderr=STDOUT), so the
                    # second element of communicate() is always None here.
                    proccess = Popen(["compare", "-extract", "1024x768", "-verbose", "-metric", "PAE", src_f, new_f, diff_f], stdout=PIPE, stderr=STDOUT, close_fds=True)
                    stdoutdata, stderrdata = proccess.communicate()
                    print stdoutdata
                    proccess.stdout.close()
                    print "----------------------------\n"
|
# Exploratory script: cluster Weibo tweets with KMeans on a PCA-reduced
# TF-IDF matrix and plot the clusters. Python 2 (print statements).
if __name__ == '__main__' and __package__ is None:
    # When run directly, adjust sys.path so the local `lib` package resolves.
    from test_helpers import TestHelpers
    helpers=TestHelpers()
    helpers.add_relative_path()
from lib.mongo import MongoDB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans #, MiniBatchKMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import numpy as np
import pylab as pl
from time import time
import logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# Experiment parameters.
collection="week1"
proto_count=0 # number of protomemes to extract. Use zero to process all
nbRecords=5000
db=MongoDB("weibodata").db
data=db[collection]
tweets_count=data.count()
print 10*"-"
print str(tweets_count)+" tweets in the db"
print "Loading tweets from db..."
# Only fetch documents whose pre-tokenised "dico" field is non-empty.
_type="dico"
query={_type: {"$not": {"$size": 0} } }
tweets=data.find(query).limit(nbRecords)
corpus=[]
# Common boilerplate tweets ("repost", "share picture") intended as stopwords.
STOP_TWEETS=["转发微博","轉發微博","分享图片"]
for t in list(tweets):
    # Join the token list back into a whitespace-separated document.
    dico=' '.join(x for x in t["dico"]) # TODO : if x not in STOP_TWEETS
    # print dico
    corpus.append(dico)
print "%d tweets in corpus" % len(corpus)
print
true_k= 10
print "Extracting features from the training dataset using a sparse vectorizer"
t0 = time()
tfidf_vectorizer = TfidfVectorizer()
tfidf_matrix = tfidf_vectorizer.fit_transform(corpus)
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % tfidf_matrix.shape
print
print
# Reduce to 2D for plotting, then cluster in the reduced space.
n_digits=10
reduced_data = PCA(n_components=2).fit_transform(tfidf_matrix.toarray())
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
h = 1 # point in the mesh [x_min, m_max]x[y_min, y_max].
# NOTE(review): these bounds look inverted -- the usual recipe is
# min - 1 / max + 1; with min + 1 / max - 1 the arange(x_max, x_min, h)
# calls below produce empty ranges whenever max-1 > min+1. The debug
# prints suggest this was being investigated.
x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
xx, yy = np.meshgrid(np.arange(x_max,x_min, h), np.arange(y_max,y_min, h))
print x_min,x_max,y_min,y_max
print xx,yy
print np.arange(3,7,2)
print np.arange(x_max,x_min)
print np.arange(y_max,y_min)
# Predict cluster membership for every mesh point and plot decision regions.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
print xx.min(), xx.max(), yy.min(), yy.max()
pl.imshow(Z, interpolation='nearest',
          extent=(xx.min(), xx.max(), yy.min(), yy.max()),
          cmap=pl.cm.Paired,
          aspect='auto', origin='lower')
pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
centroids = kmeans.cluster_centers_
pl.scatter(centroids[:, 0], centroids[:, 1],
           marker='x', s=169, linewidths=3,
           color='w', zorder=10)
pl.title('K-means clustering on tweets dataset (PCA-reduced data)\n'
         'Centroids are marked with white cross')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drop the Day and Round models; Combat now links directly to a Group.

    Foreign keys are removed first so the dependent models can be deleted;
    the new Combat.group FK is added with a non-null default of 0 only to
    satisfy existing rows (preserve_default=False).
    """

    dependencies = [
        ('groups', '0001_initial'),
        ('tracker', '0004_auto_20171029_1059'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='day',
            name='group',
        ),
        migrations.RemoveField(
            model_name='round',
            name='combat',
        ),
        migrations.RemoveField(
            model_name='combat',
            name='game_day',
        ),
        migrations.AddField(
            model_name='combat',
            name='group',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='groups.Group'),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Day',
        ),
        migrations.DeleteModel(
            name='Round',
        ),
    ]
|
# Count overlapping occurrences of 'bob' (case-insensitive) in the string s.
# NOTE(review): `s` must be defined before this snippet runs (e.g. by an
# enclosing grader script) -- it is not defined here. Python 2 print statement.
count = 0
for index in range(len(s) - 2):
    # Slide a 3-character window across s; lowercase it before comparing so
    # 'Bob'/'BOB' also count.
    if s[index : index + 3].lower() == 'bob':
        count += 1
print count
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import xml.etree.ElementTree as ET
from compas import _iotools
from compas.files._xml.xml_cpython import prettify_string # doesn't need special handling for pre-3.8 so we just import
__all__ = [
'xml_from_file',
'xml_from_string',
'prettify_string',
]
def xml_from_file(source, tree_parser=None):
    """Parse XML incrementally from *source* and return the root element.

    A custom tree parser is not supported on CPython 3.7 and older; passing
    one raises NotImplementedError.
    """
    if tree_parser:
        raise NotImplementedError('XML parsing on CPython 3.7 and older does not support a custom tree parser')
    pull_parser = ET.XMLPullParser(events=('start', 'start-ns'))
    with _iotools.open_file(source) as stream:
        for chunk in _iotools.iter_file(stream):
            pull_parser.feed(chunk)
    return _process_all_events(pull_parser)
def xml_from_string(text, tree_parser=None):
    """Parse an XML document held in *text* and return the root element.

    A custom tree parser is not supported on CPython 3.7 and older; passing
    one raises NotImplementedError.
    """
    if tree_parser:
        raise NotImplementedError('XML parsing on CPython 3.7 and older does not support a custom tree parser')
    pull_parser = ET.XMLPullParser(events=('start', 'end', 'start-ns', 'end-ns'))
    pull_parser.feed(text)
    return _process_all_events(pull_parser)
def _process_all_events(parser):
root = None
current_namespaces = {}
for event, event_data in parser.read_events():
if event == 'start':
element = event_data
if not root:
root = element
if len(current_namespaces):
element.attrib.update(current_namespaces)
current_namespaces = {}
if event == 'start-ns':
prefix, uri = event_data
ns_prefix = 'xmlns:' + prefix if prefix else 'xmlns'
current_namespaces[ns_prefix] = uri
parser.close()
return root
|
import time
import logging
from flask import request
from flask.ext.restful import Resource, abort
from werkzeug.exceptions import HTTPException
from ..config import Config
from ..utils import udefault
from ..auth import Authentication
from ..dao.mongo.daomongo import DaoMongo
class DspHandler(Resource):
    """REST resource for creating, listing, updating and soft-deleting DSP
    (advertiser) records stored in MongoDB.

    DaoMongo helpers return the truthy value 2 to signal a storage error;
    those are compared with `==` (the previous `is 2` relied on CPython's
    small-int caching and raises SyntaxWarning on 3.8+).
    """
    # here build some shortcut variety to provide convenience
    __cfg = Config.cfg
    __param = __cfg['app']['param']
    __req = __cfg['http']['req']
    __res = __cfg['http']['res']
    __token = __cfg['http']['req']['token']
    __dsp_tabObj = __cfg['db']['mongo']['client']['dsp_tabObj']
    __dsp = __cfg['model']['dsp']
    __fields = __res['fields']
    _auth = ()

    @classmethod
    def set_parser(cls, parser):
        """Attach a request parser to the class and return the class (fluent)."""
        cls.parser = parser
        return cls

    def post(self):
        ''' create advertisers' info '''
        # Authentication is currently disabled for this endpoint.
        # self._auth = _assert, _code = Authentication.verify(self.__token, \
        #     request.headers.get(self.__param['access_token']), self.__res)
        # if _code: return self._auth
        try:
            json_req = request.get_json()
        except HTTPException as ex:
            abort(self.__res['code'][500], message=ex)
        # check format by using [ jsonschema ] here
        schemapath = Config.cfg['path']['schema']['dsp']
        ok, ex = udefault.check_schema(json_req, schemapath)
        if not ok:
            abort(self.__res['code'][400], message=ex.message)
        # insert data here in mongo; bookkeeping fields are only set when the
        # client did not provide them.
        json_req.setdefault(self.__dsp['existence'], True)
        json_req.setdefault(self.__dsp['created'], time.time())
        json_req.setdefault(self.__dsp['updated'], time.time())
        result = DaoMongo.insert_one(self.__dsp_tabObj, json_req)
        if result:
            return {
                self.__fields['id']: str(result),
                self.__fields['access_token']: self.__req['token'],
                self.__fields['message']: self.__res['desc']['dsp201']
            }
        # TODO when error occured that log is too long
        abort(self.__res['code'][500], message=self.__res['desc']['insert500'])

    def delete(self):
        ''' remove advertisers's info '''
        self._auth = _assert, _code = Authentication.verify(self.__token, \
            request.headers.get(self.__param['access_token']), self.__res)
        if _code: return self._auth
        args = self.parser.parse_args()
        try:
            id_val = udefault.get_objId(args[self.__param['id']])
        except Exception:  # narrowed from a bare except (don't mask SystemExit)
            abort(self.__res['code'][400], message=self.__res['desc']['del400'])
        # Soft delete: mark the record as non-existent instead of removing it.
        update_info = {
            self.__dsp['existence']: False,
            self.__dsp['updated']: time.time()
        }
        update_one_result = DaoMongo.update_one(self.__dsp_tabObj, \
            '_id', id_val, update_info)
        if update_one_result == 2:
            abort(self.__res['code'][500])
        return self.__res['desc']['del200']

    def put(self):
        ''' modify advertisers' info '''
        self._auth = _assert, _code = Authentication.verify(self.__token, \
            request.headers.get(self.__param['access_token']), self.__res)
        if _code: return self._auth
        args = self.parser.parse_args()
        try:
            id_val = udefault.get_objId(args[self.__param['id']])
            name_val = args[self.__param['name']]
            burl_val = args[self.__param['burl']]
        except Exception:  # narrowed from a bare except
            abort(self.__res['code'][400], message=self.__res['desc']['put400'])
        # Build the update document from whichever optional fields were given
        # (replaces three duplicated if/elif branches).
        update_info = {self.__dsp['updated']: time.time()}
        if name_val:
            update_info[self.__dsp['name']] = name_val
        if burl_val:
            update_info[self.__dsp['burl']] = burl_val
        if len(update_info) == 1:
            # neither name nor burl supplied -> nothing to update
            abort(self.__res['code'][400], message=self.__res['desc']['update400'])
        result = DaoMongo.update_one(self.__dsp_tabObj, '_id', id_val, update_info)
        if result:
            if result == 2:
                abort(self.__res['code'][500], message=self.__res['desc']['update500'])
            return self.__res['desc']['put200']
        return self.__res['desc']['putno200']

    def get(self):
        ''' query all from advertisers' info '''
        # Authentication is currently disabled for this endpoint.
        # self._auth = _assert, _code = Authentication.verify(self.__token, \
        #     request.headers.get(self.__param['access_token']), self.__res)
        # if _code: return self._auth
        result = DaoMongo.find_all(self.__dsp_tabObj)
        if result:
            if result == 2:
                # fixed: the code table is keyed by int (500) elsewhere, not '500'
                abort(self.__res['code'][500], message=self.__res['desc']['getall500'])
            real_res = []
            for per in result:
                # Strip internal bookkeeping fields and expose _id as a string id.
                per.pop(self.__dsp['existence'])
                per.pop(self.__dsp['created'])
                per.pop(self.__dsp['updated'])
                per[self.__dsp['id']] = str(per.pop('_id'))
                real_res.append(per)
            return real_res
        return self.__res['desc']['getall200']
class DspHandlerOne(Resource):
    """REST resource for fetching a single DSP (advertiser) record by id."""

    __cfg = Config.cfg
    __param = __cfg['app']['param']
    __token = __cfg['http']['req']['token']
    __res = __cfg['http']['res']
    __dsp_tabObj = __cfg['db']['mongo']['client']['dsp_tabObj']
    __dsp = __cfg['model']['dsp']
    _auth = ()

    @classmethod
    def set_parser(cls, parser):
        """Attach a request parser to the class and return the class (fluent)."""
        cls.parser = parser
        return cls

    def get(self, id):
        ''' query one dsp info from advertisers' records '''
        # `id` shadows the builtin but is kept: Flask passes the URL rule
        # variable by this keyword name.
        # Authentication is currently disabled for this endpoint.
        # self._auth = _assert, _code = Authentication.verify(self.__token, \
        #     request.headers.get(self.__param['access_token']), self.__res)
        # if _code: return self._auth
        try:
            id = udefault.get_objId(id)
        except Exception:  # narrowed from a bare except
            abort(self.__res['code'][400], message=self.__res['desc']['getone400'])
        result = DaoMongo.find_one(self.__dsp_tabObj, '_id', id)
        if result:
            # DaoMongo signals a storage error with the value 2; compare with
            # == (the previous `is 2` relied on small-int caching).
            if result == 2:
                # fixed: the code table is keyed by int (500) elsewhere, not '500'
                abort(self.__res['code'][500], message=self.__res['desc']['getone500'])
            if not result[self.__dsp['existence']]:
                # Soft-deleted records are reported as missing.
                abort(self.__res['code'][404])
            # Strip internal bookkeeping fields and expose _id as a string id.
            result.pop(self.__dsp['existence'])
            result.pop(self.__dsp['created'])
            result.pop(self.__dsp['updated'])
            result[self.__dsp['id']] = str(result.pop('_id'))
            return result
        return self.__res['desc']['getone200']
|
"""
Sphinx extension for Hades's options
"""
from typing import Any
import sphinx.ext.autodoc
from docutils.parsers.rst.roles import CustomRole, code_role
from sphinx.application import Sphinx
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.util.docfields import Field, GroupedField
from sphinx.util.docstrings import prepare_docstring
from hades.config.base import Compute, Option, OptionMeta, qualified_name
class OptionDirective(ObjectDescription):
    """reST directive describing a single Hades configuration option."""

    # Field-list entries recognised inside the directive body.
    doc_field_types = [
        Field('default', label='Default'),
        Field('required', label='Required'),
        Field('static-check', label='Static Check'),
        Field('runtime-check', label='Runtime Check'),
        GroupedField('type', label='Types'),
    ]

    def add_target_and_index(self, name, sig, signode):
        """Register the link target, object inventory entry and index entry
        for the option called *name*."""
        targetname = self.objtype + name
        if targetname not in self.state.document.ids:
            # First description of this target in the document: make the
            # signature node linkable and record it for cross-referencing.
            signode['names'].append(targetname)
            signode['ids'].append(targetname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)
            inv = self.env.domaindata[self.domain]['objects']
            if name in inv:
                # Duplicate definitions would make cross-references ambiguous.
                self.state_machine.reporter.warning(
                    'duplicate option description of {}, other instance in {}'
                    .format(name, self.env.doc2path(inv[name][0])),
                    line=self.lineno)
            inv[name] = self.env.docname
        self.indexnode['entries'].append(('pair: option; ' + name, name,
                                          targetname, '', None))
class HadesDomain(Domain):
    """Sphinx domain providing the ``hades:option`` directive and role."""

    name = 'hades'
    label = 'Hades'
    object_types = {
        'option': ObjType('Option', 'option'),
    }
    directives = {
        'option': OptionDirective,
    }
    roles = {
        # Cross-reference role: :hades:option:`NAME`
        'option': XRefRole(),
    }
class OptionDocumenter(sphinx.ext.autodoc.ClassDocumenter):
    """autodoc documenter that renders :class:`Option` subclasses as
    ``.. hades:option::`` directives with default/type/check fields."""

    priority = 10
    domain = HadesDomain.name
    objtype = 'option'

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool,
                            parent: Any) -> bool:
        """Handle only classes deriving from :class:`Option`."""
        return isinstance(member, type) and issubclass(member, Option)

    def add_field(self, name: str, body: str, sourcename: str):
        """
        Add a field list item

        :param name: Name of the field
        :param body: Body of the field, multiple lines will be indented
        :param sourcename: Source name
        """
        lines = iter(prepare_docstring(body))
        self.add_line(":{}:".format(name), sourcename)
        original_indent = self.indent
        self.indent += '    '
        for line in lines:
            self.add_line(line, sourcename)
        self.indent = original_indent
        self.add_line("", sourcename)

    def generate(self, more_content=None, real_modname=None,
                 check_module: bool = False, all_members: bool = False):
        """Emit the reST describing one Option subclass."""
        self.parse_name()
        self.import_object()
        option = self.object  # type: OptionMeta
        sourcename = self.get_sourcename()
        name = option.__name__
        self.add_line(".. hades:option:: " + name, sourcename)
        self.add_line("", sourcename)
        self.indent += self.content_indent
        self.add_content(more_content)
        self.add_line("", sourcename)
        if option.required:
            self.add_line(":required: This option is **required**.", sourcename)
            self.add_line("", sourcename)
        if option.has_default:
            # Computed defaults document themselves via their docstring;
            # plain values are shown as highlighted Python literals.
            self.add_field("default", (
                option.default.__doc__
                if isinstance(option.default, Compute) else
                ":python:`{!r}`".format(option.default)
            ), sourcename)
        # Normalise option.type (None, single type, or tuple) to a tuple.
        if option.type is None:
            types = ()
        elif isinstance(option.type, tuple):
            types = option.type
        else:
            types = (option.type,)
        for t in types:
            self.add_field("type", ":class:`{}`"
                           .format(qualified_name(t)), sourcename)
        if option.static_check is not None:
            self.add_field("Static Check", option.static_check.__doc__,
                           sourcename)
        if option.runtime_check is not None:
            self.add_field("Runtime Check", option.runtime_check.__doc__,
                           sourcename)
        # Removed leftover debug prints that dumped the generated reST
        # (self.directive.result) to stdout on every documented option.
def setup(app: Sphinx):
    """Register the Hades domain, code roles and autodocumenter with Sphinx."""
    # Inline-code roles with syntax highlighting, one per language.
    for language in ('python', 'sql'):
        app.add_role(language, CustomRole(
            language, code_role, {'language': language, 'class': ['highlight']}
        ))
    app.add_domain(HadesDomain)
    app.add_autodocumenter(OptionDocumenter)
    return {
        'parallel_read_safe': True,
    }
|
from openmdao.main.api import VariableTree
from openmdao.main.datatypes.api import Float
class Geometry(VariableTree):
    """Container of variables defining the mixer-ejector geometry"""

    # Fixed copy-paste in desc: this is the length, not the width.
    length = Float(10.0, units='ft', desc='Ejector length')
    width = Float(6.0, units='ft', desc='Ejector width')
    Apri = Float(6.00, units='ft**2', desc='Primary nozzle exit area')
    Asec = Float(8.40, units='ft**2', desc='Secondary nozzle exit area')
    Aexit = Float(13.68, units='ft**2', desc='Ejector exit area')
    AsAp = Float(1.4, desc='Area ratio (secondary area/primary area)')
    AR = Float(1.25, desc='Aspect ratio of the ejector at the inlet')
    AeAt = Float(0.95, desc='Area ratio (exit area/total inlet area)')
    ChuteAngles = Float(-10.0, units='deg',
                        desc='Chute divergence angles. Outer tangent line relative to ejector flow surface at nozzle downstream of mixer exit')
    Num_Lobes = Float(18.0, desc='Number of spokes or lobes of the mixer')
    LhMh = Float(0.90, desc='Lobe height to mixing section height (from centerline) ratio (chute penetration)')
    LhWave = Float(2.88, desc='Lobe height to wavelength ratio')

    def calc_geom(self, length, Apri, AsAp, AR, AeAt, LhMh, LhWave):
        """Store the primary inputs and derive the dependent geometry values
        (secondary/exit areas, width, number of lobes)."""
        self.length = length
        self.Apri = Apri
        self.AsAp = AsAp
        self.AR = AR
        # Fixed typo: previously assigned self.AeAT (capital T), which created
        # a stray attribute and left the declared AeAt variable unset.
        self.AeAt = AeAt
        self.LhMh = LhMh
        self.LhWave = LhWave
        self.Asec = AsAp * Apri
        self.Aexit = AeAt * (Apri + self.Asec)
        self.width = (AR * (Apri + self.Asec)) ** 0.5
        self.Num_Lobes = 4 * self.width ** 2 * LhWave / ((Apri + self.Asec) * LhMh)
|
"""File containing some simple helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def diag(diag_elements):
  """Function to create tensorflow diagonal matrix with input diagonal entries.

  Args:
    diag_elements: tensor with diagonal elements

  Returns:
    tf matrix with diagonal entries as diag_elements
  """
  # tf.linalg.diag is the non-deprecated spelling of tf.diag (removed in TF2)
  # and matches the usage elsewhere in this module.
  return tf.linalg.diag(tf.reshape(diag_elements, [-1]))
def initialize_dual(neural_net_params_object, init_dual_file=None,
                    random_init_variance=0.01, init_nu=200.0):
  """Function to initialize the dual variables of the class.

  Args:
    neural_net_params_object: Object with the neural net weights, biases
      and types
    init_dual_file: Path to file containing dual variables, if the path
      is empty, perform random initialization
      Expects numpy dictionary with
      lambda_pos_0, lambda_pos_1, ..
      lambda_neg_0, lambda_neg_1, ..
      lambda_quad_0, lambda_quad_1, ..
      lambda_lu_0, lambda_lu_1, ..
    random_init_variance: variance for random initialization
    init_nu: Value to initialize nu variable with

  Returns:
    dual_var: dual variables initialized appropriately.
  """
  # One list per dual-variable family; iteration order (pos, neg, quad, lu)
  # matches the original unrolled code, so the sequence of np.random draws --
  # and thus the initial values under a fixed seed -- is unchanged.
  names = ('lambda_pos', 'lambda_neg', 'lambda_quad', 'lambda_lu')
  dual_lists = {name: [] for name in names}
  if init_dual_file is None:
    # Random initialization: one (sizes[i], 1) uniform draw per family/layer.
    for i in range(0, neural_net_params_object.num_hidden_layers + 1):
      for name in names:
        initializer = (np.random.uniform(0, random_init_variance, size=(
            neural_net_params_object.sizes[i], 1))).astype(np.float32)
        dual_lists[name].append(tf.get_variable(name + '_' + str(i),
                                                initializer=initializer,
                                                dtype=tf.float32))
    nu = tf.get_variable('nu', initializer=init_nu)
  else:
    # Loading from file
    dual_var_init_val = np.load(init_dual_file).item()
    for i in range(0, neural_net_params_object.num_hidden_layers + 1):
      for name in names:
        dual_lists[name].append(
            tf.get_variable(name + '_' + str(i),
                            initializer=dual_var_init_val[name][i],
                            dtype=tf.float32))
    nu = tf.get_variable('nu', initializer=1.0*dual_var_init_val['nu'])
  dual_var = {'lambda_pos': dual_lists['lambda_pos'],
              'lambda_neg': dual_lists['lambda_neg'],
              'lambda_quad': dual_lists['lambda_quad'],
              'lambda_lu': dual_lists['lambda_lu'], 'nu': nu}
  return dual_var
def eig_one_step(current_vector, learning_rate, vector_prod_fn):
  """Function that performs one step of gd (variant) for min eigen value.

  Args:
    current_vector: current estimate of the eigen vector with minimum eigen
      value.
    learning_rate: learning rate.
    vector_prod_fn: function which returns product H*x, where H is a matrix for
      which we computing eigenvector.

  Returns:
    updated vector after one step

  NOTE(review): the Python-level `if` statements below branch on tensor
  values, which only works under eager execution -- confirm this is never
  called while building a TF1 graph.
  """
  grad = 2*vector_prod_fn(current_vector)
  # Current objective = (1/2)*v^T (2*M*v); v = current_vector
  # grad = 2*M*v
  current_objective = tf.reshape(tf.matmul(tf.transpose(current_vector),
                                           grad) / 2., shape=())

  # Project the gradient into the tangent space of the constraint region.
  # This way we do not waste time taking steps that try to change the
  # norm of current_vector
  grad = grad - current_vector*tf.matmul(tf.transpose(current_vector), grad)
  grad_norm = tf.norm(grad)
  grad_norm_sq = tf.square(grad_norm)

  # Computing normalized gradient of unit norm
  norm_grad = grad / grad_norm

  # Computing directional second derivative (dsd)
  # dsd = 2*g^T M g, where g is normalized gradient
  directional_second_derivative = (
      tf.reshape(2*tf.matmul(tf.transpose(norm_grad),
                             vector_prod_fn(norm_grad)),
                 shape=()))

  # Computing grad^\top M grad [useful to compute step size later]
  # Just a rescaling of the directional_second_derivative (which uses
  # normalized gradient
  grad_m_grad = directional_second_derivative*grad_norm_sq / 2

  # Directional_second_derivative/2 = objective when vector is norm_grad
  # If this is smaller than current objective, simply return that
  if directional_second_derivative / 2. < current_objective:
    return norm_grad

  # If curvature is positive, jump to the bottom of the bowl
  if directional_second_derivative > 0.:
    step = -1. * grad_norm / directional_second_derivative
  else:
    # If the gradient is very small, do not move
    if grad_norm_sq <= 1e-16:
      step = 0.0
    else:
      # Make a heuristic guess of the step size
      step = -2. * tf.reduce_sum(current_vector*grad) / grad_norm_sq
      # Computing gain using the gradient and second derivative
      gain = -(2 * tf.reduce_sum(current_vector*grad) +
               (step*step) * grad_m_grad)

      # Fall back to pre-determined learning rate if no gain
      if gain < 0.:
        step = -learning_rate * grad_norm
  # Take the step along the projected gradient and re-normalize to keep the
  # iterate on the unit sphere.
  current_vector = current_vector + step * norm_grad
  return tf.nn.l2_normalize(current_vector)
def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):
  """Computes eigenvector which corresponds to minimum eigenvalue.

  Args:
    x: initial value of eigenvector.
    num_steps: number of optimization steps.
    learning_rate: learning rate.
    vector_prod_fn: function which takes x and returns product H*x.

  Returns:
    approximate value of eigenvector.

  This function finds approximate value of eigenvector of matrix H which
  corresponds to smallest (by absolute value) eigenvalue of H.
  It works by solving optimization problem x^{T}*H*x -> min.
  """
  # Start from a unit vector and refine it with num_steps projected
  # gradient-descent iterations.
  estimate = tf.nn.l2_normalize(x)
  for _ in range(num_steps):
    estimate = eig_one_step(estimate, learning_rate, vector_prod_fn)
  return estimate
def tf_lanczos_smallest_eigval(vector_prod_fn,
                               matrix_dim,
                               initial_vector,
                               num_iter=1000,
                               max_iter=1000,
                               collapse_tol=1e-9,
                               dtype=tf.float32):
  """Computes smallest eigenvector and eigenvalue using Lanczos in pure TF.

  This function computes smallest eigenvector and eigenvalue of the matrix
  which is implicitly specified by `vector_prod_fn`.
  `vector_prod_fn` is a function which takes `x` and returns a product of matrix
  in consideration and `x`.
  Computation is done using Lanczos algorithm, see
  https://en.wikipedia.org/wiki/Lanczos_algorithm#The_algorithm

  Args:
    vector_prod_fn: function which takes a vector as an input and returns
      matrix vector product.
    matrix_dim: dimentionality of the matrix.
    initial_vector: guess vector to start the algorithm with
    num_iter: user-defined number of iterations for the algorithm
    max_iter: maximum number of iterations.
    collapse_tol: tolerance to determine collapse of the Krylov subspace
    dtype: type of data

  Returns:
    tuple of (eigenvalue, eigenvector) of smallest eigenvalue and corresponding
    eigenvector.

  NOTE(review): the `if`/`break` on tensor values and `tf.range` loop below
  assume eager execution (or AutoGraph conversion) -- confirm.
  """
  # alpha will store diagonal elements
  alpha = tf.TensorArray(dtype, size=1, dynamic_size=True, element_shape=())
  # beta will store off diagonal elements
  beta = tf.TensorArray(dtype, size=0, dynamic_size=True, element_shape=())
  # q will store Krylov space basis
  q_vectors = tf.TensorArray(
      dtype, size=1, dynamic_size=True, element_shape=(matrix_dim, 1))

  # If start vector is all zeros, make it a random normal vector and run for max_iter
  if tf.norm(initial_vector) < collapse_tol:
    initial_vector = tf.random_normal(shape=(matrix_dim, 1), dtype=dtype)
    num_iter = max_iter
  w = initial_vector / tf.norm(initial_vector)

  # Iteration 0 of Lanczos
  q_vectors = q_vectors.write(0, w)
  w_ = vector_prod_fn(w)
  cur_alpha = tf.reduce_sum(w_ * w)
  alpha = alpha.write(0, cur_alpha)
  w_ = w_ - tf.scalar_mul(cur_alpha, w)
  w_prev = w
  w = w_

  # Subsequent iterations of Lanczos
  for i in tf.range(1, num_iter):
    cur_beta = tf.norm(w)
    if cur_beta < collapse_tol:
      # return early if Krylov subspace collapsed
      break

    # cur_beta is larger than collapse_tol,
    # so division will return finite result.
    w = w / cur_beta

    w_ = vector_prod_fn(w)
    cur_alpha = tf.reduce_sum(w_ * w)

    q_vectors = q_vectors.write(i, w)
    alpha = alpha.write(i, cur_alpha)
    beta = beta.write(i-1, cur_beta)

    # Three-term recurrence: orthogonalise against the two previous basis
    # vectors (classic Lanczos; no full re-orthogonalisation is performed).
    w_ = w_ - tf.scalar_mul(cur_alpha, w) - tf.scalar_mul(cur_beta, w_prev)
    w_prev = w
    w = w_

  alpha = alpha.stack()
  beta = beta.stack()
  q_vectors = tf.reshape(q_vectors.stack(), (-1, matrix_dim))

  # Assemble the symmetric tridiagonal matrix T = diag(alpha) with beta on
  # both off-diagonals (the two pads shift the off-diagonal block up/down).
  offdiag_submatrix = tf.linalg.diag(beta)
  tridiag_matrix = (tf.linalg.diag(alpha)
                    + tf.pad(offdiag_submatrix, [[0, 1], [1, 0]])
                    + tf.pad(offdiag_submatrix, [[1, 0], [0, 1]]))

  eigvals, eigvecs = tf.linalg.eigh(tridiag_matrix)

  # tf.linalg.eigh returns eigenvalues in ascending order, so index 0 is the
  # smallest; map its eigenvector back through the Krylov basis.
  smallest_eigval = eigvals[0]
  smallest_eigvec = tf.matmul(tf.reshape(eigvecs[:, 0], (1, -1)),
                              q_vectors)
  smallest_eigvec = smallest_eigvec / tf.norm(smallest_eigvec)
  smallest_eigvec = tf.reshape(smallest_eigvec, (matrix_dim, 1))

  return smallest_eigval, smallest_eigvec
|
from __future__ import print_function
import sys
import subprocess
import datetime
import time
import string
import argparse
import ConfigParser
from urllib import urlencode
import feedparser
from pyfiglet import Figlet
def clear_console():
    '''
    Clear previous screen.
    '''
    # Shells out to the POSIX `clear` utility; assumes a Unix-like terminal.
    subprocess.call(['clear'])
def read_config():
    '''
    Read configuration from config.ini.
    See config.ini for details on configuration.

    Returns a (city_code, unit, duration, rss_list) tuple; duration is an int.
    '''
    parser = ConfigParser.ConfigParser()
    parser.read('config.ini')
    city = parser.get('weather', 'city')
    unit = parser.get('weather', 'unit')
    duration = int(parser.get('duration', 'duration'))
    feeds = parser.items('rss')
    return city, unit, duration, feeds
def weather_now(code,unit):
    '''
    Get weather for city represented by code.
    '''
    # NOTE(review): the Yahoo Weather RSS API has been retired -- this URL
    # likely no longer resolves; verify before relying on it.
    yahoo_api = 'http://weather.yahooapis.com/forecastrss?'
    para = {'w': code, 'u': unit.lower()}
    feed_url = yahoo_api + urlencode(para)
    f = feedparser.parse(feed_url)
    # Feed title looks like "Yahoo! Weather - <City>"; take the city part.
    f_city = f['feed']['title'].split('-')[1].strip()
    f_entries = f['entries']
    f_forecast = f_entries[0]['yweather_forecast']
    weather_heading = '=============Weather==============\n'
    # Python 2 byte-string formatting: the degree sign is encoded to UTF-8 so
    # it can be concatenated with the other byte strings before decoding.
    weather_details = "{f[text]} {f[low]}{c}-{f[high]}{c} {city}\n".format(
        f=f_forecast,
        c=u'\u00b0'.encode('utf-8'),
        city=f_city
    )
    return (weather_heading + weather_details).decode('utf-8')
def time_now():
    '''
    Return current time in Figlet format.
    '''
    heading = '===============Time===============\n'
    clock = datetime.datetime.now().strftime("%H:%M")
    rendered = Figlet(font='doom').renderText(clock)
    details = rendered.rstrip() + '\n'
    # Python 2: decode the assembled byte string to unicode for display.
    return (heading + details).decode('utf-8')
def read_rss(rss_list,num,width):
    '''
    Return rss titles from rss in rss_list.
    Sort items from newest to oldest.
    Cut number of items to fit window height.

    :param rss_list: iterable of (source_name, feed_url) pairs
    :param num: maximum number of entries to return
    :param width: console width; titles are truncated to fit one line
    :return: list of (published_parsed, title, source) tuples, newest first
    '''
    rss = []
    for item in rss_list:
        source,url = item[0],item[1]
        f = feedparser.parse(url)
        f_entries = f['entries']
        for x in f_entries:
            # Truncate so "source: title" fits the console width; flatten
            # embedded newlines so each entry stays on one line.
            title = x['title'][:width - len(source + ': ')].replace('\n',' ')
            rss.append((x['published_parsed'],title,source))
    # Sort by parsed publication time, newest first.
    rss.sort(key=lambda x: x[0],reverse=True)
    return rss[:num]
def news_now(rows,columns,rss_list,rss_option):
    '''
    Return newest feed titles from given urls.
    Number of feeds to return is determined by console height.

    :param rows: console height in lines
    :param columns: console width in characters
    :param rss_list: list of (name, url) pairs from config
    :param rss_option: CLI flags — feed numbers ("0" = all) or feed names
    :return: unicode string, heading plus one "source: title" line per entry
    '''
    rss_dict = {x:y for (x,y) in rss_list}
    # Lines left for news after the weather/clock/heading sections.
    f_num = rows - 8 - 1 - 3
    # `x in string.digits` is a substring test, so any run of digit
    # characters (e.g. "12") counts as numeric selection.
    if all([x in string.digits for x in rss_option]):
        rss_option = [int(x) for x in rss_option]
        if len(rss_option) == 1:
            if rss_option[0] == 0:
                # 0 means "all configured feeds".
                feed_list = rss_list
                feed_content = read_rss(feed_list,f_num,columns)
            else:
                # Flags are 1-based indexes into the config order.
                feed_list = [rss_list[rss_option[0] - 1]]
                feed_content = read_rss(feed_list,f_num,columns)
        else:
            feed_list = [x for x in rss_list if rss_list.index(x)+1 in rss_option]
            feed_content = read_rss(feed_list,f_num,columns)
    else:
        # Non-numeric flags select feeds by their config-section names.
        try:
            feed_list = [(x,rss_dict[x]) for x in rss_option]
            feed_content = read_rss(feed_list,f_num,columns)
        except KeyError as e:
            # e.message is a Python 2-only attribute.
            sys.exit('Invalid Flag: ' + e.message)
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise
    news_heading = '===============News===============\n'
    news_details = ''
    for entry in feed_content:
        news_details += '{srs}: {title}\n'.format(srs=entry[2].encode('utf-8'),title=entry[1].encode('utf-8'))
    # Normalize the byte-assembled block back to unicode (Python 2 idiom).
    return (news_heading + news_details).decode('utf-8')
def console_size():
    '''
    Return the current console dimensions as (rows, columns).

    Used to decide how many feed entries fit on the screen.
    '''
    stty = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE)
    dims = stty.stdout.read().split()
    return int(dims[0]), int(dims[1])
def main(argv):
    '''
    Entry point: parse feed-selection flags, then redraw the dashboard
    (weather + clock + news) once a minute until the configured duration
    elapses or the user interrupts with Ctrl-C.
    '''
    parser = argparse.ArgumentParser(description='Feed choose flags.')
    parser.add_argument('flags',metavar='Flags',type=str,nargs='+',
                        help='numbers or names of feeds.')
    args = parser.parse_args(argv[1:])
    rss_option = args.flags
    city_code,unit,duration,rss_list = read_config()
    # Absolute wall-clock deadline; duration == 0 means run forever.
    end = time.time() + duration * 60
    try:
        while time.time() < end if duration !=0 else True:
            rows,columns = console_size()
            now = time_now()
            weather = weather_now(city_code,unit)
            news = news_now(rows,columns,rss_list,rss_option)
            content = weather + now + news
            clear_console()
            print(content)
            time.sleep(60)
        else:
            # while/else: runs only when the loop condition turned false
            # (deadline reached), never after a break.
            sys.exit('Time up. Exit now.')
    except KeyboardInterrupt:
        sys.exit(1)
if __name__ == '__main__':
    main(sys.argv)
|
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from matplotlib import animation, cm
from numpy.linalg import inv
from mpl_toolkits.mplot3d import Axes3D
import tables as tb
import subprocess
from scipy.optimize import brentq
import sys
def quick_plot(input_filename=None, filename=None, start=0):
    """Render one PNG frame per timestep from a SWERVE HDF5 output file.

    :param input_filename: simulation input file with grid parameters
        (defaults to 'input_file.txt')
    :param filename: data file stem; '<filename>.h5' is read and frames go
        to '<dir>/plotting/<name>_NNNNN.png'
    :param start: first timestep index to render (allows resuming)
    """
    # the other version was really slow - this does it by hand, making a load of png files then using ffmpeg to stitch them together. It finishes by deleting all the pngs.
    # set defaults
    if input_filename is None:
        input_filename = 'input_file.txt'
    if filename is None:
        filename = '../../Documents/Work/swerve/iridis2'
    data_filename = filename + '.h5'
    # read input file
    input_file = open(input_filename, 'r')
    inputs = input_file.readlines()
    # Each line is "<name> <value...>"; pick out the grid parameters.
    for line in inputs:
        name, *dat = line.split()
        if name == 'nx':
            nx = int(dat[0])
        elif name == 'ny':
            ny = int(dat[0])
        elif name == 'nt':
            nt = int(dat[0])
        elif name == 'nlayers':
            nlayers = int(dat[0])
        elif name == 'xmin':
            xmin = float(dat[0])
        elif name == 'xmax':
            xmax = float(dat[0])
        elif name == 'ymin':
            ymin = float(dat[0])
        elif name == 'ymax':
            ymax = float(dat[0])
        elif name == 'dprint':
            dprint = int(dat[0])
    # Grid spacing excludes the ghost cells (hence nx-2 / ny-2).
    dx = (xmax - xmin) / (nx-2)
    dy = (ymax - ymin) / (ny-2)
    dt = 0.1 * min(dx, dy)
    input_file.close()
    # read data
    f = tb.open_file(data_filename, 'r')
    table = f.root.SwerveOutput
    # Swap axes so arrays index as [t, layer, y, x]; component 0 is D,
    # component 3 is zeta (used to colour the surface).
    D_2d = np.swapaxes(table[:,:,:,:,0], 1, 3)
    zeta_2d = np.swapaxes(table[:,:,:,:,3], 1, 3)
    #D_2d[D_2d > 1.e3] = 0.
    #D_2d = D_2d[::dprint,:,:,:]
    #print(D_2d[:,:,2:-2,2:-2])
    x = np.linspace(0, xmax, num=nx-4, endpoint=False)
    y = np.linspace(0, ymax, num=ny-4, endpoint=False)
    X, Y = np.meshgrid(x,y)
    fig = plt.figure(figsize=(12,10), facecolor='w', dpi=100)
    # NOTE(review): fig.gca(projection=...) was removed in matplotlib 3.6;
    # newer versions need fig.add_subplot(projection='3d') — confirm the
    # pinned matplotlib version.
    ax = fig.gca(projection='3d')
    #print(np.shape(X), np.shape(Y), np.shape(D_2d[0,1,:,:].T))
    location = '/'.join(filename.split('/')[:-1])
    name = filename.split('/')[-1]
    for i in range(start, len(D_2d[:,0,0,0])):
        #if i % 10 == 0:
        print('Printing {}'.format(i))
        outname = location + '/plotting/' + name + '_' + format(i, '05') + '.png'
        ax.clear()
        ax.set_xlim(0,10)
        ax.set_ylim(0,10)
        ax.set_zlim(0.7,1.9)
        # Plot both layers, trimming the 2-cell ghost border.
        ax.plot_surface(X,Y,D_2d[i,1,2:-2,2:-2].T, rstride=1, cstride=2, lw=0, facecolors=cm.viridis_r(zeta_2d[i,1,2:-2,2:-2].T), antialiased=True)
        #ax.plot_wireframe(X,Y,D_2d[i,0,2:-2,2:-2].T, rstride=2, cstride=2, lw=0.1, cmap=cm.viridis, antialiased=True)
        ax.plot_surface(X,Y,D_2d[i,0,2:-2,2:-2].T, rstride=1, cstride=2, lw=0, facecolors=cm.viridis_r(zeta_2d[i,0,2:-2,2:-2].T), antialiased=True)
        plt.savefig(outname)
    # close hdf5 file
    f.close()
    # now make a video!
    """
    bashCommand = "ffmpeg -framerate 10 -pattern_type glob -i '../../Documents/Work/swerve/plotting/fv_?????.png' -c:v libx264 -r 10 " + movie_filename
    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    # delete image files
    bashCommand = "rm ../../Documents/Work/swerve/plotting/fv_*.png"
    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()"""
def find_height(D, Sx, Sy, gamma_up, M=1.0):
    """Compute a height-like surface quantity from conserved variables.

    :param D: conserved density array
    :param Sx, Sy: conserved momentum component arrays (same shape as D)
    :param gamma_up: 2x2 inverse spatial metric, indexed gamma_up[i, j]
    :param M: mass parameter (default 1.0)
    :return: array 2*M / (1 - exp(-2*D/W)), where W is the Lorentz-factor
        term sqrt(S.gamma_up.S / D**2 + 1)
    """
    # BUGFIX: the floor was previously applied in place
    # (D[D < 1.e-6] = 1.e-6), silently mutating the caller's array.
    # np.maximum floors on a fresh copy instead.
    D = np.maximum(D, 1.e-6)
    W = np.sqrt((Sx**2*gamma_up[0,0] + 2. * Sx * Sy * gamma_up[1,0]
        + Sy**2 * gamma_up[1,1]) / D**2 + 1.0)
    return 2. * M / (1. - np.exp(-2 * D / W))
def mesh_plot(input_filename=None, filename=None, print_level=0, start=0):
    """Render one PNG frame per timestep from a mesh-refined HDF5 output.

    Detects whether the selected refinement level stores shallow-water
    (4 components) or compressible (6 components) data and plots a 3D
    surface or a 1D slice accordingly.

    :param input_filename: mesh input file (defaults to 'mesh_input.txt')
    :param filename: data file stem; '<filename>.h5' is read, frames go to
        '<dir>/plotting/<name>_NNNNN.png'
    :param print_level: index into the input file's print_levels list
    :param start: first timestep index to render
    """
    # the other version was really slow - this does it by hand, making a load of png files then using ffmpeg to stitch them together. It finishes by deleting all the pngs.
    # set defaults
    if input_filename is None:
        input_filename = 'mesh_input.txt'
    if filename is None:
        filename = '../../Documents/Work/swerve/iridis2'
    data_filename = filename + '.h5'
    # read input file
    input_file = open(input_filename, 'r')
    inputs = input_file.readlines()
    for line in inputs:
        name, *dat = line.split()
        if name == 'r':
            r = int(dat[0])
        elif name == 'df':
            df = float(dat[0])
        elif name == 'gamma':
            gamma = float(dat[0])
        elif name == 'xmin':
            xmin = float(dat[0])
        elif name == 'xmax':
            xmax = float(dat[0])
        elif name == 'ymin':
            ymin = float(dat[0])
        elif name == 'ymax':
            ymax = float(dat[0])
        elif name == 'gamma_down':
            gamma_down = np.array([float(i) for i in dat])
            # For a 2x2 diagonal metric just invert the diagonal;
            # otherwise fall back to a full matrix inverse.
            if len(gamma_down) == 4:
                gamma_up = np.reshape(gamma_down, (2,2))
                gamma_up[0,0] = 1. / gamma_up[0,0]
                gamma_up[1,1] = 1. / gamma_up[1,1]
            else:
                n = int(np.sqrt(len(gamma_down)))
                gamma_up = np.reshape(gamma_down, (n,n))
                gamma_up = inv(gamma_up)
        elif name == 'dprint':
            dprint = int(dat[0])
        elif name == 'print_levels':
            print_levels = np.array([int(i) for i in dat])
    # read data
    f = tb.open_file(data_filename, 'r')
    dataset = "/level_" + str(print_levels[print_level])
    table = f.get_node(dataset)
    #if print_level == 1:
    #table = f.root.level_1
    # 4 components => shallow-water equations; otherwise compressible.
    if len(table[0,0,0,0,:]) == 4:
        # swe
        swe = True
        D = table[:,:,:,:,0]
        Sx = table[:,:,:,:,1]
        Sy = table[:,:,:,:,2]
        DX = table[:,:,:,:,3]
    else:
        swe = False
        D = table[:,:,:,:,0]
        Sx = table[:,:,:,:,1]
        Sy = table[:,:,:,:,2]
        Sz = table[:,:,:,:,3]
        tau = table[:,:,:,:,4]
        DX = table[:,:,:,:,5]
    # HACK
    # Recover grid sizes from the data itself rather than the input file.
    nx = len(D[0,0,0,:])
    ny = len(D[0,0,:,0])
    nz = len(D[0,:,0,0])
    nt = len(D[:,0,0,0])
    dx = (xmax - xmin) / (nx-2)
    dy = (ymax - ymin) / (ny-2)
    dt = 0.1 * min(dx, dy)
    input_file.close()
    """S = np.sqrt(Sx**2 + Sy**2 + Sz**2)
    def f_of_p(p, tau, D, S):
        sq = np.sqrt((tau + p + D) * (tau + p + D) - S**2)
        return (gamma - 1.0) * sq / (tau + p + D) * (sq - p * (tau + p + D) / sq - D) - p
    ps = np.zeros_like(D[:,:,:,:])
    for t in range(nt):
        print(t)
        for z in range(nz):
            for y in range(ny):
                for x in range(nx):
                    #print(tau[t,z,y,x], D[t,z,y,x], S[t,z,y,x])
                    ps[t,z,y,x] = brentq(f_of_p, 0, tau[t,z,y,x] + D[t,z,y,x] + 1., args=(tau[t,z,y,x], D[t,z,y,x], S[t,z,y,x]))"""
    if swe:
        plot_var = find_height(D, Sx, Sy, gamma_up)#np.sqrt(Sx**2 + Sy**2)#
        if nz > 1:
            plot_range = range(1,2)
        else:
            plot_range = range(1)
    else:
        plot_var = Sz#np.sqrt(Sx**2+Sy**2)#tau
        plot_range = range(2,3)
    #plot_var[np.abs(plot_var) > 1.e3] = 0.
    #plot_var[np.isnan(plot_var)] = 0.0
    #D_2d = D_2d[::dprint,:,:,:]
    # NOTE(review): threshold=np.nan raises ValueError on numpy >= 1.14;
    # modern code would use threshold=sys.maxsize — confirm numpy version.
    np.set_printoptions(threshold=np.nan)
    x = np.linspace(0, xmax, num=nx, endpoint=False)
    y = np.linspace(0, ymax, num=ny, endpoint=False)
    X, Y = np.meshgrid(x,y)
    # Forces the 3D-surface branch below regardless of data type.
    swe = True
    fig = plt.figure(figsize=(12,10), facecolor='w', dpi=100)
    if swe:
        ax = fig.gca(projection='3d')
    else:
        ax = fig.gca()
    location = '/'.join(filename.split('/')[:-1])
    name = filename.split('/')[-1]
    for i in range(start, len(D[:,0,0,0])-1):
        #if i % 10 == 0:
        print('Plotting {}'.format(i))
        outname = location + '/plotting/' + name + '_' + format(i, '05') + '.png'
        ax.clear()
        #ax.set_xlim(0,10)
        #ax.set_ylim(0,10)
        #ax.set_zlim(2.22,2.24)
        print(plot_var[i,1,:,:])
        for l in plot_range:
            #print(plot_var[i,l,:,15])
            # Normalize DX into [0, 1] for use as face colours.
            face_colours = DX[i,l,:,:].T
            face_colours[np.isnan(face_colours)] = 0.
            if abs(np.amax(face_colours)) > 0.:
                face_colours /= abs(np.amax(face_colours))
            face_colours = (face_colours - np.amin(face_colours)) / (np.amax(face_colours) - np.amin(face_colours))
            if swe:
                ax.plot_surface(X[18:-18,18:-18],Y[18:-18,18:-18],plot_var[i,l,18:-18,18:-18].T, rstride=1,
                    cstride=2, lw=0, cmap=cm.viridis_r, antialiased=True)#,
                    #facecolors=cm.viridis_r(face_colours))
            else:
                plt.plot(Y[:,0], plot_var[i,l,75,:])#
                #ax.set_ylim(0.499995, 0.500005)
        plt.savefig(outname)
    # close hdf5 file
    f.close()
if __name__ == '__main__':
    # CLI: mesh_plot.py [input_file data_file print_level]; with no
    # arguments fall back to the hard-coded local test paths.
    if len(sys.argv) > 1:
        input_filename = sys.argv[1]
        filename = sys.argv[2]
        print_level = int(sys.argv[3])
    else:
        input_filename = "testing/multiscale_input.txt"
        filename = "../../Documents/Work/swerve/multiscale_test"
        print_level = 1
    mesh_plot(input_filename=input_filename, filename=filename,
        print_level=print_level)
|
from django.db import models
from filebrowser.fields import FileBrowseField
class CommonSupport(models.Model):
    """Abstract base adding like/dislike counters and authorship metadata.

    Inherited by user-generated content models (Example, Note).
    """
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    likes = models.IntegerField(u'喜欢', default=0)
    unlikes = models.IntegerField(u'不喜欢', default=0)
    user = models.ForeignKey('authentication.Account', verbose_name=u'创建人', null=True)
    create_at = models.DateTimeField(u'创建时间', auto_now_add=True)
    class Meta:
        abstract = True
        # Newest content first by default.
        ordering = ['-create_at']
class Level(models.Model):
    """
    Word difficulty level (e.g. CET-4, CET-6) grouping many words.
    """
    name = models.CharField(u'等级名称', max_length=20)
    word = models.ManyToManyField('Word', verbose_name=u'单词')
    class Meta:
        verbose_name = u'等级'
        verbose_name_plural = u'等级管理'
    def __unicode__(self):
        # Python 2 Django string representation.
        return '{0}'.format(self.name)
class Word(models.Model):
    """
    Vocabulary word with English/Chinese definitions, examples and notes.
    """
    content = models.CharField(u'英文', max_length=40)
    en_definition = models.ManyToManyField('EnDefinition', verbose_name=u'英文定义')
    cn_definition = models.CharField(max_length=500, verbose_name=u'中文定义', null=True, blank=True)
    example = models.ManyToManyField('Example', verbose_name=u'例句')
    note = models.ManyToManyField('Note', verbose_name=u'笔记')
    def get_pronunciation(self):
        # Reverse lookup: Pronunciation holds the FK to Word.
        return Pronunciation.objects.filter(word=self)
    def get_audio(self):
        # Reverse lookup: Audio holds the FK to Word.
        return Audio.objects.filter(word=self)
    def __unicode__(self):
        return '%s %s' % (self.content, self.cn_definition)
    class Meta:
        verbose_name = u'单词'
        verbose_name_plural = u'单词管理'
class EnDefinition(models.Model):
    """
    English definition: a part of speech plus its definition entries.
    """
    pos = models.CharField(u'词性', max_length=10)
    defn = models.ManyToManyField('EnDefn', verbose_name=u'英文定义')
    class Meta:
        verbose_name = u'英文定义'
        verbose_name_plural = u'英文定义管理'
    def __unicode__(self):
        # NOTE(review): self.defn is a ManyRelatedManager, so this renders
        # the manager repr, not the definition texts — confirm intent.
        return '{0}, {1}'.format(self.pos, self.defn)
class EnDefn(models.Model):
    """
    Single English definition text (many-to-many target of EnDefinition).
    """
    content = models.CharField(u'英文定义', max_length=500)
    def __unicode__(self):
        return self.content
class Audio(models.Model):
    """
    Pronunciation audio resource locations (US and UK) for a word.
    """
    word = models.ForeignKey('Word', verbose_name=u'单词')
    us_audio = models.CharField(max_length=200, verbose_name=u'美式发音', null=True)
    uk_audio = models.CharField(max_length=200, verbose_name=u'英式发音', null=True)
    class Meta:
        verbose_name = u'发音'
        verbose_name_plural = u'发音管理'
class Pronunciation(models.Model):
    """
    Word pronunciations model with US and uk fields
    (phonetic transcriptions, as opposed to the Audio model's file paths).
    """
    word = models.ForeignKey('Word', verbose_name=u'单词')
    us = models.CharField(max_length=40, verbose_name=u'美式发音')
    uk = models.CharField(max_length=40, verbose_name=u'英式发音')
    class Meta:
        verbose_name = u'发音'
        verbose_name_plural = u'发音管理'
class Example(CommonSupport):
    """
    System-provided example sentence for a word, with its translation.
    Inherits like/dislike counters and authorship from CommonSupport.
    """
    # CONSISTENCY: u-prefixed to match the unicode verbose_name convention
    # used by every other model in this module (plain byte strings under
    # Python 2 are a mojibake hazard for non-ASCII text).
    annotation = models.CharField(u'例句', max_length=500)
    translation = models.CharField(u'译文', max_length=500)
    class Meta:
        verbose_name = u'例句'
        verbose_name_plural = u'例句管理'
    def __unicode__(self):
        return '{0} {1}'.format(self.annotation, self.translation)
class Note(CommonSupport):
    """
    User-shared Note Model: free-form note text attached via Word.note,
    with like/dislike counters and authorship from CommonSupport.
    """
    content = models.TextField()
    class Meta:
        verbose_name = u'用户笔记'
        verbose_name_plural = u'用户笔记管理'
    def __unicode__(self):
        return self.content
|
import argparse
import asyncio
import logging
import json
import time
import random
from urllib.parse import urlparse
from .proto import Client
from .exceptions import Draining
from .packets import Inserted
log = logging.getLogger(__name__)
class CallingError(Exception):
    """Raised when a `put` was sent but no reply arrived.

    In that situation it is unknown whether the task was queued (and only
    the reply was lost) or never queued at all; the caller must decide
    how to recover.
    """
class Caller(object):
    """Queues tasks onto a pool of beanstalkd servers.

    Maintains one auto-reconnecting client per host and `put`s each task
    onto a randomly chosen live connection, skipping draining servers.
    """
    def __init__(self, hosts, loop=None):
        # hosts: non-empty iterable of (host, port) pairs.
        assert hosts, hosts
        self._hosts = hosts
        self._loop = loop or asyncio.get_event_loop()
        # (host, port) -> connected Client; only live connections appear.
        self._clients = {}
        # (host, port) -> Event set while that connection is usable.
        self._events = {}
        self._tasks = [self._start_connection(h, p) for h, p in hosts]
    def _start_connection(self, host, port):
        """Spawn the reconnect loop for one server; returns its Task."""
        ev = asyncio.Event(loop=self._loop)
        self._events[host, port] = ev
        return asyncio.Task(self._reconnect(host, port, ev), loop=self._loop)
    @asyncio.coroutine
    def _reconnect(self, host, port, event):
        """Keep (re)connecting to one server until cancelled.

        Sets *event* while connected so call() can wait for a live client.
        """
        try:
            while True:
                cli = yield from self._connect(host, port)
                self._clients[host, port] = cli
                event.set()
                try:
                    yield from cli.wait_closed()
                except EOFError:
                    # Connection dropped: mark unusable and reconnect.
                    self._events[host, port].clear()
                    continue
                except asyncio.CancelledError:
                    return
                finally:
                    event.clear()
                    # May already be removed in case of draining
                    self._clients.pop((host, port), None)
        except Exception:
            log.exception("Abnormal termination for client thread")
    @asyncio.coroutine
    def _connect(self, host, port):
        """Retry connecting forever, at most one attempt per 100ms."""
        log.debug("Connecting to %r:%d", host, port)
        while True:
            try:
                start = time.time()
                cli = yield from Client.connect(host, port)
            except OSError:
                log.warning("Error establishing connection. Will retry...")
                yield from asyncio.sleep(max(0, start + 0.1 - time.time()),
                    loop=self._loop)
            else:
                return cli
    @asyncio.coroutine
    def call(self, name, args, kwargs, *,
             priority=2**31, ttr=120, delay=0, tube='default'):
        """Queue task *name* with *args*/*kwargs*; return its beanstalk URL.

        :raises CallingError: reply lost; task may or may not be queued
        :raises Exception: any error object returned by the server
        """
        # Task payload is JSON: [name, kwargs, arg1, arg2, ...].
        lst = [name, kwargs]
        lst.extend(args)
        body = json.dumps(lst, ensure_ascii=False).encode('utf-8')
        while True:
            # Block until at least one connection is live.
            while not self._clients:
                yield from asyncio.wait(
                    [ev.wait() for ev in self._events.values()],
                    return_when=asyncio.FIRST_COMPLETED, loop=self._loop)
            cli = random.choice(list(self._clients.values()))
            try:
                # Must pipeline these two
                use = cli.send_command('use', tube)
                put = cli.send_command('put', priority, delay, ttr, body=body)
                yield from use
                val = yield from put
            except EOFError:
                raise CallingError()
            if isinstance(val, Draining):
                # Server shutting down: drop it and retry on another one.
                self._clients.pop((cli.host, cli.port), None)
                log.info("Server is draining, trying next")
                continue
            elif isinstance(val, Exception):
                raise val
            assert isinstance(val, Inserted)
            return 'beanstalk://{}:{}/{}'.format(
                cli.host, cli.port, val.job_id)
    def close(self):
        """Cancel all reconnect loops (use wait_closed() to join them)."""
        for i in self._tasks:
            i.cancel()
    @asyncio.coroutine
    def wait_closed(self):
        """Wait for all connection tasks to finish after close()."""
        yield from asyncio.wait(self._tasks, loop=self._loop)
@asyncio.coroutine
def run(options, loop=None):
    """Queue a single task described by parsed CLI *options* and print its
    resulting beanstalk:// URL.
    """
    import yaml
    # Default to a local beanstalkd unless -c/--connect was given.
    conn = [('localhost', 11300)]
    if options.connect:
        conn = [(h, int(p))
                for h, p in (a.split(':', 1)
                             for a in options.connect)]
    # Arguments and keyword values are parsed as YAML so numbers, lists
    # and mappings survive the command line as real types.
    args = tuple(map(yaml.safe_load, options.arguments))
    kwargs = {k: yaml.safe_load(v) for k, v in options.kwargs}
    caller = Caller(conn, loop=loop)
    reserve = yield from caller.call(options.task_name, args, kwargs,
                                     priority=options.priority,
                                     ttr=options.ttr,
                                     delay=options.delay,
                                     tube=options.queue,
                                     )
    caller.close()
    yield from caller.wait_closed()
    print(reserve)
def main():
    """CLI entry point: parse arguments, configure logging, queue the task."""
    ap = argparse.ArgumentParser()
    ap.add_argument('-c', '--connect', metavar="HOST:PORT",
        help="Add beanstalkd server to connect to (repeatable)",
        default=[], action="append")
    ap.add_argument('--log-level',
        help="The base log level",
        default="WARNING")
    ap.add_argument('-p', '--pri', '--priority',
        help="Priority for the task (default %(default)s)",
        default=2 ** 31, type=int, dest='priority')
    ap.add_argument('-t', '--ttr', '--time-to-release',
        help="The TTR (time to release) for task (default %(default)s)",
        default=120, type=int, dest='ttr')
    ap.add_argument('-q', '--queue',
        help="The queue (tube) for task (default %(default)s)",
        default='default')
    ap.add_argument('-d', '--delay',
        help="Delay to put task with in seconds (default %(default)s)",
        default=0, type=int)
    ap.add_argument('--debug-asyncio',
        help="Enable debugging of asyncio (too verbose)",
        default=False, action="store_true")
    ap.add_argument('task_name',
        help="Full name of the task to queue")
    ap.add_argument('arguments', metavar="ARG", nargs="*",
        help="Full name of the task to queue")
    ap.add_argument('-K', nargs=2, metavar=("NAME", "VALUE"),
        help="Keyword argument",
        dest="kwargs", action="append", default=[])
    options = ap.parse_args()
    logging.basicConfig(
        level=getattr(logging, options.log_level),
        format='%(asctime)-15s %(name)s %(levelname)s %(message)s',
    )
    if not options.debug_asyncio:
        # asyncio's own DEBUG/INFO chatter drowns out task logging.
        logging.getLogger('asyncio').setLevel(logging.WARNING)
    asyncio.get_event_loop().run_until_complete(run(options))
if __name__ == '__main__':
    # NOTE(review): this deliberately shadows the local main() with the one
    # from the sibling .json module — confirm this redirection is intended.
    from .json import main  # flake8: noqa
    main()
|
import os
from setuptools import setup, find_packages
# Resolve paths relative to this setup.py so builds work from any CWD.
here = os.path.abspath(os.path.dirname(__file__))
# Reuse the README as the PyPI long description.
with open(os.path.join(here, 'README.rst')) as f:
    README = f.read()
# Runtime (and test) dependencies.
requires = [
    'pandas',
]
setup(name='genedataset',
      version='1.0.0a4',
      description='Store and access gene expression datasets and gene definitions.',
      long_description=README,
      classifiers=[
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 3",
          "Topic :: Scientific/Engineering :: Bio-Informatics",
      ],
      author='Jarny Choi',
      author_email='jarnyc@unimelb.edu.au',
      url='https://github.com/jarny/genedataset',
      keywords='gene expression dataset rna-seq',
      packages=find_packages(),
      # Ship the bundled data files declared in MANIFEST/package_data.
      include_package_data=True,
      zip_safe=False,
      install_requires=requires,
      tests_require=requires,
      )
|
from __future__ import absolute_import, print_function
import re
from time import time, mktime
import datetime
from .relativedelta import relativedelta
search_re = re.compile(r'^([^-]+)-([^-/]+)(/(.*))?$')
only_int_re = re.compile(r'^\d+$')
any_int_re = re.compile(r'^\d+')
star_or_int_re = re.compile(r'^(\d+|\*)$')
__all__ = ('croniter',)
class croniter(object):
    """Iterate over the timestamps matched by a cron expression.

    Supports the classic 5-field format (minute, hour, day-of-month,
    month, day-of-week) plus an optional 6th seconds field.  Use
    get_next()/get_prev() to step forward/backward from the start time.
    """
    MONTHS_IN_YEAR = 12
    # Inclusive (low, high) bounds for each field, in field order.
    RANGES = (
        (0, 59),
        (0, 23),
        (1, 31),
        (1, 12),
        (0, 6),
        (0, 59)
    )
    # Days per month in a non-leap year.
    DAYS = (
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
    )
    # Per-field name -> number tables (only month and weekday have names).
    ALPHACONV = (
        { },
        { },
        { },
        { 'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6,
          'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12 },
        { 'sun':0, 'mon':1, 'tue':2, 'wed':3, 'thu':4, 'fri':5, 'sat':6 },
        { }
    )
    # Per-field aliases for out-of-range values (e.g. weekday 7 == Sunday).
    LOWMAP = (
        {},
        {},
        {0: 1},
        {0: 1},
        {7: 0},
        {},
    )
    bad_length = 'Exactly 5 or 6 columns has to be specified for iterator' \
                 'expression.'

    def __init__(self, expr_format, start_time=None):
        """Parse *expr_format* and position the iterator at *start_time*.

        :param expr_format: cron string with 5 or 6 space-separated fields
        :param start_time: epoch float or datetime; defaults to "now"
        :raises ValueError: on malformed fields or wrong field count
        """
        # BUGFIX: the default used to be `start_time=time()`, which is
        # evaluated once at import, silently pinning every
        # default-constructed croniter to module-load time.
        if start_time is None:
            start_time = time()
        self.tzinfo = None
        if isinstance(start_time, datetime.datetime):
            self.tzinfo = start_time.tzinfo
            start_time = mktime(start_time.timetuple())
        self.cur = start_time
        self.exprs = expr_format.split()
        if len(self.exprs) != 5 and len(self.exprs) != 6:
            raise ValueError(self.bad_length)
        expanded = []
        for i, expr in enumerate(self.exprs):
            e_list = expr.split(',')
            res = []
            while len(e_list) > 0:
                e = e_list.pop()
                # Normalize "*/step" into an explicit "low-high/step" range.
                t = re.sub(r'^\*(/.+)$', r'%d-%d\1' % (self.RANGES[i][0],
                                                       self.RANGES[i][1]),
                           str(e))
                m = search_re.search(t)
                if m:
                    (low, high, step) = m.group(1), m.group(2), m.group(4) or 1
                    # Translate month/weekday names into numbers.
                    if not any_int_re.search(low):
                        low = self.ALPHACONV[i][low.lower()]
                    if not any_int_re.search(high):
                        high = self.ALPHACONV[i][high.lower()]
                    if (not low or not high or int(low) > int(high)
                            or not only_int_re.search(str(step))):
                        raise ValueError("[%s] is not acceptable" % expr_format)
                    low, high, step = map(int, [low, high, step])
                    # Re-queue each expanded value for individual validation.
                    for j in range(low, high + 1, step):
                        e_list.append(j)
                else:
                    if not star_or_int_re.search(t):
                        t = self.ALPHACONV[i][t.lower()]
                    try:
                        t = int(t)
                    except (ValueError, TypeError):
                        # BUGFIX: was a bare `except:`; only conversion
                        # failures (e.g. t == '*') should be tolerated.
                        pass
                    if t in self.LOWMAP[i]:
                        t = self.LOWMAP[i][t]
                    if t != '*' and (int(t) < self.RANGES[i][0] or
                                     int(t) > self.RANGES[i][1]):
                        raise ValueError("[%s] is not acceptable, out of range" % expr_format)
                    res.append(t)
            res.sort()
            expanded.append(['*'] if (len(res) == 1 and res[0] == '*') else res)
        self.expanded = expanded

    def get_next(self, ret_type=float):
        """Return the next matching time at or after the current position."""
        return self._get_next(ret_type, is_prev=False)

    def get_prev(self, ret_type=float):
        """Return the previous matching time before the current position."""
        return self._get_next(ret_type, is_prev=True)

    def get_current(self, ret_type=float):
        """Return the iterator's current position without advancing it."""
        if ret_type == datetime.datetime:
            return datetime.datetime.fromtimestamp(self.cur)
        return self.cur

    def _get_next(self, ret_type=float, is_prev=False):
        """Advance (or rewind) to the nearest matching time and return it."""
        expanded = self.expanded[:]
        if ret_type not in (float, datetime.datetime):
            raise TypeError("Invalid ret_type, only 'float' or 'datetime' "
                            "is acceptable.")
        # Cron semantics: when both day-of-month and day-of-week are
        # restricted, a time matching EITHER one is acceptable, so solve
        # each separately and keep the nearer result.
        if expanded[2][0] != '*' and expanded[4][0] != '*':
            bak = expanded[4]
            expanded[4] = ['*']
            t1 = self._calc(self.cur, expanded, is_prev)
            expanded[4] = bak
            expanded[2] = ['*']
            t2 = self._calc(self.cur, expanded, is_prev)
            if not is_prev:
                result = t1 if t1 < t2 else t2
            else:
                result = t1 if t1 > t2 else t2
        else:
            result = self._calc(self.cur, expanded, is_prev)
        self.cur = result
        if ret_type == datetime.datetime:
            result = datetime.datetime.fromtimestamp(result)
            if self.tzinfo:
                result = self.tzinfo.localize(result)
        return result

    def _calc(self, now, expanded, is_prev):
        """Core search: adjust field by field until every field matches."""
        if is_prev:
            nearest_diff_method = self._get_prev_nearest_diff
            sign = -1
        else:
            nearest_diff_method = self._get_next_nearest_diff
            sign = 1
        # Step off the current instant by one tick (1s with a seconds
        # field, else 60s) so repeated calls keep moving.
        offset = len(expanded) == 6 and 1 or 60
        dst = now = datetime.datetime.fromtimestamp(now + sign * offset)
        day, month, year = dst.day, dst.month, dst.year
        current_year = now.year
        DAYS = self.DAYS

        def proc_month(d):
            if expanded[3][0] != '*':
                diff_month = nearest_diff_method(d.month, expanded[3], self.MONTHS_IN_YEAR)
                days = DAYS[month - 1]
                if month == 2 and self.is_leap(year) == True:
                    days += 1
                reset_day = 1
                if diff_month is not None and diff_month != 0:
                    if is_prev:
                        d += relativedelta(months=diff_month)
                        reset_day = DAYS[d.month - 1]
                        d += relativedelta(day=reset_day, hour=23, minute=59, second=59)
                    else:
                        d += relativedelta(months=diff_month, day=reset_day,
                                           hour=0, minute=0, second=0)
                    return True, d
            return False, d

        def proc_day_of_month(d):
            if expanded[2][0] != '*':
                days = DAYS[month - 1]
                if month == 2 and self.is_leap(year) == True:
                    days += 1
                if is_prev:
                    days_in_prev_month = DAYS[(month - 2) % self.MONTHS_IN_YEAR]
                    diff_day = nearest_diff_method(d.day, expanded[2], days_in_prev_month)
                else:
                    diff_day = nearest_diff_method(d.day, expanded[2], days)
                if diff_day is not None and diff_day != 0:
                    if is_prev:
                        d += relativedelta(days=diff_day, hour=23, minute=59, second=59)
                    else:
                        d += relativedelta(days=diff_day, hour=0, minute=0, second=0)
                    return True, d
            return False, d

        def proc_day_of_week(d):
            if expanded[4][0] != '*':
                diff_day_of_week = nearest_diff_method(d.isoweekday() % 7, expanded[4], 7)
                if diff_day_of_week is not None and diff_day_of_week != 0:
                    if is_prev:
                        d += relativedelta(days=diff_day_of_week, hour=23, minute=59, second=59)
                    else:
                        d += relativedelta(days=diff_day_of_week, hour=0, minute=0, second=0)
                    return True, d
            return False, d

        def proc_hour(d):
            if expanded[1][0] != '*':
                diff_hour = nearest_diff_method(d.hour, expanded[1], 24)
                if diff_hour is not None and diff_hour != 0:
                    if is_prev:
                        d += relativedelta(hours=diff_hour, minute=59, second=59)
                    else:
                        d += relativedelta(hours=diff_hour, minute=0, second=0)
                    return True, d
            return False, d

        def proc_minute(d):
            if expanded[0][0] != '*':
                diff_min = nearest_diff_method(d.minute, expanded[0], 60)
                if diff_min is not None and diff_min != 0:
                    if is_prev:
                        d += relativedelta(minutes=diff_min, second=59)
                    else:
                        d += relativedelta(minutes=diff_min, second=0)
                    return True, d
            return False, d

        def proc_second(d):
            if len(expanded) == 6:
                if expanded[5][0] != '*':
                    diff_sec = nearest_diff_method(d.second, expanded[5], 60)
                    if diff_sec is not None and diff_sec != 0:
                        # BUGFIX: this used to do `dst += ...`, which raised
                        # UnboundLocalError (dst is not local to this
                        # closure) as soon as a seconds field needed
                        # adjusting; adjust the working value instead.
                        d += relativedelta(seconds=diff_sec)
                        return True, d
            else:
                d += relativedelta(second=0)
            return False, d

        procs = [proc_month,
                 proc_day_of_month,
                 proc_day_of_week,
                 proc_hour,
                 proc_minute,
                 proc_second]
        # Restart the whole cascade whenever any field had to move, since
        # moving a coarse field resets all finer ones.
        while abs(year - current_year) <= 1:
            changed_any = False
            for proc in procs:
                (changed, dst) = proc(dst)
                if changed:
                    changed_any = True
                    break
            if changed_any:
                continue
            return mktime(dst.timetuple())
        # BUGFIX: this used to `raise "failed to find prev date"`, which is
        # itself a TypeError on Python 3 (string exceptions are invalid).
        raise ValueError("failed to find matching date within a year")

    def _get_next_nearest(self, x, to_check):
        """Smallest allowed value >= x, wrapping to the start if needed."""
        small = [item for item in to_check if item < x]
        large = [item for item in to_check if item >= x]
        large.extend(small)
        return large[0]

    def _get_prev_nearest(self, x, to_check):
        """Largest allowed value <= x, wrapping to the end if needed."""
        small = [item for item in to_check if item <= x]
        large = [item for item in to_check if item > x]
        small.reverse()
        large.reverse()
        small.extend(large)
        return small[0]

    def _get_next_nearest_diff(self, x, to_check, range_val):
        """Forward distance from x to the nearest allowed value (may wrap)."""
        for i, d in enumerate(to_check):
            if d >= x:
                return d - x
        return to_check[0] - x + range_val

    def _get_prev_nearest_diff(self, x, to_check, range_val):
        """Backward (negative) distance from x to the nearest allowed value."""
        candidates = to_check[:]
        candidates.reverse()
        for d in candidates:
            if d <= x:
                return d - x
        candidate = candidates[0]
        for c in candidates:
            if c < range_val:
                candidate = c
                break
        return (candidate - x - range_val)

    def is_leap(self, year):
        """Return True for Gregorian leap years."""
        if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
            return True
        else:
            return False
if __name__ == '__main__':
    # Smoke test: first "midnight on the 1st" after 2010-01-25,
    # i.e. 2010-02-01 00:00.
    base = datetime.datetime(2010, 1, 25)
    itr = croniter('0 0 1 * *', base)
    n1 = itr.get_next(datetime.datetime)
    print(n1)
|
import csv
import sys
import urllib
if len(sys.argv) < 1:
print "Usage: %s <inputfile csv>" % sys.argv[0]
sys.exit(1)
PEOPLE_FILE = sys.argv[1]
people = []
headers = []
col_name = 'image_search'
with open(PEOPLE_FILE, 'rb') as f:
rows = csv.reader(f, delimiter=',')
headers = next(rows, None) # remove header
# init people list
if col_name not in headers:
headers.append(col_name)
# populate people list
for row in rows:
person = {}
for i, h in enumerate(headers):
if (i >= len(row)): # doesn't exist, add as blank
person[h] = ''
else:
person[h] = row[i]
query = person['name'] + ' ' + person['movie_name']
person[col_name] = 'https://www.google.com/search?newwindow=1&site=&tbm=isch&tbs=isz:l&sa=X&q=' + urllib.quote_plus(query)
people.append(person)
with open(PEOPLE_FILE, 'wb') as f:
w = csv.writer(f)
w.writerow(headers)
for p in people:
row = []
for h in headers:
row.append(p[h])
w.writerow(row)
print('Successfully updated file: '+PEOPLE_FILE)
|
"""
Django settings for Internet_store project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
# --- Paths & environment -------------------------------------------------
ROOT_DIR = environ.Path(__file__) - 3  # (Internet_store/config/settings/common.py - 3 = Internet_store/)
APPS_DIR = ROOT_DIR.path('Internet_store')
env = environ.Env()
# --- Applications --------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    # 'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
)
THIRD_PARTY_APPS = (
    'crispy_forms',  # Form layouts
    'allauth',  # registration
    'allauth.account',  # registration
    'allauth.socialaccount',  # registration
)
LOCAL_APPS = (
    # custom users app
    'Internet_store.users.apps.UsersConfig',
    'Internet_store.core',
    'Internet_store.products',
    'Internet_store.home',
    'Internet_store.carts',
    # Your stuff: custom apps go here
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# NOTE(review): MIDDLEWARE_CLASSES implies Django < 1.10 middleware style.
MIDDLEWARE_CLASSES = (
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
MIGRATION_MODULES = {
    'sites': 'Internet_store.contrib.sites.migrations'
}
# Debug defaults to off unless DJANGO_DEBUG is set in the environment.
DEBUG = env.bool('DJANGO_DEBUG', False)
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
ADMINS = (
    ("""Oleksandr Maistrov""", 'maistrovas@gmail.com'),
)
MANAGERS = ADMINS
# --- Database ------------------------------------------------------------
DATABASES = {
    # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
    'default': env.db('DATABASE_URL', default='postgres:///Internet-store'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# --- Internationalization ------------------------------------------------
TIME_ZONE = 'Europe/Kiev'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
# --- Templates -----------------------------------------------------------
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# --- Static & media files ------------------------------------------------
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MEDIA_ROOT = str(APPS_DIR('media'))
MEDIA_URL = '/media/'
# --- URLs / WSGI ---------------------------------------------------------
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
# --- Authentication (django-allauth) -------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'Internet_store.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'Internet_store.users.adapters.SocialAccountAdapter'
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# --- Celery --------------------------------------------------------------
INSTALLED_APPS += ('Internet_store.taskapp.celery.CeleryConfig',)
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
# With the Django-ORM broker, results still go to redis.
if BROKER_URL == 'django://':
    CELERY_RESULT_BACKEND = 'redis://'
else:
    CELERY_RESULT_BACKEND = BROKER_URL
# --- django-compressor ---------------------------------------------------
INSTALLED_APPS += ("compressor", )
STATICFILES_FINDERS += ("compressor.finders.CompressorFinder", )
ADMIN_URL = r'^admin/'
|
def is_prime_fast(n):
    """Primality test by trial division over 6k +/- 1 candidates.

    Every prime greater than 3 has the form 6k - 1 or 6k + 1, so after
    ruling out divisibility by 2 and 3 it suffices to test those
    candidate divisors up to sqrt(n).

    :param n: integer to test
    :return: True if n is prime, False otherwise
    """
    if n < 2:
        return False
    if n == 2 or n == 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    k = 6
    # Bug fix: the divisor k-1 must be tested while (k-1)**2 <= n.  The
    # old strict comparison `k-1 < n ** 0.5` skipped the divisor equal to
    # sqrt(n), so squares of primes (25, 121, ...) were reported prime.
    while (k - 1) * (k - 1) <= n:
        if n % (k - 1) == 0 or n % (k + 1) == 0:
            return False
        k += 6
    return True
|
from webnews import api
class WebnewsObject(object):
    """
    This object is the parent object for all webnews objects
    """
    def __init__(self, api_val):
        """
        init
        :param api_val: The api key (string) or an already-built API object
        :return:
        """
        # Bug fix: the original tested type(api) -- the imported *module* --
        # rather than the api_val argument, so the first branch was never
        # taken and an existing API object was wrapped again.
        if isinstance(api_val, (api.APINonSingle, api.API)):
            self._api = api_val
        else:
            self._api = api.API(api_val)

    def populate(self, ng):
        """
        Populate the object with values from a dictionary
        Useful for building objects from JSON
        :param ng: mapping of attribute names to values
        :return: None
        """
        for k in ng:
            setattr(self, k, ng[k])
class Newsgroup(WebnewsObject):
    # Attribute stubs for IDE autocomplete only; real values are assigned
    # by populate() from the API search response.
    unread_class, status, updated_at, created_at, name, newest_date, unread_count = ([None for i in range(7)])

    def __init__(self, api_val, name):
        """
        Look up a newsgroup by name and populate this object from the
        API search result.
        :param api_val: The api key or api object to use
        :param name: newsgroup name to search for
        """
        super(Newsgroup, self).__init__(api_val)
        ng = self._api.newsgroups_search(name)
        self.populate(ng['newsgroup'])

    def list(self, limit=20, callLimit=20):
        """
        List posts
        :param limit: Max number of posts to list
        :param callLimit: Number of posts to fetch on a single call
        :return: Each post; This is a generator
        """
        more_older = True
        oldest = [None]  # paging cursor: date of the oldest post seen so far
        while more_older and limit > 0:
            params = {}
            # PEP 8 fixes: compare with None using 'is', use 'not in'.
            if oldest[0] is not None:
                params['from_older'] = oldest[0]
            params['limit'] = callLimit
            data = self._api.newsgroup_posts(self.name, params=params)
            more_older = data['more_older']
            if 'posts_older' not in data:
                raise KeyError("Missing key in api response")
            for p in data['posts_older']:
                oldest[0] = p['post']['date']
                yield p
                limit -= 1
                if limit == 0:
                    break
|
import numpy as np
from abc import ABCMeta, abstractmethod
from ezclimate.damage_simulation import DamageSimulation
from ezclimate.forcing import Forcing
class Damage(object, metaclass=ABCMeta):
    """Abstract base class for damage models in the EZ-Climate model.

    Parameters
    ----------
    tree : `TreeModel` object
        provides the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions

    Attributes
    ----------
    tree : `TreeModel` object
        provides the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions
    """

    def __init__(self, tree, bau):
        self.tree = tree
        self.bau = bau

    @abstractmethod
    def average_mitigation(self):
        """Return a 1D array of the average mitigation for every node
        in the period.
        """

    @abstractmethod
    def damage_function(self):
        """Return a 1D array of the damages for every node in the
        period.
        """
class DLWDamage(Damage):
    """Damage class for the EZ-Climate model. Provides the damages from emissions and mitigation outcomes.
    Parameters
    ----------
    tree : `TreeModel` object
        provides the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions
    cons_growth : float
        constant consumption growth rate
    ghg_levels : ndarray or list
        end GHG levels for each end scenario
    subinterval_len : int
        length of the subintervals used when calculating forcing
    Attributes
    ----------
    tree : `TreeModel` object
        provides the tree structure used
    bau : `BusinessAsUsual` object
        business-as-usual scenario of emissions
    cons_growth : float
        constant consumption growth rate
    ghg_levels : ndarray or list
        end GHG levels for each end scenario
    dnum : int
        number of simulated damage paths
    d : ndarray
        simulated damages
    d_rcomb : ndarray
        adjusted simulated damages for recombining tree
    cum_forcings : ndarray
        cumulative forcing interpolation coefficients, used to calculate forcing based mitigation
    forcing : `Forcing` object
        class for calculating cumulative forcing and GHG levels
    damage_coefs : ndarray
        interpolation coefficients used to calculate damages
    """
    def __init__(self, tree, bau, cons_growth, ghg_levels, subinterval_len):
        super(DLWDamage, self).__init__(tree, bau)
        self.ghg_levels = ghg_levels
        # Accept a plain list for convenience; work with ndarrays internally.
        if isinstance(self.ghg_levels, list):
            self.ghg_levels = np.array(self.ghg_levels)
        self.cons_growth = cons_growth
        self.dnum = len(ghg_levels)
        self.subinterval_len = subinterval_len
        # Lazily-computed caches, filled in by _forcing_init,
        # _damage_interpolation, import_damages or damage_simulation.
        self.cum_forcings = None
        self.d = None
        self.d_rcomb = None
        self.emit_pct = None
        self.damage_coefs = None
    def _recombine_nodes(self):
        """Creating damage coefficients for recombining tree. The state reached by an up-down move is
        separate from a down-up move because in general the two paths will lead to different degrees of
        mitigation and therefore of GHG level. A 'recombining' tree is one in which the movement from
        one state to the next through time is nonetheless such that an up move followed by a down move
        leads to the same fragility.
        """
        nperiods = self.tree.num_periods
        sum_class = np.zeros(nperiods, dtype=int)
        new_state = np.zeros([nperiods, self.tree.num_final_states], dtype=int)
        temp_prob = self.tree.final_states_prob.copy()
        self.d_rcomb = self.d.copy()
        # Group final states into damage classes: the class of a state is the
        # number of set bits in its index (i.e. the number of 'down' moves on
        # the path that reaches it).
        for old_state in range(self.tree.num_final_states):
            temp = old_state
            n = nperiods-2
            d_class = 0
            while n >= 0:
                if temp >= 2**n:
                    temp -= 2**n
                    d_class += 1
                n -= 1
            sum_class[d_class] += 1
            new_state[d_class, sum_class[d_class]-1] = old_state
        sum_nodes = np.append(0, sum_class.cumsum())
        prob_sum = np.array([self.tree.final_states_prob[sum_nodes[i]:sum_nodes[i+1]].sum() for i in range(len(sum_nodes)-1)])
        # Replace each state's damage with the probability-weighted average
        # over its damage class, for every period and every scenario k.
        for period in range(nperiods):
            for k in range(self.dnum):
                d_sum = np.zeros(nperiods)
                old_state = 0
                for d_class in range(nperiods):
                    d_sum[d_class] = (self.tree.final_states_prob[old_state:old_state+sum_class[d_class]] * self.d_rcomb[k, old_state:old_state+sum_class[d_class], period]).sum()
                    old_state += sum_class[d_class]
                    self.tree.final_states_prob[new_state[d_class, 0:sum_class[d_class]]] = temp_prob[0]
                for d_class in range(nperiods):
                    self.d_rcomb[k, new_state[d_class, 0:sum_class[d_class]], period] = d_sum[d_class] / prob_sum[d_class]
        self.tree.node_prob[-len(self.tree.final_states_prob):] = self.tree.final_states_prob
        # Interior-node probabilities are the sums over their reachable
        # end states.
        for p in range(1,nperiods-1):
            nodes = self.tree.get_nodes_in_period(p)
            for node in range(nodes[0], nodes[1]+1):
                worst_end_state, best_end_state = self.tree.reachable_end_states(node, period=p)
                self.tree.node_prob[node] = self.tree.final_states_prob[worst_end_state:best_end_state+1].sum()
    def _damage_interpolation(self):
        """Create the interpolation coefficients used in `damage_function`."""
        if self.d is None:
            print("Importing stored damage simulation")
            self.import_damages()
        self._recombine_nodes()
        if self.emit_pct is None:
            # Fraction of BAU emissions mitigated in each scenario.
            bau_emission = self.bau.ghg_end - self.bau.ghg_start
            self.emit_pct = 1.0 - (self.ghg_levels-self.bau.ghg_start) / bau_emission
        self.damage_coefs = np.zeros((self.tree.num_final_states, self.tree.num_periods, self.dnum-1, self.dnum))
        amat = np.ones((self.tree.num_periods, self.dnum, self.dnum))
        bmat = np.ones((self.tree.num_periods, self.dnum))
        # Linear segment between the two highest-mitigation scenarios.
        self.damage_coefs[:, :, -1, -1] = self.d_rcomb[-1, :, :]
        self.damage_coefs[:, :, -1, -2] = (self.d_rcomb[-2, :, :] - self.d_rcomb[-1, :, :]) / self.emit_pct[-2]
        # Quadratic segment: solve a dnum x dnum linear system per final
        # state; the first row matches the derivative of the linear piece
        # at emit_pct[-2] so the two segments join smoothly.
        amat[:, 0, 0] = 2.0 * self.emit_pct[-2]
        amat[:, 1:, 0] = self.emit_pct[:-1]**2
        amat[:, 1:, 1] = self.emit_pct[:-1]
        amat[:, 0, -1] = 0.0
        for state in range(0, self.tree.num_final_states):
            bmat[:, 0] = self.damage_coefs[state, :, -1, -2] * self.emit_pct[-2]
            bmat[:, 1:] = self.d_rcomb[:-1, state, :].T
            self.damage_coefs[state, :, 0] = np.linalg.solve(amat, bmat)
    def import_damages(self, file_name="simulated_damages"):
        """Import saved simulated damages. File must be saved in 'data' directory
        inside current working directory. Save imported values in `d`.
        Parameters
        ----------
        file_name : str, optional
            name of file of saved simulated damages
        Raises
        ------
        IOError
            If file does not exist.
        """
        from ezclimate.tools import import_csv
        try:
            d = import_csv(file_name, ignore="#", header=False)
        except IOError as e:
            import sys
            print(("Could not import simulated damages:\n\t{}".format(e)))
            sys.exit(0)
        n = self.tree.num_final_states
        # The CSV holds dnum consecutive blocks of num_final_states rows,
        # one block per simulated scenario.
        self.d = np.array([d[n*i:n*(i+1)] for i in range(0, self.dnum)])
        self._damage_interpolation()
    def damage_simulation(self, draws, peak_temp=9.0, disaster_tail=12.0, tip_on=True,
        multi_tips=False, temp_map=1, temp_dist_params=None, maxh=100.0, save_simulation=True):
        """Initialization and simulation of damages, given by :mod:`ez_climate.DamageSimulation`.
        Parameters
        ----------
        draws : int
            number of Monte Carlo draws
        peak_temp : float, optional
            tipping point parameter
        disaster_tail : float, optional
            curvature of tipping point
        tip_on : bool, optional
            flag that turns tipping points on or off
        multi_tips : bool, optional
            if to allow multiple tipping points in simulation
        temp_map : int, optional
            mapping from GHG to temperature
            * 0: implies Pindyck displace gamma
            * 1: implies Wagner-Weitzman normal
            * 2: implies Roe-Baker
            * 3: implies user-defined normal
            * 4: implies user-defined gamma
        temp_dist_params : ndarray or list, optional
            if temp_map is either 3 or 4, user needs to define the distribution parameters
        maxh : float, optional
            time parameter from Pindyck which indicates the time it takes for temp to get half
            way to its max value for a given level of ghg
        save_simulation : bool, optional
            True if simulated values should be save, False otherwise
        Returns
        -------
        ndarray
            simulated damages
        """
        ds = DamageSimulation(tree=self.tree, ghg_levels=self.ghg_levels, peak_temp=peak_temp,
                            disaster_tail=disaster_tail, tip_on=tip_on, temp_map=temp_map,
                            temp_dist_params=temp_dist_params, maxh=maxh, cons_growth=self.cons_growth)
        self.ds = ds
        print("Starting damage simulation..")
        self.d = ds.simulate(draws, write_to_file=save_simulation, multiple_tipping_points=multi_tips)
        print("Done!")
        self._damage_interpolation()
        return self.d
    def _forcing_based_mitigation(self, forcing, period):
        """Calculation of mitigation based on forcing up to period. Interpolating between the forcing associated
        with the constant degree of mitigation consistent with the damage simulation scenarios.
        """
        p = period - 1
        # Piecewise-linear interpolation over the period's cumulative
        # forcings of the three simulated scenarios.
        if forcing > self.cum_forcings[p][1]:
            weight_on_sim2 = (self.cum_forcings[p][2] - forcing) / (self.cum_forcings[p][2] - self.cum_forcings[p][1])
            weight_on_sim3 = 0
        elif forcing > self.cum_forcings[p][0]:
            weight_on_sim2 = (forcing - self.cum_forcings[p][0]) / (self.cum_forcings[p][1] - self.cum_forcings[p][0])
            weight_on_sim3 = (self.cum_forcings[p][1] - forcing) / (self.cum_forcings[p][1] - self.cum_forcings[p][0])
        else:
            # Below the lowest scenario: extrapolate on the first segment.
            weight_on_sim2 = 0
            weight_on_sim3 = 1.0 + (self.cum_forcings[p][0] - forcing) / self.cum_forcings[p][0]
        return weight_on_sim2 * self.emit_pct[1] + weight_on_sim3*self.emit_pct[0]
    def _forcing_init(self):
        """Initialize `Forcing` object and cum_forcings used in calculating the force mitigation up to a node."""
        if self.emit_pct is None:
            bau_emission = self.bau.ghg_end - self.bau.ghg_start
            self.emit_pct = 1.0 - (self.ghg_levels-self.bau.ghg_start) / bau_emission
        self.cum_forcings = np.zeros((self.tree.num_periods, self.dnum))
        # Constant-mitigation paths, one per scenario, evaluated along the
        # first node of every period.
        mitigation = np.ones((self.dnum, self.tree.num_decision_nodes)) * self.emit_pct[:, np.newaxis]
        for i in range(0, self.dnum):
            for n in range(1, self.tree.num_periods+1):
                node = self.tree.get_node(n, 0)
                self.cum_forcings[n-1, i] = Forcing.forcing_at_node(mitigation[i], node, self.tree,
                                                    self.bau, self.subinterval_len)
    def average_mitigation_node(self, m, node, period=None):
        """Calculate the average mitigation until node.
        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        node : int
            node for which average mitigation is to be calculated for
        period : int, optional
            the period the node is in
        Returns
        -------
        float
            average mitigation
        """
        if period == 0:
            return 0
        if period is None:
            period = self.tree.get_period(node)
        state = self.tree.get_state(node, period)
        path = self.tree.get_path(node, period)
        new_m = m[path[:-1]]
        # Weight each period's mitigation by that period's BAU emissions
        # times its length.
        period_len = self.tree.decision_times[1:period+1] - self.tree.decision_times[:period]
        bau_emissions = self.bau.emission_by_decisions[:period]
        total_emission = np.dot(bau_emissions, period_len)
        ave_mitigation = np.dot(new_m, bau_emissions*period_len)
        return ave_mitigation / total_emission
    def average_mitigation(self, m, period):
        """Calculate the average mitigation for all node in a period.
        m : ndarray or list
            array of mitigation
        period : int
            period to calculate average mitigation for
        Returns
        -------
        ndarray
            average mitigations
        """
        nodes = self.tree.get_num_nodes_period(period)
        ave_mitigation = np.zeros(nodes)
        for i in range(nodes):
            node = self.tree.get_node(period, i)
            ave_mitigation[i] = self.average_mitigation_node(m, node, period)
        return ave_mitigation
    def _ghg_level_node(self, m, node):
        # GHG level at a single node, delegated to the Forcing helper.
        return Forcing.ghg_level_at_node(m, node, self.tree, self.bau, self.subinterval_len)
    def ghg_level_period(self, m, period=None, nodes=None):
        """Calculate the GHG levels corresponding to the given mitigation.
        Need to provide either `period` or `nodes`.
        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        period : int, optional
            what period to calculate GHG levels for
        nodes : ndarray or list, optional
            the nodes to calculate GHG levels for
        Returns
        -------
        ndarray
            GHG levels
        """
        if nodes is None and period is not None:
            start_node, end_node = self.tree.get_nodes_in_period(period)
            # Past the last decision period the final states are stored
            # after the decision nodes, so shift the node range.
            if period >= self.tree.num_periods:
                add = end_node-start_node+1
                start_node += add
                end_node += add
            nodes = np.array(list(range(start_node, end_node+1)))
        if period is None and nodes is None:
            raise ValueError("Need to give function either nodes or the period")
        ghg_level = np.zeros(len(nodes))
        for i in range(len(nodes)):
            ghg_level[i] = self._ghg_level_node(m, nodes[i])
        return ghg_level
    def ghg_level(self, m, periods=None):
        """Calculate the GHG levels for more than one period.
        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        periods : int, optional
            number of periods to calculate GHG levels for
        Returns
        -------
        ndarray
            GHG levels
        """
        if periods is None:
            periods = self.tree.num_periods-1
        # Reserve extra room for the final states when going past the last
        # decision period.
        if periods >= self.tree.num_periods:
            ghg_level = np.zeros(self.tree.num_decision_nodes+self.tree.num_final_states)
        else:
            ghg_level = np.zeros(self.tree.num_decision_nodes)
        for period in range(periods+1):
            start_node, end_node = self.tree.get_nodes_in_period(period)
            if period >= self.tree.num_periods:
                add = end_node-start_node+1
                start_node += add
                end_node += add
            nodes = np.array(list(range(start_node, end_node+1)))
            ghg_level[nodes] = self.ghg_level_period(m, nodes=nodes)
        return ghg_level
    def _damage_function_node(self, m, node):
        """Calculate the damage at any given node, based on mitigation actions in `m`."""
        # Lazily build the interpolation coefficients and forcing tables.
        if self.damage_coefs is None:
            self._damage_interpolation()
        if self.cum_forcings is None:
            self._forcing_init()
        if node == 0:
            return 0.0
        period = self.tree.get_period(node)
        forcing, ghg_level = Forcing.forcing_and_ghg_at_node(m, node, self.tree, self.bau, self.subinterval_len, "both")
        force_mitigation = self._forcing_based_mitigation(forcing, period)
        # Logistic add-on in the GHG level; the constants 0.05 and 200 are
        # model calibration values -- confirm against the EZ-Climate paper.
        ghg_extension = 1.0 / (1 + np.exp(0.05*(ghg_level-200)))
        worst_end_state, best_end_state = self.tree.reachable_end_states(node, period=period)
        probs = self.tree.final_states_prob[worst_end_state:best_end_state+1]
        # Evaluate the piecewise interpolation: linear segment, quadratic
        # segment, or exponential decay beyond the highest mitigation.
        if force_mitigation < self.emit_pct[1]:
            damage = (probs *(self.damage_coefs[worst_end_state:best_end_state+1, period-1, 1, 1] * force_mitigation \
                 + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 1, 2])).sum()
        elif force_mitigation < self.emit_pct[0]:
            damage = (probs * (self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 0] * force_mitigation**2 \
                 + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 1] * force_mitigation \
                 + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 2])).sum()
        else:
            damage = 0.0
            i = 0
            for state in range(worst_end_state, best_end_state+1):
                if self.d_rcomb[0, state, period-1] > 1e-5:
                    deriv = 2.0 * self.damage_coefs[state, period-1, 0, 0]*self.emit_pct[0] \
                        + self.damage_coefs[state, period-1, 0, 1]
                    decay_scale = deriv / (self.d_rcomb[0, state, period-1]*np.log(0.5))
                    dist = force_mitigation - self.emit_pct[0] + np.log(self.d_rcomb[0, state, period-1]) \
                           / (np.log(0.5) * decay_scale)
                    damage += probs[i] * (0.5**(decay_scale*dist) * np.exp(-np.square(force_mitigation-self.emit_pct[0])/60.0))
                i += 1
        return (damage / probs.sum()) + ghg_extension
    def damage_function(self, m, period):
        """Calculate the damage for every node in a period, based on mitigation actions `m`.
        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        period : int
            period to calculate damages for
        Returns
        -------
        ndarray
            damages
        """
        nodes = self.tree.get_num_nodes_period(period)
        damages = np.zeros(nodes)
        for i in range(nodes):
            node = self.tree.get_node(period, i)
            damages[i] = self._damage_function_node(m, node)
        return damages
|
import unittest
from os.path import abspath, join, exists
from modipyd.utils import compile_python_source
from modipyd.module import ModuleCode, compile_source, \
collect_module_code, \
read_module_code
from tests import TestCase, FILES_DIR
class TestModipydModuleCode(TestCase):
    """Tests for modipyd.module: compiling, collecting, comparing and
    reloading module code objects under the tests/files fixture tree."""
    def test_init(self):
        py = join(FILES_DIR, 'python', 'a.py')
        code = compile_source(py)
        self.assertNotNone(code)
        module = ModuleCode('a', None, py, code)
        self.assertNotNone(module)
        self.assertEqual(0, len(module.context['imports']))
    def test_collect_module_code(self):
        # Collecting under files/python should yield the package and its
        # single module.
        modules = list(collect_module_code(
            join(FILES_DIR, 'python'),
            [FILES_DIR]))
        self.assertNotNone(modules)
        self.assertEqual(2, len(modules))
        names = [m.name for m in modules]
        self.assert_('python.a' in names)
        self.assert_('python' in names)
    def read_module_code(self, modulename):
        # Helper (not itself a test): read a module by dotted name and
        # check name/filename round-trip.
        items = [FILES_DIR] + list(modulename.split('.'))
        items[-1] += '.py'
        filepath = join(*items)
        module = read_module_code(filepath, search_path=[FILES_DIR])
        self.assertNotNone(module)
        self.assertEqual(modulename, module.name)
        self.assertEqual(filepath, module.filename)
        return module
    def test_python_module(self):
        self.read_module_code('python.a')
    def test_module_equality(self):
        # Two collections of the same directory yield distinct objects
        # that compare (and hash) equal.
        modules = list(collect_module_code(
            join(FILES_DIR, 'python'),
            [FILES_DIR]))
        self.assertEqual(2, len(modules))
        module1 = modules[0]
        module2 = modules[1]
        self.assert_(module1 is not module2)
        self.assert_(not module1 == module2)
        self.assert_(module1 is module1)
        self.assert_(module1 == module1)
        modules2 = list(collect_module_code(
            join(FILES_DIR, 'python'),
            [FILES_DIR]))
        self.assert_(module1 is not modules2[0])
        self.assert_(module1 == modules2[0])
        # hash
        self.assertEqual(hash(module1), hash(module1))
        self.assertEqual(hash(module1), hash(modules2[0]))
        self.assertEqual(hash(module2), hash(modules2[1]))
    def test_read_module_code_not_existense(self):
        # Can't import a module in no package
        self.assertRaises(ImportError, self.read_module_code, 'python3.c')
    def test_imports(self):
        # Each entry is (bound name, dotted origin, level); -1 means a
        # pre-PEP-328 (implicitly relative) import.
        modcode = self.read_module_code('module_code.imports_classdefs')
        imports = modcode.context['imports']
        self.assertEqual(5, len(imports))
        # import sys
        self.assertEqual(('sys', 'sys', -1), imports[0])
        self.assertEqual(('join', 'os.path.join', -1), imports[1])
        self.assertEqual(('dirname', 'os.path.dirname', -1), imports[2])
        self.assertEqual(('unittest', 'unittest', -1), imports[3])
        self.assertEqual(('altos', 'os', -1), imports[4])
    def test_classdefs(self):
        # Each entry is (class name, tuple of base-class names).
        modcode = self.read_module_code('module_code.imports_classdefs')
        classdefs = modcode.context['classdefs']
        self.assertEqual(4, len(classdefs))
        # class A:
        self.assertEqual('A', classdefs[0][0])
        self.assertEqual(0, len(classdefs[0][1]))
        # class B(object):
        self.assertEqual('B', classdefs[1][0])
        self.assertEqual(1, len(classdefs[1][1]))
        self.assertEqual('object', classdefs[1][1][0])
        # class C(unittest.TestCase):
        self.assertEqual('C', classdefs[2][0])
        self.assertEqual(1, len(classdefs[2][1]))
        self.assertEqual('unittest.TestCase', classdefs[2][1][0])
        # class D(A, C):
        self.assertEqual('D', classdefs[3][0])
        self.assertEqual(2, len(classdefs[3][1]))
        self.assertEqual('A', classdefs[3][1][0])
    def test_python_module_reload(self):
        # Reloading from .py, .pyc and .pyo must reproduce the same
        # imports/classdefs context each time.
        search_path = abspath(join(FILES_DIR, 'imports'))
        pypath = abspath(join(search_path, 'A', 'a.py'))
        assert exists(pypath)
        pycpath = abspath(join(search_path, 'A', 'a.pyc'))
        pyopath = abspath(join(search_path, 'A', 'a.pyo'))
        if not exists(pycpath) or not exists(pyopath):
            compile_python_source(pypath)
            compile_python_source(pypath, optimization=True)
            assert exists(pycpath) and exists(pyopath)
        m = read_module_code(pypath, search_path=[search_path])
        old_imports = m.context['imports'][:]
        old_classdefs = m.context['classdefs'][:]
        for f in [pypath, pycpath, pyopath]:
            # ugly ...
            del m.context['imports'][:]
            del m.context['classdefs'][:]
            m.filename = f
            co = m.reload()
            self.assertNotNone(co)
            self.assertEqual(pypath, co.co_filename)
            self.assertEqual(old_imports, m.context['imports'])
            self.assertEqual(old_classdefs, m.context['classdefs'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
'''
Created by auto_sdk on 2015.09.11
'''
from top.api.base import RestApi
class ItemImgUploadRequest(RestApi):
    """TOP API request object for taobao.item.img.upload (item image upload)."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; all start unset until assigned by the caller.
        for field in ('id', 'image', 'is_major', 'num_iid', 'position'):
            setattr(self, field, None)

    def getapiname(self):
        """Name of the TOP API method this request maps to."""
        return 'taobao.item.img.upload'

    def getMultipartParas(self):
        """Parameters that must be sent as multipart file uploads."""
        return ['image']
|
from wiktionaryparser import WiktionaryParser
import sys
import json
# CLI: python <script> <word> <language>
# Fetches the Wiktionary entry for <word> (second argument presumably the
# language edition -- confirm against WiktionaryParser.fetch) as JSON.
parser = WiktionaryParser()
entries = parser.fetch(sys.argv[1], sys.argv[2])
print(json.dumps(entries))
|
class CalendarEntry(object):
    """A single calendar entry: a title, a description and a due date."""

    def __init__(self, title, description, due_date):
        self.entry_title = title
        self.entry_description = description
        self.entry_due_date = due_date

    def __str__(self):
        return "\tTitle: {}\n\tDescription: {}\n".format(
            self.entry_title, self.entry_description)

    def get_due_date(self):
        """Return the entry's due date."""
        return self.entry_due_date

    def get_title(self):
        """Return the entry's title."""
        return self.entry_title

    def get_description(self):
        """Return the entry's description."""
        return self.entry_description
|
import pytz
from unittest import mock
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from django.test import TestCase, RequestFactory
from django.utils import timezone
from snippets.tests.factories import SnippetFactory
from snippets.views import new_snippet, list_recently_updated_snippets
UserModel = get_user_model()
tzinfo = pytz.timezone(settings.TIME_ZONE)
current_datetime = timezone.datetime(year=2019, month=1, day=15, hour=12, tzinfo=tzinfo)
class ListRecentlyUpdatedSnippetsTests(TestCase):
    # Freezes timezone.now at current_datetime so the days-window filter
    # is deterministic.
    @mock.patch('django.utils.timezone.now', return_value=current_datetime)
    def test_should_match_only_updated_two_days_ago(self, mock_now):
        # One snippet 2 days old (inside the 3-day window) and one 4 days old.
        # NOTE(review): the expected count of 2 includes the 4-day-old
        # snippet even though the test name suggests only the 2-day-old one
        # should match -- confirm intended behavior of the days filter.
        SnippetFactory(updated_at=current_datetime - timezone.timedelta(days=2))
        SnippetFactory(updated_at=current_datetime - timezone.timedelta(days=4))
        actual = list_recently_updated_snippets(days=3)
        expected_snippet_counts = 2
        self.assertEqual(len(actual), expected_snippet_counts)
        self.assertTrue(mock_now.called)
class SnippetCreateViewTests(TestCase):
    # View tests for new_snippet, built with RequestFactory so no URL
    # routing or middleware is involved.
    def setUp(self):
        self.factory = RequestFactory()
        self.user = UserModel.objects.create_user(
            username='c-bata', email='shibata@example.com', password='secret')
    def test_should_return_200_if_sending_get_request(self):
        request = self.factory.get("/endpoint/of/create_snippet")
        request.user = self.user
        response = new_snippet(request)
        self.assertEqual(response.status_code, 200)
    def test_should_redirect_if_user_does_not_login(self):
        # Anonymous users are redirected (login_required behavior).
        request = self.factory.get("/endpoint/of/create_snippet")
        request.user = AnonymousUser()
        response = new_snippet(request)
        self.assertIsInstance(response, HttpResponseRedirect)
    def test_should_return_400_if_sending_empty_post_request(self):
        request = self.factory.post("/endpoint/of/create_snippet", data={})
        request.user = self.user
        response = new_snippet(request)
        self.assertEqual(response.status_code, 400)
    def test_should_return_201_if_sending_valid_post_request(self):
        # NOTE(review): despite the name, a successful create redirects
        # rather than returning 201 -- the assertion below is the actual
        # contract.
        request = self.factory.post("/endpoint/of/create_snippet", data={
            'title': 'hello world',
            'code': 'print("Hello World")',
            'description': 'Just printing "Hello World"',
        })
        request.user = self.user
        response = new_snippet(request)
        self.assertIsInstance(response, HttpResponseRedirect)
|
def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except that century
    years (divisible by 100) are leap years only when also divisible by 400.
    """
    # Simplified from three overlapping if-statements (one of which was
    # redundant) to the standard single boolean expression; the result is
    # unchanged for every input.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
|
import os
import sys
import vim
def main():
    """Return the visible package files in <vim_dir>/vimapt/install.

    vim_dir is taken from sys.argv[1]; entries that are directories or
    dot-files are skipped.

    :return: list of file names (not full paths)
    """
    vim_dir = sys.argv[1]
    install_dir = os.path.join(vim_dir, 'vimapt/install')
    return [
        entry for entry in os.listdir(install_dir)
        if os.path.isfile(os.path.join(install_dir, entry))
        and not os.path.basename(entry).startswith('.')
    ]
if __name__ == "__main__":
package_list = main()
# set vim's variable to make command complete function works
pkg_list_string = "[" + ",".join(["'" + i + "'" for i in package_list]) + "]"
vim.command('let s:package_remove_list = ' + pkg_list_string)
|
from app.app import create_app
from app.models import Attorney, Organization
import datetime
import pytest
import os
from selenium import webdriver
os.environ["MONGOLAB_URI"] = "mongodb://localhost/honorroll"
def base_url(live_server):
return live_server.url()
@pytest.fixture(scope='session')
def app(request):
    # Session-wide Flask application (used by pytest-flask's live_server).
    app = create_app()
    app.debug = True
    return app
@pytest.fixture(scope='session')
def driver(request):
    # Session-wide headless browser for selenium tests; quit on teardown.
    # NOTE(review): PhantomJS support is deprecated in recent Selenium
    # releases -- consider headless Chrome/Firefox.
    browser = webdriver.PhantomJS('phantomjs')
    browser.set_page_load_timeout(30)
    request.addfinalizer(lambda *args: browser.quit())
    return browser
@pytest.fixture(scope="session")
def add_attorney(request):
org = Organization(organization_name="TestOrg").save()
attorney = Attorney(
first_name="John", last_name="Doe", organization_name=org.organization_name,
email_address="john.doe@example.com",
records=[
{
"year": "2015", "honor_choice": "Honors",
"rule_49_choice": "dc", "method_added": "bulk",
"date_modified": datetime.datetime.now()
}
]
).save()
def teardown():
Attorney.objects.delete()
Organization.objects.delete()
request.addfinalizer(teardown)
return add_attorney
|
import _plotly_utils.basevalidators
class TracerefValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the 'traceref' property of scattergl.error_x."""

    def __init__(
        self, plotly_name="traceref", parent_name="scattergl.error_x", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super(TracerefValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs
        )
|
"""Commands supported by the Earth Engine command line interface.
Each command is implemented by extending the Command class. Each class
defines the supported positional and optional arguments, as well as
the actions to be taken when the command is executed.
"""
from __future__ import print_function
from six.moves import input # pylint: disable=redefined-builtin
import argparse
import calendar
from collections import Counter
import datetime
import json
import os
import re
import sys
import webbrowser
try:
# Python 2.x
import urlparse
except ImportError:
# Python 3.x
from urllib.parse import urlparse
import ee
from ee.cli import utils
# ACL principal and permission-list keys.
ALL_USERS = 'AllUsers'
ALL_USERS_CAN_READ = 'all_users_can_read'
READERS = 'readers'
WRITERS = 'writers'
# Property type names accepted in "(type)name=value" flags.
TYPE_DATE = 'date'
TYPE_NUMBER = 'number'
TYPE_STRING = 'string'
# Well-known Earth Engine time properties.
SYSTEM_TIME_START = 'system:time_start'
SYSTEM_TIME_END = 'system:time_end'
# Matches "[(type)]name=value"; groups: ((type)), type, name, value.
PROPERTY_RE = re.compile(r'(\(([^\)]*)\))?([^=]+)=(.*)')
# Human-readable names for task types returned by the API.
TASK_TYPES = {
    'EXPORT_FEATURES': 'Export.table',
    'EXPORT_IMAGE': 'Export.image',
    'EXPORT_TILES': 'Export.map',
    'EXPORT_VIDEO': 'Export.video',
    'INGEST': 'Upload',
}
def _add_wait_arg(parser):
  """Adds the --wait/-w flag to the given argparse parser."""
  # default=-1 means the flag was absent; const=sys.maxsize means the flag
  # was given without a value, i.e. wait indefinitely.
  parser.add_argument(
      '--wait', '-w', nargs='?', default=-1, type=int, const=sys.maxsize,
      help=('Wait for the task to finish,'
            ' or timeout after the specified number of seconds.'
            ' Without this flag, the command just starts an export'
            ' task in the background, and returns immediately.'))
def _upload(args, request, ingestion_function):
  """Starts an ingestion task and optionally waits for it to finish.

  args.wait (see _add_wait_arg) of -1 means fire-and-forget; waits shorter
  than 10 seconds are rejected.
  """
  if 0 <= args.wait < 10:
    raise ee.EEException('Wait time should be at least 10 seconds.')
  task_id = ee.data.newTaskId()[0]
  ingestion_function(task_id, request)
  print('Started upload task with ID: %s' % task_id)
  if args.wait >= 0:
    print('Waiting for the upload task to complete...')
    utils.wait_for_task(task_id, args.wait)
def _comma_separated_numbers(string):
"""Parses an input consisting of comma-separated numbers."""
error_msg = 'Argument should be a comma-separated list of numbers: {}'
values = string.split(',')
if not values:
raise argparse.ArgumentTypeError(error_msg.format(string))
numbervalues = []
for value in values:
try:
numbervalues.append(int(value))
except ValueError:
try:
numbervalues.append(float(value))
except ValueError:
raise argparse.ArgumentTypeError(error_msg.format(string))
return numbervalues
def _comma_separated_pyramiding_policies(string):
"""Parses an input consisting of comma-separated pyramiding policies."""
error_msg = ('Argument should be a comma-separated list of: '
'{{"mean", "sample", "min", "max", "mode"}}: {}')
values = string.split(',')
if not values:
raise argparse.ArgumentTypeError(error_msg.format(string))
redvalues = []
for value in values:
if value.lower() not in {'mean', 'sample', 'min', 'max', 'mode'}:
raise argparse.ArgumentTypeError(error_msg.format(string))
redvalues.append(value.lower())
return redvalues
def _decode_number(string):
"""Decodes a number from a command line argument."""
try:
return float(string)
except ValueError:
raise argparse.ArgumentTypeError(
'Invalid value for property of type "number": "%s".' % string)
def _timestamp_ms_for_datetime(datetime_obj):
"""Returns time since the epoch in ms for the given UTC datetime object."""
return (
int(calendar.timegm(datetime_obj.timetuple()) * 1000) +
datetime_obj.microsecond / 1000)
def _decode_date(string):
"""Decodes a date from a command line argument, as msec since the epoch."""
try:
return int(string)
except ValueError:
date_formats = ['%Y-%m-%d',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f']
for date_format in date_formats:
try:
dt = datetime.datetime.strptime(string, date_format)
return _timestamp_ms_for_datetime(dt)
except ValueError:
continue
raise argparse.ArgumentTypeError(
'Invalid value for property of type "date": "%s".' % string)
def _decode_property(string):
  """Decodes a general key-value property from a command line argument."""
  m = PROPERTY_RE.match(string)
  if not m:
    # Bug fix: the message was previously passed as a second positional
    # argument (',' instead of '%'), so the exception carried an
    # unformatted message plus a stray tuple element.
    raise argparse.ArgumentTypeError(
        'Invalid property: "%s". Must have the form "name=value" or '
        '"(type)name=value".' % string)
  _, type_str, name, value_str = m.groups()
  if type_str is None:
    # Guess numeric types automatically.
    try:
      value = _decode_number(value_str)
    except argparse.ArgumentTypeError:
      value = value_str
  elif type_str == TYPE_DATE:
    value = _decode_date(value_str)
  elif type_str == TYPE_NUMBER:
    value = _decode_number(value_str)
  elif type_str == TYPE_STRING:
    value = value_str
  else:
    raise argparse.ArgumentTypeError(
        'Unrecognized property type name: "%s". Expected one of "string", '
        '"number", "date", or a prefix.' % type_str)
  return (name, value)
def _add_property_flags(parser):
  """Adds command line flags related to metadata properties to a parser."""
  parser.add_argument(
      '--property', '-p',
      help='A property to set, in the form [(type)]name=value. If no type '
      'is specified the type will be "number" if the value is numeric and '
      '"string" otherwise. May be provided multiple times.',
      action='append',
      type=_decode_property)
  # --time_start/--time_end are shorthands for the well-known
  # system:time_start / system:time_end properties.
  parser.add_argument(
      '--time_start', '-ts',
      help='Sets the start time property to a number or date.',
      type=_decode_date)
  parser.add_argument(
      '--time_end', '-te',
      help='Sets the end time property to a number or date.',
      type=_decode_date)
def _decode_property_flags(args):
"""Decodes metadata properties from args as a list of (name,value) pairs."""
property_list = list(args.property or [])
if args.time_start:
property_list.append((SYSTEM_TIME_START, args.time_start))
if args.time_end:
property_list.append((SYSTEM_TIME_END, args.time_end))
names = [name for name, _ in property_list]
duplicates = [name for name, count in Counter(names).items() if count > 1]
if duplicates:
raise ee.EEException('Duplicate property name(s): %s.' % duplicates)
return dict(property_list)
def _check_valid_files(filenames):
"""Returns true if the given filenames are valid upload file URIs."""
for filename in filenames:
if not filename.startswith('gs://'):
raise ee.EEException('Invalid Cloud Storage URL: ' + filename)
def _pretty_print_json(json_obj):
"""Pretty-prints a JSON object to stdandard output."""
print(json.dumps(json_obj, sort_keys=True, indent=2, separators=(',', ': ')))
class Dispatcher(object):
    """Dispatches to a set of commands implemented as command classes.

    Subclasses provide `name` (used to derive the argparse destination)
    and `COMMANDS` (a list of command classes to register).
    """

    def __init__(self, parser):
        """Registers one argparse subparser per command in COMMANDS."""
        self.dest = self.name + '_cmd'
        subparsers = parser.add_subparsers(title='Commands', dest=self.dest)
        # Needed for proper missing-argument handling on Python 3.x.
        subparsers.required = True
        self.command_dict = {}
        for command_class in self.COMMANDS:
            doc = command_class.__doc__
            sub = subparsers.add_parser(
                command_class.name, description=doc,
                help=doc.splitlines()[0])
            self.command_dict[command_class.name] = command_class(sub)

    def run(self, args, config):
        """Forwards execution to the command selected on the command line."""
        selected = getattr(args, self.dest)
        self.command_dict[selected].run(args, config)
class AuthenticateCommand(object):
    """Prompts the user to authorize access to Earth Engine via OAuth2."""

    name = 'authenticate'

    def __init__(self, unused_parser):
        # No flags: the OAuth flow is fully interactive.
        pass

    def run(self, unused_args, unused_config):
        """Generates and opens a URL to get auth code, then retrieve a token."""
        auth_url = ee.oauth.get_authorization_url()
        webbrowser.open_new(auth_url)
        # User-facing prompt; kept verbatim.
        print("""
Opening web browser to address %s
Please authorize access to your Earth Engine account, and paste
the resulting code below.
If the web browser does not start, please manually browse the URL above.
""" % auth_url)
        # NOTE(review): on Python 2, input() evaluates the typed text;
        # presumably a raw_input compatibility shim is in place — confirm.
        auth_code = input('Please enter authorization code: ').strip()
        token = ee.oauth.request_token(auth_code)
        ee.oauth.write_token(token)
        print('\nSuccessfully saved authorization token.')
class AclChCommand(object):
    """Changes the access control list for an asset.

    Each change specifies the email address of a user or group and,
    for additions, one of R or W corresponding to the read or write
    permissions to be granted, as in "user@domain.com:R". Use the
    special name "AllUsers" to change whether all users can read the
    asset.
    """

    name = 'ch'

    def __init__(self, parser):
        parser.add_argument('-u', action='append', metavar='permission',
                            help='Add or modify a user\'s permission.')
        parser.add_argument('-d', action='append', metavar='user',
                            help='Remove all permissions for a user.')
        parser.add_argument('asset_id', help='ID of the asset.')

    def run(self, args, config):
        """Applies the requested ACL edits to the asset."""
        config.ee_init()
        permissions = self._parse_permissions(args)
        acl = ee.data.getAssetAcl(args.asset_id)
        self._apply_permissions(acl, permissions)
        # The original permissions will contain an 'owners' stanza, but EE
        # does not currently allow setting the owner ACL so we have to
        # remove it even though it has not changed.
        del acl['owners']
        ee.data.setAssetAcl(args.asset_id, json.dumps(acl))

    def _parse_permissions(self, args):
        """Decodes and sanity-checks the permissions in the arguments.

        Returns:
            A dict mapping user ids to 'R' (grant read), 'W' (grant write)
            or 'D' (drop all permissions).

        Raises:
            ee.EEException: On malformed, duplicate or disallowed grants.
        """
        permissions = {}
        if args.u:
            for grant in args.u:
                parts = grant.split(':')
                if len(parts) != 2 or parts[1] not in ['R', 'W']:
                    raise ee.EEException('Invalid permission "%s".' % grant)
                user, role = parts
                if user in permissions:
                    raise ee.EEException(
                        'Multiple permission settings for "%s".' % user)
                if user == ALL_USERS and role == 'W':
                    raise ee.EEException(
                        'Cannot grant write permissions to AllUsers.')
                permissions[user] = role
        if args.d:
            for user in args.d:
                if user in permissions:
                    raise ee.EEException(
                        'Multiple permission settings for "%s".' % user)
                permissions[user] = 'D'
        return permissions

    def _apply_permissions(self, acl, permissions):
        """Applies the given permission edits to the given acl."""
        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # behaves identically here on both Python 2 and 3.
        for user, role in permissions.items():
            if user == ALL_USERS:
                acl[ALL_USERS_CAN_READ] = (role == 'R')
            elif role == 'R':
                if user not in acl[READERS]:
                    acl[READERS].append(user)
                if user in acl[WRITERS]:
                    acl[WRITERS].remove(user)
            elif role == 'W':
                if user in acl[READERS]:
                    acl[READERS].remove(user)
                if user not in acl[WRITERS]:
                    acl[WRITERS].append(user)
            elif role == 'D':
                if user in acl[READERS]:
                    acl[READERS].remove(user)
                if user in acl[WRITERS]:
                    acl[WRITERS].remove(user)
class AclGetCommand(object):
    """Prints the access control list for an asset."""

    name = 'get'

    def __init__(self, parser):
        parser.add_argument('asset_id', help='ID of the asset.')

    def run(self, args, config):
        """Fetches the asset's ACL and pretty-prints it."""
        config.ee_init()
        _pretty_print_json(ee.data.getAssetAcl(args.asset_id))
class AclSetCommand(object):
    """Sets the access control list for an asset.

    The ACL may be the name of a canned ACL, or it may be the path to a
    file containing the output from "acl get". The recognized canned ACL
    names are "private", indicating that no users other than the owner
    have access, and "public", indicating that all users have read
    access. It is currently not possible to modify the owner ACL using
    this tool.
    """

    name = 'set'

    # Canned ACLs, keyed by the name accepted on the command line.
    CANNED_ACLS = {
        'private': {
            READERS: [],
            WRITERS: [],
            ALL_USERS_CAN_READ: False,
        },
        'public': {
            READERS: [],
            WRITERS: [],
            ALL_USERS_CAN_READ: True,
        },
    }

    def __init__(self, parser):
        parser.add_argument('file_or_acl_name',
                            help='File path or canned ACL name.')
        parser.add_argument('asset_id', help='ID of the asset.')

    def run(self, args, config):
        """Sets asset ACL to a canned ACL or one provided in a JSON file."""
        config.ee_init()
        if args.file_or_acl_name in self.CANNED_ACLS:
            acl = self.CANNED_ACLS[args.file_or_acl_name]
        else:
            # FIX: use a context manager so the file handle is closed
            # promptly instead of relying on garbage collection.
            with open(args.file_or_acl_name) as acl_file:
                acl = json.load(acl_file)
        # In the expected usage the ACL file will have come from a previous
        # invocation of 'acl get', which means it will include an 'owners'
        # stanza, but EE does not currently allow setting the owner ACL,
        # so we have to remove it.
        if 'owners' in acl:
            print('Warning: Not updating the owner ACL.')
            del acl['owners']
        ee.data.setAssetAcl(args.asset_id, json.dumps(acl))
class AclCommand(Dispatcher):
    """Prints or updates the access control list of the specified asset."""

    name = 'acl'

    # Subcommands, exposed as "acl ch", "acl get" and "acl set".
    COMMANDS = [
        AclChCommand,
        AclGetCommand,
        AclSetCommand,
    ]
class AssetInfoCommand(object):
    """Prints metadata and other information about an Earth Engine asset."""

    name = 'info'

    def __init__(self, parser):
        parser.add_argument('asset_id', help='ID of the asset to print.')

    def run(self, args, config):
        """Looks up the asset and pretty-prints its metadata."""
        config.ee_init()
        info = ee.data.getInfo(args.asset_id)
        # Guard clause: fail loudly when the lookup returned nothing.
        if not info:
            raise ee.EEException(
                'Asset does not exist or is not accessible: %s' % args.asset_id)
        _pretty_print_json(info)
class AssetSetCommand(object):
    """Sets metadata properties of an Earth Engine asset.

    Properties may be of type "string", "number", or "date". Dates must
    be specified in the form YYYY-MM-DD[Thh:mm:ss[.ff]] in UTC and are
    stored as numbers representing the number of milliseconds since the
    Unix epoch (00:00:00 UTC on 1 January 1970).
    """

    name = 'set'

    def __init__(self, parser):
        parser.add_argument('asset_id', help='ID of the asset to update.')
        _add_property_flags(parser)

    def run(self, args, config):
        """Decodes the property flags and writes them to the asset."""
        # Decode before initializing EE so flag errors surface immediately.
        props = _decode_property_flags(args)
        config.ee_init()
        if not props:
            raise ee.EEException('No properties specified.')
        ee.data.setAssetProperties(args.asset_id, props)
class AssetCommand(Dispatcher):
    """Prints or updates metadata associated with an Earth Engine asset."""

    name = 'asset'

    # Subcommands, exposed as "asset info" and "asset set".
    COMMANDS = [
        AssetInfoCommand,
        AssetSetCommand,
    ]
class CopyCommand(object):
    """Creates a new Earth Engine asset as a copy of another asset."""

    name = 'cp'

    def __init__(self, parser):
        parser.add_argument('source',
                            help='Full path of the source asset.')
        parser.add_argument('destination',
                            help='Full path of the destination asset.')

    def run(self, args, config):
        """Runs the asset copy."""
        config.ee_init()
        ee.data.copyAsset(args.source, args.destination)
class CreateCommandBase(object):
    """Base class for implementing Create subcommands."""

    def __init__(self, parser, fragment, asset_type):
        """Registers shared flags; `fragment` customizes the help wording."""
        self.asset_type = asset_type
        parser.add_argument(
            'asset_id', nargs='+',
            help='Full path of %s to create.' % fragment)
        parser.add_argument(
            '--parents', '-p', action='store_true',
            help='Make parent folders as needed.')

    def run(self, args, config):
        """Creates each requested asset of the configured type."""
        config.ee_init()
        ee.data.create_assets(args.asset_id, self.asset_type, args.parents)
class CreateCollectionCommand(CreateCommandBase):
    """Creates one or more image collections."""

    name = 'collection'

    def __init__(self, parser):
        # Delegate to the base class with collection-specific wording/type.
        super(CreateCollectionCommand, self).__init__(
            parser, 'an image collection', ee.data.ASSET_TYPE_IMAGE_COLL)
class CreateFolderCommand(CreateCommandBase):
    """Creates one or more folders."""

    name = 'folder'

    def __init__(self, parser):
        # Delegate to the base class with folder-specific wording/type.
        super(CreateFolderCommand, self).__init__(
            parser, 'a folder', ee.data.ASSET_TYPE_FOLDER)
class CreateCommand(Dispatcher):
    """Creates assets and folders."""

    name = 'create'

    # Subcommands, exposed as "create collection" and "create folder".
    COMMANDS = [
        CreateCollectionCommand,
        CreateFolderCommand,
    ]
class ListCommand(object):
    """Prints the contents of a folder or collection."""

    name = 'ls'

    def __init__(self, parser):
        parser.add_argument(
            'asset_id', nargs='*',
            help='A folder or image collection to be inspected.')
        parser.add_argument(
            '-l', action='store_true',
            help='Print output in long format.')
        parser.add_argument(
            '--max_items', '-m', default=-1, type=int,
            help='Maximum number of items to list for each collection.')

    def run(self, args, config):
        """Lists the requested assets, or the user's roots if none given."""
        config.ee_init()
        if not args.asset_id:
            roots = ee.data.getAssetRoots()
            self._print_assets(roots, '', args.l)
            return
        assets = args.asset_id
        # Use enumerate instead of a hand-rolled counter.
        for index, asset in enumerate(assets):
            if index > 0:
                # Blank line between successive asset listings.
                print()
            self._list_asset_content(
                asset, args.max_items, len(assets), args.l)

    def _print_assets(self, assets, indent, long_format):
        """Prints asset records, with aligned bracketed types in long format."""
        if not assets:
            return
        if long_format:
            # Only compute the column width when it is actually used.
            # Example output:
            # [Image] user/test/my_img
            # [ImageCollection] user/test/my_coll
            max_type_length = max(len(asset['type']) for asset in assets)
            format_str = '%s{:%ds}{:s}' % (indent, max_type_length + 4)
            for asset in assets:
                print(format_str.format('[' + asset['type'] + ']', asset['id']))
        else:
            for asset in assets:
                print(asset['id'])

    def _list_asset_content(self, asset, max_items, total_assets, long_format):
        """Lists one asset's children, with a heading when several assets."""
        try:
            list_req = {'id': asset}
            if max_items >= 0:
                list_req['num'] = max_items
            children = ee.data.getList(list_req)
            indent = ''
            if total_assets > 1:
                print('%s:' % asset)
                indent = ' '
            self._print_assets(children, indent, long_format)
        except ee.EEException as e:
            print(e)
class MoveCommand(object):
    """Moves or renames an Earth Engine asset."""

    name = 'mv'

    def __init__(self, parser):
        parser.add_argument('source',
                            help='Full path of the source asset.')
        parser.add_argument('destination',
                            help='Full path of the destination asset.')

    def run(self, args, config):
        """Renames the source asset to the destination path."""
        config.ee_init()
        ee.data.renameAsset(args.source, args.destination)
class RmCommand(object):
    """Deletes the specified assets."""

    name = 'rm'

    def __init__(self, parser):
        parser.add_argument(
            'asset_id', nargs='+', help='Full path of an asset to delete.')
        parser.add_argument(
            '--recursive', '-r', action='store_true',
            help='Recursively delete child assets.')
        parser.add_argument(
            '--dry_run', action='store_true',
            help=('Perform a dry run of the delete operation. Does not '
                  'delete any assets.'))
        parser.add_argument(
            '--verbose', '-v', action='store_true',
            help='Print the progress of the operation to the console.')

    def run(self, args, config):
        """Deletes each requested asset in turn."""
        config.ee_init()
        for target in args.asset_id:
            self._delete_asset(target, args.recursive, args.verbose,
                               args.dry_run)

    def _delete_asset(self, asset_id, recursive, verbose, dry_run):
        """Attempts to delete the specified asset or asset collection."""
        info = ee.data.getInfo(asset_id)
        if info is None:
            print('Asset does not exist or is not accessible: %s' % asset_id)
            return
        container_types = (ee.data.ASSET_TYPE_FOLDER,
                           ee.data.ASSET_TYPE_IMAGE_COLL)
        if recursive and info['type'] in container_types:
            # Delete children first so the container is empty when removed.
            for child in ee.data.getList({'id': asset_id}):
                self._delete_asset(child['id'], True, verbose, dry_run)
        if dry_run:
            print('[dry-run] Deleting asset: %s' % asset_id)
            return
        if verbose:
            print('Deleting asset: %s' % asset_id)
        try:
            ee.data.deleteAsset(asset_id)
        except ee.EEException as e:
            print('Failed to delete %s. %s' % (asset_id, e))
class TaskCancelCommand(object):
    """Cancels a running task."""

    name = 'cancel'

    def __init__(self, parser):
        parser.add_argument(
            'task_ids', nargs='+',
            help='IDs of one or more tasks to cancel,'
                 ' or `all` to cancel all tasks.')

    def run(self, args, config):
        """Cancels each requested task that is still ready or running."""
        config.ee_init()
        cancel_all = args.task_ids == ['all']
        if cancel_all:
            statuses = ee.data.getTaskList()
        else:
            statuses = ee.data.getTaskStatus(args.task_ids)
        for status in statuses:
            state, task_id = status['state'], status['id']
            if state == 'UNKNOWN':
                raise ee.EEException('Unknown task id "%s"' % task_id)
            if state in ('READY', 'RUNNING'):
                print('Canceling task "%s"' % task_id)
                ee.data.cancelTask(task_id)
            elif not cancel_all:
                # When explicit ids were given, report already-finished tasks.
                print('Task "%s" already in state "%s".' % (status['id'], state))
class TaskInfoCommand(object):
"""Prints information about a task."""
name = 'info'
def __init__(self, parser):
parser.add_argument('task_id', nargs='*', help='ID of a task to get.')
def run(self, args, config):
config.ee_init()
for i, status in enumerate(ee.data.getTaskStatus(args.task_id)):
if i:
print()
print('%s:' % status['id'])
print(' State: %s' % status['state'])
if status['state'] == 'UNKNOWN':
continue
print(' Type: %s' % TASK_TYPES.get(status.get('task_type'), 'Unknown'))
print(' Description: %s' % status.get('description'))
print(' Created: %s'
% self._format_time(status['creation_timestamp_ms']))
if 'start_timestamp_ms' in status:
print(' Started: %s' % self._format_time(status['start_timestamp_ms']))
if 'update_timestamp_ms' in status:
print(' Updated: %s'
% self._format_time(status['update_timestamp_ms']))
if 'error_message' in status:
print(' Error: %s' % status['error_message'])
def _format_time(self, millis):
return datetime.datetime.fromtimestamp(millis / 1000)
class TaskListCommand(object):
    """Lists the tasks submitted recently."""

    name = 'list'

    def __init__(self, unused_parser):
        # No flags for this command.
        pass

    def run(self, unused_args, config):
        """Prints one aligned row per task."""
        config.ee_init()
        tasks = ee.data.getTaskList()
        if not tasks:
            # BUG FIX: max() over an empty sequence raises ValueError, so an
            # empty task list used to crash the command.
            return
        descs = [utils.truncate(task.get('description', ''), 40)
                 for task in tasks]
        desc_length = max(len(word) for word in descs)
        format_str = '{:25s} {:13s} {:%ds} {:10s} {:s}' % (desc_length + 1)
        # Reuse the already-truncated descriptions instead of truncating twice.
        for task, truncated_desc in zip(tasks, descs):
            task_type = TASK_TYPES.get(task['task_type'], 'Unknown')
            print(format_str.format(
                task['id'], task_type, truncated_desc,
                task['state'], task.get('error_message', '---')))
class TaskWaitCommand(object):
    """Waits for the specified task or tasks to complete."""

    name = 'wait'

    def __init__(self, parser):
        parser.add_argument(
            '--timeout', '-t', default=sys.maxsize, type=int,
            help=('Stop waiting for the task(s) to finish after the specified,'
                  ' number of seconds. Without this flag, the command will wait'
                  ' indefinitely.'))
        parser.add_argument('--verbose', '-v', action='store_true',
                            help=('Print periodic status messages for each'
                                  ' incomplete task.'))
        parser.add_argument('task_ids', nargs='+',
                            help=('Either a list of one or more currently-running'
                                  ' task ids to wait on; or \'all\' to wait on all'
                                  ' running tasks.'))

    def run(self, args, config):
        """Waits on the given tasks to complete or for a timeout to pass."""
        config.ee_init()
        if args.task_ids == ['all']:
            # Wait on every task that has not already reached a final state.
            pending = [task['id'] for task in ee.data.getTaskList()
                       if task['state'] not in utils.TASK_FINISHED_STATES]
        else:
            pending = []
            for status in ee.data.getTaskStatus(args.task_ids):
                if status['state'] == 'UNKNOWN':
                    raise ee.EEException('Unknown task id "%s"' % status['id'])
                pending.append(status['id'])
        utils.wait_for_tasks(pending, args.timeout, log_progress=args.verbose)
class TaskCommand(Dispatcher):
    """Prints information about or manages long-running tasks."""

    name = 'task'

    # Subcommands: "task cancel", "task info", "task list", "task wait".
    COMMANDS = [
        TaskCancelCommand,
        TaskInfoCommand,
        TaskListCommand,
        TaskWaitCommand,
    ]
class UploadImageCommand(object):
    """Uploads an image from Cloud Storage to Earth Engine.

    See docs for "asset set" for additional details on how to specify asset
    metadata properties.
    """

    name = 'image'

    def __init__(self, parser):
        _add_wait_arg(parser)
        parser.add_argument(
            'src_files',
            help=('Cloud Storage URL(s) of the file(s) to upload. '
                  'Must have the prefix \'gs://\'.'),
            nargs='+')
        parser.add_argument(
            '--asset_id',
            help='Destination asset ID for the uploaded file.')
        parser.add_argument(
            '--last_band_alpha',
            help='Use the last band as a masking channel for all bands. '
                 'Mutually exclusive with nodata_value.',
            action='store_true')
        parser.add_argument(
            '--nodata_value',
            help='Value for missing data. '
                 'Mutually exclusive with last_band_alpha.',
            type=_comma_separated_numbers)
        parser.add_argument(
            '--pyramiding_policy',
            help='The pyramid reduction policy to use',
            type=_comma_separated_pyramiding_policies)
        _add_property_flags(parser)
        # TODO(user): add --bands arg

    def run(self, args, config):
        """Starts the upload task, and waits for completion if requested.

        Builds an ingestion request dict from the parsed flags and hands it
        to ee.data.startIngestion via the shared _upload helper.

        Raises:
            ValueError: If mutually exclusive flags were combined, or the
                number of nodata values disagrees with the number of
                per-band pyramiding policies.
        """
        _check_valid_files(args.src_files)
        config.ee_init()
        if args.last_band_alpha and args.nodata_value:
            raise ValueError(
                'last_band_alpha and nodata_value are mutually exclusive.')
        properties = _decode_property_flags(args)
        request = {
            'id': args.asset_id,
            'properties': properties
        }
        # Expand any gs:// wildcard patterns into concrete object paths.
        source_files = utils.expand_gcs_wildcards(args.src_files)
        sources = [{'primaryPath': source} for source in source_files]
        tileset = {'sources': sources}
        if args.last_band_alpha:
            # fileBandIndex -1 selects the last band as the mask channel.
            tileset['fileBands'] = [{'fileBandIndex': -1, 'maskForAllBands': True}]
        request['tilesets'] = [tileset]
        if args.pyramiding_policy:
            if len(args.pyramiding_policy) == 1:
                # A single policy applies to every band.
                request['pyramidingPolicy'] = args.pyramiding_policy[0].upper()
            else:
                # One policy per band, keyed by band index.
                bands = []
                for index, policy in enumerate(args.pyramiding_policy):
                    bands.append({'id': index, 'pyramidingPolicy': policy.upper()})
                request['bands'] = bands
        if args.nodata_value:
            if len(args.nodata_value) == 1:
                # A single missing-data value applies to the whole image.
                request['missingData'] = {'value': args.nodata_value[0]}
            else:
                # Per-band values: merge with any per-band pyramiding
                # policies already recorded above.
                if 'bands' in request:
                    if len(request['bands']) != len(args.nodata_value):
                        raise ValueError('Inconsistent number of bands: {} and {}'
                                         .format(args.pyramiding_policy, args.nodata_value))
                else:
                    request['bands'] = []
                bands = request['bands']
                for index, nodata in enumerate(args.nodata_value):
                    if index < len(bands):
                        bands[index]['missingData'] = {'value': nodata}
                    else:
                        bands.append({'id': index, 'missingData': {'value': nodata}})
        _upload(args, request, ee.data.startIngestion)
class UploadCommand(Dispatcher):
    """Uploads assets to Earth Engine."""

    name = 'upload'

    # Currently only image ingestion is supported.
    COMMANDS = [
        UploadImageCommand,
    ]
|
from setuptools import setup
from versioneer import get_cmdclass, get_version
def readme():
    """Returns the contents of README.rst for use as the long description."""
    # Read as UTF-8 explicitly so the build does not depend on the
    # machine's locale (the default encoding is platform-specific).
    with open('README.rst', encoding='utf-8') as f:
        return f.read()
# Package definition. Version and build commands are derived from git tags
# by versioneer (get_version / get_cmdclass).
setup(name='vibe-analyser',
      version=get_version(),
      cmdclass=get_cmdclass(),
      description='A vibration analysis and data acquisition suite for the rpi',
      long_description=readme(),
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Framework :: Flask',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Intended Audience :: End Users/Desktop'
      ],
      url='http://github.com/3ll3d00d/vibe',
      author='Matt Khan',
      author_email='mattkhan+vibe@gmail.com',
      license='MIT',
      # Packages live under backend/src (see package_dir below).
      packages=[
          'analyser',
          'analyser.common',
          'analyser.resources',
          'core'
      ],
      package_dir={'': 'backend/src'},
      # Installs an "analyser" console script that runs the Flask app.
      entry_points={
          'console_scripts': [
              'analyser = analyser.app:main',
          ],
      },
      install_requires=[
          'flask',
          'flask-restful',
          'numpy',
          'pyyaml',
          'requests',
          'scipy',
          'twisted',
          'librosa',
          'typing',
          'cffi',
          'pysoundfile'
      ],
      setup_requires=[
          'pytest-runner',
          'versioneer'
      ],
      tests_require=[
          'pytest'
      ],
      include_package_data=True,
      zip_safe=False)
|
from django.shortcuts import render,render_to_response
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.http import JsonResponse
from tools.dbcon import *
from online.models import User
import sys
# HACK: Python 2 only. reload(sys) restores the setdefaultencoding attribute
# that site.py deletes, then forces utf8 process-wide. This masks str/unicode
# mixing bugs and is strongly discouraged; it also breaks on Python 3,
# where reload() and sys.setdefaultencoding no longer exist.
reload(sys)
sys.setdefaultencoding('utf8')
def isflag(req):
    """Loads the session user's profile fields and computes completion flags.

    Args:
        req: The Django request; req.session['user_info'] must be a dict
            holding at least a 'name' key.

    Returns:
        A (flag, xzflag, user_info) tuple. flag is 0 when any of the basic
        profile fields (comp/edu/job) is non-empty, 1 otherwise; xzflag is
        the same for the preference fields (mycomp/mymoney/mylocal/myintr/
        myjob). user_info is the session dict updated with the DB values.
    """
    user_info = req.session['user_info']
    condition = {'name': user_info['name']}
    r = User.objects.get(**condition)
    user_info['job'] = r.job
    user_info['edu'] = r.edu
    user_info['comp'] = r.comp
    user_info['myjob'] = r.myjob
    user_info['mylocal'] = r.mylocal
    user_info['mycomp'] = r.mycomp
    user_info['myintr'] = r.myintr
    user_info['mymoney'] = r.mymoney
    # FIX: cmp() no longer exists on Python 3; cmp(x, "") != 0 is exactly
    # x != "" (including for None, which also compares unequal to "").
    if r.comp != "" or r.edu != "" or r.job != "":
        flag = 0
    else:
        flag = 1
    if (r.mycomp != "" or r.mymoney != "" or r.mylocal != "" or
            r.myintr != "" or r.myjob != ""):
        xzflag = 0
    else:
        xzflag = 1
    return flag, xzflag, user_info
def index(req):
    """Renders the intro index page for a logged-in user.

    When the session has no 'islogin' flag, or it is not True, renders
    msg.html with a login prompt instead.
    """
    if req.method == 'GET':
        try:
            islogin = req.session['islogin']
        # FIX: "except Exception, e" is Python-2-only syntax; "as e" works
        # on Python 2.6+ and Python 3. KeyError is raised when the user
        # never logged in.
        except Exception as e:
            msg = '请登录'
            return render(req, 'msg.html', locals())
        # NOTE(review): explicit "== True" kept on purpose — the session
        # value may be a non-bool truthy value that should not pass.
        if req.session['islogin'] == True:
            flag, xzflag, user_info = isflag(req)
            return render(req, "intr_index.html", locals())
        else:
            msg = '请登录'
            return render(req, 'msg.html', locals())
def money(req):
    """Saves the user's job-preference form fields and awards 10 beans.

    NOTE(review): the template context is locals(), so every local name in
    this function is potentially referenced by intr_index.html — do not
    rename or remove locals without checking the template.
    NOTE(review): the read-increment-update of beans is not atomic; two
    concurrent POSTs can lose an increment (an F() expression would fix it).
    """
    if req.method == 'POST':
        mycomp = req.POST['mycomp']
        myjob = req.POST['myjob']
        mylocal = req.POST['mylocal']
        mymoney = req.POST['mymoney']
        myintr = req.POST['myintr']
        user_info = req.session['user_info']
        condition = {'name': user_info['name']}
        r = User.objects.get(**condition)
        beans = r.beans
        beans += 10
        User.objects.filter(name=user_info['name']).update(beans=beans, myjob=myjob, mycomp=mycomp, mylocal=mylocal, mymoney=mymoney, myintr=myintr)
        flag, xzflag, user_info = isflag(req)
        return render(req, "intr_index.html", locals())
def info(req):
    """Saves the user's basic profile fields (comp/job/edu) and awards 10 beans.

    NOTE(review): the template context is locals(), so every local name in
    this function is potentially referenced by intr_index.html — do not
    rename or remove locals without checking the template.
    NOTE(review): the read-increment-update of beans is not atomic; two
    concurrent POSTs can lose an increment (an F() expression would fix it).
    """
    if req.method == 'POST':
        comp = req.POST['comp']
        job = req.POST['job']
        edu = req.POST['edu']
        user_info = req.session['user_info']
        condition = {'name': user_info['name']}
        r = User.objects.get(**condition)
        beans = r.beans
        beans += 10
        User.objects.filter(name=user_info['name']).update(beans=beans, job=job, comp=comp, edu=edu)
        flag, xzflag, user_info = isflag(req)
        return render(req, "intr_index.html", locals())
|
class Solution(object):
    def jump(self, nums):
        """Returns the minimum number of jumps needed to reach the last index.

        Breadth-first search over indices: each BFS layer corresponds to one
        jump, so step[j] holds the minimum jump count to reach index j.
        Assumes the last index is reachable (LeetCode 45 guarantee); when it
        is not, the original 0 in step[-1] is returned unchanged.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        step = [0] * n
        visited = [False] * n
        # PERF FIX: the original popped via "nodes = nodes[1:]", which copies
        # the list on every dequeue (O(n) each, O(n^2) total). A head
        # pointer into a grow-only list gives O(1) dequeues.
        queue = [0]
        head = 0
        visited[0] = True
        while head < len(queue):
            i = queue[head]
            head += 1
            # Visit the farthest targets first so the last index is
            # discovered as early as possible within the layer.
            for j in reversed(range(i + 1, min(i + nums[i] + 1, n))):
                if not visited[j]:
                    visited[j] = True
                    step[j] = step[i] + 1
                    queue.append(j)
                    if j == n - 1:
                        break
            if visited[-1]:
                break
        return step[-1]
if __name__ == "__main__":
    # Smoke-test the solver against a few known inputs.
    solver = Solution()
    cases = (
        [2, 3, 1, 1, 4],
        [1, 2, 0, 1],
        [2, 0, 2, 0, 1],
        [5, 9, 3, 2, 1, 0, 2, 3, 3, 1, 0, 0],
    )
    for case in cases:
        print(solver.jump(case))
|
"""XML parsing package.

This package is currently quite limited: it handles the basics, and most
of the remaining work is a matter of fleshing out Unicode and
CharacterType support. Support for Reference types is minimal — we note
that a Reference exists, but do no further processing of it.
"""
|
import asynchat
import contextlib
import errno
import glob
import logging
import os
import random
import socket
import sys
import time
import traceback
import warnings
from datetime import datetime
try:
import pwd
import grp
except ImportError:
pwd = grp = None
try:
from OpenSSL import SSL # requires "pip install pyopenssl"
except ImportError:
SSL = None
try:
from collections import OrderedDict # python >= 2.7
except ImportError:
OrderedDict = dict
from . import __ver__
from ._compat import b
from ._compat import getcwdu
from ._compat import PY3
from ._compat import u
from ._compat import unicode
from ._compat import xrange
from .authorizers import AuthenticationFailed
from .authorizers import AuthorizerError
from .authorizers import DummyAuthorizer
from .filesystems import AbstractedFS
from .filesystems import FilesystemError
from .ioloop import _ERRNOS_DISCONNECTED
from .ioloop import _ERRNOS_RETRY
from .ioloop import Acceptor
from .ioloop import AsyncChat
from .ioloop import Connector
from .ioloop import RetryError
from .ioloop import timer
from .log import debug
from .log import logger
CR_BYTE = ord('\r')
def _import_sendfile():
# By default attempt to use os.sendfile introduced in Python 3.3:
# http://bugs.python.org/issue10882
# ...otherwise fallback on using third-party pysendfile module:
# https://github.com/giampaolo/pysendfile/
if os.name == 'posix':
try:
return os.sendfile # py >= 3.3
except AttributeError:
try:
import sendfile as sf
# dirty hack to detect whether old 1.2.4 version is installed
if hasattr(sf, 'has_sf_hdtr'):
raise ImportError
return sf.sendfile
except ImportError:
pass
return None
sendfile = _import_sendfile()
# Table of all supported FTP commands. For each command:
#  - perm: the permission character the authorizer must grant (None when
#    no specific permission is required);
#  - auth: whether the client must be authenticated before using it;
#  - arg: whether an argument is required (True), forbidden (False) or
#    optional (None);
#  - help: the text returned in response to the HELP command.
proto_cmds = {
    'ABOR': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: ABOR (abort transfer).'),
    'ALLO': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: ALLO <SP> bytes (noop; allocate storage).'),
    'APPE': dict(
        perm='a', auth=True, arg=True,
        help='Syntax: APPE <SP> file-name (append data to file).'),
    'CDUP': dict(
        perm='e', auth=True, arg=False,
        help='Syntax: CDUP (go to parent directory).'),
    'CWD': dict(
        perm='e', auth=True, arg=None,
        help='Syntax: CWD [<SP> dir-name] (change working directory).'),
    'DELE': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: DELE <SP> file-name (delete file).'),
    'EPRT': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: EPRT <SP> |proto|ip|port| (extended active mode).'),
    'EPSV': dict(
        perm=None, auth=True, arg=None,
        help='Syntax: EPSV [<SP> proto/"ALL"] (extended passive mode).'),
    'FEAT': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: FEAT (list all new features supported).'),
    'HELP': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: HELP [<SP> cmd] (show help).'),
    'LIST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: LIST [<SP> path] (list files).'),
    'MDTM': dict(
        perm='l', auth=True, arg=True,
        help='Syntax: MDTM [<SP> path] (file last modification time).'),
    'MFMT': dict(
        perm='T', auth=True, arg=True,
        help='Syntax: MFMT <SP> timeval <SP> path (file update last '
             'modification time).'),
    'MLSD': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: MLSD [<SP> path] (list directory).'),
    'MLST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: MLST [<SP> path] (show information about path).'),
    'MODE': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: MODE <SP> mode (noop; set data transfer mode).'),
    'MKD': dict(
        perm='m', auth=True, arg=True,
        help='Syntax: MKD <SP> path (create directory).'),
    'NLST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: NLST [<SP> path] (list path in a compact form).'),
    'NOOP': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: NOOP (just do nothing).'),
    'OPTS': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: OPTS <SP> cmd [<SP> option] (set option for command).'),
    'PASS': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: PASS [<SP> password] (set user password).'),
    'PASV': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: PASV (open passive data connection).'),
    'PORT': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: PORT <sp> h,h,h,h,p,p (open active data connection).'),
    'PWD': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: PWD (get current working directory).'),
    'QUIT': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: QUIT (quit current session).'),
    'REIN': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: REIN (flush account).'),
    'REST': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: REST <SP> offset (set file offset).'),
    'RETR': dict(
        perm='r', auth=True, arg=True,
        help='Syntax: RETR <SP> file-name (retrieve a file).'),
    'RMD': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: RMD <SP> dir-name (remove directory).'),
    'RNFR': dict(
        perm='f', auth=True, arg=True,
        help='Syntax: RNFR <SP> file-name (rename (source name)).'),
    'RNTO': dict(
        perm='f', auth=True, arg=True,
        help='Syntax: RNTO <SP> file-name (rename (destination name)).'),
    'SITE': dict(
        perm=None, auth=False, arg=True,
        help='Syntax: SITE <SP> site-command (execute SITE command).'),
    'SITE HELP': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: SITE HELP [<SP> cmd] (show SITE command help).'),
    'SITE CHMOD': dict(
        perm='M', auth=True, arg=True,
        help='Syntax: SITE CHMOD <SP> mode path (change file mode).'),
    'SIZE': dict(
        perm='l', auth=True, arg=True,
        help='Syntax: SIZE <SP> file-name (get file size).'),
    'STAT': dict(
        perm='l', auth=False, arg=None,
        help='Syntax: STAT [<SP> path name] (server stats [list files]).'),
    'STOR': dict(
        perm='w', auth=True, arg=True,
        help='Syntax: STOR <SP> file-name (store a file).'),
    'STOU': dict(
        perm='w', auth=True, arg=None,
        help='Syntax: STOU [<SP> name] (store a file with a unique name).'),
    'STRU': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: STRU <SP> type (noop; set file structure).'),
    'SYST': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: SYST (get operating system type).'),
    'TYPE': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: TYPE <SP> [A | I] (set transfer type).'),
    'USER': dict(
        perm=None, auth=False, arg=True,
        help='Syntax: USER <SP> user-name (set username).'),
    'XCUP': dict(
        perm='e', auth=True, arg=False,
        help='Syntax: XCUP (obsolete; go to parent directory).'),
    'XCWD': dict(
        perm='e', auth=True, arg=None,
        help='Syntax: XCWD [<SP> dir-name] (obsolete; change directory).'),
    'XMKD': dict(
        perm='m', auth=True, arg=True,
        help='Syntax: XMKD <SP> dir-name (obsolete; create directory).'),
    'XPWD': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: XPWD (obsolete; get current dir).'),
    'XRMD': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: XRMD <SP> dir-name (obsolete; remove directory).'),
}

# Only advertise SITE CHMOD where the platform can actually change modes.
if not hasattr(os, 'chmod'):
    del proto_cmds['SITE CHMOD']
def _strerror(err):
if isinstance(err, EnvironmentError):
try:
return os.strerror(err.errno)
except AttributeError:
# not available on PythonCE
if not hasattr(os, 'strerror'):
return err.strerror
raise
else:
return str(err)
def _is_ssl_sock(sock):
    """Returns True when pyOpenSSL is available and sock is an SSL.Connection."""
    if SSL is None:
        return False
    return isinstance(sock, SSL.Connection)
def _support_hybrid_ipv6():
"""Return True if it is possible to use hybrid IPv6/IPv4 sockets
on this platform.
"""
# Note: IPPROTO_IPV6 constant is broken on Windows, see:
# http://bugs.python.org/issue6926
try:
if not socket.has_ipv6:
return False
with contextlib.closing(socket.socket(socket.AF_INET6)) as sock:
return not sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
except (socket.error, AttributeError):
return False
SUPPORTS_HYBRID_IPV6 = _support_hybrid_ipv6()
class _FileReadWriteError(OSError):
    """Exception raised when reading or writing a file during a transfer."""
    # Marker subclass: carries no extra state beyond OSError.
class _GiveUpOnSendfile(Exception):
    """Exception raised in case use of sendfile() fails on first try,
    in which case send() will be used.
    """
    # Marker subclass: used purely for control flow, carries no state.
class PassiveDTP(Acceptor):
    """Creates a socket listening on a local port, dispatching the
    resultant connection to DTPHandler. Used for handling PASV command.

     - (int) timeout: the timeout for a remote client to establish
       connection with the listening socket. Defaults to 30 seconds.

     - (int) backlog: the maximum number of queued connections passed
       to listen(). If a connection request arrives when the queue is
       full the client may raise ECONNRESET. Defaults to 5.
    """
    timeout = 30
    # None means "fall back on the control server's configured backlog"
    backlog = None

    def __init__(self, cmd_channel, extmode=False):
        """Initialize the passive data server.

         - (instance) cmd_channel: the command channel class instance.
         - (bool) extmode: whether to use extended passive mode (EPSV,
           229 reply) instead of plain PASV (227 reply).
        """
        self.cmd_channel = cmd_channel
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        Acceptor.__init__(self, ioloop=cmd_channel.ioloop)

        local_ip = self.cmd_channel.socket.getsockname()[0]
        # Pick the (masqueraded) IP to advertise in the PASV reply when
        # the server sits behind a NAT / gateway.
        if local_ip in self.cmd_channel.masquerade_address_map:
            masqueraded_ip = self.cmd_channel.masquerade_address_map[local_ip]
        elif self.cmd_channel.masquerade_address:
            masqueraded_ip = self.cmd_channel.masquerade_address
        else:
            masqueraded_ip = None

        if self.cmd_channel.server.socket.family != socket.AF_INET:
            # dual stack IPv4/IPv6 support
            af = self.bind_af_unspecified((local_ip, 0))
            self.socket.close()
        else:
            af = self.cmd_channel.socket.family

        self.create_socket(af, socket.SOCK_STREAM)

        if self.cmd_channel.passive_ports is None:
            # By using 0 as port number value we let kernel choose a
            # free unprivileged random port.
            self.bind((local_ip, 0))
        else:
            # Try the configured passive ports in random order until one
            # is free; as a last resort let the kernel pick one.
            ports = list(self.cmd_channel.passive_ports)
            while ports:
                port = ports.pop(random.randint(0, len(ports) - 1))
                self.set_reuse_addr()
                try:
                    self.bind((local_ip, port))
                except socket.error as err:
                    if err.errno == errno.EADDRINUSE:  # port already in use
                        if ports:
                            continue
                        # If cannot use one of the ports in the configured
                        # range we'll use a kernel-assigned port, and log
                        # a message reporting the issue.
                        # By using 0 as port number value we let kernel
                        # choose a free unprivileged random port.
                        else:
                            self.bind((local_ip, 0))
                            self.cmd_channel.log(
                                "Can't find a valid passive port in the "
                                "configured range. A random kernel-assigned "
                                "port will be used.",
                                logfun=logger.warning
                            )
                    else:
                        raise
                else:
                    break
        self.listen(self.backlog or self.cmd_channel.server.backlog)

        port = self.socket.getsockname()[1]
        if not extmode:
            ip = masqueraded_ip or local_ip
            if ip.startswith('::ffff:'):
                # In this scenario, the server has an IPv6 socket, but
                # the remote client is using IPv4 and its address is
                # represented as an IPv4-mapped IPv6 address which
                # looks like this ::ffff:151.12.5.65, see:
                # http://en.wikipedia.org/wiki/IPv6#IPv4-mapped_addresses
                # http://tools.ietf.org/html/rfc3493.html#section-3.7
                # We truncate the first bytes to make it look like a
                # common IPv4 address.
                ip = ip[7:]
            # The format of 227 response in not standardized.
            # This is the most expected:
            resp = '227 Entering passive mode (%s,%d,%d).' % (
                ip.replace('.', ','), port // 256, port % 256)
            self.cmd_channel.respond(resp)
        else:
            self.cmd_channel.respond('229 Entering extended passive mode '
                                     '(|||%d|).' % port)
        if self.timeout:
            self.call_later(self.timeout, self.handle_timeout)

    # --- connection / overridden

    def handle_accepted(self, sock, addr):
        """Called when remote client initiates a connection."""
        if not self.cmd_channel.connected:
            return self.close()
        # Check the origin of data connection. If not expressively
        # configured we drop the incoming data connection if remote
        # IP address does not match the client's IP address.
        if self.cmd_channel.remote_ip != addr[0]:
            if not self.cmd_channel.permit_foreign_addresses:
                try:
                    sock.close()
                except socket.error:
                    pass
                msg = '425 Rejected data connection from foreign address ' \
                      '%s:%s.' % (addr[0], addr[1])
                self.cmd_channel.respond_w_warning(msg)
                # do not close listening socket: it couldn't be client's blame
                return
            else:
                # site-to-site FTP allowed
                msg = 'Established data connection with foreign address ' \
                      '%s:%s.' % (addr[0], addr[1])
                self.cmd_channel.log(msg, logfun=logger.warning)
        # Immediately close the current channel (we accept only one
        # connection at time) and avoid running out of max connections
        # limit.
        self.close()
        # delegate such connection to DTP handler
        if self.cmd_channel.connected:
            handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel)
            if handler.connected:
                self.cmd_channel.data_channel = handler
                self.cmd_channel._on_dtp_connection()

    def handle_timeout(self):
        # The client never connected to the listening socket in time.
        if self.cmd_channel.connected:
            self.cmd_channel.respond("421 Passive data channel timed out.",
                                     logfun=logger.info)
        self.close()

    def handle_error(self):
        """Called to handle any uncaught exceptions."""
        try:
            raise
        except Exception:
            logger.error(traceback.format_exc())
        try:
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())

    def close(self):
        debug("call: close()", inst=self)
        Acceptor.close(self)
class ActiveDTP(Connector):
    """Connects to remote client and dispatches the resulting connection
    to DTPHandler. Used for handling PORT command.

     - (int) timeout: the timeout for us to establish connection with
       the client's listening data socket.
    """
    timeout = 30

    def __init__(self, ip, port, cmd_channel):
        """Initialize the active data channel attempting to connect
        to remote data socket.

         - (str) ip: the remote IP address.
         - (int) port: the remote port.
         - (instance) cmd_channel: the command channel class instance.
        """
        Connector.__init__(self, ioloop=cmd_channel.ioloop)
        self.cmd_channel = cmd_channel
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        self._idler = None
        if self.timeout:
            self._idler = self.ioloop.call_later(self.timeout,
                                                 self.handle_timeout,
                                                 _errback=self.handle_error)
        # Remember the command name / normalized address for logging.
        # NOTE(review): a dotted-quad IPv4 address contains 3 dots, not
        # 4, so this condition looks like it can never be true and the
        # EPRT branch is always taken; this only affects the labels used
        # in log_cmd() — confirm intended behavior upstream.
        if ip.count('.') == 4:
            self._cmd = "PORT"
            self._normalized_addr = "%s:%s" % (ip, port)
        else:
            self._cmd = "EPRT"
            self._normalized_addr = "[%s]:%s" % (ip, port)

        source_ip = self.cmd_channel.socket.getsockname()[0]
        # dual stack IPv4/IPv6 support
        try:
            self.connect_af_unspecified((ip, port), (source_ip, 0))
        except (socket.gaierror, socket.error):
            self.handle_close()

    def readable(self):
        # Never interested in read events: this dispatcher only exists
        # until the connect attempt resolves.
        return False

    def handle_write(self):
        # overridden to prevent unhandled read/write event messages to
        # be printed by asyncore on Python < 2.6
        pass

    def handle_connect(self):
        """Called when connection is established."""
        self.del_channel()
        if self._idler is not None and not self._idler.cancelled:
            self._idler.cancel()
        if not self.cmd_channel.connected:
            return self.close()
        # fix for asyncore on python < 2.6, meaning we aren't
        # actually connected.
        # test_active_conn_error tests this condition
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise socket.error(err)
        #
        msg = 'Active data connection established.'
        self.cmd_channel.respond('200 ' + msg)
        self.cmd_channel.log_cmd(self._cmd, self._normalized_addr, 200, msg)
        #
        if not self.cmd_channel.connected:
            return self.close()
        # delegate such connection to DTP handler
        handler = self.cmd_channel.dtp_handler(self.socket, self.cmd_channel)
        self.cmd_channel.data_channel = handler
        self.cmd_channel._on_dtp_connection()

    def handle_timeout(self):
        # We could not connect to the client's data port in time.
        if self.cmd_channel.connected:
            msg = "Active data channel timed out."
            self.cmd_channel.respond("421 " + msg, logfun=logger.info)
            self.cmd_channel.log_cmd(
                self._cmd, self._normalized_addr, 421, msg)
        self.close()

    def handle_close(self):
        # With the new IO loop, handle_close() gets called in case
        # the fd appears in the list of exceptional fds.
        # This means connect() failed.
        if not self._closed:
            self.close()
            if self.cmd_channel.connected:
                msg = "Can't connect to specified address."
                self.cmd_channel.respond("425 " + msg)
                self.cmd_channel.log_cmd(
                    self._cmd, self._normalized_addr, 425, msg)

    def handle_error(self):
        """Called to handle any uncaught exceptions."""
        try:
            raise
        except (socket.gaierror, socket.error):
            # connection-level failures are expected here; handle_close()
            # below reports 425 to the client
            pass
        except Exception:
            self.log_exception(self)
        try:
            self.handle_close()
        except Exception:
            logger.critical(traceback.format_exc())

    def close(self):
        debug("call: close()", inst=self)
        if not self._closed:
            Connector.close(self)
            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
class DTPHandler(AsyncChat):
    """Class handling server-data-transfer-process (server-DTP, see
    RFC-959) managing data-transfer operations involving sending
    and receiving data.

    Class attributes:

     - (int) timeout: the timeout which roughly is the maximum time we
       permit data transfers to stall for with no progress. If the
       timeout triggers, the remote client will be kicked off
       (defaults 300).

     - (int) ac_in_buffer_size: incoming data buffer size (defaults 65536)

     - (int) ac_out_buffer_size: outgoing data buffer size (defaults 65536)
    """
    timeout = 300
    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    def __init__(self, sock, cmd_channel):
        """Initialize the command channel.

         - (instance) sock: the socket object instance of the newly
           established connection.
         - (instance) cmd_channel: the command channel class instance.
        """
        self.cmd_channel = cmd_channel
        self.file_obj = None            # file involved in the transfer
        self.receive = False            # True for uploads (client -> us)
        self.transfer_finished = False
        self.tot_bytes_sent = 0
        self.tot_bytes_received = 0
        self.cmd = None                 # command which started the transfer
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        self._data_wrapper = None
        self._lastdata = 0              # byte count at last timeout check
        self._had_cr = False
        self._start_time = timer()
        self._resp = ()                 # (message, logfun) sent on close()
        self._offset = None
        self._filefd = None
        self._idler = None
        self._initialized = False
        try:
            AsyncChat.__init__(self, sock, ioloop=cmd_channel.ioloop)
        except socket.error as err:
            # if we get an exception here we want the dispatcher
            # instance to set socket attribute before closing, see:
            # https://github.com/giampaolo/pyftpdlib/issues/188
            AsyncChat.__init__(
                self, socket.socket(), ioloop=cmd_channel.ioloop)
            # https://github.com/giampaolo/pyftpdlib/issues/143
            self.close()
            if err.errno == errno.EINVAL:
                return
            self.handle_error()
            return

        # remove this instance from IOLoop's socket map
        if not self.connected:
            self.close()
            return
        if self.timeout:
            self._idler = self.ioloop.call_every(self.timeout,
                                                 self.handle_timeout,
                                                 _errback=self.handle_error)

    def __repr__(self):
        return '<%s(%s)>' % (self.__class__.__name__,
                             self.cmd_channel.get_repr_info(as_str=True))

    __str__ = __repr__

    def use_sendfile(self):
        # Whether sendfile(2) can be used for the current transfer.
        if not self.cmd_channel.use_sendfile:
            # as per server config
            return False
        if self.file_obj is None or not hasattr(self.file_obj, "fileno"):
            # directory listing or unusual file obj
            return False
        if self.cmd_channel._current_type != 'i':
            # text file transfer (need to transform file content on the fly)
            return False
        return True

    def push(self, data):
        # Switch the channel to write-only mode before queueing data.
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.WRITE)
        self._wanted_io_events = self.ioloop.WRITE
        AsyncChat.push(self, data)

    def push_with_producer(self, producer):
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.WRITE)
        self._wanted_io_events = self.ioloop.WRITE
        if self.use_sendfile():
            self._offset = producer.file.tell()
            self._filefd = self.file_obj.fileno()
            try:
                self.initiate_sendfile()
            except _GiveUpOnSendfile:
                pass
            else:
                # from now on write events trigger sendfile() directly
                self.initiate_send = self.initiate_sendfile
                return
        debug("starting transfer using send()", self)
        AsyncChat.push_with_producer(self, producer)

    def close_when_done(self):
        asynchat.async_chat.close_when_done(self)

    def initiate_send(self):
        asynchat.async_chat.initiate_send(self)

    def initiate_sendfile(self):
        """A wrapper around sendfile."""
        try:
            sent = sendfile(self._fileno, self._filefd, self._offset,
                            self.ac_out_buffer_size)
        except OSError as err:
            if err.errno in _ERRNOS_RETRY or err.errno == errno.EBUSY:
                # transient condition; retry on next write event
                return
            elif err.errno in _ERRNOS_DISCONNECTED:
                self.handle_close()
            else:
                if self.tot_bytes_sent == 0:
                    logger.warning(
                        "sendfile() failed; falling back on using plain send")
                    raise _GiveUpOnSendfile
                else:
                    raise
        else:
            if sent == 0:
                # this signals the channel that the transfer is completed
                self.discard_buffers()
                self.handle_close()
            else:
                self._offset += sent
                self.tot_bytes_sent += sent

    # --- utility methods

    def _posix_ascii_data_wrapper(self, chunk):
        """The data wrapper used for receiving data in ASCII mode on
        systems using a single line terminator, handling those cases
        where CRLF ('\r\n') gets delivered in two chunks.
        """
        if self._had_cr:
            chunk = b'\r' + chunk

        if chunk.endswith(b'\r'):
            self._had_cr = True
            chunk = chunk[:-1]
        else:
            self._had_cr = False

        return chunk.replace(b'\r\n', b(os.linesep))

    def enable_receiving(self, type, cmd):
        """Enable receiving of data over the channel. Depending on the
        TYPE currently in use it creates an appropriate wrapper for the
        incoming data.

         - (str) type: current transfer type, 'a' (ASCII) or 'i' (binary).
         - (str) cmd: the command which caused the transfer (stored and
           later passed to log_transfer()).
        """
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.READ)
        self._wanted_io_events = self.ioloop.READ
        self.cmd = cmd
        if type == 'a':
            if os.linesep == '\r\n':
                self._data_wrapper = None
            else:
                self._data_wrapper = self._posix_ascii_data_wrapper
        elif type == 'i':
            self._data_wrapper = None
        else:
            raise TypeError("unsupported type")
        self.receive = True

    def get_transmitted_bytes(self):
        """Return the number of transmitted bytes."""
        return self.tot_bytes_sent + self.tot_bytes_received

    def get_elapsed_time(self):
        """Return the transfer elapsed time in seconds."""
        return timer() - self._start_time

    def transfer_in_progress(self):
        """Return True if a transfer is in progress, else False."""
        return self.get_transmitted_bytes() != 0

    # --- connection

    def send(self, data):
        result = AsyncChat.send(self, data)
        self.tot_bytes_sent += result
        return result

    def refill_buffer(self):  # pragma: no cover
        """Overridden as a fix around http://bugs.python.org/issue1740572
        (when the producer is consumed, close() was called instead of
        handle_close()).
        """
        while True:
            if len(self.producer_fifo):
                p = self.producer_fifo.first()
                # a 'None' in the producer fifo is a sentinel,
                # telling us to close the channel.
                if p is None:
                    if not self.ac_out_buffer:
                        self.producer_fifo.pop()
                        # self.close()
                        self.handle_close()
                    return
                elif isinstance(p, str):
                    self.producer_fifo.pop()
                    self.ac_out_buffer += p
                    return
                data = p.more()
                if data:
                    self.ac_out_buffer = self.ac_out_buffer + data
                    return
                else:
                    self.producer_fifo.pop()
            else:
                return

    def handle_read(self):
        """Called when there is data waiting to be read."""
        try:
            chunk = self.recv(self.ac_in_buffer_size)
        except RetryError:
            pass
        except socket.error:
            self.handle_error()
        else:
            self.tot_bytes_received += len(chunk)
            if not chunk:
                self.transfer_finished = True
                # self.close()  # <-- asyncore.recv() already do that...
                return
            if self._data_wrapper is not None:
                chunk = self._data_wrapper(chunk)
            try:
                self.file_obj.write(chunk)
            except OSError as err:
                # wrap so handle_error() can tell file errors apart
                # from socket errors
                raise _FileReadWriteError(err)

    handle_read_event = handle_read  # small speedup

    def readable(self):
        """Predicate for inclusion in the readable for select()."""
        # It the channel is not supposed to be receiving but yet it's
        # in the list of readable events, that means it has been
        # disconnected, in which case we explicitly close() it.
        # This is necessary as differently from FTPHandler this channel
        # is not supposed to be readable/writable at first, meaning the
        # upper IOLoop might end up calling readable() repeatedly,
        # hogging CPU resources.
        if not self.receive and not self._initialized:
            return self.close()
        return self.receive

    def writable(self):
        """Predicate for inclusion in the writable for select()."""
        return not self.receive and asynchat.async_chat.writable(self)

    def handle_timeout(self):
        """Called cyclically to check if data trasfer is stalling with
        no progress in which case the client is kicked off.
        """
        if self.get_transmitted_bytes() > self._lastdata:
            self._lastdata = self.get_transmitted_bytes()
        else:
            msg = "Data connection timed out."
            self._resp = ("421 " + msg, logger.info)
            self.close()
            self.cmd_channel.close_when_done()

    def handle_error(self):
        """Called when an exception is raised and not otherwise handled."""
        try:
            raise
        # an error could occur in case we fail reading / writing
        # from / to file (e.g. file system gets full)
        except _FileReadWriteError as err:
            # NOTE(review): _FileReadWriteError is raised as
            # _FileReadWriteError(err) wrapping the original exception,
            # so .errno here may be None, making _strerror() return the
            # string 'None' — confirm against upstream.
            error = _strerror(err.errno)
        except Exception:
            # some other exception occurred; we don't want to provide
            # confidential error messages
            self.log_exception(self)
            error = "Internal error"
        try:
            self._resp = ("426 %s; transfer aborted." % error, logger.warning)
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())

    def handle_close(self):
        """Called when the socket is closed."""
        # If we used channel for receiving we assume that transfer is
        # finished when client closes the connection, if we used channel
        # for sending we have to check that all data has been sent
        # (responding with 226) or not (responding with 426).
        # In both cases handle_close() is automatically called by the
        # underlying asynchat module.
        if not self._closed:
            if self.receive:
                self.transfer_finished = True
            else:
                self.transfer_finished = len(self.producer_fifo) == 0
            try:
                if self.transfer_finished:
                    self._resp = ("226 Transfer complete.", logger.debug)
                else:
                    tot_bytes = self.get_transmitted_bytes()
                    self._resp = ("426 Transfer aborted; %d bytes transmitted."
                                  % tot_bytes, logger.debug)
            finally:
                self.close()

    def close(self):
        """Close the data channel, first attempting to close any remaining
        file handles."""
        debug("call: close()", inst=self)
        if not self._closed:
            # RFC-959 says we must close the connection before replying
            AsyncChat.close(self)

            # Close file object before responding successfully to client
            if self.file_obj is not None and not self.file_obj.closed:
                self.file_obj.close()

            if self._resp:
                self.cmd_channel.respond(self._resp[0], logfun=self._resp[1])

            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
            if self.file_obj is not None:
                filename = self.file_obj.name
                elapsed_time = round(self.get_elapsed_time(), 3)
                self.cmd_channel.log_transfer(
                    cmd=self.cmd,
                    filename=self.file_obj.name,
                    receive=self.receive,
                    completed=self.transfer_finished,
                    elapsed=elapsed_time,
                    bytes=self.get_transmitted_bytes())
                # fire completion / abort callbacks on the command channel
                if self.transfer_finished:
                    if self.receive:
                        self.cmd_channel.on_file_received(filename)
                    else:
                        self.cmd_channel.on_file_sent(filename)
                else:
                    if self.receive:
                        self.cmd_channel.on_incomplete_file_received(filename)
                    else:
                        self.cmd_channel.on_incomplete_file_sent(filename)
            self.cmd_channel._on_dtp_close()
if PY3:
    # On Python 3 every class is new-style already; plain subclass.
    class _AsyncChatNewStyle(AsyncChat):
        pass
else:
    # On Python 2 asynchat.async_chat is an old-style class; mixing
    # `object` in first forces new-style semantics (needed by super()
    # calls in subclasses such as ThrottledDTPHandler).
    class _AsyncChatNewStyle(object, AsyncChat):

        def __init__(self, *args, **kwargs):
            super(object, self).__init__(*args, **kwargs)  # bypass object
class ThrottledDTPHandler(_AsyncChatNewStyle, DTPHandler):
    """A DTPHandler subclass which wraps sending and receiving in a data
    counter and temporarily "sleeps" the channel so that you burst to no
    more than x Kb/sec average.

     - (int) read_limit: the maximum number of bytes to read (receive)
       in one second (defaults to 0 == no limit).

     - (int) write_limit: the maximum number of bytes to write (send)
       in one second (defaults to 0 == no limit).

     - (bool) auto_sized_buffers: this option only applies when read
       and/or write limits are specified. When enabled it bumps down
       the data buffer sizes so that they are never greater than read
       and write limits which results in a less bursty and smoother
       throughput (default: True).
    """
    read_limit = 0
    write_limit = 0
    auto_sized_buffers = True

    def __init__(self, sock, cmd_channel):
        super(ThrottledDTPHandler, self).__init__(sock, cmd_channel)
        self._timenext = 0
        self._datacount = 0
        self.sleeping = False
        self._throttler = None
        if self.auto_sized_buffers:
            # halve the buffers until a single read/write can never
            # exceed the per-second quota
            if self.read_limit:
                while self.ac_in_buffer_size > self.read_limit:
                    self.ac_in_buffer_size /= 2
            if self.write_limit:
                while self.ac_out_buffer_size > self.write_limit:
                    self.ac_out_buffer_size /= 2
        # the divisions above may have produced floats on Python 3
        self.ac_in_buffer_size = int(self.ac_in_buffer_size)
        self.ac_out_buffer_size = int(self.ac_out_buffer_size)

    def __repr__(self):
        return DTPHandler.__repr__(self)

    def use_sendfile(self):
        # sendfile() would bypass our send() wrapper and therefore
        # could not be throttled
        return False

    def recv(self, buffer_size):
        chunk = super(ThrottledDTPHandler, self).recv(buffer_size)
        if self.read_limit:
            self._throttle_bandwidth(len(chunk), self.read_limit)
        return chunk

    def send(self, data):
        num_sent = super(ThrottledDTPHandler, self).send(data)
        if self.write_limit:
            self._throttle_bandwidth(num_sent, self.write_limit)
        return num_sent

    def _cancel_throttler(self):
        if self._throttler is not None and not self._throttler.cancelled:
            self._throttler.cancel()

    def _throttle_bandwidth(self, len_chunk, max_speed):
        """A method which counts data transmitted so that you burst to
        no more than x Kb/sec average.
        """
        self._datacount += len_chunk
        if self._datacount >= max_speed:
            self._datacount = 0
            now = timer()
            sleepfor = (self._timenext - now) * 2
            if sleepfor > 0:
                # we've passed bandwidth limits
                def unsleep():
                    # re-register the channel for the event we were
                    # serving before going to sleep
                    if self.receive:
                        event = self.ioloop.READ
                    else:
                        event = self.ioloop.WRITE
                    self.add_channel(events=event)

                self.del_channel()
                self._cancel_throttler()
                self._throttler = self.ioloop.call_later(
                    sleepfor, unsleep, _errback=self.handle_error)
            self._timenext = now + 1

    def close(self):
        self._cancel_throttler()
        super(ThrottledDTPHandler, self).close()
class FileProducer(object):
    """Producer wrapper for file[-like] objects."""

    buffer_size = 65536

    def __init__(self, file, type):
        """Initialize the producer with a data_wrapper appropriate to TYPE.

         - (file) file: the file[-like] object.
         - (str) type: the current TYPE, 'a' (ASCII) or 'i' (binary).
        """
        self.file = file
        self.type = type
        self._prev_chunk_endswith_cr = False
        # In ASCII mode on platforms whose native line terminator is not
        # CRLF, outgoing data must be translated to CRLF; in binary mode
        # (or on Windows) data is sent verbatim.
        if type == 'a' and os.linesep != '\r\n':
            self._data_wrapper = self._posix_ascii_data_wrapper
        else:
            self._data_wrapper = None

    def _posix_ascii_data_wrapper(self, chunk):
        """The data wrapper used for sending data in ASCII mode on
        systems using a single line terminator, handling those cases
        where CRLF ('\r\n') gets delivered in two chunks.
        """
        chunk = bytearray(chunk)
        pos = 0
        if self._prev_chunk_endswith_cr and chunk.startswith(b'\n'):
            # CRLF was split across chunks; the CR already went out
            pos += 1
        while True:
            pos = chunk.find(b'\n', pos)
            if pos == -1:
                break
            # Bug fix: when the chunk starts with '\n' (pos == 0) the
            # previous check `chunk[pos - 1]` wrapped around and looked
            # at the *last* byte of the chunk; if that byte happened to
            # be '\r' the CR insertion for the leading newline was
            # wrongly skipped.
            if pos == 0 or chunk[pos - 1] != CR_BYTE:
                chunk.insert(pos, CR_BYTE)
                pos += 1
            pos += 1
        self._prev_chunk_endswith_cr = chunk.endswith(b'\r')
        return chunk

    def more(self):
        """Attempt a chunk of data of size self.buffer_size."""
        try:
            data = self.file.read(self.buffer_size)
        except OSError as err:
            # wrap so the channel's error handler can distinguish file
            # errors from socket errors
            raise _FileReadWriteError(err)
        else:
            if self._data_wrapper is not None:
                data = self._data_wrapper(data)
            return data
class BufferedIteratorProducer(object):
    """Producer for iterator objects with buffer capabilities."""
    # how many times iterator.next() will be called before
    # returning some data
    loops = 20

    def __init__(self, iterator):
        self.iterator = iterator

    def more(self):
        """Attempt a chunk of data from iterator by calling
        its next() method different times.
        """
        chunks = []
        collect = chunks.append
        for _ in xrange(self.loops):
            try:
                collect(next(self.iterator))
            except StopIteration:
                break
        return b''.join(chunks)
class FTPHandler(AsyncChat):
"""Implements the FTP server Protocol Interpreter (see RFC-959),
handling commands received from the client on the control channel.
All relevant session information is stored in class attributes
reproduced below and can be modified before instantiating this
class.
- (int) timeout:
The timeout which is the maximum time a remote client may spend
between FTP commands. If the timeout triggers, the remote client
will be kicked off. Defaults to 300 seconds.
- (str) banner: the string sent when client connects.
- (int) max_login_attempts:
the maximum number of wrong authentications before disconnecting
the client (default 3).
- (bool)permit_foreign_addresses:
FTP site-to-site transfer feature: also referenced as "FXP" it
permits for transferring a file between two remote FTP servers
without the transfer going through the client's host (not
recommended for security reasons as described in RFC-2577).
Having this attribute set to False means that all data
connections from/to remote IP addresses which do not match the
client's IP address will be dropped (defualt False).
- (bool) permit_privileged_ports:
set to True if you want to permit active data connections (PORT)
over privileged ports (not recommended, defaulting to False).
- (str) masquerade_address:
the "masqueraded" IP address to provide along PASV reply when
pyftpdlib is running behind a NAT or other types of gateways.
When configured pyftpdlib will hide its local address and
instead use the public address of your NAT (default None).
- (dict) masquerade_address_map:
in case the server has multiple IP addresses which are all
behind a NAT router, you may wish to specify individual
masquerade_addresses for each of them. The map expects a
dictionary containing private IP addresses as keys, and their
corresponding public (masquerade) addresses as values.
- (list) passive_ports:
what ports the ftpd will use for its passive data transfers.
Value expected is a list of integers (e.g. range(60000, 65535)).
When configured pyftpdlib will no longer use kernel-assigned
random ports (default None).
- (bool) use_gmt_times:
when True causes the server to report all ls and MDTM times in
GMT and not local time (default True).
- (bool) use_sendfile: when True uses sendfile() system call to
send a file resulting in faster uploads (from server to client).
Works on UNIX only and requires pysendfile module to be
installed separately:
https://github.com/giampaolo/pysendfile/
Automatically defaults to True if pysendfile module is
installed.
- (bool) tcp_no_delay: controls the use of the TCP_NODELAY socket
option which disables the Nagle algorithm resulting in
significantly better performances (default True on all systems
where it is supported).
- (str) unicode_errors:
the error handler passed to ''.encode() and ''.decode():
http://docs.python.org/library/stdtypes.html#str.decode
(detaults to 'replace').
- (str) log_prefix:
the prefix string preceding any log line; all instance
attributes can be used as arguments.
All relevant instance attributes initialized when client connects
are reproduced below. You may be interested in them in case you
want to subclass the original FTPHandler.
- (bool) authenticated: True if client authenticated himself.
- (str) username: the name of the connected user (if any).
- (int) attempted_logins: number of currently attempted logins.
- (str) current_type: the current transfer type (default "a")
- (int) af: the connection's address family (IPv4/IPv6)
- (instance) server: the FTPServer class instance.
- (instance) data_channel: the data channel instance (if any).
"""
# these are overridable defaults
# default classes
authorizer = DummyAuthorizer()
active_dtp = ActiveDTP
passive_dtp = PassiveDTP
dtp_handler = DTPHandler
abstracted_fs = AbstractedFS
proto_cmds = proto_cmds
# session attributes (explained in the docstring)
timeout = 300
banner = "pyftpdlib %s ready." % __ver__
max_login_attempts = 3
permit_foreign_addresses = False
permit_privileged_ports = False
masquerade_address = None
masquerade_address_map = {}
passive_ports = None
use_gmt_times = True
use_sendfile = sendfile is not None
tcp_no_delay = hasattr(socket, "TCP_NODELAY")
unicode_errors = 'replace'
log_prefix = '%(remote_ip)s:%(remote_port)s-[%(username)s]'
auth_failed_timeout = 3
def __init__(self, conn, server, ioloop=None):
"""Initialize the command channel.
- (instance) conn: the socket object instance of the newly
established connection.
- (instance) server: the ftp server class instance.
"""
# public session attributes
self.server = server
self.fs = None
self.authenticated = False
self.username = ""
self.password = ""
self.attempted_logins = 0
self.data_channel = None
self.remote_ip = ""
self.remote_port = ""
self.started = time.time()
# private session attributes
self._last_response = ""
self._current_type = 'a'
self._restart_position = 0
self._quit_pending = False
self._in_buffer = []
self._in_buffer_len = 0
self._epsvall = False
self._dtp_acceptor = None
self._dtp_connector = None
self._in_dtp_queue = None
self._out_dtp_queue = None
self._extra_feats = []
self._current_facts = ['type', 'perm', 'size', 'modify']
self._rnfr = None
self._idler = None
self._log_debug = logging.getLogger('pyftpdlib').getEffectiveLevel() \
<= logging.DEBUG
if os.name == 'posix':
self._current_facts.append('unique')
self._available_facts = self._current_facts[:]
if pwd and grp:
self._available_facts += ['unix.mode', 'unix.uid', 'unix.gid']
if os.name == 'nt':
self._available_facts.append('create')
try:
AsyncChat.__init__(self, conn, ioloop=ioloop)
except socket.error as err:
# if we get an exception here we want the dispatcher
# instance to set socket attribute before closing, see:
# https://github.com/giampaolo/pyftpdlib/issues/188
AsyncChat.__init__(self, socket.socket(), ioloop=ioloop)
self.close()
debug("call: FTPHandler.__init__, err %r" % err, self)
if err.errno == errno.EINVAL:
# https://github.com/giampaolo/pyftpdlib/issues/143
return
self.handle_error()
return
self.set_terminator(b"\r\n")
# connection properties
try:
self.remote_ip, self.remote_port = self.socket.getpeername()[:2]
except socket.error as err:
debug("call: FTPHandler.__init__, err on getpeername() %r" % err,
self)
# A race condition may occur if the other end is closing
# before we can get the peername, hence ENOTCONN (see issue
# #100) while EINVAL can occur on OSX (see issue #143).
self.connected = False
if err.errno in (errno.ENOTCONN, errno.EINVAL):
self.close()
else:
self.handle_error()
return
else:
self.log("FTP session opened (connect)")
# try to handle urgent data inline
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
except socket.error as err:
debug("call: FTPHandler.__init__, err on SO_OOBINLINE %r" % err,
self)
# disable Nagle algorithm for the control socket only, resulting
# in significantly better performances
if self.tcp_no_delay:
try:
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
except socket.error as err:
debug(
"call: FTPHandler.__init__, err on TCP_NODELAY %r" % err,
self)
# remove this instance from IOLoop's socket_map
if not self.connected:
self.close()
return
if self.timeout:
self._idler = self.ioloop.call_later(
self.timeout, self.handle_timeout, _errback=self.handle_error)
def get_repr_info(self, as_str=False, extra_info={}):
info = OrderedDict()
info['id'] = id(self)
info['addr'] = "%s:%s" % (self.remote_ip, self.remote_port)
if _is_ssl_sock(self.socket):
info['ssl'] = True
if self.username:
info['user'] = self.username
# If threads are involved sometimes "self" may be None (?!?).
dc = getattr(self, 'data_channel', None)
if dc is not None:
if _is_ssl_sock(dc.socket):
info['ssl-data'] = True
if dc.file_obj:
if self.data_channel.receive:
info['sending-file'] = dc.file_obj
if dc.use_sendfile():
info['use-sendfile(2)'] = True
else:
info['receiving-file'] = dc.file_obj
info['bytes-trans'] = dc.get_transmitted_bytes()
info.update(extra_info)
if as_str:
return ', '.join(['%s=%r' % (k, v) for (k, v) in info.items()])
return info
def __repr__(self):
return '<%s(%s)>' % (self.__class__.__name__, self.get_repr_info(True))
__str__ = __repr__
def handle(self):
"""Return a 220 'ready' response to the client over the command
channel.
"""
self.on_connect()
if not self._closed and not self._closing:
if len(self.banner) <= 75:
self.respond("220 %s" % str(self.banner))
else:
self.push('220-%s\r\n' % str(self.banner))
self.respond('220 ')
def handle_max_cons(self):
"""Called when limit for maximum number of connections is reached."""
msg = "421 Too many connections. Service temporarily unavailable."
self.respond_w_warning(msg)
# If self.push is used, data could not be sent immediately in
# which case a new "loop" will occur exposing us to the risk of
# accepting new connections. Since this could cause asyncore to
# run out of fds in case we're using select() on Windows we
# immediately close the channel by using close() instead of
# close_when_done(). If data has not been sent yet client will
# be silently disconnected.
self.close()
def handle_max_cons_per_ip(self):
"""Called when too many clients are connected from the same IP."""
msg = "421 Too many connections from the same IP address."
self.respond_w_warning(msg)
self.close_when_done()
def handle_timeout(self):
"""Called when client does not send any command within the time
specified in <timeout> attribute."""
msg = "Control connection timed out."
self.respond("421 " + msg, logfun=logger.info)
self.close_when_done()
# --- asyncore / asynchat overridden methods
def readable(self):
# Checking for self.connected seems to be necessary as per:
# https://github.com/giampaolo/pyftpdlib/issues/188#c18
# In contrast to DTPHandler, here we are not interested in
# attempting to receive any further data from a closed socket.
return self.connected and AsyncChat.readable(self)
    def writable(self):
        """Tell the IO loop whether this channel wants write events."""
        # Same self.connected guard rationale as readable() above.
        return self.connected and AsyncChat.writable(self)
def collect_incoming_data(self, data):
"""Read incoming data and append to the input buffer."""
self._in_buffer.append(data)
self._in_buffer_len += len(data)
# Flush buffer if it gets too long (possible DoS attacks).
# RFC-959 specifies that a 500 response could be given in
# such cases
buflimit = 2048
if self._in_buffer_len > buflimit:
self.respond_w_warning('500 Command too long.')
self._in_buffer = []
self._in_buffer_len = 0
    def decode(self, bytes):
        """Decode the raw command line using UTF-8 and the configured
        ``unicode_errors`` policy.

        NOTE(review): the parameter name shadows the ``bytes`` builtin;
        kept as-is since subclasses may override this hook.
        """
        return bytes.decode('utf8', self.unicode_errors)
    def found_terminator(self):
        r"""Called when the incoming data stream matches the \r\n
        terminator.

        Joins and decodes the buffered line, splits it into command and
        argument, then forwards it to pre_process_command().
        """
        # a full command arrived: the session is active, reset the idle timer
        if self._idler is not None and not self._idler.cancelled:
            self._idler.reset()
        line = b''.join(self._in_buffer)
        try:
            line = self.decode(line)
        except UnicodeDecodeError:
            # By default we'll never get here as we replace errors
            # but user might want to override this behavior.
            # RFC-2640 doesn't mention what to do in this case so
            # we'll just return 501 (bad arg).
            return self.respond("501 Can't decode command.")
        self._in_buffer = []
        self._in_buffer_len = 0
        cmd = line.split(' ')[0].upper()
        # everything after "CMD " is the argument (may be empty)
        arg = line[len(cmd) + 1:]
        try:
            self.pre_process_command(line, cmd, arg)
        except UnicodeEncodeError:
            self.respond("501 can't decode path (server filesystem encoding "
                         "is %s)" % sys.getfilesystemencoding())
    def pre_process_command(self, line, cmd, arg):
        """Validate and normalize a command before dispatching it.

        Handles SITE sub-commands, syntax/argument checks, the
        authentication requirement, translation of FTP virtual paths
        into real filesystem paths and the permission check, then hands
        off to process_command().

        - (str) line: the complete command line as received.
        - (str) cmd: the (upper-cased) command token.
        - (str) arg: the remainder of the line after the command.
        """
        kwargs = {}
        if cmd == "SITE" and arg:
            # "SITE CHMOD ..." is dispatched as its own command
            cmd = "SITE %s" % arg.split(' ')[0].upper()
            arg = line[len(cmd) + 1:]
        if cmd != 'PASS':
            self.logline("<- %s" % line)
        else:
            # never log passwords in clear text
            self.logline("<- %s %s" % (line.split(' ')[0], '*' * 6))
        # Recognize those commands having a "special semantic". They
        # should be sent by following the RFC-959 procedure of sending
        # Telnet IP/Synch sequence (chr 242 and 255) as OOB data but
        # since many ftp clients don't do it correctly we check the
        # last 4 characters only.
        if cmd not in self.proto_cmds:
            if cmd[-4:] in ('ABOR', 'STAT', 'QUIT'):
                cmd = cmd[-4:]
            else:
                msg = 'Command "%s" not understood.' % cmd
                self.respond('500 ' + msg)
                if cmd:
                    self.log_cmd(cmd, arg, 500, msg)
                return
        if not arg and self.proto_cmds[cmd]['arg'] == True:  # NOQA
            msg = "Syntax error: command needs an argument."
            self.respond("501 " + msg)
            self.log_cmd(cmd, "", 501, msg)
            return
        if arg and self.proto_cmds[cmd]['arg'] == False:  # NOQA
            msg = "Syntax error: command does not accept arguments."
            self.respond("501 " + msg)
            self.log_cmd(cmd, arg, 501, msg)
            return
        if not self.authenticated:
            if self.proto_cmds[cmd]['auth'] or (cmd == 'STAT' and arg):
                msg = "Log in with USER and PASS first."
                self.respond("530 " + msg)
                self.log_cmd(cmd, arg, 530, msg)
            else:
                # call the proper ftp_* method
                self.process_command(cmd, arg)
                return
        else:
            if (cmd == 'STAT') and not arg:
                self.ftp_STAT(u(''))
                return
            # for file-system related commands check whether real path
            # destination is valid
            if self.proto_cmds[cmd]['perm'] and (cmd != 'STOU'):
                if cmd in ('CWD', 'XCWD'):
                    arg = self.fs.ftp2fs(arg or u('/'))
                elif cmd in ('CDUP', 'XCUP'):
                    arg = self.fs.ftp2fs(u('..'))
                elif cmd == 'LIST':
                    # some clients pass /bin/ls-style switches; ignore them
                    if arg.lower() in ('-a', '-l', '-al', '-la'):
                        arg = self.fs.ftp2fs(self.fs.cwd)
                    else:
                        arg = self.fs.ftp2fs(arg or self.fs.cwd)
                elif cmd == 'STAT':
                    if glob.has_magic(arg):
                        msg = 'Globbing not supported.'
                        self.respond('550 ' + msg)
                        self.log_cmd(cmd, arg, 550, msg)
                        return
                    arg = self.fs.ftp2fs(arg or self.fs.cwd)
                elif cmd == 'SITE CHMOD':
                    if ' ' not in arg:
                        msg = "Syntax error: command needs two arguments."
                        self.respond("501 " + msg)
                        self.log_cmd(cmd, "", 501, msg)
                        return
                    else:
                        mode, arg = arg.split(' ', 1)
                        arg = self.fs.ftp2fs(arg)
                        kwargs = dict(mode=mode)
                elif cmd == 'MFMT':
                    if ' ' not in arg:
                        msg = "Syntax error: command needs two arguments."
                        self.respond("501 " + msg)
                        self.log_cmd(cmd, "", 501, msg)
                        return
                    else:
                        timeval, arg = arg.split(' ', 1)
                        arg = self.fs.ftp2fs(arg)
                        kwargs = dict(timeval=timeval)
                else:  # LIST, NLST, MLSD, MLST
                    arg = self.fs.ftp2fs(arg or self.fs.cwd)
                # reject paths escaping the user's root directory
                if not self.fs.validpath(arg):
                    line = self.fs.fs2ftp(arg)
                    msg = '"%s" points to a path which is outside ' \
                          "the user's root directory" % line
                    self.respond("550 %s." % msg)
                    self.log_cmd(cmd, arg, 550, msg)
                    return
            # check permission
            perm = self.proto_cmds[cmd]['perm']
            if perm is not None and cmd != 'STOU':
                if not self.authorizer.has_perm(self.username, perm, arg):
                    msg = "Not enough privileges."
                    self.respond("550 " + msg)
                    self.log_cmd(cmd, arg, 550, msg)
                    return
            # call the proper ftp_* method
            self.process_command(cmd, arg, **kwargs)
    def process_command(self, cmd, *args, **kwargs):
        """Process command by calling the corresponding ftp_* class
        method (e.g. for received command "MKD pathname", ftp_MKD()
        method is called with "pathname" as the argument).
        """
        if self._closed:
            return
        # reset so we can detect whether the handler sent a reply
        self._last_response = ""
        # "SITE CHMOD" -> ftp_SITE_CHMOD
        method = getattr(self, 'ftp_' + cmd.replace(' ', '_'))
        method(*args, **kwargs)
        if self._last_response:
            # responses look like "NNN message"; split code from text
            code = int(self._last_response[:3])
            resp = self._last_response[4:]
            self.log_cmd(cmd, args[0], code, resp)
    def handle_error(self):
        """Log an unhandled exception and close the channel."""
        try:
            self.log_exception(self)
            self.close()
        except Exception:
            # never let error handling itself propagate
            logger.critical(traceback.format_exc())
    def handle_close(self):
        """Called by the IO loop when the connection is closed."""
        self.close()
    def close(self):
        """Close the current channel disconnecting the client.

        Tears down any data channel and queued files, cancels the idle
        timer and releases the IP-map slot. Safe to call more than once
        (no-op if already closed).
        """
        debug("call: close()", inst=self)
        if not self._closed:
            AsyncChat.close(self)
            self._shutdown_connecting_dtp()
            if self.data_channel is not None:
                self.data_channel.close()
                del self.data_channel
            # close any file object still referenced by pending queues
            if self._out_dtp_queue is not None:
                file = self._out_dtp_queue[2]
                if file is not None:
                    file.close()
            if self._in_dtp_queue is not None:
                file = self._in_dtp_queue[0]
                if file is not None:
                    file.close()
            del self._out_dtp_queue
            del self._in_dtp_queue
            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
            # remove client IP address from ip map
            if self.remote_ip in self.server.ip_map:
                self.server.ip_map.remove(self.remote_ip)
            # break the reference cycle with the filesystem object
            if self.fs is not None:
                self.fs.cmd_channel = None
                self.fs = None
            self.log("FTP session closed (disconnect).")
            # Having self.remote_ip not set means that no connection
            # actually took place, hence we're not interested in
            # invoking the callback.
            if self.remote_ip:
                self.ioloop.call_later(0, self.on_disconnect,
                                       _errback=self.handle_error)
def _shutdown_connecting_dtp(self):
"""Close any ActiveDTP or PassiveDTP instance waiting to
establish a connection (passive or active).
"""
if self._dtp_acceptor is not None:
self._dtp_acceptor.close()
self._dtp_acceptor = None
if self._dtp_connector is not None:
self._dtp_connector.close()
self._dtp_connector = None
# --- public callbacks
# Note: to run a time consuming task make sure to use a separate
# process or thread (see FAQs).
    def on_connect(self):
        """Called when client connects, *before* sending the initial
        220 reply. Meant to be overridden; default is a no-op.
        """
    def on_disconnect(self):
        """Called when connection is closed. Meant to be overridden;
        default is a no-op.
        """
    def on_login(self, username):
        """Called on user login. Meant to be overridden; default is a
        no-op.
        """
    def on_login_failed(self, username, password):
        """Called on failed login attempt.
        At this point client might have already been disconnected if it
        failed too many times. Meant to be overridden; default is a
        no-op.
        """
    def on_logout(self, username):
        """Called when user "cleanly" logs out due to QUIT or USER
        issued twice (re-login). This is not called if the connection
        is simply closed by client. Meant to be overridden; default is
        a no-op.
        """
    def on_file_sent(self, file):
        """Called every time a file has been successfully sent.
        "file" is the absolute name of the file just being sent.
        """
    def on_file_received(self, file):
        """Called every time a file has been successfully received.
        "file" is the absolute name of the file just being received.
        """
    def on_incomplete_file_sent(self, file):
        """Called every time a file has not been entirely sent
        (e.g. ABOR during transfer or client disconnected).
        "file" is the absolute name of that file.
        """
    def on_incomplete_file_received(self, file):
        """Called every time a file has not been entirely received
        (e.g. ABOR during transfer or client disconnected).
        "file" is the absolute name of that file.
        """
# --- internal callbacks
    def _on_dtp_connection(self):
        """Called every time data channel connects, either active or
        passive.
        Incoming and outgoing queues are checked for pending data.
        If outbound data is pending, it is pushed into the data channel.
        If awaiting inbound data, the data channel is enabled for
        receiving.
        """
        # Close accepting DTP only. By closing ActiveDTP DTPHandler
        # would receive a closed socket object.
        # self._shutdown_connecting_dtp()
        if self._dtp_acceptor is not None:
            self._dtp_acceptor.close()
            self._dtp_acceptor = None
        # stop the idle timer as long as the data transfer is not finished
        if self._idler is not None and not self._idler.cancelled:
            self._idler.cancel()
        # check for data to send
        if self._out_dtp_queue is not None:
            data, isproducer, file, cmd = self._out_dtp_queue
            self._out_dtp_queue = None
            self.data_channel.cmd = cmd
            if file:
                self.data_channel.file_obj = file
            try:
                if not isproducer:
                    self.data_channel.push(data)
                else:
                    self.data_channel.push_with_producer(data)
                # push()/push_with_producer() may have closed the channel
                if self.data_channel is not None:
                    self.data_channel.close_when_done()
            except Exception:
                # dealing with this exception is up to DTP (see bug #84)
                self.data_channel.handle_error()
        # check for data to receive
        elif self._in_dtp_queue is not None:
            file, cmd = self._in_dtp_queue
            self.data_channel.file_obj = file
            self._in_dtp_queue = None
            self.data_channel.enable_receiving(self._current_type, cmd)
    def _on_dtp_close(self):
        """Called every time the data channel is closed.

        If a QUIT was received during the transfer the session is
        closed; otherwise the idle timer is restarted.
        """
        self.data_channel = None
        if self._quit_pending:
            self.close()
        elif self.timeout:
            # data transfer finished, restart the idle timer
            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
            self._idler = self.ioloop.call_later(
                self.timeout, self.handle_timeout, _errback=self.handle_error)
# --- utility
    def push(self, s):
        """Queue the UTF-8 encoded string for sending on the command
        channel.
        """
        asynchat.async_chat.push(self, s.encode('utf8'))
    def respond(self, resp, logfun=logger.debug):
        """Send a response to the client using the command channel.

        Also records it in _last_response (used by process_command()
        for logging) and logs the reply.
        """
        self._last_response = resp
        self.push(resp + '\r\n')
        if self._log_debug:
            self.logline('-> %s' % resp, logfun=logfun)
        else:
            # skip the "NNN " code prefix when not in debug mode
            self.log(resp[4:], logfun=logfun)
    def respond_w_warning(self, resp):
        """Like respond() but logs the reply at WARNING level."""
        self.respond(resp, logfun=logger.warning)
    def push_dtp_data(self, data, isproducer=False, file=None, cmd=None):
        """Pushes data into the data channel.
        It is usually called for those commands requiring some data to
        be sent over the data channel (e.g. RETR).
        If data channel does not exist yet, it queues the data to send
        later; data will then be pushed into data channel when
        _on_dtp_connection() will be called.
        - (str/classobj) data: the data to send which may be a string
          or a producer object).
        - (bool) isproducer: whether treat data as a producer.
        - (file) file: the file[-like] object to send (if any).
        - (str) cmd: the command which originated the transfer (if any).
        """
        if self.data_channel is not None:
            self.respond(
                "125 Data connection already open. Transfer starting.")
            if file:
                self.data_channel.file_obj = file
            try:
                if not isproducer:
                    self.data_channel.push(data)
                else:
                    self.data_channel.push_with_producer(data)
                # push()/push_with_producer() may have closed the channel
                if self.data_channel is not None:
                    self.data_channel.cmd = cmd
                    self.data_channel.close_when_done()
            except Exception:
                # dealing with this exception is up to DTP (see bug #84)
                self.data_channel.handle_error()
        else:
            # no data channel yet; queue until _on_dtp_connection()
            self.respond(
                "150 File status okay. About to open data connection.")
            self._out_dtp_queue = (data, isproducer, file, cmd)
    def flush_account(self):
        """Flush account information by clearing attributes that need
        to be reset on a REIN or new USER command.

        Also fires the on_logout() callback if a user was authenticated.
        """
        self._shutdown_connecting_dtp()
        # if there's a transfer in progress RFC-959 states we are
        # supposed to let it finish
        if self.data_channel is not None:
            if not self.data_channel.transfer_in_progress():
                self.data_channel.close()
                self.data_channel = None
        username = self.username
        if self.authenticated and username:
            self.on_logout(username)
        # reset all per-login session state
        self.authenticated = False
        self.username = ""
        self.password = ""
        self.attempted_logins = 0
        self._current_type = 'a'
        self._restart_position = 0
        self._quit_pending = False
        self._in_dtp_queue = None
        self._rnfr = None
        self._out_dtp_queue = None
    def run_as_current_user(self, function, *args, **kwargs):
        """Execute a function impersonating the current logged-in user.

        Impersonation is always terminated, even if the function raises.
        """
        self.authorizer.impersonate_user(self.username, self.password)
        try:
            return function(*args, **kwargs)
        finally:
            self.authorizer.terminate_impersonation(self.username)
# --- logging wrappers
# this is defined earlier
# log_prefix = '%(remote_ip)s:%(remote_port)s-[%(username)s]'
def log(self, msg, logfun=logger.info):
"""Log a message, including additional identifying session data."""
prefix = self.log_prefix % self.__dict__
logfun("%s %s" % (prefix, msg))
    def logline(self, msg, logfun=logger.debug):
        """Log a line including additional identifying session data.
        By default this is disabled unless logging level == DEBUG.
        """
        if self._log_debug:
            prefix = self.log_prefix % self.__dict__
            logfun("%s %s" % (prefix, msg))
    def logerror(self, msg):
        """Log an error including additional identifying session data."""
        prefix = self.log_prefix % self.__dict__
        logger.error("%s %s" % (prefix, msg))
    def log_exception(self, instance):
        """Log an unhandled exception. 'instance' is the instance
        where the exception was generated.
        """
        logger.exception("unhandled exception in instance %r", instance)
# the list of commands which gets logged when logging level
# is >= logging.INFO
log_cmds_list = ["DELE", "RNFR", "RNTO", "MKD", "RMD", "CWD",
"XMKD", "XRMD", "XCWD",
"REIN", "SITE CHMOD", "MFMT"]
    def log_cmd(self, cmd, arg, respcode, respstr):
        """Log commands and responses in a standardized format.
        This is disabled in case the logging level is set to DEBUG.
        - (str) cmd:
          the command sent by client
        - (str) arg:
          the command argument sent by client.
          For filesystem commands such as DELE, MKD, etc. this is
          already represented as an absolute real filesystem path
          like "/home/user/file.ext".
        - (int) respcode:
          the response code as being sent by server. Response codes
          starting with 4xx or 5xx are returned if the command has
          been rejected for some reason.
        - (str) respstr:
          the response string as being sent by server.
        By default only the commands listed in log_cmds_list (DELE,
        RNFR, RNTO, MKD, RMD, CWD, XMKD, XRMD, XCWD, REIN, SITE CHMOD,
        MFMT) are logged and the output is redirected to self.log
        method.
        Can be overridden to provide alternate formats or to log
        further commands.
        """
        if not self._log_debug and cmd in self.log_cmds_list:
            line = '%s %s' % (' '.join([cmd, arg]).strip(), respcode)
            # include the reply text for error (4xx/5xx) responses
            if str(respcode)[0] in ('4', '5'):
                line += ' %r' % respstr
            self.log(line)
    def log_transfer(self, cmd, filename, receive, completed, elapsed, bytes):
        """Log all file transfers in a standardized format.
        - (str) cmd:
          the original command who caused the transfer.
        - (str) filename:
          the absolutized name of the file on disk.
        - (bool) receive:
          True if the transfer was used for client uploading (STOR,
          STOU, APPE), False otherwise (RETR).
        - (bool) completed:
          True if the file has been entirely sent, else False.
        - (float) elapsed:
          transfer elapsed time in seconds.
        - (int) bytes:
          number of bytes transmitted.
        """
        line = '%s %s completed=%s bytes=%s seconds=%s' % \
            (cmd, filename, completed and 1 or 0, bytes, elapsed)
        self.log(line)
# --- connection
    def _make_eport(self, ip, port):
        """Establish an active data channel with remote client which
        issued a PORT or EPRT command.

        Performs the RFC-2577 sanity checks (foreign address, privileged
        port) before opening the connection.
        """
        # FTP bounce attacks protection: according to RFC-2577 it's
        # recommended to reject PORT if IP address specified in it
        # does not match client IP address.
        remote_ip = self.remote_ip
        if remote_ip.startswith('::ffff:'):
            # In this scenario, the server has an IPv6 socket, but
            # the remote client is using IPv4 and its address is
            # represented as an IPv4-mapped IPv6 address which
            # looks like this ::ffff:151.12.5.65, see:
            # http://en.wikipedia.org/wiki/IPv6#IPv4-mapped_addresses
            # http://tools.ietf.org/html/rfc3493.html#section-3.7
            # We truncate the first bytes to make it look like a
            # common IPv4 address.
            remote_ip = remote_ip[7:]
        if not self.permit_foreign_addresses and ip != remote_ip:
            msg = "501 Rejected data connection to foreign address %s:%s." \
                  % (ip, port)
            self.respond_w_warning(msg)
            return
        # ...another RFC-2577 recommendation is rejecting connections
        # to privileged ports (< 1024) for security reasons.
        if not self.permit_privileged_ports and port < 1024:
            msg = '501 PORT against the privileged port "%s" refused.' % port
            self.respond_w_warning(msg)
            return
        # close establishing DTP instances, if any
        self._shutdown_connecting_dtp()
        if self.data_channel is not None:
            self.data_channel.close()
            self.data_channel = None
        # make sure we are not hitting the max connections limit
        if not self.server._accept_new_cons():
            msg = "425 Too many connections. Can't open data channel."
            self.respond_w_warning(msg)
            return
        # open data channel
        self._dtp_connector = self.active_dtp(ip, port, self)
    def _make_epasv(self, extmode=False):
        """Initialize a passive data channel with remote client which
        issued a PASV or EPSV command.
        If extmode argument is True we assume that client issued EPSV in
        which case extended passive mode will be used (see RFC-2428).
        """
        # close establishing DTP instances, if any
        self._shutdown_connecting_dtp()
        # close established data connections, if any
        if self.data_channel is not None:
            self.data_channel.close()
            self.data_channel = None
        # make sure we are not hitting the max connections limit
        if not self.server._accept_new_cons():
            msg = "425 Too many connections. Can't open data channel."
            self.respond_w_warning(msg)
            return
        # open data channel
        self._dtp_acceptor = self.passive_dtp(self, extmode)
def ftp_PORT(self, line):
"""Start an active data channel by using IPv4."""
if self._epsvall:
self.respond("501 PORT not allowed after EPSV ALL.")
return
# Parse PORT request for getting IP and PORT.
# Request comes in as:
# > h1,h2,h3,h4,p1,p2
# ...where the client's IP address is h1.h2.h3.h4 and the TCP
# port number is (p1 * 256) + p2.
try:
addr = list(map(int, line.split(',')))
if len(addr) != 6:
raise ValueError
for x in addr[:4]:
if not 0 <= x <= 255:
raise ValueError
ip = '%d.%d.%d.%d' % tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
if not 0 <= port <= 65535:
raise ValueError
except (ValueError, OverflowError):
self.respond("501 Invalid PORT format.")
return
self._make_eport(ip, port)
    def ftp_EPRT(self, line):
        """Start an active data channel by choosing the network protocol
        to use (IPv4/IPv6) as defined in RFC-2428.
        """
        if self._epsvall:
            self.respond("501 EPRT not allowed after EPSV ALL.")
            return
        # Parse EPRT request for getting protocol, IP and PORT.
        # Request comes in as:
        # <d>proto<d>ip<d>port<d>
        # ...where <d> is an arbitrary delimiter character (usually "|") and
        # <proto> is the network protocol to use (1 for IPv4, 2 for IPv6).
        try:
            af, ip, port = line.split(line[0])[1:-1]
            port = int(port)
            if not 0 <= port <= 65535:
                raise ValueError
        except (ValueError, IndexError, OverflowError):
            self.respond("501 Invalid EPRT format.")
            return
        if af == "1":
            # test if AF_INET6 and IPV6_V6ONLY
            if (self.socket.family == socket.AF_INET6 and not
                    SUPPORTS_HYBRID_IPV6):
                self.respond('522 Network protocol not supported (use 2).')
            else:
                # validate the dotted-quad IPv4 address ourselves
                try:
                    octs = list(map(int, ip.split('.')))
                    if len(octs) != 4:
                        raise ValueError
                    for x in octs:
                        if not 0 <= x <= 255:
                            raise ValueError
                except (ValueError, OverflowError):
                    self.respond("501 Invalid EPRT format.")
                else:
                    self._make_eport(ip, port)
        elif af == "2":
            if self.socket.family == socket.AF_INET:
                self.respond('522 Network protocol not supported (use 1).')
            else:
                self._make_eport(ip, port)
        else:
            # unknown protocol; suggest the one matching our socket family
            if self.socket.family == socket.AF_INET:
                self.respond('501 Unknown network protocol (use 1).')
            else:
                self.respond('501 Unknown network protocol (use 2).')
    def ftp_PASV(self, line):
        """Start a passive data channel by using IPv4."""
        if self._epsvall:
            self.respond("501 PASV not allowed after EPSV ALL.")
            return
        self._make_epasv(extmode=False)
    def ftp_EPSV(self, line):
        """Start a passive data channel by using IPv4 or IPv6 as defined
        in RFC-2428.
        """
        # RFC-2428 specifies that if an optional parameter is given,
        # we have to determine the address family from that otherwise
        # use the same address family used on the control connection.
        # In such a scenario a client may use IPv4 on the control channel
        # and choose to use IPv6 for the data channel.
        # But how could we use IPv6 on the data channel without knowing
        # which IPv6 address to use for binding the socket?
        # Unfortunately RFC-2428 does not provide satisfing information
        # on how to do that. The assumption is that we don't have any way
        # to know wich address to use, hence we just use the same address
        # family used on the control connection.
        if not line:
            self._make_epasv(extmode=True)
        # IPv4
        elif line == "1":
            if self.socket.family != socket.AF_INET:
                self.respond('522 Network protocol not supported (use 2).')
            else:
                self._make_epasv(extmode=True)
        # IPv6
        elif line == "2":
            if self.socket.family == socket.AF_INET:
                self.respond('522 Network protocol not supported (use 1).')
            else:
                self._make_epasv(extmode=True)
        elif line.lower() == 'all':
            # from now on reject any other data-channel setup command
            self._epsvall = True
            self.respond(
                '220 Other commands other than EPSV are now disabled.')
        else:
            if self.socket.family == socket.AF_INET:
                self.respond('501 Unknown network protocol (use 1).')
            else:
                self.respond('501 Unknown network protocol (use 2).')
    def ftp_QUIT(self, line):
        """Quit the current session disconnecting the client."""
        if self.authenticated:
            msg_quit = self.authorizer.get_msg_quit(self.username)
        else:
            msg_quit = "Goodbye."
        if len(msg_quit) <= 75:
            self.respond("221 %s" % msg_quit)
        else:
            # long quit messages go out as a multi-line 221 reply
            self.push("221-%s\r\n" % msg_quit)
            self.respond("221 ")
        # From RFC-959:
        # If file transfer is in progress, the connection must remain
        # open for result response and the server will then close it.
        # We also stop responding to any further command.
        if self.data_channel:
            self._quit_pending = True
            self.del_channel()
        else:
            self._shutdown_connecting_dtp()
            self.close_when_done()
        if self.authenticated and self.username:
            self.on_logout(self.username)
# --- data transferring
    def ftp_LIST(self, path):
        """Return a list of files in the specified directory to the
        client.
        On success return the directory path, else None.
        """
        # - If no argument, fall back on cwd as default.
        # - Some older FTP clients erroneously issue /bin/ls-like LIST
        #   formats in which case we fall back on cwd as default.
        try:
            isdir = self.fs.isdir(path)
            if isdir:
                listing = self.run_as_current_user(self.fs.listdir, path)
                if isinstance(listing, list):
                    try:
                        # RFC 959 recommends the listing to be sorted.
                        listing.sort()
                    except UnicodeDecodeError:
                        # (Python 2 only) might happen on filesystem not
                        # supporting UTF8 meaning os.listdir() returned a list
                        # of mixed bytes and unicode strings:
                        # http://goo.gl/6DLHD
                        # http://bugs.python.org/issue683592
                        pass
                iterator = self.fs.format_list(path, listing)
            else:
                # path is a file: list just that entry
                basedir, filename = os.path.split(path)
                self.fs.lstat(path)  # raise exc in case of problems
                iterator = self.fs.format_list(basedir, [filename])
        except (OSError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            # stream the listing over the data channel
            producer = BufferedIteratorProducer(iterator)
            self.push_dtp_data(producer, isproducer=True, cmd="LIST")
            return path
    def ftp_NLST(self, path):
        """Return a list of files in the specified directory in a
        compact form to the client.
        On success return the directory path, else None.
        """
        try:
            if self.fs.isdir(path):
                listing = list(self.run_as_current_user(self.fs.listdir, path))
            else:
                # if path is a file we just list its name
                self.fs.lstat(path)  # raise exc in case of problems
                listing = [os.path.basename(path)]
        except (OSError, FilesystemError) as err:
            self.respond('550 %s.' % _strerror(err))
        else:
            data = ''
            if listing:
                try:
                    listing.sort()
                except UnicodeDecodeError:
                    # (Python 2 only) might happen on filesystem not
                    # supporting UTF8 meaning os.listdir() returned a list
                    # of mixed bytes and unicode strings:
                    # http://goo.gl/6DLHD
                    # http://bugs.python.org/issue683592
                    ls = []
                    for x in listing:
                        if not isinstance(x, unicode):
                            x = unicode(x, 'utf8')
                        ls.append(x)
                    listing = sorted(ls)
                data = '\r\n'.join(listing) + '\r\n'
            # encode once at the boundary before hitting the data channel
            data = data.encode('utf8', self.unicode_errors)
            self.push_dtp_data(data, cmd="NLST")
            return path
# --- MLST and MLSD commands
# The MLST and MLSD commands are intended to standardize the file and
# directory information returned by the server-FTP process. These
# commands differ from the LIST command in that the format of the
# replies is strictly defined although extensible.
    def ftp_MLST(self, path):
        """Return information about a pathname in a machine-processable
        form as defined in RFC-3659.
        On success return the path just listed, else None.
        """
        line = self.fs.fs2ftp(path)
        basedir, basename = os.path.split(path)
        perms = self.authorizer.get_perms(self.username)
        try:
            iterator = self.run_as_current_user(
                self.fs.format_mlsx, basedir, [basename], perms,
                self._current_facts, ignore_err=False)
            data = b''.join(iterator)
        except (OSError, FilesystemError) as err:
            self.respond('550 %s.' % _strerror(err))
        else:
            data = data.decode('utf8', self.unicode_errors)
            # since TVFS is supported (see RFC-3659 chapter 6), a fully
            # qualified pathname should be returned
            data = data.split(' ')[0] + ' %s\r\n' % line
            # response is expected on the command channel
            self.push('250-Listing "%s":\r\n' % line)
            # the fact set must be preceded by a space
            self.push(' ' + data)
            self.respond('250 End MLST.')
            return path
    def ftp_MLSD(self, path):
        """Return contents of a directory in a machine-processable form
        as defined in RFC-3659.
        On success return the path just listed, else None.
        """
        # RFC-3659 requires 501 response code if path is not a directory
        if not self.fs.isdir(path):
            self.respond("501 No such directory.")
            return
        try:
            listing = self.run_as_current_user(self.fs.listdir, path)
        except (OSError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            perms = self.authorizer.get_perms(self.username)
            iterator = self.fs.format_mlsx(path, listing, perms,
                                           self._current_facts)
            # stream the listing over the data channel
            producer = BufferedIteratorProducer(iterator)
            self.push_dtp_data(producer, isproducer=True, cmd="MLSD")
            return path
    def ftp_RETR(self, file):
        """Retrieve the specified file (transfer from the server to the
        client). On success return the file path else None.

        Honors a preceding REST by seeking into the file first.
        """
        rest_pos = self._restart_position
        self._restart_position = 0
        try:
            fd = self.run_as_current_user(self.fs.open, file, 'rb')
        except (EnvironmentError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
            return
        try:
            if rest_pos:
                # Make sure that the requested offset is valid (within the
                # size of the file being resumed).
                # According to RFC-1123 a 554 reply may result in case that
                # the existing file cannot be repositioned as specified in
                # the REST.
                ok = 0
                try:
                    if rest_pos > self.fs.getsize(file):
                        raise ValueError
                    fd.seek(rest_pos)
                    ok = 1
                except ValueError:
                    why = "Invalid REST parameter"
                except (EnvironmentError, FilesystemError) as err:
                    why = _strerror(err)
                if not ok:
                    fd.close()
                    self.respond('554 %s' % why)
                    return
            producer = FileProducer(fd, self._current_type)
            self.push_dtp_data(producer, isproducer=True, file=fd, cmd="RETR")
            return file
        except Exception:
            # don't leak the file descriptor on unexpected errors
            fd.close()
            raise
    def ftp_STOR(self, file, mode='w'):
        """Store a file (transfer from the client to the server).
        On success return the file path, else None.

        - (str) file: the absolute destination path on disk.
        - (str) mode: 'w' for STOR, 'a' for APPE (see ftp_APPE()).
        """
        # A resume could occur in case of APPE or REST commands.
        # In that case we have to open file object in different ways:
        # STOR: mode = 'w'
        # APPE: mode = 'a'
        # REST: mode = 'r+' (to permit seeking on file object)
        if 'a' in mode:
            cmd = 'APPE'
        else:
            cmd = 'STOR'
        rest_pos = self._restart_position
        self._restart_position = 0
        if rest_pos:
            mode = 'r+'
        try:
            fd = self.run_as_current_user(self.fs.open, file, mode + 'b')
        except (EnvironmentError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
            return
        try:
            if rest_pos:
                # Make sure that the requested offset is valid (within the
                # size of the file being resumed).
                # According to RFC-1123 a 554 reply may result in case
                # that the existing file cannot be repositioned as
                # specified in the REST.
                ok = 0
                try:
                    if rest_pos > self.fs.getsize(file):
                        raise ValueError
                    fd.seek(rest_pos)
                    ok = 1
                except ValueError:
                    why = "Invalid REST parameter"
                except (EnvironmentError, FilesystemError) as err:
                    why = _strerror(err)
                if not ok:
                    fd.close()
                    self.respond('554 %s' % why)
                    return
            if self.data_channel is not None:
                resp = "Data connection already open. Transfer starting."
                self.respond("125 " + resp)
                self.data_channel.file_obj = fd
                self.data_channel.enable_receiving(self._current_type, cmd)
            else:
                # queue until the data channel connects
                resp = "File status okay. About to open data connection."
                self.respond("150 " + resp)
                self._in_dtp_queue = (fd, cmd)
            return file
        except Exception:
            # don't leak the file descriptor on unexpected errors
            fd.close()
            raise
    def ftp_STOU(self, line):
        """Store a file on the server with a unique name.
        On success return the file path, else None.
        """
        # Note 1: RFC-959 prohibited STOU parameters, but this
        # prohibition is obsolete.
        # Note 2: 250 response wanted by RFC-959 has been declared
        # incorrect in RFC-1123 that wants 125/150 instead.
        # Note 3: RFC-1123 also provided an exact output format
        # defined to be as follow:
        # > 125 FILE: pppp
        # ...where pppp represents the unique path name of the
        # file that will be written.
        # watch for STOU preceded by REST, which makes no sense.
        if self._restart_position:
            self.respond("450 Can't STOU while REST request is pending.")
            return
        if line:
            # use the client-provided name as the temp-file prefix
            basedir, prefix = os.path.split(self.fs.ftp2fs(line))
            prefix = prefix + '.'
        else:
            basedir = self.fs.ftp2fs(self.fs.cwd)
            prefix = 'ftpd.'
        try:
            fd = self.run_as_current_user(self.fs.mkstemp, prefix=prefix,
                                          dir=basedir)
        except (EnvironmentError, FilesystemError) as err:
            # likely, we hit the max number of retries to find out a
            # file with a unique name
            if getattr(err, "errno", -1) == errno.EEXIST:
                why = 'No usable unique file name found'
            # something else happened
            else:
                why = _strerror(err)
            self.respond("450 %s." % why)
            return
        try:
            if not self.authorizer.has_perm(self.username, 'w', fd.name):
                # no write permission: remove the file we just created
                try:
                    fd.close()
                    self.run_as_current_user(self.fs.remove, fd.name)
                except (OSError, FilesystemError):
                    pass
                self.respond("550 Not enough privileges.")
                return
            # now just acts like STOR except that restarting isn't allowed
            filename = os.path.basename(fd.name)
            if self.data_channel is not None:
                self.respond("125 FILE: %s" % filename)
                self.data_channel.file_obj = fd
                self.data_channel.enable_receiving(self._current_type, "STOU")
            else:
                self.respond("150 FILE: %s" % filename)
                self._in_dtp_queue = (fd, "STOU")
            return filename
        except Exception:
            # don't leak the file descriptor on unexpected errors
            fd.close()
            raise
def ftp_APPE(self, file):
"""Append data to an existing file on the server.
On success return the file path, else None.
"""
# watch for APPE preceded by REST, which makes no sense.
if self._restart_position:
self.respond("450 Can't APPE while REST request is pending.")
else:
return self.ftp_STOR(file, mode='a')
def ftp_REST(self, line):
"""Restart a file transfer from a previous mark."""
if self._current_type == 'a':
self.respond('501 Resuming transfers not allowed in ASCII mode.')
return
try:
marker = int(line)
if marker < 0:
raise ValueError
except (ValueError, OverflowError):
self.respond("501 Invalid parameter.")
else:
self.respond("350 Restarting at position %s." % marker)
self._restart_position = marker
    def ftp_ABOR(self, line):
        """Abort the current data transfer.

        Replies 225 if nothing was in progress, 426 + 226 if an actual
        transfer was aborted (RFC-959).
        """
        # ABOR received while no data channel exists
        if (self._dtp_acceptor is None and
                self._dtp_connector is None and
                self.data_channel is None):
            self.respond("225 No transfer to abort.")
            return
        else:
            # a PASV or PORT was received but connection wasn't made yet
            if (self._dtp_acceptor is not None or
                    self._dtp_connector is not None):
                self._shutdown_connecting_dtp()
                resp = "225 ABOR command successful; data channel closed."
            # If a data transfer is in progress the server must first
            # close the data connection, returning a 426 reply to
            # indicate that the transfer terminated abnormally, then it
            # must send a 226 reply, indicating that the abort command
            # was successfully processed.
            # If no data has been transmitted we just respond with 225
            # indicating that no transfer was in progress.
            if self.data_channel is not None:
                if self.data_channel.transfer_in_progress():
                    self.data_channel.close()
                    self.data_channel = None
                    self.respond("426 Transfer aborted via ABOR.",
                                 logfun=logger.info)
                    resp = "226 ABOR command successful."
                else:
                    self.data_channel.close()
                    self.data_channel = None
                    resp = "225 ABOR command successful; data channel closed."
        self.respond(resp)
# --- authentication
def ftp_USER(self, line):
"""Set the username for the current session."""
# RFC-959 specifies a 530 response to the USER command if the
# username is not valid. If the username is valid is required
# ftpd returns a 331 response instead. In order to prevent a
# malicious client from determining valid usernames on a server,
# it is suggested by RFC-2577 that a server always return 331 to
# the USER command and then reject the combination of username
# and password for an invalid username when PASS is provided later.
if not self.authenticated:
self.respond('331 Username ok, send password.')
else:
# a new USER command could be entered at any point in order
# to change the access control flushing any user, password,
# and account information already supplied and beginning the
# login sequence again.
self.flush_account()
msg = 'Previous account information was flushed'
self.respond('331 %s, send password.' % msg, logfun=logger.info)
self.username = line
    def handle_auth_failed(self, msg, password):
        """React to a failed login attempt.

        The 530 response is deferred by ``auth_failed_timeout`` seconds
        (via ``ioloop.call_later``), which slows down brute-force
        password guessing.  After ``max_login_attempts`` failures the
        client is disconnected.
        """
        def callback(username, password, msg):
            # runs after the delay; channel was detached meanwhile
            self.add_channel()
            # the client may have disconnected during the delay
            if hasattr(self, '_closed') and not self._closed:
                self.attempted_logins += 1
                if self.attempted_logins >= self.max_login_attempts:
                    msg += " Disconnecting."
                    self.respond("530 " + msg)
                    self.close_when_done()
                else:
                    self.respond("530 " + msg)
                self.log("USER '%s' failed login." % username)
            self.on_login_failed(username, password)
        # detach the channel so no other command is processed while the
        # penalty delay is pending
        self.del_channel()
        if not msg:
            if self.username == 'anonymous':
                msg = "Anonymous access not allowed."
            else:
                msg = "Authentication failed."
        else:
            # response string should be capitalized as per RFC-959
            msg = msg.capitalize()
        self.ioloop.call_later(self.auth_failed_timeout, callback,
                               self.username, password, msg,
                               _errback=self.handle_error)
        self.username = ""
    def handle_auth_success(self, home, password, msg_login):
        """Finalize a successful login: send the 230 reply, mark the
        session as authenticated and mount the user's filesystem
        rooted at *home*.
        """
        # home must be text; on Python 2 tolerate bytes from custom
        # authorizers (with a warning), on Python 3 it is an error.
        if not isinstance(home, unicode):
            if PY3:
                raise TypeError('type(home) != text')
            else:
                warnings.warn(
                    '%s.get_home_dir returned a non-unicode string; now '
                    'casting to unicode' % (
                        self.authorizer.__class__.__name__),
                    RuntimeWarning)
                home = home.decode('utf8')
        # short banners fit in a single 230 line; longer ones use a
        # multi-line 230- reply
        if len(msg_login) <= 75:
            self.respond('230 %s' % msg_login)
        else:
            self.push("230-%s\r\n" % msg_login)
            self.respond("230 ")
        self.log("USER '%s' logged in." % self.username)
        self.authenticated = True
        self.password = password
        self.attempted_logins = 0
        self.fs = self.abstracted_fs(home, self)
        self.on_login(self.username)
def ftp_PASS(self, line):
"""Check username's password against the authorizer."""
if self.authenticated:
self.respond("503 User already authenticated.")
return
if not self.username:
self.respond("503 Login with USER first.")
return
try:
self.authorizer.validate_authentication(self.username, line, self)
home = self.authorizer.get_home_dir(self.username)
msg_login = self.authorizer.get_msg_login(self.username)
except (AuthenticationFailed, AuthorizerError) as err:
self.handle_auth_failed(str(err), line)
else:
self.handle_auth_success(home, line, msg_login)
def ftp_REIN(self, line):
"""Reinitialize user's current session."""
# From RFC-959:
# REIN command terminates a USER, flushing all I/O and account
# information, except to allow any transfer in progress to be
# completed. All parameters are reset to the default settings
# and the control connection is left open. This is identical
# to the state in which a user finds himself immediately after
# the control connection is opened.
self.flush_account()
# Note: RFC-959 erroneously mention "220" as the correct response
# code to be given in this case, but this is wrong...
self.respond("230 Ready for new user.")
# --- filesystem operations
def ftp_PWD(self, line):
"""Return the name of the current working directory to the client."""
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
cwd = self.fs.cwd
assert isinstance(cwd, unicode), cwd
self.respond('257 "%s" is the current directory.'
% cwd.replace('"', '""'))
    def ftp_CWD(self, path):
        """Change the current working directory.
        On success return the new directory path, else None.
        """
        # Temporarily join the specified directory to see if we have
        # permissions to do so, then get back to original process's
        # current working directory.
        # Note that if for some reason os.getcwd() gets removed after
        # the process is started we'll get into troubles (os.getcwd()
        # will fail with ENOENT) but we can't do anything about that
        # except logging an error.
        init_cwd = getcwdu()
        try:
            self.run_as_current_user(self.fs.chdir, path)
        except (OSError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            cwd = self.fs.cwd
            assert isinstance(cwd, unicode), cwd
            self.respond('250 "%s" is the current directory.' % cwd)
        # restore the process-wide working directory if fs.chdir()
        # moved it
        if getcwdu() != init_cwd:
            os.chdir(init_cwd)
        return path
def ftp_CDUP(self, path):
"""Change into the parent directory.
On success return the new directory, else None.
"""
# Note: RFC-959 says that code 200 is required but it also says
# that CDUP uses the same codes as CWD.
return self.ftp_CWD(path)
def ftp_SIZE(self, path):
"""Return size of file in a format suitable for using with
RESTart as defined in RFC-3659."""
# Implementation note: properly handling the SIZE command when
# TYPE ASCII is used would require to scan the entire file to
# perform the ASCII translation logic
# (file.read().replace(os.linesep, '\r\n')) and then calculating
# the len of such data which may be different than the actual
# size of the file on the server. Considering that calculating
# such result could be very resource-intensive and also dangerous
# (DoS) we reject SIZE when the current TYPE is ASCII.
# However, clients in general should not be resuming downloads
# in ASCII mode. Resuming downloads in binary mode is the
# recommended way as specified in RFC-3659.
line = self.fs.fs2ftp(path)
if self._current_type == 'a':
why = "SIZE not allowed in ASCII mode"
self.respond("550 %s." % why)
return
if not self.fs.isfile(self.fs.realpath(path)):
why = "%s is not retrievable" % line
self.respond("550 %s." % why)
return
try:
size = self.run_as_current_user(self.fs.getsize, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("213 %s" % size)
def ftp_MDTM(self, path):
"""Return last modification time of file to the client as an ISO
3307 style timestamp (YYYYMMDDHHMMSS) as defined in RFC-3659.
On success return the file path, else None.
"""
line = self.fs.fs2ftp(path)
if not self.fs.isfile(self.fs.realpath(path)):
self.respond("550 %s is not retrievable" % line)
return
if self.use_gmt_times:
timefunc = time.gmtime
else:
timefunc = time.localtime
try:
secs = self.run_as_current_user(self.fs.getmtime, path)
lmt = time.strftime("%Y%m%d%H%M%S", timefunc(secs))
except (ValueError, OSError, FilesystemError) as err:
if isinstance(err, ValueError):
# It could happen if file's last modification time
# happens to be too old (prior to year 1900)
why = "Can't determine file's last modification time"
else:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("213 %s" % lmt)
return path
    def ftp_MFMT(self, path, timeval):
        """ Sets the last modification time of file to timeval
        3307 style timestamp (YYYYMMDDHHMMSS) as defined in RFC-3659.
        On success return the modified time and file path, else None.
        """
        # Note: the MFMT command is not a formal RFC command
        # but stated in the following MEMO:
        # https://tools.ietf.org/html/draft-somers-ftp-mfxx-04
        # this is implemented to assist with file synchronization
        line = self.fs.fs2ftp(path)
        # cheap length check before attempting a full parse
        if len(timeval) != len("YYYYMMDDHHMMSS"):
            why = "Invalid time format; expected: YYYYMMDDHHMMSS"
            self.respond('550 %s.' % why)
            return
        if not self.fs.isfile(self.fs.realpath(path)):
            self.respond("550 %s is not retrievable" % line)
            return
        if self.use_gmt_times:
            timefunc = time.gmtime
        else:
            timefunc = time.localtime
        try:
            # convert timeval string to epoch seconds
            epoch = datetime.utcfromtimestamp(0)
            timeval_datetime_obj = datetime.strptime(timeval, '%Y%m%d%H%M%S')
            timeval_secs = (timeval_datetime_obj - epoch).total_seconds()
        except ValueError:
            why = "Invalid time format; expected: YYYYMMDDHHMMSS"
            self.respond('550 %s.' % why)
            return
        try:
            # Modify Time
            self.run_as_current_user(self.fs.utime, path, timeval_secs)
            # Fetch Time (re-read it so the reply reflects what the
            # filesystem actually stored)
            secs = self.run_as_current_user(self.fs.getmtime, path)
            lmt = time.strftime("%Y%m%d%H%M%S", timefunc(secs))
        except (ValueError, OSError, FilesystemError) as err:
            if isinstance(err, ValueError):
                # It could happen if file's last modification time
                # happens to be too old (prior to year 1900)
                why = "Can't determine file's last modification time"
            else:
                why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            self.respond("213 Modify=%s; %s." % (lmt, line))
            return (lmt, path)
def ftp_MKD(self, path):
"""Create the specified directory.
On success return the directory path, else None.
"""
line = self.fs.fs2ftp(path)
try:
self.run_as_current_user(self.fs.mkdir, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.respond(
'257 "%s" directory created.' % line.replace('"', '""'))
return path
def ftp_RMD(self, path):
"""Remove the specified directory.
On success return the directory path, else None.
"""
if self.fs.realpath(path) == self.fs.realpath(self.fs.root):
msg = "Can't remove root directory."
self.respond("550 %s" % msg)
return
try:
self.run_as_current_user(self.fs.rmdir, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("250 Directory removed.")
def ftp_DELE(self, path):
"""Delete the specified file.
On success return the file path, else None.
"""
try:
self.run_as_current_user(self.fs.remove, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("250 File removed.")
return path
def ftp_RNFR(self, path):
"""Rename the specified (only the source name is specified
here, see RNTO command)"""
if not self.fs.lexists(path):
self.respond("550 No such file or directory.")
elif self.fs.realpath(path) == self.fs.realpath(self.fs.root):
self.respond("550 Can't rename home directory.")
else:
self._rnfr = path
self.respond("350 Ready for destination name.")
def ftp_RNTO(self, path):
"""Rename file (destination name only, source is specified with
RNFR).
On success return a (source_path, destination_path) tuple.
"""
if not self._rnfr:
self.respond("503 Bad sequence of commands: use RNFR first.")
return
src = self._rnfr
self._rnfr = None
try:
self.run_as_current_user(self.fs.rename, src, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("250 Renaming ok.")
return (src, path)
# --- others
def ftp_TYPE(self, line):
"""Set current type data type to binary/ascii"""
type = line.upper().replace(' ', '')
if type in ("A", "L7"):
self.respond("200 Type set to: ASCII.")
self._current_type = 'a'
elif type in ("I", "L8"):
self.respond("200 Type set to: Binary.")
self._current_type = 'i'
else:
self.respond('504 Unsupported type "%s".' % line)
def ftp_STRU(self, line):
"""Set file structure ("F" is the only one supported (noop))."""
stru = line.upper()
if stru == 'F':
self.respond('200 File transfer structure set to: F.')
elif stru in ('P', 'R'):
# R is required in minimum implementations by RFC-959, 5.1.
# RFC-1123, 4.1.2.13, amends this to only apply to servers
# whose file systems support record structures, but also
# suggests that such a server "may still accept files with
# STRU R, recording the byte stream literally".
# Should we accept R but with no operational difference from
# F? proftpd and wu-ftpd don't accept STRU R. We just do
# the same.
#
# RFC-1123 recommends against implementing P.
self.respond('504 Unimplemented STRU type.')
else:
self.respond('501 Unrecognized STRU type.')
def ftp_MODE(self, line):
"""Set data transfer mode ("S" is the only one supported (noop))."""
mode = line.upper()
if mode == 'S':
self.respond('200 Transfer mode set to: S')
elif mode in ('B', 'C'):
self.respond('504 Unimplemented MODE type.')
else:
self.respond('501 Unrecognized MODE type.')
    def ftp_STAT(self, path):
        """Return statistics about current ftp session. If an argument
        is provided return directory listing over command channel.
        Implementation note:
        RFC-959 does not explicitly mention globbing but many FTP
        servers do support it as a measure of convenience for FTP
        clients and users.
        In order to search for and match the given globbing expression,
        the code has to search (possibly) many directories, examine
        each contained filename, and build a list of matching files in
        memory. Since this operation can be quite intensive, both CPU-
        and memory-wise, we do not support globbing.
        """
        # return STATus information about ftpd
        if not path:
            s = []
            s.append('Connected to: %s:%s' % self.socket.getsockname()[:2])
            if self.authenticated:
                s.append('Logged in as: %s' % self.username)
            else:
                if not self.username:
                    s.append("Waiting for username.")
                else:
                    s.append("Waiting for password.")
            if self._current_type == 'a':
                type = 'ASCII'
            else:
                type = 'Binary'
            s.append("TYPE: %s; STRUcture: File; MODE: Stream" % type)
            # describe the data-channel state: passive listener open,
            # transfer running, or no data connection at all
            if self._dtp_acceptor is not None:
                s.append('Passive data channel waiting for connection.')
            elif self.data_channel is not None:
                bytes_sent = self.data_channel.tot_bytes_sent
                bytes_recv = self.data_channel.tot_bytes_received
                elapsed_time = self.data_channel.get_elapsed_time()
                s.append('Data connection open:')
                s.append('Total bytes sent: %s' % bytes_sent)
                s.append('Total bytes received: %s' % bytes_recv)
                s.append('Transfer elapsed time: %s secs' % elapsed_time)
            else:
                s.append('Data connection closed.')
            self.push('211-FTP server status:\r\n')
            self.push(''.join([' %s\r\n' % item for item in s]))
            self.respond('211 End of status.')
        # return directory LISTing over the command channel
        else:
            line = self.fs.fs2ftp(path)
            try:
                isdir = self.fs.isdir(path)
                if isdir:
                    listing = self.run_as_current_user(self.fs.listdir, path)
                    if isinstance(listing, list):
                        try:
                            # RFC 959 recommends the listing to be sorted.
                            listing.sort()
                        except UnicodeDecodeError:
                            # (Python 2 only) might happen on filesystem not
                            # supporting UTF8 meaning os.listdir() returned a
                            # list of mixed bytes and unicode strings:
                            # http://goo.gl/6DLHD
                            # http://bugs.python.org/issue683592
                            pass
                    iterator = self.fs.format_list(path, listing)
                else:
                    # a file: list just that single entry (lstat first so
                    # access problems surface as a 550)
                    basedir, filename = os.path.split(path)
                    self.fs.lstat(path)  # raise exc in case of problems
                    iterator = self.fs.format_list(basedir, [filename])
            except (OSError, FilesystemError) as err:
                why = _strerror(err)
                self.respond('550 %s.' % why)
            else:
                self.push('213-Status of "%s":\r\n' % line)
                self.push_with_producer(BufferedIteratorProducer(iterator))
                self.respond('213 End of status.')
                return path
def ftp_FEAT(self, line):
"""List all new features supported as defined in RFC-2398."""
features = set(['UTF8', 'TVFS'])
features.update([feat for feat in
('EPRT', 'EPSV', 'MDTM', 'MFMT', 'SIZE')
if feat in self.proto_cmds])
features.update(self._extra_feats)
if 'MLST' in self.proto_cmds or 'MLSD' in self.proto_cmds:
facts = ''
for fact in self._available_facts:
if fact in self._current_facts:
facts += fact + '*;'
else:
facts += fact + ';'
features.add('MLST ' + facts)
if 'REST' in self.proto_cmds:
features.add('REST STREAM')
features = sorted(features)
self.push("211-Features supported:\r\n")
self.push("".join([" %s\r\n" % x for x in features]))
self.respond('211 End FEAT.')
def ftp_OPTS(self, line):
"""Specify options for FTP commands as specified in RFC-2389."""
try:
if line.count(' ') > 1:
raise ValueError('Invalid number of arguments')
if ' ' in line:
cmd, arg = line.split(' ')
if ';' not in arg:
raise ValueError('Invalid argument')
else:
cmd, arg = line, ''
# actually the only command able to accept options is MLST
if cmd.upper() != 'MLST' or 'MLST' not in self.proto_cmds:
raise ValueError('Unsupported command "%s"' % cmd)
except ValueError as err:
self.respond('501 %s.' % err)
else:
facts = [x.lower() for x in arg.split(';')]
self._current_facts = \
[x for x in facts if x in self._available_facts]
f = ''.join([x + ';' for x in self._current_facts])
self.respond('200 MLST OPTS ' + f)
def ftp_NOOP(self, line):
"""Do nothing."""
self.respond("200 I successfully done nothin'.")
def ftp_SYST(self, line):
"""Return system type (always returns UNIX type: L8)."""
# This command is used to find out the type of operating system
# at the server. The reply shall have as its first word one of
# the system names listed in RFC-943.
# Since that we always return a "/bin/ls -lA"-like output on
# LIST we prefer to respond as if we would on Unix in any case.
self.respond("215 UNIX Type: L8")
def ftp_ALLO(self, line):
"""Allocate bytes for storage (noop)."""
# not necessary (always respond with 202)
self.respond("202 No storage allocation necessary.")
def ftp_HELP(self, line):
"""Return help text to the client."""
if line:
line = line.upper()
if line in self.proto_cmds:
self.respond("214 %s" % self.proto_cmds[line]['help'])
else:
self.respond("501 Unrecognized command.")
else:
# provide a compact list of recognized commands
def formatted_help():
cmds = []
keys = sorted([x for x in self.proto_cmds.keys()
if not x.startswith('SITE ')])
while keys:
elems = tuple((keys[0:8]))
cmds.append(' %-6s' * len(elems) % elems + '\r\n')
del keys[0:8]
return ''.join(cmds)
self.push("214-The following commands are recognized:\r\n")
self.push(formatted_help())
self.respond("214 Help command successful.")
# --- site commands
# The user willing to add support for a specific SITE command must
# update self.proto_cmds dictionary and define a new ftp_SITE_%CMD%
# method in the subclass.
def ftp_SITE_CHMOD(self, path, mode):
"""Change file mode.
On success return a (file_path, mode) tuple.
"""
# Note: although most UNIX servers implement it, SITE CHMOD is not
# defined in any official RFC.
try:
assert len(mode) in (3, 4)
for x in mode:
assert 0 <= int(x) <= 7
mode = int(mode, 8)
except (AssertionError, ValueError):
self.respond("501 Invalid SITE CHMOD format.")
else:
try:
self.run_as_current_user(self.fs.chmod, path, mode)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond('200 SITE CHMOD successful.')
return (path, mode)
def ftp_SITE_HELP(self, line):
"""Return help text to the client for a given SITE command."""
if line:
line = line.upper()
if line in self.proto_cmds:
self.respond("214 %s" % self.proto_cmds[line]['help'])
else:
self.respond("501 Unrecognized SITE command.")
else:
self.push("214-The following SITE commands are recognized:\r\n")
site_cmds = []
for cmd in sorted(self.proto_cmds.keys()):
if cmd.startswith('SITE '):
site_cmds.append(' %s\r\n' % cmd[5:])
self.push(''.join(site_cmds))
self.respond("214 Help SITE command successful.")
# --- support for deprecated cmds
# RFC-1123 requires that the server treat XCUP, XCWD, XMKD, XPWD
# and XRMD commands as synonyms for CDUP, CWD, MKD, LIST and RMD.
# Such commands are obsoleted but some ftp clients (e.g. Windows
# ftp.exe) still use them.
def ftp_XCUP(self, line):
"Change to the parent directory. Synonym for CDUP. Deprecated."
return self.ftp_CDUP(line)
def ftp_XCWD(self, line):
"Change the current working directory. Synonym for CWD. Deprecated."
return self.ftp_CWD(line)
def ftp_XMKD(self, line):
"Create the specified directory. Synonym for MKD. Deprecated."
return self.ftp_MKD(line)
def ftp_XPWD(self, line):
"Return the current working directory. Synonym for PWD. Deprecated."
return self.ftp_PWD(line)
def ftp_XRMD(self, line):
"Remove the specified directory. Synonym for RMD. Deprecated."
return self.ftp_RMD(line)
if SSL is not None:
    class SSLConnection(_AsyncChatNewStyle):
        """An AsyncChat subclass supporting TLS/SSL.

        Wraps the socket in a pyOpenSSL SSL.Connection and drives the
        non-blocking handshake / shutdown state machine from the
        ioloop's read and write events.
        """
        # True while the SSL handshake is being performed
        _ssl_accepting = False
        # True once the handshake completed successfully
        _ssl_established = False
        # True while SSL_shutdown() is being performed
        _ssl_closing = False
        # True once secure_connection() has been requested
        _ssl_requested = False
        def __init__(self, *args, **kwargs):
            super(SSLConnection, self).__init__(*args, **kwargs)
            self._error = False
            # set when OpenSSL signals WantRead/WantWrite, meaning the
            # ioloop must watch the corresponding event before retrying
            self._ssl_want_read = False
            self._ssl_want_write = False
        def readable(self):
            # also readable while OpenSSL is waiting for more input
            return self._ssl_want_read or \
                super(SSLConnection, self).readable()
        def writable(self):
            # also writable while OpenSSL is waiting to flush output
            return self._ssl_want_write or \
                super(SSLConnection, self).writable()
        def secure_connection(self, ssl_context):
            """Secure the connection switching from plain-text to
            SSL/TLS.
            """
            debug("securing SSL connection", self)
            self._ssl_requested = True
            try:
                self.socket = SSL.Connection(ssl_context, self.socket)
            except socket.error as err:
                # may happen in case the client connects/disconnects
                # very quickly
                debug(
                    "call: secure_connection(); can't secure SSL connection "
                    "%r; closing" % err, self)
                self.close()
            except ValueError:
                # may happen in case the client connects/disconnects
                # very quickly
                if self.socket.fileno() == -1:
                    debug(
                        "ValueError and fd == -1 on secure_connection()", self)
                    return
                raise
            else:
                self.socket.set_accept_state()
                self._ssl_accepting = True
        @contextlib.contextmanager
        def _handle_ssl_want_rw(self):
            """Context manager translating OpenSSL WantRead/WantWrite
            into ioloop event registrations so the operation is retried
            when the socket becomes ready.
            """
            prev_row_pending = self._ssl_want_read or self._ssl_want_write
            try:
                yield
            except SSL.WantReadError:
                # we should never get here; it's just for extra safety
                self._ssl_want_read = True
            except SSL.WantWriteError:
                # we should never get here; it's just for extra safety
                self._ssl_want_write = True
            if self._ssl_want_read:
                self.modify_ioloop_events(
                    self._wanted_io_events | self.ioloop.READ, logdebug=True)
            elif self._ssl_want_write:
                self.modify_ioloop_events(
                    self._wanted_io_events | self.ioloop.WRITE, logdebug=True)
            else:
                # a previously pending want-read/want-write was
                # satisfied: restore the normal event mask
                if prev_row_pending:
                    self.modify_ioloop_events(self._wanted_io_events)
        def _do_ssl_handshake(self):
            """Advance the non-blocking SSL handshake by one step."""
            self._ssl_accepting = True
            self._ssl_want_read = False
            self._ssl_want_write = False
            try:
                self.socket.do_handshake()
            except SSL.WantReadError:
                self._ssl_want_read = True
                debug("call: _do_ssl_handshake, err: ssl-want-read", inst=self)
            except SSL.WantWriteError:
                self._ssl_want_write = True
                debug("call: _do_ssl_handshake, err: ssl-want-write",
                      inst=self)
            except SSL.SysCallError as err:
                debug("call: _do_ssl_handshake, err: %r" % err, inst=self)
                retval, desc = err.args
                # client hung up mid-handshake: treat as disconnect
                if (retval == -1 and desc == 'Unexpected EOF') or retval > 0:
                    return self.handle_close()
                raise
            except SSL.Error as err:
                debug("call: _do_ssl_handshake, err: %r" % err, inst=self)
                return self.handle_failed_ssl_handshake()
            else:
                debug("SSL connection established", self)
                self._ssl_accepting = False
                self._ssl_established = True
                self.handle_ssl_established()
        def handle_ssl_established(self):
            """Called when SSL handshake has completed."""
            pass
        def handle_ssl_shutdown(self):
            """Called when SSL shutdown() has completed."""
            super(SSLConnection, self).close()
        def handle_failed_ssl_handshake(self):
            raise NotImplementedError("must be implemented in subclass")
        def handle_read_event(self):
            # dispatch the read event to whichever phase the SSL state
            # machine is in (handshake, shutdown, or plain data)
            if not self._ssl_requested:
                super(SSLConnection, self).handle_read_event()
            else:
                with self._handle_ssl_want_rw():
                    self._ssl_want_read = False
                    if self._ssl_accepting:
                        self._do_ssl_handshake()
                    elif self._ssl_closing:
                        self._do_ssl_shutdown()
                    else:
                        super(SSLConnection, self).handle_read_event()
        def handle_write_event(self):
            # mirror of handle_read_event() for writability
            if not self._ssl_requested:
                super(SSLConnection, self).handle_write_event()
            else:
                with self._handle_ssl_want_rw():
                    self._ssl_want_write = False
                    if self._ssl_accepting:
                        self._do_ssl_handshake()
                    elif self._ssl_closing:
                        self._do_ssl_shutdown()
                    else:
                        super(SSLConnection, self).handle_write_event()
        def handle_error(self):
            self._error = True
            try:
                raise
            except Exception:
                self.log_exception(self)
            # when facing an unhandled exception in here it's better
            # to rely on base class (FTPHandler or DTPHandler)
            # close() method as it does not imply SSL shutdown logic
            try:
                super(SSLConnection, self).close()
            except Exception:
                logger.critical(traceback.format_exc())
        def send(self, data):
            if not isinstance(data, bytes):
                data = bytes(data)
            try:
                return super(SSLConnection, self).send(data)
            except SSL.WantReadError:
                debug("call: send(), err: ssl-want-read", inst=self)
                self._ssl_want_read = True
                return 0
            except SSL.WantWriteError:
                debug("call: send(), err: ssl-want-write", inst=self)
                self._ssl_want_write = True
                return 0
            except SSL.ZeroReturnError as err:
                # peer performed a clean SSL shutdown
                debug(
                    "call: send() -> shutdown(), err: zero-return", inst=self)
                super(SSLConnection, self).handle_close()
                return 0
            except SSL.SysCallError as err:
                debug("call: send(), err: %r" % err, inst=self)
                errnum, errstr = err.args
                if errnum == errno.EWOULDBLOCK:
                    return 0
                elif (errnum in _ERRNOS_DISCONNECTED or
                        errstr == 'Unexpected EOF'):
                    super(SSLConnection, self).handle_close()
                    return 0
                else:
                    raise
        def recv(self, buffer_size):
            try:
                return super(SSLConnection, self).recv(buffer_size)
            except SSL.WantReadError:
                debug("call: recv(), err: ssl-want-read", inst=self)
                self._ssl_want_read = True
                raise RetryError
            except SSL.WantWriteError:
                debug("call: recv(), err: ssl-want-write", inst=self)
                self._ssl_want_write = True
                raise RetryError
            except SSL.ZeroReturnError as err:
                # peer performed a clean SSL shutdown
                debug("call: recv() -> shutdown(), err: zero-return",
                      inst=self)
                super(SSLConnection, self).handle_close()
                return b''
            except SSL.SysCallError as err:
                debug("call: recv(), err: %r" % err, inst=self)
                errnum, errstr = err.args
                if (errnum in _ERRNOS_DISCONNECTED or
                        errstr == 'Unexpected EOF'):
                    super(SSLConnection, self).handle_close()
                    return b''
                else:
                    raise
        def _do_ssl_shutdown(self):
            """Executes a SSL_shutdown() call to revert the connection
            back to clear-text.
            twisted/internet/tcp.py code has been used as an example.
            """
            self._ssl_closing = True
            if os.name == 'posix':
                # since SSL_shutdown() doesn't report errors, an empty
                # write call is done first, to try to detect if the
                # connection has gone away
                try:
                    os.write(self.socket.fileno(), b'')
                except (OSError, socket.error) as err:
                    debug(
                        "call: _do_ssl_shutdown() -> os.write, err: %r" % err,
                        inst=self)
                    if err.errno in (errno.EINTR, errno.EWOULDBLOCK,
                                     errno.ENOBUFS):
                        return
                    elif err.errno in _ERRNOS_DISCONNECTED:
                        return super(SSLConnection, self).close()
                    else:
                        raise
            # Ok, this a mess, but the underlying OpenSSL API simply
            # *SUCKS* and I really couldn't do any better.
            #
            # Here we just want to shutdown() the SSL layer and then
            # close() the connection so we're not interested in a
            # complete SSL shutdown() handshake, so let's pretend
            # we already received a "RECEIVED" shutdown notification
            # from the client.
            # Once the client received our "SENT" shutdown notification
            # then we close() the connection.
            #
            # Since it is not clear what errors to expect during the
            # entire procedure we catch them all and assume the
            # following:
            # - WantReadError and WantWriteError means "retry"
            # - ZeroReturnError, SysCallError[EOF], Error[] are all
            #   aliases for disconnection
            try:
                laststate = self.socket.get_shutdown()
                self.socket.set_shutdown(laststate | SSL.RECEIVED_SHUTDOWN)
                done = self.socket.shutdown()
                if not (laststate & SSL.RECEIVED_SHUTDOWN):
                    self.socket.set_shutdown(SSL.SENT_SHUTDOWN)
            except SSL.WantReadError:
                self._ssl_want_read = True
                debug("call: _do_ssl_shutdown, err: ssl-want-read", inst=self)
            except SSL.WantWriteError:
                self._ssl_want_write = True
                debug("call: _do_ssl_shutdown, err: ssl-want-write", inst=self)
            except SSL.ZeroReturnError as err:
                debug(
                    "call: _do_ssl_shutdown() -> shutdown(), err: zero-return",
                    inst=self)
                super(SSLConnection, self).close()
            except SSL.SysCallError as err:
                debug("call: _do_ssl_shutdown() -> shutdown(), err: %r" % err,
                      inst=self)
                errnum, errstr = err.args
                if (errnum in _ERRNOS_DISCONNECTED or
                        errstr == 'Unexpected EOF'):
                    super(SSLConnection, self).close()
                else:
                    raise
            except SSL.Error as err:
                debug("call: _do_ssl_shutdown() -> shutdown(), err: %r" % err,
                      inst=self)
                # see:
                # https://github.com/giampaolo/pyftpdlib/issues/171
                # https://bugs.launchpad.net/pyopenssl/+bug/785985
                if err.args and not getattr(err, "errno", None):
                    pass
                else:
                    raise
            except socket.error as err:
                debug("call: _do_ssl_shutdown() -> shutdown(), err: %r" % err,
                      inst=self)
                if err.errno in _ERRNOS_DISCONNECTED:
                    super(SSLConnection, self).close()
                else:
                    raise
            else:
                if done:
                    debug("call: _do_ssl_shutdown(), shutdown completed",
                          inst=self)
                    self._ssl_established = False
                    self._ssl_closing = False
                    self.handle_ssl_shutdown()
                else:
                    debug(
                        "call: _do_ssl_shutdown(), shutdown not completed yet",
                        inst=self)
        def close(self):
            # prefer a graceful SSL shutdown; fall back to a plain
            # close when the channel never completed the handshake or
            # already hit an error
            if self._ssl_established and not self._error:
                self._do_ssl_shutdown()
            else:
                self._ssl_accepting = False
                self._ssl_established = False
                self._ssl_closing = False
                super(SSLConnection, self).close()
    class TLS_DTPHandler(SSLConnection, DTPHandler):
        """A DTPHandler subclass supporting TLS/SSL."""
        def __init__(self, sock, cmd_channel):
            super(TLS_DTPHandler, self).__init__(sock, cmd_channel)
            # secure the data channel right away when PROT P was issued
            # on the control connection
            if self.cmd_channel._prot:
                self.secure_connection(self.cmd_channel.ssl_context)
        def __repr__(self):
            return DTPHandler.__repr__(self)
        def use_sendfile(self):
            # sendfile() bypasses userspace and therefore cannot be
            # used once the socket is wrapped by SSL
            if isinstance(self.socket, SSL.Connection):
                return False
            else:
                return super(TLS_DTPHandler, self).use_sendfile()
        def handle_failed_ssl_handshake(self):
            # TLS/SSL handshake failure, probably client's fault which
            # used a SSL version different from server's.
            # RFC-4217, chapter 10.2 expects us to return 522 over the
            # command channel.
            self.cmd_channel.respond("522 SSL handshake failed.")
            self.cmd_channel.log_cmd("PROT", "P", 522, "SSL handshake failed.")
            self.close()
class TLS_FTPHandler(SSLConnection, FTPHandler):
"""A FTPHandler subclass supporting TLS/SSL.
Implements AUTH, PBSZ and PROT commands (RFC-2228 and RFC-4217).
Configurable attributes:
- (bool) tls_control_required:
When True requires SSL/TLS to be established on the control
channel, before logging in. This means the user will have
to issue AUTH before USER/PASS (default False).
- (bool) tls_data_required:
When True requires SSL/TLS to be established on the data
channel. This means the user will have to issue PROT
before PASV or PORT (default False).
SSL-specific options:
- (string) certfile:
the path to the file which contains a certificate to be
used to identify the local side of the connection.
This must always be specified, unless context is provided
instead.
- (string) keyfile:
the path to the file containing the private RSA key;
can be omitted if certfile already contains the private
key (defaults: None).
- (int) ssl_protocol:
the desired SSL protocol version to use. This defaults to
PROTOCOL_SSLv23 which will negotiate the highest protocol
that both the server and your installation of OpenSSL
support.
- (int) ssl_options:
specific OpenSSL options. These default to:
SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3| SSL.OP_NO_COMPRESSION
which are all considered insecure features.
Can be set to None in order to improve compatibilty with
older (insecure) FTP clients.
- (instance) ssl_context:
a SSL Context object previously configured; if specified
all other parameters will be ignored.
(default None).
"""
# configurable attributes
tls_control_required = False
tls_data_required = False
certfile = None
keyfile = None
ssl_protocol = SSL.SSLv23_METHOD
# - SSLv2 is easily broken and is considered harmful and dangerous
# - SSLv3 has several problems and is now dangerous
# - Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (see https://github.com/shazow/urllib3/pull/309)
ssl_options = SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3
if hasattr(SSL, "OP_NO_COMPRESSION"):
ssl_options |= SSL.OP_NO_COMPRESSION
ssl_context = None
# overridden attributes
dtp_handler = TLS_DTPHandler
proto_cmds = FTPHandler.proto_cmds.copy()
proto_cmds.update({
'AUTH': dict(
perm=None, auth=False, arg=True,
help='Syntax: AUTH <SP> TLS|SSL (set up secure control '
'channel).'),
'PBSZ': dict(
perm=None, auth=False, arg=True,
help='Syntax: PBSZ <SP> 0 (negotiate TLS buffer).'),
'PROT': dict(
perm=None, auth=False, arg=True,
help='Syntax: PROT <SP> [C|P] (set up un/secure data '
'channel).'),
})
def __init__(self, conn, server, ioloop=None):
super(TLS_FTPHandler, self).__init__(conn, server, ioloop)
if not self.connected:
return
self._extra_feats = ['AUTH TLS', 'AUTH SSL', 'PBSZ', 'PROT']
self._pbsz = False
self._prot = False
self.ssl_context = self.get_ssl_context()
def __repr__(self):
    # Reuse the plain FTPHandler representation verbatim.
    return FTPHandler.__repr__(self)
@classmethod
def get_ssl_context(cls):
    """Build (once) and return the class-wide SSL.Context.

    The context is cached on the class, so every handler instance
    shares the same context; certfile must be set beforehand.
    NOTE: when keyfile is unset it is permanently defaulted to
    certfile here (a class-level side effect).
    """
    if cls.ssl_context is None:
        if cls.certfile is None:
            raise ValueError("at least certfile must be specified")
        cls.ssl_context = SSL.Context(cls.ssl_protocol)
        if cls.ssl_protocol != SSL.SSLv2_METHOD:
            # Forbid SSLv2 unless it was explicitly requested.
            cls.ssl_context.set_options(SSL.OP_NO_SSLv2)
        else:
            warnings.warn("SSLv2 protocol is insecure", RuntimeWarning)
        cls.ssl_context.use_certificate_chain_file(cls.certfile)
        if not cls.keyfile:
            cls.keyfile = cls.certfile
        cls.ssl_context.use_privatekey_file(cls.keyfile)
        if cls.ssl_options:
            cls.ssl_context.set_options(cls.ssl_options)
    return cls.ssl_context
# --- overridden methods
def flush_account(self):
    """Reset account state, including the TLS PBSZ/PROT negotiation."""
    FTPHandler.flush_account(self)
    self._pbsz = False
    self._prot = False
def process_command(self, cmd, *args, **kwargs):
    """Enforce the TLS policy, then delegate to FTPHandler.

    Login commands are rejected when tls_control_required is set but
    the control channel is still plaintext; data-channel setup
    commands are rejected when tls_data_required is set but PROT P
    has not been negotiated.
    """
    if cmd in ('USER', 'PASS'):
        if self.tls_control_required and not self._ssl_established:
            msg = "SSL/TLS required on the control channel."
            self.respond("550 " + msg)
            # NOTE(review): assumes the dispatcher always passes the
            # command argument as args[0] — confirm in FTPHandler.
            self.log_cmd(cmd, args[0], 550, msg)
            return
    elif cmd in ('PASV', 'EPSV', 'PORT', 'EPRT'):
        if self.tls_data_required and not self._prot:
            msg = "SSL/TLS required on the data channel."
            self.respond("550 " + msg)
            self.log_cmd(cmd, args[0], 550, msg)
            return
    FTPHandler.process_command(self, cmd, *args, **kwargs)
# --- new methods
def handle_failed_ssl_handshake(self):
    """Drop the client after a failed TLS/SSL handshake."""
    # TLS/SSL handshake failure, probably client's fault which
    # used a SSL version different from server's.
    # We can't rely on the control connection anymore so we just
    # disconnect the client without sending any response.
    self.log("SSL handshake failed.")
    self.close()
def ftp_AUTH(self, line):
    """Switch the control channel to TLS/SSL (AUTH command)."""
    mechanism = line.upper()
    if isinstance(self.socket, SSL.Connection):
        # The handshake already happened; refuse a second AUTH.
        self.respond("503 Already using TLS.")
        return
    if mechanism not in ('TLS', 'TLS-C', 'SSL', 'TLS-P'):
        self.respond(
            "502 Unrecognized encryption type (use TLS or SSL).")
        return
    # From RFC-4217: "As the SSL/TLS protocols self-negotiate
    # their levels, there is no need to distinguish between SSL
    # and TLS in the application layer".
    self.respond('234 AUTH %s successful.' % mechanism)
    self.secure_connection(self.ssl_context)
def ftp_PBSZ(self, line):
    """Negotiate size of buffer for secure data transfer.

    For TLS/SSL the only valid value for the parameter is '0'.
    Any other value is accepted but ignored.
    """
    if isinstance(self.socket, SSL.Connection):
        self._pbsz = True
        self.respond('200 PBSZ=0 successful.')
    else:
        self.respond(
            "503 PBSZ not allowed on insecure control connection.")
def ftp_PROT(self, line):
    """Select the data-channel protection level (PROT command)."""
    level = line.upper()
    if not isinstance(self.socket, SSL.Connection):
        self.respond(
            "503 PROT not allowed on insecure control connection.")
        return
    if not self._pbsz:
        self.respond(
            "503 You must issue the PBSZ command prior to PROT.")
        return
    if level == 'C':
        # Clear: data connections stay plaintext.
        self._prot = False
        self.respond('200 Protection set to Clear')
    elif level == 'P':
        # Private: data connections are wrapped in TLS/SSL.
        self._prot = True
        self.respond('200 Protection set to Private')
    elif level in ('S', 'E'):
        # Defined by RFC-2228 but not supported here.
        self.respond('521 PROT %s unsupported (use C or P).' % level)
    else:
        self.respond("502 Unrecognized PROT type (use C or P).")
|
# Maps an (attribute, value) match on a weather event to the list of
# dependent service descriptors affected by it.
deps = {
    ('subtype', 'wind'): [
        dict(sector='energy', service='grid', type='wind', basis='weather'),
    ],
    ('subtype', 'thunder'): [
        dict(sector='energy', service='grid', subtype='thunder', basis='weather'),
    ],
    ('subtype', 'snowstorm'): [
        dict(sector='transport', service='roads', type='snowstorm', basis='weather'),
        dict(sector='transport', service='airports', type='snowstorm', basis='weather'),
    ],
    ('subtype', 'temperature'): [
        dict(sector='water', service='cooling', type='temperature', basis='weather'),
    ],
}
def assign_affected_targets(event):
    """Derive follow-up events for services affected by a weather event.

    Looks up the event's 'subtype' in the module-level ``deps`` table
    and, for every dependent service descriptor, emits a copy of
    *event* with the descriptor's fields overwritten.

    Args:
        event: source event; must offer ``value(key, default)``, and
            copies must offer ``clear(key)`` / ``update(key, values)``.

    Returns:
        list: newly created events (empty when the subtype has no deps).
    """
    # BUG FIX: the original named this local list ``events``, shadowing
    # the ``events`` module whose Event class is used below, so the
    # events.Event(...) call raised AttributeError on a list.
    derived = []
    etype = event.value('subtype', None)
    for overrides in deps.get(('subtype', etype), []):
        clone = events.Event(event)
        # .items() instead of the Python-2-only .iteritems().
        for key, value in overrides.items():
            clone.clear(key)
            clone.update(key, [value])
        derived.append(clone)
    return derived
|
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import collections
from django.utils import six
from djangoerp.core.cache import Singleton
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.1'
@six.add_metaclass(Singleton)
class PluggetSourceCache(object):
    """Singleton registry of all plugget sources.

    A "source" is a callable that manipulates a template context, plus
    metadata (description, default template, customization form).
    Sources are keyed by their unique title.
    """

    def __init__(self):
        # Fallback context function used when no callable is supplied.
        self.default_func = lambda x: x
        self.clear()
        self.auto_discover()

    def register(self, func, title, description, template, form):
        """Store *func* under *title*, replacing any previous source.

        Title and description fall back to the first line and remainder
        of the function's docstring, then to the function's name.
        """
        # BUG FIX: the original used isinstance(func, collections.Callable),
        # which was removed in Python 3.10; the callable() builtin is the
        # portable equivalent.
        if not callable(func):
            func = self.default_func
        import inspect
        doc = inspect.getdoc(func) or ""
        insp_title, sep, insp_description = doc.partition("\n")
        title = title or insp_title.strip("\n.") or func.__name__.capitalize()
        self.__sources[title] = {
            "func": func,
            "description": description or insp_description.strip("\n").replace("\n\n", " ").replace("\n", " "),
            "default_template": template,
            "form": form
        }

    def clear(self):
        """Forget every registered source and reset the discovery flag."""
        self.discovered = False
        self.__sources = {}

    @property
    def sources(self):
        """Dict of registered sources, running discovery first if needed."""
        self.auto_discover()
        return self.__sources

    def get_source_choices(self):
        """Return the sources as a (title, title) choice list for forms."""
        return [(k, k) for k in self.sources]

    def auto_discover(self):
        """Auto discover pluggets of installed applications (idempotent)."""
        from django.conf import settings
        if self.discovered:
            return
        for app in settings.INSTALLED_APPS:
            # Skip Django's apps.
            if app.startswith('django.'):
                continue
            # Importing <app>.pluggets triggers its registration calls
            # as a side effect; apps without the module are skipped.
            module_name = "%s.pluggets" % app
            try:
                __import__(module_name, {}, {}, ['*'])
            except ImportError:
                pass
        self.discovered = True

    def register_plugget_source(self, func, title=None, description=None, template_name=None, form=None):
        """Register a new plugget source.

        A plugget source is identified by:

        * func -- A callable which takes a context, manipulates and returns it.
        * title -- A default title for the plugget [optional]. If title is already
          registered, the old plugget source will be replaced by the new one.
          (default: title specified in func's docstring or its name)
        * description -- A description of purpose of the plugget [optional].
          (default: the remaining part of func's docstring)
        * template_name -- Path of template that must be used to render the plugget.
        * form -- The form to be used for plugget customization.

        Please note that title must be unique because it's used as key in the
        register dictionary and is the unique identifier of a specific source.
        """
        self.register(func, title, description, template_name, form)

    def register_simple_plugget_source(self, title, description="A simple plugget.", template_name=None, form=None):
        """Register a new simplified plugget source.

        This is a convenient function to simplify registration of plugget sources
        that do not change the current context (a dummy function is used).

        * title -- A default title for the plugget. If title is already registered,
          the old plugget source will be replaced by the new one.
        * description -- A description of purpose of the plugget [optional].
          (default: default description string)
        * template_name -- Path of template that must be used to render the plugget.
        * form -- The form to be used for plugget customization.

        Please note that title must be unique because it's used as key in the
        register dictionary and is the unique identifier of a specific source.
        """
        self.register(None, title, description, template_name, form)

    def get_plugget_sources(self, force_discovering=False):
        """Returns the list of all registered plugget sources.

        If force_discovering is True, a complete auto discovering of plugget
        sources is forced.
        """
        if force_discovering:
            self.discovered = False
        return self.sources

    def get_plugget_source(self, source_title, force_discovering=False):
        """Returns the registered plugget source identified by "source_title".

        If the source is not registered, None is returned.
        If force_discovering is True, a complete auto discovering of plugget
        sources is forced.
        """
        return self.get_plugget_sources(force_discovering).get(source_title, None)

    def get_plugget_source_choices(self, force_discovering=False):
        """Returns all registered plugget sources as a choice list for forms.

        A choice is a tuple in the form (source_title, source_uid).
        If force_discovering is True, a complete auto discovering of plugget
        sources is forced.
        """
        if force_discovering:
            self.discovered = False
        return self.get_source_choices()
# Module-level registry instance shared by the plugget framework
# (presumably the Singleton metaclass makes later instantiations return
# this same object — confirm in djangoerp.core.cache).
registry = PluggetSourceCache()
|
''' Puntuation module '''
from colorama import Fore
from karmaserver.data.models import db
class Puntuation(db.Model):
    """Puntuation model: per-observation vote tally with a certainty score."""

    observation_id = db.Column(db.String(64), db.ForeignKey('observation._id'), primary_key=True)
    positive = db.Column(db.Integer)
    negative = db.Column(db.Integer)

    def __init__(self, votes):
        # Seed the counters from an object exposing .upvotes/.downvotes.
        self.positive = votes.upvotes
        self.negative = votes.downvotes

    def __str__(self):
        up = Fore.GREEN + '+' + str(self.positive) + Fore.RESET
        down = Fore.RED + '-' + str(self.negative) + Fore.RESET
        return f'{up}\t{down}\t{self.calculate_certainty()*100}%'

    def serialize(self):
        ''' Serializes object '''
        return {
            "positive": self.positive,
            "negative": self.negative,
            "certainty": self.calculate_certainty(),
        }

    def add_vote(self, vote_type, karma_level):
        ''' Adds a new vote, returns the certainty '''
        # Truthy vote_type counts as an upvote, weighted by karma_level.
        if vote_type:
            self.positive += karma_level
        else:
            self.negative += karma_level
        return self.calculate_certainty()

    def calculate_certainty(self):
        ''' Calculates the certainty '''
        # (pos - neg) / (pos + neg); 0 when the denominator is zero.
        total = self.positive + self.negative
        if total == 0:
            return 0
        return (self.positive - self.negative) / total
|
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import CustomprovidersConfiguration
from .operations import Operations
from .operations import CustomResourceProviderOperations
from .operations import AssociationsOperations
from . import models
class Customproviders(object):
    """Allows extension of ARM control plane with custom resource providers.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.customproviders.operations.Operations
    :ivar custom_resource_provider: CustomResourceProviderOperations operations
    :vartype custom_resource_provider: azure.mgmt.customproviders.operations.CustomResourceProviderOperations
    :ivar associations: AssociationsOperations operations
    :vartype associations: azure.mgmt.customproviders.operations.AssociationsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        base_url=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Fall back to the public ARM endpoint when no URL is given.
        self._config = CustomprovidersConfiguration(credential, subscription_id, **kwargs)
        self._client = ARMPipelineClient(
            base_url=base_url or 'https://management.azure.com',
            config=self._config,
            **kwargs)

        # Collect every model class for (de)serialization.
        client_models = {
            name: obj for name, obj in models.__dict__.items()
            if isinstance(obj, type)
        }
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operation group per API surface, all sharing the pipeline.
        shared = (self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(*shared)
        self.custom_resource_provider = CustomResourceProviderOperations(*shared)
        self.associations = AssociationsOperations(*shared)

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> Customproviders
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
|
from django.db import IntegrityError
from django.test import Client
from django.core.urlresolvers import reverse
from base import BaseTestCase
from comments.models import CustomUser, Site, Thread, Comment
class CustomUserManagerTestCase(BaseTestCase):
    """Tests for CustomUser's manager: create_user, domain data, bulk delete."""

    def test_customusermanager_create_user_succeeds(self):
        address = 'a@b.com'
        CustomUser.objects.create_user(address, 'pass')
        self.assertEqual(CustomUser.objects.count(), 1)
        created = CustomUser.objects.all()[0]
        self.assertEqual(created.email, address)

    def test_customusermanager_user_creation_with_existing_email_fails(self):
        address = 'a@b.com'
        CustomUser.objects.create_user(address, 'pass')
        # A second user with the same email must violate the unique constraint.
        with self.assertRaises(IntegrityError):
            CustomUser.objects.create_user(address, 'pass')

    def test_get_user_domain_data(self):
        member = CustomUser.objects.create_user('a@b.com', 'pass')
        site = Site.objects.create(domain='www.google.com')
        thread = Thread.objects.create(site=site, url='url')
        thread.disliked_by.add(member)
        comment = Comment.objects.create(thread=thread, user=member, text='aaa')
        comment.liked_by.add(member)
        data = member.get_user_domain_data()
        self.assertEqual(list(data['posted_comments']), [comment.id])
        self.assertEqual(list(data['liked_threads']), [])
        self.assertEqual(list(data['disliked_threads']), [thread.id])
        self.assertEqual(list(data['liked_comments']), [comment.id])
        self.assertEqual(list(data['disliked_comments']), [])

    def test_bulk_delete_successfully_deletes_users(self):
        CustomUser.objects.bulk_create([
            CustomUser(email='a@b.com', password='pass'),
            CustomUser(email='b@b.com', password='pass'),
        ])
        everyone = CustomUser.objects.all()
        CustomUser.objects.bulk_delete(everyone)
        self.assertEqual(CustomUser.objects.count(), 0)

    def test_bulk_delete_successfully_deletes_non_staff_users(self):
        CustomUser.objects.bulk_create([
            CustomUser(email='a@b.com', password='pass'),
            CustomUser(email='c@b.com', password='pass', is_staff=True),
        ])
        everyone = CustomUser.objects.all()
        # Restrict the deletion to non-staff users; the staff user survives.
        CustomUser.objects.bulk_delete(everyone, is_staff=False)
        self.assertEqual(CustomUser.objects.count(), 1)
class CustomUserTestCase(BaseTestCase):
    """Tests for CustomUser's hide/unhide and custom delete behaviour."""

    def test_hide_user_is_success(self):
        """
        Tests hide class method. First user's hidden state is set to False.
        After calling hide method, user's hidden state should be True.
        """
        user = CustomUser.objects.create_user(
            email="donald@duck.com",
            password="pass",
        )
        site = Site.objects.create(domain='www.google.com')
        self.assertFalse(user.hidden.filter(id=site.id))
        user.hide(site)
        self.assertTrue(user.hidden.filter(id=site.id))

    def test_hide_admin_doesnt_change_hidden_state(self):
        """
        Tests hide class method for staff users. First user's hidden state is
        set to False. After calling hide method, user's hidden state should
        not be changed because staff should not be hidden.
        """
        user = CustomUser.objects.create_superuser(
            email="donald@duck.com",
            password="pass",
        )
        site = Site.objects.create(domain='www.google.com')
        self.assertFalse(user.hidden.filter(id=site.id))
        user.hide(site)
        self.assertFalse(user.hidden.filter(id=site.id))

    def test_unhide_user_is_success(self):
        """
        Tests unhide class method. First user's hidden state is set to True.
        After calling unhide method, user's hidden state should be False.
        """
        user = CustomUser.objects.create_user(
            email="donald@duck.com",
            password="pass",
        )
        site = Site.objects.create(domain='www.google.com')
        user.hidden.add(site)
        user.save()
        self.assertTrue(user.hidden.filter(id=site.id))
        user.unhide(site)
        self.assertFalse(user.hidden.filter(id=site.id))

    def test_unhide_admin_is_success(self):
        """
        Tests if admin's hidden flag state is changed by unhide method (though
        admins cannot be hidden by using hide method, hidden flag can be
        set to True in DB by other means)
        """
        user = CustomUser.objects.create_superuser(
            email="donald@duck.com",
            password="pass",
        )
        site = Site.objects.create(domain='www.google.com')
        # BUG FIX: hide() is a no-op for staff users (see
        # test_hide_admin_doesnt_change_hidden_state above), so the
        # original call left the flag False and unhide() was never
        # actually exercised. Set the flag directly, as the docstring
        # intends ("by other means"), and assert it stuck.
        user.hidden.add(site)
        user.save()
        self.assertTrue(user.hidden.filter(id=site.id))
        user.unhide(site)
        self.assertFalse(user.hidden.filter(id=site.id))

    def test_delete_method_deletes_user_comments(self):
        """
        Tests custom delete method in CustomUser model.
        """
        u1 = CustomUser.objects.create_user(
            email="donald@duck.com",
            password="pass"
        )
        CustomUser.objects.create_user(
            email="daffy@duck.com",
            password="pass"
        )
        site = Site.objects.create(domain='www.google.com')
        thread = Thread.objects.create(site=site, url='url')
        Comment.objects.create(thread=thread, user=u1, text='aaa')
        users = CustomUser.objects.all()
        self.assertEqual(users.count(), 2)
        self.assertEqual(Comment.objects.get(user=u1).user, u1)
        u1.delete()
        # The queryset re-evaluates on count(): only the second user is left.
        self.assertEqual(users.count(), 1)
        self.assertFalse(Comment.objects.exists())

    def test_delete_method_doesnt_delete_admin_user(self):
        """
        Tests custom delete method in CustomUser model. Method should not
        be able to delete user who is staff member.
        """
        u1 = CustomUser.objects.create_superuser(
            email="donald@duck.com",
            password="pass"
        )
        site = Site.objects.create(domain='www.google.com')
        thread = Thread.objects.create(site=site, url='url')
        Comment.objects.create(thread=thread, user=u1, text='aaa')
        users = CustomUser.objects.all()
        self.assertEqual(users.count(), 1)
        u1.delete()
        self.assertEqual(users.count(), 1)

    def test_delete_method_deletes_users_comments(self):
        """
        Tests custom delete method in CustomUser model. Except user's data,
        delete should also erase user made comments.
        """
        site = Site.objects.create(domain='www.donald.duck')
        thread = Thread.objects.create(site=site, url='url')
        user = CustomUser.objects.create_user(
            email="donald@duck.com",
            password="pass"
        )
        Comment.objects.create(user=user, thread=thread)
        Comment.objects.create(user=user, thread=thread)
        self.assertEqual(Comment.objects.count(), 2)
        user.delete()
        self.assertEqual(Comment.objects.count(), 0)
class RegisterEndpointTestCase(BaseTestCase):
    """Tests for the user-registration endpoint."""

    def setUp(self):
        self.client = Client()
        self.endpoint_url = reverse('comments:register_user')

    def _register(self, **overrides):
        """POST a registration form built from valid defaults.

        Keyword arguments override the default fields; passing
        ``field=None`` removes that field from the payload entirely.
        Returns the parsed response data dict.
        """
        payload = {
            'email': 'a@b.com',
            'password': 'pass',
            'password2': 'pass',
            'avatar_num': 1,
            'full_name': 'donald duck',
        }
        payload.update(overrides)
        payload = {k: v for k, v in payload.items() if v is not None}
        r = self.client.post(self.endpoint_url, data=payload)
        return self.get_data_from_response(r.content)

    def test_register_user_succeeds(self):
        email = 'a@b.com'
        data = self._register(email=email)
        self.assertEqual(data['status_code'], 200)
        self.assertEqual(CustomUser.objects.count(), 1)
        user = CustomUser.objects.all()[0]
        self.assertEqual(user.email, email)

    def test_register_user_twice_with_same_email_fails(self):
        email = 'a@b.com'
        self._register(email=email)
        data = self._register(email=email)
        self.assertEqual(data['status_code'], 400)

    def test_register_user_with_invalid_email_fails(self):
        data = self._register(email='this is invalid email')
        self.assertEqual(data['status_code'], 400)

    def test_try_get_request_on_register_endpoint_fails(self):
        # Only POST is allowed on this endpoint; GET must yield 405.
        r = self.client.get(self.endpoint_url, data={
            'email': 'this is invalid email',
            'password': 'pass',
            'password2': 'pass',
            'avatar_num': 1,
            'full_name': 'donald duck',
        })
        self.assertEqual(r.status_code, 405)

    def test_register_user_with_nonexistent_email_fails(self):
        # Omit the email field entirely.
        data = self._register(email=None)
        self.assertEqual(data['status_code'], 400)

    def test_register_user_with_nonexistent_password_fails(self):
        # Omit both password fields entirely.
        data = self._register(password=None, password2=None)
        self.assertEqual(data['status_code'], 400)

    def test_register_user_returns_iframeId_if_provided(self):
        iframeId = 'some_donald_duck_id_123'
        data = self._register(iframeId=iframeId)
        self.assertEqual(data['status_code'], 200)
        self.assertTrue('iframeId' in data)
        self.assertEqual(data['iframeId'], iframeId)
class LoginEndpointTestCase(BaseTestCase):
    """Tests for the user-login endpoint."""

    def setUp(self):
        self.client = Client()
        self.endpoint_url = reverse('comments:login_user')

    def _login(self, **payload):
        """POST *payload* to the login endpoint; return the parsed data."""
        r = self.client.post(self.endpoint_url, data=payload)
        return self.get_data_from_response(r.content)

    def test_login_user_succeeds(self):
        email = 'a@b.com'
        password = 'pass'
        CustomUser.objects.create_user(email, password)
        site = Site.objects.create(domain='www.google.com')
        data = self._login(site_id=site.id, email=email, password=password)
        self.assertEqual(data['status_code'], 200)

    def test_login_user_fails_no_data(self):
        data = self._login()
        self.assertEqual(data['status_code'], 400)

    def test_login_user_no_password_fails(self):
        data = self._login(email='a@b.com')
        self.assertEqual(data['status_code'], 400)

    def test_login_user_no_email_fails(self):
        data = self._login(password='pass')
        self.assertEqual(data['status_code'], 400)

    def test_login_user_wrong_password_fails(self):
        email = 'a@b.com'
        CustomUser.objects.create_user(email, 'pass')
        data = self._login(email=email, password='wrong_password')
        self.assertEqual(data['status_code'], 400)

    def test_login_user_wrong_email_fails(self):
        data = self._login(email='wrong@wrongity.wrong', password='pass')
        self.assertEqual(data['status_code'], 400)

    def test_login_user_wrong_http_method(self):
        # Only POST is allowed on this endpoint; GET must yield 405.
        r = self.client.get(self.endpoint_url, data={
            'email': 'a@b.com',
            'password': 'pass',
        })
        self.assertEqual(r.status_code, 405)

    def test_login_user_returns_iframeId_if_provided(self):
        email = 'a@b.com'
        password = 'pass'
        CustomUser.objects.create_user(email, password)
        iframeId = 'some_donald_duck_id_123'
        site = Site.objects.create(domain='www.google.com')
        data = self._login(site_id=site.id, email=email,
                           password=password, iframeId=iframeId)
        self.assertEqual(data['status_code'], 200)
        self.assertTrue('iframeId' in data)
        self.assertEqual(data['iframeId'], iframeId)
|
"""
Functions that construct stable closed loop control systems. Many of the
methods here are adapted from Digital Control: A State-Space Approach and
accompanying courses at URI.
Requires numpy, scipy, control
"""
from __future__ import print_function
import control_poles
import control
import numpy as np
from numpy import linalg as LA
from scipy import signal
import cmath
def design_regsf(sys_c_ol, sampling_interval, desired_settling_time, spoles=None):
    """ Design a digital full state feedback regulator with the desired settling time.

    Args:
        sys_c_ol (StateSpace): The continuous plant model
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        spoles (optional): The desired closed loop poles (s-plane). If not supplied,
            suitable poles are chosen automatically. Default is None.

    Returns:
        tuple: (sys_d_ol, L) where sys_d_ol is the discrete plant and L is the
        stabilizing gain matrix, or None on any design failure.
    """
    # Make sure the system is in fact continuous and not discrete
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None
    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    num_states = A.shape[0]
    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B
    # Check controlability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    if LA.matrix_rank(controllability_mat) != num_states:
        print("Error: System is not controlable")
        return None
    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    if LA.matrix_rank(observability_mat) != num_states:
        print("Error: System is not observable")
        return None
    # Choose poles if none were given
    if spoles is None:
        spoles = []
        (sys_spoles, vectors) = LA.eig(A)
        # first go through the system poles and see if they are suitable.
        s1_normalized = control_poles.bessel_spoles(1, desired_settling_time)[0]
        for pole in sys_spoles:
            if pole.real < s1_normalized:
                # Use sufficiently damped plant poles: plant poles whose real parts lie to the left of s1/Ts.
                spoles.append(pole)
            elif pole.imag != 0 and pole.real > s1_normalized and pole.real < 0:
                # Replace real part of a complex pole that is not sufficiently damped with s1/Ts
                spoles.append(complex(s1_normalized, pole.imag))
            elif pole.real > 0 and pole.real > s1_normalized:
                # Reflect the unstable pole about the imaginary axis and use that
                spoles.append(complex(-pole.real, pole.imag))
        num_spoles_left = num_states - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(control_poles.bessel_spoles(num_spoles_left, desired_settling_time))
    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)
    # Place the poles such that eig(phi - gamma*L) lie inside the unit circle.
    full_state_feedback = signal.place_poles(phi, gamma, zpoles)
    # Check the poles for stability just in case.
    # BUG FIX: use >= so marginally stable poles ON the unit circle are
    # rejected too (consistent with design_regob's check).
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable")
            return None
    L = full_state_feedback.gain_matrix
    return (sys_d_ol, np.matrix(L))
def design_regob(sys_c_ol, sampling_interval, desired_settling_time,
                 desired_observer_settling_time=None, spoles=None, sopoles=None,
                 disp=True):
    """ Design a digital full order observer regulator with the desired settling time.

    Args:
        sys_c_ol (StateSpace): The continuous plant model
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        desired_observer_settling_time (optional): The desired observer settling time
            in seconds. If not provided the observer settling time will be 4 times faster
            than the overall settling time. Default is None.
        spoles (optional): The desired closed loop poles. If not supplied, suitable
            poles are chosen automatically. Default is None.
        sopoles (optional): The desired observer poles. If not supplied, suitable
            poles are chosen automatically. Default is None.
        disp: Print debugging output. Default is True.

    Returns:
        tuple: (sys_d_ol, L, K) where sys_d_ol is the discrete plant, L is the
        stabilizing gain matrix and K is the observer gain matrix, or None on
        any design failure.
    """
    # Make sure the system is in fact continuous and not discrete
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None
    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    num_states = A.shape[0]
    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B
    # Check controlability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    rank = LA.matrix_rank(controllability_mat)
    if rank != num_states:
        print(rank, num_states)
        print("Error: System is not controlable")
        return None
    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    if LA.matrix_rank(observability_mat) != num_states:
        print("Error: System is not observable")
        return None
    # Choose feedback poles if none were given
    if spoles is None:
        spoles = []
        (sys_spoles, vectors) = LA.eig(A)
        # first go through the system poles and see if they are suitable.
        s1_normalized = control_poles.bessel_spoles(1, desired_settling_time)[0]
        for pole in sys_spoles:
            if pole.real < s1_normalized:
                # Use sufficiently damped plant poles: plant poles whose real parts lie to the left of s1/Ts.
                spoles.append(pole)
                if disp:
                    print("Using sufficiently damped plant pole", pole)
            elif pole.imag != 0 and pole.real > s1_normalized and pole.real < 0:
                # Replace real part of a complex pole that is not sufficiently damped with s1/Ts
                pole = complex(s1_normalized, pole.imag)
                spoles.append(pole)
                if disp:
                    print("Using added damping pole", pole)
            elif pole.real > 0 and pole.real > s1_normalized:
                # Reflect the unstable pole about the imaginary axis and use that
                pole = complex(-pole.real, pole.imag)
                spoles.append(pole)
                if disp:
                    print("Using pole reflection", pole)
        num_spoles_left = num_states - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(control_poles.bessel_spoles(num_spoles_left, desired_settling_time))
            if disp:
                print("Using normalized bessel for the remaining", num_spoles_left, "spoles")
    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)
    # place the poles such that eig(phi - gamma*L) are inside the unit circle
    full_state_feedback = signal.place_poles(phi, gamma, zpoles)
    # Check the poles for stability just in case
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable")
            return None
    L = full_state_feedback.gain_matrix
    # Choose observer poles if none were given
    if sopoles is None:
        sopoles = []
        if desired_observer_settling_time is None:
            # Observer should settle ~4x faster than the closed loop.
            desired_observer_settling_time = desired_settling_time / 4
        # TODO: Find existing poles based on the rules. For now just use bessel
        num_sopoles_left = num_states - len(sopoles)
        if num_sopoles_left > 0:
            # Use normalized bessel poles for the rest
            sopoles.extend(control_poles.bessel_spoles(num_sopoles_left, desired_observer_settling_time))
            if disp:
                print("Using normalized bessel for the remaining", num_sopoles_left, "sopoles")
    zopoles = control_poles.spoles_to_zpoles(sopoles, sampling_interval)
    # Find K such that eig(phi - K*C) are inside the unit circle, by placing
    # poles of the dual (transposed) system.
    full_state_feedback = signal.place_poles(np.transpose(phi), np.transpose(C), zopoles)
    # BUG FIX: use >= 1 like the state-feedback check above; a pole ON the
    # unit circle is not asymptotically stable.
    for zopole in full_state_feedback.computed_poles:
        if abs(zopole) >= 1:
            print("Computed observer pole is not stable")
            return None
    K = np.transpose(full_state_feedback.gain_matrix)
    return (sys_d_ol, np.matrix(L), np.matrix(K))
def design_tsob(sys_c_ol, sampling_interval, desired_settling_time,
        desired_observer_settling_time=None, spoles=None, sopoles=None, sapoles=None,
        disp=True):
    """ Design a digital full order observer tracking system with the desired settling time.

    Args:
        sys_c_ol (StateSpace): The continuous plant model
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        desired_observer_settling_time (optional): The desired observer settling time
            in seconds. If not provided the observer settling time will be 4 times faster
            than the overall settling time. Default is None.
        spoles (optional): The desired closed loop poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        sopoles (optional): The desired observer poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        sapoles (optional): The poles of the reference input and disturbance vectors.
            If not supplied the reference input is assumed to be a step. Default is None.
        disp: Print debugging output. Default is True.

    Returns:
        tuple: (sys_d_ol, phia, gammaa, L1, L2, K) Where sys_d_ol is the discrete plant,
        phia is the discrete additional dynamics A matrix, gammaa is the discrete
        additional dynamics B matrix, L1 is the plant gain matrix, L2 is the
        additional gain matrix, and K is the observer gain matrix.
        None when the plant is discrete, uncontrollable, unobservable, or when
        pole placement produced an unstable pole.
    """
    if disp:
        print("Designing a tracking system with full order observer.")
    # Make sure the system is in fact continuous and not discrete
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None
    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    D = sys_c_ol.D
    num_states = A.shape[0]
    num_inputs = B.shape[1]
    num_outputs = C.shape[0]
    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B
    # Check controllability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    rank = LA.matrix_rank(controllability_mat)
    if rank != num_states:
        # Report the rank deficiency instead of a separate debug print.
        print("Error: System is not controllable (rank %s of %s states)" % (rank, num_states))
        return None
    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    rank = LA.matrix_rank(observability_mat)
    if rank != num_states:
        print("Error: System is not observable")
        return None
    # Create the design model with additional dynamics
    if sapoles is None:
        # assume tracking a step input (s=0, z=1)
        sapoles = [0]
    # Characteristic polynomial coefficients (sign flipped, leading 1 dropped)
    # become the additional-dynamics input vector.
    zapoles = [ -p for p in np.poly(control_poles.spoles_to_zpoles(sapoles, sampling_interval)) ]
    zapoles = np.delete(zapoles, 0)  # the first coefficient isn't important
    gammaa = np.transpose(np.matrix(zapoles))
    q = gammaa.shape[0]
    # Companion form: first column holds the coefficients, the rest shifts.
    phia_left = np.matrix(gammaa)
    phia_right = np.concatenate((np.eye(q-1), np.zeros((1, q-1))), axis=0)
    phia = np.concatenate((phia_left, phia_right), axis=1)
    if num_outputs > 1:
        # replicate the additional dynamics, one copy per output
        phia = np.kron(np.eye(num_outputs), phia)
        gammaa = np.kron(np.eye(num_outputs), gammaa)
    # Form the design matrix (plant augmented with the additional dynamics)
    phid_top_row = np.concatenate((phi, np.zeros((num_states, q*num_outputs))), axis=1)
    phid_bot_row = np.concatenate((gammaa*C, phia), axis=1)
    phid = np.concatenate((phid_top_row, phid_bot_row), axis=0)
    gammad = np.concatenate((gamma, np.zeros((gammaa.shape[0], num_inputs))), axis=0)
    # Choose poles if none were given
    if spoles is None:
        spoles = []
        (sys_spoles, vectors) = LA.eig(A)
        # first go through the system poles and see if they are suitable.
        s1_normalized = control_poles.bessel_spoles(1, desired_settling_time)[0]
        for pole in sys_spoles:
            if pole.real < s1_normalized:
                # Use sufficiently damped plant poles: plant poles whose real parts lie to the left of s1/Ts.
                spoles.extend([pole])
                if disp:
                    print("Using sufficiently damped plant pole", pole)
            elif pole.imag != 0 and pole.real > s1_normalized and pole.real < 0:
                # Replace real part of a complex pole that is not sufficiently damped with s1/Ts
                pole = [complex(s1_normalized, pole.imag)]
                spoles.extend(pole)
                if disp:
                    print("Using added damping pole", pole)
            elif pole.real > 0 and pole.real > s1_normalized:
                # Reflect the pole about the imaginary axis and use that
                pole = [complex(-pole.real, pole.imag)]
                spoles.extend(pole)
                if disp:
                    print("Using pole reflection", pole)
        num_spoles_left = phid.shape[0] - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(control_poles.bessel_spoles(num_spoles_left, desired_settling_time))
            if disp:
                print("Using normalized bessel for the remaining", num_spoles_left, "poles")
    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)
    if disp:
        print("spoles = ", spoles)
        print("zpoles = ", zpoles)
    # place the poles such that eig(phi - gamma*L) are inside the unit circle
    full_state_feedback = signal.place_poles(phid, gammad, zpoles)
    # Check the poles for stability just in case
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable", zpole)
            return None
    L = full_state_feedback.gain_matrix
    # Split the gain into the plant part (L1) and additional-dynamics part (L2).
    L1 = L[:,0:num_states]
    L2 = L[:,num_states:]
    # Choose poles if none were given
    if sopoles is None:
        sopoles = []
        if desired_observer_settling_time is None:
            desired_observer_settling_time = desired_settling_time/4
        # TODO: Find existing poles based on the rules. For now just use bessel
        num_sopoles_left = num_states - len(sopoles)
        if num_sopoles_left > 0:
            # Use normalized bessel poles for the rest
            sopoles.extend(control_poles.bessel_spoles(num_sopoles_left, desired_observer_settling_time))
    zopoles = control_poles.spoles_to_zpoles(sopoles, sampling_interval)
    # Find K such that eig(phi - KC) are inside the unit circle
    full_state_feedback = signal.place_poles(np.transpose(phi), np.transpose(C), zopoles)
    # Check the poles for stability just in case.  Use >= 1 to match the
    # controller check above: a pole on the unit circle is not asymptotically
    # stable either.
    for zopole in full_state_feedback.computed_poles:
        if abs(zopole) >= 1:
            print("Computed observer pole is not stable", zopole)
            return None
    K = np.transpose(full_state_feedback.gain_matrix)
    return (sys_d_ol, phia, gammaa, np.matrix(L1), np.matrix(L2), np.matrix(K))
|
import sys, tempfile, shlex, glob, os
from subprocess import *
from collections import defaultdict
# Script metadata (provenance only; not read programmatically).
_author="Sebastian Szpakowski"
_date="2011/09/14"
_version="Version 1"
class MothurCommand():
    """Parsed description of a single Mothur command: its arguments,
    input/output file types and the cross-links between input types."""

    def __init__(self):
        self.name = ""
        self.input_files = dict()    # input type -> (required, one, oneplus, linked)
        self.output_files = dict()   # output type -> "" (filled in elsewhere)
        self.args = dict()           # argument name -> parsed spec values
        self.annotation = dict()     # untyped metadata lines
        self.text = ""               # human-readable dump of everything added
        self.linkbacks = defaultdict(list)  # file type -> input types referencing it

    def addArg(self, type, arg, val):
        """Record one "arg=val" line from Mothur's get.commandinfo output.

        ``type`` is the current section header ("" for annotation lines).
        The parameter shadows the builtin but is kept for call compatibility.
        """
        self.text = "%s\n%s\t%s" % (self.text, arg[0:50].ljust(50), val[0:50].ljust(50))
        if arg == "commandName":
            self.name = val
        elif type == "":
            self.annotation[arg] = (val)
        elif type == "outputTypesNames" and type != arg:
            self.output_files[arg] = ""
        elif arg == "inputTypes":
            for i in val.split("-"):
                self.input_files[i] = ()
        else:
            self.parseType(arg, val)

    def parseType(self, arg, val):
        """Parse a "|"-separated specification for an argument or input type."""
        vals = val.strip().split("|")
        if len(vals) == 5:
            # Regular argument spec: choices|default|boolean flag|...|...
            vals[2] = self.booleanify(vals[2])
            vals[0] = vals[0].strip().split("-")
            self.args[arg] = vals
        elif arg in self.input_files.keys():
            # Input file type: remember requirements and record reverse links.
            req, imp, one, oneplus, linked, outs = vals
            self.input_files[arg] = (req, one, oneplus, linked)
            for K in one, oneplus, linked:
                if K != "none":
                    for L in K.strip().split("-"):
                        self.linkbacks[L].append(arg)
        else:
            self.args[arg] = vals
        #print self.name, ":", arg, "=", val

    def booleanify(self, x):
        """Map Mothur's T/F spellings to bool; raise ValueError otherwise."""
        if x in ["T", "t", "True", "true", "TRUE"]:
            return True
        elif x in ["F", "f", "False", "false", "FALSE"]:
            return False
        else:
            raise ValueError("not a recognized boolean value: %r" % (x,))

    def getInputs(self):
        return self.input_files.keys()

    def getArguments(self):
        return self.args.keys()

    def isAnArgument(self, x):
        return (x in self.args.keys())

    def getOutputs(self):
        return self.output_files.keys()

    def isRequired(self, filetype):
        return (self.booleanify(self.input_files[filetype][0]))

    def getAlternativeInput(self, filetype):
        x = set(self.linkbacks[self.input_files[filetype][1]])
        x = x.difference(set([filetype]))
        return (x)

    def getOtherInput(self, filetype):
        x = set(self.linkbacks[self.input_files[filetype][2]])
        x = x.difference(set([filetype]))
        return (x)

    def getLinkedInput(self, filetype):
        x = set(self.linkbacks[self.input_files[filetype][3]])
        x = x.difference(set([filetype]))
        return (x)

    def getDefault(self, arg):
        return (self.args[arg])

    def __str__(self):
        # NOTE: a leftover debug "print self.output_files" was removed here;
        # __str__ must be side-effect free.
        otpt = "%s:" % (self.name)
        otpt = "%s\n%s\t%s" % (otpt, "input".rjust(10), "".join([x.ljust(20) for x in self.input_files.keys()]))
        otpt = "%s\n%s\t%s" % (otpt, "req".rjust(10), "".join(["".join("%s" % self.isRequired(x))[:18].ljust(20) for x in self.input_files.keys()]))
        otpt = "%s\n%s\t%s" % (otpt, "alt".rjust(10), "".join(["-".join(self.getAlternativeInput(x))[:18].ljust(20) for x in self.input_files.keys()]))
        otpt = "%s\n%s\t%s" % (otpt, "multi".rjust(10), "".join(["-".join(self.getOtherInput(x))[:18].ljust(20) for x in self.input_files.keys()]))
        otpt = "%s\n%s\t%s" % (otpt, "link".rjust(10), "".join(["-".join(self.getLinkedInput(x))[:18].ljust(20) for x in self.input_files.keys()]))
        otpt = "%s\n%s\t%s" % (otpt, "output".rjust(10), "".join([x.ljust(20) for x in self.output_files.keys()]))
        otpt = "%s\n%s\t%s" % (otpt, "---".rjust(10), "".join([self.output_files[x].ljust(20) for x in self.output_files.keys()]))
        otpt = "%s\n%s" % (otpt, self.text)
        return otpt
class MothurCommandInfo():
    """Runs Mothur once with get.commandinfo and parses the resulting
    catalogue of commands into MothurCommand objects."""

    def __init__(self, path=""):
        # Run Mothur in a scratch directory so its log files don't pollute cwd.
        tmpdir = tempfile.mkdtemp(prefix="output")
        p = Popen(shlex.split("%smothur #get.commandinfo(output=mothurinfo)" % (path)), stdout=PIPE, stderr=PIPE, cwd=tmpdir)
        # NOTE(review): on Python 3 these lines are bytes, not str; parseLog's
        # startswith() checks assume str -- confirm intended interpreter.
        log = p.stdout.readlines()
        info = loadLines("%s/mothurinfo" % (tmpdir))
        self.version, self.date = self.parseLog(log)
        self.commands = dict()
        self.parseInfo(info)
        self.cleanUpDir(tmpdir)

    def parseLog(self, log):
        """Extract (version, date) from Mothur's startup banner lines."""
        version = ""
        date = ""
        for line in log:
            if line.startswith("mothur v"):
                version = line[9:].strip()
            if line.startswith("Last updated"):
                date = line[14:].strip()
        print("Found Mothur: %s [%s]" % (version, date))
        return (version, date)

    def parseInfo(self, info):
        """Split the mothurinfo dump into per-command MothurCommand records."""
        current = MothurCommand()
        counter = 0
        curtype = ""
        for line in info:
            line = line.strip().split("=")
            A = line[0]
            if len(line) > 1:
                B = "=".join(line[1:])
                if B.isdigit() or A == "inputTypes":
                    curtype = A
                if A == "commandName":
                    # A new command begins: store the one built so far.
                    self.addCommand(current)
                    current = MothurCommand()
                    curtype = ""
                current.addArg(curtype, A, B)
            else:
                # A bare number is Mothur's own count of commands.
                counter = int(A)
        self.addCommand(current)
        print("Identified %s commands [reported: %s]" % (len(self.commands.keys()), counter))

    def addCommand(self, K):
        # Skip the empty placeholder created before the first commandName.
        if K.name != "":
            self.commands[K.name] = K

    def cleanUpDir(self, path, counter=1):
        """Remove the scratch directory, retrying while Mothur still holds files."""
        import time  # local import: 'time' was missing from this module's imports
        try:
            for file in glob.glob("%s/*" % (path)):
                os.remove(file)
            os.rmdir(path)
        except OSError:
            print("Tmp cleanup delayed... x%s [%s]" % (counter, path))
            time.sleep(1)
            self.cleanUpDir(path, counter=counter+1)

    def getCommandInfo(self, x):
        return self.commands[x]
################################################
### Read in a file and return a list of lines
###
def loadLines(x):
    """Return the lines of file *x* as a list; an empty list if unreadable."""
    try:
        # 'with' guarantees the handle is closed even if readlines() fails.
        with open(x, "r") as fp:
            return fp.readlines()
    except (IOError, OSError):
        # Missing/unreadable file: callers iterate the result, so return an
        # empty sequence instead of raising (was previously an empty string).
        #print "%s cannot be opened, does it exist? " % ( x )
        return []
def test(path = "./"):
    """Smoke test: harvest Mothur's command catalogue from *path* and print
    the details for align.seqs."""
    M = MothurCommandInfo(path)
    x = M.getCommandInfo("align.seqs")
    # print(x) is valid on both Python 2 and 3 for a single argument.
    print(x)
|
import api_utilities
import networkx as nx
import matplotlib.pyplot as plt
def make_group_tree(identifier,idtype='doi',level=0,maxlevel=2):
    '''Build a networkX graph of citation groups for papers in the database.

    Pass a DOI (default), or a URI with the keyword argument idtype='uri'.
    Paper nodes are blue, citation-group nodes are red.  References cited at
    least twice are expanded recursively, up to ``maxlevel`` levels deep.

    Example:
    -------
    import citationTrees
    import api_utilities
    import networkx as nx
    import matplotlib.pyplot as plt

    doi = api_utilities.randdoi()
    G = citationTrees.make_group_tree(doi)
    pos = nx.spring_layout(G)
    nx.draw(G, pos, node_color=[G.node[node]['color'] for node in G])
    labels = {}
    for node in G:
        if G.node[node]['color'] == 'red':
            labels[node] = ''
        else:
            labels[node] = G.node[node]['label']
    nx.draw_networkx_labels(G, pos, labels)
    plt.show()  # needed when not using interactive matplotlib
    '''
    def short_title(record):
        # First ten characters of the title, or a placeholder when absent.
        try:
            return record['bibliographic']['title'][0:10]
        except KeyError:
            return 'no title'

    graph = nx.Graph()

    # Look the paper up; an unknown identifier yields an empty graph.
    if idtype=='doi':
        if not api_utilities.in_database(identifier):
            return graph
        record = api_utilities.citations(identifier)
    else:
        if not api_utilities.in_database_from_uri(identifier):
            return graph
        record = api_utilities.citations_from_uri(identifier)

    # Root node for the paper itself.
    root = record['uri']
    graph.add_node(root, color='blue', label=short_title(record))

    # Papers without citation groups contribute only the root node.
    try:
        record['citation_groups']
    except KeyError:
        return graph

    # One red node per citation group, each attached to the root.
    for group in record['citation_groups']:
        group_node = (root, group['id'])
        graph.add_node(group_node,
                       text_before=group['context']['text_before'],
                       text_after=group['context']['text_after'],
                       color='red')
        graph.add_edge(root, group_node, label=group['id'])

    # Blue nodes for every reference, linked to the groups that cite them.
    for ref in record['references']:
        graph.add_node(ref['uri'], color='blue', label=short_title(ref))
        for group_id in ref['citation_groups']:
            graph.add_edge((root, group_id), ref['uri'])

    # Expand references that are cited two or more times.
    if level != maxlevel:
        for ref in record['references']:
            if len(ref['citation_groups']) >= 2:
                subtree = make_group_tree(ref['uri'], idtype='uri',
                                          level=level + 1, maxlevel=maxlevel)
                graph = nx.compose(graph, subtree)
    return graph
|
from flask import Flask, render_template

# Minimal two-page Flask site; all content lives in the templates.
app = Flask(__name__)


@app.route('/')
def home():
    """Serve the landing page."""
    return render_template("home.html")


@app.route('/about/')
def about():
    """Serve the about page."""
    return render_template("about.html")


if __name__ == '__main__':
    # Development server only; use a real WSGI server in production.
    app.run()
|
import os
from setuptools import setup

# Read the long description without leaking the file handle.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

# Allow this script to be run from any working directory.
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-visits',
    version='0.1',
    packages=['visits'],
    include_package_data=True,
    license='MIT License',
    description='A simple Django app to save the number of visits on any Django Model.',
    long_description=README,
    url='https://github.com/FrancoisConstant/django-visits',
    author='Francois Constant',
    author_email='francois.constant@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
    zip_safe=False,
)
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds Category.index2, an ordering field (default 999 sorts untouched
    # categories last).  The verbose_name bytes are UTF-8 encoded Chinese
    # for "分类的排序" ("category ordering").

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='index2',
            field=models.IntegerField(default=999, verbose_name=b'\xe5\x88\x86\xe7\xb1\xbb\xe7\x9a\x84\xe6\x8e\x92\xe5\xba\x8f'),
        ),
    ]
|
class AbstractPrinter(object):
    """Interface for printers that render an operation's results."""

    def printResults(self, operation):
        """Render *operation*'s results; concrete printers override this."""
        pass
class ConsolePrinter(AbstractPrinter):
def printResults(self, operation):
operationName = type(operation).__name__
if operationName == 'CountNumberOfElements':
self.CountNumberOfElementsPrinter( operation )
elif operationName == 'ListOcurrences':
self.ListOcurrencesPrinter( operation )
else:
raise Exception( "Console Printer can't print output for " + operationName )
def CountNumberOfElementsPrinter( self, operation ):
print "Total number of elements:", operation.getResults()
print ""
def ListOcurrencesPrinter( self, operation ):
if operation.limit != None:
print "Top %d most frequently used tags:" % operation.limit
else:
print "Ocurrences by tag: "
results = operation.getResults()
for (tag, ocurrences) in results:
print "{:>5}: {:d}".format(tag, ocurrences)
print ""
|
from datetime import date
from workalendar.tests import GenericCalendarTest
from workalendar.america import Brazil, BrazilSaoPauloState
from workalendar.america import BrazilSaoPauloCity
from workalendar.america import Colombia
from workalendar.america import Mexico, Chile, Panama
class BrazilTest(GenericCalendarTest):
    """National Brazilian holidays."""

    cal_class = Brazil

    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        expected = [
            (1, 1),    # new year
            (4, 21),   # Tiradentes
            (5, 1),    # Dia do trabalhador
            (9, 7),    # Dia da Independência
            (10, 12),  # Nossa Senhora Aparecida
            (11, 2),   # Finados
            (11, 15),  # Proclamação da República
            (12, 25),  # Natal
        ]
        for month, day in expected:
            self.assertIn(date(2013, month, day), holidays)
class SaoPauloStateTest(BrazilTest):
    """São Paulo state holidays (also reruns the national Brazilian checks)."""

    cal_class = BrazilSaoPauloState

    def test_regional_2013(self):
        holidays = self.cal.holidays_set(2013)
        # Revolução Constitucionalista de 1932
        constitutionalist_revolution = date(2013, 7, 9)
        self.assertIn(constitutionalist_revolution, holidays)
class SaoPauloCityTest(SaoPauloStateTest):
    """São Paulo city holidays (also reruns the state-level checks)."""

    cal_class = BrazilSaoPauloCity

    def test_city_2013(self):
        holidays = self.cal.holidays_set(2013)
        expected = [
            (1, 25),   # Aniversário da Cidade de São Paulo
            (2, 12),   # Carnaval
            (11, 20),  # Dia da Consciência Negra
            (3, 29),   # Sexta-feira da Paixão
            (3, 31),   # Páscoa
            (5, 30),   # Corpus Christi
        ]
        for month, day in expected:
            self.assertIn(date(2013, month, day), holidays)
class ChileTest(GenericCalendarTest):
    """Chilean national holidays."""

    cal_class = Chile

    def test_holidays_2013(self):
        holidays = self.cal.holidays_set(2013)
        expected = [
            (1, 1), (3, 29), (3, 30), (5, 1), (5, 21), (6, 29),
            (7, 16), (8, 15), (9, 18), (9, 19), (9, 20), (10, 12),
            (10, 31), (11, 1), (12, 8), (12, 25), (12, 31),
        ]
        for month, day in expected:
            self.assertIn(date(2013, month, day), holidays)

    def test_reformation_day(self):
        # In some years Reformation Day is observed away from October 31st.
        for year, observed in ((2012, date(2012, 11, 2)),
                               (2017, date(2017, 10, 27))):
            holidays = self.cal.holidays_set(year)
            self.assertNotIn(date(year, 10, 31), holidays)
            self.assertIn(observed, holidays)
class ColombiaTest(GenericCalendarTest):
    """Colombian holidays."""

    cal_class = Colombia

    def test_holidays_2015(self):
        holidays = self.cal.holidays_set(2015)
        expected = [
            (1, 1), (1, 12), (3, 23), (3, 29), (4, 2), (4, 3), (4, 5),
            (5, 1), (5, 18), (6, 8), (6, 15), (6, 29), (7, 20), (8, 7),
            (8, 17), (10, 12), (11, 2), (11, 16), (12, 8), (12, 25),
        ]
        for month, day in expected:
            self.assertIn(date(2015, month, day), holidays)
        # Exactly these 20 holidays and nothing more.
        self.assertEqual(len(holidays), 20)
class MexicoTest(GenericCalendarTest):
    """Mexican holidays, including the weekend-shift rules."""

    cal_class = Mexico

    def test_holidays_2013(self):
        holidays = self.cal.holidays_set(2013)
        expected = [
            (1, 1),    # New year
            (2, 4),    # Constitution day
            (3, 18),   # Benito Juárez's birthday
            (5, 1),    # Labour day
            (9, 16),   # Independence day
            (11, 18),  # Revolution day
            (12, 25),  # Christmas
        ]
        for month, day in expected:
            self.assertIn(date(2013, month, day), holidays)

    def test_shift_to_monday(self):
        # Sunday holidays are observed on the following Monday.
        self.assertIn(date(2017, 1, 2), self.cal.holidays_set(2017))  # New year 2017
        holidays = self.cal.holidays_set(2016)
        self.assertIn(date(2016, 12, 26), holidays)  # Christmas 2016
        self.assertIn(date(2016, 5, 2), holidays)    # Labour day 2016

    def test_shift_to_friday(self):
        # Saturday holidays are observed on the preceding Friday.
        holidays = self.cal.holidays_set(2021)
        self.assertIn(date(2021, 12, 31), holidays)  # New year 2022 (Saturday)
        self.assertIn(date(2021, 4, 30), holidays)   # Labour day 2021
        self.assertIn(date(2021, 12, 24), holidays)  # Christmas 2021 (Saturday)
class PanamaTest(GenericCalendarTest):
    """Panamanian holidays."""

    cal_class = Panama

    def test_holidays_2013(self):
        holidays = self.cal.holidays_set(2013)
        expected = [
            (1, 1),    # New year
            (1, 9),    # Martyrs day
            (2, 11),   # carnival monday
            (2, 12),   # carnival tuesday
            (3, 29),   # good friday
            (3, 30),   # easter saturday
            (5, 1),    # labour day
            (11, 3),   # independence day
            (11, 5),   # colon day
            (11, 10),  # Shout in Villa de los Santos
            (12, 2),   # Independence from spain
            (12, 8),   # mother day
            (12, 25),  # Christmas
        ]
        for month, day in expected:
            self.assertIn(date(2013, month, day), holidays)
        # Easter Sunday itself is not a public holiday.
        self.assertNotIn(date(2013, 3, 31), holidays)

    def test_presidental_inaug(self):
        # Presidential inauguration, July 1st of an election year.
        self.assertIn(date(2014, 7, 1), self.cal.holidays_set(2014))
|
import sys, os
from datetime import datetime

# Make the project importable so sphinx.ext.autodoc can find it.
projpath = os.path.abspath('..')
sys.path.append(projpath)

extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'django-slow-log'
copyright = u'2010 Jason Moiron'

# Pull the release number from setup.py so the docs can't drift from the
# package; fall back to a stub version when it can't be found.
version = None
with open(os.path.join(projpath, 'setup.py'), 'r') as setup_file:
    for line in setup_file:
        if line.startswith('version'):
            exec(line)  # executes the "version = '...'" assignment from setup.py
if version is None:
    version = '0.1'
release = version
print ("Building release: %s, version: %s" % (release, version))

exclude_trees = ['_build']
pygments_style = 'sphinx'
html_theme = 'nature'
html_theme_path = ['_theme']
html_static_path = ['_static']
htmlhelp_basename = 'django-slow-logdoc'
latex_documents = [
    ('index', 'django-slow-log.tex', u'django-slow-log Documentation',
     u'Jason Moiron', 'manual'),
]
|
"""Implementations of resource abstract base class receivers."""
import abc
class ResourceReceiver:
    """The resource receiver is the consumer supplied interface for receiving notifications pertaining to new, updated or deleted ``Resource`` objects."""
    # NOTE(review): ``__metaclass__`` only takes effect on Python 2; on
    # Python 3 it is an ordinary attribute, so the abstractmethod markers
    # are not enforced at instantiation time.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def new_resources(self, notification_id, resource_ids):
        """The callback for notifications of new resources.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param resource_ids: the ``Ids`` of the new ``Resources``
        :type resource_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def changed_resources(self, notification_id, resource_ids):
        """The callback for notification of updated resources.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param resource_ids: the ``Ids`` of the updated ``Resources``
        :type resource_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def deleted_resources(self, notification_id, resource_ids):
        """the callback for notification of deleted resources.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param resource_ids: the ``Ids`` of the deleted ``Resources``
        :type resource_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
class GroupReceiver:
    """The resource group receiver is the consumer supplied interface for receiving notifications pertaining to new or deleted members."""
    # NOTE(review): ``__metaclass__`` only takes effect on Python 2; on
    # Python 3 the abstractmethod markers are not enforced.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def new_member(self, group_id, member_id):
        """The callback for notifications of new resource members.
        :param group_id: the ``Id`` of the ``Resource`` group
        :type group_id: ``osid.id.Id``
        :param member_id: the ``Id`` of the new ``Resource`` member
        :type member_id: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def deleted_member(self, group_id, member_id):
        """the callback for notification of deleted resource members.
        :param group_id: the ``Id`` of the ``Resource`` group
        :type group_id: ``osid.id.Id``
        :param member_id: the ``Id`` of the new ``Resource`` member
        :type member_id: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
class ResourceRelationshipReceiver:
    """The resource relationship receiver is the consumer supplied interface for receiving notifications pertaining to new, updated or deleted ``ResourceRelationships``."""
    # NOTE(review): ``__metaclass__`` only takes effect on Python 2; on
    # Python 3 the abstractmethod markers are not enforced.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def new_resource_relationships(self, notification_id, resource_relationship_ids):
        """The callback for notifications of new relationships.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param resource_relationship_ids: the ``Ids`` of the new ``ResourceRelationships``
        :type resource_relationship_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def changed_resource_relationships(self, notification_id, resource_relationship_ids):
        """The callback for notification of updated relationships.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param resource_relationship_ids: the ``Ids`` of the updated ``ResourceRelationships``
        :type resource_relationship_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def deleted_resource_relationships(self, notification_id, resource_relationship_ids):
        """The callback for notification of deleted relationships.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param resource_relationship_ids: the ``Ids`` of the deleted ``ResourceRelationships``
        :type resource_relationship_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
class BinReceiver:
    """The bin receiver is the consumer supplied interface for receiving notifications pertaining to new, updated or deleted ``Bin`` objects."""
    # NOTE(review): ``__metaclass__`` only takes effect on Python 2; on
    # Python 3 the abstractmethod markers are not enforced.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def new_bins(self, notification_id, bin_ids):
        """The callback for notifications of new bins.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param bin_ids: the ``Ids`` of the new ``Bins``
        :type bin_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def new_ancestor_bin(self, bin_id, ancestor_id):
        """The callback for notifications of new bin ancestors.
        :param bin_id: the ``Id`` of the ``Bin``
        :type bin_id: ``osid.id.Id``
        :param ancestor_id: the ``Id`` of the new ``Bin`` ancestor
        :type ancestor_id: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def new_descendant_bin(self, bin_id, descendant_id):
        """The callback for notifications of new bin descendants.
        :param bin_id: the ``Id`` of the ``Bin``
        :type bin_id: ``osid.id.Id``
        :param descendant_id: the ``Id`` of the new ``Bin`` descendant
        :type descendant_id: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def changed_bins(self, notification_id, bin_ids):
        """The callback for notification of updated bins.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param bin_ids: the ``Ids`` of the updated ``Bins``
        :type bin_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def deleted_bins(self, notification_id, bin_ids):
        """The callback for notification of deleted bins.
        :param notification_id: the notification ``Id``
        :type notification_id: ``osid.id.Id``
        :param bin_ids: the ``Ids`` of the deleted ``Bins``
        :type bin_ids: ``osid.id.IdList``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def deleted_ancestor_bin(self, bin_id, ancestor_id):
        """The callback for notifications of deleted bin ancestors.
        :param bin_id: the ``Id`` of the ``Bin``
        :type bin_id: ``osid.id.Id``
        :param ancestor_id: the ``Id`` of the removed ``Bin`` ancestor
        :type ancestor_id: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def deleted_descendant_bin(self, bin_id, descendant_id):
        """The callback for notifications of deleted bin descendants.
        :param bin_id: the ``Id`` of the ``Bin``
        :type bin_id: ``osid.id.Id``
        :param descendant_id: the ``Id`` of the removed ``Bin`` descendant
        :type descendant_id: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def restructured_bin_hierarchy(self):
        """The callback for notifications of changes to a bin hierarchy where the hierarchy needs to refreshed.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
|
"""Module for the Sticky cog."""
import asyncio
import contextlib
import logging
from typing import Any, Dict, Optional, Union
import discord
from redbot.core import Config, checks, commands
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import ReactionPredicate
# Unique identifier for this cog's Config storage.
UNIQUE_ID = 0x6AFE8000
log = logging.getLogger("red.sticky")
class Sticky(commands.Cog):
    """Sticky messages to your channels."""

    # Delay in seconds before re-posting the sticky.
    # NOTE(review): not referenced by the commands visible here — presumably
    # used by the message listener; confirm.
    STICKY_DELAY = 3
def __init__(self, bot):
    """Set up Config storage and per-channel runtime state."""
    super().__init__()
    self.bot = bot
    self.conf = Config.get_conf(self, identifier=UNIQUE_ID, force_registration=True)
    self.conf.register_channel(
        stickied=None,  # This is for [p]sticky
        header_enabled=True,
        advstickied={"content": None, "embed": {}},  # This is for [p]stickyexisting
        last=None,  # message ID of the most recently posted sticky
    )
    # Channels currently being unstickied; lets listeners skip reposting.
    self.locked_channels = set()
@checks.mod_or_permissions(manage_messages=True)
@commands.guild_only()
@commands.group(invoke_without_command=True)
async def sticky(self, ctx: commands.Context, *, content: str):
    """Sticky a message to this channel."""
    channel = ctx.channel
    settings = self.conf.channel(channel)
    header_enabled = await settings.header_enabled()
    # Prefix the sticky header unless it has been toggled off for the channel.
    to_send = (
        f"__***Stickied Message***__\n\n{content}" if header_enabled else content
    )
    msg = await channel.send(to_send)
    # NOTE(review): settings.set replaces the whole channel record, so any
    # previous advstickied data is dropped here — appears intentional when
    # switching back from [p]sticky existing; confirm.
    await settings.set(
        {"stickied": content, "header_enabled": header_enabled, "last": msg.id}
    )
@checks.mod_or_permissions(manage_messages=True)
@commands.guild_only()
@sticky.command(name="existing")
async def sticky_existing(
    self, ctx: commands.Context, *, message_id_or_url: discord.Message
):
    """Sticky an existing message to this channel.

    This will try to sticky the content and embed of the message.
    Attachments will not be added to the stickied message.

    Stickying messages with multiple embeds may result in unexpected
    behaviour, as the bot cannot send multiple rich embeds in a
    single message.
    """
    # Rename for readability; the long parameter name exists for the help text.
    message = message_id_or_url
    del message_id_or_url
    channel = ctx.channel
    settings = self.conf.channel(channel)
    if not (message.content or message.embeds):
        await ctx.send("That message doesn't have any content or embed!")
        return
    # Only the first embed is kept (one rich embed per bot message).
    embed = next(iter(message.embeds), None)
    content = message.content or None
    stickied_msg = await self.send_advstickied(
        channel, content, embed, header_enabled=await settings.header_enabled()
    )
    embed_data = embed.to_dict() if embed is not None else None
    # NOTE(review): this replaces the whole channel record, clearing any plain
    # 'stickied' text set via [p]sticky — appears intentional; confirm.
    await settings.set(
        {
            "advstickied": {"content": content, "embed": embed_data},
            "last": stickied_msg.id,
            # We don't want to overwrite the header setting
            "header_enabled": await settings.header_enabled(),
        }
    )
@checks.mod_or_permissions(manage_messages=True)
@commands.guild_only()
@sticky.command(name="toggleheader")
async def sticky_toggleheader(self, ctx: commands.Context, true_or_false: bool):
    """Toggle the header for stickied messages in this channel.

    The header is enabled by default.
    """
    # Takes effect for the next (re)posted sticky; existing ones are untouched.
    await self.conf.channel(ctx.channel).header_enabled.set(true_or_false)
    await ctx.tick()
@checks.mod_or_permissions(manage_messages=True)
@commands.guild_only()
@commands.command()
async def unsticky(self, ctx: commands.Context, force: bool = False):
"""Remove the sticky message from this channel.
Deleting the sticky message will also unsticky it.
Do `[p]unsticky yes` to skip the confirmation prompt.
"""
channel = ctx.channel
settings = self.conf.channel(channel)
self.locked_channels.add(channel)
try:
last_id = await settings.last()
if last_id is None:
await ctx.send("There is no stickied message in this channel.")
return
msg = None
if not force and channel.permissions_for(ctx.me).add_reactions:
msg = await ctx.send(
"This will unsticky the current sticky message from "
"this channel. Are you sure you want to do this?"
)
start_adding_reactions(msg, emojis=ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg)
try:
resp = await ctx.bot.wait_for(
"reaction_add", check=pred, timeout=30
)
except asyncio.TimeoutError:
resp = None
if resp is None or pred.result is False:
with contextlib.suppress(discord.NotFound):
await msg.delete()
return
else:
await ctx.send(
f"I don't have the add_reactions permission here. "
f"Use `{ctx.prefix}unsticky yes` to remove the sticky message."
)
return
await settings.set(
# Preserve the header setting
{"header_enabled": await settings.header_enabled()}
)
with contextlib.suppress(discord.HTTPException):
last = await channel.fetch_message(last_id)
await last.delete()
if msg is not None:
with contextlib.suppress(discord.NotFound):
await msg.delete()
await ctx.tick()
finally:
self.locked_channels.remove(channel)
@commands.Cog.listener()
    async def on_message(self, message: discord.Message):
        """Event which checks for sticky messages to resend."""
        channel = message.channel
        # Ignore DMs, and channels where an unsticky is currently in progress
        # (see `unsticky`, which adds to locked_channels).
        early_exit = (
            isinstance(channel, discord.abc.PrivateChannel)
            or channel in self.locked_channels
        )
        if early_exit:
            return
        settings = self.conf.channel(channel)
        last = await settings.last()
        if last is None or message.id == last:
            # No sticky in this channel, or the new message *is* the sticky.
            return
        # Delete the previous sticky; the raw-delete listener then re-posts it
        # below the newest message.
        try:
            last = await channel.fetch_message(last)
        except discord.NotFound:
            # Old sticky already gone; nothing to delete.
            pass
        except discord.Forbidden:
            # NOTE(review): typo "retreive" left untouched -- this is a
            # runtime log string.
            log.fatal(
                "The bot does not have permission to retreive the stickied message"
            )
        else:
            with contextlib.suppress(discord.NotFound):
                await last.delete()
@commands.Cog.listener()
async def on_raw_message_delete(
self, payload: discord.raw_models.RawMessageDeleteEvent
):
"""If the stickied message was deleted, re-post it."""
channel = self.bot.get_channel(payload.channel_id)
settings = self.conf.channel(channel)
settings_dict = await settings.all()
if payload.message_id != settings_dict["last"]:
return
header = settings_dict["header_enabled"]
if settings_dict["stickied"] is not None:
content = settings_dict["stickied"]
to_send = f"__***Stickied Message***__\n\n{content}" if header else content
new = await channel.send(to_send)
else:
advstickied = settings_dict["advstickied"]
if advstickied["content"] or advstickied["embed"]:
new = await self.send_advstickied(
channel, **advstickied, header_enabled=header
)
else:
# The last stickied message was deleted but there's nothing to send
await settings.last.clear()
return
await settings.last.set(new.id)
@staticmethod
async def send_advstickied(
channel: discord.TextChannel,
content: Optional[str],
embed: Optional[Union[discord.Embed, Dict[str, Any]]],
*,
header_enabled: bool = False,
):
"""Send the content and embed as a stickied message."""
if embed and isinstance(embed, dict):
embed = discord.Embed.from_dict(embed)
if header_enabled:
header_text = "__***Stickied Message***__"
content = f"{header_text}\n\n{content}" if content else header_text
return await channel.send(content, embed=embed)
|
from collections import namedtuple
def namedtuplefetchall(cursor):
    """Return all rows from a cursor as a namedtuple"""
    # Column names come from the DB-API cursor description (first element of
    # each 7-tuple).
    column_names = [column[0] for column in cursor.description]
    Result = namedtuple('Result', column_names)
    return [Result._make(row) for row in cursor.fetchall()]
|
"""Handle specific queries.
This module handles queries of type /<collection>/<id> where we respond with a
specific document. We can also respond with a group of documents, in the case of
a request to the special keyword 'latest' - /<collection>/latest. These have no
pagination, as the 'latest' items can change quite fast on Reddit.
"""
import flask
import kludd.database
import kludd.jsonapi
specific = flask.Blueprint('specific', __name__)
@specific.route('/<collection_name>/<reddit_id>', methods=['GET'])
@kludd.jsonapi.bundle
@kludd.jsonapi.check_mime
def _handle_specific_request(collection_name=None, reddit_id=None):
    """This function handles queries of a) individual items and b) latest
    items. Obviously, there is no pagination for single items. There is also
    no pagination available for the latest items, as things tend to move
    quite fast on Reddit.
    """
    collection = kludd.database.get_db()[collection_name]
    if reddit_id == 'latest':
        res = []
        try:
            # NOTE(review): the '100' default is always clamped to 20 by
            # min(), so the default is unreachable -- confirm whether the
            # cap (20) or the default (100) is the intended maximum.
            limit = min(20, int(flask.request.args.get('limit', '100')))
        except ValueError:
            # Non-integer ?limit= query parameter.
            flask.abort(400)
        else:
            # Newest documents first; the internal Mongo '_id' is excluded.
            cursor = collection.find({
            }, {
                '_id': False
            }).sort([('created', -1)]).limit(limit)
            for doc in cursor:
                data = kludd.jsonapi.make_data_object(doc, collection_name)
                res.append(data)
        return {'data': res}, 200
    # Single-item lookup by reddit_id.
    doc = collection.find_one({'reddit_id': reddit_id}, {'_id': False})
    if doc is None:
        flask.abort(404)
    return {'data': kludd.jsonapi.make_data_object(doc, collection_name)}, 200
|
from collections import defaultdict
from functools import partial
import unittest
from repeated_test import tup, WithTestClass
from sigtools._util import funcsigs
__all__ = [
'conv_first_posarg',
'transform_exp_sources', 'transform_real_sources',
'SignatureTests', 'Fixtures', 'tup'
]
def conv_first_posarg(sig):
    """Return *sig* with its first parameter converted to positional-only.

    Empty signatures are returned unchanged.
    """
    params = list(sig.parameters.values())
    if not params:
        return sig
    converted = params[0].replace(kind=params[0].POSITIONAL_ONLY)
    return sig.replace(parameters=[converted] + params[1:])
def func_to_name(func):
    """Return a stable, human-readable name for *func*.

    functools.partial objects get a synthetic id-based name; objects with
    no __name__ fall back to their str() (prefixed with '_' unless the
    object equals its own str, e.g. plain strings).
    """
    if isinstance(func, partial):
        inner = func_to_name(func.func)
        return 'functoolspartial_' + str(id(func)) + '_' + inner
    try:
        return func.__name__
    except AttributeError:
        pass
    text = str(func)
    return text if text == func else '_' + text
def transform_exp_sources(d, subject=None):
    """Normalize an expected-sources mapping for comparison.

    Parameters
    ----------
    d : dict
        Maps functions (or the literal ``0`` meaning "the subject", or the
        special key ``'+depths'``) to the parameter names they contribute.
    subject : callable or None
        Function substituted for the implicit ``0`` key.

    Returns
    -------
    dict
        Maps parameter name -> list of contributing function names, plus a
        ``'+depths'`` entry: a name->depth dict when depths were supplied,
        otherwise a sorted list of names with the subject first.
    """
    ret = defaultdict(list)
    funclist = []
    subject_name = None if subject is None else func_to_name(subject)
    for func, params in d.items():
        if func == '+depths':
            # Handled separately below.
            continue
        if func == 0:
            # 0 is shorthand for the subject function.
            if subject is None:
                raise ValueError(
                    "Used implicit function with no provided subject")
            func = subject
        func = func_to_name(func)
        funclist.append(func)
        # Invert the mapping: parameter -> contributing function names.
        for param in params:
            ret[param].append(func)
    if '+depths' not in d:
        # No explicit depths: synthesize an ordering, subject first and the
        # remaining functions sorted by name.
        val = sorted(funclist)
        if subject is not None:
            try:
                val.remove(subject_name)
            except ValueError:
                pass
            val.insert(0, subject_name)
    else:
        val = d['+depths']
        if isinstance(val, list):
            # A list assigns depths implicitly by position.
            val = dict((func_to_name(f), i) for i, f in enumerate(val))
        else:
            val = dict((func_to_name(f), v) for f, v in val.items())
    ret['+depths'] = val
    return dict(ret)
def transform_real_sources(d):
    """Replace function objects with their names in a real-sources dict.

    The special '+depths' entry maps names to depths; every other entry
    maps a parameter name to a list of function names.
    """
    result = {}
    for param, funcs in d.items():
        if param == '+depths':
            result[param] = {
                func_to_name(func): depth for func, depth in funcs.items()}
        else:
            result[param] = [func_to_name(func) for func in funcs]
    return result
class SignatureTests(unittest.TestCase):
    """Base TestCase with helpers for comparing signatures and sources."""
    # Always show full diffs on assertion failures.
    maxDiff = None
    def assertSigsEqual(self, found, expected, *args, **kwargs):
        """Assert two signature objects compare equal.

        With ``conv_first_posarg=True``, a failed comparison is retried
        after converting both signatures' first parameter to
        positional-only.
        """
        conv = kwargs.pop('conv_first_posarg', False)
        if expected != found:
            if conv:
                expected = conv_first_posarg(expected)
                found = conv_first_posarg(found)
                if expected == found:
                    return
            raise AssertionError(
                'Did not get expected signature({0}), got {1} instead.'
                .format(expected, found))
    def assertSourcesEqual(self, found, expected, func=None, depth_order=False):
        """Assert that real sources match the expected-sources spec.

        When ``depth_order`` is set, the relative ordering of depths is also
        compared (restricted to names present in the expected depths).
        """
        r = transform_real_sources(found)
        e = transform_exp_sources(expected, func)
        if depth_order:
            # Compare depths by order separately, not by exact value.
            rd = r.pop('+depths')
            ed = e.pop('+depths')
        self.assertEqual(r, e)
        if depth_order:
            self.assertEqual(
                [f for f in sorted(rd, key=rd.get) if f in ed],
                [f for f in sorted(ed, key=ed.get)])
    def downgrade_sig(self, sig):
        # Rebuild the signature with the funcsigs backport's classes.
        return funcsigs.Signature(
            sig.parameters.values(),
            return_annotation=sig.return_annotation)
# Fixture factory bound to SignatureTests (see repeated_test.WithTestClass).
Fixtures = WithTestClass(SignatureTests)
|
from django.http import HttpResponse
from django.shortcuts import render, render_to_response
from django.template import RequestContext, loader
from django_translate.services import trans as _, transchoice
def hello(request):
    """Render the hello page (translations done in the template)."""
    # `render_to_response(..., context=RequestContext(request))` is deprecated
    # and removed in Django 3.0; `render` is the supported equivalent.
    return render(request, "hello.html")
def apples(request):
    """Render the apples page (translations done in the template)."""
    # render() replaces the deprecated render_to_response/RequestContext combo.
    return render(request, "apples.html")
def pythonic_apples(request):
    """Render the apples page with translations resolved in Python code.

    Demonstrates `trans` (aliased `_`) and `transchoice` pluralization from
    django_translate, pre-rendering the HTML snippet server-side.
    """
    rendered = (
        u"<h1>{0}</h1>"
        "<p>{1}</p>"
        "<p>{2}</p>"
        "<p>{3}</p>".format(
            _("apples.header"),
            _("apples.want_some", {"fruits": "apples"}),
            transchoice("apples.praise_n", 1),
            transchoice("apples.praise_n", 3),
        )
    )
    # render() replaces the deprecated render_to_response.
    return render(request, "apples_python.html", {"rendered": rendered})
def po(request):
    """Render the po-file demo page."""
    # render() replaces the deprecated render_to_response/RequestContext combo.
    return render(request, "po.html")
|
"""
Example of creating a radar chart (a.k.a. a spider or star chart) [1]_.
Although this example allows a frame of either 'circle' or 'polygon', polygon
frames don't have proper gridlines (the lines are circles instead of polygons).
It's possible to get a polygon grid by setting GRIDLINE_INTERPOLATION_STEPS in
matplotlib.axis to the desired number of vertices, but the orientation of the
polygon is not aligned with the radial axes.
.. [1] http://en.wikipedia.org/wiki/Radar_chart
From: http://matplotlib.org/examples/api/radar_chart.html
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
    """
    Create a radar chart with `num_vars` axes.
    This function creates a RadarAxes projection and registers it.
    Parameters
    ----------
    num_vars : int
        Number of variables for radar chart.
    frame : {'circle' | 'polygon'}
        Shape of frame surrounding axes.
    Returns
    -------
    theta : ndarray
        Evenly spaced axis angles in radians, rotated so the first axis
        points straight up.
    """
    # calculate evenly-spaced axis angles
    theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
    # rotate theta such that the first axis is at the top
    theta += np.pi/2
    def draw_poly_patch(self):
        # Polygon circumscribed by the unit circle centered at (0.5, 0.5).
        verts = unit_poly_verts(theta)
        return plt.Polygon(verts, closed=True, edgecolor='k')
    def draw_circle_patch(self):
        # unit circle centered on (0.5, 0.5)
        return plt.Circle((0.5, 0.5), 0.5)
    patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
    if frame not in patch_dict:
        raise ValueError('unknown value for `frame`: %s' % frame)
    class RadarAxes(PolarAxes):
        name = 'radar'
        # use 1 line segment to connect specified points
        RESOLUTION = 1
        # define draw_frame method
        draw_patch = patch_dict[frame]
        def fill(self, *args, **kwargs):
            """Override fill so that line is closed by default"""
            closed = kwargs.pop('closed', True)
            return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
        def plot(self, *args, **kwargs):
            """Override plot so that line is closed by default"""
            lines = super(RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                self._close_line(line)
            # Bug fix: preserve Axes.plot's contract of returning the list
            # of Line2D objects; the original override returned None.
            return lines
        def _close_line(self, line):
            # Append the first point again so the outline closes on itself.
            x, y = line.get_data()
            # FIXME: markers at x[0], y[0] get doubled-up
            if x[0] != x[-1]:
                x = np.concatenate((x, [x[0]]))
                y = np.concatenate((y, [y[0]]))
                line.set_data(x, y)
        def set_varlabels(self, labels):
            """Label each spoke (angles are converted to degrees)."""
            self.set_thetagrids(theta * 180/np.pi, labels)
        def _gen_axes_patch(self):
            return self.draw_patch()
        def _gen_axes_spines(self):
            if frame == 'circle':
                return PolarAxes._gen_axes_spines(self)
            # The following is a hack to get the spines (i.e. the axes frame)
            # to draw correctly for a polygon frame.
            # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
            spine_type = 'circle'
            verts = unit_poly_verts(theta)
            # close off polygon by repeating first vertex
            verts.append(verts[0])
            path = Path(verts)
            spine = Spine(self, spine_type, path)
            spine.set_transform(self.transAxes)
            return {'polar': spine}
    register_projection(RadarAxes)
    return theta
def unit_poly_verts(theta):
    """Return vertices of polygon for subplot axes.
    This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
    """
    center_x = center_y = radius = 0.5
    return [(radius * np.cos(t) + center_x, radius * np.sin(t) + center_y)
            for t in theta]
def example_data():
    """Return example pollution-source profile data for the radar charts.

    Returns a dict with key 'column names' (the 9 species labels) and one
    key per scenario, each mapping to five 9-element source-profile rows.
    """
    #The following data is from the Denver Aerosol Sources and Health study.
    #See doi:10.1016/j.atmosenv.2008.12.017
    #
    #The data are pollution source profile estimates for five modeled pollution
    #sources (e.g., cars, wood-burning, etc) that emit 7-9 chemical species.
    #The radar charts are experimented with here to see if we can nicely
    #visualize how the modeled source profiles change across four scenarios:
    #  1) No gas-phase species present, just seven particulate counts on
    #     Sulfate
    #     Nitrate
    #     Elemental Carbon (EC)
    #     Organic Carbon fraction 1 (OC)
    #     Organic Carbon fraction 2 (OC2)
    #     Organic Carbon fraction 3 (OC3)
    #     Pyrolized Organic Carbon (OP)
    #  2)Inclusion of gas-phase specie carbon monoxide (CO)
    #  3)Inclusion of gas-phase specie ozone (O3).
    #  4)Inclusion of both gas-phase speciesis present...
    data = {
        'column names':
            ['Sulfate', 'Nitrate', 'EC', 'OC1', 'OC2', 'OC3', 'OP', 'CO',
             'O3'],
        'Basecase':
            [[0.88, 0.01, 0.03, 0.03, 0.00, 0.06, 0.01, 0.00, 0.00],
             [0.07, 0.95, 0.04, 0.05, 0.00, 0.02, 0.01, 0.00, 0.00],
             [0.01, 0.02, 0.85, 0.19, 0.05, 0.10, 0.00, 0.00, 0.00],
             [0.02, 0.01, 0.07, 0.01, 0.21, 0.12, 0.98, 0.00, 0.00],
             [0.01, 0.01, 0.02, 0.71, 0.74, 0.70, 0.00, 0.00, 0.00]],
        'With CO':
            [[0.88, 0.02, 0.02, 0.02, 0.00, 0.05, 0.00, 0.05, 0.00],
             [0.08, 0.94, 0.04, 0.02, 0.00, 0.01, 0.12, 0.04, 0.00],
             [0.01, 0.01, 0.79, 0.10, 0.00, 0.05, 0.00, 0.31, 0.00],
             [0.00, 0.02, 0.03, 0.38, 0.31, 0.31, 0.00, 0.59, 0.00],
             [0.02, 0.02, 0.11, 0.47, 0.69, 0.58, 0.88, 0.00, 0.00]],
        'With O3':
            [[0.89, 0.01, 0.07, 0.00, 0.00, 0.05, 0.00, 0.00, 0.03],
             [0.07, 0.95, 0.05, 0.04, 0.00, 0.02, 0.12, 0.00, 0.00],
             [0.01, 0.02, 0.86, 0.27, 0.16, 0.19, 0.00, 0.00, 0.00],
             [0.01, 0.03, 0.00, 0.32, 0.29, 0.27, 0.00, 0.00, 0.95],
             [0.02, 0.00, 0.03, 0.37, 0.56, 0.47, 0.87, 0.00, 0.00]],
        'CO & O3':
            [[0.87, 0.01, 0.08, 0.00, 0.00, 0.04, 0.00, 0.00, 0.01],
             [0.09, 0.95, 0.02, 0.03, 0.00, 0.01, 0.13, 0.06, 0.00],
             [0.01, 0.02, 0.71, 0.24, 0.13, 0.16, 0.00, 0.50, 0.00],
             [0.01, 0.03, 0.00, 0.28, 0.24, 0.23, 0.00, 0.44, 0.88],
             [0.02, 0.00, 0.18, 0.45, 0.64, 0.55, 0.86, 0.00, 0.16]]}
    return data
if __name__ == '__main__':
    # Nine variables -> nine spokes per radar chart.
    N = 9
    theta = radar_factory(N, frame='polygon')
    data = example_data()
    spoke_labels = data.pop('column names')
    fig = plt.figure(figsize=(9, 9))
    fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
    colors = ['b', 'r', 'g', 'm', 'y']
    # Plot the four cases from the example data on separate axes
    for n, title in enumerate(data.keys()):
        ax = fig.add_subplot(2, 2, n+1, projection='radar')
        plt.rgrids([0.2, 0.4, 0.6, 0.8])
        ax.set_title(title, weight='bold', size='medium', position=(0.5, 1.1),
                     horizontalalignment='center', verticalalignment='center')
        # One filled outline per pollution source.
        for d, color in zip(data[title], colors):
            ax.plot(theta, d, color=color)
            ax.fill(theta, d, facecolor=color, alpha=0.25)
        ax.set_varlabels(spoke_labels)
    # add legend relative to top-left plot
    plt.subplot(2, 2, 1)
    labels = ('Factor 1', 'Factor 2', 'Factor 3', 'Factor 4', 'Factor 5')
    legend = plt.legend(labels, loc=(0.9, .95), labelspacing=0.1)
    plt.setp(legend.get_texts(), fontsize='small')
    plt.figtext(0.5, 0.965, '5-Factor Solution Profiles Across Four Scenarios',
                ha='center', color='black', weight='bold', size='large')
    plt.show()
|
from .added_loss_term import AddedLossTerm
class InducingPointKernelAddedLossTerm(AddedLossTerm):
    """Added loss term for inducing-point (sparse GP) approximations.

    ``loss`` computes ``0.5 * sum(diag(K_prior - K_var) / noise_diag)``: the
    noise-scaled trace of the gap between the exact prior covariance and
    its low-rank approximation.

    NOTE(review): in the SGPR ELBO this trace correction enters with a
    negative sign (-1/(2*sigma^2) * tr(K - Q)); whether negation happens
    here or in the consumer of ``loss`` cannot be seen from this file --
    confirm the sign convention against the MLL that adds this term.
    """
    def __init__(self, variational_dist, prior_dist, likelihood):
        # Distributions exposing `lazy_covariance_matrix`; presumably
        # gpytorch MultivariateNormals -- confirm at call sites.
        self.prior_dist = prior_dist
        self.variational_dist = variational_dist
        self.likelihood = likelihood
    def loss(self, *params):
        prior_covar = self.prior_dist.lazy_covariance_matrix
        variational_covar = self.variational_dist.lazy_covariance_matrix
        # Per-point approximation gap: diag(K) - diag(Q).
        diag = prior_covar.diag() - variational_covar.diag()
        # Batch/data shape: everything but the last covariance dimension.
        shape = prior_covar.shape[:-1]
        noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
        return 0.5 * (diag / noise_diag).sum()
|
"""
Runs Bowtie on single-end or paired-end data.
"""
import optparse, os, shutil, sys, tempfile
def stop_err( msg ):
    """Write `msg` to stderr and abort with a non-zero exit status."""
    sys.stderr.write( "%s\n" % msg )
    # Bug fix: sys.exit() with no argument exits with status 0, so callers
    # and shell pipelines could not detect the failure. Exit 1 instead.
    sys.exit(1)
def __main__():
    """Galaxy tool driver (Python 2): index, align with Bowtie, post-process.

    Workflow: parse CLI options -> optionally build a Bowtie index for a
    history reference in a temp dir -> assemble the bowtie command line ->
    run the alignment -> optionally strip SAM header lines -> clean up.
    """
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option('', '--threads', dest='threads', help='The number of threads to run')
    parser.add_option('', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format')
    parser.add_option('', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format')
    parser.add_option('', '--output', dest='output', help='The output file')
    parser.add_option('', '--paired', dest='paired', help='Whether the data is single- or paired-end')
    parser.add_option('', '--genomeSource', dest='genomeSource', help='The type of reference provided')
    parser.add_option('', '--ref', dest='ref', help='The reference genome to use or index')
    parser.add_option('', '--skip', dest='skip', help='Skip the first n reads')
    parser.add_option('', '--alignLimit', dest='alignLimit', help='Only align the first n reads')
    parser.add_option('', '--trimH', dest='trimH', help='Trim n bases from high-quality (left) end of each read before alignment')
    parser.add_option('', '--trimL', dest='trimL', help='Trim n bases from low-quality (right) end of each read before alignment')
    parser.add_option('', '--mismatchSeed', dest='mismatchSeed', help='Maximum number of mismatches permitted in the seed')
    parser.add_option('', '--mismatchQual', dest='mismatchQual', help='Maximum permitted total of quality values at mismatched read positions')
    parser.add_option('', '--seedLen', dest='seedLen', help='Seed length')
    parser.add_option('', '--rounding', dest='rounding', help='Whether or not to round to the nearest 10 and saturating at 30')
    parser.add_option('', '--maqSoapAlign', dest='maqSoapAlign', help='Choose MAQ- or SOAP-like alignment policy')
    parser.add_option('', '--tryHard', dest='tryHard', help='Whether or not to try as hard as possible to find valid alignments when they exist')
    parser.add_option('', '--valAlign', dest='valAlign', help='Report up to n valid arguments per read')
    parser.add_option('', '--allValAligns', dest='allValAligns', help='Whether or not to report all valid alignments per read')
    parser.add_option('', '--suppressAlign', dest='suppressAlign', help='Suppress all alignments for a read if more than n reportable alignments exist')
    parser.add_option('', '--best', dest='best', help="Whether or not to make Bowtie guarantee that reported singleton alignments are 'best' in terms of stratum and in terms of the quality values at the mismatched positions")
    parser.add_option('', '--maxBacktracks', dest='maxBacktracks', help='Maximum number of backtracks permitted when aligning a read')
    parser.add_option('', '--strata', dest='strata', help='Whether or not to report only those alignments that fall in the best stratum if many valid alignments exist and are reportable')
    parser.add_option('', '--minInsert', dest='minInsert', help='Minimum insert size for valid paired-end alignments')
    parser.add_option('', '--maxInsert', dest='maxInsert', help='Maximum insert size for valid paired-end alignments')
    parser.add_option('', '--mateOrient', dest='mateOrient', help='The upstream/downstream mate orientation for valid paired-end alignment against the forward reference strand')
    parser.add_option('', '--maxAlignAttempt', dest='maxAlignAttempt', help='Maximum number of attempts Bowtie will make to match an alignment for one mate with an alignment for the opposite mate')
    parser.add_option('', '--forwardAlign', dest='forwardAlign', help='Whether or not to attempt to align the forward reference strand')
    parser.add_option('', '--reverseAlign', dest='reverseAlign', help='Whether or not to attempt to align the reverse-complement reference strand')
    parser.add_option('', '--offrate', dest='offrate', help='Override the offrate of the index to n')
    parser.add_option('', '--seed', dest='seed', help='Seed for pseudo-random number generator')
    parser.add_option('', '--dbkey', dest='dbkey', help='')
    parser.add_option('', '--params', dest='params', help='Whether to use default or specified parameters')
    parser.add_option('', '--iauto_b', dest='iauto_b', help='Automatic or specified behavior')
    parser.add_option('', '--ipacked', dest='ipacked', help='Whether or not to use a packed representation for DNA strings')
    parser.add_option('', '--ibmax', dest='ibmax', help='Maximum number of suffixes allowed in a block')
    parser.add_option('', '--ibmaxdivn', dest='ibmaxdivn', help='Maximum number of suffixes allowed in a block as a fraction of the length of the reference')
    parser.add_option('', '--idcv', dest='idcv', help='The period for the difference-cover sample')
    parser.add_option('', '--inodc', dest='inodc', help='Whether or not to disable the use of the difference-cover sample')
    parser.add_option('', '--inoref', dest='inoref', help='Whether or not to build the part of the reference index used only in paried-end alignment')
    parser.add_option('', '--ioffrate', dest='ioffrate', help='How many rows get marked during annotation of some or all of the Burrows-Wheeler rows')
    parser.add_option('', '--iftab', dest='iftab', help='The size of the lookup table used to calculate an initial Burrows-Wheeler range with respect to the first n characters of the query')
    parser.add_option('', '--intoa', dest='intoa', help='Whether or not to convert Ns in the reference sequence to As')
    parser.add_option('', '--iendian', dest='iendian', help='Endianness to use when serializing integers to the index file')
    parser.add_option('', '--iseed', dest='iseed', help='Seed for the pseudorandom number generator')
    parser.add_option('', '--icutoff', dest='icutoff', help='Number of first bases of the reference sequence to index')
    parser.add_option('', '--indexSettings', dest='index_settings', help='Whether or not indexing options are to be set')
    parser.add_option('', '--suppressHeader', dest='suppressHeader', help='Suppress header')
    (options, args) = parser.parse_args()
    # make temp directory for placement of indices and copy reference file there if necessary
    tmp_index_dir = tempfile.mkdtemp()
    # index if necessary
    if options.genomeSource == 'history':
        # set up commands
        if options.index_settings =='index_pre_set':
            indexing_cmds = ''
        else:
            try:
                # Each ('', '--flag')[condition] pair emits the flag only
                # when the condition holds; int() conversions raise
                # ValueError on non-numeric values, which falls through to
                # the empty-command fallback below.
                indexing_cmds = '%s %s %s %s %s %s %s --offrate %s %s %s %s %s %s' % \
                                (('','--noauto')[options.iauto_b=='set'],
                                 ('','--packed')[options.ipacked=='packed'],
                                 ('','--bmax %s'%options.ibmax)[options.ibmax!='None' and options.ibmax>=1],
                                 ('','--bmaxdivn %s'%options.ibmaxdivn)[options.ibmaxdivn!='None'],
                                 ('','--dcv %s'%options.idcv)[options.idcv!='None'],
                                 ('','--nodc')[options.inodc=='nodc'],
                                 ('','--noref')[options.inoref=='noref'], options.ioffrate,
                                 ('','--ftabchars %s'%options.iftab)[int(options.iftab)>=0],
                                 ('','--ntoa')[options.intoa=='yes'],
                                 ('--little','--big')[options.iendian=='big'],
                                 ('','--seed %s'%options.iseed)[int(options.iseed)>0],
                                 ('','--cutoff %s'%options.icutoff)[int(options.icutoff)>0])
            except ValueError:
                indexing_cmds = ''
        try:
            shutil.copy(options.ref, tmp_index_dir)
        except Exception, erf:
            stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
        options.ref = os.path.join(tmp_index_dir,os.path.split(options.ref)[1])
        cmd1 = 'bowtie-build %s -f %s %s 2> /dev/null' % (indexing_cmds, options.ref, options.ref)
        try:
            os.chdir(tmp_index_dir)
            os.system(cmd1)
        except Exception, erf:
            stop_err('Error indexing reference sequence\n' + str(erf))
    # set up aligning and generate aligning command options
    # automatically set threads in both cases
    if options.params == 'pre_set':
        aligning_cmds = '-p %s -S' % options.threads
    else:
        try:
            # NOTE(review): the '-a' selector calls int(options.allValAligns)
            # when it equals 'doAllValAligns', which always raises ValueError
            # and aborts via stop_err -- confirm whether '-a' can ever be
            # enabled through this path.
            aligning_cmds = '%s %s %s %s %s %s %s %s %s %s %s %s %s %s ' \
                            '%s %s %s %s %s %s %s %s %s %s -p %s -S' % \
                            (('','-s %s'%options.skip)[options.skip!='None'],
                             ('','-u %s'%options.alignLimit)[int(options.alignLimit)>0],
                             ('','-5 %s'%options.trimH)[int(options.trimH)>=0],
                             ('','-3 %s'%options.trimL)[int(options.trimL)>=0],
                             ('','-n %s'%options.mismatchSeed)[options.mismatchSeed=='0' or options.mismatchSeed=='1' or options.mismatchSeed=='2' or options.mismatchSeed=='3'],
                             ('','-e %s'%options.mismatchQual)[int(options.mismatchQual)>=0],
                             ('','-l %s'%options.seedLen)[int(options.seedLen)>=5],
                             ('','--nomaqround')[options.rounding=='noRound'],
                             ('','-v %s'%options.maqSoapAlign)[options.maqSoapAlign!='-1'],
                             ('','-I %s'%options.minInsert)[options.minInsert!='None'],
                             ('','-X %s'%options.maxInsert)[options.maxInsert!='None'],
                             ('','--%s'%options.mateOrient)[options.mateOrient!='None'],
                             ('','--pairtries %s'%options.maxAlignAttempt)[options.maxAlignAttempt!='None' and int(options.maxAlignAttempt)>=0],
                             ('','--nofw')[options.forwardAlign=='noForward'],
                             ('','--norc')[options.reverseAlign=='noReverse'],
                             ('','--maxbts %s'%options.maxBacktracks)[options.maxBacktracks!='None' and (options.mismatchSeed=='2' or options.mismatchSeed=='3')],
                             ('','-y')[options.tryHard=='doTryHard'],
                             ('','-k %s'%options.valAlign)[options.valAlign!='None' and int(options.valAlign)>=0],
                             ('','-a')[options.allValAligns=='doAllValAligns' and int(options.allValAligns)>=0],
                             ('','-m %s'%options.suppressAlign)[int(options.suppressAlign)>=0],
                             ('','--best')[options.best=='doBest'],
                             ('','--strata')[options.strata=='doStrata'],
                             ('','-o %s'%options.offrate)[int(options.offrate)>=0],
                             ('','--seed %s'%options.seed)[int(options.seed)>=0],
                             options.threads)
        except ValueError, erf:
            stop_err('Something is wrong with the alignment parameters and the alignment could not be run\n' + str(erf))
    # prepare actual aligning commands
    if options.paired == 'paired':
        cmd2 = 'bowtie %s %s -1 %s -2 %s > %s 2> /dev/null' % (aligning_cmds, options.ref, options.input1, options.input2, options.output)
    else:
        cmd2 = 'bowtie %s %s %s > %s 2> /dev/null' % (aligning_cmds, options.ref, options.input1, options.output)
    # align
    try:
        os.system(cmd2)
    except Exception, erf:
        stop_err("Error aligning sequence\n" + str(erf))
    # remove header if necessary
    if options.suppressHeader == 'true':
        # Copy output aside, then rewrite it with leading SAM header
        # (@HD/@SQ/@RG/@PG/@CO) lines stripped.
        tmp_out = tempfile.NamedTemporaryFile()
        cmd3 = 'cp %s %s' % (options.output, tmp_out.name)
        try:
            os.system(cmd3)
        except Exception, erf:
            stop_err("Error copying output file before removing headers\n" + str(erf))
        output = file(tmp_out.name, 'r')
        fout = file(options.output, 'w')
        header = True
        line = output.readline()
        while line.strip() != '':
            if header:
                if line.startswith('@HD') or line.startswith('@SQ') or line.startswith('@RG') or line.startswith('@PG') or line.startswith('@CO'):
                    pass
                else:
                    header = False
                    fout.write(line)
            else:
                fout.write(line)
            line = output.readline()
        fout.close()
        tmp_out.close()
    # clean up temp dir
    if os.path.exists(tmp_index_dir):
        shutil.rmtree(tmp_index_dir)
# Script entry point.
if __name__=="__main__": __main__()
|
import inspect
import os
import re
import textwrap
def remove_indent(text, include_firstline=False):
    """Remove the common leading indentation from the provided text.

    The largest run of leading whitespace shared by all considered lines is
    stripped.

    Parameters
    ----------
    text : str or list of str
        Text (or pre-split lines) from which the indents will be removed.
    include_firstline : bool
        If True, the first line participates in the dedent as well.
        Default is False.

    Returns
    -------
    dedented_text : str
        Text after the shared indents are removed.
    """
    lines = text.split('\n') if isinstance(text, str) else text
    if include_firstline:
        return textwrap.dedent('\n'.join(lines))
    # Keep the first line untouched; dedent only the remainder.
    first, rest = lines[0], lines[1:]
    return '{0}\n{1}'.format(first, textwrap.dedent('\n'.join(rest)))
def wrap(text, width=100, indent_level=0, tabsize=4, edges=('', ''), added_indent='',
         remove_initial_indent=False, **kwargs):
    """Wrap a text with the given line length and indentations.
    Parameters
    ----------
    text : str
        Text that will be wrapped
    width : int
        Maximum number of characters allowed in each line
    indent_level : int
        Number of indents (tabs) that are needed for the docstring
    tabsize : int
        Number of spaces that corresponds to a tab
    edges : 2-tuple of string
        Beginning and end of each line (after indent).
    added_indent : str, 2-tuple/list of str
        Indentation to be added after the indentation by the levels.
        If 2 strings are given, then first string corresponds to the initial indent and the second
        string to the subsequent indents.
        Default is no added indent.
    remove_initial_indent : bool
        Flag for removing the indentation on the first line.
    kwargs : dict
        Other options for the textwrap.fill.
        Default replaces tabs with spaces ('expand_tabs': True), does not replace whitespace
        ('replace_whitespace': False), drops whitespaces (that are not indentations) before or after
        sentences ('drop_whitespace': True), and does not break long word into smaller pieces
        ('break_long_words': False).

    Returns
    -------
    wrapped_text : str
        Wrapped text with indents and edge strings applied to each line.
    """
    # default
    kwargs.setdefault('expand_tabs', True)
    kwargs.setdefault('replace_whitespace', False)
    kwargs.setdefault('drop_whitespace', tuple(edges) == ('', ''))
    kwargs.setdefault('break_long_words', False)
    # parameters
    kwargs['tabsize'] = tabsize
    # Reserve room for the edge strings on every line.
    kwargs['width'] = width - len(edges[0]) - len(edges[1])
    # Normalize added_indent to [initial, subsequent].
    # NOTE(review): a value that is neither str nor list/tuple reaches len()
    # below and raises TypeError instead of the descriptive ValueError --
    # confirm the intended accepted types.
    if isinstance(added_indent, str):
        added_indent = [added_indent]
    if isinstance(added_indent, (list, tuple)) and len(added_indent) == 1:
        added_indent *= 2
    elif len(added_indent) > 2:
        raise ValueError('`added_indent` must be given as a string or a list/tuple of at most two '
                         'strings, where the first string correspond to the initial and the second '
                         'correspond to the subsequent indents. If only one string is given, then '
                         'all lines are indented.')
    tab = tabsize * indent_level * ' '
    kwargs['initial_indent'] = kwargs.setdefault('initial_indent', tab) + added_indent[0]
    kwargs['subsequent_indent'] = kwargs.setdefault('subsequent_indent', tab) + added_indent[1]
    num_indent = [len(kwargs['initial_indent']), len(kwargs['subsequent_indent'])]
    lines = textwrap.fill(text, **kwargs).split('\n')
    if remove_initial_indent:
        # remove the initial indent
        lines[0] = lines[0][num_indent[0]:]
        num_indent[0] = 0
    # add edges
    # Splice the edge strings between the indent and the content; lines that
    # consist solely of indentation are left untouched (the `.+` never
    # matches).
    output = [re.sub(r'^({0})(.+)$'.format(' ' * num_indent[0]),
                     r'\1{0}\2{1}'.format(*edges),
                     lines[0])]
    output += [re.sub(r'^({0})(.+)$'.format(' ' * num_indent[1]),
                      r'\1{0}\2{1}'.format(*edges),
                      line) for line in lines[1:]]
    return '\n'.join(output)
def multi_wrap(multiline, width=100, indent_level=0, tabsize=4):
    """Wrap each line of a multiline string, preserving newlines.

    Each original line is wrapped independently; its continuation lines are
    indented to line up with that line's leading whitespace.

    Parameters
    ----------
    multiline : str
        String with multiple lines (newlines).
    width : int
        Maximum number of characters allowed in each line.
    indent_level : int
        Number of indents (tabs) applied to every line.
    tabsize : int
        Number of spaces that corresponds to a tab.

    Returns
    -------
    wrapped_multiline : str
        Multiple lines wrapped over each newline.
    """
    fill_options = dict(
        expand_tabs=True,
        replace_whitespace=False,
        drop_whitespace=True,
        break_long_words=False,
        tabsize=tabsize,
        width=width,
    )
    base_indent = indent_level * tabsize * ' '
    wrapped_lines = []
    for line in multiline.split('\n'):
        # Length of this line's leading whitespace.
        leading = len(re.search(r'^(\s*).*$', line).group(1))
        wrapped_lines.append(textwrap.fill(
            line,
            initial_indent=base_indent,
            subsequent_indent=base_indent + leading * ' ',
            **fill_options))
    result = '\n'.join(wrapped_lines)
    # drop_whitespace should already prevent trailing newlines; strip any
    # stragglers just in case.
    return re.sub('\n*$', '', result)
def is_math(text):
    """Check if the given text is a math equation in rst format.
    Parameters
    ----------
    text : str
        Text to check.
    Returns
    -------
    is_math : bool
        True if text is a math equation, False otherwise.
    """
    # A `.. math::` directive followed by at least one indented line.
    pattern = r'^\n*\.\.\s*math::\n*(?:\n\s+.+)+\n*$'
    return re.search(pattern, text) is not None
def extract_math(text):
    """Extract multiline math equation from the text.
    Parameters
    ----------
    text : str
        Text from which the math equation is extracted.
    Returns
    -------
    split_eqns : list of str
        Text where the math equations have been separated from the rest of
        the string.
    """
    directive = re.compile(r'\n*(\.\.\s*math::\n*(?: .+\n?)+)\n*')
    pieces = directive.split(text)
    cleaned = []
    for piece in pieces:
        # Drop empty fragments produced by the split.
        if not piece:
            continue
        # Strip trailing newlines from math blocks only.
        if is_math(piece):
            piece = re.sub(r'\n*$', '', piece)
        cleaned.append(piece)
    return cleaned
def layered_wrap(dict_edges_contents, width=100, indent_level=0, tabsize=4, edges=("'", "'"),
                 added_indent='', remove_initial_indent=False, **kwargs):
    """Recursively wraps the content of a layer with the appropriate edges.
    When making nicely indented multiline nested lists, appropriate edges, e.g. [ and ], are used
    to contain the contents inside. The contents inside can be indented and can also be encased in
    edges.
    Parameters
    ----------
    dict_edges_contents : dict
        Dictionary of the edges to the contents of the edges.
        Each dictionary must specify exactly one key/value.
        The key is 3-tuple of the edge on the left and the right and whether the next content will
        start from a newline (vs current line).
        The value is a string, list of strings, or another dictionary (or list of dictionaries) that
        will be nested inside the current layer.
    width : int
        Maximum number of characters allowed in each width.
    indent_level : int
        Number of indents (tabs) that are needed for the docstring.
    tabsize : int
        Number of spaces that corresponds to a tab
    edges : 2-tuple of string
        Beginning and end of each line (after indent).
    added_indent : str, 2-tuple/list of str
        Indentation to be added after the indentation via `indent_level`.
        If 2 strings are given, then first string corresponds to the initial indent and the second
        string to the subsequent indents.
        Default is no added indent.
    remove_initial_indent : bool
        Flag for removing the indentation on the first line.
    kwargs : dict
        Other options for the textwrap.fill.
        Default replaces tabs with spaces ('expand_tabs': True), does not replace whitespace
        ('replace_whitespace': False), drops whitespaces (that are not indentations) before or after
        sentences ('drop_whitespace': True), and does not break long word into smaller pieces
        ('break_long_words': False).
    Returns
    -------
    output : str
        The wrapped contents enclosed in the given edges.
    Raises
    ------
    TypeError
        If `dict_edges_contents` is not a dictionary.
    ValueError
        If `dict_edges_contents` has more than one key/value.
        If value of `dict_edges_contents` is not a dictionary, string, or list/tuple of dictionaries
        and strings.
        If `layers_text` does not have the same shape as the `layers_edges`
    """
    # NOTE: i would love to use numpy here, but that would mean adding a dependency solely for shape
    # checking... So recursion will be used.
    if not isinstance(dict_edges_contents, dict):
        raise TypeError('`dict_edges_contents` must be a dictionary.')
    if len(dict_edges_contents) != 1:
        raise ValueError('`dict_edges_contents` must have exactly one key/value.')
    kwargs['tabsize'] = tabsize
    # Read (rather than pop) the single entry: the previous dict.popitem()
    # emptied the caller's dictionary -- and, through the recursion, every
    # nested layer -- as a side effect of calling this function.
    (l_edge, r_edge, has_newline), layer = next(iter(dict_edges_contents.items()))
    wrapped_l_edge = wrap(l_edge, width=width, indent_level=indent_level,
                          drop_whitespace=False, **kwargs)
    wrapped_r_edge = wrap(r_edge, width=width, indent_level=indent_level if has_newline else 0,
                          drop_whitespace=False, **kwargs)
    # normalize single contents to a list so one code path handles both
    if isinstance(layer, dict) or isinstance(layer, str):
        layer = [layer]
    elif not isinstance(layer, (list, tuple)):
        raise ValueError('Contents of `dict_edges_contents` must be a dictionary, string, or '
                         'list/tuple of dictionaries and strings.')
    output = ''
    output += wrapped_l_edge
    if has_newline:
        output += '\n'
    for i, item in enumerate(layer):
        if isinstance(item, dict):
            # nested layer: indent one level deeper on newline layouts,
            # otherwise shrink the width to leave room for the edges
            if has_newline:
                output += layered_wrap(item,
                                       width=width,
                                       indent_level=indent_level+1,
                                       added_indent=added_indent,
                                       remove_initial_indent=False,
                                       **kwargs)
            else:
                output += layered_wrap(item,
                                       width=width - len(r_edge) - len(l_edge),
                                       indent_level=indent_level,
                                       added_indent=' ' * len(l_edge),
                                       remove_initial_indent=True,
                                       **kwargs)
        elif isinstance(item, str):
            if has_newline:
                output += wrap(item,
                               width=width,
                               indent_level=indent_level+1,
                               edges=edges,
                               added_indent='',
                               remove_initial_indent=False,
                               **kwargs)
            else:
                output += wrap(item,
                               width=width,
                               indent_level=0,
                               edges=edges,
                               added_indent=' '*len(wrapped_l_edge),
                               remove_initial_indent=True,
                               **kwargs)
        else:
            raise ValueError('Contents of `dict_edges_contents` must be a dictionary, string, or '
                             'list/tuple of dictionaries and strings.')
        # separate sibling items with commas
        if i != len(layer) - 1:
            if has_newline:
                output += ',\n'
            else:
                output += ', '
    if has_newline:
        output += '\n'
    output += wrapped_r_edge
    return output
def extract_members(module, recursive=False):
    """Collect the members of `module` that are defined in its own source file.

    Parameters
    ----------
    module : instance
        Any python module (or object with source-locatable members).
    recursive : bool
        When True, also pull in the members of each extracted (non-property)
        member.  NOTE(review): the recursive call does not forward
        `recursive`, so the search only goes one extra level deep -- confirm
        this is intended.

    Returns
    -------
    dict
        Mapping from member name to member object.
    """
    module_file = inspect.getsourcefile(module)
    chosen = []
    for name, member in inspect.getmembers(module):
        # code objects are skipped outright
        if name == '__code__':
            continue
        if isinstance(member, property):
            # inspect.getsourcefile cannot handle property objects;
            # NOTE: all property objects that belong to an instance is assumed
            # to be defined within that instance (i.e. not inherited)
            chosen.append((name, member))
            continue
        try:
            member_file = inspect.getsourcefile(member)
        except TypeError:
            # builtins and other objects with no source file
            continue
        if os.path.samefile(member_file, module_file):
            chosen.append((name, member))
    extracted = {}
    for name, member in chosen:
        extracted[name] = member
        if recursive and not isinstance(member, property):
            extracted.update(extract_members(member))
    return extracted
|
def a_kv(n):
    """Return the n-th term n^2 * (-1)^n of the alternating square series."""
    term = n * n
    return term if n % 2 == 0 else -term

def sumkv(n):
    """Sum the series a_kv(1) + ... + a_kv(n) by direct summation."""
    return sum(a_kv(k) for k in range(1, n + 1))
def mysum(n):
    """Print the closed-form value of sum_{k=1}^{n} k^2 * (-1)^k.

    For even n = 2k the sum equals 2k^2 + k; for odd n = 2k - 1 it equals
    -2k^2 + k.  Integer (floor) division is used so the printed value is an
    int on Python 3 as well -- the original `/` printed e.g. `10.0` there,
    disagreeing with sumkv's integer output.
    """
    if (n % 2) == 0:
        k = n // 2
        print(2 * k * k + k)
    else:
        k = (n + 1) // 2
        print(-2 * k * k + k)
# Cross-check the direct summation against the closed-form version
# for n = 1..4 (both should print the same sequence of values).
for n in range(1, 5):
    print(sumkv(n))
for n in range(1, 5):
    mysum(n)
|
from __future__ import print_function
import sys
from graph import make
from graph import GraphException
from timer import Timer
def main():
    """Read a graph file and print the Jaccard similarity of two vertices.

    Usage: prog <graph-file> <u> <v>.  Prints a GraphException message when
    the arguments are missing.
    """
    if len(sys.argv) == 4:
        try:
            filename = str(sys.argv[1])
            # NOTE(review): timer is unused, but kept in case constructing
            # Timer() has a side effect -- confirm and remove if not.
            timer = Timer()
            graph = make(filename)
            u = int(sys.argv[2])
            v = int(sys.argv[3])
            print(graph.jaccard(u, v))
        except Exception:
            print("Exception in main")
            # bare `raise` re-raises with the original traceback intact
            # (the previous `raise ex` truncated it on Python 2)
            raise
    else:
        print(GraphException("You must supply a valid graph file"))
# Guarded entry point: run only when executed as a script, not on import
# (the unconditional main() call previously ran at import time too).
if __name__ == '__main__':
    main()
|
from numpy import *
def Hasofer(G, u, Tinv, k, delta, sigma, inpt, otpt):
    """One Hasofer-Lind style step: design-point estimate for output `k`.

    Evaluates G once over the base point plus all +/- perturbations built by
    pretaylorseries, then uses the finite-difference gradient from
    taylorseries to form the unit direction `alpha` and design point `y`.

    NOTE(review): `u * alpha` is an elementwise product here; confirm a dot
    product was not intended by the reference formulation.
    """
    values = [Tinv(u)]
    values.extend(pretaylorseries(u, Tinv, delta*sigma, inpt))
    out = iter(G(values))
    # next(out) works on both Python 2 and 3; the original out.next()
    # was Python 2 only.
    beta_t = next(out)
    G2 = lambda x: next(out)
    grad_g = taylorseries(G2, u, Tinv, delta*sigma, inpt, otpt).transpose()
    alpha = grad_g[k] / linalg.norm(grad_g[k])
    beta = u * alpha - beta_t[k] / linalg.norm(grad_g[k])
    y = beta * alpha
    return y, alpha
def taylorseries(G, x, Tinv, delta, inpt, otpt):
    """Central finite-difference gradient of G, one row per input dimension.

    Each row i holds (G(Tinv(x + delta_i e_i)) - G(Tinv(x - delta_i e_i)))
    divided by 2*delta_i, broadcast over the `otpt` outputs.
    """
    grad = zeros((inpt, otpt))
    for i in range(inpt):
        forward = array(x)
        backward = array(x)
        forward[i] += delta[i]
        backward[i] -= delta[i]
        grad[i] = (G(Tinv(forward)) - G(Tinv(backward))) / (2.0 * delta[i])
    return grad
def pretaylorseries(x, Tinv, delta, inpt):
    """Build the list of perturbed, back-transformed input vectors.

    For each input dimension i, appends Tinv(x + delta_i e_i) followed by
    Tinv(x - delta_i e_i) -- the evaluation points used by taylorseries.

    Raises
    ------
    ValueError
        If any transformed vector contains +/-inf (the search stepped
        outside the admissible domain).
    """
    values = []
    for i in range(inpt):
        xvecPLUS = array(x)
        xvecMINS = array(x)
        xvecPLUS[i] += delta[i]
        xvecMINS[i] -= delta[i]
        xvecPLUS = Tinv(xvecPLUS)
        xvecMINS = Tinv(xvecMINS)
        if inf in xvecPLUS or -inf in xvecPLUS or inf in xvecMINS or -inf in xvecMINS:
            # print() with a single argument behaves identically on Python 2;
            # the original print statement was a syntax error on Python 3.
            print("Attempted to search past limits, stopping search.")
            raise ValueError
        values.append(xvecPLUS)
        values.append(xvecMINS)
    return values
|
from grow.pods import pods
from grow.pods import storage
from grow.testing import testing
import unittest
import webob.exc
class RoutesTest(unittest.TestCase):
    """Tests for pod URL routing: path matching and concrete path listing."""
    def setUp(self):
        # Show full diffs for the long path-list comparison below.
        self.maxDiff = None
        self.dir_path = testing.create_test_pod_dir()
        self.pod = pods.Pod(self.dir_path, storage=storage.FileStorage)
    def test_match(self):
        # Known paths resolve; an unknown path raises HTTPNotFound.
        self.pod.match('/')
        self.pod.match('/fr/about/')
        self.pod.match('/de_alias/about/')
        self.assertRaises(webob.exc.HTTPNotFound, self.pod.match, '/dummy/')
    def test_list_concrete_paths(self):
        # Every concrete path of the test pod, order-insensitive.
        expected = [
            '/',
            '/about/',
            '/app/static/somepath/de_alias/test.txt',
            '/app/static/test.txt',
            '/de_alias/about/',
            '/de_alias/contact-us/',
            '/de_alias/home/',
            '/de_alias/html/',
            '/de_alias/intro/',
            '/de_alias/yaml_test/',
            '/fr/about/',
            '/fr/contact-us/',
            '/fr/home/',
            '/fr/html/',
            '/fr/intro/',
            '/fr/yaml_test/',
            '/html/',
            '/intl/de_alias/localized/',
            '/intl/de_alias/multiple-locales/',
            '/intl/en_gb/localized/',
            '/intl/fr/multiple-locales/',
            '/intl/hi_in/localized/',
            '/intl/it/multiple-locales/',
            '/intro/',
            '/it/about/',
            '/it/contact-us/',
            '/it/home/',
            '/it/html/',
            '/it/intro/',
            '/it/yaml_test/',
            '/post/newer/',
            '/post/newest/',
            '/post/older/',
            '/post/oldest/',
            '/public/file.txt',
            '/public/main.css',
            '/public/main.min.js',
            '/root/base/',
            '/root/static/file.txt',
            '/yaml_test/',
        ]
        result = self.pod.routes.list_concrete_paths()
        # NOTE(review): assertItemsEqual is Python 2 only (renamed to
        # assertCountEqual in Python 3) -- confirm the target interpreter.
        self.assertItemsEqual(expected, result)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
from .linked_service import LinkedService
class AzureStorageLinkedService(LinkedService):
    """The storage account linked service.

    Auto-generated-style msrest model: `_validation` and `_attribute_map`
    drive (de)serialization; do not rename attributes without updating them.

    :param additional_properties: Unmatched properties from the message are
     deserialized this collection
    :type additional_properties: dict[str, object]
    :param connect_via: The integration runtime reference.
    :type connect_via:
     ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
    :param description: Linked service description.
    :type description: str
    :param type: Constant filled by server.
    :type type: str
    :param connection_string: The connection string. It is mutually exclusive
     with sasUri property.
    :type connection_string: ~azure.mgmt.datafactory.models.SecureString
    :param sas_uri: SAS URI of the Azure Storage resource. It is mutually
     exclusive with connectionString property.
    :type sas_uri: ~azure.mgmt.datafactory.models.SecureString
    :param encrypted_credential: The encrypted credential used for
     authentication. Credentials are encrypted using the integration runtime
     credential manager. Type: string (or Expression with resultType string).
    :type encrypted_credential: object
    """
    # 'type' is mandatory; it is filled with the constant set in __init__.
    _validation = {
        'type': {'required': True},
    }
    # Maps Python attribute names to REST payload keys for the msrest
    # serializer; 'typeProperties.*' entries are nested in the payload.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
        'description': {'key': 'description', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'connection_string': {'key': 'typeProperties.connectionString', 'type': 'SecureString'},
        'sas_uri': {'key': 'typeProperties.sasUri', 'type': 'SecureString'},
        'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
    }
    def __init__(self, additional_properties=None, connect_via=None, description=None, connection_string=None, sas_uri=None, encrypted_credential=None):
        super(AzureStorageLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description)
        self.connection_string = connection_string
        self.sas_uri = sas_uri
        self.encrypted_credential = encrypted_credential
        # Discriminator constant identifying this linked-service type.
        self.type = 'AzureStorage'
|
"""
:mod:`led`
==================
Created by hbldh <henrik.blidh@nedomkull.com>
Created on 2016-04-02
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import time
from pymetawear.client import MetaWearClient
# MAC addresses of the two MetaWear boards to control.
address_1 = 'D1:75:74:0B:59:1F'
address_2 = 'F1:D9:71:7E:34:7A'
# Connect to both boards over BLE before issuing any LED commands.
print("Connect to {0}...".format(address_1))
client_1 = MetaWearClient(str(address_1), debug=True)
print("New client created: {0}".format(client_1))
print("Connect to {0}...".format(address_2))
client_2 = MetaWearClient(str(address_2), debug=True)
print("New client created: {0}".format(client_2))
# Blink each board's LED 10 times: green on board 1, red on board 2.
print("Blinking 10 times with green LED on client 1...")
pattern = client_1.led.load_preset_pattern('blink', repeat_count=10)
client_1.led.write_pattern(pattern, 'g')
client_1.led.play()
print("Blinking 10 times with red LED on client 2...")
pattern = client_2.led.load_preset_pattern('blink', repeat_count=10)
client_2.led.write_pattern(pattern, 'r')
client_2.led.play()
# Give the blink patterns time to play out before disconnecting.
time.sleep(5.0)
client_1.disconnect()
client_2.disconnect()
|
import smtplib
from email.message import EmailMessage
import warnings
import __main__ as main
import os
import traceback
import socket
# Module-level SMTP credentials and recipient, configured via
# set_credentials() / set_destination() before using notify_when_done.
_username = None
_password = None
_destination = None
def set_credentials(username, password):
    """Store the SMTP username and password used by notify_when_done.

    Raises
    ------
    TypeError
        If either argument is not a string.  (The original raised the
        undefined name `TypeException`, which surfaced as a NameError.)
    """
    if not isinstance(username, str):
        raise TypeError("Username must be a string.")
    if not isinstance(password, str):
        raise TypeError("Password must be a string.")
    global _username
    global _password
    _username = username
    _password = password
def set_destination(destination):
    """Store the email address that notify_when_done reports to.

    Raises
    ------
    TypeError
        If `destination` is not a string.  (The original raised the
        undefined name `TypeException`, which surfaced as a NameError.)
    """
    if not isinstance(destination, str):
        raise TypeError("Destination must be a string.")
    global _destination
    _destination = destination
class notify_when_done(object):
    """Context manager that emails `_destination` when the block finishes.

    On entry it verifies the SMTP server is reachable with the stored
    credentials; on exit it sends either a success message containing
    `done_string` or the traceback of the error that escaped the block.
    """
    def __init__(self, done_string, debug_connection = False,
                 server = "smtp.gmail.com", port = 587):
        # done_string: text reported on successful completion.
        self.done_string = done_string
        # debug_connection: print the traceback when the probe login fails.
        self.debug_connection = debug_connection
        self.server = server
        self.port = port
        # Identify the calling script (or notebook) for the report subject.
        if hasattr(main, "__file__"):
            self.file = main.__file__
        else:
            # typo fix: was "Jupter notebook at "
            self.file = "Jupyter notebook at " + os.path.abspath("")
    def __enter__(self):
        # Test connection and credentials up front so a bad login is
        # reported before the (possibly long) run starts.
        self.login_good = True
        try:
            server = self.connect_to_server()
            server.quit()  # close the probe connection (was leaked before)
        except Exception:
            warnings.warn("Connection to email server failed. "\
                          "Run will continue without notifications.",
                          RuntimeWarning)
            self.login_good = False
            if self.debug_connection:
                print("Connection error is:")
                print(traceback.format_exc())
        return self  # allow `with notify_when_done(...) as notifier:`
    def __exit__(self, type, value, trace):
        # Don't bother connecting again if it didn't work the first time.
        if not self.login_good:
            return
        # Send an email/text informing the owner of the script what happened.
        server = self.connect_to_server()
        message = self.file + " reports "
        if type is None:
            # Block completed without error.
            message += "completion of: " + self.done_string
        else:
            # Block hit an error: include the traceback in the body.
            message += "the following error: \n" + traceback.format_exc()
        msg_email = EmailMessage()
        msg_email.set_content(message)
        msg_email["Subject"] = ("Error " if type is not None else "Success ") +\
                               "reported by " + self.file
        msg_email["From"] = _username
        msg_email["To"] = _destination
        server.send_message(msg_email)
        server.quit()  # close the connection instead of leaking it
    def connect_to_server(self):
        """Open, STARTTLS-upgrade and log in to the SMTP server.

        Uses socket.gethostbyname to get an IPv4 address, which connects
        much, much faster than an IPv6 address.
        """
        server = smtplib.SMTP(socket.gethostbyname(self.server), self.port)
        server.starttls()
        server.login(_username, _password)
        return server
|
"""Helper proxy to the state object."""
from flask import current_app
from werkzeug.local import LocalProxy
current_circulation = LocalProxy(
lambda: current_app.extensions['invenio-circulation']
)
"""Helper proxy to circulation state object."""
|
from rest_framework import viewsets
from .serializers import *
from .models import *
class GermanLemmaViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for all GermanLemma objects."""
    queryset = GermanLemma.objects.all()
    serializer_class = GermanLemmaSerializer
class ForeignLemmaViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for all ForeignLemma objects."""
    queryset = ForeignLemma.objects.all()
    serializer_class = ForeignLemmaSerializer
|
import abc
import pygame
class AbstractWidget(object):
    """Base class for screen widgets occupying a rectangular area.

    NOTE(review): `__metaclass__` is the Python 2 way to set a metaclass;
    on Python 3 this assignment has no effect -- confirm the target
    interpreter.
    """
    __metaclass__ = abc.ABCMeta
    def __init__(self):
        # Subclasses must assign a pygame Rect before is_mouse_over() runs.
        self.rect = None
    def is_mouse_over(self):
        """Return True if the mouse cursor is inside this widget's rect."""
        xmouse, ymouse = pygame.mouse.get_pos()
        xmouse_over = self.rect.left <= xmouse <= self.rect.right
        ymouse_over = self.rect.top <= ymouse <= self.rect.bottom
        return xmouse_over and ymouse_over
class Text(AbstractWidget):
    """A piece of text rendered onto a background surface.

    Appearance is driven by stylize() keys read below: 'font_family',
    'font_size', 'color', 'text_align', 'margin_left', 'margin_top' and
    'text_underline_hover'.
    """
    def __init__(self, text):
        AbstractWidget.__init__(self)
        self._text = text
        self._style = {}  # style key -> value, filled by stylize()
        self._font = None  # pygame Font, created in _set_font()
        self._surface = None  # rendered text surface
        self._background = None  # target surface, set via render_on()
    def render_on(self, background):
        """Choose the surface this widget will be blitted onto."""
        self._background = background
    def stylize(self, **kwargs):
        """Merge the given style options into the current style."""
        self._style.update(kwargs)
    def update(self):
        """Re-render the text and blit it onto the background surface."""
        self._set_font()
        self._set_rect()
        self._background.blit(self._surface, self.rect)
    def _hover(self):
        # NOTE(review): raises KeyError when 'text_underline_hover' was
        # never stylized -- confirm callers always set it.
        if self._style['text_underline_hover']:
            self._font.set_underline(True)
    def _set_font(self):
        # Build the font, underline it when hovered, then render the text.
        self._font = pygame.font.Font(self._style['font_family'],
                                      self._style['font_size'])
        if self.rect and self.is_mouse_over():
            self._hover()
        self._surface = self._font.render(self._text, 1, self._style['color'])
    def _set_rect(self):
        # Center horizontally when text_align == 'center', otherwise use
        # margin_left; vertical position comes from margin_top.
        centerx = (self._background.get_width() / 2
                   if self._style['text_align'] == 'center'
                   else self._style['margin_left'])
        centery = self._style['margin_top']
        self.rect = self._surface.get_rect(centerx=centerx, centery=centery)
class Label(Text):
    """Plain, non-interactive text; behaves exactly like Text."""
    pass
class LinkToState(Text):
    """Text widget carrying a target state (e.g. for menu navigation)."""
    def __init__(self, text, state=None):
        Text.__init__(self, text)
        self._state = state
    @property
    def state(self):
        """The state object this link points to (read-only)."""
        return self._state
|
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting SPC values"""
    value = Decimal("20000000.00000003")
    # round-trip through JSON as a float and rescale to satoshis
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the SpeedCoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/SpeedCoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "SpeedCoin")
    # any other platform gets the dot-directory in $HOME
    return os.path.expanduser("~/.SpeedCoin")
def read_SpeedCoin_config(dbdir):
    """Read the SpeedCoin.conf file from dbdir, returns dictionary of settings.

    The conf file has no section headers and may contain '#' comments, so a
    fake "[all]" section is prepended and comments are stripped before the
    stdlib config parser sees it.  Works on Python 2 and 3 (the original
    used the Python-2-only ConfigParser module and readfp(), and leaked the
    open file handle).
    """
    try:
        from ConfigParser import SafeConfigParser as _ConfParser  # Python 2
    except ImportError:
        from configparser import ConfigParser as _ConfParser  # Python 3
    lines = ['[all]\n']
    with open(os.path.join(dbdir, "SpeedCoin.conf")) as fp:
        for s in fp:
            # strip trailing '#' comments, like the original FakeSecHead did
            if s.find('#') != -1:
                s = s[0:s.find('#')].strip() + "\n"
            lines.append(s)
    content = ''.join(lines)
    config_parser = _ConfParser()
    if hasattr(config_parser, 'read_string'):  # Python 3
        config_parser.read_string(content)
    else:  # Python 2 has no read_string; feed readfp a file-like object
        import StringIO
        config_parser.readfp(StringIO.StringIO(content))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a SpeedCoin JSON-RPC server; exits the process on failure."""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 18332 if testnet else 8332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the SpeedCoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # was a bare `except:`, which also caught the SystemExit raised just
        # above and mis-reported the testnet mismatch as a connection error
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(SpeedCoind):
    """Prompt for the wallet passphrase if needed; return True when unlocked."""
    info = SpeedCoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            SpeedCoind.walletpassphrase(passphrase, 5)
        except Exception:
            # was a bare `except:`, which also swallowed KeyboardInterrupt
            # from the passphrase prompt
            sys.stderr.write("Wrong passphrase\n")
    # re-check: caller loops until this returns True
    info = SpeedCoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(SpeedCoind):
    """Summarize unspent outputs per address: total, outputs and account."""
    account_of = {}
    for info in SpeedCoind.listreceivedbyaddress(0):
        account_of[info["address"]] = info["account"]
    address_summary = {}
    for output in SpeedCoind.listunspent(0):
        # listunspent doesn't give addresses, so look them up in the raw tx
        rawtx = SpeedCoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-SpeedCoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue
        address = pk["addresses"][0]
        entry = address_summary.get(address)
        if entry is None:
            address_summary[address] = {
                "total": vout["value"],
                "outputs": [output],
                "account": account_of.get(address, "")
            }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs until `needed` is covered.

    Returns (chosen_outputs, change) where change = gathered - needed
    (negative when the inputs cannot cover `needed`).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    chosen = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        chosen.append({"txid": candidate["txid"], "vout": candidate["vout"]})
        gathered += candidate["amount"]
    return (chosen, gathered - needed)
def create_tx(SpeedCoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction sending `amount` (+`fee`) to `toaddress`.

    Inputs are gathered from `fromaddresses`; non-trivial change goes back to
    the last from-address.  Exits the process when funds are insufficient or
    signing fails.
    """
    all_coins = list_available(SpeedCoind)
    needed = amount+fee
    potential_inputs = []
    total_available = Decimal("0.0")
    for addr in fromaddresses:
        if addr in all_coins:
            potential_inputs.extend(all_coins[addr]["outputs"])
            total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f SPC available, need %f\n"%(total_available, needed))
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, amounts are cast to float before sending them to SpeedCoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)
    rawtx = SpeedCoind.createrawtransaction(inputs, outputs)
    signed_rawtx = SpeedCoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    return signed_rawtx["hex"]
def compute_amount_in(SpeedCoind, txinfo):
    """Sum the values of the outputs spent by txinfo's inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # look up the transaction that created this input
        spent_tx = SpeedCoind.getrawtransaction(vin['txid'], 1)
        total += spent_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of the transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(SpeedCoind, txdata_hex, max_fee):
    """Exit the process if the transaction's implied fee looks unreasonable.

    Rejects when the fee (inputs - outputs) exceeds `max_fee`, and when a
    large (>1000 bytes) or tiny-amount transaction carries less than
    BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = SpeedCoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(SpeedCoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: `fee` was an undefined name in the two checks below
        # (NameError whenever they were reached); compute the actual fee.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        # `//` keeps the Python 2 integer semantics on Python 3 as well
        tx_size = len(txdata_hex)//2
        kb = tx_size//1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list balances or send SpeedCoins.

    Without --amount, prints a per-address summary of available coins.
    Otherwise builds, sanity-checks and (unless --dry_run) broadcasts a
    transaction from the --from addresses to --to.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get SpeedCoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send SpeedCoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of SpeedCoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_SpeedCoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    SpeedCoind = connect_JSON(config)
    if options.amount is None:
        address_summary = list_available(SpeedCoind)
        # items() works on both Python 2 and 3 (iteritems() was py2-only)
        for address, info in address_summary.items():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while not unlock_wallet(SpeedCoind):
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(SpeedCoind, options.fromaddresses.split(","), options.to, amount, fee)
        sanity_test_fee(SpeedCoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = SpeedCoind.sendrawtransaction(txdata)
            print(txid)
# Run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
"""
Blynk is a platform with iOS and Android apps to control
Arduino, Raspberry Pi and the likes over the Internet.
You can easily build graphic interfaces for all your
projects by simply dragging and dropping widgets.
Downloads, docs, tutorials: http://www.blynk.cc
Sketch generator: http://examples.blynk.cc
Blynk community: http://community.blynk.cc
Social networks: http://www.fb.com/blynkapp
http://twitter.com/blynk_app
This example shows how to initialize your W600 board
and connect it to Blynk.
Don't forget to change WIFI_SSID, WIFI_PASS and BLYNK_AUTH ;)
"""
import BlynkLib
import machine
from easyw600 import *
# Wi-Fi and Blynk credentials -- replace these placeholders with your own.
WIFI_SSID = 'YourWiFiNetwork'
WIFI_PASS = 'YourWiFiPassword'
BLYNK_AUTH = 'YourAuthToken'
# Join the Wi-Fi network (easyw600 helper) before talking to Blynk.
wifi = connect(WIFI_SSID, WIFI_PASS)
print("Connecting to Blynk...")
blynk = BlynkLib.Blynk(BLYNK_AUTH, log=print)
# Fired once the connection to the Blynk server is established.
@blynk.on("connected")
def blynk_connected(ping):
    print('Blynk ready. Ping:', ping, 'ms')
# Called when the app writes to virtual pin V1.
@blynk.VIRTUAL_WRITE(1)
def v1(param):
    print('!!!VIRTUAL_WRITE', param)
# NOTE(review): three registration spellings are demonstrated below
# (VIRTUAL_WRITE, ON, on); confirm all are supported by the installed
# BlynkLib version.
@blynk.ON("V2")
def v2(param):
    print('!!!ON')
@blynk.on("V3")
def v3(param):
    print('!!!on')
def runLoop():
    # Process Blynk events forever, idling the CPU between iterations.
    while True:
        blynk.run()
        machine.idle()
runLoop()
|
class ThemeManager(object):
    """Holds the colour palette (RGB fractions in 0..1) used across the UI."""
    def __init__(self):
        light_grey = (.85, .85, .85)
        dark_grey = (.31, .31, .31)
        # General chrome colours.
        self.main_background_colour = light_grey
        self.main_text_colour = dark_grey
        self.main_bar_colour = light_grey
        self.main_tint_colour = dark_grey
        self.main_title_text_colour = dark_grey
        # Colours for the cell of the currently running item.
        self.running_cell_background_colour = (.1, .18, 1.0)
        self.running_cell_text_colour = (1.0, 1.0, 1.0)
|
import logging
import tornado.escape
import tornado.ioloop
import tornado.web
import os.path
import uuid
from tornado.concurrent import Future
from tornado import gen
from tornado.options import define, options, parse_command_line
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
class MessageBuffer(object):
    """In-memory chat log plus the set of long-poll waiters."""
    def __init__(self):
        # Futures of clients currently long-polling for new messages.
        self.waiters = set()
        # Most recent messages, newest last; trimmed to cache_size.
        self.cache = []
        self.cache_size = 200
    def wait_for_messages(self, cursor=None):
        """Return a Future resolving with messages newer than `cursor`."""
        # Construct a Future to return to our caller. This allows
        # wait_for_messages to be yielded from a coroutine even though
        # it is not a coroutine itself. We will set the result of the
        # Future when results are available.
        result_future = Future()
        if cursor:
            # Count cached messages newer than the client's cursor.
            new_count = 0
            for msg in reversed(self.cache):
                if msg["id"] == cursor:
                    break
                new_count += 1
            if new_count:
                # Messages already available: resolve immediately.
                result_future.set_result(self.cache[-new_count:])
                return result_future
        # Nothing new yet: park the caller until new_messages() fires.
        self.waiters.add(result_future)
        return result_future
    def cancel_wait(self, future):
        """Remove a waiter whose connection closed."""
        self.waiters.remove(future)
        # Set an empty result to unblock any coroutines waiting.
        future.set_result([])
    def new_messages(self, messages):
        """Deliver `messages` to all waiters and append them to the cache."""
        logging.info("Sending new message to %r listeners", len(self.waiters))
        for future in self.waiters:
            future.set_result(messages)
        self.waiters = set()
        self.cache.extend(messages)
        if len(self.cache) > self.cache_size:
            self.cache = self.cache[-self.cache_size:]
# Single shared buffer for the whole process.
global_message_buffer = MessageBuffer()
class MainHandler(tornado.web.RequestHandler):
    """Serves the chat page, pre-populated with the cached messages."""
    def get(self):
        self.render("index.html", messages=global_message_buffer.cache)
class MessageNewHandler(tornado.web.RequestHandler):
    """Accepts a new chat message via POST and broadcasts it to waiters."""
    def post(self):
        message = {
            "id": str(uuid.uuid4()),
            "body": self.get_argument("body"),
        }
        # to_basestring is necessary for Python 3's json encoder,
        # which doesn't accept byte strings.
        message["html"] = tornado.escape.to_basestring(
            self.render_string("message.html", message=message))
        if self.get_argument("next", None):
            # Non-AJAX fallback: redirect back to the page given by `next`.
            self.redirect(self.get_argument("next"))
        else:
            self.write(message)
        # Deliver to every long-polling listener.
        global_message_buffer.new_messages([message])
class MessageUpdatesHandler(tornado.web.RequestHandler):
    """Long-poll endpoint: responds once messages newer than `cursor` exist."""
    @gen.coroutine
    def post(self):
        cursor = self.get_argument("cursor", None)
        # Save the future returned by wait_for_messages so we can cancel
        # it in on_connection_close below.
        self.future = global_message_buffer.wait_for_messages(cursor=cursor)
        messages = yield self.future
        if self.request.connection.stream.closed():
            # Client went away while we were waiting; drop the response.
            return
        self.write(dict(messages=messages))
    def on_connection_close(self):
        global_message_buffer.cancel_wait(self.future)
def main():
    """Parse options, build the Tornado application and run the IO loop."""
    parse_command_line()
    app = tornado.web.Application(
        [
            (r"/", MainHandler),
            (r"/a/message/new", MessageNewHandler),
            (r"/a/message/updates", MessageUpdatesHandler),
        ],
        # NOTE(review): placeholder secret -- must be replaced before any
        # real deployment (it signs secure cookies / XSRF tokens).
        cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        xsrf_cookies=True,
        debug=options.debug,
    )
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.