code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
from twisted.internet import defer
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
import flud.FludCrypto
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
from flud.FludDefer import ErrDeferredList
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# Tuning knobs for the stress test: fire CONCURRENT parallel primitive ops,
# logging a progress line every CONCREPORT-th completion.
CONCURRENT=300
CONCREPORT=50
# Populated by runTests(): the running FludNode and the fake data file paths.
node = None
files = None
# Module logger: timestamped DEBUG output to the console.
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
        ' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
logger.setLevel(logging.DEBUG)
def suitesuccess(res):
    """Terminal callback for the whole suite: log success, pass the result on."""
    logger.info("all tests in suite passed")
    return res
def suiteerror(fail):
    """Terminal errback for the whole suite: log the failure, re-raise it."""
    logger.info("suite did not complete")
    logger.info("DEBUG: %s" % fail)
    return fail
def stagesuccess(res, message):
    """Per-stage success callback: log which stage finished, pass result on."""
    logger.info("stage %s succeeded" % message)
    return res
def stageerror(fail, message):
    """Per-stage errback: log which stage failed, re-raise the failure."""
    logger.info("stage %s failed" % message)
    return fail
def itersuccess(res, i, message):
    """Per-operation success callback; only log every CONCREPORT-th one."""
    if not i % CONCREPORT:
        logger.info("itersuccess: %s" % message)
    return res
def itererror(fail, message):
    """Per-operation errback: log the message and traceback, re-raise."""
    logger.info("itererror message: %s" % message)
    fail.printTraceback()
    return fail
def checkVERIFY(results, nKu, host, port, hashes, num=CONCURRENT):
    """
    Compare each VERIFY response against the locally computed hash of the
    same byte range; raise on the first mismatch, else pass results on.
    """
    logger.info(" checking VERIFY results...")
    for idx in range(num):
        expected = hashes[idx]
        actual = results[idx][1]
        if long(expected, 16) != long(actual, 16):
            raise failure.DefaultException("verify didn't match: %s != %s"
                    % (expected, actual))
    logger.info(" ...VERIFY results good.")
    return results
def testVERIFY(res, nKu, host, port, num=CONCURRENT):
    # Issue `num` concurrent VERIFY requests.  Each one asks the remote node
    # to hash a random 20-byte slice of a previously stored file; the same
    # slice is hashed locally here, and checkVERIFY compares the two sets.
    logger.info("testVERIFY started...")
    dlist = []
    hashes = []
    for i in range(num):
        #if i == 4:
        #	port = 21
        # read a random 20-byte window of the local copy for comparison
        fd = os.open(files[i], os.O_RDONLY)
        fsize = os.fstat(fd)[stat.ST_SIZE]
        length = 20
        offset = random.randrange(fsize-length)
        os.lseek(fd, offset, 0)
        data = os.read(fd, length)
        os.close(fd)
        hashes.append(flud.FludCrypto.hashstring(data))
        filekey = os.path.basename(files[i])
        deferred = node.client.sendVerify(filekey, offset, length, host,
                port, nKu)
        deferred.addCallback(itersuccess, i, "succeeded at testVERIFY %d" % i)
        deferred.addErrback(itererror, "failed at testVERIFY %d: %s"
                % (i, filekey))
        dlist.append(deferred)
    d = ErrDeferredList(dlist)
    d.addCallback(stagesuccess, "testVERIFY")
    d.addErrback(stageerror, 'failed at testVERIFY')
    # checkVERIFY gets the aggregated results plus our locally computed hashes
    d.addCallback(checkVERIFY, nKu, host, port, hashes, num)
    return d
def checkRETRIEVE(res, nKu, host, port, num=CONCURRENT):
    """
    Compare each retrieved file in the node's client dir against the
    original upload; raise on the first mismatch, then chain into
    testVERIFY.
    """
    logger.info(" checking RETRIEVE results...")
    for i in range(num):
        filekey = os.path.basename(files[i])
        # 'with' guarantees both handles are closed even when read() or the
        # mismatch exception fires (the original leaked them on error paths)
        with open(files[i]) as f1:
            with open(node.config.clientdir + "/" + filekey) as f2:
                if f1.read() != f2.read():
                    raise failure.DefaultException(
                            "upload/download files don't match")
    logger.info(" ...RETRIEVE results good.")
    return testVERIFY(res, nKu, host, port, num)
def testRETRIEVE(res, nKu, host, port, num=CONCURRENT):
    """Kick off `num` concurrent RETRIEVE ops, then check contents."""
    logger.info("testRETRIEVE started...")
    deferreds = []
    for i in range(num):
        filekey = os.path.basename(files[i])
        d = node.client.sendRetrieve(filekey, host, port, nKu)
        d.addCallback(itersuccess, i, "succeeded at testRETRIEVE %d" % i)
        d.addErrback(itererror, "failed at testRETRIEVE %d: %s"
                % (i, filekey))
        deferreds.append(d)
    dl = ErrDeferredList(deferreds)
    dl.addCallback(stagesuccess, "testRETRIEVE")
    dl.addErrback(stageerror, 'failed at testRETRIEVE')
    dl.addCallback(checkRETRIEVE, nKu, host, port, num)
    return dl
def testSTORE(nKu, host, port, num=CONCURRENT):
    """Store `num` files concurrently, then chain into testRETRIEVE."""
    logger.info("testSTORE started...")
    deferreds = []
    for i in range(num):
        d = node.client.sendStore(files[i], None, host, port, nKu)
        d.addCallback(itersuccess, i, "succeeded at testSTORE %d" % i)
        d.addErrback(itererror, "failed at testSTORE %d" % i)
        deferreds.append(d)
    dl = ErrDeferredList(deferreds)
    dl.addCallback(stagesuccess, "testSTORE")
    dl.addErrback(stageerror, 'failed at testSTORE')
    dl.addCallback(testRETRIEVE, nKu, host, port, num)
    return dl
def testID(host, port, num=CONCURRENT):
    """Send `num` concurrent ID requests; on success chain into testSTORE."""
    logger.info("testID started...")
    deferreds = []
    for i in range(num):
        d = node.client.sendGetID(host, port)
        d.debug = True
        d.addErrback(itererror, "failed at testID %d" % i)
        deferreds.append(d)
    # returnOne: the remote node's key (nKu) feeds the next stage
    dl = ErrDeferredList(deferreds, returnOne=True)
    dl.addCallback(stagesuccess, "testID")
    dl.addErrback(stageerror, 'testID')
    dl.addCallback(testSTORE, host, port, num)
    return dl
def runTests(host, port=None, listenport=None):
    """
    Create fake data files, start a FludNode listening on `listenport`,
    and run the primitive-op suite against `host`:`port` (port defaults
    to the local node's own port, i.e. talking to itself).
    """
    num = CONCURRENT
    global files, node
    files = createFakeData()
    node = FludNode(port=listenport)
    if port is None:  # identity test, not '== None'
        port = node.config.port
    node.run()
    # can't run more ops than we have sample files
    if num > len(files):
        num = len(files)
    d1 = testID(host, port, num)
    d1.addCallback(suitesuccess)
    d1.addErrback(suiteerror)
    d1.addBoth(cleanup)
    node.join()
    #node.start() # doesn't work, because reactor may not have started
    # listening by time requests start flying
def createFakeData(dir="/tmp", num=CONCURRENT):
    """
    Write `num` 256-byte random files into `dir`, each named by the
    fencoded SHA-256 of its own contents.  Returns the list of paths.
    (`dir` shadows the builtin, but is kept for caller compatibility.)
    """
    files = []
    # 'with' ensures the entropy source and each output file are closed
    # even if a read/write raises (the original leaked them on error)
    with open("/dev/urandom", 'rb') as randsrc:
        for i in range(num):
            randdata = randsrc.read(256)
            filekey = fencode(int(flud.FludCrypto.hashstring(randdata), 16))
            filename = dir + '/' + filekey
            with open(filename, 'wb') as f:
                f.write(randdata)
            files.append(filename)
    return files
def deleteFakeData(files):
    """Remove every file created by createFakeData(), warning on absentees."""
    for f in files:
        if os.path.exists(f):
            os.remove(f)
        else:
            # BUG FIX: the original format string lacked the '%s'
            # placeholder, so this line raised TypeError instead of warning
            logger.warn("%s already deleted!" % f)
def cleanup(dummy=None):
    # Final addBoth handler: delete the fake data files and schedule node
    # shutdown.  `dummy` receives the suite's result/failure and is ignored.
    logger.info("cleaning up files and shutting down in 1 seconds...")
    time.sleep(1)  # crude settle delay before removing files
    deleteFakeData(files)
    # NOTE(review): `reactor` is not imported explicitly in this module;
    # presumably it arrives via the star import from FludCommUtil -- confirm.
    reactor.callLater(1, node.stop)
    logger.info("done cleaning up")
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
print "Warning: testing against self my result in timeout failures"
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import tarfile, tempfile, random, os, sys
import gzip
from Crypto.Hash import SHA256
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.fencode import fencode
import flud.TarfileUtils as TarfileUtils
def maketarball(numfiles, avgsize, hashnames=False, addmetas=False):
    """
    Build a throwaway tarball with `numfiles` members of roughly `avgsize`
    bytes each (filled with 'a's).  If `hashnames`, members are named by
    the fencoded SHA-256 of their contents; if `addmetas`, a 48-byte
    ".343434.meta" companion entry is added per member.
    Returns (tarball path, list of member names).
    """
    tarballname = tempfile.mktemp() + ".tar"  # XXX: mktemp is race-prone
    tarball = tarfile.open(tarballname, 'w')
    if addmetas:
        metafname = tempfile.mktemp()
        # open() instead of the deprecated file() builtin
        metaf = open(metafname, 'w')
        metaf.write('m' * 48)
        metaf.close()
    for i in xrange(numfiles):
        fname = tempfile.mktemp()
        f = open(fname, 'wb')
        # randomize each member's size to 0.5x..1.5x of avgsize
        size = int(avgsize * (random.random() + 0.5))
        blocksize = 65 * 1024
        if hashnames:
            sha256 = SHA256.new()
        for j in range(0, size, blocksize):
            if j + blocksize > size:
                block = 'a' * (size - j)
            else:
                block = 'a' * blocksize
            if hashnames:
                sha256.update(block)
            f.write(block)
        f.close()
        arcname = fname
        if hashnames:
            arcname = fencode(int(sha256.hexdigest(), 16))
        tarball.add(fname, arcname)
        if addmetas:
            tarball.add(metafname, arcname + ".343434.meta")
        os.remove(fname)
    if addmetas:
        os.remove(metafname)
    contents = tarball.getnames()
    tarball.close()
    return tarballname, contents
def gzipTarball(tarball):
    """
    Compress `tarball` to `tarball`.gz, remove the original, and return
    the new path.  Context managers ensure neither handle is leaked (the
    original never closed the input file).
    """
    with open(tarball, 'rb') as src:
        data = src.read()
    with gzip.GzipFile(tarball + ".gz", 'wb') as dst:
        dst.write(data)
    os.remove(tarball)
    return tarball + ".gz"
def main():
# test plain TarfileUtils.delete()
(tballname, contents) = maketarball(5, 4096)
TarfileUtils.delete(tballname, contents[2:4])
tarball = tarfile.open(tballname, 'r')
os.remove(tballname)
assert(tarball.getnames() == contents[:2]+contents[4:])
tarball.close()
# test gzip TarfileUtils.delete()
(tballname, contents) = maketarball(5, 4096)
tballname = gzipTarball(tballname)
TarfileUtils.delete(tballname, contents[2:4])
tarball = tarfile.open(tballname, 'r')
os.remove(tballname)
assert(tarball.getnames() == contents[:2]+contents[4:])
tarball.close()
# test plain TarfileUtils.concatenate()
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(gz, plain)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname1 = gzipTarball(tballname1)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(plain, gz)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname2 = gzipTarball(tballname2)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(gz, gz)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname1 = gzipTarball(tballname1)
tballname2 = gzipTarball(tballname2)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.verifyHashes(plain no meta)
(tballname, contents) = maketarball(5, 4096, True)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]))
os.remove(tballname)
# test TarfileUtils.verifyHashes(plain with meta)
(tballname, contents) = maketarball(5, 4096, True, True)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]), ".meta")
os.remove(tballname)
# test TarfileUtils.verifyHashes(gzipped no meta)
(tballname, contents) = maketarball(5, 4096, True)
tballname = gzipTarball(tballname)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]))
os.remove(tballname)
# test TarfileUtils.verifyHashes(gzipped with meta)
(tballname, contents) = maketarball(5, 4096, True, True)
tballname = gzipTarball(tballname)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]), ".meta")
os.remove(tballname)
print "all tests passed"
if __name__ == "__main__":
main()
| Python |
from distutils.core import setup, Extension
import os, sys
class AutoToolsExtension(Extension):
    '''
    provides simple hooks for running ./configure, make, and any pre-configure
    scripts (such as ./bootstrap) necessary to build an Extension package.
    '''
    def __init__(self, name, sources=[], extra_objects=[], include_dirs=[],
            libraries=[], library_dirs=[], language=[], pre_configure=[],
            run_configure=[], run_make=[]):
        # Each build step aborts on failure.  BUG FIX: the original called
        # sys.exit() with no argument, which exits with status 0 -- i.e. a
        # failed build reported success to the caller.  Exit 1 instead.
        # (loop variable renamed from 'dir', which shadowed the builtin)
        for path, cmd in pre_configure:
            if os.system('cd %s; %s' % (path, cmd)) != 0:
                sys.exit(1)
        for path in run_configure:
            if os.system('cd %s; ./configure' % path) != 0:
                sys.exit(1)
        for path in run_make:
            if os.system('cd %s; make' % path) != 0:
                sys.exit(1)
        Extension.__init__(self, name, sources=sources,
                extra_objects=extra_objects, include_dirs=include_dirs,
                libraries=libraries, library_dirs=library_dirs,
                language=language)
# Package metadata and layout for the flud distribution.
setup(name="flud",
    version="0.0.2",
    description="flud decentralized backup",
    long_description='a 100% decentralized backup system',
    author="Alen Peacock",
    author_email="apeacock@flud.org",
    url='http://flud.org',
    # NOTE(review): 'copyright' is not a standard distutils setup() keyword;
    # distutils will warn about / ignore it -- confirm it is intentional.
    copyright='(c)2004-2007 Alen Peacock, licensed GPL v3',
    license='GPL v3',
    packages=['flud',
        'flud.protocol',
        'flud.bin',
        'flud.test'],
    package_dir={'flud': 'flud',
        'flud.protocol': 'flud/protocol',
        'flud.bin': 'flud/bin',
        'flud.test': 'flud/test'},
    package_data={'flud': ['images/*.png', 'FludNode.tac']},
    scripts = ['flud/bin/fludnode',
        'flud/bin/tacpath-flud',
        'flud/bin/fludscheduler',
        'flud/bin/fludclient',
        'flud/bin/fludlocalclient',
        'flud/bin/flud-mastermetadataViewer',
        'flud/bin/flud-metadataViewer',
        'flud/bin/start-fludnodes',
        'flud/bin/stop-fludnodes',
        'flud/bin/gauges-fludnodes',
        'flud/bin/clean-fludnodes'],
    # the 'filecoder' C++ extension is built via autotools (bootstrap,
    # configure, make) before distutils compiles/links it
    ext_modules=[AutoToolsExtension('filecoder',
        pre_configure = [('flud/coding', './bootstrap')],
        run_configure = ['flud/coding'],
        run_make = ['flud/coding/ldpc', 'flud/coding'],
        sources = ['flud/coding/filecodermodule.cpp'],
        extra_objects = ['flud/coding/CodedBlocks.o', 'flud/coding/Coder.o',
            'flud/coding/Decoder.o'],
        include_dirs = ['flud/coding/ldpc/src', 'flud/coding'],
        libraries = ['ldpc', 'stdc++'],
        library_dirs = ['flud/coding/ldpc/bin/linux'],
        language = ['c++'])]
    )
| Python |
"""
FludCrypto.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Provides FludRSA (an enhanced RSA.RSAobj), as well as convenience functions
for creating hashes, finding hash collisions, etc.
"""
import binascii
import operator
import struct
import time
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA, pubkey
from Crypto.Util.randpool import RandomPool
class FludRSA(RSA.RSAobj):
    """
    Subclasses the Crypto.PublicKey.RSAobj object to add access to the
    privatekey as well as methods for exporting and importing an RSA obj.
    """
    # shared entropy pool used by generate()
    rand = RandomPool()

    def __init__(self, rsa):
        # adopt the internal state (n, e, d, ...) of an existing RSAobj
        self.__setstate__(rsa.__getstate__())

    def publickey(self):
        # FludRSA holding only the public components (n, e)
        return FludRSA(RSA.construct((self.n, self.e)))

    def privatekey(self):
        # FludRSA holding the private components (n, e, d)
        return FludRSA(RSA.construct((self.n, self.e, self.d)))

    def encrypt(self, message):
        # raw RSA encrypt; the extra argument to RSAobj.encrypt is unused
        return RSA.RSAobj.encrypt(self, message, "")

    def exportPublicKey(self):
        # public key as its state dict (keys 'e' and 'n')
        return self.publickey().__getstate__()

    def exportPrivateKey(self):
        # private key as its state dict
        return self.privatekey().__getstate__()

    def id(self):
        """
        returns the hashstring of the public key
        """
        #return hashstring(str(self.exportPublicKey()))
        return hashstring(str(self.exportPublicKey()['n']))

    def importPublicKey(key):
        """
        Can take, as key, a dict describing the public key ('e' and 'n'), a
        string describing n, or a long describing n (in the latter two cases, e
        is assumed to be 65537L).
        """
        if isinstance(key, str):
            # hex string representing n
            key = long(key, 16)
            key = {'e': 65537L, 'n': key}
        elif isinstance(key, long):
            key = {'e': 65537L, 'n': key}
        if isinstance(key, dict):
            state = key
            # construct a placeholder key, then overwrite its state
            pkey = RSA.construct((0L,0L))
            pkey.__setstate__(state)
            return FludRSA(pkey)
        else:
            raise TypeError("type %s not supported by importPublicKey():"\
                    " try dict with keys of 'e' and 'n', string representing"\
                    " 'n', or long representing 'n'." % type(key))
    importPublicKey = staticmethod(importPublicKey)

    def importPrivateKey(key):
        # rebuild a private key from a state dict (must include 'd')
        state = key
        pkey = RSA.construct((0L,0L,0L))
        pkey.__setstate__(state)
        return FludRSA(pkey)
    importPrivateKey = staticmethod(importPrivateKey)

    def generate(keylength):
        # fresh keypair of `keylength` bits, fed from the shared pool
        return FludRSA(RSA.generate(keylength, FludRSA.rand.get_bytes))
    generate = staticmethod(generate)
def generateKeys(len=1024):
    """Generate a fresh keypair; returns (public, private) FludRSA objects."""
    keypair = FludRSA.generate(len)
    return keypair.publickey(), keypair.privatekey()
def hashstring(string):
    """Return the SHA-256 hex digest of `string`."""
    digest = SHA256.new()
    digest.update(string)
    return digest.hexdigest()
def hashfile(filename):
    """
    Return the SHA-256 hex digest of a file's contents, read in 1MiB
    chunks so arbitrarily large files hash in constant memory.
    """
    sha256 = SHA256.new()
    # binary mode: text mode can translate line endings on some platforms,
    # changing the digest; 'with' closes the handle even if read() raises
    with open(filename, "rb") as f:
        while 1:
            buf = f.read(1048576) # XXX: 1Mb - magic number
            if not buf:  # EOF (works for both str and bytes buffers)
                break
            sha256.update(buf)
    return sha256.hexdigest()
def hashstream(file, len):
    """
    Return the SHA-256 hex digest of the next `len` bytes of the already
    open stream `file`, reading in chunks of up to 1MiB.
    """
    sha256 = SHA256.new()
    chunksize = 1048576 # XXX: 1Mb - magic number
    remaining = len
    while remaining > 0:
        readsize = min(chunksize, remaining)
        buf = file.read(readsize)
        if buf == "":
            break
        sha256.update(buf)
        remaining -= readsize
    return sha256.hexdigest()
def generateRandom(n):
    """Return `n` bytes of random data from a dedicated entropy pool."""
    # a separate RandomPool instance is used on purpose (independent of
    # the pool FludRSA uses for key generation)
    pool = RandomPool()
    return pool.get_bytes(n)
def hashcash(match, len, timestamp=False):
    """ trys to find a hash collision of len significant bits.  Returns
    the 256-bit string that produced the collision.  Uses sha256, so match
    should be a sha256 hashstring (as a hexstring), and len should be between
    0 and 256 (lengths close to 256 are intractable).  The timestamp field
    determines whether the current timestamp should be inserted into the
    pre-hash result (to stem sybil attacks targetting specific IDs).
    The result is hex-encoded, so to arrive at the matching hashvalue, you
    would hashstring(binascii.unhexlify(result)).
    """
    matchint = long(match,16)
    # matching the top `len` bits == xor-distance below 2**(256-len)
    len = 2**(256-len)
    # BUG FIX: the original tested an undefined name 'date' here (and again
    # below), raising NameError whenever this function ran; the parameter
    # is named 'timestamp'.
    if timestamp:
        gtime = struct.pack("I",int(time.time()))
    while True:
        attempt = generateRandom(32) # 32 random bytes = 256 random bits
        if timestamp:
            # rewrite the 2 lsBs of attempt with the 2 msBs of gtime (time
            # granularity is thus 65536 seconds, or just over 18 hours between
            # intervals -- more than enough for a refresh monthly, weekly, or
            # even daily value)
            attempt = attempt[0:30]+gtime[2:4]
        attempthash = hashstring(attempt)
        attemptint = long(attempthash,16)
        distance = operator.xor(matchint, attemptint)
        if distance < len:
            break
    return binascii.hexlify(attempt)
# XXX: should move all testing to doctest
if __name__ == '__main__':
    # smoke test: key generation, export/import round trips through all
    # three accepted public-key formats, then encrypt/decrypt round trips
    fludkey = FludRSA.generate(1024)
    print "fludkey (pub) is: "+str(fludkey.exportPublicKey())
    print "fludkey (priv) is: "+str(fludkey.exportPrivateKey())
    print ""
    pubkeystring = fludkey.exportPublicKey()
    pubkeylongn = pubkeystring['n']           # 'n' as a long
    pubkeystringn = hex(pubkeystring['n'])    # 'n' as a hex string
    privkeystring = fludkey.exportPrivateKey()
    fludkeyPub = FludRSA.importPublicKey(pubkeystring)
    print "fludkeyPub is: "+str(fludkeyPub.exportPublicKey())
    fludkeyPub2 = FludRSA.importPublicKey(pubkeystringn)
    print "fludkeyPub2 is: "+str(fludkeyPub2.exportPublicKey())
    fludkeyPub3 = FludRSA.importPublicKey(pubkeylongn)
    print "fludkeyPub3 is: "+str(fludkeyPub3.exportPublicKey())
    fludkeyPriv = FludRSA.importPrivateKey(privkeystring)
    print "fludkeyPriv is: "+str(fludkeyPriv.exportPrivateKey())
    plaintext = "test message"
    print "plaintext is: "+plaintext
    ciphertext = fludkeyPub.encrypt(plaintext)
    print "ciphertext is: "+str(ciphertext)
    plaintext2 = fludkeyPriv.decrypt(ciphertext)
    print "decrypted plaintext is: "+plaintext2
    randstring = str(generateRandom(80))
    print "80 bytes of random data: '"+binascii.hexlify(randstring)
    # binary round trip; FludRSA.encrypt returns a tuple, hence the [0]
    data1='\x00\x1e4%`K\xef\xf6\xdd\x8a\x0eUP\x7f\xb0G\x1d\xb9\xe4\x82\x11n\n\xff\x1a\xc9\x013\xe9\x8e\x99\xb0]M@y\x86l\xb3l'
    edata1=fludkeyPub.encrypt(data1)[0]
    data2=fludkeyPriv.decrypt(edata1)
    print binascii.hexlify(data1)
    print binascii.hexlify(data2)
    print data1 == data2
| Python |
"""
FludFileCoder.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Provides wrapper functionality around ldpc python module, for encoding and
decoding files.
"""
from filecoder import c_Coder, c_Decoder
class Coder:
    # Thin Python wrapper around the C extension's c_Coder, which encodes a
    # file into dataBlocks+parityBlocks pieces (LDPC coding, per the module
    # docstring).
    def __init__(self, dataBlocks, parityBlocks, leftDegree):
        # leftDegree parameterizes the LDPC coding graph
        self.c_coder = c_Coder(dataBlocks, parityBlocks, leftDegree)

    def codeData(self, filename, stem):
        # encode `filename`, producing block files named from `stem`;
        # returns whatever the underlying c_Coder returns
        return self.c_coder.codeData(filename, stem)
class Decoder:
    # Thin Python wrapper around the C extension's c_Decoder: feed it coded
    # block files one at a time until done() reports the original file has
    # been reconstructed into destFile.
    def __init__(self, destFile, dataBlocks, parityBlocks, leftDegree):
        self.c_decoder = c_Decoder(destFile, dataBlocks, parityBlocks,
                leftDegree)

    def done(self):
        # true once enough blocks have been supplied to finish decoding
        return self.c_decoder.done()

    def decodeData(self, filename):
        # feed one coded block file to the decoder
        return self.c_decoder.decodeData(filename)
| Python |
"""
FludServer.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
flud server operations
"""
import threading, binascii, time, os, stat, httplib, gc, re, sys, logging, sets
from twisted.web import server, resource, client
from twisted.web.resource import Resource
from twisted.internet import reactor, threads, defer
from twisted.web import http
from twisted.python import threadable, failure
from flud.FludCrypto import FludRSA
import flud.FludkRouting
from ServerPrimitives import *
from ServerDHTPrimitives import *
from LocalPrimitives import *
from FludCommUtil import *
threadable.init()
class FludServer(threading.Thread):
    """
    This class runs the webserver, responding to all requests.
    """
    def __init__(self, node, port):
        # Wire up the external (node-to-node) HTTP resource tree and the
        # loopback-only local client listener; the reactor is started later
        # by run().
        threading.Thread.__init__(self)
        self.port = port
        self.node = node
        self.clientport = node.config.clientport
        self.logger = node.logger
        self.root = ROOT(self)
        # map each protocol op to its resource, using the node-configured
        # command name aliases
        commandmap = node.config.commandmap
        self.root.putChild(commandmap['ID'], ID(self))
        self.root.putChild(commandmap['STORE'], STORE(self))
        self.root.putChild(commandmap['RETRIEVE'], RETRIEVE(self))
        self.root.putChild(commandmap['VERIFY'], VERIFY(self))
        self.root.putChild(commandmap['PROXY'], PROXY(self))
        self.root.putChild(commandmap['DELETE'], DELETE(self))
        self.root.putChild(commandmap['kFINDNODE'], kFINDNODE(self))
        self.root.putChild(commandmap['kFINDVAL'], kFINDVAL(self))
        self.root.putChild(commandmap['kSTORE'], kSTORE(self))
        self.site = server.Site(self.root)
        reactor.listenTCP(self.port, self.site)
        # local client commands are accepted from loopback only
        reactor.listenTCP(self.clientport, LocalFactory(node),
                interface="127.0.0.1")
        #print "FludServer will listen on port %d, local client on %d"\
        #		% (self.port, self.clientport)
        self.logger.log(logging.INFO,\
                "FludServer will listen on port %d, local client on %d"
                % (self.port, self.clientport))

    def run(self):
        # thread body: run the reactor (signal handlers disabled because
        # this is not the main thread)
        self.logger.log(logging.INFO, "FludServer starting")
        return reactor.run(installSignalHandlers=0)

    def stop(self):
        self.logger.log(logging.INFO, "FludServer stopping")
        reactor.stop()
| Python |
"""
LocalPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Protocol for talking to a flud node locally (from client code -- command line,
GUI, etc).
Each command in the local protocol begins with 4 bytes designating the type of
command. These are followed by a status byte, which is either '?'=request,
':'=success response, or '!'=failure response. Arguments to the command follow
the status byte.
"""
import binascii, time, os, stat, httplib, gc, re, sys, logging, sets
from twisted.web.resource import Resource
from twisted.web import server, resource, client
from twisted.internet import protocol, reactor, threads, defer
from twisted.protocols import basic
from twisted.mail import smtp
from twisted.python import failure
from Crypto.Cipher import AES
from flud.FludCrypto import FludRSA, hashstring, generateRandom
import flud.FludkRouting
from flud.fencode import fencode, fdecode
from flud.FludFileOperations import *
from FludCommUtil import *
from FludServer import *
logger = logging.getLogger("flud.local.server")
"""
Protocol and Factory for local client/server communication
"""
MAXCONCURRENT = 300
(CONCURR, MAX, QUEUE) = (0, 1, 2) # indexes into LocalProtocol.commands
class LocalProtocol(basic.LineReceiver):
authenticated = False
commands = {'PUTF': [0, MAXCONCURRENT, []], 'GETF': [0, MAXCONCURRENT, []],
'GETI': [0, MAXCONCURRENT, []], 'FNDN': [0, 1, []],
'STOR': [0, MAXCONCURRENT, []], 'RTRV': [0, MAXCONCURRENT, []],
'VRFY': [0, MAXCONCURRENT, []], 'FNDV': [0, 1, []],
'CRED': [0, 1, []], 'LIST': [0, 1, []], 'GETM': [0, 1, []],
'PUTM': [0, 1, []] }
def connectionMade(self):
logger.info("client connected")
self.authenticated=False
def connectionLost(self, reason):
self.authenticated=False
def doOp(self, command, fname):
#print "got command '%s'" % command
if command == "PUTF":
logger.debug("PUTF %s", fname);
return StoreFile(self.factory.node, fname).deferred
elif command == "GETI":
logger.debug("GETI %s", fname);
return RetrieveFile(self.factory.node, fname).deferred
elif command == "GETF":
logger.debug("GETF %s", fname);
return RetrieveFilename(self.factory.node, fname).deferred
elif command == "FNDN":
logger.debug("FNDN %s" % fname);
try:
intval = long(fname, 16)
except:
return defer.fail("fname was not hex")
return self.factory.node.client.kFindNode(intval)
# The following is for testing aggregation of kFindNode on same key
#dl = []
#for i in [1,2,3,4,5]:
# d = self.factory.node.client.kFindNode(intval)
# dl.append(d)
#dlist = defer.DeferredList(dl)
#return dlist
elif command == "FNDV":
logger.debug("FNDV %s", fname);
try:
intval = long(fname, 16)
except:
return defer.fail("fname was not hex")
return self.factory.node.client.kFindValue(intval)
elif command == "CRED":
passphrase, email = fdecode(fname)
# XXX: allow an optional passphrase hint to be sent in email.
passphrase = self.factory.node.config.Kr.decrypt(passphrase)
logger.debug("CRED %s to %s", passphrase, email);
Kr = self.factory.node.config.Kr.exportPrivateKey()
Kr['g'] = self.factory.node.config.groupIDr
fKr = fencode(Kr)
key = AES.new(binascii.unhexlify(hashstring(passphrase)))
fKr = '\x00'*(16-(len(fKr)%16))+fKr
efKr = fencode(key.encrypt(fKr))
logger.debug("efKr = %s " % efKr)
d = smtp.sendmail('localhost', "your_flud_client@localhost",
email,
"Subject: Your encrypted flud credentials\n\n"
"Hopefully, you'll never need to use this email. Its "
"sole purpose is to help you recover your data after a "
"catastrophic and complete loss of the original computer "
"or hard drive.\n\n"
"In that unlucky event, you'll need a copy of your flud "
"credentials, which I've included below, sitting between "
"the \"---+++---\" markers. These credentials were "
"encrypted with a passphrase of your choosing when you "
"installed the flud software. I'll only say this "
"once:\n\n"
"YOU MUST REMEMBER THAT PASSWORD IN ORDER TO RECOVER YOUR "
"CREDENTIALS. If you are unable to remember the "
"passphrase and your computer fails catastrophically "
"(losing its local copy of these credentials), you will "
"not be able to recover your data."
"\n\n"
"Luckily, that's all you should ever need in order to "
"recover all your data: your passphrase and these "
"credentials."
"\n\n"
"Please save this email. You may want to print out hard "
"copies and store them safely, forward this email to "
"other email accounts, etc. Since the credentials are "
"encrypted, others won't be able to steal them "
"without guessing your passphrase. "
"\n\n"
"---+++---\n"+efKr+"\n---+++---\n")
return d
# to decode this email, we search for the '---+++---' markers, make
# sure the intervening data is all in one piece (remove any line
# breaks \r or \n inserted by email clients) and call this 'cred',
# reconstruct the AES key with the H(passphrase) (as above), and
# then use the key to .decrypt(fdecode(cred)) and call this dcred,
# then fdecode(dcred[dcred.find('d'):]) and call this ddcred, and
# finally importPrivateKey(ddcred) and set groupIDr to ddcred['g'].
elif command == "LIST":
logger.debug("LIST")
return defer.succeed(self.factory.config.master)
elif command == "GETM":
logger.debug("GETM")
return RetrieveMasterIndex(self.factory.node).deferred
elif command == "PUTM":
logger.debug("PUTM")
return UpdateMasterIndex(self.factory.node).deferred
else:
#print "fname is '%s'" % fname
host = fname[:fname.find(':')]
port = fname[fname.find(':')+1:fname.find(',')]
fname = fname[fname.find(',')+1:]
print "%s: %s : %s , %s" % (command, host, port, fname)
if command == "STOR":
logger.debug("STOR");
return self.factory.node.client.sendStore(fname, None,
host, int(port))
elif command == "RTRV":
logger.debug("RTRV");
return self.factory.node.client.sendRetrieve(fname, host,
int(port))
elif command == "VRFY":
logger.debug("VRFY");
offset = port[port.find(':')+1:port.find('-')]
length = port[port.find('-')+1:]
port = port[:port.find(':')]
print "%s: %s : %s %s - %s , %s" % (command, host, port,
offset, length, fname)
return self.factory.node.client.sendVerify(fname, int(offset),
int(length), host, int(port))
else:
logger.debug("bad op");
return defer.fail("bad op")
def serviceQueue(self, command):
if len(self.commands[command][QUEUE]) > 0 and \
self.commands[command][CONCURR] <= self.commands[command][MAX]:
data = self.commands[command][QUEUE].pop()
logger.info("servicing queue['%s'], item %s" % (command, data))
print "taking %s off the queue" % command
d = self.doOp(command, data)
d.addCallback(self.sendSuccess, command, data)
d.addErrback(self.sendFailure, command, data)
def sendSuccess(self, resp, command, data, prepend=None):
logger.debug("SUCCESS! "+command+":"+data)
#logger.debug("response: '%s'" % (resp,))
if prepend:
w = "%s:%s %s:%s\r\n" % (prepend, command, fencode(resp), data)
else:
w = "%s:%s:%s\r\n" % (command, fencode(resp), data)
self.transport.write(w)
self.commands[command][CONCURR] -= 1
try:
self.serviceQueue(command)
except:
print sys.exec_info()
return resp
def sendFailure(self, err, command, data, prepend=None):
logger.debug("FAILED! %s!%s" % (command, data))
errmsg = err.getErrorMessage()
if prepend:
w = "%s!%s %s!%s\r\n" % (prepend, command, errmsg, data)
else:
w = "%s!%s!%s\r\n" % (command, errmsg, data)
logger.debug("sending %s" % w)
self.transport.write(w)
self.commands[command][CONCURR] -= 1
self.serviceQueue(command)
return err
def lineReceived(self, line):
logger.debug("lineReceived: '%s'" % line)
# commands: AUTH, PUTF, GETF, VRFY
# status: ? = request, : = successful response, ! = failed response
command = line[0:4]
status = line[4]
data = line[5:]
#print "data is '%s'" % data
if not self.authenticated and command == "AUTH":
if status == '?':
# asked for AUTH challenge to be sent. send it
logger.debug("AUTH challenge requested, sending")
echallenge = self.factory.sendChallenge()
self.transport.write("AUTH?"+echallenge+"\r\n")
elif status == ':' and self.factory.challengeAnswered(data):
# sent AUTH response and it passed
logger.debug("AUTH challenge successful")
self.authenticated = True
self.transport.write("AUTH:\r\n")
elif status == ':':
logger.debug("AUTH challenge failed")
self.transport.write("AUTH!\r\n")
elif command == "DIAG":
if data == "NODE":
logger.debug("DIAG NODE")
self.transport.write("DIAG:NODE%s\r\n" %
fencode(
self.factory.config.routing.knownExternalNodes()))
elif data == "BKTS":
logger.debug("DIAG BKTS")
bucks = eval("%s" % self.factory.config.routing.kBuckets)
self.transport.write("DIAG:BKTS%s\r\n" % fencode(bucks))
else:
dcommand = data[:4]
ddata = data[5:]
logger.debug("DIAG %s %s" % (dcommand, ddata))
self.commands[dcommand][CONCURR] += 1
d = self.doOp(dcommand, ddata)
d.addCallback(self.sendSuccess, dcommand, ddata, "DIAG")
d.addErrback(self.sendFailure, dcommand, ddata, "DIAG")
elif status == '?':
# requested an operation to be performed. If we are below our
# maximum concurrent ops, do the operation. Otherwise, put it on
# the queue to be serviced when current ops finish. Response is
# sent back to client when deferreds fire.
if self.commands[command][CONCURR] >= self.commands[command][MAX]:
#print "putting %s on the queue" % line
logger.info("received %s request, enqueuing" % command)
self.commands[command][QUEUE].insert(0, data)
else:
#print "doing %s" % line
logger.info("received %s request, executing" % command)
print self.commands[command]
self.commands[command][CONCURR] += 1
d = self.doOp(command, data)
d.addCallback(self.sendSuccess, command, data)
d.addErrback(self.sendFailure, command, data)
class LocalFactory(protocol.ServerFactory):
    # Factory for the loopback client protocol; holds the node reference and
    # implements the AUTH challenge/response handshake.
    protocol = LocalProtocol

    def __init__(self, node):
        self.node = node
        self.config = node.config

    def sendChallenge(self):
        # Random challenge encrypted to the node owner's public key: only
        # the holder of the matching private key can echo it back.
        # NOTE(review): `challengelength` is not defined in this module;
        # presumably it comes from the FludCommUtil star import -- confirm.
        self.challenge = fencode(generateRandom(challengelength))
        echallenge = self.config.Ku.encrypt(self.challenge)[0]
        echallenge = fencode(echallenge)
        return echallenge

    def challengeAnswered(self, resp):
        # client authenticates by returning the decrypted challenge
        return resp == self.challenge
| Python |
#!/usr/bin/python
"""
LocalClient.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
LocalClient provides client functions which can be called to send commands to
a local FludNode instance.
"""
import sys, os, time
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.python import threadable
threadable.init()
from flud.fencode import fencode, fdecode
from LocalPrimitives import *
logger = logging.getLogger("flud.local.client")
opTimeout = 1200
VALIDOPS = LocalProtocol.commands.keys() + ['AUTH', 'DIAG']
# XXX: print commands should either be raised, or put on factory.msgs
class LocalClient(LineReceiver):
MAX_LENGTH = 300000
auth=False
def connectionMade(self):
logger.debug("connection est.")
self.auth=False
self.sendLine("AUTH?")
def lineReceived(self, line):
logger.debug("received line '%s'" % line)
command = line[0:4]
if not command in VALIDOPS:
print "error: invalid command op ('%s')-- "\
" are you trying to connect to the wrong port"\
" (local client port is usually external port + 500)?"\
% command
return None
status = line[4]
data = line[5:]
if not self.auth:
if command == "AUTH" and status == '?':
# got challenge, send response
logger.debug("got AUTH challenge, sending response")
echallenge = data
self.sendLine("AUTH:"+self.factory.answerChallenge(echallenge))
return
elif command == "AUTH" and status == ':':
# response accepted, authenticated
logger.debug("AUTH challenge accepted, success")
self.auth = True
self.factory.clientReady(self)
#print "authenticated"
else:
if command == "AUTH" and status == "!":
logger.warn("authentication failed (is FLUDHOME set"
" correctly?)")
print "authentication failed (is FLUDHOME set correctly?)"
else:
logger.warn("unknown message received before being"
" authenticated:")
logger.warn(" %s : %s" % (command, status))
print "unknown message received before being authenticated:"
print " %s : %s" % (command, status)
self.factory.setDie()
elif command == "DIAG":
subcommand = data[:4]
data = data[4:]
if subcommand == "NODE":
logger.debug("DIAG NODE: %s" % data)
data = fdecode(data)
result = ""
for i in data:
petID = "%064x" % i[2]
netID = "%s:%d" % (i[0], i[1])
petID = petID[:(75-len(netID))]+"..."
result += "%s %s\n" % (netID, petID)
result += "%d known nodes\n" % len(data)
d = self.factory.pending['NODE'].pop('')
d.callback(result)
return
if subcommand == "BKTS":
logger.debug("DIAG BKTS")
data = fdecode(data)
result = ""
for i in data:
for bucket in i:
result += "Bucket %s:\n" % bucket
for k in i[bucket]:
id = "%064x" % k[2]
netID = "%s:%d" % (k[0], k[1])
result += " %s %s...\n" \
% (netID,id[:72-len(netID)])
d = self.factory.pending['BKTS'].pop('')
d.callback(result)
return
elif status == ':':
response, data = data.split(status, 1)
logger.debug("DIAG %s: success" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("DIAG %s: failure" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.errback(failure.DefaultException(response))
elif status == ':':
response, data = data.split(status, 1)
logger.debug("%s: success" % command)
d = self.factory.pending[command].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("%s: failure" % command)
if self.factory.pending.has_key(command):
if not self.factory.pending[command].has_key(data):
print "data key is '%s'" % data
print "pending is '%s'" % self.factory.pending[command]
if len(self.factory.pending[command]):
d = self.factory.pending[command].popitem()
d.errback(failure.DefaultException(response))
else:
d = self.factory.pending[command].pop(data)
d.errback(failure.DefaultException(response))
else:
print "failed command '%s' not in pending?" % command
print "pending is: %s" % self.factory.pending
if command != 'AUTH' and command != 'DIAG' and \
not None in self.factory.pending[command].values():
logger.debug("%s done at %s" % (command, time.ctime()))
class LocalClientFactory(ClientFactory):
protocol = LocalClient
def __init__(self, config):
self.config = config
self.messageQueue = []
self.client = None
self.die = False
self.pending = {'PUTF': {}, 'CRED': {}, 'GETI': {}, 'GETF': {},
'FNDN': {}, 'STOR': {}, 'RTRV': {}, 'VRFY': {}, 'FNDV': {},
'CRED': {}, 'LIST': {}, 'GETM': {}, 'PUTM': {}, 'NODE': {},
'BKTS': {}}
def clientConnectionFailed(self, connector, reason):
#print "connection failed: %s" % reason
logger.warn("connection failed: %s" % reason)
self.cleanup("connection failed: %s" % reason)
def clientConnectionLost(self, connector, reason):
#print "connection lost: %s" % reason
logger.debug("connection lost: %s" % reason)
self.cleanup("connection lost: %s" % reason)
def cleanup(self, msg):
# override me for cleanup
print msg;
def clientReady(self, instance):
self.client = instance
logger.debug("client ready, sending [any] queued msgs")
for i in self.messageQueue:
self._sendMessage(i)
def _sendMessage(self, msg):
if self.client:
logger.debug("sending msg '%s'" % msg)
self.client.sendLine(msg)
else:
logger.debug("queueing msg '%s'" % msg)
self.messageQueue.append(msg)
def answerChallenge(self, echallenge):
logger.debug("answering challenge")
echallenge = (fdecode(echallenge),)
challenge = self.config.Kr.decrypt(echallenge)
return challenge
def expire(self, pending, key):
if pending.has_key(fname):
logger.debug("timing out operation for %s" % key)
#print "timing out operation for %s" % key
pending.pop(key)
def addFile(self, type, fname):
logger.debug("addFile %s %s" % (type, fname))
if not self.pending[type].has_key(fname):
d = defer.Deferred()
self.pending[type][fname] = d
self._sendMessage(type+"?"+fname)
return d
else:
return self.pending[type][fname]
def sendPING(self, host, port):
logger.debug("sendPING")
d = defer.Deferred()
d.errback(failure.DefaultException(
"ping not yet implemented in FludLocalClient"))
return d
def sendPUTF(self, fname):
logger.debug("sendPUTF %s" % fname)
if os.path.isdir(fname):
dirlist = os.listdir(fname)
dlist = []
for i in dirlist:
dlist.append(self.sendPUTF(os.path.join(fname,i)))
dl = defer.DeferredList(dlist)
return dl
elif not self.pending['PUTF'].has_key(fname):
d = defer.Deferred()
self.pending['PUTF'][fname] = d
self._sendMessage("PUTF?"+fname)
#reactor.callLater(opTimeout, self.expire, self.pendingPUTF, fname)
return d
def sendCRED(self, passphrase, email):
logger.debug("sendCRED")
key = fencode((self.config.Ku.encrypt(passphrase)[0], email))
if not self.pending['CRED'].has_key(key):
d = defer.Deferred()
self.pending['CRED'][key] = d
self._sendMessage("CRED?"+key)
return d
else:
return self.pending['CRED'][key]
def sendGETI(self, fID):
logger.debug("sendGETI")
if not self.pending['GETI'].has_key(fID):
d = defer.Deferred()
self.pending['GETI'][fID] = d
self._sendMessage("GETI?"+fID)
return d
else:
return self.pending['GETI'][fID]
def sendGETF(self, fname):
logger.debug("sendGETF")
master = listMeta(self.config)
if master.has_key(fname):
return self.addFile("GETF",fname)
elif fname[-1:] == os.path.sep:
dlist = []
for name in master:
if fname == name[:len(fname)]:
dlist.append(self.addFile("GETF",name))
dl = defer.DeferredList(dlist)
return dl
def sendFNDN(self, nID):
logger.debug("sendFNDN")
if not self.pending['FNDN'].has_key(nID):
d = defer.Deferred()
self.pending['FNDN'][nID] = d
self._sendMessage("FNDN?"+nID)
return d
else:
return self.pending['FNDN'][nID]
def sendLIST(self):
logger.debug("sendLIST")
if not self.pending['LIST'].has_key(""):
d = defer.Deferred()
self.pending['LIST'][''] = d
logger.debug("LIST['']=%s" % d)
self._sendMessage("LIST?")
return d
else:
return self.pending['LIST']['']
def sendGETM(self):
logger.debug("sendGETM")
if not self.pending['GETM'].has_key(''):
d = defer.Deferred()
self.pending['GETM'][''] = d
logger.debug("GETM['']=%s" % d)
self._sendMessage("GETM?")
return d
else:
return self.pending['GETM']['']
def sendPUTM(self):
logger.debug("sendPUTM")
if not self.pending['PUTM'].has_key(''):
d = defer.Deferred()
self.pending['PUTM'][''] = d
self._sendMessage("PUTM?")
return d
else:
return self.pending['PUTM']['']
def sendDIAGNODE(self):
logger.debug("sendDIAGNODE")
if not self.pending['NODE'].has_key(''):
d = defer.Deferred()
self.pending['NODE'][''] = d
self._sendMessage("DIAG?NODE")
return d
else:
return self.pending['NODE']['']
def sendDIAGBKTS(self):
logger.debug("sendDIAGBKTS")
if not self.pending['BKTS'].has_key(''):
d = defer.Deferred()
self.pending['BKTS'][''] = d
self._sendMessage("DIAG?BKTS")
return d
else:
return self.pending['BKTS']['']
def sendDIAGSTOR(self, command):
logger.debug("sendDIAGSTOR")
if not self.pending['STOR'].has_key(command):
d = defer.Deferred()
self.pending['STOR'][command] = d
self._sendMessage("DIAG?STOR "+command)
return d
else:
return self.pending['STOR'][command]
def sendDIAGRTRV(self, command):
logger.debug("sendDIAGRTRV")
if not self.pending['RTRV'].has_key(command):
d = defer.Deferred()
self.pending['RTRV'][command] = d
self._sendMessage("DIAG?RTRV "+command)
return d
else:
return self.pending['RTRV'][command]
def sendDIAGVRFY(self, command):
logger.debug("sendDIAGVRFY")
if not self.pending['VRFY'].has_key(command):
d = defer.Deferred()
self.pending['VRFY'][command] = d
self._sendMessage("DIAG?VRFY "+command)
return d
else:
return self.pending['VRFY'][command]
def sendDIAGFNDV(self, val):
logger.debug("sendDIAGFNDV")
if not self.pending['FNDV'].has_key(val):
d = defer.Deferred()
self.pending['FNDV'][val] = d
self._sendMessage("FNDV?"+val)
return d
else:
return self.pending['FNDV'][val]
def setDie(self):
self.die = True
# XXX: this should move into FludNode side of things (LocalClientPrimitives).
# anything that calls this should make calls ('LIST', others as necessary) to
# get at master metadata, otherwise we could have multiple writer problems.
# FludNode should make the file ro while running, too.
# And everyone that does anything with the master metadata should do it through
# methods of FludConfig, instead of by direct access to the file.
def listMeta(config):
    """
    Read and decode this node's master metadata file.

    Returns the fdecode()d metadata mapping, or {} if the file is empty.
    """
    fmaster = open(os.path.join(config.metadir,config.metamaster), 'r')
    try:
        master = fmaster.read()
    finally:
        # release the handle even if read() raises (was leaked before)
        fmaster.close()
    if master == "":
        master = {}
    else:
        master = fdecode(master)
    return master
| Python |
"""
ServerDHTPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive server DHT protocol
"""
import binascii, time, os, stat, httplib, gc, re, sys, logging, random, sets
from twisted.web.resource import Resource
from twisted.web import server, resource, http, client
from twisted.internet import reactor, defer
from twisted.python import failure
from flud.FludCrypto import FludRSA
from flud.fencode import fencode, fdecode
from ServerPrimitives import ROOT
from FludCommUtil import *
logger = logging.getLogger("flud.server.dht")
# XXX: move kRouting.insertNode code out of FludConfig. Add a 'addNode' method
# to FludProtocol module which calls config.addNode, calls
# kRouting.insertNode and if it gets a return value, calls sendGetID with
# the callback doing nothing (unless the header comes back in error) and
# the errback calling kRouting.replaceNode. Anywhere that we are
# currently called config.addNode, call this new method instead.
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: find everywhere we are sending longs and consider sending hex (or our
# own base-64) encoded instead
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should also
# consider Zooko's links in the parent to this post)
"""
The children of ROOT beginning with 'k' are kademlia protocol based.
"""
# XXX: need to do all the challenge/response jazz in the k classes
class kFINDNODE(ROOT):
    # Kademlia FIND_NODE primitive, served over HTTP GET.
    isLeaf = True
    def render_GET(self, request):
        """
        Return the k closest nodes to the target ID from local k-routing table

        Response body is a dict literal "{'id': ..., 'k': ...}" with our
        nodeID and the list of known-node entries.
        """
        self.setHeaders(request)
        # record the time of the last DHT operation on this node
        self.node.DHTtstamp = time.time()
        try:
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port', 'key')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by kFINDNODE"
            logger.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            logger.info("received kFINDNODE request from %s..."
                    % params['nodeID'][:10])
            # rebuild the requestor's RSA public key from its components
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            host = getCanonicalIP(request.getClientIP())
            #return "{'id': '%s', 'k': %s}"\
            #       % (self.config.nodeID,\
            #       self.config.routing.findNode(fdecode(params['key'])))
            kclosest = self.config.routing.findNode(fdecode(params['key']))
            # occasionally mix in one node that is NOT among the closest
            # (presumably to keep distant routing-table regions fresh --
            # TODO confirm intent)
            notclose = list(set(self.config.routing.knownExternalNodes())
                    - set(kclosest))
            if len(notclose) > 0 and len(kclosest) > 1:
                r = random.choice(notclose)
                #logger.info("**** got some notclose: %s:%d ****" % (r[0],r[1]))
                kclosest.append(r)
            #logger.info("returning kFINDNODE response: %s" % kclosest)
            # refresh the requestor in our routing table before replying
            updateNode(self.node.client, self.config, host,
                    int(params['port']), reqKu, params['nodeID'])
            return "{'id': '%s', 'k': %s}" % (self.config.nodeID, kclosest)
class kSTORE_true(ROOT):
# unrestricted kSTORE. Will store any key/value pair, as in generic
# kademlia. This should be unregistered in FludServer (can't allow
# generic stores).
isLeaf = True
def render_PUT(self, request):
self.setHeaders(request)
try:
required = ('nodeID', 'Ku_e', 'Ku_n', 'port', 'key', 'val')
params = requireParams(request, required)
except Exception, inst:
msg = inst.args[0] + " in request received by kSTORE_true"
logger.info(msg)
request.setResponseCode(http.BAD_REQUEST, "Bad Request")
return msg
else:
logger.info("received kSTORE_true request from %s..."
% params['nodeID'][:10])
reqKu = {}
reqKu['e'] = long(params['Ku_e'])
reqKu['n'] = long(params['Ku_n'])
reqKu = FludRSA.importPublicKey(reqKu)
host = getCanonicalIP(request.getClientIP())
updateNode(self.node.client, self.config, host,
int(params['port']), reqKu, params['nodeID'])
fname = self.config.kstoredir+'/'+params['key']
logger.info("storing dht data to %s" % fname)
f = open(fname, "wb")
f.write(params['val'])
f.close()
return ""
class kSTORE(ROOT):
# XXX: To prevent abuse of the DHT layer, we impose restrictions on its
# format. But format alone is not sufficient -- a malicious client
# could still format its data in a way that is allowed and gain
# arbitrary amounts of freeloading storage space in the DHT. To
# prevent this, nodes storing data in the DHT layer must also validate
# it. Validation simply requires that the blockIDs described in the
# kSTORE actually reside at a significant percentage of the hosts
# described in the kSTORE op. In other words, validation requires a
# VERIFY op for each block described in the kSTORE op. Validation can
# occur randomly sometime after a kSTORE operation, or at the time of
# the kSTORE op. The former is better, because it not only allows
# purging bad kSTOREs, but prevents them from happening in the first
# place (without significant conspiring among all participants).
# Since the originator of this request also needs to do a VERIFY,
# perhaps we can piggyback these through some means. And, since the
# kSTORE is replicated to k other nodes, each of which also should to
# a VERIFY, there are several ways to optimize this. One is for the k
# nodes to elect a single verifier, and allow the client to learn the
# result of the VERIFY op. Another is to allow each k node to do its
# own VERIFY, but stagger them in a way such that they can take the
# place of the originator's first k VERIFY ops. This could be
# coordinated or (perhaps better) allow each k node to randomly pick a
# time at which it will VERIFY, distributed over a period for which it
# is likely to cover many of the first k VERIFY ops generated by
# the originator. The random approach is nice because it is the same
# mechanism used by the k nodes to occasionally verify that the DHT
# data is valid and should not be purged.
isLeaf = True
def render_PUT(self, request):
self.setHeaders(request)
self.node.DHTtstamp = time.time()
try:
required = ('nodeID', 'Ku_e', 'Ku_n', 'port', 'key', 'val')
params = requireParams(request, required)
except Exception, inst:
msg = inst.args[0] + " in request received by kSTORE"
logger.info(msg)
request.setResponseCode(http.BAD_REQUEST, "Bad Request")
return msg
else:
logger.info("received kSTORE request from %s..."
% params['nodeID'][:10])
reqKu = {}
reqKu['e'] = long(params['Ku_e'])
reqKu['n'] = long(params['Ku_n'])
reqKu = FludRSA.importPublicKey(reqKu)
host = getCanonicalIP(request.getClientIP())
updateNode(self.node.client, self.config, host,
int(params['port']), reqKu, params['nodeID'])
fname = self.config.kstoredir+'/'+params['key']
md = fdecode(params['val'])
if not self.dataAllowed(params['key'], md, params['nodeID']):
msg = "malformed store data"
logger.info("bad data was: %s" % md)
request.setResponseCode(http.BAD_REQUEST, msg)
return msg
# XXX: see if there isn't already a 'val' for 'key' present
# - if so, compare to val. Metadata can differ. Blocks
# shouldn't. However, if blocks do differ, just add the
# new values in, up to N (3?) records per key. Flag these
# (all N) as ones we want to verify (to storer and storee).
# Expunge any blocks that fail verify, and punish storer's
# trust.
logger.info("storing dht data to %s" % fname)
if os.path.exists(fname) and isinstance(md, dict):
f = open(fname, "rb")
edata = f.read()
f.close()
md = self.mergeMetadata(md, fdecode(edata))
f = open(fname, "wb")
f.write(fencode(md))
f.close()
return "" # XXX: return a VERIFY reverse request: segname, offset
def dataAllowed(self, key, data, nodeID):
# ensures that 'data' is in [one of] the right format[s] (helps prevent
# DHT abuse)
def validValue(val):
if not isinstance(val, long) and not isinstance(val, int):
return False # not a valid key/nodeid
if val > 2**256 or val < 0: # XXX: magic 2**256, use fludkrouting
return False # not a valid key/nodeid
return True
def validMetadata(blockdata, nodeID):
# returns true if the format of data conforms to the standard for
# metadata
blocks = 0
try:
n = blockdata.pop('n')
m = blockdata.pop('m')
if n != 20 or m != 20:
# XXX: to support other than 20/20, need to constrain an
# upper bound and store multiple records with different m/n
# under the same key
return False
t = n+m
except:
return False
if not isinstance(n, int) or not isinstance(m, int):
return False
for (i, b) in blockdata:
if i > t:
return False
if not validValue(b):
#print "%s is invalid key" %i
return False
location = blockdata[(i,b)]
if isinstance(location, list):
if len(location) > 5:
#print "too many (list) nodeIDs" % j
return False
for j in location:
if not validValue(j):
#print "%s is invalid (list) nodeID" % j
return False
elif not validValue(location):
#print "%s is invalid nodeID" % location
return False
blocks += 1
if blocks != t:
return False # not the right number of blocks
blockdata['n'] = n
blockdata['m'] = m
return True
def validMasterCAS(key, data, nodeID):
# returns true if the data fits the characteristics of a master
# metadata CAS key, i.e., if key==nodeID and the data is the right
# length.
nodeID = fencode(long(nodeID,16))
if key != nodeID:
return False
# XXX: need to do challange/response on nodeID (just as in the
# regular primitives) here, or else imposters can store/replace
# this very important data!!!
# XXX: do some length stuff - should only be as long as a CAS key
return True
return (validMetadata(data, nodeID)
or validMasterCAS(key, data, nodeID))
def mergeMetadata(self, m1, m2):
# merges the data from m1 into m2. After calling, both m1 and m2
# contain the merged data.
"""
>>> a1 = {'b': {1: (1, 'a', 8), 2: (2, 'b', 8), 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a1, a2)
{'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80), (2, 'b', 8)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
>>> a1 = {'b': {1: (1, 'a', 8), 2: (2, 'b', 8), 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a2, a1)
{'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80), (2, 'b', 8)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
>>> a1 = {'b': {1: (1, 'a', 8), 2: [(2, 'b', 8), (7, 'r', 8)], 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a2, a1)
{'b': {1: (1, 'a', 8), 2: [(2, 'b', 8), (7, 'r', 8), (3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
>>> a1 = {'b': {1: [(1, 'a', 8)], 2: [(2, 'b', 8), (7, 'r', 8)], 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a1, a2)
{'b': {1: (1, 'a', 8), 2: [(2, 'b', 8), (7, 'r', 8), (3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
"""
# first merge blocks ('b' sections)
n = {}
for i in m2:
if m1.has_key(i) and m2[i] != m1[i]:
if isinstance(m1[i], list) and len(m1[i]) == 1:
m1[i] = m1[i][0] # collapse list of len 1
if isinstance(m2[i], list) and len(m2[i]) == 1:
m2[i] = m2[i][0] # collapse list of len 1
# combine
if isinstance(m1[i], list) and isinstance(m2[i], list):
n[i] = m2[i]
n[i].extend(m1[i])
elif isinstance(m2[i], list):
n[i] = m2[i]
n[i] = n[i].append(m1[i])
elif isinstance(m1[i], list):
n[i] = m1[i]
n[i] = n[i].append(m2[i])
elif m1[i] == m2[i]:
n[i] = m1[i]
else:
n[i] = [m1[i], m2[i]]
else:
n[i] = m2[i]
for i in m1:
if not n.has_key(i):
n[i] = m1[i]
# now n contains the merged blocks.
m1 = m2 = n
return m1
class kFINDVAL(ROOT):
    # Kademlia FIND_VALUE primitive, served over HTTP GET.
    isLeaf = True
    def render_GET(self, request):
        """
        Return the value, or if we don't have it, the k closest nodes to the
        target ID

        The Content-Type header distinguishes the two cases:
        'application/x-flud-data' carries the fencoded stored value,
        'application/x-flud-nodes' carries a dict of closest nodes.
        """
        self.setHeaders(request)
        self.node.DHTtstamp = time.time()
        try:
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port', 'key')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by kFINDVALUE"
            logger.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            logger.info("received kFINDVALUE request from %s..."
                    % params['nodeID'][:10])
            # rebuild the requestor's RSA public key and refresh our
            # routing-table entry for it
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            host = getCanonicalIP(request.getClientIP())
            updateNode(self.node.client, self.config, host,
                    int(params['port']), reqKu, params['nodeID'])
            fname = self.config.kstoredir+'/'+params['key']
            if os.path.isfile(fname):
                f = open(fname, "rb")
                logger.info("returning data from kFINDVAL")
                request.setHeader('nodeID',str(self.config.nodeID))
                request.setHeader('Content-Type','application/x-flud-data')
                d = fdecode(f.read())
                # if the stored dict has an entry keyed by the requestor's
                # nodeID, trim the response to the 'b' entry plus that
                # record (presumably to avoid shipping other nodes' data
                # -- TODO confirm intent)
                if isinstance(d, dict) and d.has_key(params['nodeID']):
                    #print d
                    resp = {'b': d['b'], params['nodeID']: d[params['nodeID']]}
                    #resp = {'b': d['b']}
                    #if d.has_key(params['nodeID']):
                    #    resp[params['nodeID']] = d[params['nodeID']]
                else:
                    resp = d
                request.write(fencode(resp))
                f.close()
                return ""
            else:
                # return the following if it isn't there.
                logger.info("returning nodes from kFINDVAL for %s" % params['key'])
                request.setHeader('Content-Type','application/x-flud-nodes')
                return "{'id': '%s', 'k': %s}"\
                        % (self.config.nodeID,\
                        self.config.routing.findNode(fdecode(params['key'])))
| Python |
#!/usr/bin/python
"""
LocalClient.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
LocalClient provides client functions which can be called to send commands to
a local FludNode instance.
"""
import sys, os, time
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.python import threadable
threadable.init()
from flud.fencode import fencode, fdecode
from LocalPrimitives import *
logger = logging.getLogger("flud.local.client")
opTimeout = 1200
VALIDOPS = LocalProtocol.commands.keys() + ['AUTH', 'DIAG']
# XXX: print commands should either be raised, or put on factory.msgs
class LocalClient(LineReceiver):
MAX_LENGTH = 300000
auth=False
def connectionMade(self):
logger.debug("connection est.")
self.auth=False
self.sendLine("AUTH?")
def lineReceived(self, line):
logger.debug("received line '%s'" % line)
command = line[0:4]
if not command in VALIDOPS:
print "error: invalid command op ('%s')-- "\
" are you trying to connect to the wrong port"\
" (local client port is usually external port + 500)?"\
% command
return None
status = line[4]
data = line[5:]
if not self.auth:
if command == "AUTH" and status == '?':
# got challenge, send response
logger.debug("got AUTH challenge, sending response")
echallenge = data
self.sendLine("AUTH:"+self.factory.answerChallenge(echallenge))
return
elif command == "AUTH" and status == ':':
# response accepted, authenticated
logger.debug("AUTH challenge accepted, success")
self.auth = True
self.factory.clientReady(self)
#print "authenticated"
else:
if command == "AUTH" and status == "!":
logger.warn("authentication failed (is FLUDHOME set"
" correctly?)")
print "authentication failed (is FLUDHOME set correctly?)"
else:
logger.warn("unknown message received before being"
" authenticated:")
logger.warn(" %s : %s" % (command, status))
print "unknown message received before being authenticated:"
print " %s : %s" % (command, status)
self.factory.setDie()
elif command == "DIAG":
subcommand = data[:4]
data = data[4:]
if subcommand == "NODE":
logger.debug("DIAG NODE: %s" % data)
data = fdecode(data)
result = ""
for i in data:
petID = "%064x" % i[2]
netID = "%s:%d" % (i[0], i[1])
petID = petID[:(75-len(netID))]+"..."
result += "%s %s\n" % (netID, petID)
result += "%d known nodes\n" % len(data)
d = self.factory.pending['NODE'].pop('')
d.callback(result)
return
if subcommand == "BKTS":
logger.debug("DIAG BKTS")
data = fdecode(data)
result = ""
for i in data:
for bucket in i:
result += "Bucket %s:\n" % bucket
for k in i[bucket]:
id = "%064x" % k[2]
netID = "%s:%d" % (k[0], k[1])
result += " %s %s...\n" \
% (netID,id[:72-len(netID)])
d = self.factory.pending['BKTS'].pop('')
d.callback(result)
return
elif status == ':':
response, data = data.split(status, 1)
logger.debug("DIAG %s: success" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("DIAG %s: failure" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.errback(failure.DefaultException(response))
elif status == ':':
response, data = data.split(status, 1)
logger.debug("%s: success" % command)
d = self.factory.pending[command].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("%s: failure" % command)
if self.factory.pending.has_key(command):
if not self.factory.pending[command].has_key(data):
print "data key is '%s'" % data
print "pending is '%s'" % self.factory.pending[command]
if len(self.factory.pending[command]):
d = self.factory.pending[command].popitem()
d.errback(failure.DefaultException(response))
else:
d = self.factory.pending[command].pop(data)
d.errback(failure.DefaultException(response))
else:
print "failed command '%s' not in pending?" % command
print "pending is: %s" % self.factory.pending
if command != 'AUTH' and command != 'DIAG' and \
not None in self.factory.pending[command].values():
logger.debug("%s done at %s" % (command, time.ctime()))
class LocalClientFactory(ClientFactory):
protocol = LocalClient
def __init__(self, config):
self.config = config
self.messageQueue = []
self.client = None
self.die = False
self.pending = {'PUTF': {}, 'CRED': {}, 'GETI': {}, 'GETF': {},
'FNDN': {}, 'STOR': {}, 'RTRV': {}, 'VRFY': {}, 'FNDV': {},
'CRED': {}, 'LIST': {}, 'GETM': {}, 'PUTM': {}, 'NODE': {},
'BKTS': {}}
def clientConnectionFailed(self, connector, reason):
#print "connection failed: %s" % reason
logger.warn("connection failed: %s" % reason)
self.cleanup("connection failed: %s" % reason)
def clientConnectionLost(self, connector, reason):
#print "connection lost: %s" % reason
logger.debug("connection lost: %s" % reason)
self.cleanup("connection lost: %s" % reason)
def cleanup(self, msg):
# override me for cleanup
print msg;
def clientReady(self, instance):
self.client = instance
logger.debug("client ready, sending [any] queued msgs")
for i in self.messageQueue:
self._sendMessage(i)
def _sendMessage(self, msg):
if self.client:
logger.debug("sending msg '%s'" % msg)
self.client.sendLine(msg)
else:
logger.debug("queueing msg '%s'" % msg)
self.messageQueue.append(msg)
def answerChallenge(self, echallenge):
logger.debug("answering challenge")
echallenge = (fdecode(echallenge),)
challenge = self.config.Kr.decrypt(echallenge)
return challenge
def expire(self, pending, key):
if pending.has_key(fname):
logger.debug("timing out operation for %s" % key)
#print "timing out operation for %s" % key
pending.pop(key)
def addFile(self, type, fname):
logger.debug("addFile %s %s" % (type, fname))
if not self.pending[type].has_key(fname):
d = defer.Deferred()
self.pending[type][fname] = d
self._sendMessage(type+"?"+fname)
return d
else:
return self.pending[type][fname]
def sendPING(self, host, port):
logger.debug("sendPING")
d = defer.Deferred()
d.errback(failure.DefaultException(
"ping not yet implemented in FludLocalClient"))
return d
def sendPUTF(self, fname):
logger.debug("sendPUTF %s" % fname)
if os.path.isdir(fname):
dirlist = os.listdir(fname)
dlist = []
for i in dirlist:
dlist.append(self.sendPUTF(os.path.join(fname,i)))
dl = defer.DeferredList(dlist)
return dl
elif not self.pending['PUTF'].has_key(fname):
d = defer.Deferred()
self.pending['PUTF'][fname] = d
self._sendMessage("PUTF?"+fname)
#reactor.callLater(opTimeout, self.expire, self.pendingPUTF, fname)
return d
def sendCRED(self, passphrase, email):
logger.debug("sendCRED")
key = fencode((self.config.Ku.encrypt(passphrase)[0], email))
if not self.pending['CRED'].has_key(key):
d = defer.Deferred()
self.pending['CRED'][key] = d
self._sendMessage("CRED?"+key)
return d
else:
return self.pending['CRED'][key]
def sendGETI(self, fID):
logger.debug("sendGETI")
if not self.pending['GETI'].has_key(fID):
d = defer.Deferred()
self.pending['GETI'][fID] = d
self._sendMessage("GETI?"+fID)
return d
else:
return self.pending['GETI'][fID]
def sendGETF(self, fname):
logger.debug("sendGETF")
master = listMeta(self.config)
if master.has_key(fname):
return self.addFile("GETF",fname)
elif fname[-1:] == os.path.sep:
dlist = []
for name in master:
if fname == name[:len(fname)]:
dlist.append(self.addFile("GETF",name))
dl = defer.DeferredList(dlist)
return dl
def sendFNDN(self, nID):
logger.debug("sendFNDN")
if not self.pending['FNDN'].has_key(nID):
d = defer.Deferred()
self.pending['FNDN'][nID] = d
self._sendMessage("FNDN?"+nID)
return d
else:
return self.pending['FNDN'][nID]
def sendLIST(self):
logger.debug("sendLIST")
if not self.pending['LIST'].has_key(""):
d = defer.Deferred()
self.pending['LIST'][''] = d
logger.debug("LIST['']=%s" % d)
self._sendMessage("LIST?")
return d
else:
return self.pending['LIST']['']
def sendGETM(self):
logger.debug("sendGETM")
if not self.pending['GETM'].has_key(''):
d = defer.Deferred()
self.pending['GETM'][''] = d
logger.debug("GETM['']=%s" % d)
self._sendMessage("GETM?")
return d
else:
return self.pending['GETM']['']
def sendPUTM(self):
logger.debug("sendPUTM")
if not self.pending['PUTM'].has_key(''):
d = defer.Deferred()
self.pending['PUTM'][''] = d
self._sendMessage("PUTM?")
return d
else:
return self.pending['PUTM']['']
def sendDIAGNODE(self):
    """
    Send a DIAG?NODE message.  All callers share one outstanding
    request, tracked under the empty-string key of pending['NODE'].
    """
    logger.debug("sendDIAGNODE")
    outstanding = self.pending['NODE']
    if '' in outstanding:
        return outstanding['']
    deferred = defer.Deferred()
    outstanding[''] = deferred
    self._sendMessage("DIAG?NODE")
    return deferred
def sendDIAGBKTS(self):
    """
    Send a DIAG?BKTS message.  All callers share one outstanding
    request, tracked under the empty-string key of pending['BKTS'].
    """
    logger.debug("sendDIAGBKTS")
    outstanding = self.pending['BKTS']
    if '' in outstanding:
        return outstanding['']
    deferred = defer.Deferred()
    outstanding[''] = deferred
    self._sendMessage("DIAG?BKTS")
    return deferred
def sendDIAGSTOR(self, command):
    """
    Send a 'DIAG?STOR <command>' message.  Concurrent requests for the
    same command share one Deferred.
    """
    logger.debug("sendDIAGSTOR")
    outstanding = self.pending['STOR']
    if command in outstanding:
        return outstanding[command]
    deferred = defer.Deferred()
    outstanding[command] = deferred
    self._sendMessage("DIAG?STOR "+command)
    return deferred
def sendDIAGRTRV(self, command):
    """
    Send a 'DIAG?RTRV <command>' message.  Concurrent requests for the
    same command share one Deferred.
    """
    logger.debug("sendDIAGRTRV")
    outstanding = self.pending['RTRV']
    if command in outstanding:
        return outstanding[command]
    deferred = defer.Deferred()
    outstanding[command] = deferred
    self._sendMessage("DIAG?RTRV "+command)
    return deferred
def sendDIAGVRFY(self, command):
    """
    Send a 'DIAG?VRFY <command>' message.  Concurrent requests for the
    same command share one Deferred.
    """
    logger.debug("sendDIAGVRFY")
    outstanding = self.pending['VRFY']
    if command in outstanding:
        return outstanding[command]
    deferred = defer.Deferred()
    outstanding[command] = deferred
    self._sendMessage("DIAG?VRFY "+command)
    return deferred
def sendDIAGFNDV(self, val):
    """
    Send a FNDV?<val> message.  Concurrent requests for the same value
    share one Deferred (tracked in pending['FNDV']).
    """
    logger.debug("sendDIAGFNDV")
    outstanding = self.pending['FNDV']
    if val in outstanding:
        return outstanding[val]
    deferred = defer.Deferred()
    outstanding[val] = deferred
    self._sendMessage("FNDV?"+val)
    return deferred
def setDie(self):
    """Flag this instance for shutdown by setting its die attribute."""
    self.die = True
# XXX: this should move into FludNode side of things (LocalClientPrimitives).
# anything that calls this should make calls ('LIST', others as necessary) to
# get at master metadata, otherwise we could have multiple writer problems.
# FludNode should make the file ro while running, too.
# And everyone that does anything with the master metadata should do it through
# methods of FludConfig, instead of by direct access to the file.
def listMeta(config):
    """
    Read and decode the master metadata file named by
    config.metadir/config.metamaster.

    @param config: object with 'metadir' and 'metamaster' attributes
    @return: the fdecode()d contents, or {} when the file is empty
    """
    # 'with' guarantees the handle is closed even if read() raises
    # (the original leaked the handle on error)
    with open(os.path.join(config.metadir, config.metamaster), 'r') as fmaster:
        master = fmaster.read()
    if master == "":
        return {}
    return fdecode(master)
| Python |
"""
ConnectionQueue, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
This module manages the connection queue. In order to reduce the
probability of the reactor getting tied up servicing requests/responses
during periods of extreme busy-ness (and thus 'starving' some ops,
causing TimeoutErrors), we throttle the number of outstanding requests
that we send to MAXOPS. The rest, we put in the 'waiting' queue, and
these are popped off when a spot becomes available.
"""
import logging
MAXOPS = 80 # maximum number of concurrent connections to maintain
pending = 0 # number of current connections
waiting = [] # queue of waiting connections to make. This queue contains
# tuples. The first element of the tuple must have a
# startRequest() func that takes the rest of the tuple as
# arguments.
logger = logging.getLogger("flud.client.connq")
def checkWaiting(resp, finishedOne=True):
    """
    Pop one queued request off 'waiting' (if a slot is free) and invoke
    its startRequest() method.  Anything that calls queueWaiting() should
    eventually arrange for this to run (usually via a callback/errback
    chain); 'resp' is returned unchanged so this function can sit
    transparently in that chain.
    """
    global pending
    queued = len(waiting)
    logger.debug("in checkWaiting, len(waiting) = %d" % queued)
    if finishedOne:
        pending -= 1
        logger.debug("decremented pending to %s" % pending)
    if queued > 0 and pending < MAXOPS:
        entry = waiting.pop(0)
        op = entry[0]
        opargs = entry[1:]
        logger.debug("w: %d, p: %d, restoring Request %s(%s)" % (queued,
                pending, op.__class__.__name__, str(opargs)))
        op.startRequest(*opargs)
        pending += 1
    return resp
def enqueue(requestTuple):
    """
    Queue a request for later dispatch.  requestTuple[0] must provide a
    startRequest() method, which will be invoked (via checkWaiting) with
    requestTuple[1:] as its arguments when a slot opens up.
    """
    op = requestTuple[0]
    waiting.append(requestTuple)
    logger.debug("trying to do %s now..." % op.__class__.__name__)
    checkWaiting(None, finishedOne=False)
| Python |
"""
ClientPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive client storage protocol
"""
from twisted.web import http, client
from twisted.internet import reactor, threads, defer, error
from twisted.python import failure
import time, os, stat, httplib, sys, logging, tarfile, gzip
from StringIO import StringIO
from flud.FludCrypto import FludRSA
from flud.fencode import fencode, fdecode
import ConnectionQueue
from FludCommUtil import *
logger = logging.getLogger("flud.client.op")
loggerid = logging.getLogger("flud.client.op.id")
loggerstor = logging.getLogger("flud.client.op.stor")
loggerstoragg = logging.getLogger("flud.client.op.stor.agg")
loggerrtrv = logging.getLogger("flud.client.op.rtrv")
loggerdele = logging.getLogger("flud.client.op.dele")
loggervrfy = logging.getLogger("flud.client.op.vrfy")
loggerauth = logging.getLogger("flud.client.op.auth")
MINSTORSIZE = 512000 # anything smaller than this tries to get aggregated
TARFILE_TO = 2 # timeout for checking aggregated tar files
MAXAUTHRETRY = 4 # number of times to retry auth
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should also
# consider Zooko's links in the parent to this post)
# XXX: disallow requests to self.
class REQUEST(object):
    """
    Base class for HTTP requests that follow the FludProtocol.

    By convention, subclasses build a URL and begin retrieving it in
    their constructor.
    """
    def __init__(self, host, port, node=None):
        """
        @param host: target host
        @param port: target port
        @param node: the requestor's node object; when given, self.node
            and self.config are populated from it
        """
        self.host = host
        self.port = port
        self.dest = "%s:%d" % (host, port)
        if node:
            self.node = node
            self.config = node.config
        self.headers = {'User-Agent': 'FludClient 0.1',
                'Fludprotocol': fludproto_ver}
class SENDGETID(REQUEST):
    """
    Request a remote node's ID (public key).  This is a reciprocal
    request -- we must send our own ID in order to get one back.  The
    result (a FludRSA public key) is delivered via self.deferred.
    """
    def __init__(self, node, host, port):
        """
        Build the ID request URL and enqueue it via ConnectionQueue.
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        Ku = self.node.config.Ku.exportPublicKey()
        url = "http://"+host+":"+str(port)+"/ID?"
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        self.timeoutcount = 0
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, node, host, port, url))

    def startRequest(self, node, host, port, url):
        # invoked by ConnectionQueue when a connection slot frees up
        loggerid.info("sending SENDGETID to %s" % self.dest)
        d = self._sendRequest(node, host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)
        d.addErrback(self._errID, node, host, port, url)

    def _sendRequest(self, node, host, port, url):
        # issue the HTTP request; timeout retries happen in _errID
        factory = getPageFactory(url, timeout=primitive_to,
                headers=self.headers)
        d2 = factory.deferred
        d2.addCallback(self._getID, factory, host, port)
        d2.addErrback(self._errID, node, host, port, url)
        return d2

    def _getID(self, response, factory, host, port):
        """
        Validate the HTTP status and import the returned public key.
        Raises on a missing/non-OK status or an unparsable key.
        """
        loggerid.debug( "received ID response: %s" % response)
        if not hasattr(factory, 'status'):
            raise failure.DefaultException(
                    "SENDGETID FAILED: no status in factory")
        # fix: was eval(factory.status) -- int() is equivalent for an
        # HTTP status string and does not execute arbitrary text
        if int(factory.status) != http.OK:
            raise failure.DefaultException("SENDGETID FAILED to "+self.dest+": "
                    +"server sent status "+factory.status+", '"+response+"'")
        try:
            # XXX(security): eval() of data received off the network; the
            # response (expected to be a dict literal of key components)
            # should be parsed with a safe decoder instead
            nKu = eval(response)
            nKu = FludRSA.importPublicKey(nKu)
            loggerid.info("SENDGETID PASSED to %s" % self.dest)
            updateNode(self.node.client, self.config, host, port, nKu)
            return nKu
        except Exception:
            raise failure.DefaultException("SENDGETID FAILED to "+self.dest+": "
                    +"received response, but it did not contain valid key")

    def _errID(self, err, node, host, port, url):
        """
        Errback: retry timeouts/lost connections up to MAXTIMEOUTS,
        otherwise pass the failure through.
        """
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                return self._sendRequest(node, host, port, url)
        # XXX: updateNode
        return err
# XXX: either 1) produce filekey here or 2) send it in as part of API
# (similar fixes for SENDRETRIEVE and VERIFY)? Currently filekey is
# chosen by caller, and is simply the filename.
class SENDSTORE(REQUEST):
    """
    Upload a data file (and optional metadata) to a remote node's /STORE
    endpoint.  The request is enqueued through ConnectionQueue; the
    outcome is delivered via self.deferred.  A 401 response triggers a
    challenge/response round and a retry with credentials.
    """
    def __init__(self, nKu, node, host, port, datafile, metadata=None, fsize=0):
        """
        Try to upload a file.
        @param nKu: target node's public key (FludRSA)
        @param datafile: path of the file to store; its basename is sent
            as the filekey
        @param metadata: optional (metakey, file-or-path) pair
        @param fsize: size of datafile; stat'd here when not supplied
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        loggerstor.info("sending STORE request to %s" % self.dest)
        if not fsize:
            fsize = os.stat(datafile)[stat.ST_SIZE]
        Ku = self.node.config.Ku.exportPublicKey()
        params = [('nodeID', self.node.config.nodeID),
                ('Ku_e', str(Ku['e'])),
                ('Ku_n', str(Ku['n'])),
                ('port', str(self.node.config.port)),
                ('filekey', os.path.basename(datafile)),
                ('size', str(fsize))]
        self.timeoutcount = 0
        self.deferred = defer.Deferred()
        # final True = skipFile: the first request omits the file data
        # (see _sendRequest docstring); the data is sent on the retry
        # after any auth challenge is answered
        ConnectionQueue.enqueue((self, self.headers, nKu, host, port,
                datafile, metadata, params, True))

    def startRequest(self, headers, nKu, host, port, datafile, metadata,
            params, skipFile):
        # invoked by ConnectionQueue when a connection slot frees up
        d = self._sendRequest(headers, nKu, host, port, datafile, metadata,
                params, skipFile)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, headers, nKu, host, port, datafile, metadata,
            params, skipfile=False):
        """
        skipfile - set to True if you want to send everything but file data
        (used to send the unauthorized request before responding to challenge)
        """
        if skipfile:
            files = [(None, 'filename')]
        elif metadata:
            metakey = metadata[0]
            params.append(('metakey', metakey))
            metafile = metadata[1]
            files = [(datafile, 'filename'), (metafile, 'meta')]
        else:
            files = [(datafile, 'filename')]
        # fileUpload blocks, so run it in a worker thread
        deferred = threads.deferToThread(fileUpload, host, port,
                '/STORE', files, params, headers=self.headers)
        deferred.addCallback(self._getSendStore, nKu, host, port, datafile,
                metadata, params, self.headers)
        deferred.addErrback(self._errSendStore,
                "Couldn't upload file %s to %s:%d" % (datafile, host, port),
                self.headers, nKu, host, port, datafile, metadata, params)
        return deferred

    def _getSendStore(self, httpconn, nKu, host, port, datafile, metadata,
            params, headers):
        """
        Check the response for status.
        """
        # getresponse() also blocks; defer it to a thread as well
        deferred2 = threads.deferToThread(httpconn.getresponse)
        deferred2.addCallback(self._getSendStore2, httpconn, nKu, host, port,
                datafile, metadata, params, headers)
        deferred2.addErrback(self._errSendStore, "Couldn't get response",
                headers, nKu, host, port, datafile, metadata, params, httpconn)
        return deferred2

    def _getSendStore2(self, response, httpconn, nKu, host, port, datafile,
            metadata, params, headers):
        # Dispatch on the server's HTTP status:
        #   401 -> answer the auth challenge, retry with credentials
        #   409 -> CAS key mismatch (BadCASKeyException)
        #   other non-200 -> generic failure
        #   200 -> success; refresh our record of the node
        httpconn.close()
        if response.status == http.UNAUTHORIZED:
            loggerstor.info("SENDSTORE unauthorized, sending credentials")
            challenge = response.reason
            d = answerChallengeDeferred(challenge, self.node.config.Kr,
                    self.node.config.groupIDu, nKu.id(), headers)
            d.addCallback(self._sendRequest, nKu, host, port, datafile,
                    metadata, params)
            d.addErrback(self._errSendStore, "Couldn't answerChallenge",
                    headers, nKu, host, port, datafile, metadata, params,
                    httpconn)
            return d
        elif response.status == http.CONFLICT:
            result = response.read()
            # XXX: client should check key before ever sending request
            raise BadCASKeyException("%s %s"
                    % (response.status, response.reason))
        elif response.status != http.OK:
            result = response.read()
            raise failure.DefaultException(
                    "received %s in SENDSTORE response: %s"
                    % (response.status, result))
        else:
            result = response.read()
            updateNode(self.node.client, self.config, host, port, nKu)
            loggerstor.info("received SENDSTORE response from %s: %s"
                    % (self.dest, str(result)))
            return result

    def _errSendStore(self, err, msg, headers, nKu, host, port,
            datafile, metadata, params, httpconn=None):
        """
        Errback: retry socket errors up to MAXTIMEOUTS, let
        BadCASKeyException pass through quietly, log anything else, and
        return the failure so it keeps propagating.
        """
        if err.check('socket.error'):
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                print "trying again [#%d]...." % self.timeoutcount
                return self._sendRequest(headers, nKu, host, port, datafile,
                        metadata, params)
            else:
                print "Maxtimeouts exceeded: %d" % self.timeoutcount
        elif err.check(BadCASKeyException):
            pass
        else:
            print "%s: unexpected error in SENDSTORE: %s" % (msg,
                    str(err.getErrorMessage()))
        # XXX: updateNode
        if httpconn:
            httpconn.close()
        return err
# Module-level registries shared by all AggregateStore instances:
aggDeferredMap = {} # a map of maps, containing lists of deferreds. The
                    # deferred(s) for file 'x' in tarball 'y' are accessed as
                    # aggDeferredMap['y']['x']
aggTimeoutMap = {}  # a map of timeout calls (reactor.callLater handles) for
                    # tarballs. The timeout for tarball 'y' is stored in
                    # aggTimeoutMap['y']
class AggregateStore:
    """
    Coalesces small files bound for the same (node key, host, port) into
    a single tarball, which is gzipped and shipped via SENDSTORE when
    the TARFILE_TO timer fires (the timer is re-armed on each addition
    while the tarball stays under MINSTORSIZE).  Each caller's deferred
    is registered in aggDeferredMap and fired when the upload finishes.
    """
    # XXX: if multiple guys store the same file, we're going to get into bad
    # cb state (the except clause in errbackTarfiles). Need to catch this
    # as it happens... (this happens e.g. for small files with the same
    # filehash, e.g., 0-byte files, file copies etc). Should fix this in
    # FludClient -- non-agg store has a similar problem (encoded file chunks
    # get deleted out from under successive STOR ops for the same chunk, i.e.
    # from two concurrent STORs of the same file contents)
    def __init__(self, nKu, node, host, port, datafile, metadata):
        # one tarball per (target key, host, port) triple
        tarfilename = os.path.join(node.config.clientdir,nKu.id())\
                +'-'+host+'-'+str(port)+".tar"
        loggerstoragg.debug("tarfile name is %s" % tarfilename)
        if not os.path.exists(tarfilename) \
                or not aggDeferredMap.has_key(tarfilename):
            loggerstoragg.debug("creating tarfile %s to append %s"
                    % (tarfilename, datafile))
            tar = tarfile.open(tarfilename, "w")
            # arm the send timer for this brand-new tarball
            tarfileTimeout = reactor.callLater(TARFILE_TO, self.sendTar,
                    tarfilename, nKu, node, host, port)
            aggDeferredMap[tarfilename] = {}
            aggTimeoutMap[tarfilename] = tarfileTimeout
        else:
            loggerstoragg.debug("opening tarfile %s to append %s"
                    % (tarfilename, datafile))
            tar = tarfile.open(tarfilename, "a")
        if os.path.basename(datafile) not in tar.getnames():
            loggerstoragg.info("adding datafile %s to tarball, %s"
                    % (os.path.basename(datafile), tar.getnames()))
            loggerstoragg.debug("adding data to tarball")
            tar.add(datafile, os.path.basename(datafile))
        else:
            loggerstoragg.info("skip adding datafile %s to tarball" % datafile)
        if metadata:
            # metadata member is stored as "<datafile>.<metakey>.meta"
            metafilename = "%s.%s.meta" % (os.path.basename(datafile),
                    metadata[0])
            loggerstoragg.debug("metadata filename is %s" % metafilename)
            try:
                if isinstance(metadata[1], StringIO):
                    loggerstoragg.debug("metadata is StringIO")
                    tinfo = tarfile.TarInfo(metafilename)
                    # seek to end to learn size, then rewind for addfile
                    metadata[1].seek(0,2)
                    tinfo.size = metadata[1].tell()
                    metadata[1].seek(0,0)
                    tar.addfile(tinfo, metadata[1])
                else:
                    loggerstoragg.debug("metadata is file")
                    tar.add(metadata[1], metafilename)
            except:
                import traceback
                loggerstoragg.debug("exception while adding metadata to"
                        " tarball")
                print sys.exc_info()[2]
                traceback.print_exc()
        tar.close()
        loggerstoragg.debug("prepping deferred")
        # XXX: (re)set timeout for tarfilename
        self.deferred = defer.Deferred()
        loggerstoragg.debug("adding deferred on %s for %s"
                % (tarfilename, datafile))
        # register this caller's deferred under (tarball, datafile);
        # first caller for this datafile creates the list
        try:
            aggDeferredMap[tarfilename][os.path.basename(datafile)].append(
                    self.deferred)
        except KeyError:
            aggDeferredMap[tarfilename][os.path.basename(datafile)] \
                    = [self.deferred]
        self.resetTimeout(aggTimeoutMap[tarfilename], tarfilename)

    def resetTimeout(self, timeoutFunc, tarball):
        # push the send timer back only while the tarball is still under
        # MINSTORSIZE; once it is big enough, let the pending timer fire
        loggerstoragg.debug("in resetTimeout...")
        if timeoutFunc.active():
            if os.stat(tarball)[stat.ST_SIZE] < MINSTORSIZE:
                loggerstoragg.debug("...reset")
                timeoutFunc.reset(TARFILE_TO)
                return
        loggerstoragg.debug("...didn't reset")

    def sendTar(self, tarball, nKu, node, host, port):
        """
        Timer callback: gzip the tarball, remove the original, upload
        the .gz via SENDSTORE, and fan the result out to the per-file
        deferreds via callbackTarfiles/errbackTarfiles.
        """
        gtarball = tarball+".gz"
        loggerstoragg.info(
                "aggregation op triggered, sending tarfile %s to %s:%d"
                % (gtarball, host, port))
        # XXX: bad blocking io
        gtar = gzip.GzipFile(gtarball, 'wb')
        gtar.write(file(tarball, 'r').read())
        gtar.close()
        os.remove(tarball)
        self.deferred = SENDSTORE(nKu, node, host, port, gtarball).deferred
        self.deferred.addCallback(self.callbackTarfiles, tarball)
        self.deferred.addErrback(self.errbackTarfiles, tarball)

    # XXX: make aggDeferredMap use a non-.tar key, so that we don't have to
    # keep passing 'tarball' around (since we removed it and are really only
    # interested in gtarball now, use gtarball at the least)
    def callbackTarfiles(self, result, tarball):
        """
        Upload succeeded: fire the deferreds of every non-.meta member
        with the SENDSTORE result, then delete the gzipped tarball.
        """
        loggerstoragg.debug("callbackTarfiles")
        gtarball = tarball+".gz"
        tar = tarfile.open(gtarball, "r:gz")
        cbs = []
        try:
            for tarinfo in tar:
                if tarinfo.name[-5:] != '.meta':
                    dlist = aggDeferredMap[tarball].pop(tarinfo.name)
                    loggerstoragg.debug("callingback for %s in %s"
                            " (%d deferreds)"
                            % (tarinfo.name, tarball, len(dlist)))
                    for d in dlist:
                        cbs.append(d)
        except KeyError:
            # see XXX at class top: duplicate stores can pop a name twice
            loggerstoragg.warn("aggDeferredMap has keys: %s"
                    % str(aggDeferredMap.keys()))
            loggerstoragg.warn("aggDeferredMap[%s] has keys: %s" % (tarball,
                    str(aggDeferredMap[tarball].keys())))
        tar.close()
        loggerstoragg.debug("deleting tarball %s" % gtarball)
        os.remove(gtarball)
        for cb in cbs:
            cb.callback(result)

    def errbackTarfiles(self, failure, tarball):
        """
        Upload failed: propagate the failure to every member's deferreds
        (the tarball is intentionally kept on disk for debugging).
        """
        loggerstoragg.debug("errbackTarfiles")
        gtarball = tarball+".gz"
        tar = tarfile.open(gtarball, "r:gz")
        cbs = []
        try:
            for tarinfo in tar:
                dlist = aggDeferredMap[tarball].pop(tarinfo.name)
                loggerstoragg.debug("erringback for %s in %s"
                        " (%d deferreds)"
                        % (tarinfo.name, tarball, len(dlist)))
                for d in dlist:
                    cbs.append(d)
        except KeyError:
            loggerstoragg.warn("aggDeferredMap has keys: %s"
                    % str(aggDeferredMap.keys()))
            loggerstoragg.warn("aggDeferredMap[%s] has keys: %s" % (tarball,
                    str(aggDeferredMap[tarball].keys())))
        tar.close()
        loggerstoragg.debug("NOT deleting tarball %s (for debug)" % gtarball)
        #os.remove(gtarball)
        for cb in cbs:
            cb.errback(failure)
class SENDRETRIEVE(REQUEST):
    """
    Download a file from a remote node's /RETRIEVE endpoint, saving the
    result under the local clientdir.  The outcome is delivered via
    self.deferred; a 401 triggers a challenge/response retry.
    """
    def __init__(self, nKu, node, host, port, filekey, metakey=True):
        """
        Try to download a file.
        @param nKu: target node's public key
        @param filekey: key identifying the remote file
        @param metakey: passed through as the 'metakey' URL parameter
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        loggerrtrv.info("sending RETRIEVE request to %s:%s" % (host, str(port)))
        Ku = self.node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/RETRIEVE/'+filekey+'?'
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += "&metakey="+str(metakey)
        self.timeoutcount = 0
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, self.headers, nKu, host, port,
                url))

    def startRequest(self, headers, nKu, host, port, url):
        # invoked by ConnectionQueue when a connection slot frees up
        d = self._sendRequest(headers, nKu, host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, headers, nKu, host, port, url):
        # data is written straight into clientdir by the download factory
        factory = multipartDownloadPageFactory(url, self.node.config.clientdir,
                headers=headers, timeout=transfer_to)
        deferred = factory.deferred
        deferred.addCallback(self._getSendRetrieve, nKu, host, port, factory)
        deferred.addErrback(self._errSendRetrieve, nKu, host, port, factory,
                url, headers)
        return deferred

    def _getSendRetrieve(self, response, nKu, host, port, factory):
        # NOTE(review): eval() of an HTTP status string; int() would do
        if eval(factory.status) == http.OK:
            # response is None, since it went to file (if a server error
            # occured, it may be printed in this file)
            # XXX: need to check that file hashes to key! If we don't do this,
            # malicious nodes can corrupt entire files without detection!
            loggerrtrv.info(response)
            updateNode(self.node.client, self.config, host, port, nKu)
            return response
        else:
            raise failure.DefaultException("SENDRETRIEVE FAILED: "
                    +"server sent status "+factory.status+", '"+response+"'")

    def _errSendRetrieve(self, err, nKu, host, port, factory, url, headers):
        """
        Errback: retry timeouts/lost connections up to MAXTIMEOUTS,
        answer 401 challenges, map 404/400 to typed exceptions, then
        re-raise the failure.
        """
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                return self._sendRequest(headers, nKu, host, port, url)
            else:
                # retries exhausted; fall through to the raise below
                pass
        elif hasattr(factory, 'status') and \
                eval(factory.status) == http.UNAUTHORIZED:
            loggerrtrv.info("SENDRETRIEVE unauthorized, sending credentials")
            # the challenge rides in the error message after a 4-char prefix
            challenge = err.getErrorMessage()[4:]
            d = answerChallengeDeferred(challenge, self.node.config.Kr,
                    self.node.config.groupIDu, nKu.id(), headers)
            d.addCallback(self._sendRequest, nKu, host, port, url)
            return d
        # XXX: these remaining else clauses are really just for debugging...
        elif hasattr(factory, 'status'):
            if eval(factory.status) == http.NOT_FOUND:
                err = NotFoundException(err)
            elif eval(factory.status) == http.BAD_REQUEST:
                err = BadRequestException(err)
        elif err.check('twisted.internet.error.ConnectionRefusedError'):
            pass # fall through to raise err
        else:
            print "non-timeout, non-UNAUTH RETR request error: %s" % err
        # XXX: updateNode
        loggerrtrv.info("SENDRETRIEVE failed")
        raise err
class SENDDELETE(REQUEST):
    """
    Ask a remote node to delete a (filekey, metakey) pair via its
    /DELETE endpoint.  The outcome is delivered via self.deferred.
    """
    def __init__(self, nKu, node, host, port, filekey, metakey):
        """
        Try to delete a file.
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        loggerdele.info("sending DELETE request to %s:%s" % (host, str(port)))
        Ku = self.node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/DELETE/'+filekey+'?'
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += "&metakey="+str(metakey)
        self.timeoutcount = 0
        # bounds challenge/response attempts (see MAXAUTHRETRY) so we
        # don't loop forever against a server that keeps sending 401s
        self.authRetry = 0
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, self.headers, nKu, host, port, url))

    def startRequest(self, headers, nKu, host, port, url):
        # invoked by ConnectionQueue when a connection slot frees up
        d = self._sendRequest(headers, nKu, host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, headers, nKu, host, port, url):
        factory = getPageFactory(url, headers=headers, timeout=primitive_to)
        deferred = factory.deferred
        deferred.addCallback(self._getSendDelete, nKu, host, port, factory)
        deferred.addErrback(self._errSendDelete, nKu, host, port, factory, url,
                headers)
        return deferred

    def _getSendDelete(self, response, nKu, host, port, factory):
        # NOTE(review): eval() of an HTTP status string; int() would do
        if eval(factory.status) == http.OK:
            loggerdele.info("received SENDDELETE response")
            updateNode(self.node.client, self.config, host, port, nKu)
            return response
        else:
            # XXX: updateNode
            raise failure.DefaultException("SENDDELETE FAILED: "
                    +"server sent status "+factory.status+", '"+response+"'")

    def _errSendDelete(self, err, nKu, host, port, factory, url, headers):
        """
        Errback: retry timeouts up to MAXTIMEOUTS, answer 401 challenges
        up to MAXAUTHRETRY times, map 404/400 to typed exceptions.
        """
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                return self._sendRequest(headers, nKu, host, port, url)
        elif hasattr(factory, 'status') and \
                eval(factory.status) == http.UNAUTHORIZED and \
                self.authRetry < MAXAUTHRETRY:
            # XXX: add this authRetry stuff to all the other op classes (so
            # that we don't DOS ourselves and another node
            self.authRetry += 1
            loggerdele.info("SENDDELETE unauthorized, sending credentials")
            # the challenge rides in the error message after a 4-char prefix
            challenge = err.getErrorMessage()[4:]
            d = answerChallengeDeferred(challenge, self.node.config.Kr,
                    self.node.config.groupIDu, nKu.id(), headers)
            d.addCallback(self._sendRequest, nKu, host, port, url)
            d.addErrback(self._errSendDelete, nKu, host, port, factory,
                    url, headers)
            return d
        elif hasattr(factory, 'status'):
            # XXX: updateNode
            loggerdele.info("SENDDELETE failed")
            if eval(factory.status) == http.NOT_FOUND:
                err = NotFoundException(err)
            elif eval(factory.status) == http.BAD_REQUEST:
                err = BadRequestException(err)
            raise err
        return err
class SENDVERIFY(REQUEST):
    """
    Ask a remote node to verify bytes [offset, offset+length) of a
    stored file via its /VERIFY endpoint.  The outcome is delivered via
    self.deferred.
    """
    def __init__(self, nKu, node, host, port, filename, offset, length,
            meta=None):
        """
        Try to verify a file.
        If meta is present, it should be a (metakey, filelikeobj) pair.
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        filekey = os.path.basename(filename) # XXX: filekey should be hash
        loggervrfy.info("sending VERIFY request to %s:%s" % (host, str(port)))
        Ku = self.node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/VERIFY/'+filekey+'?'
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += "&offset="+str(offset)
        url += "&length="+str(length)
        if meta:
            url += "&metakey="+str(meta[0])
            url += "&meta="+fencode(meta[1].read())
        self.timeoutcount = 0
        # guard against callers passing a raw key dict instead of FludRSA
        if not isinstance(nKu, FludRSA):
            raise ValueError("must pass in a FludRSA as nKu to SENDVERIFY")
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, self.headers, nKu, host, port, url))

    def startRequest(self, headers, nKu, host, port, url):
        # invoked by ConnectionQueue when a connection slot frees up
        d = self._sendRequest(headers, nKu, host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, headers, nKu, host, port, url):
        loggervrfy.debug("in VERIFY sendReq %s" % port)
        factory = getPageFactory(url, headers=headers, timeout=primitive_to)
        deferred = factory.deferred
        deferred.addCallback(self._getSendVerify, nKu, host, port, factory)
        deferred.addErrback(self._errSendVerify, nKu, host, port, factory, url,
                headers)
        return deferred

    def _getSendVerify(self, response, nKu, host, port, factory):
        loggervrfy.debug("got vrfy response")
        # NOTE(review): eval() of an HTTP status string; int() would do
        if eval(factory.status) == http.OK:
            loggervrfy.info("received SENDVERIFY response")
            updateNode(self.node.client, self.config, host, port, nKu)
            return response
        else:
            # XXX: updateNode
            loggervrfy.debug("received non-OK SENDVERIFY response")
            raise failure.DefaultException("SENDVERIFY FAILED: "
                    +"server sent status "+factory.status+", '"+response+"'")

    def _errSendVerify(self, err, nKu, host, port, factory, url, headers):
        """
        Errback: retry timeouts up to MAXTIMEOUTS, answer 401
        challenges, map 404/400 to typed exceptions.
        NOTE(review): when MAXTIMEOUTS is exhausted, control falls off
        the end and returns None, silently converting the failure into a
        None result -- confirm whether that is intended (SENDDELETE
        returns err in the analogous spot).
        """
        loggervrfy.debug("got vrfy err")
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                return self._sendRequest(headers, nKu, host, port, url)
        elif hasattr(factory, 'status') and \
                eval(factory.status) == http.UNAUTHORIZED:
            loggervrfy.info("SENDVERIFY unauthorized, sending credentials")
            # the challenge rides in the error message after a 4-char prefix
            challenge = err.getErrorMessage()[4:]
            d = answerChallengeDeferred(challenge, self.node.config.Kr,
                    self.node.config.groupIDu, nKu.id(), headers)
            d.addCallback(self._sendRequest, nKu, host, port, url)
            d.addErrback(self._errVerify, nKu, host, port, factory,
                    url, headers, challenge)
            return d
        elif hasattr(factory, 'status'):
            # XXX: updateNode
            loggervrfy.info("SENDVERIFY failed: %s" % err.getErrorMessage())
            if eval(factory.status) == http.NOT_FOUND:
                err = NotFoundException(err)
            elif eval(factory.status) == http.BAD_REQUEST:
                err = BadRequestException(err)
            raise err

    def _errVerify(self, err, nKu, host, port, factory, url, headers,
            challenge):
        # we can get in here after storing the same file as another node when
        # that data is stored in tarballs under its ID. It was expected that
        # this would be caught up in _getSendVerify... figure out why it isn't.
        loggervrfy.debug("factory status=%s" % factory.status)
        loggervrfy.debug("couldn't answer challenge from %s:%d, WHOOPS: %s"
                % (host, port, err.getErrorMessage()))
        loggervrfy.debug("challenge was: '%s'" % challenge)
        return err
def answerChallengeDeferred(challenge, Kr, groupIDu, sID, headers):
    """Run answerChallenge() in a worker thread; returns a Deferred that
    fires with the augmented headers dict."""
    return threads.deferToThread(
            answerChallenge, challenge, Kr, groupIDu, sID, headers)
def answerChallenge(challenge, Kr, groupIDu, sID, headers=None):
    """
    Decrypt an authentication challenge and install the resulting
    'Authorization' header into headers.

    @param challenge: fencode()d challenge from the server
    @param Kr: our private key, used to decrypt the challenge
    @param groupIDu: group id appended to the challenge response
    @param sID: hex-encoded id of the server that issued the challenge
    @param headers: dict to receive the Authorization header; a fresh
        dict is created when omitted
    @return: the headers dict with 'Authorization' set
    @raise ImposterException: when the decrypted challenge does not
        embed the issuing server's id
    """
    # Fix: the original used a mutable default argument (headers={}).
    # That dict is created once and shared across all calls, and since it
    # is mutated below, a stale Authorization header could leak between
    # unrelated calls.
    if headers is None:
        headers = {}
    loggerauth.debug("got challenge: '%s'" % challenge)
    sID = binascii.unhexlify(sID)
    challenge = (fdecode(challenge),)
    response = fencode(Kr.decrypt(challenge))
    # XXX: RSA.decrypt won't restore leading 0's. This causes
    # some challenges to fail when they shouldn't -- solved for now
    # on the server side by generating non-0 leading challenges.
    loggerauth.debug("decrypted challenge to %s" % response)
    responseID = fdecode(response)[:len(sID)]
    loggerauth.debug(" response id: %s" % fencode(responseID))
    if responseID != sID:
        # fail the op.
        # If we don't do this, we may be allowing the server to build a
        # dictionary useful for attack. The attack is as follows: node A
        # (server) collects a bunch of un-IDed challenge/response pairs by
        # issuing challenges to node B (client). Then node A uses those
        # responses to pose as B to some other server C. This sounds
        # farfetched, in that such a database would need to be huge, but in
        # reality, such an attack can happen in real-time, with node A
        # simultaneously serving requests from B, relaying challenges from C to
        # B, and then responding with B's responses to C to gain resources
        # there as an imposter. The ID string prevents this attack.
        # XXX: trust-- (must go by ip:port, since ID could be innocent)
        raise ImposterException("node %s is issuing invalid challenges --"
                " claims to have id=%s" % (fencode(sID), fencode(responseID)))
    response = fdecode(response)[len(sID):]
    loggerauth.debug(" challenge response: '%s'" % fencode(response))
    response = fencode(response)+":"+groupIDu
    loggerauth.debug("response:groupIDu=%s" % response)
    response = binascii.b2a_base64(response)
    loggerauth.debug("b64(response:groupIDu)=%s" % response)
    response = "Basic %s" % response
    headers['Authorization'] = response
    return headers
| Python |
"""
FludClient.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
flud client ops.
"""
from twisted.web import client
from twisted.internet import error
import os, stat, httplib, sys, logging
from ClientPrimitives import *
from ClientDHTPrimitives import *
import FludCommUtil
logger = logging.getLogger('flud.client')
class FludClient(object):
"""
This class contains methods which create request objects
"""
def __init__(self, node):
    """Bind this client to its owning node; start with no in-flight
    store operations."""
    self.currentStorOps = {}
    self.node = node
"""
Data storage primitives
"""
def redoTO(self, f, node, host, port):
    """
    Errback helper: retry sendGetID() when the failure f was a timeout,
    otherwise pass the failure through unchanged.
    """
    print("in redoTO: %s" % f)
    # Fix: the original tested `f.getTraceback().find(...)` as a boolean,
    # but str.find() returns -1 (truthy!) when absent and 0 (falsy) when
    # the match is at position 0 -- effectively inverting the check.
    # Substring membership expresses the intent correctly.
    if "error.TimeoutError" in f.getTraceback():
        print("retrying........")
        return self.sendGetID(host, port)
    else:
        return f
def sendGetID(self, host, port):
    """Fetch the remote node's ID (public key); returns the SENDGETID
    op's Deferred."""
    return SENDGETID(self.node, host, port).deferred
# XXX: we should cache nKu so that we don't do the GETID for all of these
# ops every single time
def sendStore(self, filename, metadata, host, port, nKu=None):
# XXX: need to keep a map of 'filename' to deferreds, in case we are
# asked to store the same chunk more than once concurrently (happens
# for 0-byte files or from identical copies of the same file, for
# example). both SENDSTORE and AggregateStore will choke on this.
# if we find a store req in said map, just return that deferred instead
# of redoing the op. [note, could mess up node choice... should also do
# this on whole-file level in FileOps]
# XXX: need to remove from currentStorOps on success or failure
key = "%s:%d:%s" % (host, port, filename)
if self.currentStorOps.has_key(key):
logger.debug("returning saved deferred for %s in sendStore"
% filename)
return self.currentStorOps[key]
def sendStoreWithnKu(nKu, host, port, filename, metadata):
return SENDSTORE(nKu, self.node, host, port, filename,
metadata).deferred
def removeKey(r, key):
self.currentStorOps.pop(key)
return r
if not nKu:
# XXX: doesn't do AggregateStore if file is small. Can fix by
# moving this AggStore v. SENDSTORE choice into SENDSTORE
# proper
logger.warn("not doing AggregateStore on small file because"
" of missing nKu")
print "not doing AggregateStore on small file because" \
" of missing nKu"
d = self.sendGetID(host, port)
d.addCallback(sendStoreWithnKu, host, port, filename, metadata)
self.currentStorOps[key] = d
return d
fsize = os.stat(filename)[stat.ST_SIZE];
if fsize < MINSTORSIZE:
logger.debug("doing AggStore")
if metadata:
logger.debug("with metadata")
d = AggregateStore(nKu, self.node, host, port, filename,
metadata).deferred
else:
logger.debug("SENDSTORE")
d = SENDSTORE(nKu, self.node, host, port, filename,
metadata).deferred
self.currentStorOps[key] = d
d.addBoth(removeKey, key)
return d
# XXX: need a version that takes a metakey, too
def sendRetrieve(self, filekey, host, port, nKu=None, metakey=True):
def sendRetrieveWithNKu(nKu, host, port, filekey, metakey=True):
return SENDRETRIEVE(nKu, self.node, host, port, filekey,
metakey).deferred
if not nKu:
d = self.sendGetID(host, port)
d.addCallback(sendRetrieveWithNKu, host, port, filekey, metakey)
return d
else:
return SENDRETRIEVE(nKu, self.node, host, port, filekey,
metakey).deferred
def sendVerify(self, filekey, offset, length, host, port, nKu=None,
meta=None):
def sendVerifyWithNKu(nKu, host, port, filekey, offset, length,
meta=True):
return SENDVERIFY(nKu, self.node, host, port, filekey, offset,
length, meta).deferred
if not nKu:
d = self.sendGetID(host, port)
d.addCallback(sendVerifyWithNKu, host, port, filekey, offset,
length, meta)
return d
else:
s = SENDVERIFY(nKu, self.node, host, port, filekey, offset, length,
meta)
return s.deferred
def sendDelete(self, filekey, metakey, host, port, nKu=None):
def sendDeleteWithNKu(nKu, host, port, filekey, metakey):
return SENDDELETE(nKu, self.node, host, port, filekey,
metakey).deferred
if not nKu:
d = self.sendGetID(host, port)
d.addCallback(sendDeleteWithNKu, host, port, filekey, metakey)
return d
else:
return SENDDELETE(nKu, self.node, host, port, filekey,
metakey).deferred
"""
DHT single primitives (single call to single peer). These should probably
only be called for testing or bootstrapping (sendkFindNode can be used to
'connect' to the flud network via a gateway, for instance). Use the
recursive primitives for doing DHT ops.
"""
def sendkFindNode(self, host, port, key):
return SENDkFINDNODE(self.node, host, port, key).deferred
def sendkStore(self, host, port, key, val):
return SENDkSTORE(self.node, host, port, key, val).deferred
def sendkFindValue(self, host, port, key):
return SENDkFINDVALUE(self.node, host, port, key).deferred
"""
DHT recursive primitives (recursive calls to muliple peers)
"""
def kFindNode(self, key):
return kFindNode(self.node, key).deferred
def kStore(self, key, val):
return kStore(self.node, key, val).deferred
def kFindValue(self, key):
return kFindValue(self.node, key).deferred
| Python |
"""
ServerPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive server storage protocol
"""
import binascii, time, os, stat, httplib, gc, re, sys, logging, sets
import tempfile, tarfile
from StringIO import StringIO
from twisted.web.resource import Resource
from twisted.web import server, resource, client
from twisted.internet import reactor, threads, defer
from twisted.web import http
from twisted.python import failure
from flud.FludCrypto import FludRSA, hashstring, hashfile, generateRandom
import flud.TarfileUtils as TarfileUtils
from flud.fencode import fencode, fdecode
import BlockFile
from FludCommUtil import *
# per-subsystem loggers for the server-side primitive operations
logger = logging.getLogger("flud.server.op")
loggerid = logging.getLogger("flud.server.op.id")      # ID requests
loggerstor = logging.getLogger("flud.server.op.stor")  # STORE requests
loggerretr = logging.getLogger("flud.server.op.rtrv")  # RETRIEVE requests
loggervrfy = logging.getLogger("flud.server.op.vrfy")  # VERIFY requests
loggerdele = logging.getLogger("flud.server.op.dele")  # DELETE requests
loggerauth = logging.getLogger("flud.server.op.auth")  # http-auth handshake
"""
These classes represent http requests received by this node, and the actions
taken to respond.
"""
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to refactor challengeResponse stuff so that all share this same
# code (put it in REQUEST obj). See if we can do the same for some of
# the other bits.
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: find everywhere we are sending longs and consider sending hex (or our
# own base-64) encoded instead
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should
# also consider Zooko's links in the parent to this post)
# XXX: can man-in-the-middle get us with http basic auth, i.e., a node (Bob)
# receives a store request from node Alice. Bob 'forwards' this request
# to another node, Charlie, posing as Alice. When the challenge comes
# back from Charlie, Bob relays it back to Alice, gets Alice's response,
# and uses that response to answer Charlie's challenge. Bob then pretends
# to store the data, while in reality the data is at Charlie (VERIFYs can
# be performed similarly).
# [This scenario appears legitimate, but it isn't problematic. Bob can
# only store at Charlie if Charlie trusts Bob. And if Bob trust Charlie
# so much that he is willing to stake his trust with Alice on Charlie's
# reliability, there shouldn't be anything wrong with allowing this. If
# Charlie goes down, Bob will go down (partly) with him, as far as trust
# is concerned].
# [should disable/remove CHALLENGE and GROUPID so that this only works
# for forwarding. Leaving CHALLENGE/GROUPID allows imposters. Limiting
# only to forwarding imposters.]
# XXX: Auth problem still exists. Http-auth allows servers to know that clients
#      are legitimate, but clients don't know the legitimacy of servers. Proposal:
# all client requests are currently responded to with a challenge that is
# copied in the header and body. Instead, require that the server sign
# the url it was handed by the client, and return this in the body
# (challenge remains in the header). Now, the client can verify that the
# server is who it says it is. This is still somewhat vulnerable to replay
# (if the same url is repeated, which it can be), but this only matters if
# the client sends the exact same message again later to an imposter
# (imposters can only impersonate the server). Alternatively, could send
# a challenge as a param of the request. [I think this is fixed now?]
# XXX: disallow requests originating from self.
# flud protocol version, advertised on every response (see ROOT.setHeaders)
fludproto_ver = '0.1'
# length in bytes of the random challenge used for client authentication
challengelength = 40  # XXX: is 40 bytes sufficient?
class ROOT(Resource):
    """
    Base Resource for all flud server operations.

    Notes on parameters common to most requests:
    Ku_e - the public key RSA exponent (usually 65537L, but can be other
           values)
    Ku_n - the public key RSA modulus.
    nodeID - the nodeID of the requestor
    port - the port that the reqestor runs their fludserver on.
    All REQs will contain a nodeID ('nodeID'), a public key ('Ku_e', 'Ku_n'),
    and a requestID ('reqID').
    [XXX: must be more careful about all this code -- if wrong data is sent,
    exceptions are raised.  Put try blocks around everything.]
    Client Authentication works as follows: Each client request is responded
    to with a 401 response in the header, with a challenge in the header
    message and repeated in the body.  The client reissues the response,
    filling the user 'username' field with the challenge-response and the
    'password' field with groupIDu.  The client must compute the response to
    the challenge each time by examining this value.  The groupIDu never
    changes, so the client can simply send this value back with the response.
    This implies some server-side state; since the server responds with a 401
    and expects the client to send a new request with an Authorization header
    that contains the challenge response, the server has to keep some state
    for each client.  This state expires after a short time, and implies that
    client must make requests to individual servers serially.
    """
    # FUTURE: might want to switch to nevow's rend.Page as parent...

    def __init__(self, fludserver):
        """
        All children should inherit.  Make sure to call super.__init__ if you
        override __init__
        """
        Resource.__init__(self)
        self.fludserver = fludserver
        self.node = fludserver.node
        self.config = fludserver.node.config

    def getChild(self, name, request):
        """
        should override.
        """
        if name == "":
            return self
        return Resource.getChild(self, name, request)

    def render_GET(self, request):
        self.setHeaders(request)
        # fixed: closing tag was mistyped as '</hrml>'
        return "<html>Flud</html>"

    def setHeaders(self, request):
        # advertise server and protocol version on every response
        request.setHeader('Server', 'FludServer 0.1')
        request.setHeader('FludProtocol', fludproto_ver)
class ID(ROOT):
"""
Just received a request to expose my identity. Send public key (from which
requestor can determine nodeID).
Response codes: 200- OK (default)
204- No Content (returned in case of error or not wanting
to divulge ID)
"""
def render_GET(self, request):
self.setHeaders(request)
try:
required = ('nodeID', 'Ku_e', 'Ku_n', 'port')
params = requireParams(request, required)
except Exception, inst:
msg = "%s in request received by ID" % inst.args[0]
loggerid.info(msg)
request.setResponseCode(http.BAD_REQUEST, "Bad Request")
return msg
else:
loggerid.info("received ID request from %s..."
% params['nodeID'][:10])
loggerid.info("returning ID response")
#try:
reqKu = {}
reqKu['e'] = long(params['Ku_e'])
reqKu['n'] = long(params['Ku_n'])
reqKu = FludRSA.importPublicKey(reqKu)
host = getCanonicalIP(request.getClientIP())
updateNode(self.node.client, self.config, host,
int(params['port']), reqKu, params['nodeID'])
return str(self.config.Ku.exportPublicKey())
#except:
# msg = "can't return ID"
# loggerid.log(logging.WARN, msg)
# request.setResponseCode(http.NO_CONTENT, msg)
# return msg
class STORE(ROOT):
    """
    A request to store data via http upload.
    Response codes: 200- OK (default)
                    400- Bad Request (missing params)
                    401- Unauthorized (ID hash, CHALLENGE, or
                         GROUPCHALLENGE failed)
    Each file fragment is stored with its storage key as the file name.  The
    file fragment can be 'touched' (or use last access time if supported) each
    time it is verified or read, so that we have a way to record age (which
    also allows a purge strategy).  Files are reference-listed (reference
    count with owners) by the BlockFile object.
    A preliminary file structure:
    .flud/
        store/
            fragmenthash1
            fragmenthash2
            ...
        dht/
            metadatahash1
            metadatahash2
            ...
        meta/
            master
            metadatahash1
            metadatahash2
            ...
        dl/
        flud.conf
        fludfile.conf
        fludrules.init
        flud.log
    """
    isLeaf = True

    def render_POST(self, request):
        # Validate required params, then hand off to _storeFile via the
        # challenge/response authentication machinery (authenticate() calls
        # _storeFile only after the requestor proves its identity).
        self.setHeaders(request)
        try:
            required = ('filekey', 'size', 'nodeID', 'Ku_e', 'Ku_n', 'port')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by STORE"
            loggerstor.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            loggerstor.info("received STORE request from %s..."
                    % params['nodeID'][:10])
            host = getCanonicalIP(request.getClientIP())
            port = int(params['port'])
            requestedSize = int(params['size'])
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            #if requestedSize > 10:
            #    msg = "unwilling to enter into storage relationship"
            #    request.setResponseCode(http.PAYMENT_REQUIRED, msg)
            #    return msg
            return authenticate(request, reqKu, params['nodeID'],
                    host, int(params['port']), self.node.client, self.config,
                    self._storeFile, request, params['filekey'], reqKu,
                    params['nodeID'])

    def _storeFile(self, request, filekey, reqKu, nodeID):
        """
        Write the uploaded data to disk.  Tarball uploads (filekey ending in
        .tar/.tar.gz) are hash-verified and concatenated into the requestor's
        per-node tarball; single files are CAS-checked against filekey and
        stored as BlockFiles (with small files folded into the tarball).
        """
        # [XXX: memory management is not happy here.  Might want to look at
        #       request.registerProducer().  Otherwise, might have to scrap
        #       using the STORE(ROOT(RESOURCE)) deal in favor of
        #       producer/consumer model for STORE ops
        #       (http://itamarst.org/writings/OSCON03/twisted_internet-108.html).
        #       Another option might include subclassing
        #       web.resource.Resource and making this derive from that... Or
        #       might be web.Site that needs to be subclassed... Or maybe
        #       web.site.Request - web.site.Request.process()?  Request seems
        #       doubly-bad: perhaps a copy is made somewhere, because memory
        #       mushrooms to 2x big upload, then goes back down to around 1x.
        # [update: This should be fixable in twisted.web2, but I am informed
        #          that in the current version, there is no workaround]
        # get the data to a tmp file
        loggerstor.debug("writing store data to tmpfile")
        tmpfile = tempfile.mktemp(dir=self.config.storedir)
        tarball = os.path.join(self.config.storedir, reqKu.id()+".tar")
        # rename and/or prepend the data appropriately
        tmpTarMode = None
        if filekey[-4:] == ".tar":
            tmpfile = tmpfile+".tar"
            tmpTarMode = 'r'
            targetTar = tarball
        elif filekey[-7:] == ".tar.gz":
            tmpfile = tmpfile+".tar.gz"
            tmpTarMode = 'r:gz'
            targetTar = tarball+".gz"
        loggerstor.debug("tmpfile is %s" % tmpfile)
        # XXX: if the server supports both .tar and tar.gz, this is wrong;
        #      we'd need to check *both* for already existing dudes instead
        #      of just choosing one
        if os.path.exists(tarball+'.gz'):
            tarball = (tarball+'.gz', 'r:gz')
        elif os.path.exists(tarball):
            tarball = (tarball, 'r')
        else:
            tarball = None
        loggerstor.debug("tarball is %s" % str(tarball))
        data = request.args.get('filename')[0]  # XXX: file in mem! need web2.
        # XXX: bad blocking stuff here
        f = open(tmpfile, 'wb')
        f.write(data)
        f.close()
        ftype = os.popen('file %s' % tmpfile)
        loggerstor.debug("ftype of %s is %s" % (tmpfile, ftype.read()))
        ftype.close()
        if tmpTarMode:
            # client sent a tarball
            loggerstor.debug("about to chksum %s" % tmpfile)
            digests = TarfileUtils.verifyHashes(tmpfile, '.meta')
            loggerstor.debug("chksum returned %s" % digests)
            ftype = os.popen('file %s' % tmpfile)
            loggerstor.debug("ftype of %s is %s" % (tmpfile, ftype.read()))
            ftype.close()
            if not digests:
                msg = "Attempted to use non-CAS storage key(s) for" \
                        " STORE tarball"
                loggerstor.debug(msg)
                os.remove(tmpfile)
                request.setResponseCode(http.CONFLICT, msg)
                return msg
            # XXX: add digests to a db of already stored files (for quick
            #      lookup)
            if tarball:
                # merge the upload into the existing per-node tarball,
                # dropping any members already present there
                tarname, tarnameMode = tarball
                loggerstor.debug("concatenating tarfiles %s and %s"
                        % (tarname, tmpfile))
                f1 = tarfile.open(tarname, tarnameMode)
                f2 = tarfile.open(tmpfile, tmpTarMode)
                f1names = f1.getnames()
                f2names = f2.getnames()
                f1.close()
                f2.close()
                dupes = [f for f in f1names if f in f2names]
                TarfileUtils.delete(tmpfile, dupes)
                ftype = os.popen('file %s' % tarname)
                loggerstor.debug("ftype of %s is %s" % (tarname, ftype.read()))
                ftype.close()
                TarfileUtils.concatenate(tarname, tmpfile)
                ftype = os.popen('file %s' % tarname)
                loggerstor.debug("ftype of %s is %s" % (tarname, ftype.read()))
                ftype.close()
            else:
                loggerstor.debug("saving %s as tarfile %s" % (tmpfile,
                        targetTar))
                os.rename(tmpfile, targetTar)
        else:
            # client sent regular file
            h = hashfile(tmpfile)
            if request.args.has_key('meta') and request.args.has_key('metakey'):
                metakey = request.args.get('metakey')[0]
                meta = request.args.get('meta')[0]  # XXX: file in mem!
            else:
                metakey = None
                meta = None
            # CAS check: the storage key must be the fencoded hash of the data
            if fencode(long(h, 16)) != filekey:
                msg = "Attempted to use non-CAS storage key for STORE data "
                msg += "(%s != %s)" % (filekey, fencode(long(h, 16)))
                os.remove(tmpfile)
                request.setResponseCode(http.CONFLICT, msg)
                return msg
            fname = os.path.join(self.config.storedir, filekey)
            if os.path.exists(fname):
                # already stored: just add this requestor as a referrer
                loggerstor.debug("adding metadata to %s" % fname)
                f = BlockFile.open(fname, 'rb+')
                if not f.hasNode(nodeID):
                    f.addNode(int(nodeID, 16), {metakey: meta})
                f.close()
                os.remove(tmpfile)
            else:
                # NOTE(review): 'tarname' is only bound in the tarball-upload
                # branch above, so both uses below would raise NameError on
                # this path; also nodeID+".tar" is tested relative to the cwd
                # rather than self.config.storedir -- TODO confirm and fix.
                if os.path.exists(nodeID+".tar"):
                    # XXX: need to do something with metadata!
                    print "XXX: need to do something with metadata for tar!"
                    tarball = tarfile.open(tarname, 'r')
                    if fname in tarball.getnames():
                        loggerstor.debug("%s already stored in tarball"
                                % fname)
                        # if the file is already in the corresponding tarball,
                        # update its timestamp and return success.
                        loggerstor.debug("%s already stored" % filekey)
                        # XXX: update timestamp for filekey in tarball
                        return "Successful STORE"
                    else:
                        loggerstor.debug("tarball for %s, but %s not in"
                                " tarball" % (nodeID, fname))
                if len(data) < 8192 and fname != tarname:  # XXX: magic # (blk sz)
                    # If the file is small, move it into the appropriate
                    # tarball.  Note that this code is unlikely to ever be
                    # executed if the client is an official flud client, as
                    # they do the tarball aggregation thing already, and all
                    # tarballs will be > 8192.  This is, then, really just
                    # defensive coding -- clients aren't required to implement
                    # that tarball aggregation strategy.  And it is really
                    # only useful for filesystems with inefficient small file
                    # storage.
                    loggerstor.debug("moving small file '%s' into tarball"
                            % fname)
                    if not os.path.exists(tarname):
                        tarball = tarfile.open(tarname, 'w')
                    else:
                        tarball = tarfile.open(tarname, 'a')
                    # XXX: more bad blocking stuff
                    tarball.add(tmpfile, os.path.basename(fname))
                    if meta:
                        metafilename = "%s.%s.meta" % (os.path.basename(fname),
                                metakey)
                        loggerstor.debug("adding metadata file to tarball %s"
                                % metafilename)
                        metaio = StringIO(meta)
                        tinfo = tarfile.TarInfo(metafilename)
                        tinfo.size = len(meta)
                        tarball.addfile(tinfo, metaio)
                    tarball.close()
                    os.remove(tmpfile)
                else:
                    # store the file as a stand-alone BlockFile
                    loggerstor.debug("storing %s" % fname)
                    os.rename(tmpfile, fname)
                    BlockFile.convert(fname, (int(nodeID, 16),
                            {metakey: meta}))
        loggerstor.debug("successful STORE for %s" % filekey)
        return "Successful STORE"

    def _storeErr(self, error, request, msg):
        """Errback: report an authentication/storage failure to the client."""
        out = msg+": "+error.getErrorMessage()
        print "%s" % str(error)
        loggerstor.info(out)
        # update trust, routing
        request.setResponseCode(http.UNAUTHORIZED,
                "Unauthorized: %s" % msg)  # XXX: wrong code
        return msg

    def render_GET(self, request):
        # STORE carries an upload body, so it must be POSTed; reject GET.
        request.setResponseCode(http.BAD_REQUEST, "Bad Request")
        return "STORE request must be sent using POST"
class RETRIEVE(ROOT):
    """
    A request to retrieve data.  The file to retrieve is indicated by the URL
    path, e.g. http://server:port/RETRIEVE/a35cd1339766ef209657a7b
    Response codes: 200- OK (default)
                    400- Bad Request (missing params)
                    401- Unauthorized (ID hash, CHALLENGE, or
                         GROUPCHALLENGE failed)
    """
    isLeaf = True

    def render_GET(self, request):
        # Validate params, sanitize the filekey from the URL path, then hand
        # off to _sendFile via challenge/response authentication.
        self.setHeaders(request)
        try:
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by RETRIEVE"
            loggerretr.log(logging.INFO, msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            loggerretr.info("received RETRIEVE request for %s from %s..."
                    % (request.path, params['nodeID'][:10]))
            host = getCanonicalIP(request.getClientIP())
            port = int(params['port'])
            filekey = re.sub('/RETRIEVE', '', str(request.path))
            # reject path-traversal attempts in the filekey
            paths = [p for p in filekey.split(os.path.sep) if p != '']
            if len(paths) > 1:
                msg = "filekey contains illegal path seperator tokens."
                loggerretr.debug("BAD_REQ: %s" % msg)
                request.setResponseCode(http.BAD_REQUEST,
                        "Bad Request: %s" % msg)
                return msg
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            # NOTE(review): request.args['metakey'] is a *list*, so the
            # comparison to 'True' can never succeed and returnMeta keeps the
            # (truthy) list value -- probably meant
            # request.args['metakey'][0].  TODO confirm.
            if request.args.has_key('metakey'):
                returnMeta = request.args['metakey']
                if returnMeta == 'True':
                    returnMeta = True
            else:
                returnMeta = True
            return authenticate(request, reqKu, params['nodeID'],
                    host, int(params['port']), self.node.client, self.config,
                    self._sendFile, request, filekey, reqKu, returnMeta)

    def _sendFile(self, request, filekey, reqKu, returnMeta):
        """
        Stream the requested block back to the requestor -- as a
        Multipart/Related body including its metadata when returnMeta is
        set -- looking first in the store dir and then in the requestor's
        per-node tarball.
        """
        fname = self.config.storedir + filekey
        loggerretr.debug("reading file data from %s" % fname)
        # XXX: make sure requestor owns the file?
        tfilekey = filekey[1:]
        if returnMeta:
            loggerretr.debug("returnMeta = %s" % returnMeta)
            request.setHeader('Content-type', 'Multipart/Related')
            rand_bound = binascii.hexlify(generateRandom(13))
            request.setHeader('boundary', rand_bound)
        if not os.path.exists(fname):
            # check for tarball for originator
            tarball = os.path.join(self.config.storedir, reqKu.id()+".tar")
            tarballs = []
            if os.path.exists(tarball+'.gz'):
                tarballs.append((tarball+'.gz', 'r:gz'))
            if os.path.exists(tarball):
                tarballs.append((tarball, 'r'))
            loggerretr.debug("tarballs = %s" % tarballs)
            # XXX: does this work? does it close both tarballs if both got
            #      opened?
            for tarball, openmode in tarballs:
                tar = tarfile.open(tarball, openmode)
                try:
                    tinfo = tar.getmember(tfilekey)
                    returnedMeta = False
                    if returnMeta:
                        loggerretr.debug("tar returnMeta %s" % tfilekey)
                        try:
                            # any member named <tfilekey>*meta is metadata
                            # belonging to this block
                            metas = [f for f in tar.getnames()
                                    if f[:len(tfilekey)] == tfilekey
                                    and f[-4:] == 'meta']
                            loggerretr.debug("tar returnMetas=%s" % metas)
                            for m in metas:
                                minfo = tar.getmember(m)
                                H = []
                                H.append("--%s" % rand_bound)
                                H.append("Content-Type: "
                                        "Application/octet-stream")
                                H.append("Content-ID: %s" % m)
                                H.append("Content-Length: %d" % minfo.size)
                                H.append("")
                                H = '\r\n'.join(H)
                                request.write(H)
                                request.write('\r\n')
                                tarm = tar.extractfile(minfo)
                                loggerretr.debug("successful metadata"
                                        " RETRIEVE (from %s)" % tarball)
                                # XXX: bad blocking stuff
                                while 1:
                                    buf = tarm.read()
                                    if buf == "":
                                        break
                                    request.write(buf)
                                request.write('\r\n')
                                tarm.close()
                            # headers for the main file part
                            H = []
                            H.append("--%s" % rand_bound)
                            H.append("Content-Type: Application/octet-stream")
                            H.append("Content-ID: %s" % tfilekey)
                            H.append("Content-Length: %d" % tinfo.size)
                            H.append("")
                            H = '\r\n'.join(H)
                            request.write(H)
                            request.write('\r\n')
                            returnedMeta = True
                        except:
                            # couldn't find any metadata, just return normal
                            # file
                            loggerretr.debug("no metadata found")
                            pass
                    # XXX: bad blocking stuff
                    tarf = tar.extractfile(tinfo)
                    # XXX: update timestamp on tarf in tarball
                    loggerretr.debug("successful RETRIEVE (from %s)"
                            % tarball)
                    # XXX: bad blocking stuff
                    while 1:
                        buf = tarf.read()
                        if buf == "":
                            break
                        request.write(buf)
                    tarf.close()
                    tar.close()
                    if returnedMeta:
                        # closing multipart boundary
                        T = []
                        T.append("")
                        T.append("--%s--" % rand_bound)
                        T.append("")
                        T = '\r\n'.join(T)
                        request.write(T)
                    return ""
                except:
                    # member not present in this tarball; try the next one
                    tar.close()
            request.setResponseCode(http.NOT_FOUND, "Not found: %s" % filekey)
            request.write("Not found: %s" % filekey)
        else:
            f = BlockFile.open(fname, "rb")
            loggerretr.log(logging.INFO, "successful RETRIEVE for %s"
                    % filekey)
            meta = f.meta(int(reqKu.id(), 16))
            if returnMeta and meta:
                loggerretr.debug("returnMeta %s" % filekey)
                loggerretr.debug("returnMetas=%s" % meta)
                for m in meta:
                    H = []
                    H.append("--%s" % rand_bound)
                    H.append("Content-Type: Application/octet-stream")
                    H.append("Content-ID: %s.%s.meta" % (tfilekey, m))
                    H.append("Content-Length: %d" % len(meta[m]))
                    H.append("")
                    H.append(meta[m])
                    H = '\r\n'.join(H)
                    request.write(H)
                    request.write('\r\n')
                # headers for the main file part
                H = []
                H.append("--%s" % rand_bound)
                H.append("Content-Type: Application/octet-stream")
                H.append("Content-ID: %s" % tfilekey)
                H.append("Content-Length: %d" % f.size())
                H.append("")
                H = '\r\n'.join(H)
                request.write(H)
                request.write('\r\n')
            # XXX: bad blocking stuff
            while 1:
                buf = f.read()
                if buf == "":
                    break
                request.write(buf)
            f.close()
            if returnMeta and meta:
                # closing multipart boundary
                T = []
                T.append("")
                T.append("--%s--" % rand_bound)
                T.append("")
                request.write('\r\n'.join(T))
        return ""

    def _sendErr(self, error, request, msg):
        """Errback: report an authorization failure and finish the request."""
        out = msg+": "+error.getErrorMessage()
        loggerretr.log(logging.INFO, out)
        # update trust, routing
        request.setResponseCode(http.UNAUTHORIZED, "Unauthorized: %s" % msg)
        request.write(msg)
        request.finish()
class VERIFY(ROOT):
    """
    Just received a storage VERIFY request.
    Response codes: 200- OK (default)
                    400- Bad Request (missing params)
                    401- Unauthorized (ID hash, CHALLENGE, or
                         GROUPCHALLENGE failed)
    [VERIFY is the most important of the trust-building ops.  Issues:
    1) how often do we verify.
       a) each file
       A) each block of each file
    2) proxying hurts trust (see PROXY below)
    The answer to #1 is, ideally, we check every node who is storing for us
    every quanta.  But doing the accounting for keeping track of who is
    storing for us is heavy, so instead we just want to check all of our
    files and hope that gives sufficient coverage, on average, of the nodes
    we store to.  But we have to put some limits on this, and the limits
    can't be imposed by the sender (since a greedy sender can just modify
    this), so peer nodes have to do some throttling of these types of
    requests.  But such throttling is tricky, as the requestor must decrease
    trust when VERIFY ops fail.
    Also, since we will just randomly select from our files, such a
    throttling scheme will reduce our accuracy as we store more and more data
    (we can only verify a smaller percentage).  Peers could enforce limits as
    a ratio of total data stored for a node, but then the peers could act
    maliciously by artificially lowering this number.
    In summary, if we don't enforce limits, misbehaving nodes could flood
    VERIFY requests resulting in effective DOS attack.  If we do enforce
    limits, we have to watch out for trust wars where both nodes end up
    destroying all trust between them.
    Possible answer: set an agreed-upon threshold a priori.  This could be
    a hardcoded limit, or (better) negotiated between node pairs.  If the
    requestor goes over this limit, he should understand that his trust will
    be decreased by requestee.  If he doesn't understand this, his trust
    *should* be decreased, and if he decreases his own trust in us as well,
    we don't care -- he's misbehaving.]
    """
    # XXX: add support for sending multiple verify ops in a single request
    isLeaf = True

    def render_GET(self, request):
        """
        A VERIFY contains a file[fragment]id, an offset, and a length.
        When this message is received, the given file[fragment] should be
        accessed and its bytes, from offset to offset+length, should be
        sha256 hashed, and the result returned.
        [in theory, we respond to all VERIFY requests.  In practice, however,
        we should probably do some throttling of responses to prevent DOS
        attacks and probing.  Or, maybe require that the sender encrypt the
        challenge with their private key...]
        [should also enforce some idea of reasonableness on length of bytes
        to verify]
        """
        self.setHeaders(request)
        try:
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port', 'offset', 'length')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by VERIFY"
            loggervrfy.log(logging.INFO, msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            loggervrfy.debug("BAD REQUEST")
            return msg
        else:
            loggervrfy.log(logging.INFO, "received VERIFY request from %s..."
                    % params['nodeID'])
            # NOTE(review): tests for 'meta' but then reads 'metakey' --
            # assumes both args are always sent together; also logs to
            # loggerretr rather than loggervrfy (looks like copy-paste).
            # TODO confirm.
            if 'meta' in request.args:
                params['metakey'] = request.args['metakey'][0]
                params['meta'] = fdecode(request.args['meta'][0])
                loggerretr.info("VERIFY contained meta field with %d chars"
                        % len(params['meta']))
                meta = (params['metakey'], params['meta'])
            else:
                meta = None
            host = getCanonicalIP(request.getClientIP())
            port = int(params['port'])
            offset = int(params['offset'])
            length = int(params['length'])
            filekey = re.sub('/VERIFY', '', str(request.path))
            # reject path-traversal attempts in the filekey
            paths = [p for p in filekey.split(os.path.sep) if p != '']
            if len(paths) > 1:
                msg = "Bad request:"\
                        " filekey contains illegal path seperator tokens."
                loggerretr.debug(msg)
                request.setResponseCode(http.BAD_REQUEST, msg)
                return msg
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            return authenticate(request, reqKu, params['nodeID'],
                    host, int(params['port']), self.node.client, self.config,
                    self._sendVerify, request, filekey, offset, length, reqKu,
                    params['nodeID'], meta)

    def _sendVerify(self, request, filekey, offset, length, reqKu, nodeID,
            meta):
        """
        Hash bytes [offset, offset+length) of the named block and return the
        hash.  Looks in the store dir first, then in the requestor's per-node
        tarball.  If 'meta' (a (metakey, metadata) pair) is supplied, the
        stored metadata for the block is added/updated as a side effect.
        """
        fname = self.config.storedir+filekey
        loggervrfy.debug("request for %s" % fname)
        if os.path.exists(fname):
            loggervrfy.debug("looking in regular blockfile for %s" % fname)
            if meta:
                # opened read-write because metadata may be appended below
                f = BlockFile.open(fname, 'rb+')
            else:
                f = BlockFile.open(fname, 'rb')
        else:
            # check for tarball for originator
            loggervrfy.debug("checking tarball for %s" % fname)
            tarballs = []
            tarballbase = os.path.join(self.config.storedir,
                    reqKu.id())+".tar"
            if os.path.exists(tarballbase+".gz"):
                tarballs.append((tarballbase+".gz", 'r:gz'))
            if os.path.exists(tarballbase):
                tarballs.append((tarballbase, 'r'))
            loggervrfy.debug("tarballs is %s" % tarballs)
            for tarball, openmode in tarballs:
                loggervrfy.debug("looking in tarball %s..." % tarball)
                tar = tarfile.open(tarball, openmode)
                try:
                    tfilekey = filekey[1:]
                    tarf = tar.extractfile(tfilekey)
                    tari = tar.getmember(tfilekey)
                    # XXX: update timestamp on tarf in tarball
                    fsize = tari.size
                    if offset > fsize or (offset+length) > fsize:
                        # XXX: should limit length
                        loggervrfy.debug("VERIFY response failed (from %s):"
                                " bad offset/length" % tarball)
                        msg = "Bad request: bad offset/length in VERIFY"
                        request.setResponseCode(http.BAD_REQUEST, msg)
                        return msg
                    # XXX: could avoid seek/read if length == 0
                    tarf.seek(offset)
                    # XXX: bad blocking read
                    data = tarf.read(length)
                    tarf.close()
                    if meta:
                        mfname = "%s.%s.meta" % (tfilekey, meta[0])
                        loggervrfy.debug("looking for %s" % mfname)
                        if mfname in tar.getnames():
                            # make sure that the data is the same, if not,
                            # remove it and re-add it
                            tarmf = tar.extractfile(mfname)
                            # XXX: bad blocking read
                            stored_meta = tarmf.read()
                            tarmf.close()
                            if meta[1] != stored_meta:
                                # rewrite the member: delete + append
                                # (gunzip/regzip around the append when the
                                # tarball is compressed)
                                loggervrfy.debug("updating tarball"
                                        " metadata for %s.%s"
                                        % (tfilekey, meta[0]))
                                tar.close()
                                TarfileUtils.delete(tarball, mfname)
                                if openmode == 'r:gz':
                                    tarball = TarfileUtils.gunzipTarball(
                                            tarball)
                                tar = tarfile.open(tarball, 'a')
                                metaio = StringIO(meta[1])
                                tinfo = tarfile.TarInfo(mfname)
                                tinfo.size = len(meta[1])
                                tar.addfile(tinfo, metaio)
                                tar.close()
                                if openmode == 'r:gz':
                                    tarball = TarfileUtils.gzipTarball(
                                            tarball)
                            else:
                                loggervrfy.debug("no need to update tarball"
                                        " metadata for %s.%s"
                                        % (tfilekey, meta[0]))
                        else:
                            # add it
                            loggervrfy.debug("adding tarball metadata"
                                    " for %s.%s" % (tfilekey, meta[0]))
                            tar.close()
                            if openmode == 'r:gz':
                                tarball = TarfileUtils.gunzipTarball(tarball)
                            tar = tarfile.open(tarball, 'a')
                            metaio = StringIO(meta[1])
                            tinfo = tarfile.TarInfo(mfname)
                            tinfo.size = len(meta[1])
                            tar.addfile(tinfo, metaio)
                            tar.close()
                            if openmode == 'r:gz':
                                tarball = TarfileUtils.gzipTarball(
                                        tarball)
                    tar.close()
                    hash = hashstring(data)
                    loggervrfy.info("successful VERIFY (from %s)" % tarball)
                    return hash
                except:
                    # member not in this tarball; try the next one
                    tar.close()
            loggervrfy.debug("requested file %s doesn't exist" % fname)
            msg = "Not found: not storing %s" % filekey
            request.setResponseCode(http.NOT_FOUND, msg)
            return msg
        # make sure request is reasonable
        fsize = os.stat(fname)[stat.ST_SIZE]
        if offset > fsize or (offset+length) > fsize:
            # XXX: should limit length
            loggervrfy.debug("VERIFY response failed (bad offset/length)")
            msg = "Bad request: bad offset/length in VERIFY"
            request.setResponseCode(http.BAD_REQUEST, msg)
            return msg
        else:
            # XXX: blocking
            # XXX: could avoid seek/read if length == 0 (noop for meta update)
            f.seek(offset)
            data = f.read(length)
            if meta:
                loggervrfy.debug("adding metadata for %s.%s"
                        % (fname, meta[0]))
                f.addNode(int(nodeID, 16), {meta[0]: meta[1]})
            # XXX: blocking
            f.close()
            hash = hashstring(data)
            loggervrfy.debug("returning VERIFY")
            return hash

    def _sendErr(self, error, request, msg):
        """Errback: report an authorization failure and finish the request."""
        out = "%s:%s" % (msg, error.getErrorMessage())
        loggervrfy.info(out)
        # update trust, routing
        request.setResponseCode(http.UNAUTHORIZED, "Unauthorized: %s" % msg)
        request.write(msg)
        request.finish()
class DELETE(ROOT):
"""
A request to delete data. The file to delete is indicated by the URL
path, e.g. http://server:port/DELETE/a35cd1339766ef209657a7b
Response codes: 200- OK (default)
400- Bad Request (missing params)
401- Unauthorized (ID hash, CHALLENGE, or GROUPCHALLENGE
failed, or nodeID doesn't own this block)
"""
isLeaf = True
def render_GET(self, request):
self.setHeaders(request)
try:
required = ('nodeID', 'Ku_e', 'Ku_n', 'port', 'metakey')
params = requireParams(request, required)
except Exception, inst:
msg = inst.args[0] + " in request received by DELETE"
loggerdele.log(logging.INFO, msg)
request.setResponseCode(http.BAD_REQUEST, "Bad Request")
return msg
else:
loggerdele.debug("received DELETE request for %s from %s..."
% (request.path, params['nodeID'][:10]))
host = getCanonicalIP(request.getClientIP())
port = int(params['port'])
filekey = re.sub('/DELETE', '', str(request.path))
paths = [p for p in filekey.split(os.path.sep) if p != '']
if len(paths) > 1:
msg = "filekey contains illegal path seperator tokens."
loggerretr.debug("BAD_REQ: %s" % msg)
request.setResponseCode(http.BAD_REQUEST,
"Bad Request: %s" % msg)
return msg
reqKu = {}
reqKu['e'] = long(params['Ku_e'])
reqKu['n'] = long(params['Ku_n'])
reqKu = FludRSA.importPublicKey(reqKu)
metakey = params['metakey']
return authenticate(request, reqKu, params['nodeID'],
host, int(params['port']), self.node.client, self.config,
self._deleteFile, request, filekey, metakey, reqKu,
params['nodeID'])
def _deleteFile(self, request, filekey, metakey, reqKu, reqID):
fname = self.config.storedir + filekey
loggerdele.debug("reading file data from %s" % fname)
if not os.path.exists(fname):
# check for tarball for originator
tarballs = []
tarballbase = os.path.join(self.config.storedir, reqKu.id())+".tar"
if os.path.exists(tarballbase+".gz"):
tarballs.append((tarballbase+".gz", 'r:gz'))
if os.path.exists(tarballbase):
tarballs.append((tarballbase, 'r'))
for tarball, openmode in tarballs:
tfilekey = filekey[1:]
mfilekey = "%s.%s.meta" % (tfilekey, metakey)
loggerdele.debug("opening %s, %s for delete..."
% (tarball, openmode))
ftype = os.popen('file %s' % tarball)
loggerdele.debug("ftype of %s is %s" % (tarball, ftype.read()))
ftype.close()
tar = tarfile.open(tarball, openmode)
mnames = [n for n in tar.getnames()
if n[:len(tfilekey)] == tfilekey]
tar.close()
if len(mnames) > 2:
deleted = TarfileUtils.delete(tarball, mfilekey)
else:
deleted = TarfileUtils.delete(tarball, [tfilekey, mfilekey])
if deleted:
loggerdele.info("DELETED %s (from %s)" % (deleted, tarball))
return ""
request.setResponseCode(http.NOT_FOUND, "Not found: %s" % filekey)
request.write("Not found: %s" % filekey)
else:
f = BlockFile.open(fname,"rb+")
nID = int(reqID, 16)
if f.hasNode(nID):
# remove this node/metakey from owning this file block
f.delNode(nID, metakey)
if f.emptyNodes():
# if this was the only owning node, delete it
f.close()
os.remove(fname)
f.close()
loggerdele.debug("returning DELETE response")
return ""
def _sendErr(self, error, request, msg):
out = msg+": "+error.getErrorMessage()
loggerdele.log(logging.INFO, out )
# update trust, routing
request.setResponseCode(http.UNAUTHORIZED, "Unauthorized: %s" % msg)
request.write(msg)
request.finish()
class PROXY(ROOT):
	"""
	A special request that wraps another request.  A node receiving a PROXY
	must forward the wrapped request and keep enough state to re-wrap the
	response when it returns.

	Why proxying is useful:
	1) it inserts a bit of anonymity into VERIFY ops, so that a malicious
	   node can't know to do an early purge of data for a missing node.
	2) NAT'd boxes will need relays for STORE, VERIFY, and RETRIEVE (since
	   all those ops cause the receiver to make a connection back).

	Open design questions:
	- There must be an incentive (trust) for nodes to offer PROXY,
	  otherwise it is advantageous to turn it off.
	- Proxying hurts trust accounting: how do we know a trust-decreasing op
	  really was caused by the originator rather than one of the proxies?
	- Consider GnuNet's "Excess-Based Economic Model", where each request
	  carries a priority that 'spends' some trust at the requestee when
	  resources are scarce; proxying nodes charge a small fee on the
	  priority as they forward the request.
	"""
	isLeaf = True
	def render_GET(self, request):
		# proxying is not implemented yet; reply with a placeholder body
		self.setHeaders(request)
		return "NOT YET IMPLEMENTED"
def authenticate(request, reqKu, reqID, host, port, client, config, callable,
		*callargs):
	"""
	Verify the requester's identity, then invoke callable(*callargs).

	Steps: 1- make sure that reqKu hashes to reqID; 2- require a valid
	challenge/groupchallenge response (encrypted with reqKu), issuing a
	fresh challenge when none was supplied.
	"""
	challengeResponse = request.getUser()
	groupResponse = request.getPassword()
	# the supplied public key must hash to the claimed node ID
	if reqKu.id() != reqID:
		msg = "Ku %s does not hash to nodeID %s, failing request" \
				% (reqKu.id(), reqID)
		loggerauth.info(msg)
		# XXX: update trust, routing
		request.setResponseCode(http.UNAUTHORIZED, "Unauthorized: %s" % msg)
		return msg
	# no credentials supplied yet: issue a fresh challenge
	if not challengeResponse or not groupResponse:
		loggerauth.info("returning challenge for request from %s:%d" \
				% (host, port))
		return sendChallenge(request, reqKu, config.nodeID)
	# the echoed challenge must be one we issued and haven't expired
	if not getChallenge(challengeResponse):
		err = "Challenge Failed"
		loggerauth.info(err)
		loggerauth.debug("Challenge received was %s" % challengeResponse)
		# XXX: update trust, routing
		request.setResponseCode(http.FORBIDDEN, err)
		return err
	expireChallenge(challengeResponse)
	# group membership: hash of requester's key + shared group secret
	expected = hashstring(
			str(reqKu.exportPublicKey())
			+str(config.groupIDr))
	if groupResponse != expected:
		err = "Group Challenge Failed"
		loggerauth.info(err)
		loggerauth.debug("Group Challenge received was %s" \
				% groupResponse)
		# XXX: update trust, routing
		request.setResponseCode(http.FORBIDDEN, err)
		return err
	updateNode(client, config, host, port, reqKu, reqID)
	return callable(*callargs)
def sendChallenge(request, reqKu, id):
	"""Create a random challenge, encrypt it to reqKu, record it as
	outstanding, and return the 401 response body carrying it."""
	challenge = generateRandom(challengelength)
	while challenge[0] == '\x00':
		# make sure we have at least challengelength bytes
		challenge = generateRandom(challengelength)
	addChallenge(challenge)
	plain = binascii.unhexlify(id)+challenge
	loggerauth.debug("unencrypted challenge is %s" % fencode(plain))
	echallenge = fencode(reqKu.encrypt(plain)[0])
	loggerauth.debug("echallenge = %s" % echallenge)
	# since challenges will result in a new req/resp pair being generated,
	# these could take much longer than the primitive_to.  Expire in
	# 15*primitive_to
	reactor.callLater(primitive_to*15, expireChallenge, challenge, True)
	resp = 'challenge = %s' % echallenge
	loggerauth.debug("resp = %s" % resp)
	request.setResponseCode(http.UNAUTHORIZED, echallenge)
	for hname, hval in (
			('Connection', 'close'),
			('WWW-Authenticate', 'Basic realm="%s"' % 'default'),
			('Content-Length', str(len(resp))),
			('Content-Type', 'text/html'),
			('Pragma', 'claimreserve=5555')):  # XXX: Pragma doesn't work
		request.setHeader(hname, hval)
	return resp
outstandingChallenges = {}
def addChallenge(challenge):
	"""Record an issued challenge so a later response can be matched."""
	outstandingChallenges[challenge] = True
	loggerauth.debug("added challenge %s" % fencode(challenge))
def expireChallenge(challenge, expired=False):
	"""
	Remove a challenge from the outstanding set.

	challenge may be fencoded or raw; expired=True means we are removing
	it because its timer fired rather than because it was answered.
	"""
	try:
		challenge = fdecode(challenge)
	except Exception:
		# already in raw (decoded) form
		pass
	if challenge in outstandingChallenges:
		del outstandingChallenges[challenge]
		if expired:
			# XXX: should put these in an expired challenge list so that we
			#      can send a more useful message on failure (must then do
			#      expirations on expired list -- maybe better to just make
			#      these expirations really liberal).
			loggerauth.debug("expired challenge %s" % fencode(challenge))
		else:
			loggerauth.debug("deleted challenge %s" % fencode(challenge))
def getChallenge(challenge):
	"""
	Return the outstanding-challenge entry (True) for challenge, or None
	if it is unknown/expired.  challenge may be fencoded or raw.
	"""
	try:
		challenge = fdecode(challenge)
	except Exception:
		# already in raw (decoded) form
		pass
	return outstandingChallenges.get(challenge)
| Python |
"""
FludCommUtil.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Communications routines used by both client and server code.
"""
from twisted.web import client
from twisted.internet import reactor, defer
from twisted.python import failure
import binascii, httplib, logging, os, stat, random, socket
import inspect
from flud.FludExceptions import FludException
from flud.FludCrypto import FludRSA, generateRandom
from flud.HTTPMultipartDownloader import HTTPMultipartDownloader
"""
Some constants used by the Flud Protocol classes
"""
fludproto_ver = '0.1'  # flud wire-protocol version string
# XXX: when things timeout, bad news. Unintuitive exceptions spewed. Make this
# small and fix all issues.
primitive_to = 3800 # default timeout for primitives (seconds)
kprimitive_to = primitive_to/2 # default timeout for kademlia primitives
#kprimitive_to = 10 # default timeout for kademlia primitives
transfer_to = 3600 # limit on file transfers -- NOTE(review): comment said
                   # "10-hr" but 3600 s is 1 hour; confirm intended value
MAXTIMEOUTS = 5 # number of times to retry after connection timeout failure
CONNECT_TO = 60 # base TCP connect timeout (seconds)
CONNECT_TO_VAR = 5 # random jitter applied around CONNECT_TO
logger = logging.getLogger('flud.comm')
class BadCASKeyException(failure.DefaultException):
	"""Failure type for a bad content-addressable-storage (CAS) key."""
	pass
class NotFoundException(failure.DefaultException):
	"""Failure type for a requested resource that was not found."""
	pass
class BadRequestException(failure.DefaultException):
	"""Failure type for a malformed or otherwise bad request."""
	pass
i = 0
"""
Some utility functions used by both client and server.
"""
def updateNodes(client, config, nodes):
	"""Run updateNode() for each (host, port, nID, Ku-dict) entry."""
	if nodes and not isinstance(nodes, (list, tuple)):
		raise TypeError("updateNodes must be called with node list, tuple,"
				" or kData dict")
	logger.debug("updateNodes(%s)" % nodes)
	for entry in nodes:
		nodeKu = FludRSA.importPublicKey(entry[3])
		updateNode(client, config, entry[0], entry[1], nodeKu, entry[2])
updateNodePendingGETID = {}
def updateNode(client, config, host, port, nKu=None, nID=None):
	"""
	Updates this node's view of the given node.  This includes updating
	the known-nodes record, trust, and routing table information.

	May be called with nKu (public key), nID (nodeID), both, or neither;
	when the key is missing it is looked up locally or fetched via a
	GETID request, after which updateNode re-enters itself with the key.
	"""
	def updateNodeFail(failure, host, port):
		# errback for the GETID request sent below
		logging.getLogger('flud').log(logging.INFO,
				"couldn't get nodeID from %s:%d: %s" % (host, port, failure))
	def callUpdateNode(nKu, client, config, host, port, nID):
		# GETID callback: re-enter updateNode with the learned public key
		return updateNode(client, config, host, port, nKu, nID)
	if isinstance(nID, long):
		# normalize numeric IDs to the canonical 64-digit hex string
		nID = "%064x" % nID
	if nKu is None:
		#print "updateNode, no nKu"
		if nID is None:
			# neither key nor ID known: ask the remote node for its ID
			d = client.sendGetID(host, port)
			d.addCallback(callUpdateNode, client, config, host, port, nID)
			d.addErrback(updateNodeFail, host, port)
		else:
			#print "updateNode, no nKu but got a nID"
			if config.nodes.has_key(nID):
				# known node: recurse immediately with the cached key
				return updateNode(client, config, host, port,
						FludRSA.importPublicKey(config.nodes[nID]['Ku']), nID)
			elif updateNodePendingGETID.has_key(nID):
				# a GETID for this node is already in flight; skip
				pass
			else:
				#print "updateNode, sending GETID"
				updateNodePendingGETID[nID] = True
				d = client.sendGetID(host, port)
				d.addCallback(callUpdateNode, client, config, host, port, nID)
				d.addErrback(updateNodeFail, host, port)
	elif isinstance(nKu, FludRSA):
		#print "updateNode with nKu"
		if updateNodePendingGETID.has_key(nID):
			del updateNodePendingGETID[nID]
		if nID == None:
			nID = nKu.id()
		elif nID != nKu.id():
			# claimed ID does not match the key's hash
			raise ValueError("updateNode: given nID doesn't match given nKu."
					" '%s' != '%s'" % (nID, nKu.id()))
			# XXX: looks like an imposter -- instead of raising, mark host:port
			# pair as bad (trust-- on host:port alone, since we don't know id).
		if config.nodes.has_key(nID) == False:
			config.addNode(nID, host, port, nKu)
		# XXX: trust
		# routing
		node = (host, port, long(nID, 16), nKu.exportPublicKey()['n'])
		replacee = config.routing.updateNode(node)
		#logger.info("knownnodes now: %s" % config.routing.knownNodes())
		#print "knownnodes now: %s" % config.routing.knownNodes()
		if replacee != None:
			# kbucket was full: ping the eviction candidate and replace it
			# only if it fails to respond (errback)
			logging.getLogger('flud').info(
					"determining if replacement in ktable is needed")
			s = SENDGETID(replacee[0], replacee[1])
			s.addErrback(replaceNode, config.routing, replacee, node)
	else:
		# unusable argument types: log a stack trace for debugging, then raise
		#print "updateNode nKu=%s, type=%s" % (nKu, type(nKu))
		logging.getLogger('flud').warn(
				"updateNode can't update without a public key or nodeID")
		frame = inspect.currentframe()
		# XXX: try/except here for debugging only
		try:
			stack = inspect.stack()
			for i in stack:
				print "from %s:%d" % (i[1], i[2])
		except:
			print "couldn't get stack trace"
		raise ValueError("updateNode needs an nKu of type FludRSA"
				" (received %s) or an nID of type long or str (received %s)"
				% (type(nKu), type(nID)))
	# XXX: should really make it impossible to call without one of these...
def replaceNode(error, routing, replacee, replacer):
	"""Errback: the old ktable entry failed to answer; swap in the new."""
	routing.replaceNode(replacee, replacer)
	print("replaced node in ktable")
def requireParams(request, paramNames):
	"""
	Look for the named parameters in request.args.  If all are found,
	return a dict of param -> first value.  If any named parameter is
	missing (or has an empty value list), raise an Exception naming it.
	"""
	params = {}
	for name in paramNames:
		try:
			params[name] = request.args[name][0]
		except (KeyError, IndexError):
			raise Exception("missing parameter '"+name+"'") #XXX: use cust Exc
	return params
def getCanonicalIP(IP):
	"""
	Map IP to a canonical fully-qualified hostname.  Loopback addresses
	('127.0.0.1' or 'localhost') resolve to this host's own FQDN, which
	is mostly useful when multiple clients run on the same host.
	"""
	# XXX: could use gethostbyname to get IP addy instead.
	if IP in ('127.0.0.1', 'localhost'):
		return socket.getfqdn()
	return socket.getfqdn(IP)
def getPageFactory(url, contextFactory=None, *args, **kwargs):
	"""
	Start an HTTP[S] page fetch and return the HTTPClientFactory itself
	(rather than its deferred), so callers can also inspect factory
	state such as factory.status.  Connect timeout is CONNECT_TO with
	random jitter of +/- CONNECT_TO_VAR.
	"""
	def failedConnect(reason, factory):
		# if the factory already has a status, the connection itself
		# succeeded; just pass the failure through
		try:
			i = factory.status
			return reason
		except:
			pass
		#logger.warn("couldn't connect to %s:%d in getPageFactory: %s"
		#		% (factory.host, factory.port, reason))
		#logger.warn("state of factory is %s" % factory)
		#logger.warn("dir() of factory is %s" % dir(factory))
		return reason
	if len(url) >= 16384:
		raise ValueError(
				"Too much data sent: twisted server doesn't appear to"
				" support urls longer than 16384")
	scheme, host, port, path = client._parse(url)
	factory = client.HTTPClientFactory(url, *args, **kwargs)
	factory.deferred.addErrback(failedConnect, factory)
	# jitter the connect timeout to avoid synchronized retries
	to = CONNECT_TO+random.randrange(2+CONNECT_TO_VAR)-CONNECT_TO_VAR
	if scheme == 'https':
		from twisted.internet import ssl
		if contextFactory is None:
			contextFactory = ssl.ClientContextFactory()
		# NOTE(review): the SSL path does not pass the 'to' timeout
		reactor.connectSSL(host, port, factory, contextFactory)
	else:
		reactor.connectTCP(host, port, factory, timeout=to)
	return factory
def _dlPageFactory(url, target, factoryClass, contextFactory=None, timeout=None,
		*args, **kwargs):
	"""
	Shared helper for the download*PageFactory functions: construct
	factoryClass(url, target), connect it, and return the factory.
	"""
	scheme, host, port, path = client._parse(url)
	if timeout != None:
		# XXX: do something like http://twistedmatrix.com/pipermail/twisted-python/2003-August/005504.html
		# NOTE(review): 'timeout' is currently accepted but never applied
		pass
	factory = factoryClass(url, target, *args, **kwargs)
	# jittered connect timeout, as in getPageFactory
	to = CONNECT_TO+random.randrange(2+CONNECT_TO_VAR)-CONNECT_TO_VAR
	if scheme == 'https':
		from twisted.internet import ssl
		if contextFactory is None:
			contextFactory = ssl.ClientContextFactory()
		reactor.connectSSL(host, port, factory, contextFactory)
	else:
		reactor.connectTCP(host, port, factory, timeout=to)
	return factory
def downloadPageFactory(url, file, contextFactory=None, timeout=None,
		*args, **kwargs):
	"""Fetch url into 'file' (path or file-like) via HTTPDownloader,
	returning the connected factory."""
	return _dlPageFactory(url, file, client.HTTPDownloader, contextFactory,
			timeout, *args, **kwargs)
def multipartDownloadPageFactory(url, dir, contextFactory=None, timeout=None,
		*args, **kwargs):
	"""Fetch a multipart url into directory 'dir' via
	HTTPMultipartDownloader, returning the connected factory."""
	return _dlPageFactory(url, dir, HTTPMultipartDownloader, contextFactory,
			timeout, *args, **kwargs)
def fileUpload(host, port, selector, files, form=(), headers={}):
	"""
	Performs a file upload via http.

	host - webserver hostname
	port - webserver listen port
	selector - the request (relative URL)
	files - list of files to upload. list contains tuples, with the first
	    entry as filename/file-like obj and the second as form element name.
	    If the first element is a file-like obj, the element will be used as
	    the filename. If the first element is a filename, the filename's
	    basename will be used as the filename on the form. Type will be
	    "application/octet-stream"
	form (optional) - a list of pairs of additional name/value form elements
	    (param/values).
	headers (optional) - dict of extra HTTP headers to send.

	Returns the httplib.HTTPConnection after the body has been sent.
	[hopefully, this method goes away in twisted-web2]
	"""
	# XXX: set timeout (based on filesize?)
	port = int(port)
	rand_bound = binascii.hexlify(generateRandom(13))
	boundary = "---------------------------"+rand_bound
	CRLF = '\r\n'
	body_content_type = "application/octet-stream"
	content_type = "multipart/form-data; boundary="+boundary
	content_length = 0
	# plain form fields, one multipart section each
	H = []
	for (param, value) in form:
		H.append('--' + boundary)
		H.append('Content-Disposition: form-data; name="%s"' % param)
		H.append('')
		H.append('%s' % value)
	form_data = CRLF.join(H)+CRLF
	content_length = content_length + len(form_data)
	# per-file part headers; file sizes are needed up front so the
	# Content-Length header can be computed before streaming the bodies
	fuploads = []
	for file, element in files:
		if file is None:
			file = "/dev/null" # XXX: not portable
		if 'read' in dir(file):
			# file-like object: use the form element name as its filename
			fname = element
			file.seek(0,2)
			file_length = file.tell()
			file.seek(0,0)
		else:
			fname = os.path.basename(file)
			file_length = os.stat(file)[stat.ST_SIZE]
		#logger.info("upload file %s len is %d" % (fname, file_length))
		H = [] # stuff that goes above file data
		H.append('--' + boundary)
		H.append('Content-Disposition: form-data; name="%s"; filename="%s"'
				% (element, fname))
		H.append('Content-Type: %s\n' % body_content_type)
		H.append('')
		file_headers = CRLF.join(H)
		content_length = content_length + len(file_headers) + file_length
		fuploads.append((file_headers, file, file_length))
	# closing boundary, built once after all file parts
	T = [] # stuff that goes below file data
	T.append('--'+boundary+'--')
	T.append('')
	T.append('')
	trailer = CRLF.join(T)
	content_length = content_length + len(trailer)
	h = httplib.HTTPConnection(host, port) # XXX: blocking
	h.putrequest('POST', selector)
	for pageheader in headers:
		h.putheader(pageheader, headers[pageheader])
	h.putheader('Content-Type', content_type)
	h.putheader('Content-Length', content_length)
	h.endheaders()
	h.send(form_data)
	for fheader, file, flen in fuploads:
		if 'read' not in dir(file):
			# fix: open named files in binary mode; text mode ('r') can
			# corrupt octet-stream data on platforms that translate EOLs
			file = open(file, 'rb')
		h.send(fheader)
		h.send(file.read(flen)+CRLF) # XXX: blocking
		file.close()
	h.send(trailer)
	return h
class ImposterException(FludException):
	"""Raised when a node's claimed identity fails verification."""
	pass
| Python |
"""
ClientDHTPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive client DHT protocol
"""
import time, os, stat, httplib, sys, random, logging
from twisted.web import http, client
from twisted.internet import reactor, threads, defer
from twisted.python import failure
import inspect, pdb
from flud.FludCrypto import FludRSA
import flud.FludkRouting as FludkRouting
from flud.fencode import fencode, fdecode
import flud.FludDefer as FludDefer
import ConnectionQueue
from ClientPrimitives import REQUEST
from FludCommUtil import *
logger = logging.getLogger("flud.client.dht")
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should also
# consider Zooko's links in the parent to this post). Basic idea: don't
# always take the k-closest -- take x random and k-x of the k-closest.
# Can alternate each round (k-closest / x + k-x-closest) for a bit more
# diversity (as in "Sybil-resistent DHT routing").
# XXX: right now, calls to updateNode are chained. Might want to think about
# doing some of this more asynchronously, so that the recursive parts
# aren't waiting for remote GETIDs to return before recursing.
"""
The first set of classes (those beginning with 'k') perform [multiple] queries
given a key or key/value pair. They use the second set of classes (those
beginning with 'SEND'), which perform a single query to a given node.
"""
def serviceWaiting(res, key, pending, waiting):
	"""
	Provides a method for calling multiple callbacks on a saved query.

	Add serviceWaiting as a callback before returning a query's deferred,
	passing in the result, the pending dict and the waiting dict.  All
	deferreds in waiting[key] are called back with 'res', and both dicts
	are cleared of 'key'.  Returns 'res' unchanged so the callback chain
	continues.
	"""
	if key in waiting:
		for d in waiting[key]:
			d.callback(res)
		waiting.pop(key)
		pending.pop(key)
	return res
pendingkFindNodes = {}
waitingkFindNodes = {}
class kFindNode:
	"""
	Perform a kfindnode lookup.

	Implements the iterative Kademlia node lookup: starting from this
	node's own routing table, repeatedly query the closest known nodes
	for nodes closer to 'key' until no closer candidates remain.  The
	result is delivered via self.deferred as a dict whose 'k' entry is a
	list of (host, port, nodeID, ...) tuples sorted by XOR distance to
	the key.  Concurrent lookups for the same key share one in-flight
	query via pendingkFindNodes/waitingkFindNodes.
	"""
	def __init__(self, node, key):
		# if a lookup for this key is already in flight, piggyback on it:
		# register an extra Deferred to be fired with the shared result
		if pendingkFindNodes.has_key(key):
			d = defer.Deferred()
			if not waitingkFindNodes.has_key(key):
				waitingkFindNodes[key] = []
			waitingkFindNodes[key].append(d)
			logger.debug("piggybacking on previous kfindnode for %s" % key)
			self.deferred = d
			return
		self.node = node
		self.node.DHTtstamp = time.time()
		self.key = key
		self.queried = {}      # nodeID -> (host, port) of nodes that answered
		self.outstanding = []  # (host, port, id) queries currently in flight
		self.pending = []      # candidate nodes not yet queried
		self.kclosest = []     # best k nodes seen so far, by XOR distance
		self.abbrvkey = ("%x" % key)[:8]+"..."
		self.abbrv = "(%s%s)" % (self.abbrvkey, str(self.node.DHTtstamp)[-7:])
		self.debugpath = []    # human-readable trace of lookup progress
		self.deferred = self.startQuery(key)
	def startQuery(self, key):
		# query self first
		kclosest = self.node.config.routing.findNode(key)
		#logger.debug("local kclosest: %s" % kclosest)
		localhost = getCanonicalIP('localhost')
		# seed updateLists with our own routing table's answer, shaped
		# like a remote kFINDNODE response
		kd = {'id': self.node.config.nodeID, 'k': kclosest}
		d = self.updateLists(kd, key, localhost, self.node.config.port,
				long(self.node.config.nodeID, 16))
		d.addErrback(self.errkfindnode, key, localhost, self.node.config.port)
		# record the in-flight lookup so later callers can piggyback
		pendingkFindNodes[key] = d
		d.addCallback(serviceWaiting, key, pendingkFindNodes, waitingkFindNodes)
		return d
	def sendQuery(self, host, port, id, key):
		# mark the query as in flight before sending it
		self.outstanding.append((host, port, id))
		#d = self.node.client.sendkFindNode(host, port, key)
		d = SENDkFINDNODE(self.node, host, port, key).deferred
		return d
	def updateLists(self, response, key, host, port, closestyet, x=0):
		# Fold one kFINDNODE response into the lookup state, then decide
		# whether another round of queries is needed ('x' is the round
		# counter; 'closestyet' is kept only for the callback signature).
		logger.info("FN: received kfindnode %s response from %s:%d"
				% (self.abbrv, host, port))
		self.debugpath.append("FN: rec. resp from %s:%d" % (host, port))
		if not isinstance(response, dict):
			# a data value is being returned from findval
			# XXX: moved this bit into findval and call parent for the rest
			if response == None:
				logger.warn("got None from key=%s, %s:%d, x=%d, this usually"
						" means that the host replied None to a findval query"
						% (key, host, port, x))
			# if we found the fencoded value data, return it
			return defer.succeed(response)
		logger.debug("updateLists(%s)" % response)
		if len(response['k']) == 1 and response['k'][0][2] == key:
			# if we've found the key, don't keep making queries.
			logger.debug("FN: %s:%d found key %s" % (host, port, key))
			self.debugpath.append("FN: %s:%d found key %s" % (host, port, key))
			if response['k'][0] not in self.kclosest:
				self.kclosest.insert(0,response['k'][0])
				self.kclosest = self.kclosest[:FludkRouting.k]
			return defer.succeed(response)
		#for i in response['k']:
		#	print "  res: %s:%d" % (i[0], i[1])
		id = long(response['id'], 16)
		responder = (host, port, id)
		if responder in self.outstanding:
			self.outstanding.remove(responder)
		self.queried[id] = (host, port)
		knodes = response['k']
		for n in knodes:
			# queue any newly-learned node for querying...
			if not self.queried.has_key(n[2])\
					and not n in self.pending and not n in self.outstanding:
				self.pending.append((n[0], n[1], n[2]))
			# ...and keep only the k closest nodes seen so far
			if n not in self.kclosest:
				k = FludkRouting.k
				# XXX: remove self it in the list?
				self.kclosest.append(n)
				self.kclosest.sort(
						lambda a, b: FludkRouting.kCompare(a[2], b[2], key))
				self.kclosest = self.kclosest[:k]
		#for n in self.outstanding:
		#	if n in self.pending:
		#		self.pending.remove(n) # remove anyone we've sent queries to...
		self.pending = list(set(self.pending) - set(self.outstanding))
		for i in self.queried:
			n = (self.queried[i][0], self.queried[i][1], i)
			if n in self.pending:
				self.pending.remove(n) # ...and anyone who has responded.
		self.pending.sort(lambda a, b: FludkRouting.kCompare(a[2], b[2], key))
		#print "queried: %s" % str(self.queried)
		#print "outstanding: %s" % str(self.outstanding)
		#print "pending: %s" % str(self.pending)
		return self.decideToContinue(response, key, x)
	def decideToContinue(self, response, key, x):
		##print "x is %s" % str(x)
		##for i in self.kclosest:
		##	print "  kclosest %s" % str(i)
		##for i in self.queried:
		##	print "  queried %s" % str(self.queried[i])
		#if len(filter(lambda x: x not in self.queried, self.kclosest)) <= 0:
		#	print "finishing up at round %d" % x
		#	# XXX: never gets in here...
		#	# XXX: remove anything not in self.kclosest from self.pending
		#	self.pending =\
		#			filter(lambda x: x not in self.kclosest, self.pending)
		#	#self.pending = self.pending[:FludkRouting.k]
		#else:
		#	return self.makeQueries(key, x)
		# this is here so that kFindVal can plug-in by overriding
		return self.makeQueries(key, x)
	def makeQueries(self, key, x):
		# launch up to alpha (FludkRouting.a) parallel queries drawn from
		# the sorted pending list; gather the round in a DeferredList
		#print "doing round %d" % x
		self.debugpath.append("FN: doing round %d" % x)
		dlist = []
		for n in self.pending[:(FludkRouting.a - len(self.outstanding))]:
			#print "  querying %s:%d" % (n[0], n[1])
			self.debugpath.append("FN: querying %s:%d" % (n[0], n[1]))
			d = self.sendQuery(n[0], n[1], n[2], key)
			d.addCallback(self.updateLists, key, n[0], n[1],
					self.kclosest[0][2], x+1)
			d.addErrback(self.errkfindnode, key, n[0], n[1],
					raiseException=False)
			dlist.append(d)
		dl = defer.DeferredList(dlist)
		dl.addCallback(self.roundDone, key, x)
		return dl
	def roundDone(self, responses, key, x):
		# a full round of parallel queries has completed; prune nodes that
		# never answered, then build the sorted k-closest result
		#print "done %d:" % x
		#print "roundDone: %s" % responses
		if len(self.pending) != 0 or len(self.outstanding) != 0:
			# should only get here for nodes that don't accept connections
			# XXX: updatenode -- decrease trust
			for i in self.pending:
				logger.debug("FN: %s couldn't contact node %s (%s:%d)"
						% (self.abbrv, fencode(i[2]), i[0], i[1]))
				self.debugpath.append(
						"FN: %s couldn't contact node %s (%s:%d)"
						% (self.abbrv, fencode(i[2]), i[0], i[1]))
				for n in self.kclosest:
					if (n[0],n[1],n[2]) == i:
						self.kclosest.remove(n)
		logger.info("kFindNode %s terminated successfully after %d queries."
				% (self.abbrv, len(self.queried)))
		self.debugpath.append("FN: %s terminated successfully after %d queries."
				% (self.abbrv, len(self.queried)))
		self.kclosest.sort(
				lambda a, b: FludkRouting.kCompare(a[2], b[2], key))
		result = {}
		if FludkRouting.k > len(self.kclosest):
			k = len(self.kclosest)
		else:
			k = FludkRouting.k
		result['k'] = self.kclosest[:k]
		#print "result: %s" % result
		#if len(result['k']) > 1:
		#	# if the results (aggregated from multiple responses) contains the
		#	# exact key, just return the correct answer (successful node
		#	# lookup done).
		#	#print "len(result): %d" % len(result['k'])
		#	#print "result[0][2]: %s %d" % (type(result['k'][0][2]),
		#	#		result['k'][0][2])
		#	#print "        key: %s %d" % (type(key), key)
		#	if result['k'][0][2] == key:
		#		#print "key matched!"
		#		result['k'] = (result['k'][0],)
		return result
	def errkfindnode(self, failure, key, host, port, raiseException=True):
		# single-query failure; with raiseException=False (mid-lookup) the
		# failure is swallowed so the round's DeferredList can proceed
		logger.info("kFindNode %s request to %s:%d failed -- %s" % (self.abbrv,
				host, port, failure.getErrorMessage()))
		# XXX: updateNode--
		if raiseException:
			return failure
class kStore(kFindNode):
	"""
	Perform a kStore operation.

	Runs a kFindNode lookup for 'key', then sends a kSTORE of (key, val)
	to each of the k closest nodes found.  Result arrives on
	self.deferred.
	"""
	def __init__(self, node, key, val):
		self.node = node
		self.node.DHTtstamp = time.time()
		self.key = key
		self.val = val
		# find the k closest nodes, then store on each of them
		d = kFindNode(node,key).deferred
		d.addCallback(self.store)
		d.addErrback(self._kStoreErr, None, 0)
		self.deferred = d
	def store(self, knodes):
		# send a kSTORE to every node returned by the lookup
		knodes = knodes['k']
		if len(knodes) < 1:
			raise RuntimeError("can't complete kStore -- no nodes")
		dlist = []
		for knode in knodes:
			host = knode[0]
			port = knode[1]
			deferred = SENDkSTORE(self.node, host, port, self.key,
					self.val).deferred
			deferred.addErrback(self._kStoreErr, host, port)
			dlist.append(deferred)
		# gather all the store results; failures surface via errback
		dl = FludDefer.ErrDeferredList(dlist)
		dl.addCallback(self._kStoreFinished)
		dl.addErrback(self._kStoreErr, None, 0)
		return dl
	def _kStoreFinished(self, response):
		#print "_kStoreFinished: %s" % response
		logger.info("kStore finished")
		return ""
	def _kStoreErr(self, failure, host, port):
		# host is None / port is 0 when the failure is not node-specific
		logger.info("couldn't store on %s:%d -- %s"
				% (host, port, failure.getErrorMessage()))
		print "_kStoreErr was: %s" % failure
		# XXX: updateNode--
		return failure
class kFindValue(kFindNode):
	"""
	Perform a kFindValue.

	Same iterative lookup as kFindNode, except each query is a
	kFINDVALUE; if any node returns the stored value (a non-dict) instead
	of kData, querying stops and the (majority) value is returned.
	"""
	def __init__(self, node, key):
		# 'done' flips to True as soon as any node returns the value
		self.done = False
		kFindNode.__init__(self, node, key)
	def startQuery(self, key):
		# query self first. We override kFindNode.startQuery here so that
		# we don't just return the closest nodeID, but the value itself (if
		# present)
		localhost = getCanonicalIP('localhost')
		d = self.sendQuery(localhost, self.node.config.port,
				long(self.node.config.nodeID, 16), key)
		d.addCallback(self.updateLists, key, localhost, self.node.config.port,
				long(self.node.config.nodeID, 16), 0)
		d.addErrback(self.errkfindnode, key, localhost, self.node.config.port)
		return d
	def sendQuery(self, host, port, id, key):
		# We override sendQuery here in order to call sendkFindValue and handle
		# its response
		self.outstanding.append((host, port, id))
		d = SENDkFINDVALUE(self.node, host, port, key).deferred
		d.addCallback(self._handleFindVal, host, port)
		d.addErrback(self.errkfindnode, key, host, port)
		return d
	def _handleFindVal(self, response, host, port):
		# any non-dict response is the stored value itself
		if not isinstance(response, dict):
			# stop sending out more queries.
			self.pending = []
			self.done = True
			#print "%s:%d sent value: %s" % (host, port, str(response)[:50])
			#f = {}
			#f['k'] = []
			#f['id'] = "0"
			#f['val'] = response # pass on returned value
			#return f
		else:
			pass
			#print "%s:%d sent kData: %s" % (host, port, response)
		return response
	def decideToContinue(self, response, key, x):
		if self.done:
			#if not response.has_key('val'):
			#	logger.warn("response has no 'val', response is: %s" % response)
			#return response['val']
			return response
		else:
			return self.makeQueries(key, x)
	def roundDone(self, responses, key, x):
		# NOTE(review): when the value was never found (self.done False),
		# this falls through and implicitly returns None -- confirm callers
		# expect None rather than the kFindNode-style node list
		self.debugpath.append("FV: roundDone %d" % x)
		if self.done:
			result = {}
			# see if everyone's responses agreed...
			for success, resp in responses:
				# only look at successful non-kData (dict) responses.
				if success and resp != None and not isinstance(resp, dict):
					if result.has_key(resp):
						result[resp] += 1
					else:
						result[resp] = 1
			if len(result) == 0:
				# ... if no one responded, XXX: do something orther than None?
				logger.info("couldn't get any results")
				return None
			elif len(result) == 1:
				# ... if they did, return the result
				return result.keys()[0]
			else:
				# ... otherwise, return the result of the majority
				# (other options include returning all results)
				logger.info("got conflicting results, determining best...")
				quorumResult = None
				bestScore = 0
				for r in result:
					#logger.debug("result %s scored %d" % (r, result[r]))
					if result[r] > bestScore:
						bestScore = result[r]
						quorumResult = r
						#logger.debug("result %s is new best" % r)
				logger.info("returning result %s", fdecode(quorumResult))
				return quorumResult
class SENDkFINDNODE(REQUEST):
	"""
	Makes one request to a node for its k-closest nodes closest to key
	"""
	def __init__(self, node, host, port, key, commandName="kFINDNODE"):
		"""
		Enqueue a single commandName request for 'key' to host:port.
		The eventual (parsed) response arrives on self.deferred.
		"""
		logger.info("sending %s (findnode) for %s... to %s:%d"
				% (commandName, ("%x" % key)[:10], host, port))
		self.commandName = commandName
		host = getCanonicalIP(host)
		REQUEST.__init__(self, host, port, node)
		Ku = self.node.config.Ku.exportPublicKey()
		# build the query URL carrying our identity and the target key
		url = 'http://'+host+':'+str(port)+'/'+self.commandName+'?'
		url += 'nodeID='+str(self.node.config.nodeID)
		url += "&Ku_e="+str(Ku['e'])
		url += "&Ku_n="+str(Ku['n'])
		url += '&port='+str(self.node.config.port)
		url += "&key="+str(fencode(key))
		self.timeoutcount = 0
		self.deferred = defer.Deferred()
		# queue rather than send immediately, bounding concurrent conns;
		# ConnectionQueue later calls startRequest()
		ConnectionQueue.enqueue((self, node, host, port, key, url))
	def startRequest(self, node, host, port, key, url):
		# invoked by ConnectionQueue when a connection slot frees up
		d = self._sendRequest(node, host, port, key, url)
		d.addBoth(ConnectionQueue.checkWaiting)
		d.addCallback(self.deferred.callback)
		d.addErrback(self.deferred.errback)
	def _sendRequest(self, node, host, port, key, url):
		factory = getPageFactory(url,
				headers=self.headers, timeout=kprimitive_to)
		factory.deferred.addCallback(self._gotResponse, factory,
				node, host, port, key)
		factory.deferred.addErrback(self._errSendk, factory, node,
				host, port, key, url)
		return factory.deferred
	def _gotResponse(self, response, factory, node, host, port, key):
		logger.debug("kfindnode._gotResponse()")
		self._checkStatus(factory.status, response, host, port)
		# NOTE(review): eval() of network data is unsafe -- a malicious
		# peer controls this string; should use fdecode or a real parser
		response = eval(response)
		nID = long(response['id'], 16)
		# refresh routing/known-nodes state for the responder and for
		# every node it told us about
		updateNode(node.client, node.config, host, port, None, nID)
		updateNodes(node.client, node.config, response['k'])
		return response
	def _checkStatus(self, status, response, host, port):
		logger.debug("kfindnode._checkStatus()")
		if eval(status) != http.OK:
			raise failure.DefaultException(self.commandName+" FAILED from "
					+host+":"+str(port)+": received status "+status+", '"
					+response+"'")
	def _errSendk(self, err, factory, node, host, port, key, url):
		# retry a bounded number of times on timeout/connection loss
		if err.check('twisted.internet.error.TimeoutError') or \
				err.check('twisted.internet.error.ConnectionLost'):
			#print "GETID request error: %s" % err.__class__.__name__
			self.timeoutcount += 1
			if self.timeoutcount < MAXTIMEOUTS:
				#print "trying again [#%d]...." % self.timeoutcount
				return self._sendRequest(node, host, port, key, url)
			else:
				#print "not trying again [#%d]" % self.timeoutcount
				return err
		logger.info("%s to %s failed -- %s"
				% (self.commandName, self.dest, err.getErrorMessage()))
		# XXX: updateNode--
		return err
class SENDkSTORE(REQUEST):
	"""
	Sends a single kSTORE to the given host:port, storing val under key.
	The request is queued on ConnectionQueue; self.deferred fires with the
	server's response (or the final error after MAXTIMEOUTS retries).
	"""
	def __init__(self, node, host, port, key, val):
		logger.info("sending kSTORE to %s:%d" % (host, port))
		REQUEST.__init__(self, host, port, node)
		# advertise our nodeID, public key and listening port, plus the
		# fencoded key/val pair, as query params
		Ku = node.config.Ku.exportPublicKey()
		url = 'http://'+host+':'+str(port)+'/kSTORE?'
		url += 'nodeID='+str(node.config.nodeID)
		url += "&Ku_e="+str(Ku['e'])
		url += "&Ku_n="+str(Ku['n'])
		url += '&port='+str(node.config.port)
		url += "&key="+str(fencode(key))
		url += "&val="+str(fencode(val))
		# XXX: instead of a single key/val, protocol will take a series of
		#      vals representing the blocks of the coded file and their
		#      locations (by nodeID).  The entire thing will be stored under
		#      the given key.  Also may need things like signature[s] from
		#      storing node[s], etc.
		self.timeoutcount = 0
		self.deferred = defer.Deferred()
		ConnectionQueue.enqueue((self, host, port, url))

	def startRequest(self, host, port, url):
		# invoked by ConnectionQueue when a connection slot frees up;
		# chains the page request's final outcome into self.deferred
		d = self._sendRequest(host, port, url)
		d.addBoth(ConnectionQueue.checkWaiting)
		d.addCallback(self.deferred.callback)
		d.addErrback(self.deferred.errback)

	def _sendRequest(self, host, port, url):
		factory = getPageFactory(url,
				headers=self.headers, method='PUT', timeout=kprimitive_to)
		# Attach the handlers to the request's own deferred (the pattern
		# used by SENDkFINDNODE._sendRequest), not to self.deferred:
		# attaching to self.deferred appended a fresh handler pair to the
		# caller-visible deferred on every retry.
		factory.deferred.addCallback(self._kStoreFinished, host, port)
		factory.deferred.addErrback(self._storeErr, host, port, url)
		return factory.deferred

	def _kStoreFinished(self, response, host, port):
		logger.info("kSTORE to %s:%d finished" % (host, port))
		return response

	def _storeErr(self, err, host, port, url):
		# retry on timeout/lost-connection up to MAXTIMEOUTS; any other
		# failure (or retry exhaustion) is propagated to the caller
		if err.check('twisted.internet.error.TimeoutError') or \
				err.check('twisted.internet.error.ConnectionLost'):
			self.timeoutcount += 1
			if self.timeoutcount < MAXTIMEOUTS:
				return self._sendRequest(host, port, url)
			else:
				return err
		logger.info("kSTORE to %s failed: %s"
				% (self.dest, err.getErrorMessage()))
		# XXX: updateNode--
		return err
class SENDkFINDVALUE(SENDkFINDNODE):
	"""
	Issues a single kFINDVALUE request to host:port for the key.
	If the value is found at host:port, it is returned, otherwise, a
	404 response is received and any errbacks are called.
	"""
	def __init__(self, node, host, port, key):
		# identical to SENDkFINDNODE except for the command name
		SENDkFINDNODE.__init__(self, node, host, port, key, "kFINDVAL")

	def _gotResponse(self, response, factory, node, host, port, key):
		self._checkStatus(factory.status, response, host, port)
		# Diverges from kFINDNODE here: when the queried node holds the
		# value itself, it marks the reply with an x-flud-data
		# content-type, and we hand back the raw data instead of
		# continuing the recursive search.
		headers = factory.response_headers
		if 'content-type' in headers \
				and headers['content-type'] == ['application/x-flud-data']:
			logger.info("received SENDkFINDVALUE data.")
			nID = None
			if 'nodeid' in headers:
				nID = headers['nodeid'][0]
			updateNode(node.client, node.config, host, port, None, nID)
			return response
		# otherwise the reply is a dict literal of closer nodes, exactly
		# as in kFINDNODE
		parsed = eval(response)
		nID = long(parsed['id'], 16)
		updateNode(node.client, node.config, host, port, None, nID)
		logger.info("received SENDkFINDVALUE nodes")
		logger.debug("received SENDkFINDVALUE nodes: %s" % parsed)
		updateNodes(node.client, node.config, parsed['k'])
		return parsed
| Python |
import sys
from flud.FludFileCoder import Coder, Decoder
def encode(n, m, fname, stem):
	"""
	Encode fname into coded block files written out under stem.
	n and m are passed straight through to the Coder (presumably data
	and check block counts -- see FludFileCoder).
	"""
	Coder(n, m, 7).codeData(fname, stem)
def decode(n, m, dfname, ifnames):
	"""
	Reconstruct dfname from the candidate block files in ifnames,
	stopping as soon as one decode attempt succeeds.
	"""
	decoder = Decoder(dfname, n, m, 7)
	# any() short-circuits on the first successful decode, exactly like
	# the explicit loop-and-break it replaces
	any(decoder.decodeData(name) for name in ifnames)
# note: super primitive commandline args; this is a throwaway:
# ldpc [-d|-e] n m fname stem
n = int(sys.argv[2])    # coder 'n' parameter -- presumably data-block count; TODO confirm
m = int(sys.argv[3])    # coder 'm' parameter -- presumably check-block count; TODO confirm
fname = sys.argv[4]     # file to encode, or output file when decoding
stem = sys.argv[5]      # filename prefix for the per-block files
if sys.argv[1] == '-e':
	encode(n,m,fname,stem)
elif sys.argv[1] == '-d':
	# the n+m block files are expected to be named "<stem>-0000", "<stem>-0001", ...
	decode(n,m,fname,["%s-%04d" % (stem, i) for i in range(n+m)])
| Python |
"""
FludkRouting.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Implements kademlia-style kbuckets (the routing table for the DHT layer).
Although this is not a derivative of Khashmir (written by Andrew Loewenstern,
Aaron Swartz, et al.), we would like to give Khashmir a nod for inspiring
portions of the design. Khashmir is distributed under the MIT License and is a
very nice piece of work. Take a look at http://khashmir.sourceforge.net/ for
more information.
"""
from bisect import *
import logging
#k = 5 # This is the max depth of a kBucket
k = 12 # This is the max depth of a kBucket and the replication factor
# XXX: need to split out k into two vars
a = 3 # alpha, the system-wide concurrency parameter
idspace = 256 # using sha-256 (node IDs span the 2**256 integer space)
logger = logging.getLogger("flud.k")
class NodeCache:
	"""
	A bounded LRU-style cache of node triples, keyed by node ID (node[2]).
	Insertion order is tracked in cacheOrder; when the cache grows beyond
	its size limit the oldest entry is evicted.
	"""
	def __init__(self, size):
		# size: maximum number of nodes held before eviction kicks in
		self.size = size
		self.cache = {}       # nodeID -> (ip, port, nodeID) triple
		self.cacheOrder = []  # nodeIDs in insertion order, oldest first

	def insertNode(self, node):
		"""
		Adds a node to the cache.  If this displaces the oldest entry,
		the displaced node's ID is returned; otherwise returns None.
		(Nodes already present, by ID, are left untouched.)
		"""
		if node[2] not in self.cache:
			self.cache[node[2]] = node
			self.cacheOrder.append(node[2])
			if len(self.cacheOrder) > self.size:
				popped = self.cacheOrder.pop(0)
				self.cache.pop(popped)
				return popped

	def removeNode(self, node):
		"""Removes the given node from the cache, if present."""
		if node[2] in self.cache:
			self.cache.pop(node[2])
			# bugfix: was cacheOrder.pop(node[2]), which treats the node
			# ID as a list *index* (raising IndexError or removing the
			# wrong entry); remove() deletes by value.
			self.cacheOrder.remove(node[2])

	def nodes(self):
		"""Returns a list of all cached node triples."""
		return [self.cache[i] for i in self.cache]
def kCompare(a, b, target):
	"""
	Comparator using the XOR distance metric: orders a and b by their
	distance to target (useful for sorting node IDs, closest first).
	@param a an integer (or long) value
	@param b an integer (or long) value
	@param target the target ID as an integer (or long) value
	@return -1 if a is closer to target than b, 1 if b is closer to
	        target than a, 0 if they are equidistant
	        (the old docstring's "1 if b > a, -1 if a < b" described
	        neither the XOR distances nor the actual sign convention)

	>>> kCompare(4, 1, 5)
	-1
	>>> kCompare(1, 4, 5)
	1
	>>> kCompare(3, 3, 10)
	0
	"""
	x, y = target^a, target^b
	if x == y:
		return 0
	# sign of (x - y): negative exactly when a is nearer to target
	return int((x - y) / abs(x - y))
class kRouting:
	"""
	Contains the kBuckets for this node.  Provides methods for inserting,
	updating, and removing nodes.  Most importantly, performs kademlia-style
	routing by returning the node[s] closest to a particular id.

	>>> table = kRouting(('1.2.3.4', 34, 123456), 20, 5)
	>>> table.insertNode(('2.2.3.4', 34, 23456))
	>>> table.insertNode(('3.2.3.4', 34, 223456))
	>>> table.insertNode(('4.2.3.4', 34, 723456))
	>>> table.insertNode(('5.2.3.4', 34, 423456))
	>>> table.insertNode(('6.2.3.4', 34, 323456))
	>>> table.kBuckets
	[{'0-80000': [('1.2.3.4', 34, 123456), ('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456)]}, {'80001-100000': [('4.2.3.4', 34, 723456)]}]
	>>> table.findNode(23456)
	[('2.2.3.4', 34, 23456), ('1.2.3.4', 34, 123456), ('3.2.3.4', 34, 223456), ('6.2.3.4', 34, 323456), ('5.2.3.4', 34, 423456)]
	>>> table.findNode(55555)
	[('2.2.3.4', 34, 23456), ('1.2.3.4', 34, 123456), ('3.2.3.4', 34, 223456), ('6.2.3.4', 34, 323456), ('5.2.3.4', 34, 423456)]
	>>> table.findNode(722222)
	[('4.2.3.4', 34, 723456), ('3.2.3.4', 34, 223456), ('1.2.3.4', 34, 123456), ('2.2.3.4', 34, 23456), ('5.2.3.4', 34, 423456)]
	>>> table.insertNode(('7.2.3.4', 34, 733456))
	>>> table.insertNode(('8.2.3.4', 34, 743456))
	>>> table.insertNode(('9.2.3.4', 34, 753456))
	>>> table.insertNode(('10.2.3.4', 34, 763456))
	>>> table.insertNode(('11.2.3.4', 34, 773456))
	('4.2.3.4', 34, 723456)
	>>> table.replaceNode(('4.2.3.4', 34, 723456), ('11.2.3.4', 34, 773456))
	>>> table.kBuckets
	[{'0-80000': [('1.2.3.4', 34, 123456), ('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456)]}, {'80001-100000': [('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]}]
	>>> table.removeNode(('1.2.3.4', 34, 123456))
	>>> table.kBuckets
	[{'0-80000': [('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456)]}, {'80001-100000': [('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]}]
	>>> table.knownNodes()
	[('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456), ('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]
	>>> table.knownExternalNodes()
	[('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456), ('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]
	"""
	def __init__(self, node, bits=idspace, depth=k):
		"""
		@param node a (ip, port, id) triple, where id is an int (this is
			needed to know when to split a bucket).
		"""
		self.k = depth
		# overflow nodes that don't fit their bucket land here (LRU)
		self.replacementCache = NodeCache(300)
		# start with a single bucket spanning the whole id space;
		# buckets split on demand as they fill (see insertNode)
		self.kBuckets = [kBucket(0, 2**bits, depth),]
		#self.kBuckets = [kBucket(0, 1, depth),]
		#for i in xrange(1,bits):
		#	self.kBuckets.append(kBucket(2**i, 2**(i+1)-1, depth))
		self.insertNode(node)
		self.node = node

	def insertNode(self, node):
		"""
		Inserts a node into the appropriate kBucket.  If the node already
		exists in the appropriate kBucket, it is moved to the tail of the list.
		If the bucket is full, this method returns the oldest node, which the
		caller should then ping.  If the oldest node is alive, the caller
		does nothing.  Otherwise, the caller should call replaceNode.
		@param node a (ip, port, id) triple, where id is a long.
		"""
		if len(node) < 3:
			raise ValueError("node must be a triple (ip, port, id)")
		id = node[2]
		bucket = self._findBucket(id)
		try:
			# XXX: need to transfer key/vals that belong to new node?
			bucket.updateNode(node)
			self.replacementCache.removeNode(node)
		except BucketFullException, e:
			if (bucket.begin <= self.node[2] < bucket.end):
				# bucket is full /and/ the local node is in this bucket,
				# split and try adding it again.
				self._splitBucket(bucket)
				self.insertNode(node)
				logger.debug("split and added %x" % node[2])
				return
			# XXX: need to also split for some other cases, see sections 2.4
			# and 4.2.
			else:
				# bucket is full but we won't split.  Return the oldest node
				# so that the caller can determine if it should be expunged.
				# If the old node is not reachable, caller should call
				# replaceNode()
				logger.debug("didn't add %x" % node[2])
				return bucket.contents[0]
		# NOTE(review): this fall-through also runs when updateNode()
		# *succeeded*, so a successful insert still logs "didn't add" and
		# returns the bucket's oldest node -- looks unintended; confirm
		# against callers before changing.
		logger.debug("didn't add %x" % node[2])
		return bucket.contents[0]

	def removeNode(self, node):
		"""
		Invalidates a node.
		"""
		bucket = self._findBucket(node[2])
		bucket.delNode(node)

	def replaceNode(self, replacee, replacer):
		"""
		Expunges replacee from its bucket, making room to add replacer
		"""
		# XXX: constraint checks: replacee & replacer belong to the same bucket,
		# bucket is currently full, adding replacer doesn't overfill, etc.
		self.removeNode(replacee)
		self.insertNode(replacer)

	def findNode(self, nodeID):
		"""
		Returns k closest node triples with which the caller may make
		additional queries.  If nodeID is found, it will be the first result.
		@param nodeID an int
		"""
		nodes = []
		bucket = self._findBucket(nodeID)
		#n = bucket.findNode(nodeID)
		#if n != None:
		#	nodes.append(n)
		nodes += bucket.contents
		# widen the search to neighboring buckets until we have k
		# candidates or run out of buckets on both sides
		if len(nodes) < self.k:
			nextbucket = self._nextbucket(bucket)
			prevbucket = self._prevbucket(bucket)
			while len(nodes) < self.k \
					and (nextbucket != None or prevbucket != None):
				if nextbucket != None:
					nodes += nextbucket.contents
				if prevbucket != None:
					nodes += prevbucket.contents
				nextbucket = self._nextbucket(nextbucket)
				prevbucket = self._prevbucket(prevbucket)
		# sort by XOR distance to the target and keep the k nearest
		nodes.sort(lambda a, b, n=nodeID: cmp(n ^ a[2], n ^ b[2]))
		return nodes[:self.k]

	def findNodeOld(self, nodeID):
		"""
		Attempts to find the given node, returning a <ip, port, id> triple.
		If the node is not found locally, returns k closest node triples with
		which the caller may make additional queries.
		@param nodeID an int
		"""
		bucket = self._findBucket(nodeID)
		n = bucket.findNode(nodeID)
		if n != None:
			return (n,)
		# nodeID isn't in our routing table, so return the k closest matches
		nodes = []
		nodes += bucket.contents
		if len(nodes) < self.k:
			nextbucket = self._nextbucket(bucket)
			prevbucket = self._prevbucket(bucket)
			while len(nodes) < self.k \
					and (nextbucket != None or prevbucket != None):
				if nextbucket != None:
					nodes += nextbucket.contents
				if prevbucket != None:
					nodes += prevbucket.contents
				nextbucket = self._nextbucket(nextbucket)
				prevbucket = self._prevbucket(prevbucket)
		nodes.sort(lambda a, b, n=nodeID: cmp(n ^ a[2], n ^ b[2]))
		return nodes[:self.k]

	def updateNode(self, node):
		"""
		Call to update a node, i.e., whenever the node has been recently seen
		@param node a (ip, port, id) triple, where id is an int.
		"""
		self.insertNode(node)

	def knownExternalNodes(self):
		# all nodes in all buckets except ourselves, plus the
		# replacement cache
		result = []
		for i in self.kBuckets:
			for j in i.contents:
				if j[2] != self.node[2]:
					result.append(j)
		result += self.replacementCache.nodes()
		return result

	def knownNodes(self):
		# all nodes in all buckets (including ourselves), plus the
		# replacement cache
		result = []
		for i in self.kBuckets:
			for j in i.contents:
				result.append(j)
		result += self.replacementCache.nodes()
		return result

	def _nextbucket(self, bucket):
		# bucket after the given one, or None at the high end
		if bucket == None:
			return bucket
		i = self.kBuckets.index(bucket)+1
		if i >= len(self.kBuckets):
			return None
		return self.kBuckets[i]

	def _prevbucket(self, bucket):
		# bucket before the given one, or None at the low end
		if bucket == None:
			return bucket
		i = self.kBuckets.index(bucket)-1
		if i < 0:
			return None
		return self.kBuckets[i]

	def _findBucket(self, i):
		"""
		returns the bucket which would contain i.
		@param i an int
		"""
		#print "kBuckets = %s" % str(self.kBuckets)
		# bisect works because kBucket defines ordering comparisons
		# against plain ints: a bucket "equals" any id in its range.
		# NOTE(review): bisect_left is computed twice; 'bl' could be
		# reused in the return statement.
		bl = bisect_left(self.kBuckets, i)
		if bl >= len(self.kBuckets):
			raise Exception(
					"tried to find an ID that is larger than ID space: %s" % i)
		return self.kBuckets[bisect_left(self.kBuckets, i)]

	def _splitBucket(self, bucket):
		"""
		This is called for the special case when the bucket is full and this
		node is a member of the bucket.  When this occurs, the bucket should
		be split into two new buckets.
		"""
		halfpoint = (bucket.end - bucket.begin) / 2
		newbucket = kBucket(bucket.end - halfpoint + 1, bucket.end, self.k)
		# kBucket.__eq__ lets us locate this bucket by any id it contains
		self.kBuckets.insert(self.kBuckets.index(bucket.begin) + 1, newbucket)
		bucket.end -= halfpoint
		# migrate nodes that now fall in the upper half to the new bucket
		for node in bucket.contents[:]:
			if node[2] > bucket.end:
				bucket.delNode(node)
				newbucket.addNode(node)
class kBucket:
"""
A kBucket is a list of <ip, port, id> triples, ordered according to time
last seen (most recent at tail). Every kBucket has a begin and end
number, indicating the chunk of the id space that it contains.
>>> b = kBucket(0,100,5)
>>> b
{'0-64': []}
>>> n1 = ('1.2.3.4', 45, 'd234a53546e4c23')
>>> n2 = ('10.20.30.40', 45, 'abcd234a53546e4')
>>> n3 = ('10.20.30.4', 5, 'abcd')
>>> b.addNode(n1)
>>> b
{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23')]}
>>> b.addNode(n2)
>>> b
{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23'), ('10.20.30.40', 45, 'abcd234a53546e4')]}
>>> b.addNode(n1)
>>> b
{'0-64': [('10.20.30.40', 45, 'abcd234a53546e4'), ('1.2.3.4', 45, 'd234a53546e4c23')]}
>>> b.delNode(n3)
>>> b
{'0-64': [('10.20.30.40', 45, 'abcd234a53546e4'), ('1.2.3.4', 45, 'd234a53546e4c23')]}
>>> b.addNode(n2)
>>> b
{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23'), ('10.20.30.40', 45, 'abcd234a53546e4')]}
>>> b.updateNode(n1)
>>> b
{'0-64': [('10.20.30.40', 45, 'abcd234a53546e4'), ('1.2.3.4', 45, 'd234a53546e4c23')]}
>>> b.delNode(n2)
>>> b
{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23')]}
>>> b.addNode(n3)
>>> f = b.findNode(n3[2])
>>> f == n3
True
>>> c = kBucket(101,200,5)
>>> d = kBucket(150,250,5) # wouldn't really have overlap in practice
>>> e = kBucket(251, 2**256,5)
>>> buckets = (b, c, d, e) # if not added insort, must sort for bisect
>>> b1 = b
>>> b1 == b
True
>>> b1 != b
False
>>> b == 50
True
>>> b == 0
True
>>> b == 100
True
>>> b == -1
False
>>> b > -1
True
>>> b == 101
False
>>> b < 101
True
>>> b <= 90
True
>>> b <= 100
True
>>> b <= 101
True
>>> b < d
True
>>> b <= c
True
>>> b > c
False
>>> bisect_left(buckets, 98)
0
>>> bisect_left(buckets, 198)
1
>>> bisect_left(buckets, 238)
2
>>> bisect_left(buckets, 298)
3
"""
def __init__(self, begin, end, depth=k):
self.k = depth
self.begin = begin
self.end = end
self.contents = []
def __repr__(self):
return "{'%x-%x': %s}" % (self.begin, self.end, self.contents)
#return "{'"+repr(self.begin)+'-'+repr(self.end)+"': "\
# +repr(self.contents)+"}"
#return "<kBucket "+repr(self.begin)+'-'+repr(self.end)+": "\
# +repr(self.contents)+">"
def addNode(self, node):
""" adds the given node to this bucket. If the node is already a member
of this bucket, its position is updated to the end of the list. If the
bucket is full, raises an exception
"""
if node in self.contents:
self.contents.remove(node)
self.contents.append(node)
elif len(self.contents) >= self.k:
raise BucketFullException()
else:
ids = [x[2] for x in self.contents]
if node[2] in ids:
# remove the matching node's old contact info
self.contents.pop(ids.index(node[2]))
self.contents.append(node)
def updateNode(self, node):
""" Moves the given node to the tail of the list. If the node isn't
present in this bucket, this method attempts to add it by calling
addNode (which may throw a BucketFullException if bucket is full)
"""
self.addNode(node)
def delNode(self, node):
""" removes the given node, if present, from this bucket """
try:
self.contents.remove(node)
except:
pass
def findNode(self, nodeID):
for i in self.contents:
if i[2] == nodeID:
return i
return None
# The following comparators allow us to use list & bisect on the buckets.
# integers, longs, and buckets all may be compared to a bucket.
def __eq__(self, i):
return i >= self.begin and self.end >= i
def __ne__(self, i):
return i < self.begin or self.end < i
def __lt__(self, i):
return self.end < i
def __le__(self, i):
return self.begin <= i
def __gt__(self, i):
return self.begin > i
def __ge__(self, i):
return self.end >= i
class BucketFullException(Exception):
	"""Raised by kBucket.addNode when the bucket already holds k nodes."""
def _test():
import doctest
doctest.testmod()
# run the module doctests when executed directly
if __name__ == '__main__':
	_test()
| Python |
from distutils.core import setup, Extension
# Extension module wrapping the C++ ldpc file coder; links against
# prebuilt object files and the ldpc library.
filecoder = Extension('filecoder',
		sources = ['filecodermodule.cpp'],
		extra_objects = ['CodedBlocks.o', 'Coder.o', 'Decoder.o'],
		include_dirs = ['ldpc/src'],
		libraries = ['ldpc', 'stdc++'],
		library_dirs = ['ldpc/bin/linux'],
		# bugfix: distutils expects 'language' to be a string, not a
		# list; ['c++'] broke the extension-language detection.
		language = 'c++')

setup(name = 'FileCoding',
		version = '1.0',
		description = 'Module for file coding / decoding',
		ext_modules = [filecoder])
| Python |
from twisted.python import failure
from twisted.internet import defer
"""
FludDefer.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
"""
class ErrDeferredList(defer.DeferredList):
	"""
	ErrDeferredList acts just like DeferredList, except that if *any* of the
	Deferreds in the DeferredList errback(), the NewDeferredList also
	errback()s.  This is different from DeferredList(fireOnOneErrback=True) in
	that if you use that method, you only know about the first failure, and you
	won't learn of subsequent failures/success in the list.  returnOne
	indicates whether the full result of the DeferredList should be returned,
	or just the first result (or first error)
	"""
	def __init__(self, list, returnOne=False):
		# consumeErrors so individual failures are reported only through
		# this list's own errback chain
		defer.DeferredList.__init__(self, list, consumeErrors=True)
		self.returnOne = returnOne
		self.addCallback(self.wrapResult)

	def wrapResult(self, result):
		# result is a list of (succeeded, value) pairs; the first failed
		# entry turns the whole list into a failure
		for succeeded, value in result:
			if not succeeded:
				if self.returnOne:
					raise failure.DefaultException(value)
				raise failure.DefaultException(result)
		if self.returnOne:
			return result[0][1]
		return result
| Python |
#!/usr/bin/python
"""
FludNode.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
FludNode is the process that runs to talk with other nodes in the flud backup network.
"""
from twisted.internet import reactor, defer
import threading, signal, sys, time, os, random, logging
from flud.FludConfig import FludConfig
from flud.protocol.FludServer import FludServer
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import getCanonicalIP
PINGTIME=60    # seconds between pingRandom reschedules (upper bound)
SYNCTIME=900   # seconds between periodic config saves (syncConfig)
class FludNode(object):
	"""
	A node in the flud network.  A node is both a client and a server.  It
	listens on a network accessible port for both DHT and Storage layer
	requests, and it listens on a local port for client interface commands.
	"""
	def __init__(self, port=None):
		# port: server port override; None defers to FludConfig
		self._initLogger()
		self.config = FludConfig()
		# drop the bootstrap screen handler before loading config --
		# presumably config.load() installs real logging; TODO confirm
		self.logger.removeHandler(self.screenhandler)
		self.config.load(serverport=port)
		self.client = FludClient(self)
		# timestamp gate used by (the currently disabled) pingRandom
		self.DHTtstamp = time.time()+10

	def _initLogger(self):
		# bootstrap logger so early messages reach the screen before
		# configuration-driven logging is set up
		logger = logging.getLogger('flud')
		self.screenhandler = logging.StreamHandler()
		self.screenhandler.setLevel(logging.INFO)
		logger.addHandler(self.screenhandler)
		self.logger = logger

	def pingRandom(self, tstamp):
		# NOTE(review): this early return disables the whole method; the
		# bucket-refresh logic below it is dead code (see XXX).
		return
		# XXX: see pg. 4, Section 2.2 (short) or 2.3 (long) of the Kademlia
		# paper -- once an hour, each node should check any buckets that
		# haven't been refreshed and pick a random id within that space
		# to findnode(id) on, for all buckets.
		if tstamp < self.DHTtstamp:
			#r = random.randrange(2**256)
			n = self.config.routing.knownExternalNodes()
			if len(n) > 2:
				# pick an id between two known node ids
				n1 = random.choice(n)[2]
				n2 = random.choice(n)[2]
				r = (n1+n2)/2
			else:
				r = random.randrange(2**256)
			def badNode(error):
				# NOTE(review): 'node' is an unresolved global here and
				# the sys.argv references look copy-pasted from a
				# script; harmless only because this path is unreachable.
				node.logger.warn("Couldn't ping %s:%s" %
						(sys.argv[1], sys.argv[2]))
			d = self.client.kFindNode(r)
			d.addErrback(badNode)
		# reschedule at a randomized interval
		pingtime = random.randrange(PINGTIME/2, PINGTIME)
		reactor.callLater(pingtime, self.pingRandom, time.time())

	def syncConfig(self):
		# periodically flush the config to disk
		self.config.save()
		reactor.callLater(SYNCTIME, self.syncConfig)

	def start(self, twistd=False):
		""" starts the reactor in this thread """
		self.webserver = FludServer(self, self.config.port)
		self.logger.log(logging.INFO, "FludServer starting")
		reactor.callLater(1, self.pingRandom, time.time())
		# stagger the first config sync over 10s
		reactor.callLater(random.randrange(10), self.syncConfig)
		if not twistd:
			reactor.run()

	def run(self):
		""" starts the reactor in its own thread """
		#signal.signal(signal.SIGINT, self.sighandler)
		signal.signal(signal.SIGINT, signal.SIG_DFL)
		signal.signal(signal.SIGTERM, signal.SIG_DFL)
		self.webserver = FludServer(self, self.config.port)
		self.webserver.start()
		# XXX: need to do save out current config every X seconds
		# XXX: need to seperate known_nodes from config, and then update this
		# every X seconds.  only update config when it changes.

	def stop(self):
		self.logger.log(logging.INFO, "shutting down FludNode")
		self.webserver.stop()

	def join(self):
		# blocks until the server thread exits
		self.webserver.join()

	def sighandler(self, sig, frame):
		self.logger.log(logging.INFO, "handling signal %s" % sig)

	def connectViaGateway(self, host, port):
		# bootstrap: find our own id via the gateway, then refresh every
		# bucket we aren't a member of by looking up a random id in it
		def refresh(knodes):
			def refreshDone(results):
				self.logger.info("bucket refreshes finished: %s" % results)
				print "flud node connected and listening on port %d"\
						% self.config.port
			#print "found knodes %s" % knodes
			dlist = []
			for bucket in self.config.routing.kBuckets:
				#if True:
				if bucket.begin <= self.config.routing.node[2] < bucket.end:
					pass
					#print "passed on bucket %x-%s" % (bucket.begin, bucket.end)
				else:
					refreshID = random.randrange(bucket.begin, bucket.end)
					#print "refreshing bucket %x-%x by finding %x" \
					#		% (bucket.begin, bucket.end, refreshID)
					self.logger.info("refreshing bucket %x-%x by finding %x"
							% (bucket.begin, bucket.end, refreshID))
					deferred = self.client.kFindNode(refreshID)
					dlist.append(deferred)
			dl = defer.DeferredList(dlist)
			dl.addCallback(refreshDone)
			# XXX: do we need to ping newly discovered known nodes?  If not,
			# we could be vulnerable to a poisoning attack (at first
			# glance, this attack seems rather impotent...)
			# XXX: need to call refresh about every 60 minutes.  Add a
			# reactor.callLater here to do it.
		def badGW(error):
			self.logger.warn(error)
			self.logger.warn("Couldn't connect to gateway at %s:%s" %
					(sys.argv[1], sys.argv[2]))
		self.logger.debug("connectViaGateway %s%d" % (host, port))
		deferred = self.client.sendkFindNode(host, port,
				self.config.routing.node[2])
		deferred.addCallback(refresh)
		deferred.addErrback(badGW)
def getPath():
	"""Return the absolute directory holding this module (hack used to
	locate FludNode.tac)."""
	return os.path.abspath(os.path.dirname(__file__))
| Python |
"""
CheckboxState.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
CheckboxState represents the states which a checkbox in DirCtrl can take
"""
class CheckboxState:
	"""Enumerates the six selection states a DirCtrl checkbox can take."""

	UNSELECTED = 0
	SELECTED = 1
	SELECTEDCHILD = 2
	SELECTEDPARENT = 3
	EXCLUDED = 4
	EXCLUDEDCHILD = 5

	@staticmethod
	def offset(oldstate, newstate):
		"""Return the numeric delta from oldstate to newstate."""
		return newstate - oldstate
| Python |
class FludException(Exception):
	"""Base class for flud-specific exceptions."""
| Python |
#!/usr/bin/python
"""
FludTestGauges.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Provides gauges for visualizing storage for multiple flud nodes running on
the same host. This is really only useful for demos and testing.
"""
import sys, os, signal, stat, random
import wx
import wx.lib.buttons as buttons
from flud.FludConfig import FludConfig
# running byte total accumulated by visit(); reset by du()
dutotal = 0

def visit(arg, top, files):
	"""
	os.path.walk visitor: adds the size of each entry in files (relative
	to directory top) to the global dutotal.  arg is unused -- the old
	'arg += dutotal' only rebound a local and had no effect, so it was
	removed.
	"""
	global dutotal
	for name in files:
		dutotal += os.lstat("%s" % (os.path.join(top, name)))[stat.ST_SIZE]

def du(dir):
	"""Return the total size in bytes of all entries under dir."""
	global dutotal
	dutotal = 0
	os.path.walk(dir, visit, dutotal)
	return dutotal
# XXX: too much manual layout.  should convert to a managed layout to allow for
# resizing, etc.
# All dimensions below are pixels.
SGAUGEWIDTH = 230 # storage gauge
DGAUGEWIDTH = 100 # dht gauge (NOTE(review): only used in COLWIDTH below; the
                  # dht gauge itself is drawn at SGAUGEWIDTH/3 -- confirm)
GAUGEHEIGHT = 20
ROWHEIGHT = 30
SEP = 5           # gap between adjacent widgets
LABELWIDTH = 20
POWERWIDTH = 70   # width of the power on/off button
RATIOBARHEIGHT = 70
COLWIDTH = SGAUGEWIDTH+DGAUGEWIDTH+LABELWIDTH+POWERWIDTH
COLGAPFUDGE = 30
class FludTestGauges(wx.Frame):
	"""
	Frame showing, per flud node, a block-storage gauge, a dht (metadata)
	gauge, and a power on/off button, plus totals and a store/metadata
	ratio bar.  A wx.PyTimer polls disk usage once a second.
	"""
	def __init__(self, parent, title, dirroot, dirs):
		# lay the per-node rows out in as many columns as needed to fit
		# on screen
		screenHeight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)-100
		rowheight = ROWHEIGHT+SEP
		height = len(dirs)*(rowheight)+RATIOBARHEIGHT
		columns = height / screenHeight + 1
		width = COLWIDTH*columns
		if columns > 1:
			height = (len(dirs)/columns)*(rowheight)+RATIOBARHEIGHT
			if (len(dirs) % columns) > 0:
				height += rowheight
		wx.Frame.__init__(self, parent, wx.ID_ANY, title, size=(width,height),
				style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
		# gauge scaling state; bar tops double whenever data outgrows
		# them (see update())
		self.storebarend = 1024
		self.smultiplier = 100.0 / self.storebarend
		self.sdivisor = 1
		self.sbytelabel = ""
		self.dhtbarend = 512
		self.dmultiplier = 100.0 / self.dhtbarend
		self.ddivisor = 1
		self.dbytelabel = ""
		self.storeheading = wx.StaticText(self, -1, "block storage",
				(LABELWIDTH, 5))
		self.totaldht = wx.StaticText(self, -1, "metadata",
				(LABELWIDTH+SGAUGEWIDTH+SEP, 5))
		self.gauges = []
		curCol = 0
		curRow = 30
		for i in range(len(dirs)):
			# storage gauge for node i
			self.gauges.append(wx.Gauge(self, -1, 100,
					(curCol*COLWIDTH+LABELWIDTH, curRow),
					(SGAUGEWIDTH, GAUGEHEIGHT)))
			self.gauges[i].SetBezelFace(3)
			self.gauges[i].SetShadowWidth(3)
			self.gauges[i].SetValue(0)
			self.gauges[i].dir = "%s%s" % (dirroot,dirs[i])
			# point FLUDHOME at the node's directory so FludConfig reads
			# that node's configuration
			os.environ['FLUDHOME'] = self.gauges[i].dir;
			conf = FludConfig()
			conf.load(doLogging = False)
			print "%s" % conf.nodeID
			self.gauges[i].label = wx.StaticText(self, -1, "%2s" % dirs[i],
					(curCol*COLWIDTH, curRow+(rowheight/4)),
					size=(LABELWIDTH, -1))
			self.gauges[i].idlabel = wx.StaticText(self, -1, "%s" % conf.nodeID,
					(curCol*COLWIDTH+LABELWIDTH, curRow+20))
			font = self.gauges[i].idlabel.GetFont()
			font.SetPointSize(6)
			self.gauges[i].idlabel.SetFont(font)
			# dht/metadata gauge for node i
			self.gauges[i].dhtgauge = wx.Gauge(self, -1, 100,
					(curCol*COLWIDTH+LABELWIDTH+SGAUGEWIDTH+SEP,
						curRow),
					(SGAUGEWIDTH/3, GAUGEHEIGHT))
			# the button id doubles as the gauge index (see onClick)
			self.gauges[i].power = wx.Button(self, i, "turn OFF",
					(curCol*COLWIDTH
						+LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3,
						curRow),
					(POWERWIDTH, ROWHEIGHT))
			#self.gauges[i].power = buttons.GenBitmapToggleButton(self, i,
			#		None,
			#		(LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3, curRow),
			#		(POWERWIDTH, ROWHEIGHT))
			#self.gauges[i].button.SetBestSize()
			self.gauges[i].power.SetToolTipString("power on/off")
			self.Bind(wx.EVT_BUTTON, self.onClick, self.gauges[i].power)
			curRow += rowheight
			if curRow > height-RATIOBARHEIGHT:
				# column full, wrap to the next one
				curCol += 1
				curRow = 30
		self.totalstore = wx.StaticText(self, -1, "total: 0",
				(LABELWIDTH, height-40))
		self.totaldht = wx.StaticText(self, -1, "total: 0",
				(LABELWIDTH+SGAUGEWIDTH+SEP, height-40))
		self.ratiogauge = wx.Gauge(self, -1, 100, (LABELWIDTH, height-20),
				(SGAUGEWIDTH+SEP+SGAUGEWIDTH/3, 10))
		self.ratiogauge.SetValue(0)
		self.Bind(wx.EVT_IDLE, self.IdleHandler)
		# poll disk usage once a second
		self.timer = wx.PyTimer(self.update)
		self.timer.Start(1000)

	def onClick(self, event):
		# toggles a node's power: kills the twistd process if running,
		# otherwise restarts it with the saved command line/environment.
		# XXX: note that under our current startNnodes.sh scheme, the first
		# node spawned doesn't contact anyone, so if that one is powered off
		# and then powered back on, it will not be part of the node until
		# another node pings it
		# XXX: unix-specific proc management stuff follows
		idx = event.GetId()
		home = self.gauges[idx].dir
		pidfile = os.path.join(home, 'twistd.pid')
		if os.path.exists(pidfile):
			print "shutting down %s" % home
			f = open(pidfile)
			pid = int(f.read())
			f.close()
			# save the process's command line and FLUD* environment so we
			# can relaunch it later
			# XXX: ps command no worky on windows, and "-ww" may not worker on
			# oldskool unixes
			self.gauges[idx].savedCmd = os.popen(
					"ps f -wwp %d -o args=" % pid).read()
			procline = os.popen("ps e -wwp %d" % pid).read()
			self.gauges[idx].savedEnv = [e for e in procline.split()
					if e[:4] == 'FLUD']
			# XXX: os.kill no worky on windows, need something like:
			#def windowskill(pid):
			#	import win32api
			#	handle = win32api.OpenProcess(1, 0, pid)
			#	return (0 != win32api.TerminateProcess(handle, 0))
			os.kill(pid, signal.SIGTERM)
			self.gauges[idx].power.SetLabel("turn ON")
			self.gauges[idx].Hide()
			self.gauges[idx].dhtgauge.Hide()
		else:
			# NOTE(review): savedCmd/savedEnv only exist after a power-off
			# in this session; powering "on" a node that was never powered
			# off here would raise AttributeError -- confirm intended.
			print "powering up %s" % home
			# XXX: this exec no worky on windows
			fullcmd = "%s %s" % (' '.join(self.gauges[idx].savedEnv),
					self.gauges[idx].savedCmd)
			print fullcmd
			result = os.popen('%s %s' % (' '.join(self.gauges[idx].savedEnv),
					self.gauges[idx].savedCmd)).readlines()
			self.gauges[idx].power.SetLabel("turn OFF")
			self.gauges[idx].Show()
			self.gauges[idx].dhtgauge.Show()
			print result

	def update(self):
		# timer callback: re-measure each node's store/ and dht/ disk
		# usage and refresh all gauges and totals
		def sizeclass(num):
			# pick a display divisor and unit suffix for num bytes
			divisor = 1
			bytelabel = ""
			if num > 1024:
				divisor = 1024.0
				bytelabel = 'K'
			if num > 1048576:
				divisor = 1048576.0
				bytelabel = 'M'
			if num > 1073741824:
				divisor = 1073741824.0
				bytelabel = 'G'
			return (divisor, bytelabel)
		storelargest = 0
		dhtlargest = 0
		storetotal = 0
		dhttotal = 0
		for i in self.gauges:
			if os.path.isdir(i.dir):
				i.storebytes = du(os.path.join(i.dir,'store'))
				if i.storebytes > storelargest:
					storelargest = i.storebytes
				storetotal += i.storebytes
				i.dhtbytes = du(os.path.join(i.dir,'dht'))
				if i.dhtbytes > dhtlargest:
					dhtlargest = i.dhtbytes
				dhttotal += i.dhtbytes
			else:
				# node dir vanished: zero it out and disable its widgets
				i.storebytes = 0
				i.dhtbytes = 0
				i.Disable()
				i.power.Disable()
		# rescale the bars (doubling) whenever the largest value
		# outgrows the current bar maximum
		while storelargest > self.storebarend:
			self.storebarend = self.storebarend * 2
		self.smultiplier = 100.0 / self.storebarend
		self.sdivisor, self.sbytelabel = sizeclass(storetotal)
		while dhtlargest > self.dhtbarend:
			self.dhtbarend = self.dhtbarend * 2
		self.dmultiplier = 100.0 / self.dhtbarend
		self.ddivisor, self.dbytelabel = sizeclass(dhttotal)
		#print "-----"
		for i in self.gauges:
			i.SetValue(i.storebytes*self.smultiplier)
			i.dhtgauge.SetValue(i.dhtbytes*self.dmultiplier)
			#print "%.2f, %.2f" % ((float(i.storebytes)/float(i.dhtbytes)),
			#		(float(i.GetValue())/float(i.dhtgauge.GetValue())))
		self.totalstore.SetLabel("total: %.1f%s"
				% (float(storetotal)/self.sdivisor, self.sbytelabel))
		self.totaldht.SetLabel("total: %.1f%s"
				% (float(dhttotal)/self.ddivisor, self.dbytelabel))
		if (dhttotal+storetotal == 0):
			self.ratiogauge.SetValue(0)
		else:
			self.ratiogauge.SetValue((storetotal*100/(dhttotal+storetotal)))

	def updateGauges(self, update):
		# NOTE(review): self.monitors is never assigned anywhere in this
		# class -- this method looks vestigial/dead; confirm callers.
		for index, value in update:
			self.monitors[index].setValue(value)

	def IdleHandler(self, event):
		# no-op; bound to wx.EVT_IDLE in __init__
		pass
def main():
if len(sys.argv) < 2:
print "usage: %s dircommon exts" % sys.argv[0]
print " where exts will be appended to dircommon"
print " e.g., '%s /home/joe/.flud 1,2,3,4,10,15,20'"\
% sys.argv[0]
print " or, '%s /home/joe/.flud 1-10,15,20'"\
% sys.argv[0]
sys.exit()
root = sys.argv[1]
exts = []
dirs = [d.strip() for d in sys.argv[2].split(',')]
for i in dirs:
if i == "_":
exts.append('') # undocumented, means "just dircommon"
elif i.find('-') >= 0:
start, end = i.split('-')
for j in range(int(start),int(end)+1):
exts.append(j)
else:
exts.append(int(i))
app = wx.PySimpleApp()
t = FludTestGauges(None, 'Flud Test Gauges', root, exts)
t.Show(1)
app.MainLoop()
if __name__ == '__main__':
    # script entry point
    main()
| Python |
import os, stat, sys, tarfile, tempfile
import gzip
from flud.FludCrypto import hashstream
from flud.fencode import fencode
"""
TarfileUtils.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Provides additional tarfile functionality (deletion of a member from a
tarball, and concatenation of tarballs).
"""
def delete(tarball, membernames):
    """
    Deletes a member file[s] from a tarball.  Returns the names of deleted
    members if they are removed, False if the file[s] aren't members.  If
    membernames contains all the members in the tarball, the entire tarball
    is deleted and True is returned.

    The caller's membernames list is not modified; requested names that
    are not present in the tarball are simply ignored.
    """
    gzipped = False
    if tarball[-7:] == ".tar.gz":
        gzipped = True
        f = tarfile.open(tarball, 'r:gz')
    else:
        f = tarfile.open(tarball, 'r')
    if not isinstance(membernames, list):
        membernames = [membernames]
    tarnames = f.getnames()
    # BUGFIX: the original removed items from membernames while iterating
    # over the same list, which silently skips elements; build a filtered
    # copy instead (this also stops mutating the caller's list).
    membernames = [m for m in membernames if m in tarnames]
    if len(membernames) < 1:
        f.close()
        return False
    if len(tarnames) == len(membernames):
        # every member is being deleted: remove the whole tarball
        f.close()
        os.remove(tarball)
        return True
    f.close()
    if gzipped:
        tarball = gunzipTarball(tarball)
    f = open(tarball, 'r+')
    # mkstemp instead of the race-prone mktemp
    tfd, tfile = tempfile.mkstemp()
    os.close(tfd)
    if gzipped:
        f2 = gzip.GzipFile(tfile, 'w')
    else:
        f2 = open(tfile, 'w')
    empty = tarfile.BLOCKSIZE * '\0'
    done = False
    removednames = []
    # walk the archive block-by-block, copying every member except the
    # ones being deleted into the temp file
    while not done:
        bytes = f.read(tarfile.BLOCKSIZE)
        if bytes == "":
            done = True
        elif bytes == empty:
            f2.write(bytes)
        else:
            # parse member name and octal size out of the ustar header
            name = bytes[0:99]
            name = name[:name.find(chr(0))]
            size = int(bytes[124:135], 8)
            blocks = size / tarfile.BLOCKSIZE
            if (size % tarfile.BLOCKSIZE) > 0:
                blocks += 1
            if name in membernames:
                # skip over this member's data blocks
                f.seek(blocks*tarfile.BLOCKSIZE + f.tell())
                removednames.append(name)
            else:
                f2.write(bytes)
                for i in range(blocks):
                    f2.write(f.read(tarfile.BLOCKSIZE))
    f2.close()
    f.close()
    if gzipped:
        os.remove(tarball)
        tarball = tarball+".gz"
    os.rename(tfile, tarball)
    return removednames
def concatenate(tarfile1, tarfile2):
"""
Combines tarfile1 and tarfile2 into tarfile1. tarfile1 is modified in the
process, and tarfile2 is deleted.
"""
gzipped = False
if tarfile1[-7:] == ".tar.gz":
gzipped = True
f1 = gzip.GzipFile(tarfile1, 'r')
tarfile1 = tarfile1[:-3]
f1unzip = file(tarfile1, 'w')
f1unzip.write(f1.read())
f1unzip.close()
f1.close()
os.remove(tarfile1+".gz")
f = open(tarfile1, "r+")
done = False
e = '\0'
empty = tarfile.BLOCKSIZE*e
emptyblockcount = 0
while not done:
header = f.read(tarfile.BLOCKSIZE)
if header == "":
print "error: end of archive not found"
return
elif header == empty:
emptyblockcount += 1
if emptyblockcount == 2:
done = True
else:
emptyblockcount = 0
fsize = eval(header[124:135])
skip = int(round(float(fsize) / float(tarfile.BLOCKSIZE) + 0.5))
f.seek(skip*tarfile.BLOCKSIZE, 1)
# truncate the file to the spot before the end-of-tar marker
trueend = f.tell() - (tarfile.BLOCKSIZE*2)
f.seek(trueend)
f.truncate()
# now write the contents of the second tarfile into this spot
if tarfile2[-7:] == ".tar.gz":
f2 = gzip.GzipFile(tarfile2, 'r')
else:
f2 = open(tarfile2, "r")
done = False
while not done:
header = f2.read(tarfile.BLOCKSIZE)
if header == "":
print "error: end of archive not found"
f.seek(trueend)
f.write(empty*2)
return
else:
f.write(header)
if header == empty:
emptyblockcount += 1
if emptyblockcount == 2:
done = True
else:
emptyblockcount = 0
fsize = eval(header[124:135])
bsize = int(round(float(fsize) / float(tarfile.BLOCKSIZE)
+ 0.5))
# XXX: break this up if large
data = f2.read(bsize*tarfile.BLOCKSIZE)
f.write(data)
f2.close()
f.close()
if gzipped:
f2 = gzip.GzipFile(tarfile1+".gz", 'wb')
f = file(tarfile1, 'rb')
f2.write(f.read())
f2.close()
f.close()
os.remove(tarfile1)
# and delete the second tarfile
os.remove(tarfile2)
#print "concatenated %s to %s" % (tarfile2, tarfile1)
def verifyHashes(tarball, ignoreExt=None):
# return all the names of files in this tarball if hash checksum passes,
# otherwise return False
digests = []
done = False
if tarball[-7:] == ".tar.gz":
f = gzip.GzipFile(tarball, 'r:gz')
else:
f = open(tarball, 'r')
empty = tarfile.BLOCKSIZE * '\0'
while not done:
bytes = f.read(tarfile.BLOCKSIZE)
if bytes == "":
done = True
elif bytes == empty:
pass
else:
if bytes[0] == '\0' and bytes[124] == '\0':
print "WARNING: read nulls when expecting file header"
break
name = bytes[0:99]
name = name[:name.find(chr(0))]
size = int(bytes[124:135], 8)
blocks = size / tarfile.BLOCKSIZE
if ignoreExt and name[-len(ignoreExt):] == ignoreExt:
# gzip doesn't support f.seek(size, 1)
f.seek(f.tell()+size)
else:
digest = hashstream(f, size)
digest = fencode(int(digest,16))
if name == digest:
#print "%s == %s" % (name, digest)
digests.append(name)
else:
#print "%s != %s" % (name, digest)
f.close()
return []
if (size % tarfile.BLOCKSIZE) > 0:
blocks += 1
f.seek((blocks * tarfile.BLOCKSIZE) - size + f.tell())
f.close()
return digests
def gzipTarball(tarball):
    """Gzip-compress a '.tar' file in place.

    Returns the new '<tarball>.gz' path, or None when tarball does not
    end in '.tar'.  The original uncompressed file is removed.
    """
    if tarball[-4:] != '.tar':
        return None
    f = gzip.GzipFile(tarball+".gz", 'wb')
    # BUGFIX: the original leaked the source handle opened via
    # file(tarball, 'rb').read(); open/close it explicitly.
    src = open(tarball, 'rb')
    try:
        f.write(src.read())
    finally:
        src.close()
        f.close()
    os.remove(tarball)
    return tarball+".gz"
def gunzipTarball(tarball):
    """Decompress a '.gz' file in place.

    Returns the decompressed path (tarball minus '.gz'), or None when
    tarball does not end in '.gz'.  The compressed file is removed.
    """
    if tarball[-3:] != '.gz':
        return None
    f = gzip.GzipFile(tarball, 'rb')
    # BUGFIX: the original leaked the destination handle opened via
    # file(tarball[:-3], 'wb').write(...); open/close it explicitly.
    dst = open(tarball[:-3], 'wb')
    try:
        dst.write(f.read())
    finally:
        dst.close()
        f.close()
    os.remove(tarball)
    return tarball[:-3]
if __name__ == "__main__":
if (len(sys.argv) < 4 or sys.argv[1] != "-d") \
and (len(sys.argv) != 4 or sys.argv[1] != "-c") \
and sys.argv[1] != "-v":
print "usage: [-d tarfile tarfilemembers]\n"\
+" [-c tarfile1 tarfile2]\n"\
+" [-v tarfile]\n"\
+" -d deletes tarfilemembers from tarfile,\n"\
+" -c concatenates tarfile1 and tarfile2 into tarfile1\n"\
+" -v verifies that the names of files in tarfile are sha256\n"
sys.exit(-1)
if sys.argv[1] == "-d":
deleted = delete(sys.argv[2], sys.argv[3:])
if deleted == sys.argv[3:]:
print "%s successfully deleted from %s" % (deleted, sys.argv[2])
else:
faileddeletes = [x for x in sys.argv[3:] if x not in deleted]
print "could not delete %s from %s" % (faileddeletes, sys.argv[2])
elif sys.argv[1] == "-c":
concatenate(sys.argv[2], sys.argv[3])
print "concatenated %s and %s into %s" % (sys.argv[2], sys.argv[3],
sys.argv[2])
elif sys.argv[1] == "-v":
digests = verifyHashes(sys.argv[2])
if digests:
print "verified tarfile member digests for: %s" % digests
else:
print "some tarfile members failed digest check"
| Python |
#!/usr/bin/python
"""
FludNode.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
FludNode is the process that runs to talk with other nodes in the flud backup network.
"""
from twisted.internet import reactor, defer
import threading, signal, sys, time, os, random, logging
from flud.FludConfig import FludConfig
from flud.protocol.FludServer import FludServer
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import getCanonicalIP
PINGTIME=60
SYNCTIME=900
class FludNode(object):
    """
    A node in the flud network.  A node is both a client and a server.  It
    listens on a network accessible port for both DHT and Storage layer
    requests, and it listens on a local port for client interface commands.
    """
    def __init__(self, port=None):
        # set up a temporary console logger before FludConfig loads so
        # early messages are visible; the handler is removed once the
        # config has installed its own logging
        self._initLogger()
        self.config = FludConfig()
        self.logger.removeHandler(self.screenhandler)
        self.config.load(serverport=port)
        self.client = FludClient(self)
        # earliest time at which pingRandom may begin DHT refreshes
        self.DHTtstamp = time.time()+10

    def _initLogger(self):
        # temporary stderr logger used until the config sets up real ones
        logger = logging.getLogger('flud')
        self.screenhandler = logging.StreamHandler()
        self.screenhandler.setLevel(logging.INFO)
        logger.addHandler(self.screenhandler)
        self.logger = logger

    def pingRandom(self, tstamp):
        # NOTE(review): deliberately disabled -- the bare return makes
        # everything below dead code.  The dead code also references the
        # module-global 'node' (and sys.argv) instead of self; fix before
        # re-enabling.
        return
        # XXX: see pg. 4, Section 2.2 (short) or 2.3 (long) of the Kademlia
        # paper -- once an hour, each node should check any buckets that
        # haven't been refreshed and pick a random id within that space
        # to findnode(id) on, for all buckets.
        if tstamp < self.DHTtstamp:
            #r = random.randrange(2**256)
            n = self.config.routing.knownExternalNodes()
            if len(n) > 2:
                n1 = random.choice(n)[2]
                n2 = random.choice(n)[2]
                r = (n1+n2)/2
            else:
                r = random.randrange(2**256)
            def badNode(error):
                node.logger.warn("Couldn't ping %s:%s" %
                        (sys.argv[1], sys.argv[2]))
            d = self.client.kFindNode(r)
            d.addErrback(badNode)
        pingtime = random.randrange(PINGTIME/2, PINGTIME)
        reactor.callLater(pingtime, self.pingRandom, time.time())

    def syncConfig(self):
        # periodically flush the config to disk, then re-arm
        self.config.save()
        reactor.callLater(SYNCTIME, self.syncConfig)

    def start(self, twistd=False):
        """ starts the reactor in this thread """
        self.webserver = FludServer(self, self.config.port)
        self.logger.log(logging.INFO, "FludServer starting")
        reactor.callLater(1, self.pingRandom, time.time())
        # stagger the first config sync so many nodes started together
        # don't all hit the disk at once
        reactor.callLater(random.randrange(10), self.syncConfig)
        if not twistd:
            reactor.run()

    def run(self):
        """ starts the reactor in its own thread """
        #signal.signal(signal.SIGINT, self.sighandler)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.webserver = FludServer(self, self.config.port)
        self.webserver.start()
        # XXX: need to do save out current config every X seconds
        # XXX: need to seperate known_nodes from config, and then update this
        #      every X seconds.  only update config when it changes.

    def stop(self):
        self.logger.log(logging.INFO, "shutting down FludNode")
        self.webserver.stop()

    def join(self):
        # block until the server thread exits
        self.webserver.join()

    def sighandler(self, sig, frame):
        self.logger.log(logging.INFO, "handling signal %s" % sig)

    def connectViaGateway(self, host, port):
        """Bootstrap into the DHT via a known gateway node, then refresh
        every k-bucket that does not contain our own node id."""
        def refresh(knodes):
            def refreshDone(results):
                self.logger.info("bucket refreshes finished: %s" % results)
                print "flud node connected and listening on port %d"\
                        % self.config.port
            #print "found knodes %s" % knodes
            dlist = []
            for bucket in self.config.routing.kBuckets:
                #if True:
                if bucket.begin <= self.config.routing.node[2] < bucket.end:
                    # our own bucket is kept fresh by normal traffic
                    pass
                    #print "passed on bucket %x-%s" % (bucket.begin, bucket.end)
                else:
                    refreshID = random.randrange(bucket.begin, bucket.end)
                    #print "refreshing bucket %x-%x by finding %x" \
                    #       % (bucket.begin, bucket.end, refreshID)
                    self.logger.info("refreshing bucket %x-%x by finding %x"
                            % (bucket.begin, bucket.end, refreshID))
                    deferred = self.client.kFindNode(refreshID)
                    dlist.append(deferred)
            dl = defer.DeferredList(dlist)
            dl.addCallback(refreshDone)
            # XXX: do we need to ping newly discovered known nodes?  If not,
            #      we could be vulnerable to a poisoning attack (at first
            #      glance, this attack seems rather impotent...)
            # XXX: need to call refresh about every 60 minutes.  Add a
            #      reactor.callLater here to do it.
        def badGW(error):
            self.logger.warn(error)
            self.logger.warn("Couldn't connect to gateway at %s:%s" %
                    (sys.argv[1], sys.argv[2]))
        self.logger.debug("connectViaGateway %s%d" % (host, port))
        deferred = self.client.sendkFindNode(host, port,
                self.config.routing.node[2])
        deferred.addCallback(refresh)
        deferred.addErrback(badGW)
def getPath():
    """Return the directory containing this module.

    This is a hack to let callers locate FludNode.tac, which lives
    alongside this file.
    """
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
| Python |
#!/usr/bin/python
"""
FludScheduler.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
FludScheduler is the process that monitors files for changes and then tells flud to
back them up.
"""
import sys, os, time, stat
from twisted.internet import reactor
from flud.FludConfig import FludConfig
from flud.protocol.LocalClient import *
from flud.CheckboxState import CheckboxState
CHECKTIME=5
class FludScheduler:
    """Periodically sweeps the user's selected paths for changed files and
    asks the flud node (via the LocalClient factory) to back them up."""

    def __init__(self, config, factory):
        self.config = config
        self.factory = factory
        # path and mtime of the user's file-selection config file
        self.fileconfigfile = None
        self.fileconfigfileMTime = 0
        # timestamp of the last full filesystem sweep
        self.fileChangeTime = 0
        # paths selected for / excluded from backup, from the config
        self.fileconfigSelected = set()
        self.fileconfigExcluded = set()
        self.getMasterMetadata()

    def getMasterMetadata(self):
        # ask the node for its master metadata (the files it has stored)
        d = self.factory.sendLIST()
        d.addCallback(self.gotMasterMetadata)
        d.addErrback(self.errMasterMetadata)
        return d

    def gotMasterMetadata(self, master):
        self.mastermetadata = master

    def errMasterMetadata(self, err):
        # nothing useful can happen without master metadata; shut down
        print err
        reactor.stop()

    def readFileConfig(self, mtime=None):
        """(Re)parse the selection config and rebuild the selected /
        excluded path sets.

        NOTE(review): eval() on the config file contents is only safe as
        long as the file is trusted local state.
        """
        print "reading FileConfig"
        file = open(self.fileconfigfile, 'r')
        self.fileconfig = eval(file.read())
        file.close()
        if mtime:
            self.fileconfigfileMTime = mtime
        else:
            self.fileconfigfileMTime = os.stat(
                    self.fileconfigfile)[stat.ST_MTIME]
        self.fileconfigSelected = set([f for f in self.fileconfig
                if self.fileconfig[f] == CheckboxState.SELECTED
                or self.fileconfig[f] == CheckboxState.SELECTEDCHILD])
        self.fileconfigExcluded = set([f for f in self.fileconfig
                if self.fileconfig[f] == CheckboxState.EXCLUDED
                or self.fileconfig[f] == CheckboxState.EXCLUDEDCHILD])

    # The file[s]ChangeStat are the worst possible way to detect file changes.
    # Much more efficient to use inotify/dnotify/fam/gamin/etc., as well as
    # more correct (no way to detect cp -a or -p, for example, with stat).
    # But, these are a fallback method when those aren't present, and are fine
    # for testing.
    def fileChangedStat(self, file, fileChangeTime=None):
        # True when file's mtime is newer than the reference time; a
        # nonexistent path is always reported as changed.  When no
        # explicit reference time is given, prefer the per-file time from
        # master metadata over the last sweep time.
        if os.path.isfile(file) or os.path.isdir(file):
            mtime = os.stat(file)[stat.ST_MTIME]
            if not fileChangeTime:
                fileChangeTime = self.fileChangeTime
                if file in self.mastermetadata:
                    fileChangeTime = self.mastermetadata[file][1]
        else:
            return True
        print "mtime = %s, ctime = %s (%s)" % (mtime, fileChangeTime, file)
        if mtime > fileChangeTime:
            return True
        return False

    def filesChangedStat(self, files, fileChangeTime=None):
        # filter files down to those reported changed
        result = []
        for f in files:
            if self.fileChangedStat(f, fileChangeTime):
                result.append(f)
        return result

    # Change these to point to something other than the xxxStat() methods
    def fileChanged(self, file, fileChangeTime=None):
        """
        >>> now = time.time()
        >>> f1 = tmpfile.mktemp()
        >>>
        """
        return self.fileChangedStat(file, fileChangeTime)

    def filesChanged(self, files, fileChangeTime=None):
        return self.filesChangedStat(files, fileChangeTime)

    def checkFileConfig(self):
        """(Re)read the selection config if it appeared or changed.
        Returns True when a re-read happened, False otherwise."""
        # check config file to see if it has changed, then reparse it
        if not self.fileconfigfile:
            # first time through: locate the config under FLUDHOME/HOME
            print "checking fileconfigfile (initial)"
            if os.environ.has_key('FLUDHOME'):
                fludhome = os.environ['FLUDHOME']
            elif os.environ.has_key('HOME'):
                fludhome = os.environ['HOME']+"/.flud"
            else:
                fludhome = ".flud"
            # XXX: fludfile.conf should be in config
            self.fileconfigfile = os.path.join(fludhome, "fludfile.conf")
            if os.path.isfile(self.fileconfigfile):
                self.readFileConfig()
                return True
            else:
                print "no fileconfigfile to read"
        elif os.path.isfile(self.fileconfigfile):
            if self.fileChanged(self.fileconfigfile, self.fileconfigfileMTime):
                print "fileconfigfile changed"
                mtime = time.time()
                self.readFileConfig(mtime)
                return True
        return False

    def checkFilesystem(self):
        """Recursively walk the selected paths and return the set of
        changed files (excluded and already-stored paths are skipped)."""
        checkedFiles = set()
        changedFiles = set()
        def checkList(list):
            #print "checkList: %s" % list
            #print "checkedFiles: %s" % checkedFiles
            for entry in list:
                # XXX: if entry is in master metadata, and its mtime is not
                #      earlier than the time used by fileChanged, skip it
                #      (add 'and' clause)
                if entry not in checkedFiles and \
                        entry not in self.fileconfigExcluded and \
                        entry not in self.mastermetadata:
                    print "checkFilesystem for %s" % entry
                    if os.path.isdir(entry):
                        #print "dir %s" % entry
                        dirfiles = [os.path.join(entry, i)
                                for i in os.listdir(entry)]
                        checkedFiles.update([entry,])
                        checkList(dirfiles)
                    elif self.fileChanged(entry):
                        print "%s changed" % entry
                        if os.path.isfile(entry):
                            changedFiles.update([entry,])
                            #print "file %s changed" % entry
                        else:
                            # changed but neither file nor dir (socket,
                            # fifo, dangling symlink, ...)
                            print "entry ?? %s ?? changed" % entry
                    checkedFiles.update([entry,])
        checkList(self.fileconfigSelected)
        self.fileChangeTime = time.time()
        return changedFiles

    def storefileFailed(self, err, file):
        print "storing %s failed: %s" % (file, err)
        err.printTraceback()
        #print dir(err)

    def storefileYay(self, r, file):
        print "storing %s success" % file

    def storeFiles(self, changedFiles):
        """Kick off a PUTF for each changed file; returns a DeferredList
        that fires when all of the stores have completed or failed."""
        #print "storing %s" % changedFiles
        dlist = []
        for f in changedFiles:
            print "storing %s" % f
            deferred = self.factory.sendPUTF(f)
            deferred.addCallback(self.storefileYay, f)
            deferred.addErrback(self.storefileFailed, f)
            dlist.append(deferred)
        dl = defer.DeferredList(dlist)
        return dl
        #return defer.succeed(True)

    def restartCheckTimer(self, v):
        print "restarting timer (%d) to call run()" % CHECKTIME
        reactor.callLater(CHECKTIME, self.run)

    def updateMasterMetadata(self, v):
        return self.getMasterMetadata()

    def run(self):
        """One scheduler pass: re-read the config, sweep the filesystem,
        store whatever changed, then refresh metadata and re-arm."""
        print "run"
        self.checkFileConfig()
        changedFiles = self.checkFilesystem()
        print "%s changed" % changedFiles
        d = self.storeFiles(changedFiles)
        d.addBoth(self.updateMasterMetadata)
        d.addBoth(self.restartCheckTimer)
| Python |
#!/usr/bin/python
"""
FludLocalClient.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
FludLocalClient provides a command-line client for interacting with FludNode.
"""
import sys, os, time
from twisted.internet import reactor
from flud.FludConfig import FludConfig
from flud.fencode import fencode, fdecode
from flud.FludCrypto import hashfile
from protocol.LocalClient import *
logger = logging.getLogger('flud')
class CmdClientFactory(LocalClientFactory):
    """Interactive command-line client factory: reads commands from a
    prompt running in a worker thread and dispatches them to the node
    through the reactor."""

    def __init__(self, config):
        LocalClientFactory.__init__(self, config)
        # quit flag for the prompt loop; msgs accumulates (result, label)
        # pairs queued by deferred callbacks for later display
        self.quit = False
        self.msgs = []

    def callFactory(self, func, commands, msgs):
        # since we can't call factory methods from the promptUser thread, we
        # use this as a convenience to put those calls back in the event loop
        reactor.callFromThread(self.doFactoryMethod, func, commands, msgs)

    def doFactoryMethod(self, func, commands, msgs):
        # run the factory call and queue its outcome for display
        d = func()
        d.addCallback(self.queueResult, msgs, '%s succeeded' % commands)
        d.addErrback(self.queueError, msgs, '%s failed' % commands)
        return d

    def promptUser(self):
        """Read one command from stdin (runs in a worker thread) and
        dispatch it.  Commands are matched on their first four chars."""
        helpDict = {}
        command = raw_input("%s> " % time.ctime())
        commands = command.split(' ') # XXX: should tokenize on any whitespace
        commandkey = commands[0][:4]

        # core client operations
        helpDict['exit'] = "exit from the client"
        helpDict['help'] = "display this help message"
        helpDict['ping'] = "send a GETID() message: 'ping host port'"
        helpDict['putf'] = "store a file: 'putf canonicalfilepath'"
        helpDict['getf'] = "retrieve a file: 'getf canonicalfilepath'"
        helpDict['geti'] = "retrieve a file by CAS key: 'geti fencodedCASkey'"
        helpDict['fndn'] = "send a FINDNODE() message: 'fndn hexIDstring'"
        helpDict['list'] = "list stored files (read from local metadata)"
        helpDict['putm'] = "store master metadata"
        helpDict['getm'] = "retrieve master metadata"
        helpDict['cred'] = "send encrypted private credentials: cred"\
                " passphrase emailaddress"
        helpDict['node'] = "list known nodes"
        helpDict['buck'] = "print k buckets"
        helpDict['stat'] = "show pending actions"
        helpDict['stor'] = "store a block to a given node:"\
                " 'stor host:port,fname'"
        helpDict['rtrv'] = "retrieve a block from a given node:"\
                " 'rtrv host:port,fname'"
        helpDict['vrfy'] = "verify a block on a given node:"\
                " 'vrfy host:port:offset-length,fname'"
        helpDict['fndv'] = "retrieve a value from the DHT: 'fndv hexkey'"
        helpDict['dlet'] = "delete from the stor: '[XXX]'"
        if commandkey == 'exit' or commandkey == 'quit':
            self.quit = True
        elif commandkey == 'help':
            self.printHelp(helpDict)
        elif commandkey == 'ping':
            # ping a host
            # format: 'ping host port'
            func = lambda: self.sendPING(commands[1], commands[2])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'putf':
            # store a file
            # format: 'putf canonicalfilepath'
            func = lambda: self.sendPUTF(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'getf':
            # retrieve a file
            # format: 'getf canonicalfilepath'
            func = lambda: self.sendGETF(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'geti':
            # retrieve a file by CAS ID
            # format: 'geti fencoded_CAS_ID'
            func = lambda: self.sendGETI(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'fndn':
            # find a node (or the k-closest nodes)
            # format: 'fndn hexIDstring'
            func = lambda: self.sendFNDN(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'list':
            # list stored files
            self.callFactory(self.sendLIST, commands, self.msgs)
        elif commandkey == 'putm':
            # store master metadata
            self.callFactory(self.sendPUTM, commands, self.msgs)
        elif commandkey == 'getm':
            # retrieve master metadata
            self.callFactory(self.sendGETM, commands, self.msgs)
        elif commandkey == 'cred':
            # send encrypted private credentials to an email address
            # format: 'cred passphrase emailaddress'
            # (the passphrase is everything between the command word and
            # the trailing email address, so it may contain spaces)
            func = lambda: self.sendCRED(
                    command[len(commands[0])+1:-len(commands[-1])-1],
                    commands[-1])
            self.callFactory(func, commands, self.msgs)
        # the following are diagnostic operations, debug-only utility
        elif commandkey == 'node':
            # list known nodes
            self.callFactory(self.sendDIAGNODE, commands, self.msgs)
        elif commandkey == 'buck':
            # show k-buckets
            self.callFactory(self.sendDIAGBKTS, commands, self.msgs)
        elif commandkey == 'stat':
            # show pending actions
            print self.pending
        elif commandkey == 'stor':
            # stor a block to a given node.  format: 'stor host:port,fname'
            storcommands = commands[1].split(',')
            try:
                fileid = int(storcommands[1], 16)
            except:
                # not a raw block id: make a hash-named symlink to the
                # file so the server-side name is its content hash
                linkfile = fencode(long(hashfile(storcommands[1]),16))
                if (os.path.islink(linkfile)):
                    os.remove(linkfile)
                os.symlink(storcommands[1], linkfile)
                storcommands[1] = linkfile
                # XXX: delete this file when the command finishes
                commands[1] = "%s,%s" % (storcommands[0], storcommands[1])
            func = lambda: self.sendDIAGSTOR(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'rtrv':
            # retrive a block from a given node. format: 'rtrv host:port,fname'
            func = lambda: self.sendDIAGRTRV(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'vrfy':
            # verify a block on a given node.
            # format: 'vrfy host:port:offset-length,fname'
            logger.debug("vrfy(%s)" % commands[1])
            func = lambda: self.sendDIAGVRFY(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'dlet':
            print "not yet implemented"
        elif commandkey == 'fndv':
            # try to retrieve a value from the DHT
            # format: 'fndv key'
            func = lambda: self.sendDIAGFNDV(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif command != "":
            reactor.callFromThread(self.queueError, None, self.msgs,
                    "illegal command '%s'" % command)

    def queueResult(self, r, l, msg):
        logger.debug("got result %s" % msg)
        l.append((r, msg))

    def queueError(self, r, l, msg):
        logger.debug("got error %s" % msg)
        if r:
            l.append((r.getErrorMessage(), msg))
        else:
            l.append((None, msg))

    def printHelp(self, helpDict):
        helpkeys = helpDict.keys()
        helpkeys.sort()
        for i in helpkeys:
            print "%s:\t %s" % (i, helpDict[i])

    def promptLoop(self, r):
        """Report completed/pending operations and queued messages, then
        hand the prompt back to a worker thread (or stop on quit)."""
        for c in self.pending:
            # NOTE: .keys() yields a copy (python 2), so popping inside
            # the loop is safe here
            for i in self.pending[c].keys():
                if self.pending[c][i] == True:
                    print "%s on %s completed successfully" % (c, i)
                    self.pending[c].pop(i)
                elif self.pending[c][i] == False:
                    print "%s on %s failed" % (c, i)
                    self.pending[c].pop(i)
                else:
                    print "%s on %s pending" % (c, i)
        while len(self.msgs) > 0:
            # this prints in reverse order, perhaps pop() all into a new list,
            # reverse, then print
            (errmsg, m) = self.msgs.pop()
            if errmsg:
                print "<- %s:\n%s" % (m, errmsg)
            else:
                print "<- %s" % m
        if self.quit:
            reactor.stop()
        else:
            d = threads.deferToThread(self.promptUser)
            d.addCallback(self.promptLoopDelayed)
            d.addErrback(self.err)

    def promptLoopDelayed(self, r):
        # give the reactor loop time to fire any quick cbs/ebs
        reactor.callLater(0.1, self.promptLoop, r)

    def clientConnectionLost(self, connector, reason):
        # a lost connection is expected during quit; only delegate the
        # reconnect logic when we are not shutting down
        if not self.quit:
            LocalClientFactory.clientConnectionLost(self, connector, reason)

    def cleanup(self, msg):
        self.quit = True
        self.err(msg)

    def err(self, r):
        print "bah!: %s" % r
        reactor.stop()
def main():
    """Connect a command-line client to a local FludNode and start the
    interactive prompt loop."""
    config = FludConfig()
    config.load(doLogging=False)
    # log to a file so log output doesn't interleave with the prompt
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler('/tmp/fludclient.log')
    formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)d'
            ' %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    factory = CmdClientFactory(config)
    # optional argv[1] overrides the client port from config
    if len(sys.argv) == 2:
        config.clientport = int(sys.argv[1])
    print "connecting to localhost:%d" % config.clientport
    reactor.connectTCP('localhost', config.clientport, factory)
    factory.promptLoop(None)
    reactor.run()
if __name__ == '__main__':
    # script entry point
    main()
| Python |
#!/usr/bin/python
"""
FludClient.py, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
FludClient provides a GUI Client for interacting with FludNode.
"""
#from twisted.internet import wxreactor
#wxreactor.install()
import sys, os, string, time, glob
import wx
import wx.lib.mixins.listctrl as listmix
import wx.lib.editor.editor
from flud.protocol.LocalClient import *
from flud.FludConfig import FludConfig
from flud.CheckboxState import CheckboxState
FLUSHCHECKTIME = 5*60 # s to wait to flush fludfile.conf
imgdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'images')
mimeMgr = wx.MimeTypesManager()
def getFileIcon(file, il, checkboxes, icondict):
    """Return the image-list index of the checkbox-decorated icon set for
    file's type, creating and caching it in il/icondict on first use.

    Falls back to the 'generic' icon set when the extension has no
    registered file type or that type provides no usable icon.
    """
    ft = mimeMgr.GetFileTypeFromExtension(file[file.rfind('.')+1:])
    # XXX: what about from mimetype or magic?
    if ft == None:
        return icondict['generic']
    else:
        desc = ft.GetDescription()
        if icondict.has_key(desc):
            # icon variants for this file type were already built
            return icondict[desc]
        else:
            icon = ft.GetIcon()
            if icon == None or not icon.Ok():
                #print "couldn't find an icon image for %s" % file
                icondict[desc] = icondict['generic']
                return icondict[desc]
            bm = wx.BitmapFromIcon(icon)
            newimages = makeCheckboxBitmaps(bm, checkboxes)
            #il = self.GetImageList()
            # append the new variants and cache their starting index
            pos = il.GetImageCount()
            for i in newimages:
                il.Add(i)
            icondict[desc] = pos
            #print "%s got a %s image" % (file, ft.GetDescription())
            return pos
def getEmptyBitmapAndDC(width, height):
    """Create a cleared bitmap of the given size and a memory DC already
    selected into it, ready for drawing."""
    bitmap = wx.EmptyBitmap(width, height)
    dc = wx.MemoryDC()
    dc.SelectObject(bitmap)
    dc.Clear()
    return (bitmap, dc)
def makeCheckboxBitmaps(basebitmap, checkboxes):
    """Return one 40x16 bitmap per checkbox image: the base icon (scaled
    to 16x16 if needed) on the left, the checkbox drawn beside it."""
    if basebitmap.GetWidth() != 16 or basebitmap.GetHeight() != 16:
        img = basebitmap.ConvertToImage()
        img.Rescale(16, 16)
        basebitmap = img.ConvertToBitmap()
    combined = []
    for checkbox in checkboxes:
        bitmap, dc = getEmptyBitmapAndDC(40, 16)
        dc.DrawBitmap(basebitmap, 0, 0, False)
        dc.DrawBitmap(checkbox, 20, 2, False)
        combined.append(bitmap)
    return combined
def createDefaultImageList():
    """Build the default 40x16 image list (stock icon + checkbox-state
    variants) for the tree control.

    Returns (imagelist, checkbox bitmaps, dict mapping category name to
    the index of its first variant in the imagelist).
    """
    def getDefaultCheckboxes():
        # load the six checkbox-state overlay images
        ucbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-unchecked1.png")))
        cbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-checked1.png")))
        ccbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-checkedpartial1.png")))
        cpbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-parentchecked1.png")))
        ebm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-excluded1.png")))
        ecbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-excludedpartial1.png")))
        return (ucbm, cbm, ccbm, cpbm, ebm, ecbm)
    checkboxes = getDefaultCheckboxes()
    il = wx.ImageList(40, 16)
    # (category, stock art id) in index-assignment order; replaces eight
    # near-identical copy/pasted stanzas with one data-driven loop
    artids = [('folder', wx.ART_FOLDER),
            ('computer', wx.ART_HARDDISK),
            ('drives', wx.ART_HARDDISK),
            ('cdrom', wx.ART_CDROM),
            ('floppy', wx.ART_FLOPPY),
            ('removable', wx.ART_REMOVABLE),
            ('generic', wx.ART_NORMAL_FILE),
            ('exec', wx.ART_EXECUTABLE_FILE)]
    icondict = {}
    j = 0
    for name, artid in artids:
        bitmaps = makeCheckboxBitmaps(wx.ArtProvider_GetBitmap(
                artid, wx.ART_CMN_DIALOG, wx.Size(16, 16)), checkboxes)
        # category index points at its first checkbox variant
        icondict[name] = j
        for bm in bitmaps:
            il.Add(bm)
            j = j+1
    return il, checkboxes, icondict
class DirCheckboxCtrl(wx.TreeCtrl):
    def __init__(self, parent, id=-1, dir=None, pos=wx.DefaultPosition,
            size=wx.DefaultSize,
            style=(wx.TR_MULTIPLE
                | wx.TR_HAS_BUTTONS
                | wx.TR_TWIST_BUTTONS
                | wx.TR_NO_LINES
                | wx.TR_FULL_ROW_HIGHLIGHT
                | wx.SUNKEN_BORDER),
            validator=wx.DefaultValidator, name=wx.ControlNameStr,
            allowExclude=True):
        # allowExclude: whether the 'excluded' checkbox states are offered
        self.allowExclude = allowExclude
        wx.TreeCtrl.__init__(self, parent, id, pos, size, style, validator,
                name)
        # callbacks notified when an item's checkbox state changes
        self.listeners = []
        self.parent = parent
        #self.il = self.GetImageList()
        #self.checkboxes = self.getDefaultCheckboxes()
        self.initTree(dir)
        self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.onExpand, self)
        self.Bind(wx.EVT_LEFT_UP, self.onClick, self)
        self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.onTooltip, self)
        self.Bind(wx.EVT_CHAR, self.onChar)
    def initTree(self, dir):
        """Build the tree root for dir and expand down to $HOME."""
        self.expandRoot(dir)
        # XXX: should expandHome() on first run, then load expanded dir state
        #      from saved state on subsequent runs.
        self.expandHome(dir)
def expandRoot(self, dir):
if not os.path.isdir(dir):
raise ValueError("%s is not a valid directory path")
self.defaultImageList, self.checkboxes, self.icondict \
= createDefaultImageList()
self.AssignImageList(self.defaultImageList)
self.il = self.GetImageList()
if dir == None:
self.rootID = self.AddRoot(dir, self.icondict['computer'], -1,
wx.TreeItemData((dir, True, False,
CheckboxState.UNSELECTED)))
# XXX: getTopLevelDirs() and add them as children
else:
self.rootID = self.AddRoot(dir, self.icondict['folder'], -1,
wx.TreeItemData((dir, True, False,
CheckboxState.UNSELECTED)))
self.expandDir(self.rootID)
self.Expand(self.rootID)
self.stateChangeTime = time.time()
self.flushTime = time.time()
reactor.callLater(FLUSHCHECKTIME, self.checkFlush)
def expandHome(self, dir):
home = os.environ['HOME']
if home:
traversal = home.split(os.path.sep)[1:]
node = self.rootID
for d in traversal:
(ipath, isdir, expanded, istate) \
= self.GetItemData(node).GetData()
self.expandDir(node)
children = self.getChildren(node, False)
childrennames = [self.GetItemText(x) for x in children]
if d in childrennames:
p = childrennames.index(d)
node = children[p]
self.expandDir(node)
self.Expand(node)
else:
print "couldn't traverse to HOME dir on %s" % d
break
def checkFlush(self):
print "checking for flush"
if self.stateChangeTime > self.flushTime:
self.flushTime = time.time()
print "flushing"
self.parent.flushFileConfig()
reactor.callLater(FLUSHCHECKTIME, self.checkFlush)
    def expandDir(self, parentID, hideHidden=False, busycursor=True):
        """Populate parentID's children from the filesystem (only once:
        nodes already marked expanded are left alone)."""
        def isDriveAvailable(path):
            # Windows drive letters: skip probing a:/b: (floppies), check
            # other drives for existence; non-drive paths are always ok
            if len(path) == 2 and path[1] == ':':
                path = path.lower()
                # NOTE(review): 'diExists' is not defined anywhere in this
                # module -- probably meant os.path.exists; as written this
                # raises NameError for drive-letter paths.  TODO confirm.
                if path[0] == 'a' or path[0] == 'b' or diExists(path):
                    return True
                else:
                    return False
            return True
        (path, isDir, expanded, state) = self.GetItemData(parentID).GetData()
        if expanded:
            # already populated
            return
        if not isDriveAvailable(path):
            return
        if busycursor: wx.BusyCursor()
        try:
            dirlist = os.listdir(path)
        except:
            # unreadable directory: show it as a leaf
            self.SetItemHasChildren(parentID, False)
            return
        if len(dirlist) == 0:
            self.SetItemHasChildren(parentID, False)
            return
        dirs = []
        files = []
        for i in dirlist:
            if hideHidden and i[0] == '.':
                # XXX: dotfile format check is *nix specific
                # XXX: if this is a hidden file, don't add it.
                pass
            elif os.path.isdir(os.path.join(path,i)):
                dirs.append(i)
            else:
                files.append(i)
        dirs.sort()
        files.sort()
        # directories first, then files, each group sorted
        for d in dirs:
            child = self.AppendItem(parentID, d)
            self.SetPyData(child, (os.path.join(path,d), True, False, 0))
            self.SetItemImage(child, self.icondict['folder'],
                    wx.TreeItemIcon_Normal)
            self.SetItemHasChildren(child)
        il = self.GetImageList()
        for f in files:
            child = self.AppendItem(parentID, f) # XXX: unicode?
            self.SetPyData(child, (os.path.join(path,f), False, False, 0))
            idx = getFileIcon(os.path.join(path,f), il, self.checkboxes,
                    self.icondict)
            self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
        # mark this node as populated
        self.SetPyData(parentID, (path, isDir, True, state))
def getStates(self, node=None):
    """Return a {path: state} dict for every node in the subtree rooted
    at node (default: the whole tree) whose state is explicitly
    SELECTED or EXCLUDED."""
    if not node:
        node = self.rootID
    (path, isDir, expanded, state) = self.GetItemData(node).GetData()
    collected = {}
    if state in [CheckboxState.SELECTED, CheckboxState.EXCLUDED]:
        collected[path] = state
    for child in self.getChildren(node, False):
        collected.update(self.getStates(child))
    return collected
def setStates(self, states):
    """Apply a {path: state} mapping (as produced by getStates()),
    skipping paths that no longer exist in the tree."""
    for path in states:
        node = self.findNode(path)
        if node:
            self.setItemState(node, states[path])
def findNode(self, path):
    """Locate and return the tree node for an absolute filesystem path,
    expanding directories along the way; returns None if any component
    of the path is no longer present."""
    if path[0] == '/':
        path = path[1:] # XXX: unix only
    remaining = path.split(os.path.sep)
    if remaining[0] == '':
        remaining.remove('')
    node = self.rootID
    while True:
        (ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
        if not remaining:
            return node
        self.expandDir(node)
        children = self.getChildren(node, False)
        names = [self.GetItemText(c) for c in children]
        component = remaining.pop(0)
        if component not in names:
            # the file is no longer present
            return None
        node = children[names.index(component)]
def onExpand(self, event):
    """Lazily populate and re-render a node when the user expands it."""
    item = event.GetItem()
    self.expandDir(item)
    self.renderChildren(item, True)
def getFullPath(self, node):
    """Reassemble a node's filesystem path by walking up to the root.

    NOTE(review): this method reads self.tree.* while the rest of the
    class calls the same wx.TreeCtrl methods on self directly (and the
    root check mixes self.GetRootItem() with self.tree.GetItemParent).
    Looks like a leftover from when the tree was a member rather than a
    base class -- confirm self.tree exists before relying on this.
    """
    path = self.tree.GetItemText(node)
    n = node
    while True:
        n = self.tree.GetItemParent(n)
        if n and n != self.GetRootItem():
            # prepend each ancestor's name until we reach the root
            path = os.path.join(self.tree.GetItemText(n),path)
        else:
            break
    return path
def renderParents(self, item):
    """Walk up from item, updating each ancestor's stored state tuple
    and icon to reflect the (possibly changed) state of its children.

    Only ancestors currently in UNSELECTED or SELECTEDPARENT state are
    eligible to change; any other ancestor state -- or an ancestor that
    needs no change -- stops the upward walk.
    """
    if item == self.rootID:
        return
    n = item
    (path, isDir, expanded, state) = self.GetItemData(item).GetData()
    while True:
        n = self.GetItemParent(n)
        (parentpath, parentisDir, parentexpanded,
                parentstate) = self.GetItemData(n).GetData()
        #print "parent %s" % parentpath
        if n and n != self.GetRootItem():
            newstate = parentstate
            if parentstate != CheckboxState.UNSELECTED and \
                    parentstate != CheckboxState.SELECTEDPARENT:
                # we only care about changing UNSELECT or SELECTEDPARENT
                # states
                break
            else:
                if state == CheckboxState.SELECTED or \
                        state == CheckboxState.SELECTEDCHILD or \
                        state == CheckboxState.SELECTEDPARENT:
                    # if the item (child) is selected in any way, parent
                    # should be too.
                    newstate = CheckboxState.SELECTEDPARENT
                elif state == CheckboxState.UNSELECTED or \
                        state == CheckboxState.EXCLUDED:
                    # if the item (child) is unselected or excluded, the
                    # parent should be too, /unless/ there are other
                    # children at the same level who are selected.
                    children = self.getChildren(n, False)
                    newstate = CheckboxState.UNSELECTED
                    for child in children:
                        (cpath, cisdir, cexp,
                                cstate) = self.GetItemData(child).GetData()
                        if cstate == CheckboxState.SELECTED or \
                                cstate == CheckboxState.SELECTEDCHILD or \
                                cstate == CheckboxState.SELECTEDPARENT:
                            # a selected sibling keeps the parent as-is
                            newstate = parentstate
            if newstate == parentstate:
                # no change at this level means ancestors are settled too
                break
            # shift the icon index by the delta between the two states
            imageidx = self.GetItemImage(n)
            imageidx += CheckboxState.offset(parentstate, newstate)
            self.SetPyData(n, (parentpath, parentisDir,
                    parentexpanded, newstate))
            self.SetItemImage(n, imageidx)
        else:
            break
def renderChildren(self, parent, recurse=False):
    """Push parent's checkbox state down onto its children (and, when
    recurse is True, onto the whole subtree), updating each child's
    stored state tuple and icon.  The transition table in the inline
    string below documents the intended child-state changes.
    """
    (parentpath, parentisDir, parentexpanded,
            parentstate) = self.GetItemData(parent).GetData()
    children = self.getChildren(parent, False)
    for child in children:
        #path = self.getFullPath(child)
        (path, isDir, expanded, state) = self.GetItemData(child).GetData()
        imageidx = self.GetItemImage(child)
        newstate = state
        """
        Here are the state transitions for children based on current states:
        ('-' = no state change, 'x' = should never occur, '!' = should be
        prevented at the parent, '?' = need to consult children)
                                       child
                    unsel  sel  selch       selpar  excl  exclch
             unsel  -      !    unsel       x       -     unsel
             sel    selch  -    -           selch   -     selch
        par  selch  selch  -    -           selch   -     selch
             selpar x      x    unsl?selpr  x       x     x
             excl   exlch  !    exlch       !       -     -
             exclch exlch  -    exlch       !       -     -
        """
        #if parentpath == '/data':
        #    print "/data pstate = %d" % parentstate
        #    print "  %s = %d" % (path, state)
        if state == CheckboxState.UNSELECTED:
            if parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
            elif parentstate == CheckboxState.EXCLUDED or \
                    parentstate == CheckboxState.EXCLUDEDCHILD:
                newstate = CheckboxState.EXCLUDEDCHILD
        elif state == CheckboxState.SELECTEDCHILD:
            if parentstate == CheckboxState.UNSELECTED:
                newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.SELECTEDPARENT:
                # the '?' case from the table: keep SELECTEDPARENT only
                # if some descendant is still explicitly selected
                if self.checkChildrenStates(child, [CheckboxState.SELECTED,
                        CheckboxState.SELECTEDPARENT]):
                    # XXX: did we need to pass in selections to checkChldSt
                    newstate = CheckboxState.SELECTEDPARENT
                else:
                    newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.EXCLUDED or \
                    parentstate == CheckboxState.EXCLUDEDCHILD:
                newstate = CheckboxState.EXCLUDEDCHILD
        elif state == CheckboxState.SELECTEDPARENT:
            if parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
        elif state == CheckboxState.EXCLUDEDCHILD:
            if parentstate == CheckboxState.UNSELECTED:
                newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
        imageidx += CheckboxState.offset(state, newstate)
        self.SetPyData(child, (path, isDir, expanded, newstate))
        self.SetItemImage(child, imageidx)
        if recurse:
            self.renderChildren(child, recurse)
            # XXX: why do we renderParents here?  It hits the same
            # 'parent's over and over and over again.  If we want to do
            # this, we need to 'collect up' the parents and just call once
            # -- this kills performance.
            #print "renderParents(%s)" % path
            #self.renderParents(child)
def getChildren(self, node, recurse=False):
    """Return node's direct children (all descendants, depth-first,
    when recurse is True) using wx's cookie-based iteration."""
    kids = []
    child, cookie = self.GetFirstChild(node)
    while child:
        kids.append(child)
        if recurse:
            kids.extend(self.getChildren(child, recurse))
        child, cookie = self.GetNextChild(node, cookie)
    return kids
def checkChildrenStates(self, node, states, ignorelist=None):
    """Return True if any descendant of node that is not in ignorelist
    currently has one of the given states.

    ignorelist entries are skipped for the state test only; their
    subtrees are still searched, since they may contain nodes that are
    not ignored.  (Bug fix: the recursion used to be nested inside the
    'not in ignorelist' branch, so an ignored node's subtree was never
    examined -- contradicting the original in-code comment.)
    """
    if ignorelist is None:  # avoid the shared mutable-default pitfall
        ignorelist = []
    children = self.getChildren(node)
    for child in children:
        if child not in ignorelist:
            (p, d, e, childstate) = self.GetItemData(child).GetData()
            if childstate in states:
                #print "%s has state %d" % (p, childstate)
                return True
        # recurse even if child is in ignorelist, because it may have
        # children which are not in the ignorelist
        if self.checkChildrenStates(child, states, ignorelist):
            return True
    return False
def getTooltip(self, item):
    """Compose the tooltip text describing an item's backup state;
    returns the bare item label for unknown states."""
    text = self.GetItemText(item)
    (path, isDir, expanded, state) = self.GetItemData(item).GetData()
    if state == CheckboxState.SELECTED:
        if isDir:
            return "'%s' is SELECTED for backup\n" \
                    "ALL files within this folder will be backed up\n" \
                    "(except those explicitly marked for exclusion)" % text
        return "'%s' is SELECTED for backup" % text
    if state == CheckboxState.UNSELECTED:
        return "'%s' is NOT selected for backup" % text
    if state == CheckboxState.SELECTEDPARENT:
        return "some files within '%s' are selected for backup" % text
    if state == CheckboxState.SELECTEDCHILD:
        return "'%s' will be backed up\n" \
                "(one of its parent folders is selected)" % text
    if state == CheckboxState.EXCLUDED:
        if isDir:
            return "'%s' is EXCLUDED from backup\n" \
                    "No files within this folder will be backed up" % text
        return "'%s' is EXCLUDED from backup" % text
    if state == CheckboxState.EXCLUDEDCHILD:
        return "'%s' is EXCLUDED from backup\n" \
                "(one of its parent folders is EXCLUDED)" % text
    return text
def onTooltip(self, event):
    """Supply tooltip text for the hovered item, or swallow the event
    when there is nothing to show."""
    tip = self.getTooltip(event.GetItem())
    if not tip:
        event.StopPropagation()
    else:
        event.SetToolTip(tip)
    #print dir(event)
def onClick(self, event):
    """Toggle an item's checkbox when its icon is left-clicked,
    carrying along any multi-selected items."""
    hit, flags = self.HitTest((event.GetX(), event.GetY()))
    if flags & wx.TREE_HITTEST_ONITEMICON:
        self.changeState(hit, self.GetSelections())
def onChar(self, event):
    """Ctrl+Shift+F forces an immediate flush of the selection state."""
    wants_flush = (event.KeyCode() == ord('F') and event.ShiftDown()
            and event.ControlDown())
    if wants_flush:
        self.flushTime = time.time()
        print("flushing")
        self.parent.flushFileConfig()
    event.Skip()
def changeState(self, item, selections=[]):
    """Toggle item's checkbox state in response to a user click, then
    propagate an equivalent toggle to every other item in 'selections'
    (multi-select).  The inline transition tables document the intended
    state changes; parents and children are re-rendered afterwards via
    setItemState().

    NOTE: the selections=[] default is shared across calls but is only
    ever read here, never mutated, so it is safe.
    """
    self.stateChangeTime = time.time()
    (path, isDir, expanded, state) = self.GetItemData(item).GetData()
    if item == self.rootID:
        # the root has no parent; treat it as an UNSELECTED parent
        parent = None
        parentstate = CheckboxState.UNSELECTED
    else:
        parent = self.GetItemParent(item)
        (parentpath, parentisDir, parentexpanded,
                parentstate) = self.GetItemData(parent).GetData()
    imageidx = self.GetItemImage(item)
    # determine newstate from existing state, parent state, and state
    # of children
    """
    Here are the state transitions for the item based on current
    states and parent states: ('-' = no state change, 'x' = should
    never occur, '?' = depends on children state)
                                   item
                unsel  sel         selch  selpar  excl    exclch
         unsel  sel    excl        sel    sel     unsel   excl
         sel    sel    excl?selpar sel    x       selch   excl
    par  selch  x      excl        sel    sel     selch   excl
         selpar sel    excl        x      sel     unsel   excl
         excl   x      excl        x      exclch  exclch  excl
         exclch x      excl        x      exclch  exclch  excl
    """
    newstate = state
    if state == CheckboxState.UNSELECTED:
        newstate = CheckboxState.SELECTED
    elif state == CheckboxState.SELECTEDCHILD:
        newstate = CheckboxState.SELECTED
    elif state == CheckboxState.SELECTEDPARENT:
        if parentstate == CheckboxState.EXCLUDED or \
                parentstate == CheckboxState.EXCLUDEDCHILD:
            # XXX: this should be impossible to reach...
            newstate = CheckboxState.EXCLUDEDCHILD
        else:
            newstate = CheckboxState.SELECTED
    elif state == CheckboxState.SELECTED:
        # '?' case: a selected item with selected descendants (outside
        # the current multi-selection) becomes SELECTEDPARENT instead
        if self.checkChildrenStates(item, [CheckboxState.SELECTED,
                CheckboxState.SELECTEDPARENT], selections):
            newstate = CheckboxState.SELECTEDPARENT
        elif self.allowExclude:
            newstate = CheckboxState.EXCLUDED
        else:
            if parent in selections or \
                    (parentstate == CheckboxState.UNSELECTED or \
                    parentstate == CheckboxState.SELECTEDPARENT):
                newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
    elif state == CheckboxState.EXCLUDED:
        if parent in selections or \
                (parentstate == CheckboxState.UNSELECTED or \
                parentstate == CheckboxState.SELECTEDPARENT):
            newstate = CheckboxState.UNSELECTED
        elif parentstate == CheckboxState.SELECTED or \
                parentstate == CheckboxState.SELECTEDCHILD:
            newstate = CheckboxState.SELECTEDCHILD
        else:
            newstate = CheckboxState.EXCLUDEDCHILD
    elif state == CheckboxState.EXCLUDEDCHILD:
        newstate = CheckboxState.EXCLUDED
    if len(selections) > 1:
        # if we have multiple selections, the idea is to move all the
        # selections to the newstate defined above, or to valid
        # unselected or inherited states if the move to newstate would
        # be invalid.
        """
        Here are the state transitions for the item based on the
        newstate as determined by the clicked item and the current
        states: ('-' = no state change, '?' = consult children)
                                       item
                        unsel  sel        selch  selpar  excl   exclch
                 unsel  -      unsel      -      -       unsel  -
                 sel    sel    -          sel    sel     sel    -
        newstate selch  -      unsel      -      -       unsel  -
                 selpar -      unsel      -      -       unsel  -
                 excl   excl   excl?slpr  excl   excl    -      excl
                 exclch -      unsel      -      -       unsel  -
        """
        for i in selections:
            (mpath, misDir, mexpanded, mstate) = self.GetItemData(
                    i).GetData()
            mnewstate = mstate
            if mstate == CheckboxState.UNSELECTED or \
                    mstate == CheckboxState.SELECTEDCHILD or \
                    mstate == CheckboxState.SELECTEDPARENT:
                if newstate == CheckboxState.SELECTED or \
                        newstate == CheckboxState.EXCLUDED:
                    mnewstate = newstate
            elif mstate == CheckboxState.SELECTED:
                if newstate == CheckboxState.UNSELECTED or \
                        newstate == CheckboxState.SELECTEDCHILD or \
                        newstate == CheckboxState.SELECTEDPARENT or \
                        newstate == CheckboxState.EXCLUDEDCHILD:
                    mnewstate = CheckboxState.UNSELECTED
                elif newstate == CheckboxState.EXCLUDED:
                    # '?' case: keep SELECTEDPARENT if descendants
                    # outside the selection are still selected
                    if self.checkChildrenStates(i,
                            [CheckboxState.SELECTED,
                            CheckboxState.SELECTEDPARENT], selections):
                        mnewstate = CheckboxState.SELECTEDPARENT
                    else:
                        mnewstate = newstate
            elif mstate == CheckboxState.EXCLUDED:
                if newstate == CheckboxState.UNSELECTED or \
                        newstate == CheckboxState.SELECTEDCHILD or \
                        newstate == CheckboxState.SELECTEDPARENT or \
                        newstate == CheckboxState.EXCLUDEDCHILD:
                    mnewstate = CheckboxState.UNSELECTED
                elif newstate == CheckboxState.SELECTED:
                    mnewstate = newstate
            elif mstate == CheckboxState.EXCLUDEDCHILD:
                if newstate == CheckboxState.EXCLUDED:
                    mnewstate = newstate
            self.setItemState(i, mnewstate)
    # finally apply the new state to the clicked item itself
    self.setItemState(item, newstate, (path, isDir, expanded, state,
            imageidx))
def setItemState(self, item, newstate, oldData=None):
    """Move item to newstate, update its icon by the state-offset
    delta, and propagate the change to children and ancestors.
    oldData, when supplied, is the caller's cached
    (path, isDir, expanded, state, imageidx) tuple."""
    if oldData:
        (path, isDir, expanded, state, imageidx) = oldData
    else:
        (path, isDir, expanded, state) = self.GetItemData(item).GetData()
        imageidx = self.GetItemImage(item)
    self.SetPyData(item, (path, isDir, expanded, newstate))
    self.SetItemImage(item, imageidx + CheckboxState.offset(state, newstate))
    self.renderChildren(item, True)
    self.renderParents(item)
def getTopLevelDrives(self):
    """Return the filesystem roots to display: drive letters on
    Windows, '/' everywhere else."""
    # renamed locals: 'sys' shadowed the imported sys module and
    # 'type' shadowed the builtin
    system = platform.system()
    if system == 'Windows':
        # XXX: need to test this all out
        import win32api, string
        drives = win32api.GetLogicalDriveStrings()
        driveletters = string.splitfields(drives, '\000')
        for d in driveletters:
            drivetype = win32api.GetDriveType("%s:\\" % d)
            # XXX: set the appropriate icon
        return driveletters
    else: # Unix, OSX, etc.
        return ['/']
def addListener(self, callback):
    # callback(item, data) will be invoked on every SetPyData() call,
    # letting peer widgets (e.g. FileListCtrl) mirror state changes
    self.listeners.append(callback)
def SetPyData(self, item, data):
    """Store data on item, then notify every registered listener."""
    wx.TreeCtrl.SetPyData(self, item, data)
    for notify in self.listeners:
        notify(item, data)
"""
Tests for DirCheckboxCtrl
A number of unit tests must be performed on the DirCheckboxGUI widget when
refactoring. Add to this list so that it becomes comprehensive.
Basic Tests:
1. Click on a top-level UNSELECTED object in the tree [should become SELECTED].
- Click again [should become EXCLUDED].
- Click again [should become UNSELECTED].
2. Click on a non-top-level UNSELECTED object in the tree that has no SELECTED
children [should become SELECTED, its parents should become SELECTEDPARENT and
its children SELECTEDCHILD].
- Click again [should become EXCLUDED, its parents who were SELECTEDPARENT
should become UNSELECTED, and its UNSELECTED children should become
EXCLUDED].
- Click again [should become UNSELECTED, and its children should become
UNSELECTED].
3. Change two children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click one child to become EXCLUDED [parents should stay in SELECTEDPARENT]
- Click the same child to become UNSELECTED [parents should stay in
SELECTEDPARENT]
- Click the other child to become EXCLUDED [parents should become
UNSELECTED]
4. Choose a folder and a child item.
- Click the child to become SEL [parent should be SELPAR]
- Click the parent [parent should become SEL]
- Click the parent again [parent should become SELPAR]
5. Choose a folder and a child item.
- Click the parent to become SEL [child should become SELCHILD]
- Click the child [child should become SEL]
- Click the child again [child should become EXCL]
- Click the child again [child should become SELCHILD]
6. Pick a node with children at least two-deep. Change two of the
at-least-two-deep children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click parent closest to SELECTED children to SELECTED [two children remain
in SELECTED, all other children become SELECTEDCHILD. Parent[s] of parent
remain SELECTEDPARENT]
- Click one child twice to become SELECTEDCHILD [child should not be able to
be UNSELECTED, parent states should not change]
- Click other child twice to become SELECTEDCHILD [child should not be able
to be UNSELECTED, parent states should not change]
7. Pick a node with children at least two-deep.
- Click deepest parent to SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click same parent again to become EXCLUDED [Parent[s] of parent become
UNSELECTED]
- Click same parent again to become UNSELECTED [Parent[s] of parent remain
UNSELECTED]
8. Pick a node with children at least two-deep.
- Click deepest child to become SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click the topmost parent to become SELECTED [children become
SELECTEDCHILD]
- Click the topmost parent again to become SELECTEDPARENT [middle child
should become SELECTEDPARENT]
Multi-Selection Tests:
1. Multi-select three items at the same level and in the same state. Toggle
between the three main states [SELECTED, EXCLUDED, UNSELECTED]
2. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the SEL item to see that all three items become
EXCL.
3. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the EXCL item to see that all three items become
UNSEL.
4. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the UNSEL item to see that all three items become
SEL.
5. Choose three items that are nested within each other: a parent folder, one
of its children folders, and a file/folder in the child folder. Choose one
other item from the child folder.
- set the top parent to UNSEL
- set the child folder to SEL [parent become SELPAR]
- set the child item to SEL
- set the other item to EXCL
- multi-select all four items
- 5A. click on the top parent (which was in SELPAR) [All four items should
become SEL, all children of any of these items should become SELCHILD].
Toggle twice more [all selected items should toggle to EXCL, then to
UNSEL]
- 5B. reset as above, click on the child folder [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5C. reset as above, click on the child item [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5D. reset as above, click on the other item [All four items should
become UNSEL]. Toggle twice more [all selected items should go to SEL,
then EXCL]
6. Choose a folder, one of its subfolders, a subfolder of the subfolder, an item in the deepest subfolder, and an item in the first subfolder, e.g.:
[] A
[] B
[] C
[] D
[] E
- change item 'D' to SEL [parents 'A', 'B', and 'C' should go to SELPAR]
- change item 'E' to EXCL
- multi-select 'A', 'C', and 'E'
- toggle 'E' to UNSEL [all other selections should stay in current state]
- toggle 'E' to SEL ['A' and 'B' become SEL, their children become SELCHILD]
- toggle 'E' back to EXCL [should get our original multi-select setup back]
- toggle 'C' to SEL [all selections to SEL, children to SELCHILD]
- toggle 'C' to SELPAR ['A' and 'C' to SELPAR, 'E' to UNSEL]
- toggle 'E' twice [should get our original multi-select setup back]
"""
class CheckFileListCtrlMixin:
    # for some insane reason, we can't get EVT_LEFT_DOWN (or _UP) to bind
    # in FileListCtrl itself.  This tiny mixin exists solely to work
    # around that: it binds EVT_LEFT_UP to the handler supplied by the
    # host class.
    def __init__(self, toCall):
        self.Bind(wx.EVT_LEFT_UP, toCall)
class FileListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin,
        CheckFileListCtrlMixin):
    """
    Implements a file list control, with a peerctrl that contains the
    filesystem model. Currently, this peerctrl must implement an
    addListener(), changeState(), GetItemData(), expandDir(), GetSelections(),
    and GetChildren() API similar to that implemented by DirCheckBoxCtrl.
    """
    def __init__(self, parent, peerctrl, id=-1, pos=wx.DefaultPosition,
            size=wx.DefaultSize, style=wx.LC_REPORT,
            validator=wx.DefaultValidator, name=wx.ListCtrlNameStr):
        wx.ListCtrl.__init__(self, parent, id, pos, size, style, validator,
                name)
        CheckFileListCtrlMixin.__init__(self, self.OnClick)
        listmix.ListCtrlAutoWidthMixin.__init__(self)
        self.peerctrl = peerctrl
        self.peerctrl.addListener(self.itemChanged)
        self.itemdict = {} # a dict with filepath as key, containing tuples of
                # (index into ListCtrl, reference to peerctrl object)
        self.stopsearch = False
        self.il, self.checkboxes, self.icondict = createDefaultImageList()
        self.AssignImageList(self.il, wx.IMAGE_LIST_SMALL)
        self.il = self.GetImageList(wx.IMAGE_LIST_SMALL)
        self.InsertColumn(0, "Filename")
        self.InsertColumn(1, "Location")
        #self.InsertColumn(2, "Last Backup")
        #self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        #self.SetColumnWidth(1, -1) #wx.LIST_AUTOSIZE)
        self.Bind(wx.EVT_MOTION, self.mouseMotion)
        self.searchSourceItems = []

    def itemChanged(self, item, data):
        """Listener for peerctrl SetPyData() calls: keep our row icon in
        sync with the peer's state for any path we display."""
        (path, isDir, expanded, state) = data
        if path in self.itemdict:
            item = self.itemdict[path][0]
            image = getFileIcon(path, self.il, self.checkboxes,
                    self.icondict) + state
            self.SetItemImage(item, image)

    def GetAll(self, excludeStates=[]):
        """Return the list-control indices of every row."""
        result = []
        start = -1
        for i in range(self.GetItemCount()):
            item = self.GetNextItem(start, wx.LIST_NEXT_ALL)
            # XXX: only append if not in excludeStates
            result.append(item)
            start = item
        return result

    def GetSelections(self):
        """Return the list-control indices of the selected rows."""
        result = []
        start = -1
        for i in range(self.GetSelectedItemCount()):
            item = self.GetNextItem(start, wx.LIST_NEXT_ALL,
                    wx.LIST_STATE_SELECTED)
            result.append(item)
            start = item
        return result

    def GetPeerSelections(self, selections):
        """Map our row indices to the corresponding peerctrl items."""
        result = []
        for item in selections:
            path = os.path.join(self.GetItem(item,1).GetText(),
                    self.GetItemText(item))
            if path in self.itemdict:
                result.append(self.itemdict[path][1])
        return result

    def mouseMotion(self, event):
        """Show the peer's state tooltip when hovering over a row icon."""
        point = event.GetPosition()
        item, flags = self.HitTest(point)
        if flags == wx.LIST_HITTEST_ONITEMICON:
            path = os.path.join(self.GetItem(item,1).GetText(),
                    self.GetItemText(item))
            text = self.peerctrl.getTooltip(self.itemdict[path][1])
            tip = wx.ToolTip(text)
            self.SetToolTip(tip)
            #tipwin = tip.GetWindow()
            #tippos = tipwin.GetPosition()
            #print "%s vs %s" % (tippos, point)
            #tipwin.SetPosition(point)

    def OnClick(self, event):
        """Toggle the clicked row's backup state via the peerctrl and
        refresh the row's icon from the resulting state."""
        point = event.GetPosition()
        item, flags = self.HitTest(point)
        if flags == wx.LIST_HITTEST_ONITEMICON:
            peerselections = self.GetPeerSelections(self.GetSelections())
            path = os.path.join(self.GetItem(item,1).GetText(),
                    self.GetItemText(item))
            ditem = self.itemdict[path][1] # raises if not present
            self.peerctrl.changeState(ditem, peerselections)
            (path, isDir, expanded, state) \
                    = self.peerctrl.GetItemData(ditem).GetData()
            image = getFileIcon(path, self.il, self.checkboxes,
                    self.icondict) + state
            self.SetItemImage(item, image)

    def searchButtonAction(self, event):
        """Run a search (event.searchstring) over the folders selected
        in the peerctrl; returns an (errmsg, infomsg) tuple for the
        caller to display."""
        selections = self.peerctrl.GetSelections()
        if len(selections) == 0:
            return ("Please tell me where to search. Select one or more"
                    " folders in the left-hand panel (hold down SHIFT or"
                    " CTRL for multiple selection), then click the 'find!'"
                    " button again.", None)
        else:
            self.DeleteAllItems()
            self.itemdict = {}
            b = wx.BusyCursor()
            searchSourceItems = []
            for i in selections:
                self.addResults(i, event.searchstring)
                searchSourceItems.append(i)
            # remember the searched paths so rules can be keyed on them
            self.searchSourceItems = [self.peerctrl.GetItemData(s).GetData()[0]
                    for s in searchSourceItems]
            print("sources: %s" % self.searchSourceItems)
            return ("Search results will appear as files that match your"
                    " search are found.", None)
        # (an unreachable trailing 'return (None, None)' was removed)

    def addResults(self, ditem, searchstring):
        """Recursively search ditem's subtree, adding a row for each
        file whose path contains any whitespace-separated term."""
        (path, isDir, expanded, state) \
                = self.peerctrl.GetItemData(ditem).GetData()
        position = self.GetItemCount()
        if isDir:
            if not expanded:
                self.peerctrl.expandDir(ditem, busycursor=False)
            children = self.peerctrl.getChildren(ditem)
            for c in children:
                self.addResults(c, searchstring)
                wx.Yield()  # keep the UI responsive during long searches
                if self.stopsearch:
                    break
        else:
            terms = [x for x in searchstring.split(' ') if x != '']
            for term in terms:
                # bug fix: was 'find(term) > 0', which silently skipped
                # matches at the very start of the path (offset 0); a
                # stray debug 'print path' was also removed here
                if path.find(term) >= 0:
                    image = getFileIcon(path, self.il, self.checkboxes,
                            self.icondict) + state
                    dirname, filename = os.path.split(path)
                    index = self.InsertImageStringItem(position, filename,
                            image)
                    self.SetStringItem(index, 1, dirname)
                    self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
                    self.itemdict[path] = (index, ditem)
                    break

    def setGroup(self, state):
        """Cycle the first result row (with all rows as the multi-select
        group) until it reaches the requested state; returns the list
        of paths the search was run over."""
        items = self.GetAll()
        item = items[0]
        peerselections = self.GetPeerSelections(items)
        path = os.path.join(self.GetItem(item,1).GetText(),
                self.GetItemText(item))
        ditem = self.itemdict[path][1] # raises if not present
        while True:
            # cycle until the items state matches the desired state
            self.peerctrl.changeState(ditem, peerselections) # can be slow
            (path, isDir, expanded, nstate) \
                    = self.peerctrl.GetItemData(ditem).GetData()
            if nstate == state:
                break
        image = getFileIcon(path, self.il, self.checkboxes,
                self.icondict) + state
        self.SetItemImage(item, image)
        return self.searchSourceItems
class GroupSelectionCheckbox(wx.Panel):
    """A three-state checkbox (UNSELECTED -> SELECTED -> EXCLUDED) with a
    descriptive label, used to apply one backup rule to a whole group of
    search results.  setGroupState, when given, is called with the new
    CheckboxState each time the user toggles the box."""
    def __init__(self, parent, id=-1, setGroupState=None):
        wx.Panel.__init__(self, parent, id)
        self.setGroupState = setGroupState
        # the three checkbox face bitmaps: unchecked, checked, excluded
        self.ubm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-unchecked1.png")))
        self.cbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-checked1.png")))
        self.ebm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-excluded1.png")))
        self.checkboxButton = wx.BitmapButton(self, -1, self.ubm,
                style=wx.NO_BORDER)
        self.Bind(wx.EVT_BUTTON, self.onCheckbox, self.checkboxButton)
        self.description = wx.StaticText(self, -1,
                "always BACKUP any files that match these search criteria ")
        self.state = CheckboxState.UNSELECTED
        self.gbSizer = wx.GridBagSizer(1,2)
        self.gbSizer.Add(self.checkboxButton, (0,0), flag=wx.ALIGN_CENTER)
        self.gbSizer.Add(self.description, (0,1), flag=wx.ALIGN_CENTER)
        self.gbSizer.AddGrowableRow(1)
        self.SetSizerAndFit(self.gbSizer)
    def Enable(self, enable=True):
        # enable/disable both the button and its label together
        self.checkboxButton.Enable(enable)
        self.description.Enable(enable)
    def Disable(self):
        self.Enable(False)
    def clear(self):
        # reset to the UNSELECTED face and default label
        self.checkboxButton.SetBitmapLabel(self.ubm)
        self.state = CheckboxState.UNSELECTED
        self.description.SetLabel(
                "always BACKUP any files that match these search criteria")
    def setState(self, state):
        # programmatically set the state (does NOT fire setGroupState)
        self.state = state
        if self.state == CheckboxState.UNSELECTED:
            self.checkboxButton.SetBitmapLabel(self.ubm)
            self.description.SetLabel(
                    "always BACKUP any files that match these search criteria")
        elif self.state == CheckboxState.SELECTED:
            self.checkboxButton.SetBitmapLabel(self.cbm)
            self.description.SetLabel(
                    "always BACKUP any files that match these search criteria")
        elif self.state == CheckboxState.EXCLUDED:
            self.checkboxButton.SetBitmapLabel(self.ebm)
            self.description.SetLabel(
                    "always EXCLUDE any files that match these search criteria")
    def onCheckbox(self, event):
        # user click: advance UNSELECTED -> SELECTED -> EXCLUDED ->
        # UNSELECTED and notify setGroupState of the new state
        if self.state == CheckboxState.UNSELECTED:
            self.checkboxButton.SetBitmapLabel(self.cbm)
            self.state = CheckboxState.SELECTED
            if self.setGroupState:
                self.setGroupState(CheckboxState.SELECTED)
        elif self.state == CheckboxState.SELECTED:
            self.checkboxButton.SetBitmapLabel(self.ebm)
            self.description.SetLabel(
                    "always EXCLUDE any files that match these search criteria")
            self.state = CheckboxState.EXCLUDED
            if self.setGroupState:
                self.setGroupState(CheckboxState.EXCLUDED)
        elif self.state == CheckboxState.EXCLUDED:
            self.checkboxButton.SetBitmapLabel(self.ubm)
            self.description.SetLabel(
                    "always BACKUP any files that match these search criteria")
            self.state = CheckboxState.UNSELECTED
            if self.setGroupState:
                self.setGroupState(CheckboxState.UNSELECTED)
class SearchPanel(wx.Panel):
    """Search UI: a text field plus find/stop button, a FileListCtrl of
    results backed by the DirCheckboxCtrl model, and a group checkbox
    that applies one backup rule to all results at once."""
    def __init__(self, parent, dircheckbox, id=-1, searchButtonAction=None):
        wx.Panel.__init__(self, parent, id)
        self.dircheckbox = dircheckbox
        # optional callback invoked after each search with err/info text
        self.searchButtonAction = searchButtonAction
        self.SetAutoLayout(False)
        self.rules = {} # should refer to something from fludrules
        self.searchField = wx.TextCtrl(self, -1,
                "search for files to backup here", size=wx.Size(-1,-1),
                style=wx.TE_PROCESS_ENTER)
        self.searchField.SetToolTipString('find files within directories'
                ' selected to the left by entering search terms here')
        self.searchField.Bind(wx.EVT_TEXT_ENTER, self.onSearchClick)
        self.searchField.Bind(wx.EVT_LEFT_DOWN, self.selectAllText)
        self.searchField.Bind(wx.EVT_KILL_FOCUS, self.unfocused)
        self.searchButton = wx.Button(self, -1, 'find!', name='searchButton')
        self.Bind(wx.EVT_BUTTON, self.onSearchClick, self.searchButton)
        self.searchResults = FileListCtrl(self, dircheckbox, -1,
                name='searchResults', style=wx.SUNKEN_BORDER | wx.LC_REPORT)
        self.searchResults.SetExtraStyle(0)
        self.searchResults.SetLabel('found files')
        self.groupSelection = GroupSelectionCheckbox(self, -1, self.setGroup)
        self.groupSelection.Disable()
        self.gbSizer = wx.GridBagSizer(3,2)
        self.gbSizer.Add(self.searchField, (0,0), flag=wx.EXPAND)
        self.gbSizer.Add(self.searchButton, (0,1))
        self.gbSizer.Add(self.searchResults, (1,0), (1,2),
                flag=wx.EXPAND|wx.TOP, border=5)
        self.gbSizer.Add(self.groupSelection, (2,0) )
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)
    def onSearchClick(self, event):
        """Start a search, or stop a running one (the button label
        doubles as the mode indicator: 'find!' vs 'stop!')."""
        event.searchstring = self.searchField.GetValue()
        if self.searchButton.GetLabel() == 'stop!':
            # a search is running: request it to stop
            self.searchButton.SetLabel('find!')
            self.searchResults.stopsearch = True
            return
        else:
            self.groupSelection.clear()
            self.groupSelection.Disable()
            self.searchButton.SetLabel('stop!')
            self.searchButton.Update()
            err, info = self.searchResults.searchButtonAction(event)
            selections = self.searchResults.searchSourceItems
            # see if we should set the checkbox button from a previous rule
            state = None
            if len(selections) > 0 and self.rules.has_key(selections[0]):
                rule = self.rules[selections[0]]
                if self.rules[selections[0]].has_key(event.searchstring):
                    state = self.rules[selections[0]][event.searchstring]
                    # only restore if every searched folder has the same rule
                    for i in selections:
                        if not self.rules.has_key(i) or self.rules[i] != rule:
                            state = None
                            break
                        #for j in self.rules[i]:
            if state:
                print "should restore checkbox to %s" % state
                self.groupSelection.setState(state)
            self.searchButton.SetLabel('find!')
            self.searchResults.stopsearch = False
            if self.searchButtonAction:
                self.searchButtonAction(event, errmsg=err, infomsg=info)
            self.groupSelection.Enable()
    def selectAllText(self, event):
        """Select-all on first click into the search field; normal caret
        placement on subsequent clicks."""
        if wx.Window.FindFocus() != self.searchField:
            self.searchField.SetSelection(-1,-1)
            self.searchField.SetFocus()
        else:
            self.searchField.SetSelection(0,0)
            event.Skip()
    def unfocused(self, event):
        self.searchField.SetSelection(0,0)
    def setGroup(self, state):
        """Apply 'state' to every search result and record (or clear)
        the corresponding search-string rule per searched folder."""
        b = wx.BusyCursor()
        selections = self.searchResults.setGroup(state)
        for s in selections:
            if not self.rules.has_key(s):
                self.rules[s] = {}
            if state == CheckboxState.UNSELECTED:
                # UNSELECTED means 'no rule': drop any stored rule
                try:
                    self.rules.pop(s)
                except:
                    pass
            else:
                self.rules[s][self.searchField.GetValue()] = state
        print self.rules
class FilePanel(wx.SplitterWindow):
    """Backup-selection page: a directory checkbox tree on the left and a
    SearchPanel on the right, split vertically.

    Checkbox state is seeded from fludrules.init on first run and persisted
    to fludfile.conf on shutdown.
    """
    def __init__(self, parent, searchButtonAction=None):
        # Use the WANTS_CHARS style so the panel doesn't eat the Return key.
        wx.SplitterWindow.__init__(self, parent, -1,
                style=wx.SP_LIVE_UPDATE | wx.CLIP_CHILDREN | wx.WANTS_CHARS)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.SetNeedUpdating(True)
        self.tree = DirCheckboxCtrl(self, -1, dir="/")
        # XXX: fludrules.init path should be in config
        self.fludrules = self.getFludHome()+"/fludrules.init"
        if not os.path.isfile(self.fludrules):
            # first run: copy the shipped default rules file into FLUDHOME
            # and use it to pre-select files for backup
            # XXX: do the other first time stuff (email encrypted credentials,
            # etc.)
            parent.SetMessage("Welcome. This appears to be the first"
                    " time you've run flud. We've automatically selected some"
                    " files for backup. You can make changes by"
                    " selecting/deselecting files and directories. When you are"
                    " done, simply close this window.")
            src = open('fludrules.init', 'r')
            dst = open(self.fludrules, 'w')
            filerules = src.read()
            dst.write(filerules)
            dst.close()
            src.close()
            # NOTE(review): eval() of file contents -- fludrules.init ships
            # with the app, but this is unsafe if the file can be tampered
            # with; consider ast.literal_eval or a real parser
            filerules = eval(filerules)
            rulestates = {}
            # expand env vars and globs in each rule path; every matching
            # path gets that rule's checkbox state
            for rule in filerules['baserules']:
                value = filerules['baserules'][rule]
                rule = glob.glob(os.path.expandvars(rule))
                for r in rule:
                    rulestates[r] = value
            self.tree.setStates(rulestates)
        # XXX: fludfile.conf path should be in config
        self.fludfiles = self.getFludHome()+"/fludfile.conf"
        print self.fludfiles
        if os.path.isfile(self.fludfiles):
            # restore checkbox states previously saved by flushFileConfig()
            # NOTE(review): eval() again -- same caveat as above
            file = open(self.fludfiles, 'r')
            states = eval(file.read())
            self.tree.setStates(states)
            file.close()
        self.searchPanel = SearchPanel(self, dircheckbox=self.tree,
                searchButtonAction=searchButtonAction)
        self.SetMinimumPaneSize(20)
        self.SplitVertically(self.tree, self.searchPanel) #, 300)
    def getFludHome(self):
        """Return the flud home directory ($FLUDHOME, or $HOME/.flud),
        creating it mode 0700 if it does not exist."""
        if os.environ.has_key('FLUDHOME'):
            fludhome = os.environ['FLUDHOME']
        else:
            fludhome = os.environ['HOME']+"/.flud"
        if not os.path.isdir(fludhome):
            os.mkdir(fludhome, 0700)
        return fludhome
    def shutdown(self, event):
        """Persist checkbox states before the window closes."""
        self.flushFileConfig()
        event.Skip()
    def flushFileConfig(self):
        """Write the tree's checkbox states to fludfile.conf as a repr'd
        dict (read back with eval() in __init__)."""
        states = self.tree.getStates()
        f = open(self.fludfiles, 'w')
        f.write(str(states))
        f.close()
        for i in states:
            print "%s %s" % (i, states[i])
    def OnSize(self, event):
        # keep the tree filling the client area on resize
        w,h = self.GetClientSizeTuple()
        if self.tree:
            self.tree.SetDimensions(0, 0, w, h)
        event.Skip()
class RestoreCheckboxCtrl(DirCheckboxCtrl):
    """Checkbox tree of backed-up files used to pick items for restore.

    Unlike DirCheckboxCtrl, the tree is populated from stored metadata
    (listMeta) rather than the local filesystem, and excludes are disabled.
    """
    # XXX: child/parent selection/deselection isn't quite right still, esp wrt
    # root node. repro:
    # -/
    # -d1
    # -f1
    # -d2
    # -d3
    # -f2
    # -f3
    # with nothing selected, select d3 and f3, then select root, then deselect
    # d3 and f3
    def __init__(self, parent, id=-1, config=None, pos=wx.DefaultPosition,
            size=wx.DefaultSize,
            style=(wx.TR_MULTIPLE
                | wx.TR_HAS_BUTTONS
                | wx.TR_TWIST_BUTTONS
                | wx.TR_NO_LINES
                | wx.TR_FULL_ROW_HIGHLIGHT
                | wx.SUNKEN_BORDER),
            validator=wx.DefaultValidator, name=wx.ControlNameStr):
        self.config = config
        DirCheckboxCtrl.__init__(self, parent, id, config, pos, size, style,
                validator, name, allowExclude=False)
    def initTree(self, config):
        # overrides DirCheckboxCtrl: build from metadata, not the filesystem
        self.expandRoot(config)
        self.expandUntilMultiple()
    def expandRoot(self, config):
        """Create the root node and populate the tree from stored metadata."""
        self.defaultImageList, self.checkboxes, self.icondict \
                = createDefaultImageList()
        self.AssignImageList(self.defaultImageList)
        self.il = self.GetImageList()
        self.rootID = self.AddRoot("/", self.icondict['computer'], -1,
                wx.TreeItemData(("", True, False, CheckboxState.UNSELECTED)))
        self.update()
    def expandUntilMultiple(self):
        """Expand down from the root until reaching a node with zero or
        multiple children (skips boring single-child chains like /home/x)."""
        node = self.rootID
        while True:
            (ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
            children = self.getChildren(node, False)
            if len(children) > 1 or len(children) == 0:
                break;
            node = children[0]
            self.Expand(node)
    def update(self):
        """Sync the tree with listMeta() master metadata, adding any missing
        file/directory nodes along each stored path."""
        master = listMeta(self.config)
        for i in master:
            # dict-valued entries are non-path metadata records: skip them
            if not isinstance(master[i], dict):
                traversal = i.split(os.path.sep)
                node = self.rootID
                path = "/"
                if traversal[0] == '':
                    traversal.remove('')
                for n in traversal:
                    path = os.path.join(path, n)
                    children = self.getChildrenDict(node)
                    if n == traversal[-1] and not n in children:
                        # leaf component: a file entry with a type icon
                        child = self.AppendItem(node, n)
                        self.SetPyData(child, (path, False, False, 0))
                        idx = getFileIcon(i, self.il, self.checkboxes,
                                self.icondict)
                        self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
                    else:
                        if not n in children:
                            # intermediate directory not yet in the tree
                            child = self.AppendItem(node, n)
                            self.SetPyData(child, (path, True, False, 0))
                            self.SetItemImage(child, self.icondict['folder'],
                                    wx.TreeItemIcon_Normal)
                        else:
                            child = children[n]
                    node = child
        self.Expand(self.rootID)
    def getChildrenDict(self, node):
        """Return {item text: tree item id} for node's direct children."""
        result = {}
        child, cookie = self.GetFirstChild(node)
        while child:
            result[self.GetItemText(child)] = child
            child, cookie = self.GetNextChild(node, cookie)
        return result
    def onExpand(self, event):
        # metadata tree is built fully up-front; nothing to do on expand
        pass
    def getSelected(self, startNode=None):
        """Recursively collect tree ids of all selected file (non-directory)
        items at or below startNode (default: root)."""
        if not startNode:
            startNode = self.rootID
        children = self.getChildren(startNode)
        selected = []
        for n in children:
            (path, isDir, expanded, state) = self.GetItemData(n).GetData()
            if not isDir \
                    and (state == CheckboxState.SELECTED \
                        or state == CheckboxState.SELECTEDCHILD):
                selected.append(n)
            if isDir and (state == CheckboxState.SELECTED \
                    or state == CheckboxState.SELECTEDPARENT \
                    or state == CheckboxState.SELECTEDCHILD):
                selected += self.getSelected(n)
        return selected
class RestorePanel(wx.Panel):
    """Notebook page for restores: a RestoreCheckboxCtrl tree plus a
    'restore selected files' button.  Restores are issued through the
    client factory as GETF requests."""
    def __init__(self, parent, config, factory):
        self.config = config
        self.factory = factory
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.tree = RestoreCheckboxCtrl(self, -1, config, #wx.TreeCtrl(self, -1,
                style=(wx.TR_MULTIPLE
                    | wx.TR_HAS_BUTTONS
                    | wx.TR_TWIST_BUTTONS
                    | wx.TR_NO_LINES
                    | wx.TR_FULL_ROW_HIGHLIGHT
                    | wx.SUNKEN_BORDER))
        self.restoreButton = wx.Button(self, -1, 'restore selected files',
                name='restoreButton')
        self.Bind(wx.EVT_BUTTON, self.onRestoreClick, self.restoreButton)
        self.gbSizer = wx.GridBagSizer(2,1)
        self.gbSizer.Add(self.tree, (0,0), flag=wx.EXPAND|wx.ALL, border=0)
        self.gbSizer.Add(self.restoreButton, (1,0), flag=wx.EXPAND|wx.ALL,
                border=0)
        self.gbSizer.AddGrowableRow(0)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)
    def update(self):
        # refresh the tree from stored metadata (called when page is shown)
        self.tree.update()
    def OnSize(self, event):
        w,h = self.GetClientSizeTuple()
        event.Skip()
    def onTooltip(self, event):
        pass
    def onRestoreClick(self, event):
        """Issue a GETF for each selected file; items are recolored green or
        red as the individual restores succeed or fail."""
        for n in self.tree.getSelected():
            (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
            print "restoring %s" % path
            d = self.factory.sendGETF(path)
            d.addCallback(self.restored, n)
            d.addErrback(self.restoreFailed, n)
        self.tree.UnselectAll()
    def restored(self, res, n):
        # success callback: mark the item green and flip its checkbox state
        (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
        print "yay, %s" % path
        self.tree.SetItemTextColour(n, '#005804')
        self.tree.changeState(n)
    def restoreFailed(self, err, n):
        # errback: mark the item red and flip its checkbox state
        (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
        print "boo, %s: %s" % (path, err)
        self.tree.SetItemTextColour(n, wx.RED)
        self.tree.changeState(n)
class SchedulePanel(wx.Panel):
    """Placeholder notebook page for configuring the backup schedule
    (functionality not implemented yet)."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
    def OnSize(self, event):
        """Resize handler; the panel has no children to lay out yet."""
        width, height = self.GetClientSizeTuple()
        event.Skip()
class FeedbackPanel(wx.Panel):
    """Placeholder notebook page for sending feedback (not implemented)."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # NOTE(review): the editor is created as a child of *parent* (not
        # self) and the local reference is discarded -- looks unintended;
        # confirm whether this should be self.editor = ...Editor(self, -1)
        editor = wx.lib.editor.editor.Editor(parent, -1)
    def OnSize(self, event):
        w,h = self.GetClientSizeTuple()
        event.Skip()
class FludNotebook(wx.Notebook):
    """Main notebook: Backup Files / Restore / Schedule / Feedback pages.
    Also owns the LocalClientFactory connection to the local flud node."""
    def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
            size=wx.DefaultSize, style=wx.NB_BOTTOM|wx.NO_BORDER):
        self.parent = parent
        self.config = parent.config
        self.factory = LocalClientFactory(self.config)
        print "connecting to localhost:%d" % self.config.clientport
        reactor.connectTCP('localhost', self.config.clientport, self.factory)
        # NOTE(review): the 'size' parameter is accepted but never passed on
        wx.Notebook.__init__(self, parent, id, pos, style=style)
        self.filePanel = FilePanel(self,
                searchButtonAction=parent.searchButtonAction)
        self.AddPage(self.filePanel, "Backup Files")
        self.restorePanel = RestorePanel(self, self.config, self.factory)
        self.AddPage(self.restorePanel, "Restore")
        self.schedulePanel = SchedulePanel(self)
        self.AddPage(self.schedulePanel, "Backup Schedule")
        self.feedbackPanel = FeedbackPanel(self)
        self.AddPage(self.feedbackPanel, "Feedback")
        self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.changedPage)
    def shutdown(self, event):
        # only the file panel has state to persist
        self.filePanel.shutdown(event)
    def changedPage(self, event):
        """Update the banner message -- and refresh the restore tree -- when
        the user switches tabs."""
        page = event.GetSelection()
        if page == 0:
            self.SetMessage("Select files and directories for backup"
                    " with the filesystem view on the left, or set up criteria"
                    " for finding files for backup directly with simple"
                    " searches, below right.")
        elif page == 1:
            self.SetMessage("Select files/directories to be restored to"
                    " your computer, then click on 'restore!' Files will turn"
                    " green as they arrive.")
            self.restorePanel.update()
        elif page == 2:
            self.SetMessage("Configure how often your computer should backup."
                    "\n (not implemented)")
        elif page == 3:
            self.SetMessage("Send feedback to flud programmers. (not"
                    " implemented)")
    def SetMessage(self, msg):
        # delegate to the owning frame's logo banner
        self.parent.SetMessage(msg)
class FludLogoPanel(wx.Panel):
    """Black banner panel: the flud logo on the left and a centered,
    wrapping status message on the right."""
    def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
            size=wx.Size(10,10), style=wx.TAB_TRAVERSAL, name="logo panel"):
        wx.Panel.__init__(self, parent, id, pos, size, style, name)
        self.SetAutoLayout(True)
        self.SetBackgroundColour(wx.BLACK)
        self.SetForegroundColour(wx.WHITE)
        logobmp = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "flud-backup-logo-1-150-nodrop.png")))
        pad = 0
        self.logowidth = logobmp.GetWidth()
        self.logoheight = logobmp.GetHeight()
        self.logo = wx.StaticBitmap(self, -1, logobmp)
        self.messagePanel = wx.Panel(self, -1)
        self.messagePanel.SetBackgroundColour(wx.BLACK)
        self.messagePanel.SetForegroundColour(wx.WHITE)
        self.message = wx.StaticText(self.messagePanel, -1,
                "message text area", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE,
                size=(-1, self.logoheight-15))
        # re-wrap/re-center the text whenever its area is resized
        self.message.Bind(wx.EVT_SIZE, self.resizeMessage)
        self.bsizer = wx.BoxSizer(wx.VERTICAL)
        self.bsizer.Add(self.message, flag=wx.EXPAND|wx.ALL, border=35)
        self.bsizer.SetSizeHints(self.messagePanel)
        self.messagePanel.SetSizer(self.bsizer)
        self.gbSizer = wx.GridBagSizer(1,2)
        self.gbSizer.Add(self.logo, (0,0))
        self.gbSizer.Add(self.messagePanel, (0,1), flag=wx.EXPAND|wx.ALL)
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(1)
        self.SetSizerAndFit(self.gbSizer)
        self.SetSize(wx.Size(self.logowidth, self.logoheight))
        # pin the panel's height to the logo height (width may grow)
        self.SetSizeHints(self.logowidth, self.logoheight, -1, self.logoheight)
    def SetMessage(self, msg):
        """Set the banner text, wrapped and centered to the current width."""
        (w,h) = self.message.GetSizeTuple()
        #print "msg area size is %d x %d" % (w,h)
        self.message.SetLabel(msg)
        self.message.Wrap(w)
        #print "msg is '%s'" % self.message.GetLabel()
        self.message.Center()
    def resizeMessage(self, evt):
        # this is mainly to deal with StaticText wonkiness (not calling Wrap()
        # automatically, not centering properly automatically). It may be
        # possible to get rid of this with a future wxPython release.
        (w,h) = self.message.GetSizeTuple()
        self.message.Wrap(w)
        m = self.message.GetLabel()
        m = m.replace('\n',' ')
        self.message.SetLabel(m)
        self.message.Wrap(w)
        self.message.Center()
class FludFrame(wx.Frame):
    """Top-level application window: logo/message banner above the main
    notebook, with a status bar at the bottom.

    Fix: the default window title contained a typo ("flud bakcup client");
    corrected to "flud backup client", matching the title used by the
    commented-out demo driver below.
    """
    def __init__(self, parent, id=wx.ID_ANY, label="flud backup client",
            size=wx.Size(800,600),
            style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
            config=None):
        wx.Frame.__init__(self, parent, id, label, size=size, style=style)
        wx.ToolTip.SetDelay(2000)
        self.clearMessage = False
        self.logoPanel = FludLogoPanel(self)
        self.SetMessage('Welcome.')
        # config must be set before FludNotebook, which reads parent.config
        self.config = config
        self.notebook = FludNotebook(self)
        self.operationStatus = wx.StatusBar(name='operationStatus',
                parent=self, style=0)
        self.SetStatusBar(self.operationStatus)
        self.gbSizer = wx.GridBagSizer(2,1)
        self.gbSizer.Add(self.logoPanel,(0,0), flag=wx.EXPAND)
        self.gbSizer.Add(self.notebook, (1,0), flag=wx.EXPAND|wx.ALL, border=1)
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)
        self.Bind(wx.EVT_CLOSE, self.shutdown)
        self.SetSize(size)
        self.Show(True)
    def SetMessage(self, message):
        """Display *message* in the logo banner area."""
        self.logoPanel.SetMessage(message)
    def shutdown(self, event):
        """Forward window close to the notebook so panels can persist state."""
        self.notebook.shutdown(event)
    def searchButtonAction(self, event, errmsg=None, infomsg=None):
        """Show search feedback in the banner.

        Error messages are transient (cleared on the next messageless call);
        info messages persist until replaced.
        """
        if errmsg:
            self.logoPanel.SetMessage(errmsg)
            self.clearMessage = True
        elif infomsg:
            self.logoPanel.SetMessage(infomsg)
            self.clearMessage = False
        elif self.clearMessage:
            self.logoPanel.SetMessage("")
#if __name__ == '__main__':
# app = wx.PySimpleApp()
#
# config = FludConfig()
# config.load(doLogging=False)
#
# f = FludFrame(None, wx.ID_ANY, 'flud backup client', size=(795,600),
# style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
# config=config)
#
# from twisted.internet import reactor
# reactor.registerWxApp(app)
# reactor.run()
| Python |
#!/usr/bin/python
"""
FludTestGauges.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Provides gauges for visualizing storage for multiple flud nodes running on
the same host. This is really only useful for demos and testing.
"""
import sys, os, signal, stat, random
import wx
import wx.lib.buttons as buttons
from flud.FludConfig import FludConfig
dutotal = 0
def visit(arg, top, files):
    """os.path.walk() callback: add the lstat() size of every directory
    entry in *top* to the module-global running total ``dutotal``.

    *arg* is unused (the walk callback API requires the parameter).  The
    old ``arg += dutotal`` line only rebound the local name and had no
    observable effect, so it has been removed, along with a pointless
    ``"%s" %`` wrapper around the path.
    """
    global dutotal
    for name in files:
        dutotal += os.lstat(os.path.join(top, name))[stat.ST_SIZE]
def du(dir):
    """Return the total size in bytes of everything under *dir*.

    Sums the lstat() size of every file and subdirectory entry found in a
    recursive walk (mirroring the shell ``du``).  Rewritten with os.walk()
    -- os.path.walk() and its global-accumulator ``visit`` callback are
    Python-2-only (os.path.walk was removed in Python 3).  The module
    global ``dutotal`` is still updated for backward compatibility.
    """
    global dutotal
    total = 0
    for top, dirnames, filenames in os.walk(dir):
        for name in dirnames + filenames:
            total += os.lstat(os.path.join(top, name))[stat.ST_SIZE]
    dutotal = total
    return total
# XXX: too much manual layout. should convert to a managed layout to allow for
# resizing, etc.
SGAUGEWIDTH = 230 # storage gauge
DGAUGEWIDTH = 100 # dht gauge
GAUGEHEIGHT = 20 # height of each gauge bar
ROWHEIGHT = 30 # height of one per-node row
SEP = 5 # padding between adjacent widgets
LABELWIDTH = 20 # width of the node-number label column
POWERWIDTH = 70 # width of the power on/off button
RATIOBARHEIGHT = 70 # vertical space reserved for totals + ratio bar
COLWIDTH = SGAUGEWIDTH+DGAUGEWIDTH+LABELWIDTH+POWERWIDTH
COLGAPFUDGE = 30 # fudge factor for gap between columns
class FludTestGauges(wx.Frame):
    """Frame of per-node storage/DHT gauges with power buttons, for demoing
    several local flud nodes at once.  Shows totals and a store:metadata
    ratio bar, and polls disk usage once per second via a wx timer."""
    def __init__(self, parent, title, dirroot, dirs):
        # compute a multi-column layout that fits the screen height
        screenHeight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)-100
        rowheight = ROWHEIGHT+SEP
        height = len(dirs)*(rowheight)+RATIOBARHEIGHT
        columns = height / screenHeight + 1
        width = COLWIDTH*columns
        if columns > 1:
            height = (len(dirs)/columns)*(rowheight)+RATIOBARHEIGHT
            if (len(dirs) % columns) > 0:
                height += rowheight
        wx.Frame.__init__(self, parent, wx.ID_ANY, title, size=(width,height),
                style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
        # gauge scale state: each bar's full-scale value doubles as needed
        self.storebarend = 1024
        self.smultiplier = 100.0 / self.storebarend
        self.sdivisor = 1
        self.sbytelabel = ""
        self.dhtbarend = 512
        self.dmultiplier = 100.0 / self.dhtbarend
        self.ddivisor = 1
        self.dbytelabel = ""
        self.storeheading = wx.StaticText(self, -1, "block storage",
                (LABELWIDTH, 5))
        # NOTE(review): self.totaldht is assigned here as a heading and then
        # reassigned below as the totals label -- confirm intent
        self.totaldht = wx.StaticText(self, -1, "metadata",
                (LABELWIDTH+SGAUGEWIDTH+SEP, 5))
        self.gauges = []
        curCol = 0
        curRow = 30
        for i in range(len(dirs)):
            # one row per node: storage gauge, labels, dht gauge, power button
            self.gauges.append(wx.Gauge(self, -1, 100,
                    (curCol*COLWIDTH+LABELWIDTH, curRow),
                    (SGAUGEWIDTH, GAUGEHEIGHT)))
            self.gauges[i].SetBezelFace(3)
            self.gauges[i].SetShadowWidth(3)
            self.gauges[i].SetValue(0)
            self.gauges[i].dir = "%s%s" % (dirroot,dirs[i])
            # point FLUDHOME at this node's dir so FludConfig loads its config
            os.environ['FLUDHOME'] = self.gauges[i].dir;
            conf = FludConfig()
            conf.load(doLogging = False)
            print "%s" % conf.nodeID
            self.gauges[i].label = wx.StaticText(self, -1, "%2s" % dirs[i],
                    (curCol*COLWIDTH, curRow+(rowheight/4)),
                    size=(LABELWIDTH, -1))
            self.gauges[i].idlabel = wx.StaticText(self, -1, "%s" % conf.nodeID,
                    (curCol*COLWIDTH+LABELWIDTH, curRow+20))
            font = self.gauges[i].idlabel.GetFont()
            font.SetPointSize(6)
            self.gauges[i].idlabel.SetFont(font)
            self.gauges[i].dhtgauge = wx.Gauge(self, -1, 100,
                    (curCol*COLWIDTH+LABELWIDTH+SGAUGEWIDTH+SEP,
                        curRow),
                    (SGAUGEWIDTH/3, GAUGEHEIGHT))
            # button id == gauge index, used by onClick to find the node
            self.gauges[i].power = wx.Button(self, i, "turn OFF",
                    (curCol*COLWIDTH
                        +LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3,
                        curRow),
                    (POWERWIDTH, ROWHEIGHT))
            #self.gauges[i].power = buttons.GenBitmapToggleButton(self, i,
            #        None,
            #        (LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3, curRow),
            #        (POWERWIDTH, ROWHEIGHT))
            #self.gauges[i].button.SetBestSize()
            self.gauges[i].power.SetToolTipString("power on/off")
            self.Bind(wx.EVT_BUTTON, self.onClick, self.gauges[i].power)
            curRow += rowheight
            if curRow > height-RATIOBARHEIGHT:
                # column full: wrap to the top of the next column
                curCol += 1
                curRow = 30
        self.totalstore = wx.StaticText(self, -1, "total: 0",
                (LABELWIDTH, height-40))
        self.totaldht = wx.StaticText(self, -1, "total: 0",
                (LABELWIDTH+SGAUGEWIDTH+SEP, height-40))
        self.ratiogauge = wx.Gauge(self, -1, 100, (LABELWIDTH, height-20),
                (SGAUGEWIDTH+SEP+SGAUGEWIDTH/3, 10))
        self.ratiogauge.SetValue(0)
        self.Bind(wx.EVT_IDLE, self.IdleHandler)
        # poll disk usage every second
        self.timer = wx.PyTimer(self.update)
        self.timer.Start(1000)
    def onClick(self, event):
        """Power-button handler: toggle the clicked row's node on or off.

        Off: read twistd.pid, snapshot the node's command line and FLUD*
        environment via ps, SIGTERM it, hide its gauges.  On: relaunch the
        saved command with the saved environment and show the gauges.
        """
        # XXX: note that under our current startNnodes.sh scheme, the first
        # node spawned doesn't contact anyone, so if that one is powered off
        # and then powered back on, it will not be part of the node until
        # another node pings it
        # XXX: unix-specific proc management stuff follows
        idx = event.GetId()
        home = self.gauges[idx].dir
        pidfile = os.path.join(home, 'twistd.pid')
        if os.path.exists(pidfile):
            print "shutting down %s" % home
            f = open(pidfile)
            pid = int(f.read())
            f.close()
            # XXX: ps command no worky on windows, and "-ww" may not worker on
            # oldskool unixes
            self.gauges[idx].savedCmd = os.popen(
                    "ps f -wwp %d -o args=" % pid).read()
            procline = os.popen("ps e -wwp %d" % pid).read()
            self.gauges[idx].savedEnv = [e for e in procline.split()
                    if e[:4] == 'FLUD']
            # XXX: os.kill no worky on windows, need something like:
            #def windowskill(pid):
            #    import win32api
            #    handle = win32api.OpenProcess(1, 0, pid)
            #    return (0 != win32api.TerminateProcess(handle, 0))
            os.kill(pid, signal.SIGTERM)
            self.gauges[idx].power.SetLabel("turn ON")
            self.gauges[idx].Hide()
            self.gauges[idx].dhtgauge.Hide()
        else:
            print "powering up %s" % home
            # XXX: this exec no worky on windows
            fullcmd = "%s %s" % (' '.join(self.gauges[idx].savedEnv),
                    self.gauges[idx].savedCmd)
            print fullcmd
            result = os.popen('%s %s' % (' '.join(self.gauges[idx].savedEnv),
                    self.gauges[idx].savedCmd)).readlines()
            self.gauges[idx].power.SetLabel("turn OFF")
            self.gauges[idx].Show()
            self.gauges[idx].dhtgauge.Show()
            print result
    def update(self):
        """Timer callback: re-measure each node's store/ and dht/ usage,
        rescale gauges (doubling the full-scale value as needed), and
        refresh the totals labels and store:metadata ratio bar."""
        def sizeclass(num):
            # choose a display divisor and K/M/G suffix for a byte count
            divisor = 1
            bytelabel = ""
            if num > 1024:
                divisor = 1024.0
                bytelabel = 'K'
            if num > 1048576:
                divisor = 1048576.0
                bytelabel = 'M'
            if num > 1073741824:
                divisor = 1073741824.0
                bytelabel = 'G'
            return (divisor, bytelabel)
        storelargest = 0
        dhtlargest = 0
        storetotal = 0
        dhttotal = 0
        for i in self.gauges:
            if os.path.isdir(i.dir):
                i.storebytes = du(os.path.join(i.dir,'store'))
                if i.storebytes > storelargest:
                    storelargest = i.storebytes
                storetotal += i.storebytes
                i.dhtbytes = du(os.path.join(i.dir,'dht'))
                if i.dhtbytes > dhtlargest:
                    dhtlargest = i.dhtbytes
                dhttotal += i.dhtbytes
            else:
                # node home vanished: zero its stats and disable the row
                i.storebytes = 0
                i.dhtbytes = 0
                i.Disable()
                i.power.Disable()
        # grow the gauge scale by doubling until the largest value fits
        while storelargest > self.storebarend:
            self.storebarend = self.storebarend * 2
            self.smultiplier = 100.0 / self.storebarend
        self.sdivisor, self.sbytelabel = sizeclass(storetotal)
        while dhtlargest > self.dhtbarend:
            self.dhtbarend = self.dhtbarend * 2
            self.dmultiplier = 100.0 / self.dhtbarend
        self.ddivisor, self.dbytelabel = sizeclass(dhttotal)
        #print "-----"
        for i in self.gauges:
            i.SetValue(i.storebytes*self.smultiplier)
            i.dhtgauge.SetValue(i.dhtbytes*self.dmultiplier)
            #print "%.2f, %.2f" % ((float(i.storebytes)/float(i.dhtbytes)),
            #        (float(i.GetValue())/float(i.dhtgauge.GetValue())))
        self.totalstore.SetLabel("total: %.1f%s"
                % (float(storetotal)/self.sdivisor, self.sbytelabel))
        self.totaldht.SetLabel("total: %.1f%s"
                % (float(dhttotal)/self.ddivisor, self.dbytelabel))
        if (dhttotal+storetotal == 0):
            self.ratiogauge.SetValue(0)
        else:
            self.ratiogauge.SetValue((storetotal*100/(dhttotal+storetotal)))
    def updateGauges(self, update):
        # NOTE(review): self.monitors is never assigned in this class --
        # this method looks vestigial; confirm before relying on it
        for index, value in update:
            self.monitors[index].setValue(value)
    def IdleHandler(self, event):
        pass
def main():
    """Entry point: parse 'dircommon exts' from argv and run the gauges app.

    exts is a comma-separated list of suffixes appended to dircommon; an
    'a-b' entry expands to the inclusive integer range, and the special
    entry '_' (undocumented) means "just dircommon" with no suffix.
    """
    if len(sys.argv) < 2:
        print "usage: %s dircommon exts" % sys.argv[0]
        print " where exts will be appended to dircommon"
        print " e.g., '%s /home/joe/.flud 1,2,3,4,10,15,20'"\
                % sys.argv[0]
        print " or, '%s /home/joe/.flud 1-10,15,20'"\
                % sys.argv[0]
        sys.exit()
    root = sys.argv[1]
    exts = []
    dirs = [d.strip() for d in sys.argv[2].split(',')]
    for i in dirs:
        if i == "_":
            exts.append('') # undocumented, means "just dircommon"
        elif i.find('-') >= 0:
            # 'start-end' expands to the inclusive integer range
            start, end = i.split('-')
            for j in range(int(start),int(end)+1):
                exts.append(j)
        else:
            exts.append(int(i))
    app = wx.PySimpleApp()
    t = FludTestGauges(None, 'Flud Test Gauges', root, exts)
    t.Show(1)
    app.MainLoop()
# script entry point
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
"""
FludNode.tac (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL).
This is the application file used by twistd to daemonize FludNode.
"""
import os
from twisted.application import service, internet
import flud.FludNode
from flud.protocol.FludCommUtil import getCanonicalIP
# node configuration is taken from the environment (set by the launcher):
# FLUDPORT (listen port), FLUDGWHOST/FLUDGWPORT (optional gateway to join)
port = None
gwhost = None
gwport = None
if 'FLUDPORT' in os.environ:
    port = int(os.environ['FLUDPORT'])
if 'FLUDGWHOST' in os.environ:
    gwhost = getCanonicalIP(os.environ['FLUDGWHOST'])
if 'FLUDGWPORT' in os.environ:
    gwport = int(os.environ['FLUDGWPORT'])
node = flud.FludNode.FludNode(port)
if gwhost and gwport:
    # both gateway host and port must be set to bootstrap via a gateway
    node.connectViaGateway(gwhost, gwport)
application = service.Application("flud.FludNode")
# NOTE(review): rebinding 'service' shadows the imported
# twisted.application.service module (harmless here since the module isn't
# used again, but confusing)
service = node.start(twistd=True)
#service.setServiceParent(application)
| Python |
import base64
"""
fencode.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
Provides efficient urlsafe base64 encoding of python types (int, long, string, None, dict, tuple, list) -- in the same vein as BitTorrent's bencode or MNet's mencode.
"""
class Fencoded:
    """Typed wrapper for data that has already been fencoded.

    Wrapping lets previously fencoded data be nested inside other fencoded
    structures without re-encoding it (and thus without base64 bloat).
    Note that this could also be used to hold plain strings without b64
    bloating, at the cost of url safety.  See the doctests in fencode()
    for usage examples.
    """
    def __init__(self, data):
        self.data = data
    def __eq__(self, other):
        """Two Fencoded objects are equal iff their raw payloads are equal;
        comparison with any other type is always False."""
        return isinstance(other, Fencoded) and self.data == other.data
def fencode(d, lenField=False):
    """
    Takes string data or a number and encodes it to an efficient URL-friendly
    format.

    The encoding is a one-character type tag ('i'/'o' int/long, upper-case
    for negative, 's' string, 'd' dict, 'l' list, 't' tuple, 'f' Fencoded,
    'n' None), optionally a 3-character length field (when lenField is
    True, used for members nested inside containers), then the urlsafe-b64
    payload.

    >>> n = None
    >>> i = 123455566
    >>> I = 1233433243434343434343434343434343509669586958695869L
    >>> s = "hello there, everyone"
    >>> s2 = "long text ............................................................................... AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
    >>> d = {'a': 'adfasdfasd', 'aaa': 'rrreeeettt', 'f': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'}
    >>> d2 = {'a': 123, 'b': 'xyz'}
    >>> d3 = {'a': 123, 'b': 'xyz', 'c': {'x': 456, 'y': 'abc'}}
    >>> d3 = {'a': 123, 'b': 'xyz', 'c': d}
    >>> d4 = {}
    >>> l = [1,2,3,4,'a','b','cde']
    >>> l2 = [i,I,s,d]
    >>> l3 = []
    >>> l4 = [n]
    >>> t = (1,2,3,4,'a','b','cde')
    >>> t2 = (i,I,s,d)
    >>> t3 = ()
    >>> d5 = {t: s, 'n': n, 'a': i, i: "a", 'd': d3, s2: s2}
    >>> l5 = [1,[],2,[],(),{},3,{t: s, 'n': ()}]
    >>> fdecode(fencode(n)) == n
    True
    >>> fdecode(fencode(i)) == i
    True
    >>> fdecode(fencode(I)) == I
    True
    >>> fdecode(fencode(-i)) == -i
    True
    >>> fdecode(fencode(-I)) == -I
    True
    >>> fdecode(fencode(s)) == s
    True
    >>> fdecode(fencode(d)) == d
    True
    >>> fdecode(fencode(d2)) == d2
    True
    >>> fdecode(fencode(d3)) == d3
    True
    >>> fdecode(fencode(d4)) == d4
    True
    >>> fdecode(fencode(l)) == l
    True
    >>> fdecode(fencode(l2)) == l2
    True
    >>> fdecode(fencode(l3)) == l3
    True
    >>> fdecode(fencode(l4)) == l4
    True
    >>> fdecode(fencode(t)) == t
    True
    >>> fdecode(fencode(t2)) == t2
    True
    >>> fdecode(fencode(t3)) == t3
    True
    >>> fdecode(fencode(d5)) == d5
    True
    >>> fdecode(fencode(l5)) == l5
    True
    >>> f = Fencoded(fencode(s))
    >>> fdecode(fencode(f)) == f
    True
    >>> fdecode(fdecode(fencode(f))) == s
    True
    >>> fdecode(fencode({i: f})) == {i: f}
    True
    >>> fdecode(fdecode(fencode({i: f}))[i]) == s
    True
    >>> fdecode(fdecode(fencode({i: f, I: f}))[i]) == s
    True
    >>> fdecode(fencode(f), recurse=True) == s
    True
    >>> fdecode(fencode(f), recurse=2) == s
    True
    >>> f2 = Fencoded(fencode(f))
    >>> f3 = Fencoded(fencode(f2))
    >>> fdecode(fencode(f3), recurse=True) == s
    True
    >>> fdecode(fencode(f3), recurse=3) == f
    True
    >>> fdecode(fencode({i: f3, I: f2})) == {i: f3, I: f2}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=1) == {i: f3, I: f2}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=2) == {i: f2, I: f}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=3) == {i: f, I: s}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=4) == {i: s, I: s}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=True) == {i: s, I: s}
    True
    """
    def makeLen(i):
        """
        Returns the integer i (0 <= i <= 65535) as a fixed three-character
        length field: the fencoded form of i with its leading 'i' type tag
        and trailing b64 '=' padding stripped (fdecode's getLen() adds
        them back).
        """
        if i > 65535 or i < 0:
            raise ValueError("illegal length for fencoded data"
                    "(0 < x <= 65535)")
        return fencode(i)[1:-1]
    if isinstance(d, int) or isinstance(d, long):
        val = "%x" % d
        neg = False
        c = 'i'
        if isinstance(d, long):
            c = 'o'
        if d < 0:
            # negative numbers: strip the '-' from the hex digits and flag
            # the sign by upper-casing the type tag ('I'/'O')
            neg = True
            val = val[1:]
            c = c.upper()
        # pad hex digits to an even count so .decode('hex') is legal
        if len(val) % 2 != 0:
            val = "0%s" % val
        val = val.decode('hex')
        # pad the byte string to an even length, presumably so the b64
        # form always carries exactly one '=' pad char -- TODO confirm
        if len(val) % 2 != 0:
            val = '\x00' + val
        val = base64.urlsafe_b64encode(val)
        if lenField:
            if len(val) > 65535:
                raise ValueError("value to large for encode")
            return c+makeLen(len(val))+val
        else:
            return c+val
    elif isinstance(d, str):
        # String data may contain characters outside the allowed charset.
        # urlsafe b64encoding ensures that data can be used inside http urls
        # (and other plaintext representations).
        val = base64.urlsafe_b64encode(d)
        if lenField:
            if len(val) > 65535:
                raise ValueError("value to large for encode")
            return 's'+makeLen(len(val))+val
        else:
            return 's'+val
    elif isinstance(d, dict):
        # dicts are encoded as alternating length-prefixed key/value pairs
        result = 'd'
        contents = ""
        for i in d:
            contents = contents + fencode(i,True) + fencode(d[i],True)
        if lenField:
            result = result+makeLen(len(contents))+contents
        else:
            result = result+contents
        return result
    elif isinstance(d, list):
        # lists: concatenation of length-prefixed members
        result = 'l'
        contents = ''
        for i in d:
            contents = contents + fencode(i,True)
        if lenField:
            result = result+makeLen(len(contents))+contents
        else:
            result = result+contents
        return result
    elif isinstance(d, tuple):
        # tuples: same as lists but tagged 't' so fdecode restores the type
        result = 't'
        contents = ''
        for i in d:
            contents = contents + fencode(i,True)
        if lenField:
            result = result+makeLen(len(contents))+contents
        else:
            result = result+contents
        return result
    elif isinstance(d, Fencoded):
        # already-encoded payload is embedded verbatim (no double-encoding)
        result = 'f'
        contents = d.data
        if lenField:
            result = result+makeLen(len(d.data))+contents
        else:
            result = result+contents
        return result
    elif d == None:
        if lenField:
            return 'n'+makeLen(1)+'0'
        else:
            return 'n0'
    else:
        raise ValueError("invalid value passed to fencode: %s" % type(d))
def fdecode(d, lenField=False, recurse=1):
    """
    Takes previously fencoded data and decodes it into its python type(s).
    'lenField' is used internally, and indicates that the fencoded data has
    length fields (used for composition of tuples, lists, dicts, etc).
    'recurse' indicates that fdecode should recursively fdecode Fencoded
    objects if set to True, or that it should recurse to a depth of 'recurse'
    when encountering Fencoded objects if it is an integer value.

    Raises ValueError for data that is not valid fencode output.
    """
    def getLen(s):
        # inverse of fencode's makeLen(): restore the 'i' tag and '=' pad
        # and decode the 3-char length field back to an int
        if len(s) != 3 or not isinstance(s, str):
            raise ValueError("fdecode length strings must be 3 bytes long: '%s'"
                    % s)
        return fdecode('i'+s+'=', recurse=recurse)
    def scanval(valstring, lenField=False):
        """
        scans the given valstring and returns a value and the offset where that
        value ended (as a tuple). If valstring contains more than one value,
        only the length of the first is returned. Otherwise, the entire length
        is returned.
        """
        valtype = valstring[0]
        if lenField:
            start = 4
            end = start+getLen(valstring[1:4])
        else:
            # NOTE(review): this branch computes end = len - 1, which would
            # drop the final character; in this module scanval is only ever
            # called with lenField=True -- confirm before reusing
            start = 1
            end = len(valstring)-1
        #print "  scanval calling fdecode on val[%d:%d]=%s" % (0, end, valstring)
        return (fdecode(valstring[0:end], True, recurse=recurse), end)
    if isinstance(d, Fencoded):
        # unwrap and decode the embedded payload
        return fdecode(d.data, recurse=recurse)
    if not isinstance(d, str):
        raise ValueError("decode takes string data or Fencoded object only,"
                " got %s" % type(d))
    valtype = d[0]
    if lenField:
        length = getLen(d[1:4])
        val = d[4:]
    else:
        val = d[1:len(d)]
    # dispatch on the one-character type tag (see fencode for the tag set)
    if valtype == 'i':
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return int(val, 16)
    elif valtype == 'I':
        # upper-case tag marks a negative number
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return -int(val, 16)
    elif valtype == 'o':
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return long(val, 16)
    elif valtype == 'O':
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return -long(val, 16)
    elif valtype == 's':
        return base64.urlsafe_b64decode(val)
    elif valtype == 'd':
        # dict: alternating length-prefixed key/value members
        result = {}
        while len(val) != 0:
            #print "string is: %s (len=%d)" % (val, len(val))
            (key,l1) = scanval(val, True)
            #print "got key '%s' of length %d" % (key,l1)
            (value,l2) = scanval(val[l1:len(val)], True)
            #print "got value '%s' of length %d" % (value,l2)
            result[key] = value
            val = val[l1+l2:]
        return result
    elif valtype == 'l':
        # list: sequence of length-prefixed members
        result = []
        if lenField:
            pass
        while len(val) != 0:
            (v,l) = scanval(val, True)
            result.append(v)
            val = val[l:]
        return result
    elif valtype == 't':
        # tuple: decoded like a list, then converted
        result = []
        if lenField:
            pass
        while len(val) != 0:
            (v,l) = scanval(val, True)
            result.append(v)
            val = val[l:]
        return tuple(result)
    elif valtype == 'f':
        # embedded pre-encoded payload: decode further only while the
        # requested recursion budget allows (True means unlimited)
        if not isinstance(recurse, bool):
            recurse = recurse-1
        if recurse > 0:
            return fdecode(val, recurse=recurse)
        return Fencoded(val)
    elif valtype == 'n':
        return None
    else:
        raise ValueError("invalid value passed to fdecode"
                " -- cannot fdecode data that wasn't previously fencoded: '%s'"
                % d[:400])
if __name__ == '__main__':
    # run the doctests embedded in fencode()/fdecode()
    import doctest
    doctest.testmod()
| Python |
class Reputation:
    """
    This is the Reputation class. Each node maintains a list of reputation
    objects corresponding to reputations of other nodes. Reputations may be
    self-generated (in which case the originator is this node itself), or may
    be relayed (in which case some other node is the originator).
    Self-generated reputations are vastly more reliable than those relayed --
    relayed reputations are second-hand information, and are more likely to
    have false data.

    The scoring methods read the module-global ``myNodeID`` to decide
    whether a reputation is local.  Integer arithmetic uses floor division
    ('//') so the documented whole-number scores hold under both Python 2
    and Python 3 (plain '/' would become true division under Py3).
    """
    def __init__(self, ID, originator):
        """
        Record a reputation of node *ID* as observed by *originator*.
        Variables designated as '%' have values between 0 and 100.
        """
        self.ID = ID
        self.originator = originator
        self.confidence = 0     # % originator data stored / nodes the
                                # originator stores to
        self.verifiability = 0  # % originator data verified success/failed
        self.availability = 0   # % originator contact attempts success/fail
        self.bandwidth = 0      # avg bandwidth observed from orig. to ID.
        self.age = 0            # age of reputation in days
    def score(self):
        """
        Returns a score for this reputation based in member variables. The
        reputation must be a local reputation, i.e., the originator must
        be equal to the global myNodeID. Otherwise, call scoreRelay().
        Returns -1 for non-local reputations.

        >>> myNodeID = "self"
        >>> rep = Reputation("somenode","self")
        >>> rep.availability = 50
        >>> rep.verifiability = 50
        >>> rep.score()
        33
        >>> rep = Reputation("somenode","someothernode")
        >>> rep.availability = 30
        >>> rep.score()
        -1
        """
        # should find a good adjustment of weights (XXX: machine learning?)
        if self.originator != myNodeID:
            return -1
        # floor division keeps the documented integer result on Py2 and Py3
        return (self.confidence + self.verifiability + self.availability) // 3
        # XXX: should also include age and bandwidth
    def scoreRelay(self):
        """
        Returns a score for this reputation based in member variables. The
        reputation must be a remote reputation, i.e., the originator must
        not be equal to the global myNodeID. Otherwise, call score().
        Returns -1 for local reputations.

        >>> myNodeID = "self"
        >>> rep = Reputation("somenode","self")
        >>> rep.availability = 50
        >>> rep.verifiability = 50
        >>> rep.scoreRelay()
        -1
        >>> rep = Reputation("somenode","someothernode")
        >>> rep.availability = 30
        >>> rep.scoreRelay()
        10
        """
        if self.originator == myNodeID:
            return -1
        return (self.confidence + self.verifiability + self.availability) // 3
        # XXX: should also include age and bandwidth
    def updateConfidence(self, totalDataStored, totalNodesStoredTo):
        """Set confidence to average data stored per node (floor division;
        raises ZeroDivisionError when totalNodesStoredTo is 0)."""
        self.confidence = totalDataStored // totalNodesStoredTo
    def _test(self):
        # run the doctests above (requires myNodeID at module scope)
        import doctest
        doctest.testmod()
if __name__ == '__main__':
    # myNodeID must exist at module scope for score()/scoreRelay() doctests
    myNodeID = "self"
    rep = Reputation("other", "self")
    rep._test()
| Python |
#!/usr/bin/python
"""
FludLocalClient.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
FludLocalClient provides a command-line client for interacting with FludNode.
"""
import sys, os, time
from twisted.internet import reactor
from flud.FludConfig import FludConfig
from flud.fencode import fencode, fdecode
from flud.FludCrypto import hashfile
from protocol.LocalClient import *
logger = logging.getLogger('flud')
class CmdClientFactory(LocalClientFactory):
# Interactive command-line flud client.  promptUser() runs in a worker
# thread (via threads.deferToThread in promptLoop) and marshals every
# factory call back into the reactor thread with callFromThread; results
# and errors are queued on self.msgs and printed by promptLoop.
def __init__(self, config):
LocalClientFactory.__init__(self, config)
# quit: set when the user types 'exit'/'quit'; stops the prompt loop.
self.quit = False
# msgs: (result-or-errmsg, description) tuples queued by the reactor
# thread, drained and printed by promptLoop.
self.msgs = []
def callFactory(self, func, commands, msgs):
# since we can't call factory methods from the promptUser thread, we
# use this as a convenience to put those calls back in the event loop
reactor.callFromThread(self.doFactoryMethod, func, commands, msgs)
def doFactoryMethod(self, func, commands, msgs):
# Invoke the deferred-returning factory call and wire up success/error
# reporting into the shared message queue.
d = func()
d.addCallback(self.queueResult, msgs, '%s succeeded' % commands)
d.addErrback(self.queueError, msgs, '%s failed' % commands)
return d
def promptUser(self):
# Runs in a worker thread: read one command line, dispatch it, return.
# Commands are matched on their first four characters (commandkey).
helpDict = {}
command = raw_input("%s> " % time.ctime())
commands = command.split(' ') # XXX: should tokenize on any whitespace
commandkey = commands[0][:4]
# core client operations
helpDict['exit'] = "exit from the client"
helpDict['help'] = "display this help message"
helpDict['ping'] = "send a GETID() message: 'ping host port'"
helpDict['putf'] = "store a file: 'putf canonicalfilepath'"
helpDict['getf'] = "retrieve a file: 'getf canonicalfilepath'"
helpDict['geti'] = "retrieve a file by CAS key: 'geti fencodedCASkey'"
helpDict['fndn'] = "send a FINDNODE() message: 'fndn hexIDstring'"
helpDict['list'] = "list stored files (read from local metadata)"
helpDict['putm'] = "store master metadata"
helpDict['getm'] = "retrieve master metadata"
helpDict['cred'] = "send encrypted private credentials: cred"\
" passphrase emailaddress"
helpDict['node'] = "list known nodes"
helpDict['buck'] = "print k buckets"
helpDict['stat'] = "show pending actions"
helpDict['stor'] = "store a block to a given node:"\
" 'stor host:port,fname'"
helpDict['rtrv'] = "retrieve a block from a given node:"\
" 'rtrv host:port,fname'"
helpDict['vrfy'] = "verify a block on a given node:"\
" 'vrfy host:port:offset-length,fname'"
helpDict['fndv'] = "retrieve a value from the DHT: 'fndv hexkey'"
helpDict['dlet'] = "delete from the stor: '[XXX]'"
if commandkey == 'exit' or commandkey == 'quit':
self.quit = True
elif commandkey == 'help':
self.printHelp(helpDict)
elif commandkey == 'ping':
# ping a host
# format: 'ping host port'
func = lambda: self.sendPING(commands[1], commands[2])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'putf':
# store a file
# format: 'putf canonicalfilepath'
func = lambda: self.sendPUTF(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'getf':
# retrieve a file
# format: 'getf canonicalfilepath'
func = lambda: self.sendGETF(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'geti':
# retrieve a file by CAS ID
# format: 'geti fencoded_CAS_ID'
func = lambda: self.sendGETI(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'fndn':
# find a node (or the k-closest nodes)
# format: 'fndn hexIDstring'
func = lambda: self.sendFNDN(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'list':
# list stored files
self.callFactory(self.sendLIST, commands, self.msgs)
elif commandkey == 'putm':
# store master metadata
self.callFactory(self.sendPUTM, commands, self.msgs)
elif commandkey == 'getm':
# retrieve master metadata
self.callFactory(self.sendGETM, commands, self.msgs)
elif commandkey == 'cred':
# send encrypted private credentials to an email address
# format: 'cred passphrase emailaddress'
# NOTE(review): the passphrase is sliced out of the raw command
# string as everything between the command word and the final
# token; verify this against passphrases containing repeated or
# trailing spaces.
func = lambda: self.sendCRED(
command[len(commands[0])+1:-len(commands[-1])-1],
commands[-1])
self.callFactory(func, commands, self.msgs)
# the following are diagnostic operations, debug-only utility
elif commandkey == 'node':
# list known nodes
self.callFactory(self.sendDIAGNODE, commands, self.msgs)
elif commandkey == 'buck':
# show k-buckets
self.callFactory(self.sendDIAGBKTS, commands, self.msgs)
elif commandkey == 'stat':
# show pending actions
print self.pending
elif commandkey == 'stor':
# stor a block to a given node. format: 'stor host:port,fname'
# If the second token is not already a hex file id, create a
# symlink named by the file's hash and send that instead.
storcommands = commands[1].split(',')
try:
fileid = int(storcommands[1], 16)
except:
linkfile = fencode(long(hashfile(storcommands[1]),16))
if (os.path.islink(linkfile)):
os.remove(linkfile)
os.symlink(storcommands[1], linkfile)
storcommands[1] = linkfile
# XXX: delete this file when the command finishes
commands[1] = "%s,%s" % (storcommands[0], storcommands[1])
func = lambda: self.sendDIAGSTOR(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'rtrv':
# retrieve a block from a given node. format: 'rtrv host:port,fname'
func = lambda: self.sendDIAGRTRV(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'vrfy':
# verify a block on a given node.
# format: 'vrfy host:port:offset-length,fname'
logger.debug("vrfy(%s)" % commands[1])
func = lambda: self.sendDIAGVRFY(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'dlet':
print "not yet implemented"
elif commandkey == 'fndv':
# try to retrieve a value from the DHT
# format: 'fndv key'
func = lambda: self.sendDIAGFNDV(commands[1])
self.callFactory(func, commands, self.msgs)
elif command != "":
reactor.callFromThread(self.queueError, None, self.msgs,
"illegal command '%s'" % command)
def queueResult(self, r, l, msg):
# Callback (reactor thread): append a (result, description) tuple.
logger.debug("got result %s" % msg)
l.append((r, msg))
def queueError(self, r, l, msg):
# Errback (reactor thread): append (error message or None, description).
logger.debug("got error %s" % msg)
if r:
l.append((r.getErrorMessage(), msg))
else:
l.append((None, msg))
def printHelp(self, helpDict):
helpkeys = helpDict.keys()
helpkeys.sort()
for i in helpkeys:
print "%s:\t %s" % (i, helpDict[i])
def promptLoop(self, r):
# Print completed/failed pending actions, drain queued messages, then
# either stop the reactor (on quit) or re-arm the prompt thread.
# NOTE(review): pops entries from self.pending[c] while iterating
# .keys() — fine on Python 2 (keys() returns a list copy), would raise
# RuntimeError on Python 3.
for c in self.pending:
for i in self.pending[c].keys():
if self.pending[c][i] == True:
print "%s on %s completed successfully" % (c, i)
self.pending[c].pop(i)
elif self.pending[c][i] == False:
print "%s on %s failed" % (c, i)
self.pending[c].pop(i)
else:
print "%s on %s pending" % (c, i)
while len(self.msgs) > 0:
# this prints in reverse order, perhaps pop() all into a new list,
# reverse, then print
(errmsg, m) = self.msgs.pop()
if errmsg:
print "<- %s:\n%s" % (m, errmsg)
else:
print "<- %s" % m
if self.quit:
reactor.stop()
else:
d = threads.deferToThread(self.promptUser)
d.addCallback(self.promptLoopDelayed)
d.addErrback(self.err)
def promptLoopDelayed(self, r):
# give the reactor loop time to fire any quick cbs/ebs
reactor.callLater(0.1, self.promptLoop, r)
def clientConnectionLost(self, connector, reason):
# Suppress reconnect/error handling when the loss was user-initiated.
if not self.quit:
LocalClientFactory.clientConnectionLost(self, connector, reason)
def cleanup(self, msg):
self.quit = True
self.err(msg)
def err(self, r):
# Last-resort error handler: report and tear down the reactor.
print "bah!: %s" % r
reactor.stop()
def main():
# Entry point: load config (optionally overriding the client port from
# argv[1]), attach a file logger, connect to the local FludNode, and run
# the interactive prompt loop under the reactor.
config = FludConfig()
config.load(doLogging=False)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler('/tmp/fludclient.log')
formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)d'
' %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
factory = CmdClientFactory(config)
if len(sys.argv) == 2:
config.clientport = int(sys.argv[1])
print "connecting to localhost:%d" % config.clientport
reactor.connectTCP('localhost', config.clientport, factory)
factory.promptLoop(None)
reactor.run()
if __name__ == '__main__':
main()
| Python |
import urlparse, os, types
from twisted.web import client
from twisted.web import http
from twisted.internet import reactor, defer
from twisted.python import failure
"""
HTTPMultipartDownloader.py (c) 2003-2006 Alen Peacock. This program is
distributed under the terms of the GNU General Public License (the GPL),
version 3.
HTTPMultipartDownloader will download multiple files from a multipart/related.
Note that it does this by using the Content-ID and Content-Length headers in
each multipart, and will fail if those are not present (this could be
genericized to operate without those fields without too much effort)
This code is modeled after twisted.web.client.HTTPDownloader, which is
copyright 2001-2004 Twisted Matrix Laboratories, MIT licensed.
"""
class HTTPMultipartDownloader(client.HTTPDownloader):
"""Download multiple files, via multipart/related."""
# Streaming parser over the multipart body: alternates between
# "sub-header" mode (getSubHeader: boundary + per-part headers) and
# "payload" mode (pagePart: write Content-Length bytes to the file named
# by Content-ID under self.dir).  self.deferred fires with the list of
# filenames written, or with a Failure on I/O error.
protocol = client.HTTPPageDownloader
value = None
def __init__(self, url, dir, method='GET', postdata=None, headers=None,
agent="Flud client", supportPartial=0):
self.requestedPartial = 0
# filenames: every file successfully opened for writing, in order.
self.filenames = []
# dir: destination directory for all downloaded parts.
self.dir = dir
client.HTTPClientFactory.__init__(self, url, method=method,
postdata=postdata, headers=headers, agent=agent)
self.deferred = defer.Deferred()
self.waiting = 1
def gotHeaders(self, headers):
# Validate a partial-content (resume) response, falling back to a
# full download if the server ignored or mangled the Range request.
# parseContentRange comes from twisted.web.http (imported at module
# top).
if self.requestedPartial:
contentRange = headers.get("content-range", None)
if not contentRange:
# server doesn't support partial requests, oh well
self.requestedPartial = 0
return
start, end, realLength = http.parseContentRange(contentRange[0])
if start != self.requestedPartial:
# server is acting weirdly
self.requestedPartial = 0
def openFile(self, partialContent):
# Open self.filename for append (resume) or truncate (fresh) and
# record it in self.filenames.
if partialContent:
file = open(self.filename, 'rb+')
file.seek(0, 2)
else:
file = open(self.filename, 'wb')
self.filenames.append(self.filename)
return file
def pageStart(self, partialContent):
"""Called on page download start.
@param partialContent: tells us if the download is partial download we
requested.
"""
if partialContent and not self.requestedPartial:
raise ValueError, "we shouldn't get partial content response if"\
" we didn't want it!"
self.partialContent = partialContent
if self.waiting:
self.waiting = 0
# Start in sub-header mode; the first bytes of the body are the
# opening multipart boundary.
self.inSubHeader = True
self.file = None
self.boundary = None
def getSubHeader(self, data):
# Parse a boundary line plus the part headers that follow it.  The
# first boundary seen is remembered; "<boundary>--" marks the end of
# the multipart stream.
newboundary = data[:data.find('\r\n')]
data = data[len(newboundary)+2:]
if not self.boundary:
self.boundary = newboundary
if self.boundary != newboundary:
if self.boundary+"--" == newboundary:
# end of multiparts
return
else:
raise ValueError, "found illegal boundary"
# XXX: print some of newboundary *safely*
#raise ValueError, "found illegal boundary: %s, was %s" \
# % (newboundary[:80], self.boundary)
headerEnd = data.find('\r\n\r\n')
if headerEnd != -1:
self.inSubHeader = False
self.subHeaders = {}
headers = data[:headerEnd].split('\r\n')
for header in headers:
k, v = header.split(':',1)
self.subHeaders[k.lower()] = v.lstrip(' ')
if not self.subHeaders.has_key('content-id'):
raise ValueError, "no Content-ID field in multipart,"\
" can't continue"
# XXX: need to check for badness (e.g, "../../) in content-id
self.filename = os.path.join(self.dir,
self.subHeaders['content-id'])
self.file = self.openFile(self.partialContent)
if not self.subHeaders.has_key('content-length'):
raise ValueError, "no Content-Length field in multipart,"\
" can't continue"
self.filesizeRemaining = int(self.subHeaders['content-length'])
# Whatever follows the blank line is payload; hand it back to
# pagePart for writing.
self.pagePart(data[headerEnd+4:])
def pagePart(self, data):
# Write payload bytes until the current part's Content-Length is
# exhausted, then switch back to sub-header mode for the remainder.
if self.inSubHeader:
self.getSubHeader(data)
else:
if not self.file:
raise ValueError, "file %s not open for output" % self.filename
try:
if self.filesizeRemaining > len(data):
self.file.write(data)
self.filesizeRemaining -= len(data)
else:
self.file.write(data[:self.filesizeRemaining])
skipto = self.filesizeRemaining
self.filesizeRemaining = 0
self.file.close()
self.file = None
self.inSubHeader = True
# Skip the CRLF that terminates the part body, then parse
# the next boundary/headers.
self.getSubHeader(data[skipto+2:])
except IOError:
#raise
self.file = None
self.deferred.errback(failure.Failure())
def pageEnd(self):
# End of response: close any open file and fire the deferred with
# the list of filenames written.
if self.file:
try:
self.file.close()
except IOError:
self.deferred.errback(failure.Failure())
return
self.deferred.callback(self.filenames)
def doit():
    """Start a multipart download of http://localhost:1080/ret into /tmp/.

    Returns the factory's deferred, which fires with the list of
    filenames written.
    """
    downloader = HTTPMultipartDownloader("/ret", "/tmp/")
    reactor.connectTCP('localhost', 1080, downloader)
    return downloader.deferred
def didit(r):
    """Report the download outcome (result or failure) and stop the reactor."""
    print("didit: %s" % str(r))
    reactor.stop()
if __name__ == "__main__":
# tries to request http://localhost:1080/ret, which it expects to be
# multipart/related with Content-Length headers
d = doit()
d.addBoth(didit)
reactor.run()
| Python |
#!/usr/bin/python
"""
FludClient.py, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
FludClient provides a GUI Client for interacting with FludNode.
"""
#from twisted.internet import wxreactor
#wxreactor.install()
import sys, os, string, time, glob
import wx
import wx.lib.mixins.listctrl as listmix
import wx.lib.editor.editor
from flud.protocol.LocalClient import *
from flud.FludConfig import FludConfig
from flud.CheckboxState import CheckboxState
FLUSHCHECKTIME = 5*60 # s to wait to flush fludfile.conf
imgdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'images')
mimeMgr = wx.MimeTypesManager()
def getFileIcon(file, il, checkboxes, icondict):
# Return the image-list index of the checkbox-annotated icon strip for
# 'file', keyed by the mime description of its extension.  New icons are
# rendered on demand, appended to the shared image list 'il', and
# memoized in 'icondict' (mutated in place).
ft = mimeMgr.GetFileTypeFromExtension(file[file.rfind('.')+1:])
# XXX: what about from mimetype or magic?
if ft == None:
return icondict['generic']
else:
desc = ft.GetDescription()
if icondict.has_key(desc):
return icondict[desc]
else:
icon = ft.GetIcon()
if icon == None or not icon.Ok():
#print "couldn't find an icon image for %s" % file
# No usable system icon: alias this description to the
# generic file icon.
icondict[desc] = icondict['generic']
return icondict[desc]
bm = wx.BitmapFromIcon(icon)
newimages = makeCheckboxBitmaps(bm, checkboxes)
#il = self.GetImageList()
pos = il.GetImageCount()
for i in newimages:
il.Add(i)
icondict[desc] = pos
#print "%s got a %s image" % (file, ft.GetDescription())
return pos
def getEmptyBitmapAndDC(width, height):
    """Return a cleared wx bitmap of the given size together with a
    memory DC already selected into it, ready for drawing."""
    bitmap = wx.EmptyBitmap(width, height)
    dc = wx.MemoryDC()
    dc.SelectObject(bitmap)
    dc.Clear()
    return (bitmap, dc)
def makeCheckboxBitmaps(basebitmap, checkboxes):
# Compose 'basebitmap' (rescaled to 16x16 if necessary) with each
# checkbox bitmap, producing one 40x16 strip per checkbox state, in the
# same order as 'checkboxes'.
if basebitmap.GetWidth() != 16 or basebitmap.GetHeight() != 16:
img = basebitmap.ConvertToImage()
img.Rescale(16, 16)
basebitmap = img.ConvertToBitmap()
result = []
for i in checkboxes:
bm, dc = getEmptyBitmapAndDC(40,16)
# Icon at the left edge, checkbox overlay at x=20.
dc.DrawBitmap(basebitmap, 0, 0, False)
dc.DrawBitmap(i, 20, 2, False)
result.append(bm)
return result
def createDefaultImageList():
    """Build the shared 40x16 wx.ImageList of icon+checkbox strips.

    Returns (imagelist, checkboxes, icondict) where 'checkboxes' is the
    tuple of checkbox-state bitmaps and 'icondict' maps an icon kind
    ('folder', 'computer', 'drives', 'cdrom', 'floppy', 'removable',
    'generic', 'exec') to the image-list index of its first strip.

    Improvement: the eight copy-pasted register-and-count blocks are
    collapsed into one data-driven loop; insertion order and resulting
    indices are unchanged.
    """
    def getDefaultCheckboxes():
        # One bitmap per checkbox state, in CheckboxState offset order.
        names = ("unchecked1", "checked1", "checkedpartial1",
                 "parentchecked1", "excluded1", "excludedpartial1")
        return tuple(wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-%s.png" % name))) for name in names)

    checkboxes = getDefaultCheckboxes()
    il = wx.ImageList(40, 16)
    # (icon kind, wx art id) in the original registration order.
    kinds = (('folder', wx.ART_FOLDER),
             ('computer', wx.ART_HARDDISK),
             ('drives', wx.ART_HARDDISK),
             ('cdrom', wx.ART_CDROM),
             ('floppy', wx.ART_FLOPPY),
             ('removable', wx.ART_REMOVABLE),
             ('generic', wx.ART_NORMAL_FILE),
             ('exec', wx.ART_EXECUTABLE_FILE))
    icondict = {}
    j = 0
    for name, artid in kinds:
        strips = makeCheckboxBitmaps(wx.ArtProvider_GetBitmap(
                artid, wx.ART_CMN_DIALOG, wx.Size(16, 16)), checkboxes)
        icondict[name] = j
        for strip in strips:
            il.Add(strip)
            j = j + 1
    return il, checkboxes, icondict
class DirCheckboxCtrl(wx.TreeCtrl):
def __init__(self, parent, id=-1, dir=None, pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=(wx.TR_MULTIPLE
| wx.TR_HAS_BUTTONS
| wx.TR_TWIST_BUTTONS
| wx.TR_NO_LINES
| wx.TR_FULL_ROW_HIGHLIGHT
| wx.SUNKEN_BORDER),
validator=wx.DefaultValidator, name=wx.ControlNameStr,
allowExclude=True):
self.allowExclude = allowExclude
wx.TreeCtrl.__init__(self, parent, id, pos, size, style, validator,
name)
self.listeners = []
self.parent = parent
#self.il = self.GetImageList()
#self.checkboxes = self.getDefaultCheckboxes()
self.initTree(dir)
self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.onExpand, self)
self.Bind(wx.EVT_LEFT_UP, self.onClick, self)
self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.onTooltip, self)
self.Bind(wx.EVT_CHAR, self.onChar)
def initTree(self, dir):
self.expandRoot(dir)
# XXX: should expandHome() on first run, then load expanded dir state
# from saved state on subsequent runs.
self.expandHome(dir)
def expandRoot(self, dir):
if not os.path.isdir(dir):
raise ValueError("%s is not a valid directory path")
self.defaultImageList, self.checkboxes, self.icondict \
= createDefaultImageList()
self.AssignImageList(self.defaultImageList)
self.il = self.GetImageList()
if dir == None:
self.rootID = self.AddRoot(dir, self.icondict['computer'], -1,
wx.TreeItemData((dir, True, False,
CheckboxState.UNSELECTED)))
# XXX: getTopLevelDirs() and add them as children
else:
self.rootID = self.AddRoot(dir, self.icondict['folder'], -1,
wx.TreeItemData((dir, True, False,
CheckboxState.UNSELECTED)))
self.expandDir(self.rootID)
self.Expand(self.rootID)
self.stateChangeTime = time.time()
self.flushTime = time.time()
reactor.callLater(FLUSHCHECKTIME, self.checkFlush)
def expandHome(self, dir):
home = os.environ['HOME']
if home:
traversal = home.split(os.path.sep)[1:]
node = self.rootID
for d in traversal:
(ipath, isdir, expanded, istate) \
= self.GetItemData(node).GetData()
self.expandDir(node)
children = self.getChildren(node, False)
childrennames = [self.GetItemText(x) for x in children]
if d in childrennames:
p = childrennames.index(d)
node = children[p]
self.expandDir(node)
self.Expand(node)
else:
print "couldn't traverse to HOME dir on %s" % d
break
def checkFlush(self):
print "checking for flush"
if self.stateChangeTime > self.flushTime:
self.flushTime = time.time()
print "flushing"
self.parent.flushFileConfig()
reactor.callLater(FLUSHCHECKTIME, self.checkFlush)
def expandDir(self, parentID, hideHidden=False, busycursor=True):
def isDriveAvailable(path):
if len(path) == 2 and path[1] == ':':
path = path.lower()
if path[0] == 'a' or path[0] == 'b' or diExists(path):
return True
else:
return False
return True
(path, isDir, expanded, state) = self.GetItemData(parentID).GetData()
if expanded:
return
if not isDriveAvailable(path):
return
if busycursor: wx.BusyCursor()
try:
dirlist = os.listdir(path)
except:
self.SetItemHasChildren(parentID, False)
return
if len(dirlist) == 0:
self.SetItemHasChildren(parentID, False)
return
dirs = []
files = []
for i in dirlist:
if hideHidden and i[0] == '.':
# XXX: dotfile format check is *nix specific
# XXX: if this is a hidden file, don't add it.
pass
elif os.path.isdir(os.path.join(path,i)):
dirs.append(i)
else:
files.append(i)
dirs.sort()
files.sort()
for d in dirs:
child = self.AppendItem(parentID, d)
self.SetPyData(child, (os.path.join(path,d), True, False, 0))
self.SetItemImage(child, self.icondict['folder'],
wx.TreeItemIcon_Normal)
self.SetItemHasChildren(child)
il = self.GetImageList()
for f in files:
child = self.AppendItem(parentID, f) # XXX: unicode?
self.SetPyData(child, (os.path.join(path,f), False, False, 0))
idx = getFileIcon(os.path.join(path,f), il, self.checkboxes,
self.icondict)
self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
self.SetPyData(parentID, (path, isDir, True, state))
def getStates(self, node=None):
if not node:
node = self.rootID
states = {}
(path, isDir, expanded, state) = self.GetItemData(node).GetData()
if state in [CheckboxState.SELECTED, CheckboxState.EXCLUDED]:
states[path] = state
children = self.getChildren(node, False)
for child in children:
states.update(self.getStates(child))
return states
def setStates(self, states):
for i in states:
found = self.findNode(i)
if found:
self.setItemState(found, states[i])
def findNode(self, path):
if path[0] == '/':
path = path[1:] # XXX: unix only
traversal = path.split(os.path.sep)
if traversal[0] == '':
traversal.remove('')
node = self.rootID
while True:
(ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
if len(traversal) == 0:
return node
self.expandDir(node)
children = self.getChildren(node, False)
childrennames = [self.GetItemText(x) for x in children]
firstpath = traversal[0]
if firstpath in childrennames:
p = childrennames.index(firstpath)
node = children[p]
traversal.remove(firstpath)
else:
#print " the file %s is no longer present!" % path
return None
return None
def onExpand(self, event):
self.expandDir(event.GetItem())
self.renderChildren(event.GetItem(), True)
def getFullPath(self, node):
path = self.tree.GetItemText(node)
n = node
while True:
n = self.tree.GetItemParent(n)
if n and n != self.GetRootItem():
path = os.path.join(self.tree.GetItemText(n),path)
else:
break
return path
def renderParents(self, item):
if item == self.rootID:
return
n = item
(path, isDir, expanded, state) = self.GetItemData(item).GetData()
while True:
n = self.GetItemParent(n)
(parentpath, parentisDir, parentexpanded,
parentstate) = self.GetItemData(n).GetData()
#print "parent %s" % parentpath
if n and n != self.GetRootItem():
newstate = parentstate
if parentstate != CheckboxState.UNSELECTED and \
parentstate != CheckboxState.SELECTEDPARENT:
# we only care about changing UNSELECT or SELECTEDPARENT
# states
break
else:
if state == CheckboxState.SELECTED or \
state == CheckboxState.SELECTEDCHILD or \
state == CheckboxState.SELECTEDPARENT:
# if the item (child) is selected in any way, parent
# should be too.
newstate = CheckboxState.SELECTEDPARENT
elif state == CheckboxState.UNSELECTED or \
state == CheckboxState.EXCLUDED:
# if the item (child) is unselected or excluded, the
# parent should be too, /unless/ there are other
# children at the same level who are selected.
children = self.getChildren(n, False)
newstate = CheckboxState.UNSELECTED
for child in children:
(cpath, cisdir, cexp,
cstate) = self.GetItemData(child).GetData()
if cstate == CheckboxState.SELECTED or \
cstate == CheckboxState.SELECTEDCHILD or \
cstate == CheckboxState.SELECTEDPARENT:
newstate = parentstate
if newstate == parentstate:
break
imageidx = self.GetItemImage(n)
imageidx += CheckboxState.offset(parentstate, newstate)
self.SetPyData(n, (parentpath, parentisDir,
parentexpanded, newstate))
self.SetItemImage(n, imageidx)
else:
break
def renderChildren(self, parent, recurse=False):
(parentpath, parentisDir, parentexpanded,
parentstate) = self.GetItemData(parent).GetData()
children = self.getChildren(parent, False)
for child in children:
#path = self.getFullPath(child)
(path, isDir, expanded, state) = self.GetItemData(child).GetData()
imageidx = self.GetItemImage(child)
newstate = state
"""
Here are the state transitions for children based on current states:
('-' = no state change, 'x' = should never occur, '!' = should be
prevented at the parent, '?' = need to consult children)
child
unsel sel selch selpar excl exclch
unsel - ! unsel x - unsel
sel selch - - selch - selch
par selch selch - - selch - selch
selpar x x unsl?selpr x x x
excl exlch ! exlch ! - -
exclch exlch - exlch ! - -
"""
#if parentpath == '/data':
# print "/data pstate = %d" % parentstate
# print " %s = %d" % (path, state)
if state == CheckboxState.UNSELECTED:
if parentstate == CheckboxState.SELECTED or \
parentstate == CheckboxState.SELECTEDCHILD:
newstate = CheckboxState.SELECTEDCHILD
elif parentstate == CheckboxState.EXCLUDED or \
parentstate == CheckboxState.EXCLUDEDCHILD:
newstate = CheckboxState.EXCLUDEDCHILD
elif state == CheckboxState.SELECTEDCHILD:
if parentstate == CheckboxState.UNSELECTED:
newstate = CheckboxState.UNSELECTED
elif parentstate == CheckboxState.SELECTEDPARENT:
if self.checkChildrenStates(child, [CheckboxState.SELECTED,
CheckboxState.SELECTEDPARENT]):
# XXX: did we need to pass in selections to checkChldSt
newstate = CheckboxState.SELECTEDPARENT
else:
newstate = CheckboxState.UNSELECTED
elif parentstate == CheckboxState.EXCLUDED or \
parentstate == CheckboxState.EXCLUDEDCHILD:
newstate = CheckboxState.EXCLUDEDCHILD
elif state == CheckboxState.SELECTEDPARENT:
if parentstate == CheckboxState.SELECTED or \
parentstate == CheckboxState.SELECTEDCHILD:
newstate = CheckboxState.SELECTEDCHILD
elif state == CheckboxState.EXCLUDEDCHILD:
if parentstate == CheckboxState.UNSELECTED:
newstate = CheckboxState.UNSELECTED
elif parentstate == CheckboxState.SELECTED or \
parentstate == CheckboxState.SELECTEDCHILD:
newstate = CheckboxState.SELECTEDCHILD
imageidx += CheckboxState.offset(state, newstate)
self.SetPyData(child, (path, isDir, expanded, newstate))
self.SetItemImage(child, imageidx)
if recurse:
self.renderChildren(child, recurse)
# XXX: why do we renderParents here? It hits the same
# 'parent's over and over and over again. If we want to do
# this, we need to 'collect up' the parents and just call once
# -- this kills performance.
#print "renderParents(%s)" % path
#self.renderParents(child)
def getChildren(self, node, recurse=False):
result = []
child, cookie = self.GetFirstChild(node)
while child:
result.append(child)
if recurse:
result.extend(self.getChildren(child, recurse))
child, cookie = self.GetNextChild(node, cookie)
return result
def checkChildrenStates(self, node, states, ignorelist=[]):
children = self.getChildren(node)
for child in children:
if child not in ignorelist:
(p, d, e, childstate) = self.GetItemData(child).GetData()
for state in states:
if state == childstate:
#print "%s has state %d" % (p, state)
return True
if self.checkChildrenStates(child, states, ignorelist):
# do this even if it is in ignorelist, because it may have
# children which are not in the ignorelist
return True
return False
def getTooltip(self, item):
text = self.GetItemText(item)
(path, isDir, expanded, state) = self.GetItemData(item).GetData()
if state == CheckboxState.SELECTED:
if isDir:
text = "'%s' is SELECTED for backup\n" \
"ALL files within this folder will be backed up\n" \
"(except those explicitly marked for exclusion)" % text
else:
text = "'%s' is SELECTED for backup" % text
elif state == CheckboxState.UNSELECTED:
text = "'%s' is NOT selected for backup" % text
elif state == CheckboxState.SELECTEDPARENT:
text = "some files within '%s' are selected for backup" % text
elif state == CheckboxState.SELECTEDCHILD:
text = "'%s' will be backed up\n" \
"(one of its parent folders is selected)" % text
elif state == CheckboxState.EXCLUDED:
if isDir:
text = "'%s' is EXCLUDED from backup\n" \
"No files within this folder will be backed up" % text
else:
text = "'%s' is EXCLUDED from backup" % text
elif state == CheckboxState.EXCLUDEDCHILD:
text = "'%s' is EXCLUDED from backup\n" \
"(one of its parent folders is EXCLUDED)" % text
return text
def onTooltip(self, event):
item = event.GetItem()
text = self.getTooltip(item)
if text:
event.SetToolTip(text)
else:
event.StopPropagation()
#print dir(event)
def onClick(self, event):
point = (event.GetX(), event.GetY())
item, flags = self.HitTest(point)
if flags & wx.TREE_HITTEST_ONITEMICON:
selections = self.GetSelections()
self.changeState(item, selections)
def onChar(self, event):
if event.KeyCode() == ord('F') and event.ShiftDown() \
and event.ControlDown():
self.flushTime = time.time()
print "flushing"
self.parent.flushFileConfig()
event.Skip()
def changeState(self, item, selections=[]):
self.stateChangeTime = time.time()
(path, isDir, expanded, state) = self.GetItemData(item).GetData()
if item == self.rootID:
parent = None
parentstate = CheckboxState.UNSELECTED
else:
parent = self.GetItemParent(item)
(parentpath, parentisDir, parentexpanded,
parentstate) = self.GetItemData(parent).GetData()
imageidx = self.GetItemImage(item)
# determine newstate from existing state, parent state, and state
# of children
"""
Here are the state transitions for the item based on current
states and parent states: ('-' = no state change, 'x' = should
never occur, '?' = depends on children state)
item
unsel sel selch selpar excl exclch
unsel sel excl sel sel unsel excl
sel sel excl?selpar sel x selch excl
par selch x excl sel sel selch excl
selpar sel excl x sel unsel excl
excl x excl x exclch exclch excl
exclch x excl x exclch exclch excl
"""
newstate = state
if state == CheckboxState.UNSELECTED:
newstate = CheckboxState.SELECTED
elif state == CheckboxState.SELECTEDCHILD:
newstate = CheckboxState.SELECTED
elif state == CheckboxState.SELECTEDPARENT:
if parentstate == CheckboxState.EXCLUDED or \
parentstate == CheckboxState.EXCLUDEDCHILD:
# XXX: this should be impossible to reach...
newstate = CheckboxState.EXCLUDEDCHILD
else:
newstate = CheckboxState.SELECTED
elif state == CheckboxState.SELECTED:
if self.checkChildrenStates(item, [CheckboxState.SELECTED,
CheckboxState.SELECTEDPARENT], selections):
newstate = CheckboxState.SELECTEDPARENT
elif self.allowExclude:
newstate = CheckboxState.EXCLUDED
else:
if parent in selections or \
(parentstate == CheckboxState.UNSELECTED or \
parentstate == CheckboxState.SELECTEDPARENT):
newstate = CheckboxState.UNSELECTED
elif parentstate == CheckboxState.SELECTED or \
parentstate == CheckboxState.SELECTEDCHILD:
newstate = CheckboxState.SELECTEDCHILD
elif state == CheckboxState.EXCLUDED:
if parent in selections or \
(parentstate == CheckboxState.UNSELECTED or \
parentstate == CheckboxState.SELECTEDPARENT):
newstate = CheckboxState.UNSELECTED
elif parentstate == CheckboxState.SELECTED or \
parentstate == CheckboxState.SELECTEDCHILD:
newstate = CheckboxState.SELECTEDCHILD
else:
newstate = CheckboxState.EXCLUDEDCHILD
elif state == CheckboxState.EXCLUDEDCHILD:
newstate = CheckboxState.EXCLUDED
if len(selections) > 1:
# if we have multiple selections, the idea is to move all the
# selections to the newstate defined above, or to valid
# unselected or inherited states if the move to newstate would
# be invalid.
"""
Here are the state transitions for the item based on the
newstate as determined by the clicked item and the current
states: ('-' = no state change, '?' = consult children)
item
unsel sel selch selpar excl exclch
unsel - unsel - - unsel -
sel sel - sel sel sel -
newstate selch - unsel - - unsel -
selpar - unsel - - unsel -
excl excl excl?slpr excl excl - excl
exclch - unsel - - unsel -
"""
for i in selections:
(mpath, misDir, mexpanded, mstate) = self.GetItemData(
i).GetData()
mnewstate = mstate
if mstate == CheckboxState.UNSELECTED or \
mstate == CheckboxState.SELECTEDCHILD or \
mstate == CheckboxState.SELECTEDPARENT:
if newstate == CheckboxState.SELECTED or \
newstate == CheckboxState.EXCLUDED:
mnewstate = newstate
elif mstate == CheckboxState.SELECTED:
if newstate == CheckboxState.UNSELECTED or \
newstate == CheckboxState.SELECTEDCHILD or \
newstate == CheckboxState.SELECTEDPARENT or \
newstate == CheckboxState.EXCLUDEDCHILD:
mnewstate = CheckboxState.UNSELECTED
elif newstate == CheckboxState.EXCLUDED:
if self.checkChildrenStates(i,
[CheckboxState.SELECTED,
CheckboxState.SELECTEDPARENT], selections):
mnewstate = CheckboxState.SELECTEDPARENT
else:
mnewstate = newstate
elif mstate == CheckboxState.EXCLUDED:
if newstate == CheckboxState.UNSELECTED or \
newstate == CheckboxState.SELECTEDCHILD or \
newstate == CheckboxState.SELECTEDPARENT or \
newstate == CheckboxState.EXCLUDEDCHILD:
mnewstate = CheckboxState.UNSELECTED
elif newstate == CheckboxState.SELECTED:
mnewstate = newstate
elif mstate == CheckboxState.EXCLUDEDCHILD:
if newstate == CheckboxState.EXCLUDED:
mnewstate = newstate
self.setItemState(i, mnewstate)
self.setItemState(item, newstate, (path, isDir, expanded, state,
imageidx))
def setItemState(self, item, newstate, oldData=None):
    """
    Move tree item *item* to *newstate*, updating its checkbox image and
    re-rendering its children and parents to reflect the change.

    oldData, if given, is the (path, isDir, expanded, state, imageidx)
    tuple already fetched by the caller (saves a second lookup); otherwise
    the item's current data and image index are queried from the tree.
    """
    if oldData:
        (path, isDir, expanded, state, imageidx) = oldData
    else:
        (path, isDir, expanded, state) = self.GetItemData(item).GetData()
        imageidx = self.GetItemImage(item)
    # checkbox icons are laid out so that moving between two states is a
    # fixed image-index delta, computed by CheckboxState.offset()
    imageidx += CheckboxState.offset(state, newstate)
    self.SetPyData(item, (path, isDir, expanded, newstate))
    self.SetItemImage(item, imageidx)
    self.renderChildren(item, True)
    self.renderParents(item)
def getTopLevelDrives(self):
    """
    Return the list of top-level filesystem roots to display: the drive
    letters on Windows, '/' everywhere else.
    """
    # don't shadow the 'sys' module (the old local was named 'sys')
    system = platform.system()
    if system == 'Windows':
        # XXX: need to test this all out
        import win32api, string
        drives = win32api.GetLogicalDriveStrings()
        # GetLogicalDriveStrings returns a NUL-separated, NUL-terminated
        # string, so a plain split leaves a trailing empty entry; drop
        # empty strings so we don't create a bogus drive
        driveletters = [d for d in string.splitfields(drives, '\000') if d]
        for d in driveletters:
            # NOTE(review): entries from GetLogicalDriveStrings already
            # look like "C:\" -- confirm the format passed to GetDriveType
            drivetype = win32api.GetDriveType("%s:\\" % d)
            # XXX: set the appropriate icon based on drivetype
        return driveletters
    else:  # Unix, OSX, etc.
        return ['/']
def addListener(self, callback):
    """Register callback(item, data), invoked on every SetPyData() call."""
    self.listeners.append(callback)
def SetPyData(self, item, data):
    """Store *data* on *item*, then notify every registered listener."""
    wx.TreeCtrl.SetPyData(self, item, data)
    for notify in self.listeners:
        notify(item, data)
"""
Tests for DirCheckboxCtrl
A number of unit tests must be performed on the DirCheckboxGUI widget when
refactoring. Add to this list so that it becomes comprehensive.
Basic Tests:
1. Click on a top-level UNSELECTED object in the tree [should become SELECTED].
- Click again [should become EXCLUDED].
- Click again [should become UNSELECTED].
2. Click on a non-top-level UNSELECTED object in the tree that has no SELECTED
children [should become SELECTED, its parents should become SELECTEDPARENT and
its children SELECTEDCHILD].
- Click again [should become EXCLUDED, its parents who were SELECTEDPARENT
should become UNSELECTED, and its UNSELECTED children should become
EXCLUDED].
- Click again [should become UNSELECTED, and its children should become
UNSELECTED].
3. Change two children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click one child to become EXCLUDED [parents should stay in SELECTEDPARENT]
- Click the same child to become UNSELECTED [parents should stay in
SELECTEDPARENT]
- Click the other child to become EXCLUDED [parents should become
UNSELECTED]
4. Choose a folder and a child item.
- Click the child to become SEL [parent should be SELPAR]
- Click the parent [parent should become SEL]
- Click the parent again [parent should become SELPAR]
5. Choose a folder and a child item.
- Click the parent to become SEL [child should become SELCHILD]
- Click the child [child should become SEL]
- Click the child again [child should become EXCL]
- Click the child again [child should become SELCHILD]
6. Pick a node with children at least two-deep. Change two of the
at-least-two-deep children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click parent closest to SELECTED children to SELECTED [two children remain
in SELECTED, all other children become SELECTEDCHILD. Parent[s] of parent
remain SELECTEDPARENT]
- Click one child twice to become SELECTEDCHILD [child should not be able to
be UNSELECTED, parent states should not change]
- Click other child twice to become SELECTEDCHILD [child should not be able
to be UNSELECTED, parent states should not change]
7. Pick a node with children at least two-deep.
- Click deepest parent to SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click same parent again to become EXCLUDED [Parent[s] of parent become
UNSELECTED]
- Click same parent again to become UNSELECTED [Parent[s] of parent remain
UNSELECTED]
8. Pick a node with children at least two-deep.
- Click deepest child to become SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click the topmost parent to become SELECTED [children become
SELECTEDCHILD]
- Click the topmost parent again to become SELECTEDPARENT [middle child
should become SELECTEDPARENT]
Multi-Selection Tests:
1. Multi-select three items at the same level and in the same state. Toggle
between the three main states [SELECTED, EXCLUDED, UNSELECTED]
2. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the SEL item to see that all three items become
EXCL.
3. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the EXCL item to see that all three items become
UNSEL.
4. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the UNSEL item to see that all three items become
SEL.
5. Choose three items that are nested within each other: a parent folder, one
of its children folders, and a file/folder in the child folder. Choose one
other item from the child folder.
- set the top parent to UNSEL
- set the child folder to SEL [parent become SELPAR]
- set the child item to SEL
- set the other item to EXCL
- multi-select all four items
- 5A. click on the top parent (which was in SELPAR) [All four items should
become SEL, all children of any of these items should become SELCHILD].
Toggle twice more [all selected items should toggle to EXCL, then to
UNSEL]
- 5B. reset as above, click on the child folder [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5C. reset as above, click on the child item [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5D. reset as above, click on the other item [All four items should
become UNSEL]. Toggle twice more [all selected items should go to SEL,
then EXCL]
6. Choose a folder, one of its subfolders, a subfolder of the subfolder, an item in the deepest subfolder, and an item in the first subfolder, e.g.:
[] A
[] B
[] C
[] D
[] E
- change item 'D' to SEL [parents 'A', 'B', and 'C' should go to SELPAR]
- change item 'E' to EXCL
- multi-select 'A', 'C', and 'E'
- toggle 'E' to UNSEL [all other selections should stay in current state]
- toggle 'E' to SEL ['A' and 'B' become SEL, their children become SELCHILD]
- toggle 'E' back to EXCL [should get our original multi-select setup back]
- toggle 'C' to SEL [all selections to SEL, children to SELCHILD]
- toggle 'C' to SELPAR ['A' and 'C' to SELPAR, 'E' to UNSEL]
- toggle 'E' twice [should get our original multi-select setup back]
"""
class CheckFileListCtrlMixin:
    """Mixin that binds a left-button-up handler on the inheriting control."""
    # for some insane reason, we can't get EVT_LEFT_DOWN (or _UP) to bind in
    # FileListCtrl itself. But we are sneaky and can do it by lots of clever
    # hax0ry, like by using this silly mixin.
    def __init__(self, toCall):
        # toCall: handler invoked with the wx mouse event on left-button-up
        self.Bind(wx.EVT_LEFT_UP, toCall)
class FileListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin,
CheckFileListCtrlMixin):
"""
Implements a file list control, with a peerctrl that contains the
filesystem model. Currently, this peerctrl must implement an
addListener(), changeState(), GetItemData(), expandDir(), GetSelections(),
and GetChildren() API similar to that implemented by DirCheckBoxCtrl.
"""
def __init__(self, parent, peerctrl, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.LC_REPORT,
validator=wx.DefaultValidator, name=wx.ListCtrlNameStr):
wx.ListCtrl.__init__(self, parent, id, pos, size, style, validator,
name)
CheckFileListCtrlMixin.__init__(self, self.OnClick)
listmix.ListCtrlAutoWidthMixin.__init__(self)
self.peerctrl = peerctrl
self.peerctrl.addListener(self.itemChanged)
self.itemdict = {} # a dict with filepath as key, containing tuples of
# (index into ListCtrl, reference to peerctrl object)
self.stopsearch = False
self.il, self.checkboxes, self.icondict = createDefaultImageList()
self.AssignImageList(self.il, wx.IMAGE_LIST_SMALL)
self.il = self.GetImageList(wx.IMAGE_LIST_SMALL)
self.InsertColumn(0, "Filename")
self.InsertColumn(1, "Location")
#self.InsertColumn(2, "Last Backup")
#self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
#self.SetColumnWidth(1, -1) #wx.LIST_AUTOSIZE)
self.Bind(wx.EVT_MOTION, self.mouseMotion)
self.searchSourceItems = []
def itemChanged(self, item, data):
(path, isDir, expanded, state) = data
if self.itemdict.has_key(path):
item = self.itemdict[path][0]
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
self.SetItemImage(item, image)
def GetAll(self, excludeStates=[]):
result = []
start = -1
for i in range(self.GetItemCount()):
item = self.GetNextItem(start, wx.LIST_NEXT_ALL)
# XXX: only append if not in excludeStates
result.append(item)
start = item
return result
def GetSelections(self):
result = []
start = -1
for i in range(self.GetSelectedItemCount()):
item = self.GetNextItem(start, wx.LIST_NEXT_ALL,
wx.LIST_STATE_SELECTED)
result.append(item)
start = item
return result
def GetPeerSelections(self, selections):
result = []
for item in selections:
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
if self.itemdict.has_key(path):
result.append(self.itemdict[path][1])
return result
def mouseMotion(self, event):
point = event.GetPosition()
item, flags = self.HitTest(point)
if flags == wx.LIST_HITTEST_ONITEMICON:
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
text = self.peerctrl.getTooltip(self.itemdict[path][1])
tip = wx.ToolTip(text)
self.SetToolTip(tip)
#tipwin = tip.GetWindow()
#tippos = tipwin.GetPosition()
#print "%s vs %s" % (tippos, point)
#tipwin.SetPosition(point)
def OnClick(self, event):
point = event.GetPosition()
item, flags = self.HitTest(point)
if flags == wx.LIST_HITTEST_ONITEMICON:
peerselections = self.GetPeerSelections(self.GetSelections())
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
ditem = self.itemdict[path][1] # raises if not present
self.peerctrl.changeState(ditem, peerselections)
(path, isDir, expanded, state) \
= self.peerctrl.GetItemData(ditem).GetData()
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
self.SetItemImage(item, image)
def searchButtonAction(self, event):
selections = self.peerctrl.GetSelections()
if len(selections) == 0:
return ("Please tell me where to search. Select one or more"
" folders in the left-hand panel (hold down SHIFT or"
" CTRL for multiple selection), then click the 'find!'"
" button again.", None)
else:
self.DeleteAllItems()
self.itemdict = {}
b = wx.BusyCursor()
searchSourceItems = []
for i in selections:
self.addResults(i, event.searchstring)
searchSourceItems.append(i)
self.searchSourceItems = [self.peerctrl.GetItemData(s).GetData()[0]
for s in searchSourceItems]
print "sources: %s" % self.searchSourceItems
return ("Search results will appear as files that match your"
" search are found.", None)
return (None, None)
def addResults(self, ditem, searchstring):
(path, isDir, expanded, state) \
= self.peerctrl.GetItemData(ditem).GetData()
position = self.GetItemCount()
if isDir:
if not expanded:
self.peerctrl.expandDir(ditem, busycursor=False)
children = self.peerctrl.getChildren(ditem)
for c in children:
self.addResults(c, searchstring)
wx.Yield()
if self.stopsearch:
break
else:
terms = [x for x in searchstring.split(' ') if x != '']
for term in terms:
print path
if path.find(term) > 0:
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
dirname, filename = os.path.split(path)
index = self.InsertImageStringItem(position, filename,
image)
self.SetStringItem(index, 1, dirname)
self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.itemdict[path] = (index, ditem)
break
def setGroup(self, state):
items = self.GetAll()
item = items[0]
peerselections = self.GetPeerSelections(items)
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
ditem = self.itemdict[path][1] # raises if not present
while True:
# cycle until the items state matches the desired state
self.peerctrl.changeState(ditem, peerselections) # can be slow
(path, isDir, expanded, nstate) \
= self.peerctrl.GetItemData(ditem).GetData()
if nstate == state:
break
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
self.SetItemImage(item, image)
return self.searchSourceItems
class GroupSelectionCheckbox(wx.Panel):
    """
    Three-state checkbox (unselected / selected / excluded) with a text
    label, used to apply a BACKUP or EXCLUDE rule to a whole group of
    search results at once.  setGroupState, if given, is called with the
    new CheckboxState whenever the user clicks the checkbox.
    """

    def __init__(self, parent, id=-1, setGroupState=None):
        wx.Panel.__init__(self, parent, id)
        self.setGroupState = setGroupState
        self.ubm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-unchecked1.png")))
        self.cbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-checked1.png")))
        self.ebm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-excluded1.png")))
        self.checkboxButton = wx.BitmapButton(self, -1, self.ubm,
                style=wx.NO_BORDER)
        self.Bind(wx.EVT_BUTTON, self.onCheckbox, self.checkboxButton)
        self.description = wx.StaticText(self, -1,
                "always BACKUP any files that match these search criteria ")
        self.state = CheckboxState.UNSELECTED
        self.gbSizer = wx.GridBagSizer(1, 2)
        self.gbSizer.Add(self.checkboxButton, (0, 0), flag=wx.ALIGN_CENTER)
        self.gbSizer.Add(self.description, (0, 1), flag=wx.ALIGN_CENTER)
        self.gbSizer.AddGrowableRow(1)
        self.SetSizerAndFit(self.gbSizer)

    def Enable(self, enable=True):
        """Enable (or disable) both the button and its label."""
        self.checkboxButton.Enable(enable)
        self.description.Enable(enable)

    def Disable(self):
        self.Enable(False)

    def clear(self):
        """Reset to UNSELECTED and restore the default label."""
        self.checkboxButton.SetBitmapLabel(self.ubm)
        self.state = CheckboxState.UNSELECTED
        self.description.SetLabel(
                "always BACKUP any files that match these search criteria")

    def setState(self, state):
        """Set the widget to *state* WITHOUT invoking setGroupState."""
        self.state = state
        rendering = {
            CheckboxState.UNSELECTED: (self.ubm,
                "always BACKUP any files that match these search criteria"),
            CheckboxState.SELECTED: (self.cbm,
                "always BACKUP any files that match these search criteria"),
            CheckboxState.EXCLUDED: (self.ebm,
                "always EXCLUDE any files that match these search criteria"),
        }.get(state)
        if rendering is None:
            # unknown state: record it but leave the rendering untouched
            return
        bitmap, label = rendering
        self.checkboxButton.SetBitmapLabel(bitmap)
        self.description.SetLabel(label)

    def onCheckbox(self, event):
        """Advance UNSELECTED -> SELECTED -> EXCLUDED -> UNSELECTED and
        notify setGroupState of the new state."""
        transition = {
            CheckboxState.UNSELECTED:
                (self.cbm, None, CheckboxState.SELECTED),
            CheckboxState.SELECTED:
                (self.ebm,
                 "always EXCLUDE any files that match these search criteria",
                 CheckboxState.EXCLUDED),
            CheckboxState.EXCLUDED:
                (self.ubm,
                 "always BACKUP any files that match these search criteria",
                 CheckboxState.UNSELECTED),
        }.get(self.state)
        if transition is None:
            return
        bitmap, label, nextstate = transition
        self.checkboxButton.SetBitmapLabel(bitmap)
        if label is not None:
            self.description.SetLabel(label)
        self.state = nextstate
        if self.setGroupState:
            self.setGroupState(nextstate)
class SearchPanel(wx.Panel):
    """
    Panel combining the search text field and 'find!' button, the search
    results list (a FileListCtrl backed by dircheckbox), and a group
    checkbox that applies a BACKUP/EXCLUDE rule to all results at once.
    """
    def __init__(self, parent, dircheckbox, id=-1, searchButtonAction=None):
        # dircheckbox: the DirCheckboxCtrl acting as the filesystem model
        # searchButtonAction: optional callback(event, errmsg=..., infomsg=...)
        #   invoked after each search so the frame can display messages
        wx.Panel.__init__(self, parent, id)
        self.dircheckbox = dircheckbox
        self.searchButtonAction = searchButtonAction
        self.SetAutoLayout(False)
        self.rules = {} # should refer to something from fludrules
        self.searchField = wx.TextCtrl(self, -1,
                "search for files to backup here", size=wx.Size(-1,-1),
                style=wx.TE_PROCESS_ENTER)
        self.searchField.SetToolTipString('find files within directories'
                ' selected to the left by entering search terms here')
        self.searchField.Bind(wx.EVT_TEXT_ENTER, self.onSearchClick)
        self.searchField.Bind(wx.EVT_LEFT_DOWN, self.selectAllText)
        self.searchField.Bind(wx.EVT_KILL_FOCUS, self.unfocused)
        self.searchButton = wx.Button(self, -1, 'find!', name='searchButton')
        self.Bind(wx.EVT_BUTTON, self.onSearchClick, self.searchButton)
        self.searchResults = FileListCtrl(self, dircheckbox, -1,
                name='searchResults', style=wx.SUNKEN_BORDER | wx.LC_REPORT)
        self.searchResults.SetExtraStyle(0)
        self.searchResults.SetLabel('found files')
        self.groupSelection = GroupSelectionCheckbox(self, -1, self.setGroup)
        self.groupSelection.Disable()
        self.gbSizer = wx.GridBagSizer(3,2)
        self.gbSizer.Add(self.searchField, (0,0), flag=wx.EXPAND)
        self.gbSizer.Add(self.searchButton, (0,1))
        self.gbSizer.Add(self.searchResults, (1,0), (1,2),
                flag=wx.EXPAND|wx.TOP, border=5)
        self.gbSizer.Add(self.groupSelection, (2,0) )
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)

    def onSearchClick(self, event):
        """Start a search, or stop the one in progress (button toggles)."""
        event.searchstring = self.searchField.GetValue()
        if self.searchButton.GetLabel() == 'stop!':
            # a search is running; request that it stop
            self.searchButton.SetLabel('find!')
            self.searchResults.stopsearch = True
            return
        else:
            self.groupSelection.clear()
            self.groupSelection.Disable()
            self.searchButton.SetLabel('stop!')
            self.searchButton.Update()
            err, info = self.searchResults.searchButtonAction(event)
            selections = self.searchResults.searchSourceItems
            # see if we should set the checkbox button from a previous rule
            state = None
            if len(selections) > 0 and self.rules.has_key(selections[0]):
                rule = self.rules[selections[0]]
                if self.rules[selections[0]].has_key(event.searchstring):
                    state = self.rules[selections[0]][event.searchstring]
                # only restore the checkbox if every searched folder shares
                # the exact same rule set
                for i in selections:
                    if not self.rules.has_key(i) or self.rules[i] != rule:
                        state = None
                        break
                    #for j in self.rules[i]:
            if state:
                print "should restore checkbox to %s" % state
                self.groupSelection.setState(state)
            self.searchButton.SetLabel('find!')
            self.searchResults.stopsearch = False
            if self.searchButtonAction:
                self.searchButtonAction(event, errmsg=err, infomsg=info)
            self.groupSelection.Enable()

    def selectAllText(self, event):
        """First click into the field selects all text; later clicks don't."""
        if wx.Window.FindFocus() != self.searchField:
            self.searchField.SetSelection(-1,-1)
            self.searchField.SetFocus()
        else:
            self.searchField.SetSelection(0,0)
            event.Skip()

    def unfocused(self, event):
        # clear any selection when the field loses focus
        self.searchField.SetSelection(0,0)

    def setGroup(self, state):
        """Apply *state* to every search result and record it in rules."""
        b = wx.BusyCursor()  # busy cursor lives until this method returns
        selections = self.searchResults.setGroup(state)
        for s in selections:
            if not self.rules.has_key(s):
                self.rules[s] = {}
            if state == CheckboxState.UNSELECTED:
                # an UNSELECTED group means "no rule": drop any entry
                # NOTE(review): bare except -- presumably guarding KeyError;
                # consider narrowing it
                try:
                    self.rules.pop(s)
                except:
                    pass
            else:
                self.rules[s][self.searchField.GetValue()] = state
        print self.rules
class FilePanel(wx.SplitterWindow):
    """
    The 'Backup Files' tab: a splitter with the filesystem tree
    (DirCheckboxCtrl) on the left and the SearchPanel on the right.
    Loads rule/state configuration from the flud home directory on startup
    and flushes tree state back to fludfile.conf on shutdown.
    """
    def __init__(self, parent, searchButtonAction=None):
        # Use the WANTS_CHARS style so the panel doesn't eat the Return key.
        wx.SplitterWindow.__init__(self, parent, -1,
                style=wx.SP_LIVE_UPDATE | wx.CLIP_CHILDREN | wx.WANTS_CHARS)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.SetNeedUpdating(True)
        self.tree = DirCheckboxCtrl(self, -1, dir="/")
        # XXX: fludrules.init path should be in config
        self.fludrules = self.getFludHome()+"/fludrules.init"
        if not os.path.isfile(self.fludrules):
            # first run: copy the default rules file into FLUDHOME and
            # apply its base rules to the tree
            # XXX: do the other first time stuff (email encrypted credentials,
            # etc.)
            parent.SetMessage("Welcome. This appears to be the first"
                    " time you've run flud. We've automatically selected some"
                    " files for backup. You can make changes by"
                    " selecting/deselecting files and directories. When you are"
                    " done, simply close this window.")
            src = open('fludrules.init', 'r')
            dst = open(self.fludrules, 'w')
            filerules = src.read()
            dst.write(filerules)
            dst.close()
            src.close()
            # NOTE(review): eval() of a config file executes arbitrary code
            # if the file is tampered with; consider a restricted parser
            filerules = eval(filerules)
            rulestates = {}
            for rule in filerules['baserules']:
                value = filerules['baserules'][rule]
                # rules may contain env vars and globs; expand to real paths
                rule = glob.glob(os.path.expandvars(rule))
                for r in rule:
                    rulestates[r] = value
            self.tree.setStates(rulestates)
        # XXX: fludfile.conf path should be in config
        self.fludfiles = self.getFludHome()+"/fludfile.conf"
        print self.fludfiles
        if os.path.isfile(self.fludfiles):
            # NOTE(review): 'file' shadows the builtin, and eval() here has
            # the same code-execution concern as above
            file = open(self.fludfiles, 'r')
            states = eval(file.read())
            self.tree.setStates(states)
            file.close()
        self.searchPanel = SearchPanel(self, dircheckbox=self.tree,
                searchButtonAction=searchButtonAction)
        self.SetMinimumPaneSize(20)
        self.SplitVertically(self.tree, self.searchPanel) #, 300)

    def getFludHome(self):
        """Return (creating it if needed) the flud home directory;
        FLUDHOME overrides the default of $HOME/.flud."""
        if os.environ.has_key('FLUDHOME'):
            fludhome = os.environ['FLUDHOME']
        else:
            fludhome = os.environ['HOME']+"/.flud"
        if not os.path.isdir(fludhome):
            os.mkdir(fludhome, 0700)
        return fludhome

    def shutdown(self, event):
        """Persist tree state, then continue normal close processing."""
        self.flushFileConfig()
        event.Skip()

    def flushFileConfig(self):
        """Write the tree's path->state map out to fludfile.conf."""
        states = self.tree.getStates()
        f = open(self.fludfiles, 'w')
        f.write(str(states))
        f.close()
        for i in states:
            print "%s %s" % (i, states[i])

    def OnSize(self, event):
        # keep the tree filling the pane as the splitter resizes
        w,h = self.GetClientSizeTuple()
        if self.tree:
            self.tree.SetDimensions(0, 0, w, h)
        event.Skip()
class RestoreCheckboxCtrl(DirCheckboxCtrl):
    """
    Tree control for the Restore tab: populated from the stored backup
    metadata (listMeta) instead of the local filesystem, with exclusion
    disabled (files are either restored or not).
    """
    # XXX: child/parent selection/deselection isn't quite right still, esp wrt
    # root node. repro:
    # -/
    #  -d1
    #   -f1
    #  -d2
    #   -d3
    #    -f2
    #   -f3
    # with nothing selected, select d3 and f3, then select root, then deselect
    # d3 and f3
    def __init__(self, parent, id=-1, config=None, pos=wx.DefaultPosition,
            size=wx.DefaultSize,
            style=(wx.TR_MULTIPLE
                    | wx.TR_HAS_BUTTONS
                    | wx.TR_TWIST_BUTTONS
                    | wx.TR_NO_LINES
                    | wx.TR_FULL_ROW_HIGHLIGHT
                    | wx.SUNKEN_BORDER),
            validator=wx.DefaultValidator, name=wx.ControlNameStr):
        self.config = config
        # allowExclude=False: the restore view has no EXCLUDED state
        DirCheckboxCtrl.__init__(self, parent, id, config, pos, size, style,
                validator, name, allowExclude=False)

    def initTree(self, config):
        # populate from backup metadata, then drill down to the first
        # interesting (branching) directory
        self.expandRoot(config)
        self.expandUntilMultiple()

    def expandRoot(self, config):
        """Create the root node and populate the tree from stored metadata."""
        self.defaultImageList, self.checkboxes, self.icondict \
                = createDefaultImageList()
        self.AssignImageList(self.defaultImageList)
        self.il = self.GetImageList()
        self.rootID = self.AddRoot("/", self.icondict['computer'], -1,
                wx.TreeItemData(("", True, False, CheckboxState.UNSELECTED)))
        self.update()

    def expandUntilMultiple(self):
        """Expand single-child directories until a branch (or leaf)."""
        node = self.rootID
        while True:
            (ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
            children = self.getChildren(node, False)
            if len(children) > 1 or len(children) == 0:
                break;
            node = children[0]
            self.Expand(node)

    def update(self):
        """Sync the tree with the backup master metadata (listMeta),
        adding any paths not yet present."""
        master = listMeta(self.config)
        for i in master:
            # skip non-path entries (dict values hold other metadata)
            if not isinstance(master[i], dict):
                traversal = i.split(os.path.sep)
                node = self.rootID
                path = "/"
                if traversal[0] == '':
                    traversal.remove('')
                for n in traversal:
                    path = os.path.join(path, n)
                    children = self.getChildrenDict(node)
                    if n == traversal[-1] and not n in children:
                        # last component: a file entry
                        child = self.AppendItem(node, n)
                        self.SetPyData(child, (path, False, False, 0))
                        idx = getFileIcon(i, self.il, self.checkboxes,
                                self.icondict)
                        self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
                    else:
                        # intermediate component: a directory entry
                        if not n in children:
                            child = self.AppendItem(node, n)
                            self.SetPyData(child, (path, True, False, 0))
                            self.SetItemImage(child, self.icondict['folder'],
                                    wx.TreeItemIcon_Normal)
                        else:
                            child = children[n]
                    node = child
        self.Expand(self.rootID)

    def getChildrenDict(self, node):
        """Return {item text: tree item} for node's immediate children."""
        result = {}
        child, cookie = self.GetFirstChild(node)
        while child:
            result[self.GetItemText(child)] = child
            child, cookie = self.GetNextChild(node, cookie)
        return result

    def onExpand(self, event):
        # overrides DirCheckboxCtrl: nothing to lazily populate here
        pass

    def getSelected(self, startNode=None):
        """Recursively collect non-directory items marked for restore."""
        if not startNode:
            startNode = self.rootID
        children = self.getChildren(startNode)
        selected = []
        for n in children:
            (path, isDir, expanded, state) = self.GetItemData(n).GetData()
            if not isDir \
                    and (state == CheckboxState.SELECTED \
                    or state == CheckboxState.SELECTEDCHILD):
                selected.append(n)
            if isDir and (state == CheckboxState.SELECTED \
                    or state == CheckboxState.SELECTEDPARENT \
                    or state == CheckboxState.SELECTEDCHILD):
                selected += self.getSelected(n)
        return selected
class RestorePanel(wx.Panel):
    """
    The 'Restore' tab: a RestoreCheckboxCtrl over the backed-up files plus
    a button that requests retrieval of the selected files via the client
    factory.  Restored items turn green; failed ones turn red.
    """
    def __init__(self, parent, config, factory):
        self.config = config
        self.factory = factory
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.tree = RestoreCheckboxCtrl(self, -1, config, #wx.TreeCtrl(self, -1,
                style=(wx.TR_MULTIPLE
                        | wx.TR_HAS_BUTTONS
                        | wx.TR_TWIST_BUTTONS
                        | wx.TR_NO_LINES
                        | wx.TR_FULL_ROW_HIGHLIGHT
                        | wx.SUNKEN_BORDER))
        self.restoreButton = wx.Button(self, -1, 'restore selected files',
                name='restoreButton')
        self.Bind(wx.EVT_BUTTON, self.onRestoreClick, self.restoreButton)
        self.gbSizer = wx.GridBagSizer(2,1)
        self.gbSizer.Add(self.tree, (0,0), flag=wx.EXPAND|wx.ALL, border=0)
        self.gbSizer.Add(self.restoreButton, (1,0), flag=wx.EXPAND|wx.ALL,
                border=0)
        self.gbSizer.AddGrowableRow(0)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)

    def update(self):
        """Refresh the tree from the current backup metadata."""
        self.tree.update()

    def OnSize(self, event):
        w,h = self.GetClientSizeTuple()
        event.Skip()

    def onTooltip(self, event):
        pass

    def onRestoreClick(self, event):
        """Issue an async GETF request for each selected file."""
        for n in self.tree.getSelected():
            (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
            print "restoring %s" % path
            d = self.factory.sendGETF(path)
            d.addCallback(self.restored, n)
            d.addErrback(self.restoreFailed, n)
        self.tree.UnselectAll()

    def restored(self, res, n):
        # success callback: mark the item green and toggle its state back
        (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
        print "yay, %s" % path
        self.tree.SetItemTextColour(n, '#005804')
        self.tree.changeState(n)

    def restoreFailed(self, err, n):
        # failure errback: mark the item red and toggle its state back
        (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
        print "boo, %s: %s" % (path, err)
        self.tree.SetItemTextColour(n, wx.RED)
        self.tree.changeState(n)
class SchedulePanel(wx.Panel):
    """Placeholder panel for the backup-schedule tab (not yet implemented)."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)

    def OnSize(self, event):
        # nothing to lay out yet; query the size and let the event propagate
        width, height = self.GetClientSizeTuple()
        event.Skip()
class FeedbackPanel(wx.Panel):
    """Placeholder panel for sending feedback (not yet implemented)."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # NOTE(review): the editor is created on *parent* rather than self,
        # and the local reference is discarded -- confirm this is intended
        editor = wx.lib.editor.editor.Editor(parent, -1)
    def OnSize(self, event):
        w,h = self.GetClientSizeTuple()
        event.Skip()
class FludNotebook(wx.Notebook):
    """
    Main notebook holding the Backup Files / Restore / Schedule / Feedback
    tabs.  Also owns the client factory connection to the local flud node.
    """
    def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
            size=wx.DefaultSize, style=wx.NB_BOTTOM|wx.NO_BORDER):
        self.parent = parent
        self.config = parent.config
        # connect to the local flud node's client-command port
        self.factory = LocalClientFactory(self.config)
        print "connecting to localhost:%d" % self.config.clientport
        reactor.connectTCP('localhost', self.config.clientport, self.factory)
        wx.Notebook.__init__(self, parent, id, pos, style=style)
        self.filePanel = FilePanel(self,
                searchButtonAction=parent.searchButtonAction)
        self.AddPage(self.filePanel, "Backup Files")
        self.restorePanel = RestorePanel(self, self.config, self.factory)
        self.AddPage(self.restorePanel, "Restore")
        self.schedulePanel = SchedulePanel(self)
        self.AddPage(self.schedulePanel, "Backup Schedule")
        self.feedbackPanel = FeedbackPanel(self)
        self.AddPage(self.feedbackPanel, "Feedback")
        self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.changedPage)

    def shutdown(self, event):
        # only the file panel holds state that needs flushing on close
        self.filePanel.shutdown(event)

    def changedPage(self, event):
        """Update the frame's help message for the newly selected tab."""
        page = event.GetSelection()
        if page == 0:
            self.SetMessage("Select files and directories for backup"
                    " with the filesystem view on the left, or set up criteria"
                    " for finding files for backup directly with simple"
                    " searches, below right.")
        elif page == 1:
            self.SetMessage("Select files/directories to be restored to"
                    " your computer, then click on 'restore!' Files will turn"
                    " green as they arrive.")
            # refresh from current metadata each time the tab is shown
            self.restorePanel.update()
        elif page == 2:
            self.SetMessage("Configure how often your computer should backup."
                    "\n (not implemented)")
        elif page == 3:
            self.SetMessage("Send feedback to flud programmers. (not"
                    " implemented)")

    def SetMessage(self, msg):
        # delegate to the frame's logo-panel message area
        self.parent.SetMessage(msg)
class FludLogoPanel(wx.Panel):
    """
    Black banner across the top of the frame: the flud logo on the left
    and a centered, wrapping message area on the right (see SetMessage).
    """
    def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
            size=wx.Size(10,10), style=wx.TAB_TRAVERSAL, name="logo panel"):
        wx.Panel.__init__(self, parent, id, pos, size, style, name)
        self.SetAutoLayout(True)
        self.SetBackgroundColour(wx.BLACK)
        self.SetForegroundColour(wx.WHITE)
        logobmp = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "flud-backup-logo-1-150-nodrop.png")))
        pad = 0
        self.logowidth = logobmp.GetWidth()
        self.logoheight = logobmp.GetHeight()
        self.logo = wx.StaticBitmap(self, -1, logobmp)
        self.messagePanel = wx.Panel(self, -1)
        self.messagePanel.SetBackgroundColour(wx.BLACK)
        self.messagePanel.SetForegroundColour(wx.WHITE)
        self.message = wx.StaticText(self.messagePanel, -1,
                "message text area", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE,
                size=(-1, self.logoheight-15))
        # StaticText doesn't rewrap itself on resize; handle it by hand
        self.message.Bind(wx.EVT_SIZE, self.resizeMessage)
        self.bsizer = wx.BoxSizer(wx.VERTICAL)
        self.bsizer.Add(self.message, flag=wx.EXPAND|wx.ALL, border=35)
        self.bsizer.SetSizeHints(self.messagePanel)
        self.messagePanel.SetSizer(self.bsizer)
        self.gbSizer = wx.GridBagSizer(1,2)
        self.gbSizer.Add(self.logo, (0,0))
        self.gbSizer.Add(self.messagePanel, (0,1), flag=wx.EXPAND|wx.ALL)
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(1)
        self.SetSizerAndFit(self.gbSizer)
        self.SetSize(wx.Size(self.logowidth, self.logoheight))
        # pin the banner height to the logo height; width may grow
        self.SetSizeHints(self.logowidth, self.logoheight, -1, self.logoheight)

    def SetMessage(self, msg):
        """Display *msg* in the banner, wrapped and centered to fit."""
        (w,h) = self.message.GetSizeTuple()
        #print "msg area size is %d x %d" % (w,h)
        self.message.SetLabel(msg)
        self.message.Wrap(w)
        #print "msg is '%s'" % self.message.GetLabel()
        self.message.Center()

    def resizeMessage(self, evt):
        # this is mainly to deal with StaticText wonkiness (not calling Wrap()
        # automatically, not centering properly automatically). It may be
        # possible to get rid of this with a future wxPython release.
        (w,h) = self.message.GetSizeTuple()
        self.message.Wrap(w)
        m = self.message.GetLabel()
        m = m.replace('\n',' ')
        self.message.SetLabel(m)
        self.message.Wrap(w)
        self.message.Center()
class FludFrame(wx.Frame):
    """
    Top-level application frame: logo/message banner above the main
    notebook, plus a status bar.  Closing the frame flushes notebook state
    via shutdown().
    """
    def __init__(self, parent, id=wx.ID_ANY, label="flud backup client",
            size=wx.Size(800,600),
            style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
            config=None):
        # (fixed typo in the default window title: "bakcup" -> "backup")
        wx.Frame.__init__(self, parent, id, label, size=size, style=style)
        wx.ToolTip.SetDelay(2000)
        # clearMessage tracks whether the banner currently shows a
        # transient error that the next successful action should wipe
        self.clearMessage = False
        self.logoPanel = FludLogoPanel(self)
        self.SetMessage('Welcome.')
        self.config = config
        self.notebook = FludNotebook(self)
        self.operationStatus = wx.StatusBar(name='operationStatus',
                parent=self, style=0)
        self.SetStatusBar(self.operationStatus)
        self.gbSizer = wx.GridBagSizer(2,1)
        self.gbSizer.Add(self.logoPanel,(0,0), flag=wx.EXPAND)
        self.gbSizer.Add(self.notebook, (1,0), flag=wx.EXPAND|wx.ALL, border=1)
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)
        self.Bind(wx.EVT_CLOSE, self.shutdown)
        self.SetSize(size)
        self.Show(True)

    def SetMessage(self, message):
        """Show *message* in the logo banner."""
        self.logoPanel.SetMessage(message)

    def shutdown(self, event):
        """EVT_CLOSE handler: let the notebook flush its state."""
        self.notebook.shutdown(event)

    def searchButtonAction(self, event, errmsg=None, infomsg=None):
        """Display search feedback; clear stale transient messages."""
        if errmsg:
            self.logoPanel.SetMessage(errmsg)
            self.clearMessage = True
        elif infomsg:
            self.logoPanel.SetMessage(infomsg)
            self.clearMessage = False
        elif self.clearMessage:
            self.logoPanel.SetMessage("")
#if __name__ == '__main__':
# app = wx.PySimpleApp()
#
# config = FludConfig()
# config.load(doLogging=False)
#
# f = FludFrame(None, wx.ID_ANY, 'flud backup client', size=(795,600),
# style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
# config=config)
#
# from twisted.internet import reactor
# reactor.registerWxApp(app)
# reactor.run()
| Python |
"""
FludConfig.py, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Manages the configuration file for flud backup.
"""
import os, sys, socket, re, logging
import ConfigParser
import flud.FludCrypto as FludCrypto
from flud.FludCrypto import FludRSA
from flud.FludkRouting import kRouting
from flud.fencode import fencode, fdecode
""" default mapping of relative URLs """
def_commandmap = {'ID': 'ID', 'GROUPID': 'GROUPID', 'STORE': 'STORE',
'RETRIEVE': 'RETRIEVE', 'VERIFY': 'VERIFY', 'PROXY': 'PROXY',
'DELETE': 'DELETE',
'kFINDNODE': 'kFINDNODE', 'kFINDVAL': 'kFINDVAL', 'kSTORE': 'kSTORE'}
logger = logging.getLogger('flud')
CLIENTPORTOFFSET = 500
class FludDebugLogFilter(logging.Filter):
    """
    Keeps all logging levels defined by loggers, but ups level to DEBUG for
    loggers whose namespaces match patterns given by wildcards.
    """
    # XXX: doesn't really interact with all logging levels by all loggers, only
    # with the one defined by the root logger. If children have stricter
    # loglevels set, this filter won't ever get called on them.
    def __init__(self, wildcardStrings):
        # wildcardStrings: a dotted wildcard pattern or list of patterns,
        # e.g. 'flud.*' or 'flud.pro*col'
        self.setWildcards(wildcardStrings)
        root = logging.getLogger("")
        if hasattr(root, 'fludDebugLogLevel'):
            # a previous instance already recorded the original root level
            self.effectiveLevel = root.fludDebugLogLevel
        else:
            self.effectiveLevel = root.getEffectiveLevel()
            # record the original level ON THE ROOT LOGGER so later
            # instances can find it (the old code stored it on self, so
            # the hasattr() check above could never succeed)
            root.fludDebugLogLevel = root.getEffectiveLevel()
        # let everything reach the handlers; this filter does the gating
        root.setLevel(logging.NOTSET)

    def setWildcards(self, wildcardStrings):
        """Replace the pattern list with compiled forms of the given
        pattern(s); accepts a single string or a list of strings."""
        self.wildcards = []
        if not isinstance(wildcardStrings, list):
            wildcardStrings = [wildcardStrings]
        for s in wildcardStrings:
            self.setWildcard(s)

    def setWildcard(self, wildcardString):
        """Compile one dotted wildcard pattern into a regex and store it."""
        fields = wildcardString.split('.')
        for i, s in enumerate(fields):
            if "*" == s:
                # a bare '*' component matches across dots
                fields[i] = r'[\w.]*'
            elif '*' in s:
                # '*' embedded in a component matches within that component.
                # (The old code called s.index(s, '*') -- transposed
                # arguments raising TypeError, swallowed by a bare except --
                # so embedded wildcards leaked into the regex untranslated.)
                fields[i] = s.replace('*', r'[\w]*')
        regex = "^%s$" % r'\.'.join(fields)
        self.wildcards.append(re.compile(regex))

    def filter(self, record):
        """Admit records at or above the original root level, plus any
        record whose logger name matches one of the wildcard patterns."""
        if record.levelno >= self.effectiveLevel:
            return 1
        for w in self.wildcards:
            if w.match(record.name):
                return 1
        return 0
# XXX: refactor out the try/except stuff that could be done with has_key()
class FludConfig:
"""
Handles configuration for Flud nodes. Most persistent settings live in
this class.
Configuration is kept in the directory specified by FLUDHOME if this value
is set in the environment, otherwise in HOME/.flud/. If no existing
configuration exists, this object will create a configuration with sane
default values.
"""
def __init__(self):
self.Kr = 0
self.Ku = 0
self.nodeID = 0
self.groupIDr = 0
self.groupIDu = 0
self.port = -1
self.commandmap = {}
self.reputations = {}
self.nodes = {}
try:
self.fludhome = os.environ['FLUDHOME']
except:
try:
home = os.environ['HOME']
self.fludhome = home+"/.flud"
except:
logger.warn("cannot determine FLUDHOME.")
logger.warn("Please set HOME or FLUDHOME environment variable")
if os.path.isdir(self.fludhome) == False:
os.mkdir(self.fludhome, 0700)
self.fludconfig = self.fludhome+"/flud.conf"
self.configParser = ConfigParser.ConfigParser()
if os.path.isfile(self.fludconfig) == False:
conffile = file(self.fludconfig, "w")
else:
conffile = file(self.fludconfig, "r")
self.configParser.readfp(conffile)
conffile.close()
logger.info('fludhome = %s' % self.fludhome)
logger.info('fludconfig = %s' % self.fludconfig)
def load(self, serverport=None, doLogging=True):
"""
If serverport is given, it overrides any value that may be in the
configuration file
"""
self.logfile, self.loglevel = self.getLoggingConf()
if doLogging:
if os.path.isfile(self.logfile):
os.remove(self.logfile)
handler = logging.FileHandler(self.logfile)
formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)d'
' %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.getLogger("").setLevel(self.loglevel)
#logger.setLevel(self.loglevel)
#logger.setLevel(logging.WARNING) # XXX: overrides user prefs
#logger.setLevel(logging.DEBUG) # XXX: overrides user prefs
if os.environ.has_key("LOGFILTER"):
self.filter = FludDebugLogFilter(
os.environ["LOGFILTER"].split(' '))
handler.addFilter(self.filter)
# XXX: add a LocalPrimitive that can be called dynamically to
# invoke filter.setWildcards()
self.Kr, self.Ku, self.nodeID, self.groupIDr, self.groupIDu \
= self.getID()
logger.debug('Kr = %s' % self.Kr.exportPrivateKey())
logger.debug('Ku = %s' % self.Ku.exportPublicKey())
logger.debug('nodeID = %s' % self.nodeID)
logger.debug('groupIDr = %s' % self.groupIDr)
logger.debug('groupIDu = %s' % self.groupIDu)
self.port, self.clientport, self.commandmap = self.getServerConf()
if serverport != None:
self.port = serverport
self.clientport = serverport + CLIENTPORTOFFSET
self.configParser.set("server","port",self.port)
self.configParser.set("server","clientport",self.clientport)
logger.debug('port = %s' % self.port)
logger.debug('clientport = %s' % self.clientport)
logger.debug('commandmap = %s' % self.commandmap)
self.routing = kRouting((socket.getfqdn(), self.port,
long(self.nodeID, 16), self.Ku.exportPublicKey()['n']))
self.storedir, self.generosity, self.minoffer = self.getStoreConf()
if os.path.isdir(self.storedir) == False:
os.mkdir(self.storedir)
os.chmod(self.storedir, 0700)
logger.debug('storedir = %s' % self.storedir)
self.kstoredir = self.getkStoreConf()
if os.path.isdir(self.kstoredir) == False:
os.mkdir(self.kstoredir)
os.chmod(self.kstoredir, 0700)
logger.debug('kstoredir = %s' % self.kstoredir)
self.clientdir = self.getClientConf()
if os.path.isdir(self.clientdir) == False:
os.mkdir(self.clientdir)
os.chmod(self.clientdir, 0700)
logger.debug('clientdir = %s' % self.clientdir)
self.metadir, self.metamaster = self.getMetaConf()
if os.path.isdir(self.metadir) == False:
os.mkdir(self.metadir)
os.chmod(self.metadir, 0700)
logger.debug('metadir = %s' % self.metadir)
self.reputations = self.getReputations()
logger.debug("reputations = %s" % str(self.reputations))
self.nodes = self.getKnownNodes()
logger.debug("known nodes = %s" % str(self.nodes))
self.save()
os.chmod(self.fludconfig, 0600)
self.loadMasterMeta()
def save(self):
conffile = file(self.fludconfig, "w")
self.configParser.write(conffile)
conffile.close()
def addNode(self, nodeID, host, port, Ku, mygroup=None):
"""
Convenience method for adding a node to the known.
If a node with nodeID already exists, nothing changes.
This method /does not/ save the new configuration to file,
"""
if mygroup == None:
mygroup = self.groupIDu
if self.nodes.has_key(nodeID) == False:
self.nodes[nodeID] = {'host': host, 'port': port,
'Ku': Ku.exportPublicKey(), 'mygroup': mygroup}
#logger.log(logging.DEBUG, "nodes: " % str(self.nodes))
# XXX: disabled nodes saving
#for k in self.nodes:
# self.configParser.set('nodes', k, self.nodes[k])
n = self.routing.insertNode((host, int(port), long(nodeID, 16),
Ku.exportPublicKey()['n']))
if n != None:
logger.warn("need to ping %s for LRU in routing table!"
% str(n))
# XXX: instead of pinging, put it in a replacement cache table
# and when one of the nodes needs replaced (future query)
# replace it with one of these. Sec 4.1
self.routing.replacementCache.insertNode(
(host, int(port), long(nodeID, 16),
Ku.exportPublicKey()['n']))
# XXX: should also create a corresponding reputation
def getLoggingConf(self):
"""
Returns logging configuration: logfile and loglevel
"""
if (self.configParser.has_section("logging") == False):
self.configParser.add_section("logging")
try:
logfile = int(self.configParser.get("logging","logfile"))
except:
logger.debug("no logfile specified, using default")
logfile = self.fludhome+'/flud.log'
self.configParser.set("logging", "logfile", logfile)
try:
loglevel = int(self.configParser.get("logging","loglevel"))
#loglevel = logging.WARNING # XXX: remove me
except:
logger.debug("no loglevel specified, using default")
loglevel = logging.WARNING
self.configParser.set("logging", "loglevel", loglevel)
return logfile, loglevel
def getID(self):
"""
Returns a tuple: private key, public key, nodeID, private group ID, and
public group ID from config. If these values don't exist in conf file,
they are generated and added.
"""
# get the keys and IDs from the config file.
# If these values don't exist, generate a pub/priv key pair, nodeID,
# and groupIDs.
if (self.configParser.has_section("identification") == False):
self.configParser.add_section("identification")
try:
privkey = FludRSA.importPrivateKey(
eval(self.configParser.get("identification","Kr")))
except:
pubkey, privkey = FludCrypto.generateKeys()
else:
try:
pubkey = FludRSA.importPublicKey(
eval(self.configParser.get("identification","Ku")))
except:
pubkey = privkey.publickey()
try:
nodeID = self.configParser.get("identification","nodeID")
except:
#nodeID = FludCrypto.hashstring(str(pubkey.exportPublicKey()))
nodeID = pubkey.id()
try:
privgroupID = self.configParser.get("identification",
"groupIDr")[:64]
except:
privgroupID = 'fludtest' # default groupID hardcoded
try:
pubgroupID = self.configParser.get("identification","groupIDu")
except:
pubgroupID = FludCrypto.hashstring(str(pubkey.exportPublicKey())
+privgroupID)
# write the settings back out to config object
self.configParser.set("identification","Kr",privkey.exportPrivateKey())
self.configParser.set("identification","Ku",pubkey.exportPublicKey())
self.configParser.set("identification","nodeID",nodeID)
self.configParser.set("identification","groupIDr",privgroupID)
self.configParser.set("identification","groupIDu",pubgroupID)
# return the values
return privkey, pubkey, nodeID, privgroupID, pubgroupID
def getServerConf(self):
"""
Returns server configuration: port number
"""
if (self.configParser.has_section("server") == False):
self.configParser.add_section("server")
try:
port = int(self.configParser.get("server","port"))
except:
logger.debug("no port specified, using default")
port = 8080 # XXX: default should be defined elsewhere.
# Should prefer 80. If in use, use 8080+
try:
clientport = int(self.configParser.get("server","clientport"))
except:
logger.debug("no clientport specified, using default")
clientport = port+CLIENTPORTOFFSET
try:
commandmap = eval(self.configParser.get("server","commandmap"))
except:
logger.debug("no commandmap specified, using default")
commandmap = def_commandmap
for i in def_commandmap: # ensure that commandmap covers all keys
try:
j = commandmap[i]
except:
commandmap[i] = def_commandmap[i]
# XXX: could also do a 'parammap'
self.configParser.set("server","port",port)
self.configParser.set("server","clientport",clientport)
self.configParser.set("server","commandmap",commandmap)
return port, clientport, commandmap
def _getDirConf(self, configParser, section, default):
if (configParser.has_section(section) == False):
configParser.add_section(section)
try:
dir = int(self.configParser.get(section,"dir"))
except:
logger.debug("no %s directory specified, using default", section)
dir = self.fludhome+'/'+default
if not os.path.isdir(dir):
os.makedirs(dir)
self.configParser.set(section,"dir",dir)
return dir
def getClientConf(self):
"""
Returns client configuration: download directory
"""
return self._getDirConf(self.configParser, "client", "dl")
def getStoreConf(self):
"""
Returns data store configuration
"""
storedir = self._getDirConf(self.configParser, "store", "store")
try:
generosity = self.configParser.get("store", "generosity")
except:
logger.debug("no generosity specified, using default")
generosity = 1.5
try:
minoffer = self.configParser.get("store", "minoffer")
except:
logger.debug("no minoffer specified, using default")
minoffer = 1024
return storedir, generosity, minoffer
def getkStoreConf(self):
"""
Returns dht data store configuration
"""
return self._getDirConf(self.configParser, "kstore", "dht")
def getMetaConf(self):
"""
Returns metadata configuration: metadata directory
"""
metadir = self._getDirConf(self.configParser, "metadata", "meta")
try:
master = self.configParser.get("meta","master")
except:
logger.debug("no meta master file specified, using default")
master = "master"
if not os.path.isfile(metadir+'/'+master):
f = open(metadir+'/'+master, 'w')
f.close()
return (metadir, master)
def getReputations(self):
"""
Returns dict of reputations known to this node
"""
# XXX: should probably just throw these in with 'nodes' (for efficiency)
return self._getDict(self.configParser, "reputations")
def getKnownNodes(self):
"""
Returns dict of nodes known to this node
"""
return {}
# XXX: don't read known nodes for now
result = self._getDict(self.configParser, "nodes")
for i in result:
print str(i)
self.routing.insertNode(
(result[i]['host'], result[i]['port'], long(i, 16),
result[i]['nKu']))
return result
def _getDict(self, configParser, section):
"""
creates a dictionary from the list of pairs given by
ConfigParser.items(section). Requires that the right-hand side of
the config file's "=" operator be a valid python type, as eval()
will be invoked on it
"""
if (configParser.has_section(section) == False):
configParser.add_section(section)
try:
items = configParser.items(section)
result = {}
for item in items:
#print item
try:
result[str(item[0])] = eval(item[1])
configParser.set(section, item[0], item[1])
except:
logger.warn("item '%s' in section '%s'"
" of the config file has an unreadable format"
% str(item[0]), str(section))
except:
logger.warn("Couldn't read %s from config file:" % section)
return result
# XXX: note that this master metadata all-in-mem scheme doesn't really work
# long term; these methods should eventually go to a local db or db-like
# something
def updateMasterMeta(self, fname, val):
self.master[fname] = val
def getFromMasterMeta(self, fname):
try:
return self.master[fname]
except:
return None
def deleteFromMasterMeta(self, fname):
try:
self.master.pop(fname)
except:
pass
def loadMasterMeta(self):
fmaster = open(os.path.join(self.metadir, self.metamaster), 'r')
master = fmaster.read()
fmaster.close()
if master == "":
master = {}
else:
master = fdecode(master)
self.master = master
def syncMasterMeta(self):
master = fencode(self.master)
fmaster = open(os.path.join(self.metadir, self.metamaster), 'w')
fmaster.write(master)
fmaster.close()
def _test(self):
import doctest
doctest.testmod()
if __name__ == '__main__':
    # Running this module directly just creates a config (reading/creating
    # FLUDHOME) and exercises the doctests.
    fludConfig = FludConfig()
    fludConfig._test()
| Python |
#!/usr/bin/python
"""
FludNode.tac (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL).

This is the application file used by twistd to daemonize FludNode.
"""

import os

from twisted.application import service, internet
import flud.FludNode
from flud.protocol.FludCommUtil import getCanonicalIP

# Optional environment overrides: listening port and a gateway node
# (host/port) to bootstrap into the network through.
port = None
gwhost = None
gwport = None
if 'FLUDPORT' in os.environ:
    port = int(os.environ['FLUDPORT'])
if 'FLUDGWHOST' in os.environ:
    gwhost = getCanonicalIP(os.environ['FLUDGWHOST'])
if 'FLUDGWPORT' in os.environ:
    gwport = int(os.environ['FLUDGWPORT'])

node = flud.FludNode.FludNode(port)
if gwhost and gwport:
    node.connectViaGateway(gwhost, gwport)

application = service.Application("flud.FludNode")
# NOTE(review): this rebinding shadows the imported 'service' module, and the
# setServiceParent() call below is commented out, so the returned service is
# never attached to 'application' -- confirm node.start() registers itself.
service = node.start(twistd=True)
#service.setServiceParent(application)
| Python |
#!/usr/bin/python
"""
FludScheduler.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
FludScheduler is the process that monitors files for changes and then tells
flud to back them up.
"""
import sys, os, time, stat
from twisted.internet import reactor
from flud.FludConfig import FludConfig
from flud.protocol.LocalClient import *
from flud.CheckboxState import CheckboxState
CHECKTIME=5
class FludScheduler:
    """
    Periodically scans the user's selected files for changes and asks the
    flud node (via the LocalClient factory) to store anything that changed.
    """
    def __init__(self, config, factory):
        self.config = config
        self.factory = factory            # client factory used to talk to the node
        self.fileconfigfile = None        # path to fludfile.conf, located lazily
        self.fileconfigfileMTime = 0      # mtime of fludfile.conf at last read
        self.fileChangeTime = 0           # time of the last filesystem sweep
        self.fileconfigSelected = set()   # paths selected for backup
        self.fileconfigExcluded = set()   # paths excluded from backup
        self.getMasterMetadata()

    def getMasterMetadata(self):
        """Ask the node for its master metadata (list of stored files)."""
        d = self.factory.sendLIST()
        d.addCallback(self.gotMasterMetadata)
        d.addErrback(self.errMasterMetadata)
        return d

    def gotMasterMetadata(self, master):
        # cache of {filename: metadata} used to skip already-stored files
        self.mastermetadata = master

    def errMasterMetadata(self, err):
        # without master metadata we can't schedule anything; bail out
        print err
        reactor.stop()

    def readFileConfig(self, mtime=None):
        """
        (Re)parse fludfile.conf and rebuild the selected/excluded path sets.
        NOTE(review): eval() on the config file contents -- assumes the file
        is trusted/local.
        """
        print "reading FileConfig"
        file = open(self.fileconfigfile, 'r')
        self.fileconfig = eval(file.read())
        file.close()
        if mtime:
            self.fileconfigfileMTime = mtime
        else:
            self.fileconfigfileMTime = os.stat(
                    self.fileconfigfile)[stat.ST_MTIME]
        self.fileconfigSelected = set([f for f in self.fileconfig
                if self.fileconfig[f] == CheckboxState.SELECTED
                or self.fileconfig[f] == CheckboxState.SELECTEDCHILD])
        self.fileconfigExcluded = set([f for f in self.fileconfig
                if self.fileconfig[f] == CheckboxState.EXCLUDED
                or self.fileconfig[f] == CheckboxState.EXCLUDEDCHILD])

    # The file[s]ChangeStat are the worst possible way to detect file changes.
    # Much more efficient to use inotify/dnotify/fam/gamin/etc., as well as
    # more correct (no way to detect cp -a or -p, for example, with stat).
    # But, these are a fallback method when those aren't present, and are fine
    # for testing.
    def fileChangedStat(self, file, fileChangeTime=None):
        """
        Return True if file's mtime is newer than the reference time (the
        stored-metadata time if known, else fileChangeTime or the last sweep
        time).  Nonexistent paths are reported as changed.
        """
        if os.path.isfile(file) or os.path.isdir(file):
            mtime = os.stat(file)[stat.ST_MTIME]
            if not fileChangeTime:
                fileChangeTime = self.fileChangeTime
            if file in self.mastermetadata:
                # prefer the time recorded when the file was last stored
                fileChangeTime = self.mastermetadata[file][1]
        else:
            return True
        print "mtime = %s, ctime = %s (%s)" % (mtime, fileChangeTime, file)
        if mtime > fileChangeTime:
            return True
        return False

    def filesChangedStat(self, files, fileChangeTime=None):
        """Return the subset of files that fileChangedStat flags as changed."""
        result = []
        for f in files:
            if self.fileChangedStat(f, fileChangeTime):
                result.append(f)
        return result

    # Change these to point to something other than the xxxStat() methods
    def fileChanged(self, file, fileChangeTime=None):
        """Change-detection entry point; currently delegates to the stat-based
        implementation above."""
        return self.fileChangedStat(file, fileChangeTime)

    def filesChanged(self, files, fileChangeTime=None):
        return self.filesChangedStat(files, fileChangeTime)

    def checkFileConfig(self):
        """
        Locate (first call) or re-read (if modified) fludfile.conf.
        Returns True if the config was (re)parsed, False otherwise.
        """
        # check config file to see if it has changed, then reparse it
        if not self.fileconfigfile:
            # first time through
            print "checking fileconfigfile (initial)"
            if os.environ.has_key('FLUDHOME'):
                fludhome = os.environ['FLUDHOME']
            elif os.environ.has_key('HOME'):
                fludhome = os.environ['HOME']+"/.flud"
            else:
                fludhome = ".flud"
            # XXX: fludfile.conf should be in config
            self.fileconfigfile = os.path.join(fludhome, "fludfile.conf")
            if os.path.isfile(self.fileconfigfile):
                self.readFileConfig()
                return True
            else:
                print "no fileconfigfile to read"
        elif os.path.isfile(self.fileconfigfile):
            if self.fileChanged(self.fileconfigfile, self.fileconfigfileMTime):
                print "fileconfigfile changed"
                mtime = time.time()
                self.readFileConfig(mtime)
                return True
        return False

    def checkFilesystem(self):
        """
        Walk the selected paths (recursing into directories) and return the
        set of changed files, skipping exclusions and already-stored entries.
        """
        checkedFiles = set()
        changedFiles = set()
        def checkList(list):
            for entry in list:
                # XXX: if entry is in master metadata, and its mtime is not
                # earlier than the time used by fileChanged, skip it (add 'and'
                # clause)
                if entry not in checkedFiles and \
                        entry not in self.fileconfigExcluded and \
                        entry not in self.mastermetadata:
                    print "checkFilesystem for %s" % entry
                    if os.path.isdir(entry):
                        dirfiles = [os.path.join(entry, i)
                                for i in os.listdir(entry)]
                        checkedFiles.update([entry,])
                        checkList(dirfiles)
                    elif self.fileChanged(entry):
                        print "%s changed" % entry
                        if os.path.isfile(entry):
                            changedFiles.update([entry,])
                        else:
                            print "entry ?? %s ?? changed" % entry
                    checkedFiles.update([entry,])
        checkList(self.fileconfigSelected)
        self.fileChangeTime = time.time()
        return changedFiles

    def storefileFailed(self, err, file):
        print "storing %s failed: %s" % (file, err)
        err.printTraceback()

    def storefileYay(self, r, file):
        print "storing %s success" % file

    def storeFiles(self, changedFiles):
        """Fire a PUTF for each changed file; returns a DeferredList over all
        of the individual store operations."""
        dlist = []
        for f in changedFiles:
            print "storing %s" % f
            deferred = self.factory.sendPUTF(f)
            deferred.addCallback(self.storefileYay, f)
            deferred.addErrback(self.storefileFailed, f)
            dlist.append(deferred)
        # NOTE(review): 'defer' is assumed to come from the LocalClient
        # wildcard import -- confirm.
        dl = defer.DeferredList(dlist)
        return dl

    def restartCheckTimer(self, v):
        # schedule the next sweep; v is the (ignored) deferred result
        print "restarting timer (%d) to call run()" % CHECKTIME
        reactor.callLater(CHECKTIME, self.run)

    def updateMasterMetadata(self, v):
        return self.getMasterMetadata()

    def run(self):
        """One scheduler cycle: refresh config, scan, store, reschedule."""
        print "run"
        self.checkFileConfig()
        changedFiles = self.checkFilesystem()
        print "%s changed" % changedFiles
        d = self.storeFiles(changedFiles)
        d.addBoth(self.updateMasterMetadata)
        d.addBoth(self.restartCheckTimer)
| Python |
"""
FludFileOperations.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Implements file storage and retrieval operations using flud primitives.
"""
import os, stat, sys, logging, binascii, random, time
from zlib import crc32
from StringIO import StringIO
from twisted.internet import defer, threads
from Crypto.Cipher import AES
from flud.FludCrypto import FludRSA, hashstring, hashfile
from flud.FludFileCoder import Coder, Decoder
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
logger = logging.getLogger('flud.fileops')
# erasure coding constants
code_n = 20
code_m = 20
code_l = 5
# temp filenaming defaults
appendEncrypt = ".crypt"
appendNodeMeta = ".nmeta"
dirReplace = "-"
# XXX: could remove trailing '=' from all stored sha256s (dht keys, storage
# keys, etc) and add them back implicitly
class Ctx(object):
    """
    Object to encapsulate operation context for logger
    """
    def __init__(self, ctx):
        self.ctx = ctx
        self.formatstr = ""
        self.args = ()

    def msg(self, formatstr, *args):
        # Remember the format and args for later rendering; interpolate
        # eagerly so a malformed format string fails here, at the call
        # site, rather than later inside __repr__.
        self.formatstr = formatstr
        self.args = args
        self.formatstr % self.args
        return self

    def __repr__(self):
        return "%s: %s" % (self.ctx, self.formatstr % self.args)
def pathsplit(fname):
    """
    Split fname into the list of its ancestor paths, shortest first,
    ending with fname itself (e.g. '/a/b' -> ['/', '/a', '/a/b']).
    """
    parts = []
    cur = fname
    while True:
        par, chld = os.path.split(cur)
        if chld == "":
            # reached the root component (or empty prefix); stop here
            parts.append(par)
            break
        parts.append(os.path.join(par, chld))
        cur = par
    parts.reverse()
    return parts
def filemetadata(fname):
    """
    Return a dict of fname's filesystem metadata: path, permission mode,
    owner/group ids, and access/modification/change times.
    """
    fstat = os.stat(fname)
    meta = {'path': fname}
    for key, idx in (('mode', stat.ST_MODE), ('uid', stat.ST_UID),
            ('gid', stat.ST_GID), ('atim', stat.ST_ATIME),
            ('mtim', stat.ST_MTIME), ('ctim', stat.ST_CTIME)):
        meta[key] = fstat[idx]
    return meta
class StoreFile:
"""
Implements the meta operations of storing, retrieving, and verifying files.
In addition to using the data primitives laid out in the protocol/
directory, manages the creation of file metadata for local and remote
storage.
Stores a file to flud by:
1. Create storage and encryption keys for file: Hashes the file once
to create an encryption key, eK=H(file), and then again to create the
storage key for the file metadata sK=H(eK)=H(H(file)).
2. Create local filesystem file metadata: Encrypts the storage key
asymetrically with public key as eeK=e_Ku(eK), and creates local copy
of fs metadata with eeK and other file metadata (ownership, name,
timestamps). Encrypts this metadata with Ku. (flud file metadata
consists of eeK and Ku(fs metadata)).
3. Create data-specific file metadata: Symmetrically encrypt the file
with e_file=eK(file). Code e_file into k+m blocks. Perform
H(block) on each k+m block.
4. Query DHT for sK. If it exists, grab the metadata record (call it
'storedMetadata') for comparison to one we are generated. Compare
data-specific metadata to storedMetadata (if it exists). If it doesn't
already exist, this step can be ignored. If it exists and the data
doesn't match, either the hash function is broken or a malicious node
has poisoned the DHT -- return failure (client can attempt restore
under a new key, or challenge the existing stored metadata).
5. Store m+n blocks. If stored metadata exists, can do VERIFYs
instead, and only store when VERIFY fails. For each failed VERIFY,
must update the data-specific part of the metadata record. Since we
don't want to be susceptible to replacement attack, the update is
treated as an 'append' to the specific block metadata by the store
target. (The store target can monitor its data-specific metadata
records and for any that grow beyond a certain size, can shrink all
block records to a size of 1 by performing RETRIEVE ops on the
block, verfiying hash, and removing any failures. If this fails for
all block entries, can prune all but the /first and last/ entries --
keep these to prevent damage by malicious nodes. This strategy is
beneficial to the node that performs it, because it prevents storage of
arbitrary data in the DHT layer, which is in turn good for the system
as a whole)
6. Store file metadata (both node-specific and data-specific) to the
DHT. Keep a local copy as well.
7. Update the master file record index for this node, and store it to
the DHT layer.
"""
# XXX: instead of doing all debugging with '"%s ...", self.mkey, ...', make
# some props (debug(), warn(), etc) that add the mkey to whatever msg is
# passed
# XXX: should follow this currentOps model for the other FludFileOps
currentOps = {}
    def __init__(self, node, filename):
        """
        Begin storing filename through node.  The operation's deferred is
        exposed as self.deferred.
        """
        self.node = node
        self.filename = filename
        # mkey: per-operation key derived from the local path, used to tag
        # log messages and metadata sent with each block
        self.mkey = crc32(self.filename)
        self.ctx = Ctx(self.mkey).msg
        self.config = node.config
        self.Ku = node.config.Ku
        self.routing = self.config.routing
        self.metadir = self.config.metadir
        self.parentcodedir = self.config.clientdir # XXX: clientdir?
        # candidate targets for block placement; consumed by _storeBlock
        self.nodeChoices = self.routing.knownExternalNodes()
        self.deferred = self._storeFile()
    def _storeFile(self):
        """
        Drive steps 1-4 of the store described in the class docstring:
        derive keys, write local fs metadata, encrypt and erasure-code the
        file, then query the DHT for existing metadata under sK.  Returns a
        deferred that fires when the store completes.
        """
        if not os.path.isfile(self.filename):
            return defer.fail(ValueError("%s is not a file" % self.filename))

        # 1: create encryption key (eK) and storage key (sK).  Query DHT using
        # sK
        self.eK = hashfile(self.filename)
        logger.debug(self.ctx("_storefile %s (%s)", self.filename, self.eK))
        self.sK = long(hashstring(self.eK), 16)
        self.eeK = self.Ku.encrypt(binascii.unhexlify(self.eK))
        self.eKey = AES.new(binascii.unhexlify(self.eK))
        #logger.debug(self.ctx("file %s eK:%s, storage key:%d"
        #       % (self.filename, self.eK, self.sK)))

        # 2: create filesystem metadata locally.
        sbody = filemetadata(self.filename)
        sbody = fencode(sbody)
        self.eNodeFileMetadata = ""
        # RSA-encrypt the fs metadata in 128-byte chunks
        for i in range(0, len(sbody), 128):
            self.eNodeFileMetadata += self.Ku.encrypt(sbody[i:i+128])[0]
        self.nodeFileMetadata = {'eeK' : fencode(self.eeK[0]),
                'meta' : fencode(self.eNodeFileMetadata)}
        # XXX: "/" is OS specific
        self.flatname = self.filename.replace("/", dirReplace)
        self.mfilename = os.path.join(self.metadir,self.flatname+appendNodeMeta)
        f = open(self.mfilename, "w")
        f.write(fencode(self.nodeFileMetadata))
        f.close();

        # erasure code the metadata
        c = Coder(code_n, code_m, code_l)
        # XXX: bad blocking stuff, move into thread
        self.encodedir = os.path.join(self.parentcodedir, self.flatname)
        try:
            # mkdir doubles as a lock: a second concurrent store of the same
            # file fails here
            os.mkdir(self.encodedir)
        except:
            return defer.fail(failure.DefaultException(
                "%s already requested" % self.filename))
        # XXX: mfiles should be held in mem, as StringIOs (when coder supports
        # this)
        self.mfiles = c.codeData(self.mfilename,
                os.path.join(self.encodedir, 'm'))

        # XXX: piggybacking doesn't work with new metadata scheme, must fix it
        # to append metadata, or if already in progress, redo via verify ops
        # if already storing identical file by CAS, piggyback on it
        if self.currentOps.has_key(self.eK):
            logger.debug(self.ctx("reusing callback on %s", self.eK))
            (d, counter) = self.currentOps[self.eK]
            self.currentOps[self.eK] = (d, counter+1)
            # setting sfile, encodedir, and efilename to empty vals is kinda
            # hokey -- could split _storeMetadata into two funcs instead (the
            # cleanup part and the store part; see _storeMetadata)
            self.sfiles = []
            self.encodedir = None
            self.efilename = None
            d.addCallback(self._piggybackStoreMetadata)
            return d

        # 3: encrypt and encode the file locally.
        self.efilename = os.path.join(self.metadir,self.flatname+appendEncrypt)
        e = open(self.efilename, "w")
        fd = os.open(self.filename, os.O_RDONLY)
        fstat = os.fstat(fd)
        fsize = fstat[stat.ST_SIZE]
        # XXX: bad blocking stuff, move into thread
        # create a pad at front of file to make it an even multiple of 16
        # (AES block size); the first pad byte records the pad length
        fpad = int(16 - fsize%16);
        #logger.debug(self.ctx("fsize=%d, padding with %d bytes"
        #       % (fsize, fpad)))
        paddata = chr(fpad)+(fpad-1)*'\x00'
        buf = paddata + os.read(fd,16-len(paddata))
        e.write(self.eKey.encrypt(buf));
        # now write the rest of the file
        while 1:
            # XXX: can we do this in larger than 16-byte chunks?
            buf = os.read(fd,16)
            if buf == "":
                break
            e.write(self.eKey.encrypt(buf));
        e.close()
        os.close(fd)

        # erasure code the file
        # XXX: bad blocking stuff, move into thread
        self.sfiles = c.codeData(self.efilename,
                os.path.join(self.encodedir, 'c'))
        #logger.debug(self.ctx("coded to: %s" % str(self.sfiles)))

        # take hashes and rename coded blocks so each block file is named by
        # its own content hash
        self.segHashesLocal = []
        for i in range(len(self.sfiles)):
            sfile = self.sfiles[i]
            h = long(hashfile(sfile),16)
            logger.debug(self.ctx("file block %s hashes to %s", i, fencode(h)))
            destfile = os.path.join(self.encodedir,fencode(h))
            if os.path.exists(destfile):
                logger.warn(self.ctx("%s exists (%s)", destfile, fencode(h)))
            self.segHashesLocal.append(h)
            #logger.debug(self.ctx("moved %s to %s" % (sfile, destfile)))
            os.rename(sfile, destfile)
            self.sfiles[i] = destfile
            mfile = self.mfiles[i]
            os.rename(mfile, destfile+".m")
            self.mfiles[i] = destfile+".m"

        # 4a: query DHT for metadata.
        d = self.node.client.kFindValue(self.sK)
        d.addCallback(self._checkForExistingFileMetadata)
        d.addErrback(self._storeFileErr, "DHT query for metadata failed")
        self.currentOps[self.eK] = (d, 1)
        return d
# 4b: compare hashlists (locally encrypted vs. DHT -- if available).
# for lhash, dhash in zip(segHashesLocal, segHashesDHT):
    def _checkForExistingFileMetadata(self, storedMetadata):
        """
        Step 4b: branch on the DHT lookup result -- store all blocks when no
        usable metadata exists, otherwise verify blocks against it.  A dict
        result here is treated the same as no metadata (not-found response).
        """
        if storedMetadata == None or isinstance(storedMetadata, dict):
            logger.info(self.ctx(
                "metadata doesn't yet exist, storing all data"))
            d = self._storeBlocks(storedMetadata)
            #d = self._storeBlocksSKIP(storedMetadata)
            return d
        else:
            storedMetadata = fdecode(storedMetadata)
            logger.info(self.ctx("metadata exists, verifying all data"))
            if not self._compareMetadata(storedMetadata, self.sfiles):
                raise ValueError("stored and local metadata do not match")
            else:
                logger.info(self.ctx("stored and local metadata match."))
            # XXX: need to check for diversity.  It could be that data stored
            # previously to a smaller network (<k+m nodes) and that we should
            # try to increase diversity and re-store the data.
            # XXX: also need to make sure we still trust all the nodes in the
            # metadata list.  If not, we should move those blocks elsewhere.
            d = self._verifyAndStoreBlocks(storedMetadata)
            return d
    def _storeBlocksSKIP(self, storedMetadata):
        """
        Testing shortcut: fabricate block metadata (choosing random nodes)
        without actually sending any STOREs, then jump to _storeMetadata.
        """
        # for testing -- skip stores so we can get to storeMeta
        dlist = []
        self.blockMetadata = {'m': 20, 'n': 20}
        for i in range(len(self.segHashesLocal)):
            hash = self.segHashesLocal[i]
            sfile = self.sfiles[i]
            node = random.choice(self.routing.knownExternalNodes())
            host = node[0]
            port = node[1]
            nID = node[2]
            nKu = FludRSA.importPublicKey(node[3])
            self.blockMetadata[(i, hash)] = long(nKu.id(), 16)
        return self._storeMetadata(None)
# 5a -- store all blocks
    def _storeBlocks(self, storedMetadata):
        """
        Step 5a: send a STORE for every coded block, then store the assembled
        metadata once all block stores complete.
        """
        dlist = []
        # NOTE(review): 'm'/'n' appear hardcoded to 20 here rather than using
        # code_m/code_n -- confirm intent.
        self.blockMetadata = {'m': 20, 'n': 20}
        for i in range(len(self.segHashesLocal)):
            hash = self.segHashesLocal[i]
            sfile = self.sfiles[i]
            deferred = self._storeBlock(i, hash, sfile, self.mfiles[i])
            dlist.append(deferred)
        logger.debug(self.ctx("_storeBlocksAll"))
        dl = defer.DeferredList(dlist)
        dl.addCallback(self._storeMetadata)
        return dl
    def _storeBlock(self, i, hash, sfile, mfile, retry=2):
        """
        Send block i (file sfile, content hash 'hash', metadata mfile) to a
        randomly chosen node from self.nodeChoices; on failure, retry up to
        'retry' times via _retryStoreBlock.
        """
        if not self.nodeChoices:
            # ran out of distinct targets; refill and accept reuse
            self.nodeChoices = self.routing.knownExternalNodes()
            logger.warn(self.ctx("had to reuse nodes!, %d nodes found",
                len(self.nodeChoices)))
        if not self.nodeChoices:
            return defer.fail(failure.DefaultException(
                "cannot store blocks to 0 nodes"))
        node = random.choice(self.nodeChoices)
        # remove the choice so subsequent blocks go to different nodes
        self.nodeChoices.remove(node)
        host = node[0]
        port = node[1]
        nID = node[2]
        nKu = FludRSA.importPublicKey(node[3])
        location = long(nKu.id(), 16)
        logger.info(self.ctx("STOREing under %s on %s:%d", fencode(hash),
            host, port))
        logger.debug(self.ctx("mfile is %s", mfile))
        deferred = self.node.client.sendStore(sfile, (self.mkey, mfile), host,
                port, nKu)
        deferred.addCallback(self._fileStored, i, hash, location)
        deferred.addErrback(self._retryStoreBlock, i, hash, location,
                sfile, mfile, "%s (%s:%d)" % (fencode(nID), host, port), retry)
        return deferred
    def _retryStoreBlock(self, error, i, hash, location, sfile, mfile,
            badtarget, retry=None):
        """
        Errback for a failed block STORE: retry against a new target while
        retries remain, otherwise fail the whole operation.
        """
        retry = retry - 1
        if retry > 0:
            logger.warn(self.ctx("STORE to %s failed, trying again", badtarget))
            d = self._storeBlock(i, hash, sfile, mfile, retry)
            # NOTE(review): _storeBlock already adds _fileStored as a
            # callback; adding it again here means it runs twice on a
            # successful retry -- confirm this is intended.
            d.addCallback(self._fileStored, i, hash, location)
            # This will fail the entire operation.  This is correct
            # behavior because we've tried on at least N nodes and couldn't
            # get the block to store -- the caller will have to try the entire
            # op again.  If this proves to be a problem, up the default retry
            # value in _storeBlock().
            d.addErrback(self._storeFileErr, "couldn't store block %s"
                    % fencode(hash))
            return d
        else:
            logger.warn(self.ctx("STORE to %s failed, giving up", badtarget))
            d = defer.Deferred()
            d.addErrback(self._storeFileErr, "couldn't store block %s"
                    % fencode(hash))
            # NOTE(review): errback() is called with no failure argument --
            # outside an except block this raises; presumably it should
            # forward the original 'error'.  Confirm.
            d.errback()
            return d
    def _fileStored(self, result, i, blockhash, location):
        """Record that block i (blockhash) now lives at 'location' (node id),
        and pass the encoded block hash down the callback chain."""
        logger.debug(self.ctx("_filestored %s", fencode(blockhash)))
        self.blockMetadata[(i, blockhash)] = location
        return fencode(blockhash)
def _compareMetadata(self, storedFiles, fileNames):
    # compares the block names returned from DHT to those in fileNames.
    # @param storedFiles: dict of (index, blockhash) keys to their
    #    locations, usually obtained from storedMetadata; also carries
    #    the coding parameters under the 'n' and 'm' keys
    # @param fileNames: local filenames. Only the os.path.basename part
    #    will be used for comparison
    # @return true if they match up perfectly, false otherwise
    logger.debug(self.ctx('# remote block names: %d', len(storedFiles)))
    logger.debug(self.ctx('# local blocks: %d', len(fileNames)))
    result = True
    # 'n'/'m' are coding parameters, not block entries; hold them aside
    # while iterating and restore them before returning
    n = storedFiles.pop('n')
    m = storedFiles.pop('m')
    for (i, f) in storedFiles:
        fname = os.path.join(self.encodedir,fencode(f))
        if not fname in fileNames:
            # fix: report the block hash (f); the old code encoded the
            # integer block index (i), producing a misleading message
            logger.warn(self.ctx("%s not in sfiles", fencode(f)))
            result = False
    for i, fname in enumerate(fileNames):
        hname = os.path.basename(fname)
        if not storedFiles.has_key((i, fdecode(hname))):
            logger.warn(self.ctx("%s not in storedMetadata", hname))
            result = False
    if result == False:
        # dump both sides to aid debugging the mismatch
        for i in storedFiles:
            logger.debug(self.ctx("storedBlock = %s", fencode(i)))
        for i in fileNames:
            logger.debug(self.ctx("localBlock = %s", os.path.basename(i)))
    storedFiles['n'] = n
    storedFiles['m'] = m
    return result
def _piggybackStoreMetadata(self, piggybackMeta):
    # Use metadata piggybacked on a STORE response instead of a separate
    # DHT query: rebuild self.sfiles in block-index order, then enter the
    # normal verify path with verification itself skipped (noopVerify).
    # piggybackMeta is a (nodeID, {blockID: storingNodeID, })
    logger.debug(self.ctx("got piggyBackMeta data"))
    meta = piggybackMeta[1]
    sortedKeys = {}
    n = meta['n']
    m = meta['m']
    # keys other than 'n'/'m' are (index, blockhash) tuples; index them
    # by block number so blocks can be appended in order below
    for i in [x for x in meta if x != 'm' and x != 'n']:
        sortedKeys[i[0]] = i
    for i in xrange(n+m):
        self.sfiles.append(fencode(sortedKeys[i][1]))
    # True => trust the piggybacked locations, skip local hashing
    return self._verifyAndStoreBlocks(meta, True)
# 5b -- findnode on all stored blocks.
def _verifyAndStoreBlocks(self, storedMetadata, noopVerify=False):
    # For each local block, look up the node that supposedly stores it
    # and issue a VERIFY; blocks that fail verification get re-STOREd by
    # the chained callbacks.  Finishes by updating the master record.
    # @param storedMetadata: {(index, blockhash): nodeID-or-list, ...}
    # @param noopVerify: if True, verify a zero-length window (used for
    #     piggybacked metadata, where the reported locations are trusted)
    self.blockMetadata = storedMetadata
    dlist = []
    for i, sfile in enumerate(self.sfiles):
        # XXX: metadata should be StringIO to begin with
        f = file(self.mfiles[i])
        mfile = StringIO(f.read())
        f.close()
        seg = os.path.basename(sfile)
        segl = fdecode(seg)
        nid = self.blockMetadata[(i, segl)]
        if isinstance(nid, list):
            logger.info(self.ctx(
                    "multiple location choices, choosing one randomly."))
            nid = random.choice(nid)
            # XXX: for now, this just picks one of the alternatives at
            # random. If the chosen one fails, should try each of the
            # others until it works
        logger.info(self.ctx("looking up %s...", ('%x' % nid)[:8]))
        deferred = self.node.client.kFindNode(nid)
        deferred.addCallback(self._verifyBlock, i, sfile, mfile,
                seg, segl, nid, noopVerify)
        deferred.addErrback(self._storeFileErr,
                "couldn't find node %s... for VERIFY" % ('%x' % nid)[:8],
                False)
        dlist.append(deferred)
    dl = defer.DeferredList(dlist)
    #dl.addCallback(self._storeMetadata)
    # XXX XXX XXX: don't _updateMaster unless we succeed!!
    dl.addCallback(self._updateMaster, storedMetadata)
    return dl
# 5c -- verify all blocks, store any that fail verify.
def _verifyBlock(self, kdata, i, sfile, mfile, seg, segl, nid, noopVerify):
    # Issue a VERIFY for one block against the node found by kFindNode:
    # hash a random (offset, length) window of the local block and ask the
    # remote node to hash the same window; _checkVerify compares the two
    # and re-STOREs the block on mismatch.
    # @param kdata: kFindNode result; kdata['k'][0] is the closest node
    # @param noopVerify: verify a zero-length window (trust the location)
    # XXX: looks like we occasionally get in here on timed out connections.
    # Should go to _storeFileErr instead, eh?
    if isinstance(kdata, str):
        # fix: logging.Logger has no err() method -- the old logger.err()
        # call raised AttributeError on this (already anomalous) path
        logger.error(self.ctx("str kdata=%s", kdata))
    node = kdata['k'][0]
    host = node[0]
    port = node[1]
    id = node[2]
    if id != nid:
        logger.debug(self.ctx("couldn't find node %s", ('%x' %nid)))
        raise ValueError("couldn't find node %s" % ('%x' % nid))
    nKu = FludRSA.importPublicKey(node[3])
    logger.info(self.ctx("verifying %s on %s:%d", seg, host, port))
    if noopVerify:
        # zero-length challenge: expected answer is the hash of ''
        offset = length = 0
        verhash = long(hashstring(''), 16)
        self.sfiles = []
    else:
        # hash a random window (up to 20 bytes) of the local block
        fd = os.open(sfile, os.O_RDONLY)
        fsize = os.fstat(fd)[stat.ST_SIZE]
        if fsize > 20: # XXX: 20?
            length = 20 # XXX: 20?
            offset = random.randrange(fsize-length)
        else:
            length = fsize
            offset = 0
        os.lseek(fd, offset, 0)
        data = os.read(fd, length)
        os.close(fd)
        verhash = long(hashstring(data), 16)
    deferred = self.node.client.sendVerify(seg, offset, length,
            host, port, nKu, (self.mkey, mfile))
    deferred.addCallback(self._checkVerify, nKu, host, port, i, segl,
            sfile, mfile, verhash)
    deferred.addErrback(self._checkVerifyErr, i, segl, sfile, mfile,
            verhash)
    return deferred
def _checkVerify(self, result, nKu, host, port, i, seg, sfile, mfile, hash):
    """Compare the VERIFY response with the locally computed hash.

    On a match, the block is intact: return its fencoded name.  On a
    mismatch, assume the block is lost/corrupt and re-STORE it.
    """
    if hash == long(result, 16):
        # block verified intact
        return fencode(seg)
    logger.info(self.ctx(
            "VERIFY hash didn't match for %s, performing STORE",
            fencode(seg)))
    return self._storeBlock(i, seg, sfile, mfile)
def _checkVerifyErr(self, failure, i, seg, sfile, mfile, hash):
    """Errback for sendVerify: treat any failure as a lost block.

    The block is simply re-STOREd to a fresh node.
    """
    logger.debug(self.ctx("Couldn't VERIFY: %s", failure.getErrorMessage()))
    logger.info(self.ctx("Couldn't VERIFY %s, performing STORE",
            fencode(seg)))
    return self._storeBlock(i, seg, sfile, mfile)
# 6 - store the metadata.
def _storeMetadata(self, dlistresults):
    """Store the accumulated block metadata to the DHT.

    dlistresults is a DeferredList result: a list of (success, value)
    pairs, one per block store/verify.  A None value marks a block whose
    failure was absorbed by an errback; in that case the metadata store
    is skipped and False is returned.
    """
    # cleanup part of storeMetadata:
    logger.debug(self.ctx("dlist=%s", str(dlistresults)))
    # XXX: for any "False" in dlistresults, need to invoke _storeBlocks
    # again on corresponding entries in sfiles.
    for _success, value in dlistresults:
        if value == None:
            logger.info(self.ctx("failed store/verify"))
            return False
    # storeMetadata part of storeMetadata
    # XXX: should sign metadata to prevent forged entries.
    logger.debug(self.ctx("storing metadata at %s", fencode(self.sK)))
    logger.debug(self.ctx("len(segMetadata) = %d", len(self.blockMetadata)))
    d = self.node.client.kStore(self.sK, self.blockMetadata)
    d.addCallback(self._updateMaster, self.blockMetadata)
    d.addErrback(self._storeFileErr, "couldn't store file metadata to DHT")
    return d
# 7 - update local master file record (store it to the network later).
def _updateMaster(self, res, meta):
    # Final stage of a store: delete temporary coded/encrypted artifacts,
    # record the file (and its parent dirs) in the local master metadata,
    # cache the block metadata locally, and release the per-eK op counter.
    # @param meta: the block metadata dict that was stored
    # @return: (fencoded storage key, meta)
    # clean up locally coded files and encrypted file
    for sfile in self.sfiles:
        os.remove(sfile)
    for mfile in self.mfiles:
        os.remove(mfile)
    if self.encodedir: os.rmdir(self.encodedir)
    if self.efilename: os.remove(self.efilename)
    key = fencode(self.sK)
    logger.info(self.ctx("updating local master metadata with %s", key))
    # store the filekey locally
    # update entry for file: maps filename -> (storage key, backup time)
    self.config.updateMasterMeta(self.filename, (self.sK, int(time.time())))
    # update entry for parent dirs (dirs are stored as metadata dicts)
    paths = pathsplit(self.filename)
    for i in paths:
        if not self.config.getFromMasterMeta(i):
            self.config.updateMasterMeta(i, filemetadata(i))
    # XXX: not too efficient to write this out for every file. consider
    # local caching and periodic syncing instead
    self.config.syncMasterMeta()
    # cache the metadata locally (optional)
    fname = os.path.join(self.metadir,key)
    m = open(fname, 'wb')
    m.write(fencode(meta))
    m.close()
    # clean up fs metadata file
    os.remove(self.mfilename)
    #return fencode(self.sK)
    # decrement the concurrent-op refcount for this encryption key; drop
    # the entry entirely once the last pending op completes
    (d, counter) = self.currentOps[self.eK]
    counter = counter - 1
    if counter == 0:
        logger.debug(self.ctx("counter 0 for currentOps %s", self.eK))
        self.currentOps.pop(self.eK)
    else:
        logger.debug(self.ctx("setting counter = %d for %s", counter,
                self.eK))
        self.currentOps[self.eK] = (d, counter)
    return (key, meta)
def _storeFileErr(self, failure, message, raiseException=True):
    # Common errback for store operations: decrement the per-eK pending
    # op refcount (mirroring _updateMaster), log the error, and optionally
    # re-raise it so it keeps propagating down the errback chain.
    # @param failure: the twisted Failure being handled
    # @param message: context string for the log
    # @param raiseException: if False, the failure is absorbed here
    (d, counter) = self.currentOps[self.eK]
    counter = counter - 1
    if counter == 0:
        logger.debug(self.ctx("err counter 0 for currentOps %s", self.eK))
        self.currentOps.pop(self.eK)
    else:
        logger.debug(self.ctx("err setting counter = %d for %s", counter,
                self.eK))
        self.currentOps[self.eK] = (d, counter)
    logger.error(self.ctx("%s: %s", message, failure.getErrorMessage()))
    logger.debug(self.ctx("%s", failure.getTraceback()))
    if raiseException:
        # fix: 'raise failure' is invalid in Python 2 (Failure is not an
        # Exception subclass -> TypeError, masking the real error);
        # raiseException() re-raises the wrapped exception instead.
        failure.raiseException()
class RetrieveFile:
    """
    Uses the given storage key to retrieve a file. The storage key is used
    to query the DHT layer for the file metadata record. The file record
    contains the locations of the file blocks. These are downloaded
    until the complete file can be regenerated and saved locally.
    """
    def __init__(self, node, key, mkey=True):
        # @param node: the local FludNode instance
        # @param key: fencoded storage key (sK) of the file to retrieve
        # @param mkey: per-file metadata key (callers pass crc32 of the
        #     filename); presumably True selects a default -- TODO confirm
        # 1: Query DHT for sK
        # 2: Retrieve entries for sK, decoding until efile can be regenerated
        # 3: Retrieve eK from sK by eK=Kp(eKe). Use eK to decrypt file. Strip
        #    off leading pad.
        # 4: Save file as filepath=Kp(efilepath).
        self.node = node
        self.mkey = mkey
        try:
            self.sK = fdecode(key)
        except Exception, inst:
            # bad key encoding: surface the error through self.deferred
            self.deferred = defer.fail(inst)
            return
        # logging context tagged with a short id derived from the key
        self.ctx = Ctx(crc32(str(self.sK))).msg
        self.config = node.config
        self.Ku = node.config.Ku
        self.Kr = node.config.Kr
        self.routing = self.config.routing.knownNodes()
        self.metadir = self.config.metadir
        self.parentcodedir = self.config.clientdir
        # count of blocks successfully fed to the decoders so far
        self.numDecoded = 0
        self.deferred = self._retrieveFile()
def _retrieveFile(self):
    """Kick off the retrieve by looking up the storage key in the DHT."""
    # 1: Query DHT for sK
    logger.debug(self.ctx("querying DHT for %s", self.sK))
    lookup = self.node.client.kFindValue(self.sK)
    lookup.addCallback(self._retrieveFileBlocks)
    lookup.addErrback(self._retrieveFileErr, "file retrieve failed")
    return lookup
def _retrieveFileBlocks(self, meta):
    # 2: Retrieve entries for sK, decoding until efile can be regenerated
    # @param meta: fencoded metadata from the DHT; decodes to
    #     {(index, blockhash): nodeID-or-list, ..., 'n': n, 'm': m}
    if meta == None:
        raise LookupError("couldn't recover metadata for %s" % self.sK)
    self.meta = fdecode(meta)
    # XXX: need to check for diversity. It could be that data stored
    # previously to a smaller network (<k+m nodes) and that we should
    # try to increase diversity and re-store the data.
    # XXX: also need to make sure we still trust all the nodes in the
    # metadata list. If not, we should move those blocks elsewhere.
    if self.meta == None:
        raise LookupError("couldn't recover metadata for %s" % self.sK)
    n = self.meta.pop('n')
    m = self.meta.pop('m')
    if n != code_n or m != code_m:
        # XXX:
        # fix: the old code formatted "%d/%d" with the single value m/n,
        # which raised a TypeError instead of this intended ValueError
        raise ValueError("unsupported coding scheme %d/%d" % (n, m))
    logger.info(self.ctx("got metadata %s" % self.meta))
    self.decoded = False
    # one decoder rebuilds the data file, the other the metadata file
    self.decoder = Decoder(os.path.join(self.parentcodedir,
            fencode(self.sK))+".rec1", code_n, code_m, code_l)
    self.mdecoder = Decoder(os.path.join(self.parentcodedir,
            fencode(self.sK))+".m", code_n, code_m, code_l)
    #return self._getSomeBlocks()
    return self._getSomeBlocks(25) # XXX: magic 25. Should derive from k & m
def _getSomeBlocks(self, reqs=40): # XXX: magic 40. Should be k+m
    # Request up to reqs randomly chosen blocks from their storing nodes.
    # Each chosen entry is popped from self.meta so it is never requested
    # twice; _retrievedAll decides whether another round is needed.
    tries = 0
    if reqs > len(self.meta):
        reqs = len(self.meta)
    dlist = []
    for i in range(reqs):
        c = random.choice(self.meta.keys())
        # c is an (index, blockhash) tuple; the block is named by its hash
        block = fencode(c[1])
        id = self.meta[c]
        if isinstance(id, list):
            logger.info(self.ctx(
                    "multiple location choices, choosing one randomly."))
            id = random.choice(id)
            # XXX: for now, this just picks one of the alternatives at
            # random. If the chosen one fails, should try each of the
            # others until it works
        #logger.info(self.ctx("retrieving %s from %s"
        #   % (block, fencode(id))))
        logger.info(self.ctx("retrieving %s from %s" % (block, id)))
        # look up nodes by id, then do a retrieve.
        deferred = self.node.client.kFindNode(id)
        deferred.addCallback(self._retrieveBlock, block, id)
        deferred.addErrback(self._retrieveBlockErr,
                "couldn't get block %s from node %s" % (block, fencode(id)))
        dlist.append(deferred)
        self.meta.pop(c)
        tries = tries + 1
        # tries increments once per pass, so it can't exceed reqs here;
        # this break is effectively redundant but kept for safety
        if tries >= reqs:
            break;
    dl = defer.DeferredList(dlist)
    dl.addCallback(self._retrievedAll)
    return dl
def _retrieveBlock(self, kdata, block, id):
#print type(kdata)
#print kdata
#if len(kdata['k']) > 1:
if kdata['k'][0][2] != id:
print "%s != %s" (kdata['k'][0], id)
raise ValueError("couldn't find node %s" % fencode(id))
#raise ValueError("this shouldn't really be a ValueError..."
# " should be a GotMoreKnodesThanIBargainedForError: k=%s"
# % kdata['k'])
#else:
# print kdata['k']
node = kdata['k'][0]
host = node[0]
port = node[1]
id = node[2]
nKu = FludRSA.importPublicKey(node[3])
if not self.decoded:
d = self.node.client.sendRetrieve(block, host, port, nKu, self.mkey)
d.addCallback(self._decodeBlock, block, self.mkey)
d.addErrback(self._retrieveBlockErr,
"couldn't get block %s from %s" % (block, fencode(id)))
return d
def _retrieveBlockErr(self, failure, message):
    # Errback for a single block retrieval: log and swallow the failure.
    logger.info(self.ctx("%s: %s" % (message, failure.getErrorMessage())))
    # don't propagate the error -- one block doesn't cause the file
    # retrieve to fail (only a subset of blocks is needed to decode).
    #return failure
def _retrievedAll(self, success):
    # Called when one round of block requests completes.  Requests more
    # blocks if decoding isn't done and entries remain in self.meta;
    # proceeds to metadata decryption once decoded; raises if decoding
    # failed after exhausting every available block.
    logger.info(self.ctx("tried retreiving %d blocks %s"
            % (len(success), success)))
    if not self.decoded and len(self.meta) > 0:
        tries = 5 # XXX: magic number. Should derive from k & m
        logger.info(self.ctx("requesting %d more blocks" % tries))
        return self._getSomeBlocks(tries)
    if self.decoded:
        logger.info(self.ctx("file successfully decoded"))
        return self._decryptMeta()
    else:
        logger.info(self.ctx("couldn't decode file after retreiving all %d"
                " available blocks" %self.numDecoded))
        #return False
        raise RuntimeError("couldn't decode file after retreiving all %d"
                " available blocks" %self.numDecoded)
def decodeData(self, decoder, data):
    """Feed one retrieved piece into the given decoder.

    Thin wrapper so decoding can be pushed off the reactor thread via
    deferToThread; returns the decoder's result.
    """
    decoded = decoder.decodeData(data)
    return decoded
def _decodeError(self, err):
    # Errback for a threaded decode attempt: log and propagate the
    # failure so downstream errbacks still see it.
    logger.warn(self.ctx("could not decode: %s", err))
    return err
def _decodeBlock(self, msg, block, mkey):
    # Callback for sendRetrieve: msg lists the delivered filenames (the
    # block data plus its metadata file).  Decode the metadata piece in a
    # worker thread, then chain into decoding the data piece.
    logger.debug(self.ctx("decode block=%s, msg=%s" % (block, msg)))
    self.numDecoded += 1
    # the delivered block file's name ends with the block name
    blockname = [f for f in msg if f[-len(block):] == block][0]
    expectedmeta = "%s.%s.meta" % (block, mkey)
    metanames = [f for f in msg if f[-len(expectedmeta):] == expectedmeta]
    if not metanames:
        raise failure.DefaultException("expected metadata was missing")
    d = threads.deferToThread(self.decodeData, self.mdecoder, metanames[0])
    d.addCallback(self._metadataDecoded, blockname)
    d.addErrback(self._decodeError)
    return d
def _metadataDecoded(self, metadecoded, blockname):
    """After the metadata decoder consumed its piece, decode the data block.

    Runs the data decode in a worker thread and passes the metadata
    decoder's verdict along to _dataDecoded.
    """
    dataDeferred = threads.deferToThread(self.decodeData, self.decoder,
            blockname)
    dataDeferred.addCallback(self._dataDecoded, metadecoded)
    dataDeferred.addErrback(self._decodeError)
    return dataDeferred
def _dataDecoded(self, decoded, metadecoded):
    """Record overall success once both decoders report completion.

    self.decoded flips to True exactly once, when both the data decoder
    and the metadata decoder have enough pieces.
    """
    finished = decoded and metadecoded and not self.decoded
    if finished:
        self.decoded = True
        logger.info(self.ctx("successfully decoded (retrieved %d blocks --"
                " all but %d blocks tried)" % (self.numDecoded,
                len(self.meta))))
    else:
        logger.info(self.ctx("decoded=%s, mdecoded=%s" % (decoded,
                metadecoded)))
def _decryptMeta(self):
    """Read the reassembled metadata file for this storage key and decode it.

    Stores the decoded dict in self.nmeta, then moves on to decrypting
    the file data itself.
    """
    # XXX: decrypt the metadatafile with Kr to get all the nmeta stuff (eeK
    # etc.)
    metapath = os.path.join(self.parentcodedir, fencode(self.sK) + ".m")
    mfile = open(metapath)
    try:
        meta = mfile.read()
    finally:
        mfile.close()
    logger.info(self.ctx("meta is %s" % meta))
    self.nmeta = fdecode(meta)
    return self._decryptFile()
def _decryptFile(self):
    # 3: Retrieve eK from sK by eK=Kr(eeK). Use eK to decrypt file. Strip
    # off leading pad.
    # 4: Move the recovered file to its recorded path, restoring owner,
    # permissions and times from the stored file metadata.
    # @return: tuple of one or two paths -- (path,) normally, or
    #     (path+".recovered", path) when a differing local copy exists
    skey = fencode(self.sK)
    f1 = open(os.path.join(self.parentcodedir,skey+".rec1"), "r")
    f2 = open(os.path.join(self.parentcodedir,skey+".rec2"), "w")
    eeK = fdecode(self.nmeta['eeK'])
    # d_eK business is to ensure that eK is zero-padded to 32 bytes:
    # RSA decryption strips leading zero bytes, so restore them.
    # fix: the old code used 32 % len(d_eK), which adds NO padding when
    # the decrypted key comes back at 16 bytes or fewer (e.g. len 16 ->
    # 32%16 == 0), yielding a wrong AES key; 32 - len is the intended pad.
    d_eK = self.Kr.decrypt(eeK)
    d_eK = '\x00'*(32-len(d_eK))+d_eK # XXX: magic 32, should be keyspace/8
    eK = binascii.hexlify(d_eK)
    eKey = AES.new(binascii.unhexlify(eK))
    # XXX: bad blocking stuff, move into thread
    # decrypt .rec1 -> .rec2 in AES-block-sized (16 byte) chunks
    while 1:
        buf = f1.read(16)
        if buf == "":
            break;
        f2.write(eKey.decrypt(buf))
    f1.close()
    f2.close()
    os.remove(os.path.join(self.parentcodedir,skey+".rec1"))
    # strip the leading pad: the first plaintext byte is the pad length
    # (which includes the length byte itself), followed by pad bytes
    f2 = open(os.path.join(self.parentcodedir,skey+".rec2"), "r")
    f3 = open(os.path.join(self.parentcodedir,skey+".rec3"), "w")
    padlen = f2.read(1)
    padlen = ord(padlen)
    padlen -= 1
    pad = f2.read(padlen) # throw away pad.
    while 1:
        buf = f2.read(16)
        if buf == "":
            break;
        f3.write(buf)
    f2.close()
    f3.close()
    os.remove(os.path.join(self.parentcodedir,skey+".rec2"))
    # 4: Move file to its correct path, imbue it with properties from
    # metadata.
    # XXX: should we make sure we can read metadata before downloading all
    # the file data?
    # the file metadata was encrypted with our public key in 128-byte
    # chunks; decrypt each chunk with the private key and reassemble
    efmeta = fdecode(self.nmeta['meta'])
    fmeta = ""
    for i in range(0, len(efmeta), 128):
        fmeta += self.Kr.decrypt(efmeta[i:i+128])
    fmeta = fdecode(fmeta)
    result = [fmeta['path']]
    if os.path.exists(fmeta['path']):
        # file is already there -- compare it. If different, save as
        # path.recovered and keep a list of these (to let the user know
        # that they'll need to resolve later). Or don't keep a list and
        # just 'do the right thing' (use the latest version by timestamp,
        # or always use the backup, or always use the local copy, or
        # define some other behavior for doing the right thing).
        logger.info(self.ctx("hash rec=%s" % hashfile(fmeta['path'])))
        logger.info(self.ctx("hash org=%s" % eK))
        if hashfile(fmeta['path']) != eK:
            # XXX: do something better than log it -- see above comment
            logger.info(self.ctx(
                    'different version of file %s already present'
                    % fmeta['path']))
            # XXX: should generate '.recovered' extension more carefully,
            # so as not to overwrite coincidentally named files.
            fmeta['path'] = fmeta['path']+".recovered"
            result.insert(0,fmeta['path'])
            os.rename(os.path.join(self.parentcodedir,skey+".rec3"),
                    fmeta['path'])
        else:
            logger.info(self.ctx('same version of file %s already present'
                    % fmeta['path']))
            # no need to copy:
            os.remove(os.path.join(self.parentcodedir,skey+".rec3"))
    else:
        # recover parent directories if not present
        paths = pathsplit(fmeta['path'])
        for i in paths:
            if not os.path.exists(i) and i != fmeta['path']:
                os.mkdir(i) # best effort dir creation, even if missing
                            # directory metadata
                # XXX: should be using an accessor method on config for
                # master
                if i in self.config.master:
                    dirmeta = self.config.getFromMasterMeta(i)
                    os.chmod(i,dirmeta['mode'])
                    os.chown(i,dirmeta['uid'],dirmeta['gid']) # XXX: windows
                    # XXX: atim, mtim, ctim
                # XXX: should try to make sure we can write to dir, change
                # perms if necessary.
        # recover file by renaming to its path
        os.rename(os.path.join(self.parentcodedir,skey+".rec3"),
                fmeta['path'])
        # XXX: chown not supported on Windows
        os.chown(fmeta['path'], fmeta['uid'], fmeta['gid'])
        os.utime(fmeta['path'], (fmeta['atim'], fmeta['mtim']))
        os.chmod(fmeta['path'], fmeta['mode'])
    return tuple(result)
def _retrieveFileErr(self, failure, message, raiseException=True):
    # Terminal errback for the whole retrieve: log, then propagate.
    # NOTE(review): despite the parameter name, this RETURNS the failure
    # (which still propagates it down the errback chain) rather than
    # raising -- confirm the name/behavior mismatch is intentional.
    logger.error(self.ctx("%s: %s" % (message, failure.getErrorMessage())))
    if raiseException:
        return failure
class RetrieveFilename:
    """
    Retrieves a File given its local name. Only works if the local master
    index contains an entry for this filename.
    """
    def __init__(self, node, filename):
        # @param node: local FludNode instance
        # @param filename: local path recorded in the master metadata
        self.node = node
        self.filename = filename
        self.metadir = self.node.config.metadir
        self.config = self.node.config
        self.deferred = self._recoverFile()
    def _recoverFile(self):
        # Look the filename up in the local master metadata.  Directory
        # entries are dicts (filemetadata); file entries are
        # (filekey, backuptime) tuples (see StoreFile._updateMaster).
        fmeta = self.config.getFromMasterMeta(self.filename)
        if fmeta:
            if isinstance(fmeta, dict):
                logger.debug("%s is a directory in master metadata",
                        self.filename)
                # RetrieveFile will restore parent dirs, so we don't need to
                dlist = []
                dirname = self.filename+os.path.sep
                # XXX: this should be calling a config.getAllFromMasterMeta()
                for i in [x for x in self.config.master.keys()
                        if dirname == x[:len(dirname)]]:
                    # fix: the old code subscripted the bound method
                    # (getFromMasterMeta[i]) instead of calling it, which
                    # raised TypeError as soon as a directory was retrieved
                    entry = self.config.getFromMasterMeta(i)
                    if isinstance(entry, dict):
                        # subdirectory record: its files appear under their
                        # own keys in this same loop, so nothing to fetch
                        continue
                    # file records are (filekey, backuptime) tuples; only
                    # the filekey names the stored data
                    (filekey, backuptime) = entry
                    metakey = crc32(i)
                    logger.debug("calling RetrieveFile %s" % filekey)
                    d = RetrieveFile(self.node, fencode(filekey),
                            metakey).deferred
                    dlist.append(d)
                dl = defer.DeferredList(dlist)
                return dl
            else:
                logger.debug("%s is file in master metadata", self.filename)
                (filekey, backuptime) = self.config.getFromMasterMeta(
                        self.filename)
                metakey = crc32(self.filename)
                if filekey != None and filekey != "":
                    logger.debug("calling RetrieveFile %s" % filekey)
                    d = RetrieveFile(self.node, fencode(filekey),
                            metakey).deferred
                    return d
                return defer.fail(LookupError("bad filekey %s for %s"
                        % (filekey, self.filename)))
        return defer.fail(LookupError("no record of %s" % self.filename))
class VerifyFile:
    # XXX: remove me? I don't do anything that StoreFile can't do, plus if
    #      I fail you'd still need to call StoreFile right after...
    #      Or, do we keep me around and rip out all the verify stuff from
    #      StoreFile and put it in here?
    #      -- What we really want to do is look at trading partners for all our
    #      files, and then check each one every day with some random bit of
    #      data from some random file. But we also want eventual coverage of
    #      all stored files. It seems like a good first approach would be to
    #      scan over our local copies of all DHT records and make a list of
    #      storing nodes, which will be of size N. Then, do k VERIFY ops to
    #      each node in N, using a random subset of the files we have stored.
    #      The VerifyFile object's purpose may be orthogonal to that, or
    #      completely unnecessary, as the described scheme can be accomplished
    #      with plain VERIFY ops.
    def verifyFile(self, filepath):
        """
        Chooses some random blocks from filepath to verify against the store.
        The algorithm is as follows: sK = H(H(file at filepath)). Look up sK
        in the local master index. If the record isn't there, return this
        fact. If the record is there, retrieve its metadata. Verify k
        blocks as follows:
          With probability n/(m+n), code the file locally (to verify coded
        blocks with a fair probability, i.e., if m=40 and n=20, 33% of the
        time we will do the coding).
          Choose k blocks from the resulting blocks and using the file
        metadata record, do a VERIFY operation using a random offset and random
        length (if we chose not to do the coding in the previous step, the k
        blocks must come entirely from the non-coded portion). As we wait
        for the VERIFYs to return, hash these blocks locally. As each VERIFY
        returns, compare it with our local hash just computed. Return a list
        of hosts/nodeids for which the VERIFY failed.
        """
        pass  # remove me -- unimplemented; see the design notes above
class RetrieveMasterIndex:
    """
    Recovers this node's master metadata index from the network: the
    node's own ID maps (in the DHT) to a CAS key, which in turn names the
    stored master index file; that file is retrieved like any other.
    """
    def __init__(self, node):
        self.node = node
        nodeID = long(self.node.config.nodeID, 16)
        # 1. CAS = kfindval(nodeID) (CAS for last FLUDHOME/meta/master)
        logger.info("looking for key %x" % nodeID)
        self.deferred = self.node.client.kFindValue(nodeID)
        self.deferred.addCallback(self._foundCAS)
        self.deferred.addErrback(self._retrieveMasterIndexErr,
                "couldn't find master metadata")
    def _foundCAS(self, CAS):
        # 2. oldmaster = kfindval(CAS)
        # a dict result presumably means kFindValue returned nodes rather
        # than the stored value, i.e. the key wasn't found -- confirm
        if isinstance(CAS, dict):
            return defer.fail(ValueError("couldn't find CAS key"))
        CAS = fdecode(CAS)
        d = RetrieveFile(self.node, CAS).deferred
        d.addCallback(self._foundMaster)
        d.addErrback(self._retrieveMasterIndexErr, "couldn't find Master Index")
        return d
    def _foundMaster(self, result):
        # RetrieveFile returns two paths when a differing local copy
        # forced a ".recovered" rename; prefer the network copy
        if len(result) == 2:
            # got two filenames back, must mean we should choose one: the
            # one from the distributed store
            os.rename(result[0], result[1])
            result = (result[1],)
        return result
    def _retrieveMasterIndexErr(self, err, msg):
        # log and propagate the failure
        logger.warn(msg)
        return err
class UpdateMasterIndex:
    """
    Stores this node's local master metadata index to the network and
    points the node's own DHT entry at it.
    """
    def __init__(self, node):
        self.node = node
        self.metamaster = os.path.join(self.node.config.metadir,
                self.node.config.metamaster)
        # 0.1. oldmaster = RetrieveMasterIndex()
        # (the errback path also stores: failing to find an old index just
        # means there is nothing to remove first)
        self.deferred = RetrieveMasterIndex(node).deferred
        self.deferred.addCallback(self._removeOldMasterIndex)
        self.deferred.addErrback(self._storeMasterIndex)
    def _removeOldMasterIndex(self, res):
        # 0.2. for i in oldmaster: delete(i)
        print "removing old master not yet implemented"
        return self._storeMasterIndex(res)
    def _storeMasterIndex(self, res_or_err):
        # 1. store FLUDHOME/meta/master
        print "going to store %s" % self.metamaster
        d = StoreFile(self.node, self.metamaster).deferred
        d.addCallback(self._updateCAS)
        d.addErrback(self._updateMasterIndexErr, "couldn't store master index")
        return d
    def _updateCAS(self, stored):
        # 2. kstore(nodeID, CAS(FLUDHOME/meta/master))
        # stored is the (key, meta) pair returned by StoreFile's
        # _updateMaster; only the key is published under our nodeID
        #print "stored = %s" % str(stored)
        key, meta = stored
        logger.info("storing %s at %x" % (key,
                long(self.node.config.nodeID,16)))
        d = self.node.client.kStore(long(self.node.config.nodeID,16),
                key) # XXX: key should be fdecode()ed
        return d
    def _updateMasterIndexErr(self, err, msg):
        # log and propagate the failure
        logger.warn(msg)
        return err
if __name__ == "__main__":
    # Manual test driver: brings up a FludNode and exercises store /
    # retrieve / lost-block / corrupted-block recovery ops against it
    # (or bootstraps via a peer given as "host port" on the command line).
    from FludNode import FludNode
    def successTest(res, fname, whatfor, nextStage=None):
        # generic terminal callback; nextStage is accepted so stages can
        # be chained positionally, but is not invoked here
        logger.info("finished %s" % whatfor)
    def errTest(failure):
        logger.info("boom: %s" % failure.getErrorMessage())
        # NOTE(review): raising a Failure instance is not a valid Python 2
        # raise (Failure isn't an Exception subclass); consider
        # failure.raiseException() -- confirm
        raise failure
    def fileKey(fname):
        # storage key: hash of the file's content hash
        EK = hashfile(fname)
        return fencode(long(hashstring(EK), 16))
    def clearMeta(fname):
        # delete any metadata that might exist for this file.
        try:
            SK = fileKey(fname)
            os.remove(os.path.join(n.config.kstoredir,SK))
            logger.info("test removed %s" % os.path.join(n.config.kstoredir,SK))
        except:
            pass
    def doStore(fname, msg=None, nextStage=successTest):
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        # do a store
        logger.info("nextStage is %s" % nextStage)
        d = StoreFile(n,fname).deferred
        d.addCallback(nextStage, fname, 'store op', doCorruptSegAndStore)
        d.addErrback(errTest)
    def doDelSegAndStore((key, meta), fname, msg=None, nextStage=successTest):
        # only works if stores are local
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        # delete a block and do a store
        c = random.choice(meta.keys())
        logger.info("removing %s" % fencode(c))
        os.remove(os.path.join(n.config.storedir,fencode(c)))
        logger.info("test removed %s" % os.path.join(n.config.storedir,
                fencode(c)))
        d = StoreFile(n,fname).deferred
        d.addCallback(nextStage, fname, 'lost block op')
        d.addErrback(errTest)
    def doCorruptSegAndStore((key, meta), fname, msg=None,
            nextStage=successTest):
        # only works if stores are local
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        # corrupt a block and do a store
        c = random.choice(meta.keys())
        logger.info("corrupting %s" % fencode(c))
        f = open(os.path.join(n.config.storedir,fencode(c)), 'r')
        data = f.read()
        f.close()
        f = open(os.path.join(n.config.storedir,fencode(c)), 'w')
        f.write('blah'+data)
        f.close()
        d = StoreFile(n,fname).deferred
        d.addCallback(nextStage, fname, 'corrupted block op')
        d.addErrback(errTest)
    def doRetrieve(key, msg=None, nextStage=successTest):
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        d = RetrieveFile(n, key).deferred
        d.addCallback(nextStage, key, 'retrieve op')
        d.addErrback(errTest)
    def doRetrieveName(filename, msg=None, nextStage=successTest):
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        d = RetrieveFilename(n, filename).deferred
        d.addCallback(nextStage, filename, 'retrieve filename op')
        d.addErrback(errTest)
    def runTests(dummy):
        # test against self -- all stores and queries go to self.
        fname = "/tmp/nrpy.pdf"
        #clearMeta(fname)
        #doStore(fname, None, doDelSegAndStore) # do all stages of testing
        doStore(fname) # only do one op (for manual testing)
        #doRetrieve(fileKey(fname))
        #doRetrieveName(fname)
    n = FludNode()
    n.run()
    if len(sys.argv) == 3:
        # bootstrap through the given peer before running tests
        deferred = n.client.sendkFindNode(sys.argv[1], int(sys.argv[2]), 1)
        deferred.addCallback(runTests)
        deferred.addErrback(errTest)
    else:
        runTests(None)
    n.join()
| Python |
"""
Okay, so this isn't a real python module, yet. Wanted to get down a few ideas
on versioning. First, the background.
Traditional backup systems that provide versioning support allow the user to
retrieve the current version of a file, or any of N previous versions that were
stored during previous backup operations. Since it was rather trivial to
simply keep old versions on the central backup server, this wasn't much of an
engineering problem (at worst, disk fills up quickly).
With a collaborative backup system, such a scheme is less practical. If fully
enabled, it can consume many times the storage space of a simple
single-snapshot system. If you want to enforce fairness, you must require
that the number of resources you consume is proportional to those that you
provide. Encoding already dictates that this ratio is imbalanced towards
providing more resources than consuming. But "server-side" versioning, even
when using a clever delta-compression technique, really tips the scales.
There is good news, however. We can use a single-snapshot system to provide
versioning by requiring all versioning to occur locally. That is, the
consumer's own hard drive can be used to maintain multiple versions of files,
and then the whole thing can be backed up as a single-snapshot to the flud
network. Think of a local CVS repository (with many versions contained
therein) that is set to be backed up; the backup system doesn't have to worry
about versioning -- it just backs up the current data. The local CVS repo
is in charge of worrying about versions. To the user, it's all the same.
The advantages of this scheme are mainly:
1) simplicity
2) storage consumption minimization, opt-in
3) decoupling of versioning layer from backup layer
Of the three, #1 is really the most appealing. We just back up the current
view. This also greatly simplifies the verification mechanism -- the verifier
will always have the complete file from which to do challenge/response queries
to the verifiee. We don't have to worry about keeping deltas or partial
checksums or anything like that in order to do verification; we just pick a
block of bytes at random from within the file, and make sure that the storer
can return us the hash of those bytes. #1 also means that we don't have to do
anything complicated to figure out what the delta of a delta-compressed version
should be (i.e., we don't need to request the old version from the storage
system, compare it with our version, then send out a delta) -- in fact, with
this scheme we wipe out delta compression altogether, at least from the
viewpoint of the storage mechanism (some other local mechanism is welcome to
use delta compression to store versions locally, but this mechanism won't need
to download lots of data in order to do so, because it will all be local).
#2 is nice. It means that if the user isn't interested in versioning, they
don't have to do it. This will be the default, in fact. This means that we
eliminate a lot of overhead that we would have had if every user was storing
versions, even if they didn't need them. It also means that there is an
automatic cost for enabling versions, not only for the collaborative system,
but for the user's local storage resources. Not to imply that we want to
punish the user for enabling versions, but there's no free lunch (and besides,
adding local disk is cheap).
[as an aside, here, versioning does become necessary quite quickly for things
such as email clients that store all mail in a particular folder as one
large file, or other applications that use databases in single files -- we
don't want the user to have to send the whole file (which can become quite
large) every time they get a new email or change the db slightly. The good
news is that we can still provide this in its own layer].
Decoupling (#3) is always a good idea, especially when it can be done cleanly.
The user (or flud client developer) is free to implement whatever local
versioning scheme they want. They can make copies of files and store them in
other directories, they could use a version control system such as CVS, they
could do their own delta compression and store the deltas in a special
directory. They could store as many or as few versions as they want. They can
take version snapshots often or seldom, and this is independent of how often
they perform backup. And such schemes can be switched out, upgraded, or
removed on the fly without anyone really noticing.
So, what is this module all about then? Well, for now, nothing but this
documentation. If the user wants versioning, they'll have to do it themselves,
(keeping in mind that the system *will* store those local versions, and they
can rest easy knowing that they can be retrieved). At some future point, we
will implement some fancy versioning layer with local delta compression in this
module to 'complete the package,' but it is currently a low priority endeavor.
Its priority will rise as we get close to providing a 'for your grandma'
solution.
"""
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
"""
Test code for primitive DHT operations.
"""
stay_alive = 1
filename = "/tmp/tempstoredata"
filekey = os.path.basename(filename)
key = 87328673569979667228965797330646992089697345905484734072690869757741450870337L
# format of block metadata is
# {(i, datakey): storingNodeID, ..., 'n': n, 'm': m}
# where i<n+m, datakey is a sha256 of the data stored, and storingNodeID is
# either a nodeID or a list of nodeIDs.
testval = {(0, 802484L): 465705L, (1, 780638L): 465705L, (2, 169688L): 465705L,
(3, 267175L): 465705L, (4, 648636L): 465705L, (5, 838315L): 465705L,
(6, 477619L): 465705L, (7, 329906L): 465705L, (8, 610565L): 465705L,
(9, 217811L): 465705L, (10, 374124L): 465705L, (11, 357214L): 465705L,
(12, 147307L): 465705L, (13, 427751L): 465705L, (14, 927853L): 465705L,
(15, 760369L): 465705L, (16, 707029L): 465705L, (17, 479234L): 465705L,
(18, 190455L): 465705L, (19, 647489L): 465705L, (20, 620470L): 465705L,
(21, 777532L): 465705L, (22, 622383L): 465705L, (23, 573283L): 465705L,
(24, 613082L): 465705L, (25, 433593L): 465705L, (26, 584543L): 465705L,
(27, 337485L): 465705L, (28, 911014L): 465705L, (29, 594065L): 465705L,
(30, 375876L): 465705L, (31, 726818L): 465705L, (32, 835759L): 465705L,
(33, 814060L): 465705L, (34, 237176L): 465705L, (35, 538268L): 465705L,
(36, 272650L): 465705L, (37, 314058L): 465705L, (38, 257714L): 465705L,
(39, 439931L): 465705L, 'n': 20, 'm': 20}
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
def cleanup(_, node):
	""" Schedules node shutdown after a grace period of stay_alive seconds. """
	logger.info("waiting %ds to shutdown..." % stay_alive)
	reactor.callLater(stay_alive, node.stop)
def testerror(failure, message, node):
	"""
	error handler for test errbacks

	Logs the failure and a stage message.  Note it does not re-raise or
	return the failure, so the errback chain terminates here.
	"""
	logger.warn("testerror message: %s" % message)
	logger.warn("testerror: '%s'" % str(failure))
	logger.warn("At least 1 test FAILED")
def endtests(res, nKu, node, host, port):
	""" Final stage: decode the retrieved value and compare it to testval. """
	try:
		decoded = fdecode(res)
	except ValueError:
		# res was not fencoded; compare it as-is
		decoded = res
	if decoded == testval:
		logger.info("testkFindVal PASSED")
		logger.debug("testkFindVal result: %s" % str(decoded))
		logger.info("all tests PASSED")
		return decoded
	return testerror(None, "retrieved value does not match stored value:"
			" '%s' != '%s'" % (decoded, testval), node)
def testkFindVal(res, nKu, node, host, port):
	""" Stage: local kFindValue lookup of the stored key; chains endtests. """
	logger.info("testSendkFindVal PASSED")
	logger.debug("testSendkFindVal result: %s" % str(res))
	logger.info("attempting testkFindValue")
	return node.client.kFindValue(key
			).addCallback(endtests, nKu, node, host, port
			).addErrback(testerror, "failed at testkFindValue", node)
def testSendkFindVal(res, nKu, node, host, port):
	""" Stage: direct sendkFindValue to host:port; chains testkFindVal. """
	logger.info("testkStore PASSED")
	logger.debug("testkStore result: %s" % str(res))
	logger.info("attempting testSendkFindValue")
	return node.client.sendkFindValue(host, port, key
			).addCallback(testkFindVal, nKu, node, host, port
			).addErrback(testerror, "failed at testSendkFindValue", node)
def testkStore(res, nKu, node, host, port):
	""" Stage: full kStore of testval under key; chains testSendkFindVal. """
	logger.info("testSendkStore PASSED")
	logger.debug("testSendkStore result: %s" % str(res))
	logger.info("attempting testkStore")
	return node.client.kStore(key, testval
			).addCallback(testSendkFindVal, nKu, node, host, port
			).addErrback(testerror, "failed at testkStore", node)
def testSendkStore(res, nKu, node, host, port):
	""" Stage: direct sendkStore of testval to host:port; chains testkStore. """
	logger.info("testkFindNode PASSED")
	logger.debug("testkFindNode result: %s" % str(res))
	logger.info("attempting testSendkStore")
	deferred = node.client.sendkStore(host, port, key, testval)
	deferred.addCallback(testkStore, nKu, node, host, port)
	# fix: message previously said "failed at testkStore", misattributing
	# failures of this stage to the next one
	deferred.addErrback(testerror, "failed at testSendkStore", node)
	return deferred
def testkFindNode(res, nKu, node, host, port):
	""" executes after testSendkFindNode; runs the full kFindNode lookup """
	logger.info("testSendkFindNode PASSED")
	logger.debug("testSendkFindNode result: %s" % str(res))
	logger.info("attempting kFindNode")
	return node.client.kFindNode(key
			).addCallback(testSendkStore, nKu, node, host, port
			).addErrback(testerror, "failed at kFindNode", node)
def testSendkFindNode(nKu, node, host, port):
	""" executes after testGetID; direct sendkFindNode to host:port """
	logger.info("testkGetID PASSED")
	logger.info("attempting sendkFindNode")
	return node.client.sendkFindNode(host, port, key
			).addCallback(testkFindNode, nKu, node, host, port
			).addErrback(testerror, "failed at sendkFindNode", node)
def testGetID(node, host, port):
	""" Tests sendGetID(), and invokes testSendkFindNode on success """
	return node.client.sendGetID(host, port
			).addCallback(testSendkFindNode, node, host, port
			).addErrback(testerror, "failed at testGetID", node)
def runTests(host, port=None, listenport=None):
	""" Starts a FludNode and drives the DHT op test chain against host:port. """
	host = getCanonicalIP(host)
	node = FludNode(port=listenport)
	if port is None:
		port = node.config.port
	logger.info("testing against %s:%s, localport=%s" % (host,
			port, listenport))
	node.run()
	testGetID(node, host, port).addBoth(cleanup, node)
	node.join()
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1]))
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2]))
elif len(sys.argv) == 4:
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket, shutil, tempfile
from binascii import crc32
from StringIO import StringIO
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.FludCrypto import generateRandom, hashfile
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
smallfilekey = ""
smallfilename = ""
smallfilenamebad = ""
largefilekey = ""
largefilename = ""
largefilenamebad = ""
metadata = 'aaaa'
def testerror(failure, message, node):
"""
error handler for test errbacks
"""
print "testerror message: %s" % message
print "testerror: %s" % str(failure)
print "At least 1 test FAILED"
raise failure
def testUnexpectedSuccess(res, message, node):
print "unexpected success message: %s" % message
print "At least 1 test succeeded when it should have failed"
raise "bad"
def testDELETEBadKeyFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.BadRequestException'):
print "%s" % msg
# the end
else:
# XXX: here and elsewhere, raise something more descriptive, otherwise
# its waay confusing
print "the following trace may be misleading..."
raise failure
def testDELETEBadKey(nKu, node, host, port):
print "starting testDELETEBadKey"
path = os.path.join("somedir", largefilekey)
deferred = node.client.sendDelete(path, crc32(path), host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "DELETE with bad key succeeded",
node)
deferred.addErrback(testDELETEBadKeyFailed,
"DELETE with bad key failed as expected", node, nKu, host, port)
return deferred
def testVERIFYBadKeyFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.NotFoundException'):
print "%s" % msg
return testDELETEBadKey(nKu, node, host, port)
else:
raise failure
def testVERIFYBadKey(nKu, node, host, port):
print "starting testVERIFYBadKey"
fsize = os.stat(smallfilename)[stat.ST_SIZE]
offset = fsize-20
deferred = node.client.sendVerify(smallfilenamebad, offset, 5, host,
port, nKu)
deferred.addCallback(testUnexpectedSuccess,
"verified file with bad key succeeded", node)
deferred.addErrback(testVERIFYBadKeyFailed,
"VERIFY of bad filekey failed as expected", node, nKu, host, port)
return deferred
def testVERIFYBadLengthFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.BadRequestException'):
print "%s" % msg
return testVERIFYBadKey(nKu, node, host, port)
else:
raise failure
def testVERIFYBadLength(nKu, node, host, port):
print "starting testVERIFYBadOffset"
fsize = os.stat(smallfilename)[stat.ST_SIZE]
offset = fsize-10
deferred = node.client.sendVerify(smallfilekey, offset, 20, host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "verified file with bad length",
node)
deferred.addErrback(testVERIFYBadLengthFailed,
"VERIFY of bad length failed as expected", node, nKu, host, port)
return deferred
def testVERIFYBadOffsetFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.BadRequestException'):
print "%s" % msg
return testVERIFYBadLength(nKu, node, host, port)
else:
print "VERIFYBadOffset failed as expected, but with wrong failure"
raise failure
def testVERIFYBadOffset(nKu, node, host, port):
print "starting testVERIFYBadOffset"
fsize = os.stat(smallfilename)[stat.ST_SIZE]
offset = fsize+2
deferred = node.client.sendVerify(smallfilekey, offset, 20, host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "verified file with bad offset",
node)
deferred.addErrback(testVERIFYBadOffsetFailed,
"VERIFY of bad offset failed as expected", node, nKu, host, port)
return deferred
def testVERIFYNotFoundFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.NotFoundException'):
print "%s" % msg
return testVERIFYBadOffset(nKu, node, host, port)
else:
raise failure
def testVERIFYNotFound(nKu, node, host, port):
print "starting testVERIFYNotFound"
deferred = node.client.sendVerify(largefilekey, 10, 10, host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "verified non-existent file",
node)
deferred.addErrback(testVERIFYNotFoundFailed,
"VERIFY of non-existent file failed as expected", node, nKu,
host, port)
return deferred
def testRETRIEVEIllegalPathFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.BadRequestException'):
print "%s" % msg
return testVERIFYNotFound(nKu, node, host, port)
else:
raise failure
def testRETRIEVEIllegalPath(nKu, node, host, port):
print "starting testRETRIEVEIllegalPath"
deferred = node.client.sendRetrieve(os.path.join("somedir",smallfilekey),
host, port, nKu)
deferred.addCallback(testUnexpectedSuccess,
"retrieved file with illegal path", node)
deferred.addErrback(testRETRIEVEIllegalPathFailed,
"RETRIEVE using illegal path failed as expected", node, nKu,
host, port)
return deferred
def testRETRIEVENotFoundFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.NotFoundException'):
print "%s" % msg
return testRETRIEVEIllegalPath(nKu, node, host, port)
else:
raise failure
def testRETRIEVENotFound(nKu, node, host, port):
print "starting testRETRIEVENotFound"
deferred = node.client.sendRetrieve(largefilekey, host, port, nKu)
deferred.addCallback(testUnexpectedSuccess,
"retrieved file that shouldn't exist", node)
deferred.addErrback(testRETRIEVENotFoundFailed,
"RETRIEVE of non-existent file failed as expected", node, nKu,
host, port)
return deferred
def testSTORELargeFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.BadCASKeyException'):
print "%s" % msg
return testRETRIEVENotFound(nKu, node, host, port)
else:
raise failure
def testSTOREBadKeyLarge(nKu, node, host, port):
print "starting testSTOREBadKeyLarge"
deferred = node.client.sendStore(largefilenamebad,
(crc32(largefilenamebad), StringIO(metadata)), host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "large file, bad key succeeded",
node)
deferred.addErrback(testSTORELargeFailed,
"large STORE with bad key failed as expected", node, nKu,
host, port)
return deferred
def testSTORESmallFailed(failure, msg, node, nKu, host, port):
if failure.check('protocol.FludCommUtil.BadCASKeyException'):
print "%s" % msg
return testSTOREBadKeyLarge(nKu, node, host, port)
else:
raise failure
def testSTOREBadKeySmall(nKu, node, host, port):
print "starting testSTOREBadKeySmall"
deferred = node.client.sendStore(smallfilenamebad,
(crc32(smallfilenamebad), StringIO(metadata)), host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "small file, bad key succeeded",
node)
deferred.addErrback(testSTORESmallFailed,
"small STORE with bad key failed as expected", node, nKu,
host, port)
return deferred
def testSTORESuccess(res, nKu, node, host, port):
	""" Fires after the baseline STORE succeeds; starts the bad-key tests. """
	print "testSTORE succeeded: %s" % res
	return testSTOREBadKeySmall(nKu, node, host, port)
def testSTORE(nKu, node, host, port):
# store a file successfully for later failure tests (VERIFY, etc)
print "starting testSTORE"
deferred = node.client.sendStore(smallfilename,
(crc32(smallfilename), StringIO(metadata)), host, port, nKu)
deferred.addCallback(testSTORESuccess, nKu, node, host, port)
deferred.addErrback(testerror, "failed at testSTORE", node)
return deferred
# XXX: need to test bogus headers for all commands (BAD_REQUEST)
# XXX: need to test failures for authentication
def testID(node, host, port):
	""" Tests sendGetID(), and invokes testSTORE on success """
	print "starting testID"
	deferred = node.client.sendGetID(host, port)
	# sendGetID's callback result (the remote node's public key) becomes
	# testSTORE's nKu argument
	deferred.addCallback(testSTORE, node, host, port)
	#deferred.addCallback(testSTOREBadKeySmall, node, host, port)
	deferred.addErrback(testerror, "failed at testID", node)
	return deferred
def cleanup(err, node):
if err:
print "cleaning up: %s" % err
else:
print "cleaning up"
os.remove(smallfilename)
os.remove(smallfilenamebad)
os.remove(largefilename)
os.remove(largefilenamebad)
reactor.callLater(1, node.stop)
def generateTestData():
	""" Creates the small and large test fixtures (plus bad-key copies) and
	publishes their paths/keys via the module globals. """
	def generateFiles(minsize):
		# fix: mkstemp instead of the race-prone, deprecated mktemp
		fd, fname = tempfile.mkstemp()
		f = os.fdopen(fd, 'w')
		f.write('\0'*minsize)
		f.write(generateRandom(random.randrange(256)+1))
		f.close()
		filekey = hashfile(fname)
		filekey = fencode(int(filekey, 16))
		# rename the file to its CAS key so STOREs of it are valid
		filename = os.path.join("/tmp",filekey)
		os.rename(fname,filename)
		# the "bad" copy has identical content but a non-matching key
		filenamebad = os.path.join("/tmp/","bad"+filekey[3:])
		shutil.copy(filename, filenamebad)
		return (filekey, filename, filenamebad)
	global smallfilekey
	global smallfilename
	global smallfilenamebad
	(smallfilekey, smallfilename, smallfilenamebad) = generateFiles(1024)
	global largefilekey
	global largefilename
	global largefilenamebad
	(largefilekey, largefilename, largefilenamebad) = generateFiles(512000)
def runTests(host, port=None, listenport=None):
	""" Generates the fixtures, starts a node, and runs the failure tests. """
	generateTestData()
	node = FludNode(port=listenport)
	if port is None:
		port = node.config.port
	node.run()
	testID(node, host, port).addBoth(cleanup, node)
	node.join()
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
"""
FludFileOpTest.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
System tests for FludFileOperations
"""
import sys, os, time, logging, tempfile, shutil, random, socket
from twisted.internet import reactor, defer
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
	os.path.abspath(__file__)))))
from flud.FludConfig import FludConfig
from flud.FludNode import FludNode
from flud.fencode import fencode, fdecode
from flud.FludCrypto import generateRandom
from flud.FludFileOperations import *
import flud.FludDefer as FludDefer
from flud.protocol.LocalClient import listMeta
# module-level logger shared by the tests below
logger = logging.getLogger('flud')
def testError(failure, message, node):
	""" Errback: reports the failure and returns it so it keeps propagating. """
	print "testError message: %s" % message
	print "testError: %s" % str(failure)
	print "At least 1 test FAILED"
	return failure
def gotSuccess(r, desc):
	""" Callback: reports that the named test group succeeded. """
	print "%s succeeded" % desc
def testConcurrent(r, node, files, desc):
#print "r was %s" % r
print "testConcurrent %s" % desc
dlist = []
for file in files:
d = testStoreFile(node, file)
dlist.append(d)
dl = FludDefer.ErrDeferredList(dlist)
dl.addCallback(gotSuccess, desc)
dl.addErrback(testError)
return dl
def checkStoreFile(res, node, fname):
master = listMeta(node.config)
if fname not in master:
return defer.fail(failure.DefaultException("file not stored"))
else:
print "store on %s verified" % fname
return res # <- *VITAL* for concurrent dup ops to succeed.
def testStoreFile(node, fname):
	""" Kicks off a StoreFile op and chains verification and error report. """
	return StoreFile(node, fname).deferred.addCallback(
			checkStoreFile, node, fname).addErrback(testError, fname, node)
def doTests(node, smallfnames, largefnames, dupsmall, duplarge):
	""" Runs one warm-up store, then the four concurrent store groups in
	sequence: small, large, then the duplicate variants of each. """
	d = testStoreFile(node, smallfnames[0])
	for group, desc in ((smallfnames, "small"), (largefnames, "large"),
			(dupsmall, "small duplicates"), (duplarge, "large duplicates")):
		d.addCallback(testConcurrent, node, group, desc)
	return d
def cleanup(_, node, filenamelist):
#print _
for f in filenamelist:
try:
print "deleting %s" % f
os.remove(f)
except:
print "couldn't remove %s" % f
reactor.callLater(1, node.stop)
def generateTestFile(minSize):
	""" Writes a temp file of at least minSize bytes of repeated random data
	and returns its path. """
	# fix: mkstemp instead of the race-prone, deprecated mktemp
	fd, fname = tempfile.mkstemp()
	f = os.fdopen(fd, 'w')
	data = generateRandom(minSize/50)
	for i in range(0, 51+random.randrange(50)):
		f.write(data)
	f.close()
	# note: fname is already an absolute path, so the old
	# os.path.join("/tmp", fname) + rename was a no-op; just return it
	return fname
def runTests(host, port, listenport=None):
	""" Builds small and large fixtures (plus duplicates), joins the network
	via host:port, and runs the store suites. """
	small1 = generateTestFile(5120)
	small2 = generateTestFile(5120)
	smalldup = small2+".dup"
	shutil.copy(small2, smalldup)
	large1 = generateTestFile(513000)
	large2 = generateTestFile(513000)
	largedup = large2+".dup"
	shutil.copy(large2, largedup)
	node = FludNode(port=listenport)
	if port is None:
		port = node.config.port
	node.run()
	node.connectViaGateway(host, port)
	allfiles = [small1, small2, smalldup, large1, large2, largedup]
	d = doTests(node, [small1, small2], [large1, large2],
			[small2, smalldup], [large2, largedup])
	d.addBoth(cleanup, node, allfiles)
	node.join()
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
else:
print "must run this test against a flud network (no single node op)"
print "usage: %s [<othernodehost othernodeport> |"\
" <othernodehost othernodeport listenport>]" % sys.argv[0]
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket, tempfile
from twisted.python import failure
from StringIO import StringIO
from zlib import crc32
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
import flud.FludCrypto as FludCrypto
from flud.fencode import fencode, fdecode
from flud.protocol.FludCommUtil import *
from flud.FludDefer import ErrDeferredList
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# metadatablock: (block#,n,k,blockdata)
metadatablock = fencode((1,20,40,'adfdsfdffffffddddddddddddddd'))
fake_mkey_offset = 111111
def testerror(failure, message, node):
	"""
	error handler for test errbacks

	Reports the failure and returns it so it keeps propagating down the
	errback chain.
	"""
	print "testerror message: %s" % message
	print "testerror: %s" % str(failure)
	print "At least 1 test FAILED"
	return failure
def allGood(_, nKu):
	""" Terminal callback: announces overall success, passes nKu through. """
	print "all tests PASSED"
	return nKu
def checkDELETE(res, nKu, fname, fkey, mkey, node, host, port, totalDelete):
	""" checks to ensure the file was deleted """
	# totalDelete = True if this delete op should remove all meta (and data)
	if totalDelete:
		# try to retrieve with any metakey, should fail
		# (expectSuccess=False: allGood is reached via the errback path)
		print "expecting failed retrieve, any metakey"
		return testRETRIEVE(res, nKu, fname, fkey, True, node, host, port,
				lambda args=(res, nKu): allGood(*args), False)
	else:
		# try to retrieve with any metakey, should succeed
		# then delete the remaining (offset) metakey, which IS total
		print "expecting successful retrieve, any metakey"
		return testRETRIEVE(res, nKu, fname, fkey, True, node, host, port,
				lambda args=(res, nKu, fname, fkey, mkey+fake_mkey_offset,
					node, host, port, True): testDELETE(*args))
def testDELETE(res, nKu, fname, fkey, mkey, node, host, port, totalDelete):
""" Tests sendDelete, and invokes checkDELETE on success """
print "starting testDELETE %s.%s" % (fname, mkey)
#return checkDELETE(None, nKu, fname, fkey, mkey, node, host, port, False)
deferred = node.client.sendDelete(fkey, mkey, host, port, nKu)
deferred.addCallback(checkDELETE, nKu, fname, fkey, mkey, node, host, port,
totalDelete)
deferred.addErrback(testerror, "failed at testDELETE", node)
return deferred
def checkVERIFY(res, nKu, fname, fkey, mkey, node, host, port, hash, newmeta):
""" executes after testVERIFY """
if long(hash, 16) != long(res, 16):
raise failure.DefaultException("verify didn't match: %s != %s"
% (hash, res))
print "checkVERIFY (%s) %s success" % (newmeta, fname)
if newmeta:
return testDELETE(res, nKu, fname, fkey, mkey, node, host, port, False)
else:
return testVERIFY(nKu, fname, fkey, mkey, node, host, port, True)
def testVERIFY(nKu, fname, fkey, mkey, node, host, port, newmeta):
""" Test sendVerify """
# newmeta, if True, will generate new metadata to be stored during verify
if newmeta:
thismkey = mkey+fake_mkey_offset
else:
thismkey = mkey
print "starting testVERIFY (%s) %s.%s" % (newmeta, fname, thismkey)
fd = os.open(fname, os.O_RDONLY)
fsize = os.fstat(fd)[stat.ST_SIZE]
length = 20
offset = random.randrange(fsize-length)
os.lseek(fd, offset, 0)
data = os.read(fd, length)
os.close(fd)
hash = FludCrypto.hashstring(data)
deferred = node.client.sendVerify(fkey, offset, length, host, port, nKu,
(thismkey, StringIO(metadatablock)))
deferred.addCallback(checkVERIFY, nKu, fname, fkey, mkey, node, host,
port, hash, newmeta)
deferred.addErrback(testerror, "failed at testVERIFY (%s)" % newmeta, node)
return deferred
def failedRETRIEVE(res, nextCallable):
	""" Errback for a RETRIEVE that was expected to fail: swallow the
	failure and advance the test chain. """
	return nextCallable()
def checkRETRIEVE(res, nKu, fname, fkey, mkey, node, host, port, nextCallable):
	""" Compares the file that was stored with the one that was retrieved """
	# res is a list of paths; pick the one whose tail matches the file key
	filename = [f for f in res if f[-len(fkey):] == fkey][0]
	f1 = open(fname)
	f2 = open(filename)
	try:
		match = f1.read() == f2.read()
	finally:
		# fix: handles were previously left open on the mismatch/raise path
		f1.close()
		f2.close()
	if not match:
		raise failure.DefaultException(
				"upload/download (%s, %s) files don't match" % (fname,
				os.path.join(node.config.clientdir, fkey)))
	if mkey != True:
		# a specific metakey was used; its metadata file must round-trip too
		expectedmeta = "%s.%s.meta" % (fkey, mkey)
		metanames = [f for f in res if f[-len(expectedmeta):] == expectedmeta]
		if not metanames:
			raise failure.DefaultException("expected metadata was missing")
		# fix: the metadata file handle was previously leaked
		f3 = open(metanames[0])
		try:
			md = f3.read()
		finally:
			f3.close()
		if md != metadatablock:
			raise failure.DefaultException("upload/download metadata doesn't"
					" match (%s != %s)" % (md, metadatablock))
	return nextCallable()
def testRETRIEVE(res, nKu, fname, fkey, mkey, node, host, port, nextCallable,
expectSuccess=True):
""" Tests sendRetrieve, and invokes checkRETRIEVE on success """
print "starting testRETRIEVE %s.%s" % (fname, mkey)
deferred = node.client.sendRetrieve(fkey, host, port, nKu, mkey)
deferred.addCallback(checkRETRIEVE, nKu, fname, fkey, mkey, node, host,
port, nextCallable)
if expectSuccess:
deferred.addErrback(testerror, "failed at testRETRIEVE", node)
else:
deferred.addErrback(failedRETRIEVE, nextCallable)
return deferred
def testSTORE2(nKu, fname, fkey, node, host, port):
mkey = crc32(fname)
mkey2 = mkey+(2*fake_mkey_offset)
print "starting testSTORE %s.%s" % (fname, mkey2)
deferred = node.client.sendStore(fname, (mkey2, StringIO(metadatablock)),
host, port, nKu)
deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey2, node, host,
port, lambda args=(nKu, fname, fkey, mkey, node, host, port,
False): testVERIFY(*args))
deferred.addErrback(testerror, "failed at testSTORE", node)
return deferred
def testSTORE(nKu, fname, fkey, node, host, port):
""" Tests sendStore, and invokes testRETRIEVE on success """
mkey = crc32(fname)
print "starting testSTORE %s.%s" % (fname, mkey)
deferred = node.client.sendStore(fname, (mkey, StringIO(metadatablock)),
host, port, nKu)
deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey, node, host, port,
lambda args=(nKu, fname, fkey, node, host, port): testSTORE2(*args))
deferred.addErrback(testerror, "failed at testSTORE", node)
return deferred
def testID(node, host, port):
""" Tests sendGetID(), and invokes testSTORE on success """
print "starting testID"
deferred = node.client.sendGetID(host, port)
deferred.addErrback(testerror, "failed at testID", node)
return deferred
def testAggSTORE(nKu, aggFiles, node, host, port):
	""" Stores several small files concurrently, then RETRIEVE/VERIFYs each;
	succeeds only when every per-file chain succeeds. """
	print "starting testAggSTORE"
	dlist = []
	for fname, fkey in aggFiles:
		mkey = crc32(fname)
		print "testAggSTORE %s (%s)" % (fname, mkey)
		deferred = node.client.sendStore(fname, (mkey, StringIO(metadatablock)),
				host, port, nKu)
		# the lambda binds this iteration's (fname, fkey, mkey) via its
		# default argument, avoiding the late-binding closure pitfall
		deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey, node, host,
				port, lambda args=(nKu, fname, fkey, mkey, node, host,
				port, False): testVERIFY(*args))
		deferred.addErrback(testerror, "failed at testAggSTORE", node)
		dlist.append(deferred)
	dl = ErrDeferredList(dlist)
	dl.addCallback(allGood, nKu)
	dl.addErrback(testerror, "failed at testAggSTORE", node)
	return dl
def cleanup(_, node, filenamelist):
for f in filenamelist:
try:
os.remove(f)
except:
print "couldn't remove %s" % f
reactor.callLater(1, node.stop)
def generateTestData(minSize):
	""" Writes a temp file of at least minSize bytes of repeated random data,
	renames it to its CAS key, and returns (filename, filekey). """
	# fix: mkstemp instead of the race-prone, deprecated mktemp
	fd, fname = tempfile.mkstemp()
	f = os.fdopen(fd, 'w')
	data = FludCrypto.generateRandom(minSize/50)
	for i in range(0, 51+random.randrange(50)):
		f.write(data)
	f.close()
	filekey = FludCrypto.hashfile(fname)
	filekey = fencode(int(filekey, 16))
	filename = os.path.join("/tmp",filekey)
	os.rename(fname,filename)
	return (filename, filekey)
def runTests(host, port=None, listenport=None):
	""" Generates fixtures and chains the STORE/RETRIEVE/VERIFY/DELETE tests
	(large file, small file, then an aggregated batch). """
	(largeFilename, largeFilekey) = generateTestData(512000)
	(smallFilename, smallFilekey) = generateTestData(5120)
	aggFiles = [generateTestData(4096) for i in range(4)]
	node = FludNode(port=listenport)
	if port is None:
		port = node.config.port
	node.run()
	d = testID(node, host, port)
	d.addCallback(testSTORE, largeFilename, largeFilekey, node, host, port)
	d.addCallback(testSTORE, smallFilename, smallFilekey, node, host, port)
	d.addCallback(testAggSTORE, aggFiles, node, host, port)
	cleanuplist = [f[0] for f in aggFiles] + [largeFilename, smallFilename]
	d.addBoth(cleanup, node, cleanuplist)
	node.join()
def main():
	""" Parses argv and invokes the test suite. """
	localhost = socket.getfqdn()
	# fix: ports parsed with int() rather than eval() on raw argv
	if len(sys.argv) == 1:
		runTests(localhost) # test by talking to self
	elif len(sys.argv) == 2:
		runTests(localhost, int(sys.argv[1])) # talk to self on port [1]
	elif len(sys.argv) == 3:
		runTests(sys.argv[1], int(sys.argv[2])) # talk to [1] on port [2]
	elif len(sys.argv) == 4:
		# talk to [1] on port [2], listen on port [3]
		runTests(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))

if __name__ == '__main__':
	main()
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
from twisted.internet import defer
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import *
from flud.FludDefer import ErrDeferredList
"""
Test code for kprimitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# XXX: check return from ops to see if they passed (e.g., if STORE fails, we
# are notified [currently] by the html page that is returned).
# XXX: should make a random file each time this is run...
CONCURRENT=50
CONCREPORT=10
node = None
# format of block metadata is
# {(i, datakey): storingNodeID, ..., 'n': n, 'm': m}
# where i<n+m, datakey is a sha256 of the data stored, and storingNodeID is
# either a nodeID or a list of nodeIDs
testval = {(0, 802484L): 465705L, (1, 780638L): 465705L, (2, 169688L): 465705L,
(3, 267175L): 465705L, (4, 648636L): 465705L, (5, 838315L): 465705L,
(6, 477619L): 465705L, (7, 329906L): 465705L, (8, 610565L): 465705L,
(9, 217811L): 465705L, (10, 374124L): 465705L, (11, 357214L): 465705L,
(12, 147307L): 465705L, (13, 427751L): 465705L, (14, 927853L): 465705L,
(15, 760369L): 465705L, (16, 707029L): 465705L, (17, 479234L): 465705L,
(18, 190455L): 465705L, (19, 647489L): 465705L, (20, 620470L): 465705L,
(21, 777532L): 465705L, (22, 622383L): 465705L, (23, 573283L): 465705L,
(24, 613082L): 465705L, (25, 433593L): 465705L, (26, 584543L): 465705L,
(27, 337485L): 465705L, (28, 911014L): 465705L, (29, 594065L): 465705L,
(30, 375876L): 465705L, (31, 726818L): 465705L, (32, 835759L): 465705L,
(33, 814060L): 465705L, (34, 237176L): 465705L, (35, 538268L): 465705L,
(36, 272650L): 465705L, (37, 314058L): 465705L, (38, 257714L): 465705L,
(39, 439931L): 465705L, 'n': 20, 'm': 20}
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
logger.setLevel(logging.DEBUG)
def suitesuccess(results):
	""" Callback: the whole suite completed; passes results through. """
	logger.info("all tests in suite passed")
	#print results
	return results
def suiteerror(failure):
	""" Errback: the suite aborted; logs and keeps the failure propagating. """
	logger.info("suite did not complete")
	logger.info("DEBUG: %s" % failure)
	return failure
def stagesuccess(result, message):
	""" Callback: one named stage completed; passes its result through. """
	logger.info("stage %s succeeded" % message)
	return result
def stageerror(failure, message):
	""" Errback: one named stage failed; logs and re-propagates the failure. """
	logger.info("stage %s failed" % message)
	#logger.info("DEBUG: %s" % failure)
	return failure
def itersuccess(res, i, message):
	""" Per-op callback: logs progress every CONCREPORT-th op (i counts ops). """
	if i % CONCREPORT == 0:
		logger.info("itersuccess: %s" % message)
	return res
def itererror(failure, message):
	""" Per-op errback: prints the traceback, re-propagates the failure. """
	logger.info("itererror message: %s" % message)
	#logger.info("DEBUG: %s" % failure)
	#logger.info("DEBUG: %s" % dir(failure)
	failure.printTraceback()
	return failure
def endtests(res, nKu, node, host, port):
	""" executes after all tests """
	try:
		res = fdecode(res)
	except ValueError:
		pass
	if res != testval:
		# fix: raise (not return) the exception; Twisted treats a returned
		# exception instance as an ordinary callback result, so the mismatch
		# was silently ignored instead of failing the suite
		raise failure.DefaultException("retrieved value does not match"
				" stored value: '%s' != '%s'" % (res, testval))
	logger.log(logging.INFO,"testkFindVal PASSED: %s\n" % str(res))
	logger.log(logging.INFO,"all tests PASSED")
	return res
def testkFindVal(res, nKu, node, host, port, num=CONCURRENT):
	""" Fires num concurrent local kFindValue lookups on random keys. """
	logger.log(logging.INFO,"testSendkFindVal PASSED: %s\n" % str(res))
	logger.log(logging.INFO,"attempting testkFindValue")
	def one():
		op = node.client.kFindValue(random.randrange(2**256))
		op.addErrback(itererror, "kFindValue")
		return op
	dl = ErrDeferredList([one() for _ in range(num)], returnOne=True)
	dl.addCallback(stagesuccess, "kFindValue")
	dl.addErrback(stageerror, 'kFindValue')
	dl.addCallback(endtests, nKu, node, host, port)
	return dl
def testSendkFindVal(res, nKu, node, host, port, num=CONCURRENT):
	""" Fires num concurrent direct sendkFindValue requests on random keys. """
	logger.log(logging.INFO, "testkStore PASSED: %s\n" % str(res))
	logger.log(logging.INFO, "attempting testSendkFindValue")
	def one():
		op = node.client.sendkFindValue(host, port, random.randrange(2**256))
		op.addErrback(itererror, "sendkFindValue")
		return op
	dl = ErrDeferredList([one() for _ in range(num)], returnOne=True)
	dl.addCallback(stagesuccess, "sendkFindValue")
	dl.addErrback(stageerror, 'sendkFindValue')
	dl.addCallback(testkFindVal, nKu, node, host, port, num)
	return dl
def testkStore(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent local kStore ops storing the
    module-level testval under random 256-bit keys; chains into
    testSendkFindVal on completion.
    """
    logger.log(logging.INFO, "testSendkStore PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testkStore")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.kStore(key, testval)
        deferred.addErrback(itererror, "kStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kStore")
    d.addErrback(stageerror, 'kStore')
    d.addCallback(testSendkFindVal, nKu, node, host, port, num)
    return d
def testSendkStore(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent wire-level sendkStore requests to
    host:port storing the module-level testval; chains into testkStore
    on completion.
    """
    logger.log(logging.INFO, "testkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testSendkStore")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.sendkStore(host, port, key, testval)
        deferred.addErrback(itererror, "sendkStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkStore")
    d.addErrback(stageerror, 'sendkStore')
    d.addCallback(testkStore, nKu, node, host, port, num)
    return d
def testkFindNode(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent local kFindNode lookups on random
    256-bit keys; chains into testSendkStore on completion.
    """
    logger.log(logging.INFO, "testSendkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting kFindNode")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.kFindNode(key)
        deferred.addErrback(itererror, "kFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kFindNode")
    d.addErrback(stageerror, 'kFindNode')
    d.addCallback(testSendkStore, nKu, node, host, port, num)
    return d
def testSendkFindNode(nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent wire-level sendkFindNode requests to
    host:port; first stage after getID, chains into testkFindNode.
    `nKu` is the remote node's public key from sendGetID.
    """
    logger.log(logging.INFO, "testkGetID PASSED")
    logger.log(logging.INFO, "attempting sendkFindNode")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.sendkFindNode(host, port, key)
        deferred.addErrback(itererror, "sendkFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkFindNode")
    d.addErrback(stageerror, 'sendkFindNode')
    d.addCallback(testkFindNode, nKu, node, host, port, num)
    return d
def testGetID(node, host, port, num=CONCURRENT):
    """Kick off the suite: fetch the remote node's ID from host:port, then
    chain into testSendkFindNode with the returned key."""
    d = node.client.sendGetID(host, port)
    d.addCallback(testSendkFindNode, node, host, port, num)
    d.addErrback(stageerror, "testGetID")
    return d
def runTests(host, port=None, listenport=None):
    """
    Start a local FludNode (listening on `listenport` if given) and run
    the concurrent k-op suite against host:port.  Blocks in node.join()
    until the reactor exits.
    """
    num = CONCURRENT
    #num = 5
    node = FludNode(port=listenport)
    if port is None:
        port = node.config.port
    node.run()
    # BUG FIX: pass the local `num` (which exists precisely so it can be
    # overridden above) instead of hard-coding CONCURRENT again.
    d = testGetID(node, host, port, num)
    d.addCallback(suitesuccess)
    d.addErrback(suiteerror)
    d.addBoth(cleanup, node)
    node.join()
    #node.start() # doesn't work, because reactor may not have started
    # listening by time requests start flying
def cleanup(_, node):
    """
    Final addBoth handler: pause briefly, then schedule node shutdown via
    the reactor.  `_` is the chain's final result/failure (ignored).
    """
    logger.info("shutting down in 1 seconds...")
    time.sleep(1)  # NOTE(review): blocking sleep inside reactor context
    reactor.callLater(1, node.stop)
    logger.info("done cleaning up")
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
print "Warning: testing against self my result in timeout failures"
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
from twisted.internet import defer
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import *
from flud.FludDefer import ErrDeferredList
"""
Test code for kprimitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# XXX: check return from ops to see if they passed (e.g., if STORE fails, we
# are notified [currently] by the html page that is returned).
# XXX: should make a random file each time this is run...
CONCURRENT=50
CONCREPORT=10
node = None
# format of block metadata is
# {(i, datakey): storingNodeID, ..., 'n': n, 'm': m}
# where i<n+m, datakey is a sha256 of the data stored, and storingNodeID is
# either a nodeID or a list of nodeIDs
testval = {(0, 802484L): 465705L, (1, 780638L): 465705L, (2, 169688L): 465705L,
(3, 267175L): 465705L, (4, 648636L): 465705L, (5, 838315L): 465705L,
(6, 477619L): 465705L, (7, 329906L): 465705L, (8, 610565L): 465705L,
(9, 217811L): 465705L, (10, 374124L): 465705L, (11, 357214L): 465705L,
(12, 147307L): 465705L, (13, 427751L): 465705L, (14, 927853L): 465705L,
(15, 760369L): 465705L, (16, 707029L): 465705L, (17, 479234L): 465705L,
(18, 190455L): 465705L, (19, 647489L): 465705L, (20, 620470L): 465705L,
(21, 777532L): 465705L, (22, 622383L): 465705L, (23, 573283L): 465705L,
(24, 613082L): 465705L, (25, 433593L): 465705L, (26, 584543L): 465705L,
(27, 337485L): 465705L, (28, 911014L): 465705L, (29, 594065L): 465705L,
(30, 375876L): 465705L, (31, 726818L): 465705L, (32, 835759L): 465705L,
(33, 814060L): 465705L, (34, 237176L): 465705L, (35, 538268L): 465705L,
(36, 272650L): 465705L, (37, 314058L): 465705L, (38, 257714L): 465705L,
(39, 439931L): 465705L, 'n': 20, 'm': 20}
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
logger.setLevel(logging.DEBUG)
def suitesuccess(results):
    """Final callback: report that every stage in the suite passed."""
    logger.info("all tests in suite passed")
    return results
def suiteerror(failure):
    """Final errback: report that the suite aborted and propagate failure."""
    logger.info("suite did not complete")
    logger.info("DEBUG: %s", failure)
    return failure
def stagesuccess(result, message):
    """Callback: record that the named suite stage passed, pass result on."""
    logger.info("stage %s succeeded", message)
    return result
def stageerror(failure, message):
    """Errback: record that the named suite stage failed, propagate failure."""
    logger.info("stage %s failed", message)
    return failure
def itersuccess(res, i, message):
    """Per-request callback: log only every CONCREPORT-th success to keep
    output manageable at high concurrency; pass the result through."""
    if not i % CONCREPORT:
        logger.info("itersuccess: %s", message)
    return res
def itererror(failure, message):
    """
    Per-request errback: print the failure's traceback for diagnosis and
    propagate the failure so the enclosing ErrDeferredList sees it.
    """
    logger.info("itererror message: %s" % message)
    #logger.info("DEBUG: %s" % failure)
    #logger.info("DEBUG: %s" % dir(failure)
    failure.printTraceback()
    return failure
def endtests(res, nKu, node, host, port):
    """ executes after all tests: decode the final kFindValue result and
    compare it to the module-level testval; returns res on success.
    NOTE(review): on mismatch the exception is *returned*, not raised, so
    downstream callbacks receive it as a plain value -- confirm intended. """
    try:
        res = fdecode(res)
    except ValueError:
        pass  # res may already be in decoded form
    if res != testval:
        return failure.DefaultException("retrieved value does not match"
            " stored value: '%s' != '%s'" % (res, testval))
    logger.log(logging.INFO,"testkFindVal PASSED: %s\n" % str(res))
    logger.log(logging.INFO,"all tests PASSED")
    return res
def testkFindVal(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent local kFindValue lookups on random
    256-bit keys; chains into endtests on completion.
    """
    logger.log(logging.INFO,"testSendkFindVal PASSED: %s\n" % str(res))
    logger.log(logging.INFO,"attempting testkFindValue")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.kFindValue(key)
        deferred.addErrback(itererror, "kFindValue")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kFindValue")
    d.addErrback(stageerror, 'kFindValue')
    d.addCallback(endtests, nKu, node, host, port)
    return d
def testSendkFindVal(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent wire-level sendkFindValue requests to
    host:port; chains into testkFindVal on completion.
    """
    logger.log(logging.INFO, "testkStore PASSED: %s\n" % str(res))
    logger.log(logging.INFO, "attempting testSendkFindValue")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.sendkFindValue(host, port, key)
        deferred.addErrback(itererror, "sendkFindValue")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkFindValue")
    d.addErrback(stageerror, 'sendkFindValue')
    d.addCallback(testkFindVal, nKu, node, host, port, num)
    return d
def testkStore(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent local kStore ops storing the
    module-level testval under random 256-bit keys; chains into
    testSendkFindVal on completion.
    """
    logger.log(logging.INFO, "testSendkStore PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testkStore")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.kStore(key, testval)
        deferred.addErrback(itererror, "kStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kStore")
    d.addErrback(stageerror, 'kStore')
    d.addCallback(testSendkFindVal, nKu, node, host, port, num)
    return d
def testSendkStore(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent wire-level sendkStore requests to
    host:port storing the module-level testval; chains into testkStore.
    """
    logger.log(logging.INFO, "testkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testSendkStore")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.sendkStore(host, port, key, testval)
        deferred.addErrback(itererror, "sendkStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkStore")
    d.addErrback(stageerror, 'sendkStore')
    d.addCallback(testkStore, nKu, node, host, port, num)
    return d
def testkFindNode(res, nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent local kFindNode lookups on random
    256-bit keys; chains into testSendkStore on completion.
    """
    logger.log(logging.INFO, "testSendkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting kFindNode")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.kFindNode(key)
        deferred.addErrback(itererror, "kFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kFindNode")
    d.addErrback(stageerror, 'kFindNode')
    d.addCallback(testSendkStore, nKu, node, host, port, num)
    return d
def testSendkFindNode(nKu, node, host, port, num=CONCURRENT):
    """
    Stage: issue `num` concurrent wire-level sendkFindNode requests to
    host:port; first stage after getID, chains into testkFindNode.
    `nKu` is the remote node's public key from sendGetID.
    """
    logger.log(logging.INFO, "testkGetID PASSED")
    logger.log(logging.INFO, "attempting sendkFindNode")
    dlist = []
    for i in range(num):
        key = random.randrange(2**256)
        deferred = node.client.sendkFindNode(host, port, key)
        deferred.addErrback(itererror, "sendkFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkFindNode")
    d.addErrback(stageerror, 'sendkFindNode')
    d.addCallback(testkFindNode, nKu, node, host, port, num)
    return d
def testGetID(node, host, port, num=CONCURRENT):
    """Kick off the suite: fetch the remote node's ID from host:port, then
    chain into testSendkFindNode with the returned key."""
    d = node.client.sendGetID(host, port)
    d.addCallback(testSendkFindNode, node, host, port, num)
    d.addErrback(stageerror, "testGetID")
    return d
def runTests(host, port=None, listenport=None):
    """
    Start a local FludNode (listening on `listenport` if given) and run
    the concurrent k-op suite against host:port.  Blocks in node.join()
    until the reactor exits.
    """
    num = CONCURRENT
    #num = 5
    node = FludNode(port=listenport)
    if port is None:
        port = node.config.port
    node.run()
    # BUG FIX: pass the local `num` (which exists precisely so it can be
    # overridden above) instead of hard-coding CONCURRENT again.
    d = testGetID(node, host, port, num)
    d.addCallback(suitesuccess)
    d.addErrback(suiteerror)
    d.addBoth(cleanup, node)
    node.join()
    #node.start() # doesn't work, because reactor may not have started
    # listening by time requests start flying
def cleanup(_, node):
    """
    Final addBoth handler: pause briefly, then schedule node shutdown via
    the reactor.  `_` is the chain's final result/failure (ignored).
    """
    logger.info("shutting down in 1 seconds...")
    time.sleep(1)  # NOTE(review): blocking sleep inside reactor context
    reactor.callLater(1, node.stop)
    logger.info("done cleaning up")
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
print "Warning: testing against self my result in timeout failures"
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket, shutil, tempfile
from binascii import crc32
from StringIO import StringIO
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.FludCrypto import generateRandom, hashfile
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
smallfilekey = ""
smallfilename = ""
smallfilenamebad = ""
largefilekey = ""
largefilename = ""
largefilenamebad = ""
metadata = 'aaaa'
def testerror(failure, message, node):
    """
    error handler for test errbacks: print the failure and re-raise it.
    `failure` shadows the imported twisted failure module within this body.
    `node` is unused; kept for the common errback signature.
    """
    print "testerror message: %s" % message
    print "testerror: %s" % str(failure)
    print "At least 1 test FAILED"
    # NOTE(review): raising the Failure instance relies on Python 2
    # instance-raise semantics; confirm it propagates as intended.
    raise failure
def testUnexpectedSuccess(res, message, node):
print "unexpected success message: %s" % message
print "At least 1 test succeeded when it should have failed"
raise "bad"
def testDELETEBadKeyFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testDELETEBadKey: a BadRequestException is the expected
    outcome (ends the suite); anything else is re-raised as a real failure.
    """
    if failure.check('protocol.FludCommUtil.BadRequestException'):
        print "%s" % msg
        # the end
    else:
        # XXX: here and elsewhere, raise something more descriptive, otherwise
        # its waay confusing
        print "the following trace may be misleading..."
        raise failure
def testDELETEBadKey(nKu, node, host, port):
    """
    Send a DELETE whose key contains a path component ("somedir/...");
    the server must reject it, so success routes to testUnexpectedSuccess
    and the expected failure to testDELETEBadKeyFailed.
    """
    print "starting testDELETEBadKey"
    path = os.path.join("somedir", largefilekey)
    deferred = node.client.sendDelete(path, crc32(path), host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "DELETE with bad key succeeded",
        node)
    deferred.addErrback(testDELETEBadKeyFailed,
        "DELETE with bad key failed as expected", node, nKu, host, port)
    return deferred
def testVERIFYBadKeyFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testVERIFYBadKey: NotFoundException is expected and chains
    into testDELETEBadKey; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        return testDELETEBadKey(nKu, node, host, port)
    else:
        raise failure
def testVERIFYBadKey(nKu, node, host, port):
    """
    VERIFY against a filekey that was never stored (the 'bad' copy's name);
    expects NotFound, handled by testVERIFYBadKeyFailed.
    """
    print "starting testVERIFYBadKey"
    fsize = os.stat(smallfilename)[stat.ST_SIZE]
    offset = fsize-20  # valid window; only the key is wrong
    deferred = node.client.sendVerify(smallfilenamebad, offset, 5, host,
        port, nKu)
    deferred.addCallback(testUnexpectedSuccess,
        "verified file with bad key succeeded", node)
    deferred.addErrback(testVERIFYBadKeyFailed,
        "VERIFY of bad filekey failed as expected", node, nKu, host, port)
    return deferred
def testVERIFYBadLengthFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testVERIFYBadLength: BadRequestException is expected and
    chains into testVERIFYBadKey; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.BadRequestException'):
        print "%s" % msg
        return testVERIFYBadKey(nKu, node, host, port)
    else:
        raise failure
def testVERIFYBadLength(nKu, node, host, port):
print "starting testVERIFYBadOffset"
fsize = os.stat(smallfilename)[stat.ST_SIZE]
offset = fsize-10
deferred = node.client.sendVerify(smallfilekey, offset, 20, host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "verified file with bad length",
node)
deferred.addErrback(testVERIFYBadLengthFailed,
"VERIFY of bad length failed as expected", node, nKu, host, port)
return deferred
def testVERIFYBadOffsetFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testVERIFYBadOffset: BadRequestException is expected and
    chains into testVERIFYBadLength; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.BadRequestException'):
        print "%s" % msg
        return testVERIFYBadLength(nKu, node, host, port)
    else:
        print "VERIFYBadOffset failed as expected, but with wrong failure"
        raise failure
def testVERIFYBadOffset(nKu, node, host, port):
    """
    VERIFY with an offset past the end of the stored file (size+2);
    expects BadRequest, handled by testVERIFYBadOffsetFailed.
    """
    print "starting testVERIFYBadOffset"
    fsize = os.stat(smallfilename)[stat.ST_SIZE]
    offset = fsize+2
    deferred = node.client.sendVerify(smallfilekey, offset, 20, host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "verified file with bad offset",
        node)
    deferred.addErrback(testVERIFYBadOffsetFailed,
        "VERIFY of bad offset failed as expected", node, nKu, host, port)
    return deferred
def testVERIFYNotFoundFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testVERIFYNotFound: NotFoundException is expected and
    chains into testVERIFYBadOffset; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        return testVERIFYBadOffset(nKu, node, host, port)
    else:
        raise failure
def testVERIFYNotFound(nKu, node, host, port):
    """
    VERIFY a filekey that was never stored (largefilekey); expects
    NotFound, handled by testVERIFYNotFoundFailed.
    """
    print "starting testVERIFYNotFound"
    deferred = node.client.sendVerify(largefilekey, 10, 10, host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "verified non-existent file",
        node)
    deferred.addErrback(testVERIFYNotFoundFailed,
        "VERIFY of non-existent file failed as expected", node, nKu,
        host, port)
    return deferred
def testRETRIEVEIllegalPathFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testRETRIEVEIllegalPath: BadRequestException is expected
    and chains into testVERIFYNotFound; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.BadRequestException'):
        print "%s" % msg
        return testVERIFYNotFound(nKu, node, host, port)
    else:
        raise failure
def testRETRIEVEIllegalPath(nKu, node, host, port):
    """
    RETRIEVE a key containing a path separator ("somedir/..."); the server
    must reject it with BadRequest, handled by
    testRETRIEVEIllegalPathFailed.
    """
    print "starting testRETRIEVEIllegalPath"
    deferred = node.client.sendRetrieve(os.path.join("somedir",smallfilekey),
        host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess,
        "retrieved file with illegal path", node)
    deferred.addErrback(testRETRIEVEIllegalPathFailed,
        "RETRIEVE using illegal path failed as expected", node, nKu,
        host, port)
    return deferred
def testRETRIEVENotFoundFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testRETRIEVENotFound: NotFoundException is expected and
    chains into testRETRIEVEIllegalPath; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        return testRETRIEVEIllegalPath(nKu, node, host, port)
    else:
        raise failure
def testRETRIEVENotFound(nKu, node, host, port):
    """
    RETRIEVE a filekey that was never stored (largefilekey); expects
    NotFound, handled by testRETRIEVENotFoundFailed.
    """
    print "starting testRETRIEVENotFound"
    deferred = node.client.sendRetrieve(largefilekey, host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess,
        "retrieved file that shouldn't exist", node)
    deferred.addErrback(testRETRIEVENotFoundFailed,
        "RETRIEVE of non-existent file failed as expected", node, nKu,
        host, port)
    return deferred
def testSTORELargeFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testSTOREBadKeyLarge: BadCASKeyException is expected and
    chains into testRETRIEVENotFound; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.BadCASKeyException'):
        print "%s" % msg
        return testRETRIEVENotFound(nKu, node, host, port)
    else:
        raise failure
def testSTOREBadKeyLarge(nKu, node, host, port):
    """
    STORE the large file under a name that does not match its content hash
    (the 'bad' copy); expects BadCASKey, handled by testSTORELargeFailed.
    """
    print "starting testSTOREBadKeyLarge"
    deferred = node.client.sendStore(largefilenamebad,
        (crc32(largefilenamebad), StringIO(metadata)), host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "large file, bad key succeeded",
        node)
    deferred.addErrback(testSTORELargeFailed,
        "large STORE with bad key failed as expected", node, nKu,
        host, port)
    return deferred
def testSTORESmallFailed(failure, msg, node, nKu, host, port):
    """
    Errback for testSTOREBadKeySmall: BadCASKeyException is expected and
    chains into testSTOREBadKeyLarge; any other failure is re-raised.
    """
    if failure.check('protocol.FludCommUtil.BadCASKeyException'):
        print "%s" % msg
        return testSTOREBadKeyLarge(nKu, node, host, port)
    else:
        raise failure
def testSTOREBadKeySmall(nKu, node, host, port):
    """
    STORE the small file under a name that does not match its content hash
    (the 'bad' copy); expects BadCASKey, handled by testSTORESmallFailed.
    """
    print "starting testSTOREBadKeySmall"
    deferred = node.client.sendStore(smallfilenamebad,
        (crc32(smallfilenamebad), StringIO(metadata)), host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "small file, bad key succeeded",
        node)
    deferred.addErrback(testSTORESmallFailed,
        "small STORE with bad key failed as expected", node, nKu,
        host, port)
    return deferred
def testSTORESuccess(res, nKu, node, host, port):
    """Callback for the successful warm-up STORE; chains into the
    bad-key failure tests."""
    print("testSTORE succeeded: %s" % res)
    return testSTOREBadKeySmall(nKu, node, host, port)
def testSTORE(nKu, node, host, port):
    # store a file successfully for later failure tests (VERIFY, etc)
    """Warm-up: STORE the small file with its correct CAS key so the
    subsequent VERIFY/DELETE failure tests have something on disk."""
    print "starting testSTORE"
    deferred = node.client.sendStore(smallfilename,
        (crc32(smallfilename), StringIO(metadata)), host, port, nKu)
    deferred.addCallback(testSTORESuccess, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testSTORE", node)
    return deferred
# XXX: need to test bogus headers for all commands (BAD_REQUEST)
# XXX: need to test failures for authentication
def testID(node, host, port):
    """ Tests sendGetID(), and invokes testSTORE on success """
    print "starting testID"
    deferred = node.client.sendGetID(host, port)
    deferred.addCallback(testSTORE, node, host, port)
    #deferred.addCallback(testSTOREBadKeySmall, node, host, port)
    deferred.addErrback(testerror, "failed at testID", node)
    return deferred
def cleanup(err, node):
    """
    Final addBoth handler: remove generated test files, schedule shutdown.
    `err` is the chain's final result or Failure.
    """
    if err:
        # NOTE(review): a truthy *success* result also lands here and is
        # printed as if it were an error -- confirm intended.
        print "cleaning up: %s" % err
    else:
        print "cleaning up"
    os.remove(smallfilename)
    os.remove(smallfilenamebad)
    os.remove(largefilename)
    os.remove(largefilenamebad)
    reactor.callLater(1, node.stop)
def generateTestData():
    """
    Create the small/large test files plus 'bad key' copies and publish
    their keys/paths through the module-level globals used by the tests.
    """
    def generateFiles(minsize):
        # minsize zero bytes plus 1-256 random trailing bytes; the file is
        # named by the fencoded sha256 of its content (its CAS key), and a
        # copy with a corrupted key name is made for the bad-key tests.
        fname = tempfile.mktemp()
        f = open(fname, 'w')
        f.write('\0'*minsize)
        f.write(generateRandom(random.randrange(256)+1))
        f.close()
        filekey = hashfile(fname)
        filekey = fencode(int(filekey, 16))
        filename = os.path.join("/tmp",filekey)
        os.rename(fname,filename)
        filenamebad = os.path.join("/tmp/","bad"+filekey[3:])
        shutil.copy(filename, filenamebad)
        return (filekey, filename, filenamebad)
    global smallfilekey
    global smallfilename
    global smallfilenamebad
    (smallfilekey, smallfilename, smallfilenamebad) = generateFiles(1024)
    global largefilekey
    global largefilename
    global largefilenamebad
    (largefilekey, largefilename, largefilenamebad) = generateFiles(512000)
def runTests(host, port=None, listenport=None):
    """
    Generate test data, start a local node, and run the STORE/RETRIEVE/
    VERIFY/DELETE failure-path suite against host:port.
    """
    generateTestData()
    node = FludNode(port=listenport)
    if port == None:
        port = node.config.port
    node.run()
    d = testID(node, host, port)
    d.addBoth(cleanup, node)
    node.join()
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
"""
FludFileOpTest.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
System tests for FludFileOperations
"""
import sys, os, time, logging, tempfile, shutil, random, socket
from twisted.internet import reactor, defer
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludConfig import FludConfig
from flud.FludNode import FludNode
from flud.fencode import fencode, fdecode
from flud.FludCrypto import generateRandom
from flud.FludFileOperations import *
import flud.FludDefer as FludDefer
from flud.protocol.LocalClient import listMeta
logger = logging.getLogger('flud')
def testError(failure, message, node):
    """Errback: report a failed operation and propagate the failure."""
    print("testError message: %s" % message)
    print("testError: %s" % str(failure))
    print("At least 1 test FAILED")
    return failure
def gotSuccess(r, desc):
    """Callback: announce that the batch described by `desc` completed."""
    print("%s succeeded" % desc)
def testConcurrent(r, node, files, desc):
#print "r was %s" % r
print "testConcurrent %s" % desc
dlist = []
for file in files:
d = testStoreFile(node, file)
dlist.append(d)
dl = FludDefer.ErrDeferredList(dlist)
dl.addCallback(gotSuccess, desc)
dl.addErrback(testError)
return dl
def checkStoreFile(res, node, fname):
    """
    Post-store check: verify `fname` now appears in the node's local
    master metadata list; returns the original result unchanged.
    """
    master = listMeta(node.config)
    if fname not in master:
        return defer.fail(failure.DefaultException("file not stored"))
    else:
        print "store on %s verified" % fname
        return res # <- *VITAL* for concurrent dup ops to succeed.
def testStoreFile(node, fname):
    """Store `fname` via StoreFile, then verify it shows up in metadata."""
    d = StoreFile(node, fname).deferred
    d.addCallback(checkStoreFile, node, fname)
    d.addErrback(testError, fname, node)
    return d
def doTests(node, smallfnames, largefnames, dupsmall, duplarge):
    """
    Chain the store batches: one warm-up store, then concurrent small,
    large, duplicate-small, and duplicate-large batches.
    """
    d = testStoreFile(node, smallfnames[0])
    d.addCallback(testConcurrent, node, smallfnames, "small")
    d.addCallback(testConcurrent, node, largefnames, "large")
    d.addCallback(testConcurrent, node, dupsmall, "small duplicates")
    d.addCallback(testConcurrent, node, duplarge, "large duplicates")
    #d = testConcurrent(None, node, dupsmall, "small duplicates")
    #d = testConcurrent(None, node, duplarge, "large duplicates")
    return d
def cleanup(_, node, filenamelist):
    """Best-effort removal of the generated test files, then stop the node."""
    #print _
    for f in filenamelist:
        try:
            print "deleting %s" % f
            os.remove(f)
        except:
            # best-effort: an earlier failed stage may not have created f
            print "couldn't remove %s" % f
    reactor.callLater(1, node.stop)
def generateTestFile(minSize):
    """
    Create a temp file of at least `minSize` bytes (a random chunk of
    minSize/50 bytes written 51-100 times) and return its path.
    """
    fname = tempfile.mktemp()
    # with-block guarantees the handle is closed even if a write fails
    with open(fname, 'w') as f:
        data = generateRandom(minSize/50)
        for i in range(0, 51+random.randrange(50)):
            f.write(data)
    # NOTE(review): mktemp() already returns an absolute path, so this
    # join is a no-op and the rename renames the file onto itself.
    filename = os.path.join("/tmp", fname)
    os.rename(fname, filename)
    return filename
def runTests(host, port, listenport=None):
    """
    Create small/large test files (each with one duplicate copy), start a
    local node, join the network via host:port, and run the concurrent
    StoreFile suite; all files are removed by cleanup().
    """
    f1 = generateTestFile(5120)
    f2 = generateTestFile(5120)
    f3 = f2+".dup"
    shutil.copy(f2, f3)
    f4 = generateTestFile(513000)
    f5 = generateTestFile(513000)
    f6 = f5+".dup"
    shutil.copy(f5, f6)
    node = FludNode(port=listenport)
    if port == None:
        port = node.config.port
    node.run()
    node.connectViaGateway(host, port)
    d = doTests(node, [f1, f2], [f4, f5], [f2, f3], [f5, f6])
    d.addBoth(cleanup, node, [f1, f2, f3, f4, f5, f6])
    node.join()
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
else:
print "must run this test against a flud network (no single node op)"
print "usage: %s [<othernodehost othernodeport> |"\
" <othernodehost othernodeport listenport>]" % sys.argv[0]
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
"""
Test code for primitive DHT operations.
"""
stay_alive = 1
filename = "/tmp/tempstoredata"
filekey = os.path.basename(filename)
key = 87328673569979667228965797330646992089697345905484734072690869757741450870337L
# format of block metadata is
# {(i, datakey): storingNodeID, ..., 'n': n, 'm': m}
# where i<n+m, datakey is a sha256 of the data stored, and storingNodeID is
# either a nodeID or a list of nodeIDs.
testval = {(0, 802484L): 465705L, (1, 780638L): 465705L, (2, 169688L): 465705L,
(3, 267175L): 465705L, (4, 648636L): 465705L, (5, 838315L): 465705L,
(6, 477619L): 465705L, (7, 329906L): 465705L, (8, 610565L): 465705L,
(9, 217811L): 465705L, (10, 374124L): 465705L, (11, 357214L): 465705L,
(12, 147307L): 465705L, (13, 427751L): 465705L, (14, 927853L): 465705L,
(15, 760369L): 465705L, (16, 707029L): 465705L, (17, 479234L): 465705L,
(18, 190455L): 465705L, (19, 647489L): 465705L, (20, 620470L): 465705L,
(21, 777532L): 465705L, (22, 622383L): 465705L, (23, 573283L): 465705L,
(24, 613082L): 465705L, (25, 433593L): 465705L, (26, 584543L): 465705L,
(27, 337485L): 465705L, (28, 911014L): 465705L, (29, 594065L): 465705L,
(30, 375876L): 465705L, (31, 726818L): 465705L, (32, 835759L): 465705L,
(33, 814060L): 465705L, (34, 237176L): 465705L, (35, 538268L): 465705L,
(36, 272650L): 465705L, (37, 314058L): 465705L, (38, 257714L): 465705L,
(39, 439931L): 465705L, 'n': 20, 'm': 20}
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
def cleanup(_, node):
    """Final handler: schedule node shutdown after stay_alive seconds."""
    logger.info("waiting %ds to shutdown...", stay_alive)
    reactor.callLater(stay_alive, node.stop)
def testerror(failure, message, node):
    """
    error handler for test errbacks: log the failure and swallow it (no
    return value, so the deferred chain resumes on the callback path).
    """
    logger.warn("testerror message: %s" % message)
    logger.warn("testerror: '%s'" % str(failure))
    logger.warn("At least 1 test FAILED")
def endtests(res, nKu, node, host, port):
    """ executes after all tests: decode the final kFindValue result and
    compare it against the module-level testval stored earlier. """
    try:
        res = fdecode(res)
    except ValueError:
        pass  # res may already be in decoded form
    if res != testval:
        return testerror(None, "retrieved value does not match stored value:"
            " '%s' != '%s'" % (res, testval), node)
    logger.info("testkFindVal PASSED")
    logger.debug("testkFindVal result: %s" % str(res))
    logger.info("all tests PASSED")
    return res
def testkFindVal(res, nKu, node, host, port):
    """Stage: local kFindValue on the module-level key; chains to endtests."""
    logger.info("testSendkFindVal PASSED")
    logger.debug("testSendkFindVal result: %s" % str(res))
    logger.info("attempting testkFindValue")
    deferred = node.client.kFindValue(key)
    deferred.addCallback(endtests, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testkFindValue", node)
    return deferred
def testSendkFindVal(res, nKu, node, host, port):
    """Stage: wire-level sendkFindValue to host:port; chains to testkFindVal."""
    logger.info("testkStore PASSED")
    logger.debug("testkStore result: %s" % str(res))
    logger.info("attempting testSendkFindValue")
    deferred = node.client.sendkFindValue(host, port, key)
    deferred.addCallback(testkFindVal, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testSendkFindValue", node)
    return deferred
def testkStore(res, nKu, node, host, port):
    """Stage: local kStore of testval under the module-level key; chains
    to testSendkFindVal."""
    logger.info("testSendkStore PASSED")
    logger.debug("testSendkStore result: %s" % str(res))
    logger.info("attempting testkStore")
    deferred = node.client.kStore(key, testval)
    deferred.addCallback(testSendkFindVal, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testkStore", node)
    return deferred
def testSendkStore(res, nKu, node, host, port):
    """Stage: wire-level sendkStore of testval to host:port; chains to
    testkStore."""
    logger.info("testkFindNode PASSED")
    logger.debug("testkFindNode result: %s" % str(res))
    logger.info("attempting testSendkStore")
    deferred = node.client.sendkStore(host, port, key, testval)
    deferred.addCallback(testkStore, nKu, node, host, port)
    # BUG FIX: errback message said "failed at testkStore" (copy-paste);
    # this stage is testSendkStore.
    deferred.addErrback(testerror, "failed at testSendkStore", node)
    return deferred
def testkFindNode(res, nKu, node, host, port):
    """ executes after testSendkFindNode: local kFindNode on the
    module-level key; chains to testSendkStore. """
    logger.info("testSendkFindNode PASSED")
    logger.debug("testSendkFindNode result: %s" % str(res))
    logger.info("attempting kFindNode")
    deferred = node.client.kFindNode(key)
    deferred.addCallback(testSendkStore, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at kFindNode", node)
    return deferred
def testSendkFindNode(nKu, node, host, port):
    """ executes after testGetID: wire-level sendkFindNode to host:port;
    `nKu` is the remote node's public key.  Chains to testkFindNode. """
    logger.info("testkGetID PASSED")
    logger.info("attempting sendkFindNode")
    deferred = node.client.sendkFindNode(host, port, key)
    deferred.addCallback(testkFindNode, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at sendkFindNode", node)
    return deferred
def testGetID(node, host, port):
    """ Tests sendGetID(), and invokes testSendkFindNode on success """
    deferred = node.client.sendGetID(host, port)
    deferred.addCallback(testSendkFindNode, node, host, port)
    deferred.addErrback(testerror, "failed at testGetID", node)
    return deferred
def runTests(host, port=None, listenport=None):
    """
    Start a local FludNode (listening on `listenport` if given) and run
    the single-shot DHT primitive suite against host:port.
    """
    host = getCanonicalIP(host)
    node = FludNode(port=listenport)
    if port == None:
        port = node.config.port
    logger.info("testing against %s:%s, localport=%s" % (host,
        port, listenport))
    node.run()
    d = testGetID(node, host, port)
    d.addBoth(cleanup, node)
    #testkFindVal("blah", node.config.Ku, node, host, port)
    node.join()
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1]))
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2]))
elif len(sys.argv) == 4:
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludFileCoder import Coder, Decoder
if __name__ == '__main__':
if len(sys.argv) != 2:
print "usage: %s sourcefile" % sys.argv[0]
else:
fname = sys.argv[1]
stem = fname+"_seg-"
stem2 = fname+"_seg2-"
c = Coder(20, 20, 7)
stemfiles = c.codeData(sys.argv[1],stem)
print "encoded %s to:" % fname
print stemfiles
d = Decoder(fname+"-recovered", 20, 20, 7)
for f in stemfiles:
print "decoding %s" % f
ret = d.decodeData(f)
if not ret == 0:
break
print "decoded files"
| Python |
#!/usr/bin/python
import tarfile, tempfile, random, os, sys
import gzip
from Crypto.Hash import SHA256
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.fencode import fencode
import flud.TarfileUtils as TarfileUtils
def maketarball(numfiles, avgsize, hashnames=False, addmetas=False):
    """Build a throwaway tar archive of *numfiles* files of ~*avgsize* bytes.

    hashnames -- name archive members by the fencoded SHA-256 of the content
    addmetas  -- also add a 48-byte '<name>.343434.meta' member per file
    Returns (tarballname, list of member names).
    """
    # tempfile.mkstemp() instead of the race-prone, deprecated mktemp()
    tarfd, tarballname = tempfile.mkstemp(suffix=".tar")
    os.close(tarfd)
    tarball = tarfile.open(tarballname, 'w')
    if addmetas:
        metafd, metafname = tempfile.mkstemp()
        metaf = os.fdopen(metafd, 'w')
        metaf.write('m'*48)
        metaf.close()
    for i in xrange(numfiles):
        fd, fname = tempfile.mkstemp()
        f = os.fdopen(fd, 'wb')
        # jitter the size around avgsize (0.5x - 1.5x)
        size = int(avgsize * (random.random()+0.5))
        blocksize = 65*1024
        if hashnames:
            sha256 = SHA256.new()
        for j in range(0, size, blocksize):
            if j+blocksize > size:
                block = 'a'*(size-j)
            else:
                block = 'a'*blocksize
            if hashnames:
                sha256.update(block)
            f.write(block)
        f.close()
        arcname = fname
        if hashnames:
            arcname = fencode(int(sha256.hexdigest(),16))
        tarball.add(fname, arcname)
        if addmetas:
            tarball.add(metafname, arcname+".343434.meta")
        os.remove(fname)
    if addmetas:
        os.remove(metafname)
    contents = tarball.getnames()
    tarball.close()
    return tarballname, contents
def gzipTarball(tarball):
    """Gzip-compress *tarball*, delete the original, return the '.gz' name."""
    gzname = tarball + ".gz"
    src = open(tarball, 'rb')
    dst = gzip.GzipFile(gzname, 'wb')
    dst.write(src.read())
    dst.close()
    src.close()
    os.remove(tarball)
    return gzname
def main():
# test plain TarfileUtils.delete()
(tballname, contents) = maketarball(5, 4096)
TarfileUtils.delete(tballname, contents[2:4])
tarball = tarfile.open(tballname, 'r')
os.remove(tballname)
assert(tarball.getnames() == contents[:2]+contents[4:])
tarball.close()
# test gzip TarfileUtils.delete()
(tballname, contents) = maketarball(5, 4096)
tballname = gzipTarball(tballname)
TarfileUtils.delete(tballname, contents[2:4])
tarball = tarfile.open(tballname, 'r')
os.remove(tballname)
assert(tarball.getnames() == contents[:2]+contents[4:])
tarball.close()
# test plain TarfileUtils.concatenate()
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(gz, plain)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname1 = gzipTarball(tballname1)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(plain, gz)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname2 = gzipTarball(tballname2)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(gz, gz)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname1 = gzipTarball(tballname1)
tballname2 = gzipTarball(tballname2)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.verifyHashes(plain no meta)
(tballname, contents) = maketarball(5, 4096, True)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]))
os.remove(tballname)
# test TarfileUtils.verifyHashes(plain with meta)
(tballname, contents) = maketarball(5, 4096, True, True)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]), ".meta")
os.remove(tballname)
# test TarfileUtils.verifyHashes(gzipped no meta)
(tballname, contents) = maketarball(5, 4096, True)
tballname = gzipTarball(tballname)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]))
os.remove(tballname)
# test TarfileUtils.verifyHashes(gzipped with meta)
(tballname, contents) = maketarball(5, 4096, True, True)
tballname = gzipTarball(tballname)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]), ".meta")
os.remove(tballname)
print "all tests passed"
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
from twisted.internet import defer
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
import flud.FludCrypto
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
from flud.FludDefer import ErrDeferredList
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
CONCURRENT=300
CONCREPORT=50
node = None
files = None
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
logger.setLevel(logging.DEBUG)
def suitesuccess(results):
    """Terminal callback: announce suite success and forward the results."""
    logger.info("all tests in suite passed")
    return results
def suiteerror(err):
    """Terminal errback: report that the suite aborted, keep propagating.

    Parameter renamed from 'failure' to stop shadowing the imported
    twisted.python.failure module.
    """
    logger.info("suite did not complete")
    logger.info("DEBUG: %s" % err)
    return err
def stagesuccess(result, message):
    """Log that stage *message* completed and forward its result."""
    logger.info("stage %s succeeded" % message)
    return result
def stageerror(err, message):
    """Errback for a test stage: log which stage failed, keep propagating.

    Parameter renamed from 'failure' to stop shadowing the imported
    twisted.python.failure module.
    """
    logger.info("stage %s failed" % message)
    return err
def itersuccess(res, i, message):
    """Per-iteration callback; logs only every CONCREPORT-th success to keep
    the output volume manageable."""
    if not i % CONCREPORT:
        logger.info("itersuccess: %s" % message)
    return res
def itererror(err, message):
    """Per-iteration errback: log *message*, dump the traceback, and keep
    the failure propagating.

    Parameter renamed from 'failure' to stop shadowing the imported
    twisted.python.failure module.
    """
    logger.info("itererror message: %s" % message)
    err.printTraceback()
    return err
def checkVERIFY(results, nKu, host, port, hashes, num=CONCURRENT):
    """Compare each VERIFY response with the locally computed digest.

    results -- (success, value) pairs from ErrDeferredList
    hashes  -- hex digests computed locally before the requests were sent
    Raises on the first mismatch; otherwise forwards *results*.
    """
    logger.info("  checking VERIFY results...")
    for i in range(num):
        expected = hashes[i]     # renamed from 'hash' (shadowed the builtin)
        actual = results[i][1]
        if long(expected, 16) != long(actual, 16):
            raise failure.DefaultException("verify didn't match: %s != %s"
                    % (expected, actual))
    logger.info("  ...VERIFY results good.")
    return results
def testVERIFY(res, nKu, host, port, num=CONCURRENT):
    """Issue *num* concurrent VERIFY requests against host:port.

    For each fake file, reads a random 20-byte window, hashes it locally,
    and asks the remote node to verify the same (offset, length); the
    local hashes are compared to the responses in checkVERIFY.  Uses the
    module globals 'files' and 'node'.
    """
    logger.info("testVERIFY started...")
    dlist = []
    hashes = []
    for i in range(num):
        #if i == 4:
        #	port = 21
        fd = os.open(files[i], os.O_RDONLY)
        fsize = os.fstat(fd)[stat.ST_SIZE]
        length = 20
        # pick a random in-bounds window to verify
        offset = random.randrange(fsize-length)
        os.lseek(fd, offset, 0)
        data = os.read(fd, length)
        os.close(fd)
        hashes.append(FludCrypto.hashstring(data))
        filekey = os.path.basename(files[i])
        deferred = node.client.sendVerify(filekey, offset, length, host,
                port, nKu)
        deferred.addCallback(itersuccess, i, "succeeded at testVERIFY %d" % i)
        deferred.addErrback(itererror, "failed at testVERIFY %d: %s"
                % (i, filekey))
        dlist.append(deferred)
    d = ErrDeferredList(dlist)
    d.addCallback(stagesuccess, "testVERIFY")
    d.addErrback(stageerror, 'failed at testVERIFY')
    d.addCallback(checkVERIFY, nKu, host, port, hashes, num)
    return d
def checkRETRIEVE(res, nKu, host, port, num=CONCURRENT):
    """Verify each retrieved file is byte-identical to the stored original,
    then chain into testVERIFY.  Uses module globals 'files' and 'node'."""
    logger.info("  checking RETRIEVE results...")
    for i in range(num):
        f1 = open(files[i])
        filekey = os.path.basename(files[i])
        # retrieved copies land in the node's client directory under filekey
        f2 = open(node.config.clientdir+"/"+filekey)
        if (f1.read() != f2.read()):
            f1.close()
            f2.close()
            raise failure.DefaultException("upload/download files don't match")
        f2.close()
        f1.close()
    logger.info("  ...RETRIEVE results good.")
    return testVERIFY(res, nKu, host, port, num)
def testRETRIEVE(res, nKu, host, port, num=CONCURRENT):
    """Kick off *num* concurrent RETRIEVE requests, then chain
    checkRETRIEVE to validate the downloaded copies."""
    logger.info("testRETRIEVE started...")
    deferreds = []
    for idx in range(num):
        key = os.path.basename(files[idx])
        dfr = node.client.sendRetrieve(key, host, port, nKu)
        dfr.addCallback(itersuccess, idx, "succeeded at testRETRIEVE %d" % idx)
        dfr.addErrback(itererror, "failed at testRETRIEVE %d: %s"
                % (idx, key))
        deferreds.append(dfr)
    dl = ErrDeferredList(deferreds)
    dl.addCallback(stagesuccess, "testRETRIEVE")
    dl.addErrback(stageerror, 'failed at testRETRIEVE')
    dl.addCallback(checkRETRIEVE, nKu, host, port, num)
    return dl
def testSTORE(nKu, host, port, num=CONCURRENT):
    """Store the first *num* fake files on host:port, then chain
    testRETRIEVE."""
    logger.info("testSTORE started...")
    deferreds = []
    for idx in range(num):
        dfr = node.client.sendStore(files[idx], None, host, port, nKu)
        dfr.addCallback(itersuccess, idx, "succeeded at testSTORE %d" % idx)
        dfr.addErrback(itererror, "failed at testSTORE %d" % idx)
        deferreds.append(dfr)
    dl = ErrDeferredList(deferreds)
    dl.addCallback(stagesuccess, "testSTORE")
    dl.addErrback(stageerror, 'failed at testSTORE')
    dl.addCallback(testRETRIEVE, nKu, host, port, num)
    return dl
def testID(host, port, num=CONCURRENT):
    """Fire *num* concurrent GetID requests, then chain into testSTORE."""
    logger.info("testID started...")
    deferreds = []
    for idx in range(num):
        dfr = node.client.sendGetID(host, port)
        dfr.debug = True
        dfr.addErrback(itererror, "failed at testID %d" % idx)
        deferreds.append(dfr)
    dl = ErrDeferredList(deferreds, returnOne=True)
    dl.addCallback(stagesuccess, "testID")
    dl.addErrback(stageerror, 'testID')
    dl.addCallback(testSTORE, host, port, num)
    return dl
def runTests(host, port=None, listenport=None):
    """Create fake data, start a local FludNode, and run the
    ID -> STORE -> RETRIEVE -> VERIFY suite against host:port.
    Blocks in node.join() until the reactor stops (see cleanup())."""
    num = CONCURRENT
    #num = 5
    global files, node
    files = createFakeData()
    node = FludNode(port=listenport)
    if port == None:
        # default to the node's configured port
        port = node.config.port
    node.run()
    if num > len(files):
        num = len(files)
    d1 = testID(host, port, num)
    d1.addCallback(suitesuccess)
    d1.addErrback(suiteerror)
    d1.addBoth(cleanup)
    #nku = FludRSA.importPublicKey({'e': 65537L, 'n': 138646504113696863667807411690225283099791076530135000331764542300161152585426296356409290228001197773401729468267448145387041995053893737880473447042984919037843163552727823101445272608470814297563395471329917904393936481407769396601027233955938405001434483474847834031774504827822809611707032477570548179411L})
    #d2 = testSTORE(nku, node, host, port, files, num)
    #d2.addErrback(suiteerror, 'failed at %s' % d2.testname)
    node.join()
    #node.start() # doesn't work, because reactor may not have started
    # listening by time requests start flying
def createFakeData(dir="/tmp", num=CONCURRENT):
randsrc = open("/dev/urandom", 'rb')
files = []
for i in range(num):
randdata = randsrc.read(256)
filekey = fencode(int(FludCrypto.hashstring(randdata), 16))
filename = dir+'/'+filekey
f = open(filename, 'wb')
f.write(randdata)
f.close()
files.append(filename)
randsrc.close()
return files
def deleteFakeData(files):
    """Remove the files created by createFakeData, warning about any that
    are already gone."""
    for f in files:
        if os.path.exists(f):
            os.remove(f)
        else:
            # was: "s already deleted!" % f -- missing '%s', raised TypeError
            logger.warn("%s already deleted!" % f)
def cleanup(dummy=None):
    """Delete the fake data files and schedule node shutdown.

    NOTE(review): time.sleep() blocks the reactor thread here -- presumably
    acceptable at end of suite; confirm.
    """
    logger.info("cleaning up files and shutting down in 1 seconds...")
    time.sleep(1)
    deleteFakeData(files)
    reactor.callLater(1, node.stop)
    logger.info("done cleaning up")
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
print "Warning: testing against self my result in timeout failures"
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket, tempfile
from twisted.python import failure
from StringIO import StringIO
from zlib import crc32
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
import flud.FludCrypto as FludCrypto
from flud.fencode import fencode, fdecode
from flud.protocol.FludCommUtil import *
from flud.FludDefer import ErrDeferredList
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# metadatablock: (block#,n,k,blockdata)
metadatablock = fencode((1,20,40,'adfdsfdffffffddddddddddddddd'))
fake_mkey_offset = 111111
def testerror(failure, message, node):
"""
error handler for test errbacks
"""
print "testerror message: %s" % message
print "testerror: %s" % str(failure)
print "At least 1 test FAILED"
return failure
def allGood(_, nKu):
print "all tests PASSED"
return nKu
def checkDELETE(res, nKu, fname, fkey, mkey, node, host, port, totalDelete):
    """ checks to ensure the file was deleted """
    # totalDelete = True if this delete op should remove all meta (and data)
    # NOTE: the per-branch lambdas freeze their arguments via a default-arg
    # tuple, then splat it into the next stage.
    if totalDelete:
        # try to retrieve with any metakey, should fail
        print "expecting failed retrieve, any metakey"
        return testRETRIEVE(res, nKu, fname, fkey, True, node, host, port,
                lambda args=(res, nKu): allGood(*args), False)
    else:
        # try to retrieve with any metakey, should succeed; then delete the
        # remaining (offset) metakey with totalDelete=True
        print "expecting successful retrieve, any metakey"
        return testRETRIEVE(res, nKu, fname, fkey, True, node, host, port,
                lambda args=(res, nKu, fname, fkey, mkey+fake_mkey_offset,
                node, host, port, True): testDELETE(*args))
def testDELETE(res, nKu, fname, fkey, mkey, node, host, port, totalDelete):
""" Tests sendDelete, and invokes checkDELETE on success """
print "starting testDELETE %s.%s" % (fname, mkey)
#return checkDELETE(None, nKu, fname, fkey, mkey, node, host, port, False)
deferred = node.client.sendDelete(fkey, mkey, host, port, nKu)
deferred.addCallback(checkDELETE, nKu, fname, fkey, mkey, node, host, port,
totalDelete)
deferred.addErrback(testerror, "failed at testDELETE", node)
return deferred
def checkVERIFY(res, nKu, fname, fkey, mkey, node, host, port, hash, newmeta):
""" executes after testVERIFY """
if long(hash, 16) != long(res, 16):
raise failure.DefaultException("verify didn't match: %s != %s"
% (hash, res))
print "checkVERIFY (%s) %s success" % (newmeta, fname)
if newmeta:
return testDELETE(res, nKu, fname, fkey, mkey, node, host, port, False)
else:
return testVERIFY(nKu, fname, fkey, mkey, node, host, port, True)
def testVERIFY(nKu, fname, fkey, mkey, node, host, port, newmeta):
    """ Test sendVerify """
    # newmeta, if True, will generate new metadata to be stored during verify
    if newmeta:
        thismkey = mkey+fake_mkey_offset
    else:
        thismkey = mkey
    print "starting testVERIFY (%s) %s.%s" % (newmeta, fname, thismkey)
    # hash a random 20-byte window of the local file; the remote node must
    # return the same hash for the same (offset, length)
    fd = os.open(fname, os.O_RDONLY)
    fsize = os.fstat(fd)[stat.ST_SIZE]
    length = 20
    offset = random.randrange(fsize-length)
    os.lseek(fd, offset, 0)
    data = os.read(fd, length)
    os.close(fd)
    hash = FludCrypto.hashstring(data)
    deferred = node.client.sendVerify(fkey, offset, length, host, port, nKu,
            (thismkey, StringIO(metadatablock)))
    deferred.addCallback(checkVERIFY, nKu, fname, fkey, mkey, node, host,
            port, hash, newmeta)
    deferred.addErrback(testerror, "failed at testVERIFY (%s)" % newmeta, node)
    return deferred
def failedRETRIEVE(res, nextCallable):
    """Errback for a retrieve that was expected to fail: swallow the failure
    and advance the test chain."""
    return nextCallable()
def checkRETRIEVE(res, nKu, fname, fkey, mkey, node, host, port, nextCallable):
    """ Compares the file that was stored with the one that was retrieved """
    f1 = open(fname)
    # res is a list of retrieved paths; pick the one ending in fkey
    filename = [f for f in res if f[-len(fkey):] == fkey][0]
    f2 = open(filename)
    if (f1.read() != f2.read()):
        f1.close()
        f2.close()
        raise failure.DefaultException(
                "upload/download (%s, %s) files don't match" % (fname,
                os.path.join(node.config.clientdir, fkey)))
    f1.close()
    f2.close()
    # mkey == True means "any metakey": skip the metadata comparison
    if mkey != True:
        expectedmeta = "%s.%s.meta" % (fkey, mkey)
        metanames = [f for f in res if f[-len(expectedmeta):] == expectedmeta]
        if not metanames:
            raise failure.DefaultException("expected metadata was missing")
        f3 = open(metanames[0])
        md = f3.read()
        if md != metadatablock:
            raise failure.DefaultException("upload/download metadata doesn't"
                    " match (%s != %s)" % (md, metadatablock))
    return nextCallable()
def testRETRIEVE(res, nKu, fname, fkey, mkey, node, host, port, nextCallable,
expectSuccess=True):
""" Tests sendRetrieve, and invokes checkRETRIEVE on success """
print "starting testRETRIEVE %s.%s" % (fname, mkey)
deferred = node.client.sendRetrieve(fkey, host, port, nKu, mkey)
deferred.addCallback(checkRETRIEVE, nKu, fname, fkey, mkey, node, host,
port, nextCallable)
if expectSuccess:
deferred.addErrback(testerror, "failed at testRETRIEVE", node)
else:
deferred.addErrback(failedRETRIEVE, nextCallable)
return deferred
def testSTORE2(nKu, fname, fkey, node, host, port):
mkey = crc32(fname)
mkey2 = mkey+(2*fake_mkey_offset)
print "starting testSTORE %s.%s" % (fname, mkey2)
deferred = node.client.sendStore(fname, (mkey2, StringIO(metadatablock)),
host, port, nKu)
deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey2, node, host,
port, lambda args=(nKu, fname, fkey, mkey, node, host, port,
False): testVERIFY(*args))
deferred.addErrback(testerror, "failed at testSTORE", node)
return deferred
def testSTORE(nKu, fname, fkey, node, host, port):
""" Tests sendStore, and invokes testRETRIEVE on success """
mkey = crc32(fname)
print "starting testSTORE %s.%s" % (fname, mkey)
deferred = node.client.sendStore(fname, (mkey, StringIO(metadatablock)),
host, port, nKu)
deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey, node, host, port,
lambda args=(nKu, fname, fkey, node, host, port): testSTORE2(*args))
deferred.addErrback(testerror, "failed at testSTORE", node)
return deferred
def testID(node, host, port):
""" Tests sendGetID(), and invokes testSTORE on success """
print "starting testID"
deferred = node.client.sendGetID(host, port)
deferred.addErrback(testerror, "failed at testID", node)
return deferred
def testAggSTORE(nKu, aggFiles, node, host, port):
    """Store several small files concurrently, then retrieve and verify
    each one; allGood fires when the whole list completes."""
    print "starting testAggSTORE"
    dlist = []
    for fname, fkey in aggFiles:
        mkey = crc32(fname)
        print "testAggSTORE %s (%s)" % (fname, mkey)
        deferred = node.client.sendStore(fname, (mkey, StringIO(metadatablock)),
                host, port, nKu)
        # per-iteration values are frozen via the lambda's default-arg tuple,
        # avoiding the late-binding closure pitfall
        deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey, node, host,
                port, lambda args=(nKu, fname, fkey, mkey, node, host,
                port, False): testVERIFY(*args))
        deferred.addErrback(testerror, "failed at testAggSTORE", node)
        dlist.append(deferred)
    dl = ErrDeferredList(dlist)
    dl.addCallback(allGood, nKu)
    dl.addErrback(testerror, "failed at testAggSTORE", node)
    return dl
def cleanup(_, node, filenamelist):
for f in filenamelist:
try:
os.remove(f)
except:
print "couldn't remove %s" % f
reactor.callLater(1, node.stop)
def generateTestData(minSize):
    """Create a random file of at least *minSize* bytes, renamed into /tmp
    under the fencoded hash of its contents; returns (filename, filekey)."""
    # tempfile.mkstemp() instead of the race-prone, deprecated mktemp()
    fd, fname = tempfile.mkstemp()
    f = os.fdopen(fd, 'w')
    data = FludCrypto.generateRandom(minSize/50)
    # write 51-100 copies, guaranteeing at least minSize bytes total
    for i in range(0, 51+random.randrange(50)):
        f.write(data)
    f.close()
    filekey = FludCrypto.hashfile(fname)
    filekey = fencode(int(filekey, 16))
    filename = os.path.join("/tmp", filekey)
    os.rename(fname, filename)
    return (filename, filekey)
def runTests(host, port=None, listenport=None):
    """Generate test files, start a local FludNode, and chain the
    ID -> STORE -> RETRIEVE -> VERIFY -> DELETE tests against host:port.
    Blocks in node.join() until the reactor stops."""
    (largeFilename, largeFilekey) = generateTestData(512000)
    (smallFilename, smallFilekey) = generateTestData(5120)
    aggFiles = []
    for i in range(4):
        aggFiles.append(generateTestData(4096))
    node = FludNode(port=listenport)
    if port == None:
        # default to the node's configured port
        port = node.config.port
    node.run()
    d = testID(node, host, port)
    d.addCallback(testSTORE, largeFilename, largeFilekey, node, host, port)
    d.addCallback(testSTORE, smallFilename, smallFilekey, node, host, port)
    d.addCallback(testAggSTORE, aggFiles, node, host, port)
    d.addBoth(cleanup, node, [i[0] for i in aggFiles] + [largeFilename,
            smallFilename])
    node.join()
def main():
    """Parse argv and run the suite; defaults to testing against self."""
    localhost = socket.getfqdn()
    if len(sys.argv) == 1:
        runTests(localhost) # test by talking to self
    elif len(sys.argv) == 2:
        # NOTE(review): eval() on argv is unsafe; int() would suffice for ports
        runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
    elif len(sys.argv) == 3:
        runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
    elif len(sys.argv) == 4:
        # talk to [1] on port [2], listen on port [3]
        runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
# script entry point
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
from twisted.internet import defer
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
import flud.FludCrypto
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
from flud.FludDefer import ErrDeferredList
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
CONCURRENT=300
CONCREPORT=50
node = None
files = None
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
logger.setLevel(logging.DEBUG)
def suitesuccess(results):
    """Terminal callback: announce suite success and forward the results."""
    logger.info("all tests in suite passed")
    return results
def suiteerror(err):
    """Terminal errback: report that the suite aborted, keep propagating.

    Parameter renamed from 'failure' to stop shadowing the imported
    twisted.python.failure module.
    """
    logger.info("suite did not complete")
    logger.info("DEBUG: %s" % err)
    return err
def stagesuccess(result, message):
    """Log that stage *message* completed and forward its result."""
    logger.info("stage %s succeeded" % message)
    return result
def stageerror(err, message):
    """Errback for a test stage: log which stage failed, keep propagating.

    Parameter renamed from 'failure' to stop shadowing the imported
    twisted.python.failure module.
    """
    logger.info("stage %s failed" % message)
    return err
def itersuccess(res, i, message):
    """Per-iteration callback; logs only every CONCREPORT-th success to keep
    the output volume manageable."""
    if not i % CONCREPORT:
        logger.info("itersuccess: %s" % message)
    return res
def itererror(err, message):
    """Per-iteration errback: log *message*, dump the traceback, and keep
    the failure propagating.

    Parameter renamed from 'failure' to stop shadowing the imported
    twisted.python.failure module.
    """
    logger.info("itererror message: %s" % message)
    err.printTraceback()
    return err
def checkVERIFY(results, nKu, host, port, hashes, num=CONCURRENT):
    """Compare each VERIFY response with the locally computed digest.

    results -- (success, value) pairs from ErrDeferredList
    hashes  -- hex digests computed locally before the requests were sent
    Raises on the first mismatch; otherwise forwards *results*.
    """
    logger.info("  checking VERIFY results...")
    for i in range(num):
        expected = hashes[i]     # renamed from 'hash' (shadowed the builtin)
        actual = results[i][1]
        if long(expected, 16) != long(actual, 16):
            raise failure.DefaultException("verify didn't match: %s != %s"
                    % (expected, actual))
    logger.info("  ...VERIFY results good.")
    return results
def testVERIFY(res, nKu, host, port, num=CONCURRENT):
    """Issue *num* concurrent VERIFY requests against host:port.

    For each fake file, reads a random 20-byte window, hashes it locally,
    and asks the remote node to verify the same (offset, length); the
    local hashes are compared to the responses in checkVERIFY.  Uses the
    module globals 'files' and 'node'.
    """
    logger.info("testVERIFY started...")
    dlist = []
    hashes = []
    for i in range(num):
        #if i == 4:
        #	port = 21
        fd = os.open(files[i], os.O_RDONLY)
        fsize = os.fstat(fd)[stat.ST_SIZE]
        length = 20
        # pick a random in-bounds window to verify
        offset = random.randrange(fsize-length)
        os.lseek(fd, offset, 0)
        data = os.read(fd, length)
        os.close(fd)
        hashes.append(FludCrypto.hashstring(data))
        filekey = os.path.basename(files[i])
        deferred = node.client.sendVerify(filekey, offset, length, host,
                port, nKu)
        deferred.addCallback(itersuccess, i, "succeeded at testVERIFY %d" % i)
        deferred.addErrback(itererror, "failed at testVERIFY %d: %s"
                % (i, filekey))
        dlist.append(deferred)
    d = ErrDeferredList(dlist)
    d.addCallback(stagesuccess, "testVERIFY")
    d.addErrback(stageerror, 'failed at testVERIFY')
    d.addCallback(checkVERIFY, nKu, host, port, hashes, num)
    return d
def checkRETRIEVE(res, nKu, host, port, num=CONCURRENT):
    """Verify each retrieved file is byte-identical to the stored original,
    then chain into testVERIFY.  Uses module globals 'files' and 'node'."""
    logger.info("  checking RETRIEVE results...")
    for i in range(num):
        f1 = open(files[i])
        filekey = os.path.basename(files[i])
        # retrieved copies land in the node's client directory under filekey
        f2 = open(node.config.clientdir+"/"+filekey)
        if (f1.read() != f2.read()):
            f1.close()
            f2.close()
            raise failure.DefaultException("upload/download files don't match")
        f2.close()
        f1.close()
    logger.info("  ...RETRIEVE results good.")
    return testVERIFY(res, nKu, host, port, num)
def testRETRIEVE(res, nKu, host, port, num=CONCURRENT):
    """Kick off *num* concurrent RETRIEVE requests, then chain
    checkRETRIEVE to validate the downloaded copies."""
    logger.info("testRETRIEVE started...")
    deferreds = []
    for idx in range(num):
        key = os.path.basename(files[idx])
        dfr = node.client.sendRetrieve(key, host, port, nKu)
        dfr.addCallback(itersuccess, idx, "succeeded at testRETRIEVE %d" % idx)
        dfr.addErrback(itererror, "failed at testRETRIEVE %d: %s"
                % (idx, key))
        deferreds.append(dfr)
    dl = ErrDeferredList(deferreds)
    dl.addCallback(stagesuccess, "testRETRIEVE")
    dl.addErrback(stageerror, 'failed at testRETRIEVE')
    dl.addCallback(checkRETRIEVE, nKu, host, port, num)
    return dl
def testSTORE(nKu, host, port, num=CONCURRENT):
    """Store the first *num* fake files on host:port, then chain
    testRETRIEVE."""
    logger.info("testSTORE started...")
    deferreds = []
    for idx in range(num):
        dfr = node.client.sendStore(files[idx], None, host, port, nKu)
        dfr.addCallback(itersuccess, idx, "succeeded at testSTORE %d" % idx)
        dfr.addErrback(itererror, "failed at testSTORE %d" % idx)
        deferreds.append(dfr)
    dl = ErrDeferredList(deferreds)
    dl.addCallback(stagesuccess, "testSTORE")
    dl.addErrback(stageerror, 'failed at testSTORE')
    dl.addCallback(testRETRIEVE, nKu, host, port, num)
    return dl
def testID(host, port, num=CONCURRENT):
    """Fire *num* concurrent GetID requests, then chain into testSTORE."""
    logger.info("testID started...")
    deferreds = []
    for idx in range(num):
        dfr = node.client.sendGetID(host, port)
        dfr.debug = True
        dfr.addErrback(itererror, "failed at testID %d" % idx)
        deferreds.append(dfr)
    dl = ErrDeferredList(deferreds, returnOne=True)
    dl.addCallback(stagesuccess, "testID")
    dl.addErrback(stageerror, 'testID')
    dl.addCallback(testSTORE, host, port, num)
    return dl
def runTests(host, port=None, listenport=None):
    """Create fake data, start a local FludNode, and run the
    ID -> STORE -> RETRIEVE -> VERIFY suite against host:port.
    Blocks in node.join() until the reactor stops (see cleanup())."""
    num = CONCURRENT
    #num = 5
    global files, node
    files = createFakeData()
    node = FludNode(port=listenport)
    if port == None:
        # default to the node's configured port
        port = node.config.port
    node.run()
    if num > len(files):
        num = len(files)
    d1 = testID(host, port, num)
    d1.addCallback(suitesuccess)
    d1.addErrback(suiteerror)
    d1.addBoth(cleanup)
    #nku = FludRSA.importPublicKey({'e': 65537L, 'n': 138646504113696863667807411690225283099791076530135000331764542300161152585426296356409290228001197773401729468267448145387041995053893737880473447042984919037843163552727823101445272608470814297563395471329917904393936481407769396601027233955938405001434483474847834031774504827822809611707032477570548179411L})
    #d2 = testSTORE(nku, node, host, port, files, num)
    #d2.addErrback(suiteerror, 'failed at %s' % d2.testname)
    node.join()
    #node.start() # doesn't work, because reactor may not have started
    # listening by time requests start flying
def createFakeData(dir="/tmp", num=CONCURRENT):
randsrc = open("/dev/urandom", 'rb')
files = []
for i in range(num):
randdata = randsrc.read(256)
filekey = fencode(int(FludCrypto.hashstring(randdata), 16))
filename = dir+'/'+filekey
f = open(filename, 'wb')
f.write(randdata)
f.close()
files.append(filename)
randsrc.close()
return files
def deleteFakeData(files):
    """Remove the files created by createFakeData, warning about any that
    are already gone."""
    for f in files:
        if os.path.exists(f):
            os.remove(f)
        else:
            # was: "s already deleted!" % f -- missing '%s', raised TypeError
            logger.warn("%s already deleted!" % f)
def cleanup(dummy=None):
    """Delete the fake data files and schedule node shutdown.

    NOTE(review): time.sleep() blocks the reactor thread here -- presumably
    acceptable at end of suite; confirm.
    """
    logger.info("cleaning up files and shutting down in 1 seconds...")
    time.sleep(1)
    deleteFakeData(files)
    reactor.callLater(1, node.stop)
    logger.info("done cleaning up")
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
print "Warning: testing against self my result in timeout failures"
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludFileCoder import Coder, Decoder
if __name__ == '__main__':
if len(sys.argv) != 2:
print "usage: %s sourcefile" % sys.argv[0]
else:
fname = sys.argv[1]
stem = fname+"_seg-"
stem2 = fname+"_seg2-"
c = Coder(20, 20, 7)
stemfiles = c.codeData(sys.argv[1],stem)
print "encoded %s to:" % fname
print stemfiles
d = Decoder(fname+"-recovered", 20, 20, 7)
for f in stemfiles:
print "decoding %s" % f
ret = d.decodeData(f)
if not ret == 0:
break
print "decoded files"
| Python |
#!/usr/bin/python
import tarfile, tempfile, random, os, sys
import gzip
from Crypto.Hash import SHA256
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.fencode import fencode
import flud.TarfileUtils as TarfileUtils
def maketarball(numfiles, avgsize, hashnames=False, addmetas=False):
    """Build a throwaway tar archive of *numfiles* files of ~*avgsize* bytes.

    hashnames -- name archive members by the fencoded SHA-256 of the content
    addmetas  -- also add a 48-byte '<name>.343434.meta' member per file
    Returns (tarballname, list of member names).
    """
    # tempfile.mkstemp() instead of the race-prone, deprecated mktemp()
    tarfd, tarballname = tempfile.mkstemp(suffix=".tar")
    os.close(tarfd)
    tarball = tarfile.open(tarballname, 'w')
    if addmetas:
        metafd, metafname = tempfile.mkstemp()
        metaf = os.fdopen(metafd, 'w')
        metaf.write('m'*48)
        metaf.close()
    for i in xrange(numfiles):
        fd, fname = tempfile.mkstemp()
        f = os.fdopen(fd, 'wb')
        # jitter the size around avgsize (0.5x - 1.5x)
        size = int(avgsize * (random.random()+0.5))
        blocksize = 65*1024
        if hashnames:
            sha256 = SHA256.new()
        for j in range(0, size, blocksize):
            if j+blocksize > size:
                block = 'a'*(size-j)
            else:
                block = 'a'*blocksize
            if hashnames:
                sha256.update(block)
            f.write(block)
        f.close()
        arcname = fname
        if hashnames:
            arcname = fencode(int(sha256.hexdigest(),16))
        tarball.add(fname, arcname)
        if addmetas:
            tarball.add(metafname, arcname+".343434.meta")
        os.remove(fname)
    if addmetas:
        os.remove(metafname)
    contents = tarball.getnames()
    tarball.close()
    return tarballname, contents
def gzipTarball(tarball):
    """Gzip-compress *tarball*, delete the original, return the '.gz' name."""
    gzname = tarball + ".gz"
    src = open(tarball, 'rb')
    dst = gzip.GzipFile(gzname, 'wb')
    dst.write(src.read())
    dst.close()
    src.close()
    os.remove(tarball)
    return gzname
def main():
# test plain TarfileUtils.delete()
(tballname, contents) = maketarball(5, 4096)
TarfileUtils.delete(tballname, contents[2:4])
tarball = tarfile.open(tballname, 'r')
os.remove(tballname)
assert(tarball.getnames() == contents[:2]+contents[4:])
tarball.close()
# test gzip TarfileUtils.delete()
(tballname, contents) = maketarball(5, 4096)
tballname = gzipTarball(tballname)
TarfileUtils.delete(tballname, contents[2:4])
tarball = tarfile.open(tballname, 'r')
os.remove(tballname)
assert(tarball.getnames() == contents[:2]+contents[4:])
tarball.close()
# test plain TarfileUtils.concatenate()
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(gz, plain)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname1 = gzipTarball(tballname1)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(plain, gz)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname2 = gzipTarball(tballname2)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.concatenate(gz, gz)
(tballname1, contents1) = maketarball(5, 4096)
(tballname2, contents2) = maketarball(5, 4096)
tballname1 = gzipTarball(tballname1)
tballname2 = gzipTarball(tballname2)
TarfileUtils.concatenate(tballname1, tballname2)
assert(not os.path.exists(tballname2))
tarball = tarfile.open(tballname1, 'r')
os.remove(tballname1)
assert(tarball.getnames() == contents1+contents2)
# test TarfileUtils.verifyHashes(plain no meta)
(tballname, contents) = maketarball(5, 4096, True)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]))
os.remove(tballname)
# test TarfileUtils.verifyHashes(plain with meta)
(tballname, contents) = maketarball(5, 4096, True, True)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]), ".meta")
os.remove(tballname)
# test TarfileUtils.verifyHashes(gzipped no meta)
(tballname, contents) = maketarball(5, 4096, True)
tballname = gzipTarball(tballname)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]))
os.remove(tballname)
# test TarfileUtils.verifyHashes(gzipped with meta)
(tballname, contents) = maketarball(5, 4096, True, True)
tballname = gzipTarball(tballname)
assert(TarfileUtils.verifyHashes(tballname, contents[2:4]), ".meta")
os.remove(tballname)
print "all tests passed"
if __name__ == "__main__":
main()
| Python |
from distutils.core import setup, Extension
import os, sys

# Packaging metadata for the flud decentralized backup system.
# (Extension, os and sys are imported but unused here; left in place by this
# documentation-only pass.)
setup(name="flud",
        version="0.2.1",
        description="flud decentralized backup",
        long_description='a 100% decentralized backup system',
        author="Alen Peacock",
        author_email="apeacock@flud.org",
        url='http://flud.org',
        license='GPLv3 (c)2004-2007 Alen Peacock',
        # pure-python packages shipped with the distribution
        packages=['flud',
            'flud.protocol',
            'flud.bin',
            'flud.test'],
        package_dir={'flud': 'flud',
            'flud.protocol': 'flud/protocol',
            'flud.bin': 'flud/bin',
            'flud.test': 'flud/test'},
        # non-code resources installed alongside the 'flud' package
        package_data={'flud':
            ['images/*.png', 'FludNode.tac', 'fludrules.init']},
        # command-line entry points installed onto the user's PATH
        scripts = ['flud/bin/fludnode',
            'flud/bin/tacpath-flud',
            'flud/bin/fludscheduler',
            'flud/bin/fludclient',
            'flud/bin/fludlocalclient',
            'flud/bin/flud-mastermetadataViewer',
            'flud/bin/flud-metadataViewer',
            'flud/bin/start-fludnodes',
            'flud/bin/stop-fludnodes',
            'flud/bin/gauges-fludnodes',
            'flud/bin/clean-fludnodes']
        )
| Python |
"""
FludCrypto.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Provides FludRSA (an enhanced RSA._RSAobj), as well as convenience functions
for creating hashes, finding hash collisions, etc.
"""
import binascii
import operator
import struct
import time
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA, pubkey
from Crypto.Util.randpool import RandomPool
from Crypto.Random import atfork
class FludRSA(RSA._RSAobj):
    """
    Subclasses the Crypto.PublicKey.RSAobj object to add access to the
    privatekey as well as methods for exporting and importing an RSA obj.

    Keys are exported/imported as the pycrypto state dict ({'n', 'e'} for
    public keys, plus 'd' etc. for private keys).
    """
    # shared entropy pool used by generate()
    rand = RandomPool()

    def __init__(self, rsa):
        # adopt the state of an existing RSA object wholesale
        self.__setstate__(rsa.__getstate__())

    def publickey(self):
        # public half only: modulus n and public exponent e
        return FludRSA(RSA.construct((self.n, self.e)))

    def privatekey(self):
        # full key: n, e plus the private exponent d
        return FludRSA(RSA.construct((self.n, self.e, self.d)))

    def encrypt(self, message):
        # the K argument is ignored by RSA; pass an empty string
        return RSA._RSAobj.encrypt(self, message, "")

    def exportPublicKey(self):
        # returns the public key's component dict (keys 'n' and 'e')
        return self.publickey().__getstate__()

    def exportPrivateKey(self):
        # returns the private key's component dict
        return self.privatekey().__getstate__()

    def id(self):
        """
        returns the hashstring of the public key
        """
        # only the modulus participates in the ID hash
        #return hashstring(str(self.exportPublicKey()))
        return hashstring(str(self.exportPublicKey()['n']))

    def importPublicKey(key):
        """
        Can take, as key, a dict describing the public key ('e' and 'n'), a
        string describing n, or a long describing n (in the latter two cases, e
        is assumed to be 65537L).
        """
        if isinstance(key, str):
            # hex string form of n; normalize to the dict form
            key = long(key, 16)
            key = {'e': 65537L, 'n': key}
        elif isinstance(key, long):
            key = {'e': 65537L, 'n': key}
        if isinstance(key, dict):
            state = key
            # construct a placeholder key, then overwrite its state
            pkey = RSA.construct((0L,0L))
            pkey.__setstate__(state)
            return FludRSA(pkey)
        else:
            raise TypeError("type %s not supported by importPublicKey():"\
                    " try dict with keys of 'e' and 'n', string representing"\
                    " 'n', or long representing 'n'." % type(key))
    importPublicKey = staticmethod(importPublicKey)

    def importPrivateKey(key):
        # key is the component dict produced by exportPrivateKey()
        state = key
        pkey = RSA.construct((0L,0L,0L))
        pkey.__setstate__(state)
        return FludRSA(pkey)
    importPrivateKey = staticmethod(importPrivateKey)

    def generate(keylength=2048):
        # fresh keypair drawn from the class-level entropy pool
        return FludRSA(RSA.generate(keylength, FludRSA.rand.get_bytes))
    generate = staticmethod(generate)
def generateKeys(len=2048):
    """Generate a fresh RSA keypair; returns (publickey, privatekey).

    NOTE: the parameter name 'len' shadows the builtin; kept for
    interface compatibility with existing callers.
    """
    keypair = FludRSA.generate(len)
    return keypair.publickey(), keypair.privatekey()
def hashstring(string):
    """Return the hex-encoded SHA-256 digest of *string*."""
    digest = SHA256.new()
    digest.update(string)
    return digest.hexdigest()
def hashfile(filename):
    """Return the hex-encoded SHA-256 digest of a file's contents.

    Reads in 1MiB chunks so arbitrarily large files hash in constant
    memory.  The file is opened in binary mode (the old text-mode open
    would mangle binary data on win32) and is closed even if a read
    fails part-way through.
    """
    sha256 = SHA256.new()
    f = open(filename, "rb")
    try:
        while 1:
            buf = f.read(1048576) # XXX: 1Mb - magic number
            if not buf:
                break
            sha256.update(buf)
    finally:
        f.close()
    return sha256.hexdigest()
def hashstream(file, len):
    """Return the hex SHA-256 digest of the next *len* bytes of *file*.

    NOTE: 'file' and 'len' shadow builtins; the names are kept for
    interface compatibility.  Assumes read() returns the full amount
    requested until EOF (true for regular files) -- a short read from a
    socket-like object would end hashing early.
    """
    sha256 = SHA256.new()
    chunk = 1048576  # read at most 1MiB at a time
    while len > 0:
        if len < chunk:
            chunk = len
        buf = file.read(chunk)
        if buf == "":
            break
        sha256.update(buf)
        len = len - chunk
    return sha256.hexdigest()
def generateRandom(n):
    """Return *n* bytes of fresh entropy from a private RandomPool."""
    atfork()  # reseed after any fork() so children do not share state
    pool = RandomPool() # using seperate instance of RandomPool purposely
    return pool.get_bytes(n)
def hashcash(match, len, timestamp=False):
    """ tries to find a hash collision of len significant bits. Returns
    the 256-bit string that produced the collision. Uses sha256, so match
    should be a sha256 hashstring (as a hexstring), and len should be between
    0 and 256 (lengths close to 256 are intractable). The timestamp field
    determines whether the current timestamp should be inserted into the
    pre-hash result (to stem sybil attacks targeting specific IDs).
    The result is hex-encoded, so to arrive at the matching hashvalue, you
    would hashstring(binascii.unhexlify(result)).
    """
    matchint = long(match,16)
    # a collision in the top 'len' bits means the xor distance between the
    # two digests is below this threshold
    maxdist = 2**(256-len)
    # BUGFIX: the body previously tested an undefined name 'date' instead of
    # the 'timestamp' parameter, raising NameError whenever it ran
    if timestamp:
        gtime = struct.pack("I",int(time.time()))
    while True:
        attempt = generateRandom(32) # 32 random bytes = 256 random bits
        if timestamp:
            # rewrite the 2 lsBs of attempt with the 2 msBs of gtime (time
            # granularity is thus 65536 seconds, or just over 18 hours between
            # intervals -- more than enough for a refresh monthly, weekly, or
            # even daily value)
            attempt = attempt[0:30]+gtime[2:4]
        attempthash = hashstring(attempt)
        attemptint = long(attempthash,16)
        distance = operator.xor(matchint, attemptint)
        if distance < maxdist:
            break
    return binascii.hexlify(attempt)
# XXX: should move all testing to doctest
if __name__ == '__main__':
    # smoke test: exercise key generation, the three import forms, and
    # encrypt/decrypt round-trips
    fludkey = FludRSA.generate(2048)
    print "fludkey (pub) is: "+str(fludkey.exportPublicKey())
    print "fludkey (priv) is: "+str(fludkey.exportPrivateKey())
    print ""
    pubkeystring = fludkey.exportPublicKey()
    pubkeylongn = pubkeystring['n']
    pubkeystringn = hex(pubkeystring['n'])
    privkeystring = fludkey.exportPrivateKey()
    # importing from dict, hex-string, and long forms should all agree
    fludkeyPub = FludRSA.importPublicKey(pubkeystring)
    print "fludkeyPub is: "+str(fludkeyPub.exportPublicKey())
    fludkeyPub2 = FludRSA.importPublicKey(pubkeystringn)
    print "fludkeyPub2 is: "+str(fludkeyPub2.exportPublicKey())
    fludkeyPub3 = FludRSA.importPublicKey(pubkeylongn)
    print "fludkeyPub3 is: "+str(fludkeyPub3.exportPublicKey())
    fludkeyPriv = FludRSA.importPrivateKey(privkeystring)
    print "fludkeyPriv is: "+str(fludkeyPriv.exportPrivateKey())
    plaintext = "test message"
    print "plaintext is: "+plaintext
    ciphertext = fludkeyPub.encrypt(plaintext)
    print "ciphertext is: "+str(ciphertext)
    plaintext2 = fludkeyPriv.decrypt(ciphertext)
    print "decrypted plaintext is: "+plaintext2
    assert plaintext2==plaintext
    randstring = str(generateRandom(80))
    # NOTE(review): the printed line below is missing its closing quote
    print "80 bytes of random data: '"+binascii.hexlify(randstring)
    data1=randstring
    # leading zeroes get lost, since encryption treats the data as a number
    #data1='\x00\x00\x00\x1e4%`K\xef\xf6\xdd\x8a\x0eUP\x7f\xb0G\x1d\xb9\xe4\x82\x11n\n\xff\x1a\xc9\x013\xe9\x8e\x99\xb0]M@y\x86l\xb3l'
    edata1=fludkeyPub.encrypt(data1)[0]
    data2=fludkeyPriv.decrypt(edata1)
    print binascii.hexlify(data1)
    print binascii.hexlify(data2)
    assert data1 == data2
| Python |
"""
FludServer.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
flud server operations
"""
import threading, binascii, time, os, stat, httplib, gc, re, sys, logging, sets
from twisted.web import server, resource, client
from twisted.web.resource import Resource
from twisted.internet import reactor, threads, defer
from twisted.web import http
from twisted.python import threadable, failure
from flud.FludCrypto import FludRSA
import flud.FludkRouting
from ServerPrimitives import *
from ServerDHTPrimitives import *
from LocalPrimitives import *
from FludCommUtil import *
threadable.init()
class FludServer(threading.Thread):
    """
    This class runs the webserver, responding to all requests.

    Wires up the external HTTP resource tree and the loopback-only local
    client listener in __init__; the reactor itself is started by run()
    (thread entry point) and stopped by stop().
    """
    def __init__(self, node, port):
        threading.Thread.__init__(self)
        self.port = port
        self.node = node
        self.clientport = node.config.clientport
        self.logger = node.logger
        # external protocol resource tree
        self.root = ROOT(self)
        self.root.putChild('ID', ID(self)) # GET (node identity)
        self.root.putChild('file', FILE(self)) # POST, GET, and DELETE (files)
        self.root.putChild('hash', HASH(self)) # GET (verify op)
        self.root.putChild('proxy', PROXY(self)) # currently noop
        self.root.putChild('nodes', NODES(self)) # DHT find-node
        self.root.putChild('meta', META(self)) # DHT store/find-value
        self.site = server.Site(self.root)
        reactor.listenTCP(self.port, self.site)
        # local client commands are only accepted on the loopback interface
        reactor.listenTCP(self.clientport, LocalFactory(node),
                interface="127.0.0.1")
        #print "FludServer will listen on port %d, local client on %d"\
        #		% (self.port, self.clientport)
        self.logger.log(logging.INFO,\
                "FludServer will listen on port %d, local client on %d"
                % (self.port, self.clientport))

    def run(self):
        # thread entry point: runs the (blocking) twisted reactor
        self.logger.log(logging.INFO, "FludServer starting")
        return reactor.run(installSignalHandlers=0)

    def stop(self):
        self.logger.log(logging.INFO, "FludServer stopping")
        reactor.stop()
| Python |
"""
LocalPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Protocol for talking to a flud node locally (from client code -- command line,
GUI, etc).
Each command in the local protocol begins with 4 bytes designating the type of
command. These are followed by a status byte, which is either '?'=request,
':'=success response, or '!'=failure response. Arguments to the command follow
the status byte.
"""
import binascii, time, os, stat, httplib, gc, re, sys, logging, sets
from twisted.web.resource import Resource
from twisted.web import server, resource, client
from twisted.internet import protocol, reactor, threads, defer
from twisted.protocols import basic
from twisted.mail import smtp
from twisted.python import failure
from Crypto.Cipher import AES
from flud.FludCrypto import FludRSA, hashstring, generateRandom
import flud.FludkRouting
from flud.fencode import fencode, fdecode
import flud.FludFileOperations as FileOps
import flud.protocol.ServerPrimitives as ServerPrimitives
logger = logging.getLogger("flud.local.server")
"""
Protocol and Factory for local client/server communication
"""
MAXCONCURRENT = 300
(CONCURR, MAX, QUEUE) = (0, 1, 2) # indexes into LocalProtocol.commands
class LocalProtocol(basic.LineReceiver):
authenticated = False
commands = {'PUTF': [0, MAXCONCURRENT, []], 'GETF': [0, MAXCONCURRENT, []],
'GETI': [0, MAXCONCURRENT, []], 'FNDN': [0, 1, []],
'STOR': [0, MAXCONCURRENT, []], 'RTRV': [0, MAXCONCURRENT, []],
'VRFY': [0, MAXCONCURRENT, []], 'FNDV': [0, 1, []],
'CRED': [0, 1, []], 'LIST': [0, 1, []], 'GETM': [0, 1, []],
'PUTM': [0, 1, []] }
def connectionMade(self):
logger.info("client connected")
self.authenticated=False
def connectionLost(self, reason):
self.authenticated=False
def doOp(self, command, fname):
#print "got command '%s'" % command
if command == "PUTF":
logger.debug("PUTF %s", fname);
return FileOps.StoreFile(self.factory.node, fname).deferred
elif command == "GETI":
logger.debug("GETI %s", fname);
return FileOps.RetrieveFile(self.factory.node, fname).deferred
elif command == "GETF":
logger.debug("GETF %s", fname);
return FileOps.RetrieveFilename(self.factory.node, fname).deferred
elif command == "FNDN":
logger.debug("FNDN %s" % fname);
try:
intval = long(fname, 16)
except:
return defer.fail("fname was not hex")
return self.factory.node.client.kFindNode(intval)
# The following is for testing aggregation of kFindNode on same key
#dl = []
#for i in [1,2,3,4,5]:
# d = self.factory.node.client.kFindNode(intval)
# dl.append(d)
#dlist = defer.DeferredList(dl)
#return dlist
elif command == "FNDV":
logger.debug("FNDV %s", fname);
try:
intval = long(fname, 16)
except:
return defer.fail("fname was not hex")
return self.factory.node.client.kFindValue(intval)
elif command == "CRED":
passphrase, email = fdecode(fname)
# XXX: allow an optional passphrase hint to be sent in email.
passphrase = self.factory.node.config.Kr.decrypt(passphrase)
logger.debug("CRED %s to %s", passphrase, email);
Kr = self.factory.node.config.Kr.exportPrivateKey()
Kr['g'] = self.factory.node.config.groupIDr
fKr = fencode(Kr)
key = AES.new(binascii.unhexlify(hashstring(passphrase)))
fKr = '\x00'*(16-(len(fKr)%16))+fKr
efKr = fencode(key.encrypt(fKr))
logger.debug("efKr = %s " % efKr)
d = smtp.sendmail('localhost', "your_flud_client@localhost",
email,
"Subject: Your encrypted flud credentials\n\n"
"Hopefully, you'll never need to use this email. Its "
"sole purpose is to help you recover your data after a "
"catastrophic and complete loss of the original computer "
"or hard drive.\n\n"
"In that unlucky event, you'll need a copy of your flud "
"credentials, which I've included below, sitting between "
"the \"---+++---\" markers. These credentials were "
"encrypted with a passphrase of your choosing when you "
"installed the flud software. I'll only say this "
"once:\n\n"
"YOU MUST REMEMBER THAT PASSWORD IN ORDER TO RECOVER YOUR "
"CREDENTIALS. If you are unable to remember the "
"passphrase and your computer fails catastrophically "
"(losing its local copy of these credentials), you will "
"not be able to recover your data."
"\n\n"
"Luckily, that's all you should ever need in order to "
"recover all your data: your passphrase and these "
"credentials."
"\n\n"
"Please save this email. You may want to print out hard "
"copies and store them safely, forward this email to "
"other email accounts, etc. Since the credentials are "
"encrypted, others won't be able to steal them "
"without guessing your passphrase. "
"\n\n"
"---+++---\n"+efKr+"\n---+++---\n")
return d
# to decode this email, we search for the '---+++---' markers, make
# sure the intervening data is all in one piece (remove any line
# breaks \r or \n inserted by email clients) and call this 'cred',
# reconstruct the AES key with the H(passphrase) (as above), and
# then use the key to .decrypt(fdecode(cred)) and call this dcred,
# then fdecode(dcred[dcred.find('d'):]) and call this ddcred, and
# finally importPrivateKey(ddcred) and set groupIDr to ddcred['g'].
elif command == "LIST":
logger.debug("LIST")
return defer.succeed(self.factory.config.master)
elif command == "GETM":
logger.debug("GETM")
return FileOps.RetrieveMasterIndex(self.factory.node).deferred
elif command == "PUTM":
logger.debug("PUTM")
return FileOps.UpdateMasterIndex(self.factory.node).deferred
else:
#print "fname is '%s'" % fname
host = fname[:fname.find(':')]
port = fname[fname.find(':')+1:fname.find(',')]
fname = fname[fname.find(',')+1:]
print "%s: %s : %s , %s" % (command, host, port, fname)
if command == "STOR":
logger.debug("STOR");
return self.factory.node.client.sendStore(fname, None,
host, int(port))
elif command == "RTRV":
logger.debug("RTRV");
return self.factory.node.client.sendRetrieve(fname, host,
int(port))
elif command == "VRFY":
logger.debug("VRFY");
offset = port[port.find(':')+1:port.find('-')]
length = port[port.find('-')+1:]
port = port[:port.find(':')]
print "%s: %s : %s %s - %s , %s" % (command, host, port,
offset, length, fname)
return self.factory.node.client.sendVerify(fname, int(offset),
int(length), host, int(port))
else:
logger.debug("bad op");
return defer.fail("bad op")
def serviceQueue(self, command):
if len(self.commands[command][QUEUE]) > 0 and \
self.commands[command][CONCURR] <= self.commands[command][MAX]:
data = self.commands[command][QUEUE].pop()
logger.info("servicing queue['%s'], item %s" % (command, data))
print "taking %s off the queue" % command
d = self.doOp(command, data)
d.addCallback(self.sendSuccess, command, data)
d.addErrback(self.sendFailure, command, data)
def sendSuccess(self, resp, command, data, prepend=None):
logger.debug("SUCCESS! "+command+":"+data)
#logger.debug("response: '%s'" % (resp,))
if prepend:
w = "%s:%s %s:%s\r\n" % (prepend, command, fencode(resp), data)
else:
w = "%s:%s:%s\r\n" % (command, fencode(resp), data)
self.transport.write(w)
self.commands[command][CONCURR] -= 1
try:
self.serviceQueue(command)
except:
print sys.exec_info()
return resp
def sendFailure(self, err, command, data, prepend=None):
logger.debug("FAILED! %s!%s" % (command, data))
errmsg = err.getErrorMessage()
if prepend:
w = "%s!%s %s!%s\r\n" % (prepend, command, errmsg, data)
else:
w = "%s!%s!%s\r\n" % (command, errmsg, data)
logger.debug("sending %s" % w)
self.transport.write(w)
self.commands[command][CONCURR] -= 1
self.serviceQueue(command)
return err
def lineReceived(self, line):
logger.debug("lineReceived: '%s'" % line)
# commands: AUTH, PUTF, GETF, VRFY
# status: ? = request, : = successful response, ! = failed response
command = line[0:4]
status = line[4]
data = line[5:]
#print "data is '%s'" % data
if not self.authenticated and command == "AUTH":
if status == '?':
# asked for AUTH challenge to be sent. send it
logger.debug("AUTH challenge requested, sending")
echallenge = self.factory.sendChallenge()
self.transport.write("AUTH?"+echallenge+"\r\n")
elif status == ':' and self.factory.challengeAnswered(data):
# sent AUTH response and it passed
logger.debug("AUTH challenge successful")
self.authenticated = True
self.transport.write("AUTH:\r\n")
elif status == ':':
logger.debug("AUTH challenge failed")
self.transport.write("AUTH!\r\n")
elif command == "DIAG":
if data == "NODE":
logger.debug("DIAG NODE")
nodetups = self.factory.config.routing.knownExternalNodes()
nodes = []
for n in nodetups:
node = list(n)
if n[2] in self.factory.config.reputations:
node.append(self.factory.config.reputations[n[2]])
else:
node.append(0)
if n[2] in self.factory.config.throttled:
node.append(self.factory.config.throttled[n[2]])
else:
node.append(0)
nodes.append(tuple(node))
self.transport.write("DIAG:NODE%s\r\n" % fencode(nodes))
elif data == "BKTS":
logger.debug("DIAG BKTS")
bucks = eval("%s" % self.factory.config.routing.kBuckets)
self.transport.write("DIAG:BKTS%s\r\n" % fencode(bucks))
else:
dcommand = data[:4]
ddata = data[5:]
logger.debug("DIAG %s %s" % (dcommand, ddata))
self.commands[dcommand][CONCURR] += 1
d = self.doOp(dcommand, ddata)
d.addCallback(self.sendSuccess, dcommand, ddata, "DIAG")
d.addErrback(self.sendFailure, dcommand, ddata, "DIAG")
elif status == '?':
# requested an operation to be performed. If we are below our
# maximum concurrent ops, do the operation. Otherwise, put it on
# the queue to be serviced when current ops finish. Response is
# sent back to client when deferreds fire.
if self.commands[command][CONCURR] >= self.commands[command][MAX]:
#print "putting %s on the queue" % line
logger.info("received %s request, enqueuing" % command)
self.commands[command][QUEUE].insert(0, data)
else:
#print "doing %s" % line
logger.info("received %s request, executing" % command)
print self.commands[command]
self.commands[command][CONCURR] += 1
d = self.doOp(command, data)
d.addCallback(self.sendSuccess, command, data)
d.addErrback(self.sendFailure, command, data)
class LocalFactory(protocol.ServerFactory):
    """Builds LocalProtocol connections and holds the AUTH challenge state."""
    protocol = LocalProtocol

    def __init__(self, node):
        self.node = node
        self.config = node.config

    def sendChallenge(self):
        # fresh random challenge, encrypted to the node owner's public key;
        # the plaintext is remembered so challengeAnswered() can compare
        self.challenge = fencode(generateRandom(
                ServerPrimitives.challengelength))
        encrypted = self.config.Ku.encrypt(self.challenge)[0]
        return fencode(encrypted)

    def challengeAnswered(self, resp):
        # client proves key ownership by echoing the decrypted challenge
        return resp == self.challenge
| Python |
#!/usr/bin/python
"""
LocalClient.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
LocalClient provides client functions which can be called to send commands to
a local FludNode instance.
"""
import sys, os, time
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.python import threadable
threadable.init()
from flud.fencode import fencode, fdecode
from LocalPrimitives import *
logger = logging.getLogger("flud.local.client")
opTimeout = 1200 # seconds before a pending operation may be expired
VALIDOPS = LocalProtocol.commands.keys() + ['AUTH', 'DIAG'] # legal 4-byte ops
# XXX: print commands should either be raised, or put on factory.msgs
class LocalClient(LineReceiver):
MAX_LENGTH = 300000
auth=False
def connectionMade(self):
logger.debug("connection est.")
self.auth=False
self.sendLine("AUTH?")
def lineReceived(self, line):
logger.debug("received line '%s'" % line)
command = line[0:4]
if not command in VALIDOPS:
print "error: invalid command op ('%s')-- "\
" are you trying to connect to the wrong port"\
" (local client port is usually external port + 500)?"\
% command
return None
status = line[4]
data = line[5:]
if not self.auth:
if command == "AUTH" and status == '?':
# got challenge, send response
logger.debug("got AUTH challenge, sending response")
echallenge = data
self.sendLine("AUTH:"+self.factory.answerChallenge(echallenge))
return
elif command == "AUTH" and status == ':':
# response accepted, authenticated
logger.debug("AUTH challenge accepted, success")
self.auth = True
self.factory.clientReady(self)
#print "authenticated"
else:
if command == "AUTH" and status == "!":
logger.warn("authentication failed (is FLUDHOME set"
" correctly?)")
print "authentication failed (is FLUDHOME set correctly?)"
else:
logger.warn("unknown message received before being"
" authenticated:")
logger.warn(" %s : %s" % (command, status))
print "unknown message received before being authenticated:"
print " %s : %s" % (command, status)
self.factory.setDie()
elif command == "DIAG":
subcommand = data[:4]
data = data[4:]
if subcommand == "NODE":
logger.debug("DIAG NODE: %s" % data)
data = fdecode(data)
result = ""
for i in data:
score = '%d' % i[4]
petID = "%064x" % i[2]
netID = "%s:%d" % (i[0], i[1])
if i[5] != 0:
now = int(time.time())
throttle = '(%d)' % (i[5]-now)
petID = petID[:(70-len(netID)-len(throttle))]+"... " \
+throttle
else:
petID = petID[:(70-len(netID))]+"..."
result += "%s %s %s\n" % (score, netID, petID)
result += "%d known nodes\n" % len(data)
d = self.factory.pending['NODE'].pop('')
d.callback(result)
return
if subcommand == "BKTS":
logger.debug("DIAG BKTS")
data = fdecode(data)
result = ""
for i in data:
for bucket in i:
result += "Bucket %s:\n" % bucket
for k in i[bucket]:
id = "%064x" % k[2]
netID = "%s:%d" % (k[0], k[1])
result += " %s %s...\n" \
% (netID,id[:72-len(netID)])
d = self.factory.pending['BKTS'].pop('')
d.callback(result)
return
elif status == ':':
response, data = data.split(status, 1)
logger.debug("DIAG %s: success" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("DIAG %s: failure" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.errback(failure.DefaultException(response))
elif status == ':':
response, data = data.split(status, 1)
logger.debug("%s: success" % command)
d = self.factory.pending[command].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("%s: failure" % command)
if self.factory.pending.has_key(command):
if data not in self.factory.pending[command]:
#print "data key is '%s'" % data
print "pending is '%s'" % self.factory.pending[command]
if len(self.factory.pending[command]):
d = self.factory.pending[command].popitem()
d.errback(failure.DefaultException(response))
else:
d = self.factory.pending[command].pop(data)
d.errback(failure.DefaultException(response))
else:
print "failed command '%s' not in pending?" % command
print "pending is: %s" % self.factory.pending
if command != 'AUTH' and command != 'DIAG' and \
not None in self.factory.pending[command].values():
logger.debug("%s done at %s" % (command, time.ctime()))
class LocalClientFactory(ClientFactory):
protocol = LocalClient
def __init__(self, config):
self.config = config
self.messageQueue = []
self.client = None
self.die = False
self.pending = {'PUTF': {}, 'CRED': {}, 'GETI': {}, 'GETF': {},
'FNDN': {}, 'STOR': {}, 'RTRV': {}, 'VRFY': {}, 'FNDV': {},
'CRED': {}, 'LIST': {}, 'GETM': {}, 'PUTM': {}, 'NODE': {},
'BKTS': {}}
def clientConnectionFailed(self, connector, reason):
#print "connection failed: %s" % reason
logger.warn("connection failed: %s" % reason)
self.cleanup("connection failed: %s" % reason)
def clientConnectionLost(self, connector, reason):
#print "connection lost: %s" % reason
logger.debug("connection lost: %s" % reason)
self.cleanup("connection lost: %s" % reason)
def cleanup(self, msg):
# override me for cleanup
print msg;
def clientReady(self, instance):
self.client = instance
logger.debug("client ready, sending [any] queued msgs")
for i in self.messageQueue:
self._sendMessage(i)
def _sendMessage(self, msg):
if self.client:
logger.debug("sending msg '%s'" % msg)
self.client.sendLine(msg)
else:
logger.debug("queueing msg '%s'" % msg)
self.messageQueue.append(msg)
def answerChallenge(self, echallenge):
logger.debug("answering challenge")
echallenge = (fdecode(echallenge),)
challenge = self.config.Kr.decrypt(echallenge)
return challenge
def expire(self, pending, key):
if pending.has_key(fname):
logger.debug("timing out operation for %s" % key)
#print "timing out operation for %s" % key
pending.pop(key)
def addFile(self, type, fname):
logger.debug("addFile %s %s" % (type, fname))
if not self.pending[type].has_key(fname):
d = defer.Deferred()
self.pending[type][fname] = d
self._sendMessage(type+"?"+fname)
return d
else:
return self.pending[type][fname]
def sendPING(self, host, port):
logger.debug("sendPING")
d = defer.Deferred()
d.errback(failure.DefaultException(
"ping not yet implemented in FludLocalClient"))
return d
def sendPUTF(self, fname):
logger.debug("sendPUTF %s" % fname)
if os.path.isdir(fname):
dirlist = os.listdir(fname)
dlist = []
for i in dirlist:
dlist.append(self.sendPUTF(os.path.join(fname,i)))
dl = defer.DeferredList(dlist)
return dl
elif not self.pending['PUTF'].has_key(fname):
d = defer.Deferred()
self.pending['PUTF'][fname] = d
self._sendMessage("PUTF?"+fname)
#reactor.callLater(opTimeout, self.expire, self.pendingPUTF, fname)
return d
else:
return self.pending['PUTF'][fname]
def sendCRED(self, passphrase, email):
logger.debug("sendCRED")
key = fencode((self.config.Ku.encrypt(passphrase)[0], email))
if not self.pending['CRED'].has_key(key):
d = defer.Deferred()
self.pending['CRED'][key] = d
self._sendMessage("CRED?"+key)
return d
else:
return self.pending['CRED'][key]
def sendGETI(self, fID):
logger.debug("sendGETI")
if not self.pending['GETI'].has_key(fID):
d = defer.Deferred()
self.pending['GETI'][fID] = d
self._sendMessage("GETI?"+fID)
return d
else:
return self.pending['GETI'][fID]
def sendGETF(self, fname):
logger.debug("sendGETF")
master = listMeta(self.config)
if master.has_key(fname):
return self.addFile("GETF",fname)
elif fname[-1:] == os.path.sep:
dlist = []
for name in master:
if fname == name[:len(fname)]:
dlist.append(self.addFile("GETF",name))
dl = defer.DeferredList(dlist)
return dl
def sendFNDN(self, nID):
logger.debug("sendFNDN")
if not self.pending['FNDN'].has_key(nID):
d = defer.Deferred()
self.pending['FNDN'][nID] = d
self._sendMessage("FNDN?"+nID)
return d
else:
return self.pending['FNDN'][nID]
def sendLIST(self):
logger.debug("sendLIST")
if not self.pending['LIST'].has_key(""):
d = defer.Deferred()
self.pending['LIST'][''] = d
logger.debug("LIST['']=%s" % d)
self._sendMessage("LIST?")
return d
else:
return self.pending['LIST']['']
def sendGETM(self):
logger.debug("sendGETM")
if not self.pending['GETM'].has_key(''):
d = defer.Deferred()
self.pending['GETM'][''] = d
logger.debug("GETM['']=%s" % d)
self._sendMessage("GETM?")
return d
else:
return self.pending['GETM']['']
def sendPUTM(self):
logger.debug("sendPUTM")
if not self.pending['PUTM'].has_key(''):
d = defer.Deferred()
self.pending['PUTM'][''] = d
self._sendMessage("PUTM?")
return d
else:
return self.pending['PUTM']['']
def sendDIAGNODE(self):
logger.debug("sendDIAGNODE")
if not self.pending['NODE'].has_key(''):
d = defer.Deferred()
self.pending['NODE'][''] = d
self._sendMessage("DIAG?NODE")
return d
else:
return self.pending['NODE']['']
def sendDIAGBKTS(self):
logger.debug("sendDIAGBKTS")
if not self.pending['BKTS'].has_key(''):
d = defer.Deferred()
self.pending['BKTS'][''] = d
self._sendMessage("DIAG?BKTS")
return d
else:
return self.pending['BKTS']['']
def sendDIAGSTOR(self, command):
logger.debug("sendDIAGSTOR")
if not self.pending['STOR'].has_key(command):
d = defer.Deferred()
self.pending['STOR'][command] = d
self._sendMessage("DIAG?STOR "+command)
return d
else:
return self.pending['STOR'][command]
def sendDIAGRTRV(self, command):
logger.debug("sendDIAGRTRV")
if not self.pending['RTRV'].has_key(command):
d = defer.Deferred()
self.pending['RTRV'][command] = d
self._sendMessage("DIAG?RTRV "+command)
return d
else:
return self.pending['RTRV'][command]
def sendDIAGVRFY(self, command):
logger.debug("sendDIAGVRFY")
if not self.pending['VRFY'].has_key(command):
d = defer.Deferred()
self.pending['VRFY'][command] = d
self._sendMessage("DIAG?VRFY "+command)
return d
else:
return self.pending['VRFY'][command]
def sendDIAGFNDV(self, val):
logger.debug("sendDIAGFNDV")
if not self.pending['FNDV'].has_key(val):
d = defer.Deferred()
self.pending['FNDV'][val] = d
self._sendMessage("FNDV?"+val)
return d
else:
return self.pending['FNDV'][val]
def setDie(self):
self.die = True
# XXX: this should move into FludNode side of things (LocalClientPrimitives).
# anything that calls this should make calls ('LIST', others as necessary) to
# get at master metadata, otherwise we could have multiple writer problems.
# FludNode should make the file ro while running, too.
# And everyone that does anything with the master metadata should do it through
# methods of FludConfig, instead of by direct access to the file.
def listMeta(config):
    """
    Read and decode the master metadata index from disk.

    Returns the decoded master index, or an empty dict if the master file
    is empty.  The file handle is closed even if the read raises (the old
    code leaked the handle on error).
    """
    fmaster = open(os.path.join(config.metadir,config.metamaster), 'r')
    try:
        master = fmaster.read()
    finally:
        fmaster.close()
    if master == "":
        master = {}
    else:
        master = fdecode(master)
    return master
| Python |
"""
ServerDHTPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive server DHT protocol
"""
import binascii, time, os, stat, httplib, gc, re, sys, logging, random, sets
from twisted.web.resource import Resource
from twisted.web import server, resource, http, client
from twisted.internet import reactor, defer
from twisted.python import failure
from flud.FludCrypto import FludRSA
from flud.fencode import fencode, fdecode
from ServerPrimitives import ROOT
from FludCommUtil import *
logger = logging.getLogger("flud.server.dht")
# XXX: move kRouting.insertNode code out of FludConfig. Add a 'addNode' method
# to FludProtocol module which calls config.addNode, calls
# kRouting.insertNode and if it gets a return value, calls sendGetID with
# the callback doing nothing (unless the header comes back in error) and
# the errback calling kRouting.replaceNode. Anywhere that we are
# currently called config.addNode, call this new method instead.
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: find everywhere we are sending longs and consider sending hex (or our
# own base-64) encoded instead
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should also
# consider Zooko's links in the parent to this post)
"""
The children of ROOT beginning with 'k' are kademlia protocol based.
"""
# XXX: need to do all the challenge/response jazz in the k classes
class NODES(ROOT):
    """kademlia FIND_NODE endpoint: GET /nodes/<key>."""
    def getChild(self, name, request):
        # only the two-segment path /nodes/<key> is handled here;
        # anything deeper falls through to the default 404 behavior
        if len(request.prepath) == 2:
            return self
        return Resource.getChild(self, name, request)

    def render_GET(self, request):
        logger.debug("NODES get (findnode)")
        target = request.prepath[1]
        self.setHeaders(request)
        return kFindNode(self.node, self.config, request, target).deferred
class META(ROOT):
    """kademlia STORE (PUT /meta/<key>/<val>) and FIND_VALUE (GET /meta/<key>)."""
    def getChild(self, name, request):
        # path validation happens in the render methods
        return self

    def render_PUT(self, request):
        logger.debug("META put (storeval)")
        path = request.prepath
        if len(path) != 3:
            request.setResponseCode(http.BAD_REQUEST, "expected key/val")
            return "expected key/val, got %s" % '/'.join(path)
        self.setHeaders(request)
        return kStoreVal(self.node, self.config, request, path[1],
                path[2]).deferred

    def render_GET(self, request):
        logger.debug("META get (findval)")
        path = request.prepath
        if len(path) != 2:
            request.setResponseCode(http.BAD_REQUEST, "expected key")
            return "expected key, got %s" % '/'.join(path)
        self.setHeaders(request)
        return kFindVal(self.node, self.config, request, path[1]).deferred
class kFindNode(object):
    """
    Server-side handler for a kademlia FINDNODE request.

    Validates the requester's identity (its nodeID must match the hash of
    the public key it supplied), refreshes it in our routing table, and
    answers with the k closest known nodes to the target key.  The result
    string is stored on self.deferred for the calling Resource to return.
    """
    def __init__(self, node, config, request, key):
        self.node = node
        self.config = config
        # NOTE: despite the name, kfindNode() returns synchronously; the
        # Resource simply hands this value back from render_GET.
        self.deferred = self.kfindNode(request, key)
    def kfindNode(self, request, key):
        """
        Return the k closest nodes to the target ID from local k-routing table
        """
        # record the time of the last DHT operation on this node
        self.node.DHTtstamp = time.time()
        try:
            # the requester must identify itself so we can update routing
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by kFINDNODE"
            logger.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            logger.info("received kFINDNODE request from %s..."
                    % params['nodeID'][:10])
            # rebuild the requester's public key and verify its claimed ID
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            if reqKu.id() != params['nodeID']:
                request.setResponseCode(http.BAD_REQUEST, "Bad Identity")
                return "requesting node's ID and public key do not match"
            host = getCanonicalIP(request.getClientIP())
            #return "{'id': '%s', 'k': %s}"\
            #        % (self.config.nodeID,\
            #        self.config.routing.findNode(fdecode(params['key'])))
            kclosest = self.config.routing.findNode(fdecode(key))
            # occasionally mix in one random node that is *not* close to
            # the target, to add routing-table diversity for the requester
            notclose = list(set(self.config.routing.knownExternalNodes())
                    - set(kclosest))
            if len(notclose) > 0 and len(kclosest) > 1:
                r = random.choice(notclose)
                #logger.info("**** got some notclose: %s:%d ****" % (r[0],r[1]))
                kclosest.append(r)
            #logger.info("returning kFINDNODE response: %s" % kclosest)
            # remember/refresh the requester in our own routing table
            updateNode(self.node.client, self.config, host,
                    int(params['port']), reqKu, params['nodeID'])
            return "{'id': '%s', 'k': %s}" % (self.config.nodeID, kclosest)
class kSTORE_true(ROOT):
    # unrestricted kSTORE. Will store any key/value pair, as in generic
    # kademlia. This should be unregistered in FludServer (can't allow
    # generic stores).
    isLeaf = True
    def render_PUT(self, request):
        """
        Store params['val'] verbatim under params['key'] in kstoredir.

        Performs the same identity check as the other k ops (nodeID must
        match the hash of the supplied public key) but does no validation
        of the stored data itself -- which is why this resource must stay
        unregistered in production.
        """
        try:
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port', 'key', 'val')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by kSTORE_true"
            logger.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            logger.info("received kSTORE_true request from %s..."
                    % params['nodeID'][:10])
            # verify the claimed nodeID against the supplied public key
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            if reqKu.id() != params['nodeID']:
                request.setResponseCode(http.BAD_REQUEST, "Bad Identity")
                return "requesting node's ID and public key do not match"
            host = getCanonicalIP(request.getClientIP())
            # refresh the requester in our routing table before storing
            updateNode(self.node.client, self.config, host,
                    int(params['port']), reqKu, params['nodeID'])
            # NOTE(review): params['key'] is used directly as a filename;
            # presumably upstream guarantees it is path-safe -- verify.
            fname = self.config.kstoredir+'/'+params['key']
            logger.info("storing dht data to %s" % fname)
            f = open(fname, "wb")
            f.write(params['val'])
            f.close()
            return ""
class kStoreVal(ROOT):
# XXX: To prevent abuse of the DHT layer, we impose restrictions on its
# format. But format alone is not sufficient -- a malicious client
# could still format its data in a way that is allowed and gain
# arbitrary amounts of freeloading storage space in the DHT. To
# prevent this, nodes storing data in the DHT layer must also validate
# it. Validation simply requires that the blockIDs described in the
# kSTORE actually reside at a significant percentage of the hosts
# described in the kSTORE op. In other words, validation requires a
# VERIFY op for each block described in the kSTORE op. Validation can
# occur randomly sometime after a kSTORE operation, or at the time of
# the kSTORE op. The former is better, because it not only allows
# purging bad kSTOREs, but prevents them from happening in the first
# place (without significant conspiring among all participants).
# Since the originator of this request also needs to do a VERIFY,
# perhaps we can piggyback these through some means. And, since the
# kSTORE is replicated to k other nodes, each of which also should to
# a VERIFY, there are several ways to optimize this. One is for the k
# nodes to elect a single verifier, and allow the client to learn the
# result of the VERIFY op. Another is to allow each k node to do its
# own VERIFY, but stagger them in a way such that they can take the
# place of the originator's first k VERIFY ops. This could be
# coordinated or (perhaps better) allow each k node to randomly pick a
# time at which it will VERIFY, distributed over a period for which it
# is likely to cover many of the first k VERIFY ops generated by
# the originator. The random approach is nice because it is the same
# mechanism used by the k nodes to occasionally verify that the DHT
# data is valid and should not be purged.
def __init__(self, node, config, request, key, val):
self.node = node
self.config = config
self.deferred = self.kstoreVal(request, key, val)
def kstoreVal(self, request, key, val):
self.node.DHTtstamp = time.time()
try:
required = ('nodeID', 'Ku_e', 'Ku_n', 'port')
params = requireParams(request, required)
except Exception, inst:
msg = inst.args[0] + " in request received by kSTORE"
logger.info(msg)
request.setResponseCode(http.BAD_REQUEST, "Bad Request")
return msg
else:
logger.info("received kSTORE request from %s..."
% params['nodeID'][:10])
reqKu = {}
reqKu['e'] = long(params['Ku_e'])
reqKu['n'] = long(params['Ku_n'])
reqKu = FludRSA.importPublicKey(reqKu)
if reqKu.id() != params['nodeID']:
request.setResponseCode(http.BAD_REQUEST, "Bad Identity")
return "requesting node's ID and public key do not match"
host = getCanonicalIP(request.getClientIP())
updateNode(self.node.client, self.config, host,
int(params['port']), reqKu, params['nodeID'])
fname = os.path.join(self.config.kstoredir,key)
md = fdecode(val)
if not self.dataAllowed(key, md, params['nodeID']):
msg = "malformed store data"
logger.info("bad data was: %s" % md)
request.setResponseCode(http.BAD_REQUEST, msg)
return msg
# XXX: see if there isn't already a 'val' for 'key' present
# - if so, compare to val. Metadata can differ. Blocks
# shouldn't. However, if blocks do differ, just add the
# new values in, up to N (3?) records per key. Flag these
# (all N) as ones we want to verify (to storer and storee).
# Expunge any blocks that fail verify, and punish storer's
# trust.
logger.info("storing dht data to %s" % fname)
if os.path.exists(fname) and isinstance(md, dict):
f = open(fname, "rb")
edata = f.read()
f.close()
md = self.mergeMetadata(md, fdecode(edata))
f = open(fname, "wb")
f.write(fencode(md))
f.close()
return "" # XXX: return a VERIFY reverse request: segname, offset
def dataAllowed(self, key, data, nodeID):
# ensures that 'data' is in [one of] the right format[s] (helps prevent
# DHT abuse)
def validValue(val):
if not isinstance(val, long) and not isinstance(val, int):
return False # not a valid key/nodeid
if val > 2**256 or val < 0: # XXX: magic 2**256, use fludkrouting
return False # not a valid key/nodeid
return True
def validMetadata(blockdata, nodeID):
# returns true if the format of data conforms to the standard for
# metadata
blocks = 0
try:
k = blockdata.pop('k')
n = blockdata.pop('n')
if not isinstance(k, int) or not isinstance(n, int):
return False
if k != 20 or n != 20:
# XXX: magic numbers '20'
# XXX: to support other than 20/20, need to constrain an
# upper bound and store multiple records with different m/n
# under the same key
return False
m = k+n
except:
return False
for (i, b) in blockdata:
if i > m:
return False
if not validValue(b):
#print "%s is invalid key" %i
return False
location = blockdata[(i,b)]
if isinstance(location, list):
if len(location) > 5:
#print "too many (list) nodeIDs" % j
return False
for j in location:
if not validValue(j):
#print "%s is invalid (list) nodeID" % j
return False
elif not validValue(location):
#print "%s is invalid nodeID" % location
return False
blocks += 1
if blocks != m:
return False # not the right number of blocks
blockdata['k'] = k
blockdata['n'] = n
return True
def validMasterCAS(key, data, nodeID):
# returns true if the data fits the characteristics of a master
# metadata CAS key, i.e., if key==nodeID and the data is the right
# length.
nodeID = fencode(long(nodeID,16))
if key != nodeID:
return False
# XXX: need to do challange/response on nodeID (just as in the
# regular primitives) here, or else imposters can store/replace
# this very important data!!!
# XXX: do some length stuff - should only be as long as a CAS key
return True
return (validMetadata(data, nodeID)
or validMasterCAS(key, data, nodeID))
def mergeMetadata(self, m1, m2):
# merges the data from m1 into m2. After calling, both m1 and m2
# contain the merged data.
"""
>>> a1 = {'b': {1: (1, 'a', 8), 2: (2, 'b', 8), 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a1, a2)
{'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80), (2, 'b', 8)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
>>> a1 = {'b': {1: (1, 'a', 8), 2: (2, 'b', 8), 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a2, a1)
{'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80), (2, 'b', 8)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
>>> a1 = {'b': {1: (1, 'a', 8), 2: [(2, 'b', 8), (7, 'r', 8)], 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a2, a1)
{'b': {1: (1, 'a', 8), 2: [(2, 'b', 8), (7, 'r', 8), (3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
>>> a1 = {'b': {1: [(1, 'a', 8)], 2: [(2, 'b', 8), (7, 'r', 8)], 5: (1, 'a', 8)}}
>>> a2 = {'b': {1: (1, 'a', 8), 2: [(3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80)}}
>>> mergeit(a1, a2)
{'b': {1: (1, 'a', 8), 2: [(2, 'b', 8), (7, 'r', 8), (3, 'B', 80), (4, 'bb', 80)], 10: (10, 't', 80), 5: (1, 'a', 8)}}
"""
# first merge blocks ('b' sections)
n = {}
for i in m2:
if m1.has_key(i) and m2[i] != m1[i]:
if isinstance(m1[i], list) and len(m1[i]) == 1:
m1[i] = m1[i][0] # collapse list of len 1
if isinstance(m2[i], list) and len(m2[i]) == 1:
m2[i] = m2[i][0] # collapse list of len 1
# combine
if isinstance(m1[i], list) and isinstance(m2[i], list):
n[i] = m2[i]
n[i].extend(m1[i])
elif isinstance(m2[i], list):
n[i] = m2[i]
n[i] = n[i].append(m1[i])
elif isinstance(m1[i], list):
n[i] = m1[i]
n[i] = n[i].append(m2[i])
elif m1[i] == m2[i]:
n[i] = m1[i]
else:
n[i] = [m1[i], m2[i]]
else:
n[i] = m2[i]
for i in m1:
if not n.has_key(i):
n[i] = m1[i]
# now n contains the merged blocks.
m1 = m2 = n
return m1
class kFindVal(object):
    """
    Server-side handler for a kademlia FINDVALUE request.

    Answers with the stored value when we hold it locally, otherwise with
    the k closest known nodes to the target key (as in FINDNODE).
    """
    def __init__(self, node, config, request, key):
        self.node = node
        self.config = config
        # NOTE: kfindVal() runs synchronously; 'deferred' is the string
        # handed back to render_GET.
        self.deferred = self.kfindVal(request, key)
    def kfindVal(self, request, key):
        """
        Return the value, or if we don't have it, the k closest nodes to the
        target ID
        """
        self.node.DHTtstamp = time.time()
        try:
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by kFINDVALUE"
            logger.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            logger.info("received kFINDVALUE request from %s..."
                    % params['nodeID'][:10])
            # verify the claimed nodeID against the supplied public key
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            if reqKu.id() != params['nodeID']:
                request.setResponseCode(http.BAD_REQUEST, "Bad Identity")
                return "requesting node's ID and public key do not match"
            host = getCanonicalIP(request.getClientIP())
            updateNode(self.node.client, self.config, host,
                    int(params['port']), reqKu, params['nodeID'])
            fname = os.path.join(self.config.kstoredir,key)
            if os.path.isfile(fname):
                # we hold the value locally: write it into the response
                f = open(fname, "rb")
                logger.info("returning data from kFINDVAL")
                request.setHeader('nodeID',str(self.config.nodeID))
                request.setHeader('Content-Type','application/x-flud-data')
                d = fdecode(f.read())
                # for metadata dicts, trim the reply to the block section
                # plus the requester's own entry
                if isinstance(d, dict) and d.has_key(params['nodeID']):
                    #print d
                    resp = {'b': d['b'], params['nodeID']: d[params['nodeID']]}
                    #resp = {'b': d['b']}
                    #if d.has_key(params['nodeID']):
                    #    resp[params['nodeID']] = d[params['nodeID']]
                else:
                    resp = d
                request.write(fencode(resp))
                f.close()
                return ""
            else:
                # return the following if it isn't there.
                logger.info("returning nodes from kFINDVAL for %s" % key)
                request.setHeader('Content-Type','application/x-flud-nodes')
                return "{'id': '%s', 'k': %s}"\
                        % (self.config.nodeID,\
                        self.config.routing.findNode(fdecode(key)))
| Python |
#!/usr/bin/python
"""
LocalClient.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
LocalClient provides client functions which can be called to send commands to
a local FludNode instance.
"""
import sys, os, time
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.python import threadable
threadable.init()
from flud.fencode import fencode, fdecode
from LocalPrimitives import *
logger = logging.getLogger("flud.local.client")
opTimeout = 1200
VALIDOPS = LocalProtocol.commands.keys() + ['AUTH', 'DIAG']
# XXX: print commands should either be raised, or put on factory.msgs
class LocalClient(LineReceiver):
MAX_LENGTH = 300000
auth=False
def connectionMade(self):
logger.debug("connection est.")
self.auth=False
self.sendLine("AUTH?")
def lineReceived(self, line):
logger.debug("received line '%s'" % line)
command = line[0:4]
if not command in VALIDOPS:
print "error: invalid command op ('%s')-- "\
" are you trying to connect to the wrong port"\
" (local client port is usually external port + 500)?"\
% command
return None
status = line[4]
data = line[5:]
if not self.auth:
if command == "AUTH" and status == '?':
# got challenge, send response
logger.debug("got AUTH challenge, sending response")
echallenge = data
self.sendLine("AUTH:"+self.factory.answerChallenge(echallenge))
return
elif command == "AUTH" and status == ':':
# response accepted, authenticated
logger.debug("AUTH challenge accepted, success")
self.auth = True
self.factory.clientReady(self)
#print "authenticated"
else:
if command == "AUTH" and status == "!":
logger.warn("authentication failed (is FLUDHOME set"
" correctly?)")
print "authentication failed (is FLUDHOME set correctly?)"
else:
logger.warn("unknown message received before being"
" authenticated:")
logger.warn(" %s : %s" % (command, status))
print "unknown message received before being authenticated:"
print " %s : %s" % (command, status)
self.factory.setDie()
elif command == "DIAG":
subcommand = data[:4]
data = data[4:]
if subcommand == "NODE":
logger.debug("DIAG NODE: %s" % data)
data = fdecode(data)
result = ""
for i in data:
score = '%d' % i[4]
petID = "%064x" % i[2]
netID = "%s:%d" % (i[0], i[1])
if i[5] != 0:
now = int(time.time())
throttle = '(%d)' % (i[5]-now)
petID = petID[:(70-len(netID)-len(throttle))]+"... " \
+throttle
else:
petID = petID[:(70-len(netID))]+"..."
result += "%s %s %s\n" % (score, netID, petID)
result += "%d known nodes\n" % len(data)
d = self.factory.pending['NODE'].pop('')
d.callback(result)
return
if subcommand == "BKTS":
logger.debug("DIAG BKTS")
data = fdecode(data)
result = ""
for i in data:
for bucket in i:
result += "Bucket %s:\n" % bucket
for k in i[bucket]:
id = "%064x" % k[2]
netID = "%s:%d" % (k[0], k[1])
result += " %s %s...\n" \
% (netID,id[:72-len(netID)])
d = self.factory.pending['BKTS'].pop('')
d.callback(result)
return
elif status == ':':
response, data = data.split(status, 1)
logger.debug("DIAG %s: success" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("DIAG %s: failure" % subcommand)
d = self.factory.pending[subcommand].pop(data)
d.errback(failure.DefaultException(response))
elif status == ':':
response, data = data.split(status, 1)
logger.debug("%s: success" % command)
d = self.factory.pending[command].pop(data)
d.callback(fdecode(response))
elif status == "!":
response, data = data.split(status, 1)
logger.debug("%s: failure" % command)
if self.factory.pending.has_key(command):
if data not in self.factory.pending[command]:
#print "data key is '%s'" % data
print "pending is '%s'" % self.factory.pending[command]
if len(self.factory.pending[command]):
d = self.factory.pending[command].popitem()
d.errback(failure.DefaultException(response))
else:
d = self.factory.pending[command].pop(data)
d.errback(failure.DefaultException(response))
else:
print "failed command '%s' not in pending?" % command
print "pending is: %s" % self.factory.pending
if command != 'AUTH' and command != 'DIAG' and \
not None in self.factory.pending[command].values():
logger.debug("%s done at %s" % (command, time.ctime()))
class LocalClientFactory(ClientFactory):
protocol = LocalClient
def __init__(self, config):
self.config = config
self.messageQueue = []
self.client = None
self.die = False
self.pending = {'PUTF': {}, 'CRED': {}, 'GETI': {}, 'GETF': {},
'FNDN': {}, 'STOR': {}, 'RTRV': {}, 'VRFY': {}, 'FNDV': {},
'CRED': {}, 'LIST': {}, 'GETM': {}, 'PUTM': {}, 'NODE': {},
'BKTS': {}}
def clientConnectionFailed(self, connector, reason):
#print "connection failed: %s" % reason
logger.warn("connection failed: %s" % reason)
self.cleanup("connection failed: %s" % reason)
def clientConnectionLost(self, connector, reason):
#print "connection lost: %s" % reason
logger.debug("connection lost: %s" % reason)
self.cleanup("connection lost: %s" % reason)
def cleanup(self, msg):
# override me for cleanup
print msg;
def clientReady(self, instance):
self.client = instance
logger.debug("client ready, sending [any] queued msgs")
for i in self.messageQueue:
self._sendMessage(i)
def _sendMessage(self, msg):
if self.client:
logger.debug("sending msg '%s'" % msg)
self.client.sendLine(msg)
else:
logger.debug("queueing msg '%s'" % msg)
self.messageQueue.append(msg)
def answerChallenge(self, echallenge):
logger.debug("answering challenge")
echallenge = (fdecode(echallenge),)
challenge = self.config.Kr.decrypt(echallenge)
return challenge
def expire(self, pending, key):
if pending.has_key(fname):
logger.debug("timing out operation for %s" % key)
#print "timing out operation for %s" % key
pending.pop(key)
def addFile(self, type, fname):
logger.debug("addFile %s %s" % (type, fname))
if not self.pending[type].has_key(fname):
d = defer.Deferred()
self.pending[type][fname] = d
self._sendMessage(type+"?"+fname)
return d
else:
return self.pending[type][fname]
def sendPING(self, host, port):
logger.debug("sendPING")
d = defer.Deferred()
d.errback(failure.DefaultException(
"ping not yet implemented in FludLocalClient"))
return d
def sendPUTF(self, fname):
logger.debug("sendPUTF %s" % fname)
if os.path.isdir(fname):
dirlist = os.listdir(fname)
dlist = []
for i in dirlist:
dlist.append(self.sendPUTF(os.path.join(fname,i)))
dl = defer.DeferredList(dlist)
return dl
elif not self.pending['PUTF'].has_key(fname):
d = defer.Deferred()
self.pending['PUTF'][fname] = d
self._sendMessage("PUTF?"+fname)
#reactor.callLater(opTimeout, self.expire, self.pendingPUTF, fname)
return d
else:
return self.pending['PUTF'][fname]
def sendCRED(self, passphrase, email):
logger.debug("sendCRED")
key = fencode((self.config.Ku.encrypt(passphrase)[0], email))
if not self.pending['CRED'].has_key(key):
d = defer.Deferred()
self.pending['CRED'][key] = d
self._sendMessage("CRED?"+key)
return d
else:
return self.pending['CRED'][key]
def sendGETI(self, fID):
logger.debug("sendGETI")
if not self.pending['GETI'].has_key(fID):
d = defer.Deferred()
self.pending['GETI'][fID] = d
self._sendMessage("GETI?"+fID)
return d
else:
return self.pending['GETI'][fID]
def sendGETF(self, fname):
logger.debug("sendGETF")
master = listMeta(self.config)
if master.has_key(fname):
return self.addFile("GETF",fname)
elif fname[-1:] == os.path.sep:
dlist = []
for name in master:
if fname == name[:len(fname)]:
dlist.append(self.addFile("GETF",name))
dl = defer.DeferredList(dlist)
return dl
def sendFNDN(self, nID):
logger.debug("sendFNDN")
if not self.pending['FNDN'].has_key(nID):
d = defer.Deferred()
self.pending['FNDN'][nID] = d
self._sendMessage("FNDN?"+nID)
return d
else:
return self.pending['FNDN'][nID]
def sendLIST(self):
logger.debug("sendLIST")
if not self.pending['LIST'].has_key(""):
d = defer.Deferred()
self.pending['LIST'][''] = d
logger.debug("LIST['']=%s" % d)
self._sendMessage("LIST?")
return d
else:
return self.pending['LIST']['']
def sendGETM(self):
logger.debug("sendGETM")
if not self.pending['GETM'].has_key(''):
d = defer.Deferred()
self.pending['GETM'][''] = d
logger.debug("GETM['']=%s" % d)
self._sendMessage("GETM?")
return d
else:
return self.pending['GETM']['']
def sendPUTM(self):
logger.debug("sendPUTM")
if not self.pending['PUTM'].has_key(''):
d = defer.Deferred()
self.pending['PUTM'][''] = d
self._sendMessage("PUTM?")
return d
else:
return self.pending['PUTM']['']
def sendDIAGNODE(self):
logger.debug("sendDIAGNODE")
if not self.pending['NODE'].has_key(''):
d = defer.Deferred()
self.pending['NODE'][''] = d
self._sendMessage("DIAG?NODE")
return d
else:
return self.pending['NODE']['']
def sendDIAGBKTS(self):
logger.debug("sendDIAGBKTS")
if not self.pending['BKTS'].has_key(''):
d = defer.Deferred()
self.pending['BKTS'][''] = d
self._sendMessage("DIAG?BKTS")
return d
else:
return self.pending['BKTS']['']
def sendDIAGSTOR(self, command):
logger.debug("sendDIAGSTOR")
if not self.pending['STOR'].has_key(command):
d = defer.Deferred()
self.pending['STOR'][command] = d
self._sendMessage("DIAG?STOR "+command)
return d
else:
return self.pending['STOR'][command]
def sendDIAGRTRV(self, command):
logger.debug("sendDIAGRTRV")
if not self.pending['RTRV'].has_key(command):
d = defer.Deferred()
self.pending['RTRV'][command] = d
self._sendMessage("DIAG?RTRV "+command)
return d
else:
return self.pending['RTRV'][command]
def sendDIAGVRFY(self, command):
logger.debug("sendDIAGVRFY")
if not self.pending['VRFY'].has_key(command):
d = defer.Deferred()
self.pending['VRFY'][command] = d
self._sendMessage("DIAG?VRFY "+command)
return d
else:
return self.pending['VRFY'][command]
def sendDIAGFNDV(self, val):
logger.debug("sendDIAGFNDV")
if not self.pending['FNDV'].has_key(val):
d = defer.Deferred()
self.pending['FNDV'][val] = d
self._sendMessage("FNDV?"+val)
return d
else:
return self.pending['FNDV'][val]
def setDie(self):
self.die = True
# XXX: this should move into FludNode side of things (LocalClientPrimitives).
# anything that calls this should make calls ('LIST', others as necessary) to
# get at master metadata, otherwise we could have multiple writer problems.
# FludNode should make the file ro while running, too.
# And everyone that does anything with the master metadata should do it through
# methods of FludConfig, instead of by direct access to the file.
def listMeta(config):
    """
    Load and decode the master metadata file; returns {} when the file
    is empty.
    """
    path = os.path.join(config.metadir, config.metamaster)
    fmaster = open(path, 'r')
    raw = fmaster.read()
    fmaster.close()
    return fdecode(raw) if raw != "" else {}
| Python |
"""
ConnectionQueue, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
This module manages the connection queue. In order to reduce the
probability of the reactor getting tied up servicing requests/responses
during periods of extreme busy-ness (and thus 'starving' some ops,
causing TimeoutErrors), we throttle the number of outstanding requests
that we send to MAXOPS. The rest, we put in the 'waiting' queue, and
these are popped off when a spot becomes available.
"""
import logging
MAXOPS = 80 # maximum number of concurrent connections to maintain
pending = 0 # number of current connections
waiting = [] # queue of waiting connections to make. This queue contains
# tuples. The first element of the tuple must have a
# startRequest() func that takes the rest of the tuple as
# arguments.
logger = logging.getLogger("flud.client.connq")
def checkWaiting(resp, finishedOne=True):
    """
    Pop one item off the waiting queue (if capacity allows) and invoke its
    startRequest() method.  Any caller of enqueue() should eventually
    arrange for this to run, typically from a callback/errback chain; the
    'resp' argument is passed straight through so this can sit
    transparently in that chain.
    """
    global pending
    queued = len(waiting)
    logger.debug("in checkWaiting, len(waiting) = %d" % queued)
    #print "resp = %s..." % fencode(long(resp,16))[:8]
    #print "resp = %s..." % str(resp)
    if finishedOne:
        # one outstanding request just completed
        pending = pending - 1
        logger.debug("decremented pending to %s" % pending)
    if queued and pending < MAXOPS:
        entry = waiting.pop(0)
        req = entry[0]
        args = entry[1:]
        logger.debug("w: %d, p: %d, restoring Request %s(%s)" % (queued,
                pending, req.__class__.__name__, str(args)))
        req.startRequest(*args)
        pending += 1
    return resp
def enqueue(requestTuple):
    """
    Add requestTuple to the waiting queue and immediately give it a chance
    to run.  requestTuple[0] must expose startRequest(), which will be
    called with the remaining tuple elements when the item is dequeued by
    checkWaiting().
    """
    req = requestTuple[0]
    waiting.append(requestTuple)
    logger.debug("trying to do %s now..." % req.__class__.__name__)
    checkWaiting(None, finishedOne=False)
| Python |
"""
ClientPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive client storage protocol
"""
from twisted.web import http, client
from twisted.internet import reactor, threads, defer, error
from twisted.python import failure
import time, os, stat, httplib, sys, logging, tarfile, gzip
from StringIO import StringIO
from flud.FludCrypto import FludRSA
from flud.fencode import fencode, fdecode
import ConnectionQueue
from FludCommUtil import *
logger = logging.getLogger("flud.client.op")
loggerid = logging.getLogger("flud.client.op.id")
loggerstor = logging.getLogger("flud.client.op.stor")
loggerstoragg = logging.getLogger("flud.client.op.stor.agg")
loggerrtrv = logging.getLogger("flud.client.op.rtrv")
loggerdele = logging.getLogger("flud.client.op.dele")
loggervrfy = logging.getLogger("flud.client.op.vrfy")
loggerauth = logging.getLogger("flud.client.op.auth")
MINSTORSIZE = 512000 # anything smaller than this tries to get aggregated
TARFILE_TO = 2 # timeout for checking aggregated tar files
MAXAUTHRETRY = 4 # number of times to retry auth
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should also
# consider Zooko's links in the parent to this post)
# XXX: disallow requests to self.
class REQUEST(object):
    """
    Common base for outgoing HTTP requests that speak the FludProtocol.
    Stores the destination, the requesting node (when given), and the
    default protocol headers.
    """
    def __init__(self, host, port, node=None):
        """
        Subclasses should, by convention, build a URL and start retrieving
        it from their own constructor.
        @param node the requestor's node object
        """
        self.host = host
        self.port = port
        self.headers = {'Fludprotocol': PROTOCOL_VERSION,
                'User-Agent': 'FludClient'}
        self.dest = "%s:%d" % (host, port)
        if node:
            # keep a handle on the local node and its config for subclasses
            self.node = node
            self.config = node.config
class SENDGETID(REQUEST):
    """
    Request a remote node's ID/public key, identifying ourselves in the
    same request (the op is reciprocal).  The result arrives on
    self.deferred as an FludRSA public key; timeouts are retried up to
    MAXTIMEOUTS times.
    """
    def __init__(self, node, host, port):
        """
        Send a request to retrive the node's ID. This is a reciprocal
        request -- must send my own ID in order to get one back.
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        Ku = self.node.config.Ku.exportPublicKey()
        url = "http://"+host+":"+str(port)+"/ID?"
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        #self.nKu = {}
        self.timeoutcount = 0
        self.deferred = defer.Deferred()
        # throttled through the connection queue; startRequest() fires later
        ConnectionQueue.enqueue((self, node, host, port, url))
    def startRequest(self, node, host, port, url):
        # invoked by ConnectionQueue when a connection slot frees up
        loggerid.info("sending SENDGETID to %s" % self.dest)
        d = self._sendRequest(node, host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)
        d.addErrback(self._errID, node, host, port, url)
    def _sendRequest(self, node, host, port, url):
        factory = getPageFactory(url, timeout=primitive_to,
                headers=self.headers)
        d2 = factory.deferred
        d2.addCallback(self._getID, factory, host, port)
        d2.addErrback(self._errID, node, host, port, url)
        return d2
    def _getID(self, response, factory, host, port):
        # parse the response body as a public-key dict and register the node
        loggerid.debug( "received ID response: %s" % response)
        if not hasattr(factory, 'status'):
            raise failure.DefaultException(
                    "SENDGETID FAILED: no status in factory")
        if eval(factory.status) != http.OK:
            raise failure.DefaultException("SENDGETID FAILED to "+self.dest+": "
                    +"server sent status "+factory.status+", '"+response+"'")
        try:
            # SECURITY NOTE(review): eval() on a network-supplied response
            # body executes arbitrary expressions from the peer; a literal
            # parser (e.g. ast.literal_eval) would be safer -- flagging,
            # not changing behavior here.
            nKu = {}
            nKu = eval(response)
            nKu = FludRSA.importPublicKey(nKu)
            loggerid.info("SENDGETID PASSED to %s" % self.dest)
            updateNode(self.node.client, self.config, host, port, nKu)
            return nKu
        except:
            raise failure.DefaultException("SENDGETID FAILED to "+self.dest+": "
                    +"received response, but it did not contain valid key")
    def _errID(self, err, node, host, port, url):
        # retry on timeout/connection-lost up to MAXTIMEOUTS attempts;
        # anything else propagates to the caller's errback chain
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            #print "GETID request error: %s" % err.__class__.__name__
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                #print "trying again [#%d]...." % self.timeoutcount
                return self._sendRequest(node, host, port, url)
            else:
                #print "not trying again [#%d]" % self.timeoutcount
                return err
        # XXX: updateNode
        #print "_errID: %s" % err
        #print "_errID: %s" % str(err.stack)
        return err
# XXX: either 1) produce filekey here or 2) send it in as part of API
# (similar fixes for SENDRETRIEVE and VERIFY)? Currently filekey is
# chosen by caller, and is simply the filename.
class SENDSTORE(REQUEST):
def __init__(self, nKu, node, host, port, datafile, metadata=None, fsize=0):
"""
Try to upload a file.
"""
host = getCanonicalIP(host)
REQUEST.__init__(self, host, port, node)
loggerstor.info("sending STORE request to %s" % self.dest)
if not fsize:
fsize = os.stat(datafile)[stat.ST_SIZE]
Ku = self.node.config.Ku.exportPublicKey()
filekey = os.path.basename(datafile) # check this before sending
params = [('nodeID', self.node.config.nodeID),
('Ku_e', str(Ku['e'])),
('Ku_n', str(Ku['n'])),
('port', str(self.node.config.port)),
('size', str(fsize))]
self.timeoutcount = 0
self.deferred = defer.Deferred()
ConnectionQueue.enqueue((self, self.headers, nKu, host, port,
filekey, datafile, metadata, params, True))
#self.deferred = self._sendRequest(self.headers, nKu, host, port,
# datafile, params, True)
def startRequest(self, headers, nKu, host, port, filekey,
datafile, metadata, params, skipFile):
d = self._sendRequest(headers, nKu, host, port, filekey,
datafile, metadata, params, skipFile)
d.addBoth(ConnectionQueue.checkWaiting)
d.addCallback(self.deferred.callback)
d.addErrback(self.deferred.errback)
def _sendRequest(self, headers, nKu, host, port, filekey,
datafile, metadata, params, skipfile=False):
"""
skipfile - set to True if you want to send everything but file data
(used to send the unauthorized request before responding to challenge)
"""
if skipfile:
files = [(None, 'filename')]
elif metadata:
metakey = metadata[0]
params.append(('metakey', metakey))
metafile = metadata[1]
files = [(datafile, 'filename'), (metafile, 'meta')]
else:
files = [(datafile, 'filename')]
deferred = threads.deferToThread(fileUpload, host, port,
'/file/%s' % filekey, files, params, headers=self.headers)
deferred.addCallback(self._getSendStore, nKu, host, port, filekey,
datafile, metadata, params, self.headers)
deferred.addErrback(self._errSendStore,
"Couldn't upload file %s to %s:%d" % (datafile, host, port),
self.headers, nKu, host, port, filekey, datafile, metadata,
params)
return deferred
def _getSendStore(self, httpconn, nKu, host, port, filekey,
        datafile, metadata, params, headers):
    """
    Upload completed; fetch the HTTP response in a worker thread and
    pass it on for status inspection.
    """
    response_d = threads.deferToThread(httpconn.getresponse)
    response_d.addCallback(self._getSendStore2, httpconn, nKu, host, port,
            filekey, datafile, metadata, params, headers)
    response_d.addErrback(self._errSendStore, "Couldn't get response",
            headers, nKu, host, port, filekey, datafile, metadata,
            params, httpconn)
    return response_d
def _getSendStore2(self, response, httpconn, nKu, host, port,
        filekey, datafile, metadata, params, headers):
    """
    Examine the STORE response status.  On 401, answer the server's
    challenge and re-send (this time with file data, since skipfile
    defaults to False); on 409 raise BadCASKeyException; on any other
    non-200 raise; on 200 return the response body.
    """
    httpconn.close()
    if response.status == http.UNAUTHORIZED:
        loggerstor.info("SENDSTORE unauthorized, sending credentials")
        # the server carries its challenge in the HTTP reason phrase
        challenge = response.reason
        d = answerChallengeDeferred(challenge, self.node.config.Kr,
                self.node.config.groupIDu, nKu.id(), headers)
        # the callback result (headers dict with Authorization set)
        # becomes the first argument of _sendRequest
        d.addCallback(self._sendRequest, nKu, host, port, filekey,
                datafile, metadata, params)
        d.addErrback(self._errSendStore, "Couldn't answerChallenge",
                headers, nKu, host, port, filekey, datafile, metadata,
                params, httpconn)
        return d
    elif response.status == http.CONFLICT:
        result = response.read()
        # XXX: client should check key before ever sending request
        raise BadCASKeyException("%s %s"
                % (response.status, response.reason))
    elif response.status != http.OK:
        result = response.read()
        raise failure.DefaultException(
                "received %s in SENDSTORE response: %s"
                % (response.status, result))
    else:
        result = response.read()
        # success: refresh this peer's entry in our node tables
        updateNode(self.node.client, self.config, host, port, nKu)
        loggerstor.info("received SENDSTORE response from %s: %s"
                % (self.dest, str(result)))
        return result
def _errSendStore(self, err, msg, headers, nKu, host, port,
        filekey, datafile, metadata, params, httpconn=None):
    """
    Error handler for STORE: retries socket errors up to MAXTIMEOUTS
    (retries include file data, since skipfile defaults to False), lets
    BadCASKeyException pass through quietly, and logs anything else.
    Always returns the failure so it propagates down the chain.
    """
    if err.check('socket.error'):
        self.timeoutcount += 1
        if self.timeoutcount < MAXTIMEOUTS:
            print "trying again [#%d]...." % self.timeoutcount
            return self._sendRequest(headers, nKu, host, port, filekey,
                    datafile, metadata, params)
        else:
            print "Maxtimeouts exceeded: %d" % self.timeoutcount
    elif err.check(BadCASKeyException):
        # expected when the CAS key doesn't match content; caller handles
        pass
    else:
        print "%s: unexpected error in SENDSTORE: %s" % (msg,
                str(err.getErrorMessage()))
    # XXX: updateNode
    if httpconn:
        httpconn.close()
    return err
# aggDeferredMap: a map of maps containing lists of deferreds.  The
# deferred(s) for file 'x' in tarball 'y' are accessed as
# aggDeferredMap['y']['x']
aggDeferredMap = {}
# aggTimeoutMap: a map of pending timeout calls (from reactor.callLater),
# one per tarball.  The timeout for tarball 'y' is stored in
# aggTimeoutMap['y']
aggTimeoutMap = {}
class AggregateStore:
    """
    Batches many small files bound for the same peer into one tarball,
    then ships the gzipped tarball with a single SENDSTORE once a size or
    time threshold is reached.  Each caller gets a per-file deferred
    (self.deferred) that fires when the containing tarball's transfer
    completes.
    """
    # XXX: if multiple guys store the same file, we're going to get into bad
    # cb state (the except clause in errbackTarfiles).  Need to catch this
    # as it happens... (this happens e.g. for small files with the same
    # filehash, e.g, 0-byte files, file copies etc).  Should fix this in
    # FludClient -- non-agg store has a similar problem (encoded file chunks
    # get deleted out from under successive STOR ops for the same chunk,
    # i.e. from two concurrent STORs of the same file contents)

    def __init__(self, nKu, node, host, port, datafile, metadata):
        """
        Append datafile (and optional (metakey, file) metadata) to the
        tarball for this (nKu, host, port) destination, creating the
        tarball and its send timeout if necessary.
        """
        # one tarball per (target node id, host, port) triple
        tarfilename = os.path.join(node.config.clientdir,nKu.id())\
                +'-'+host+'-'+str(port)+".tar"
        loggerstoragg.debug("tarfile name is %s" % tarfilename)
        if not os.path.exists(tarfilename) \
                or not aggDeferredMap.has_key(tarfilename):
            loggerstoragg.debug("creating tarfile %s to append %s"
                    % (tarfilename, datafile))
            tar = tarfile.open(tarfilename, "w")
            # schedule the send; resetTimeout may push this back while
            # the tarball is still below MINSTORSIZE
            tarfileTimeout = reactor.callLater(TARFILE_TO, self.sendTar,
                    tarfilename, nKu, node, host, port)
            aggDeferredMap[tarfilename] = {}
            aggTimeoutMap[tarfilename] = tarfileTimeout
        else:
            loggerstoragg.debug("opening tarfile %s to append %s"
                    % (tarfilename, datafile))
            tar = tarfile.open(tarfilename, "a")
        # avoid duplicate members (same basename) within one tarball
        if os.path.basename(datafile) not in tar.getnames():
            loggerstoragg.info("adding datafile %s to tarball, %s"
                    % (os.path.basename(datafile), tar.getnames()))
            loggerstoragg.debug("adding data to tarball")
            tar.add(datafile, os.path.basename(datafile))
        else:
            loggerstoragg.info("skip adding datafile %s to tarball" % datafile)
        if metadata:
            # metadata is stored as a sibling member named
            # <datafile>.<metakey>.meta
            metafilename = "%s.%s.meta" % (os.path.basename(datafile),
                    metadata[0])
            loggerstoragg.debug("metadata filename is %s" % metafilename)
            try:
                if isinstance(metadata[1], StringIO):
                    loggerstoragg.debug("metadata is StringIO")
                    tinfo = tarfile.TarInfo(metafilename)
                    # determine member size by seeking to the end of the
                    # StringIO, then rewind before addfile reads it
                    metadata[1].seek(0,2)
                    tinfo.size = metadata[1].tell()
                    metadata[1].seek(0,0)
                    tar.addfile(tinfo, metadata[1])
                else:
                    loggerstoragg.debug("metadata is file")
                    tar.add(metadata[1], metafilename)
            except:
                import traceback
                loggerstoragg.debug("exception while adding metadata to"
                        " tarball")
                print sys.exc_info()[2]
                traceback.print_exc()
        tar.close()
        loggerstoragg.debug("prepping deferred")
        # XXX: (re)set timeout for tarfilename
        self.deferred = defer.Deferred()
        loggerstoragg.debug("adding deferred on %s for %s"
                % (tarfilename, datafile))
        # register this caller's deferred under tarball -> member name
        try:
            aggDeferredMap[tarfilename][os.path.basename(datafile)].append(
                    self.deferred)
        except KeyError:
            aggDeferredMap[tarfilename][os.path.basename(datafile)] \
                    = [self.deferred]
        self.resetTimeout(aggTimeoutMap[tarfilename], tarfilename)

    def resetTimeout(self, timeoutFunc, tarball):
        """
        Push the send timeout back while the tarball is still smaller
        than MINSTORSIZE; once big enough, let the timer fire as-is.
        """
        loggerstoragg.debug("in resetTimeout...")
        if timeoutFunc.active():
            if os.stat(tarball)[stat.ST_SIZE] < MINSTORSIZE:
                loggerstoragg.debug("...reset")
                timeoutFunc.reset(TARFILE_TO)
                return
        loggerstoragg.debug("...didn't reset")

    def sendTar(self, tarball, nKu, node, host, port):
        """
        Timer fired: gzip the tarball, delete the original, and ship the
        .tar.gz with a single SENDSTORE.
        """
        gtarball = tarball+".gz"
        loggerstoragg.info(
                "aggregation op triggered, sending tarfile %s to %s:%d"
                % (gtarball, host, port))
        # XXX: bad blocking io
        gtar = gzip.GzipFile(gtarball, 'wb')
        gtar.write(file(tarball, 'r').read())
        gtar.close()
        os.remove(tarball)
        self.deferred = SENDSTORE(nKu, node, host, port, gtarball).deferred
        self.deferred.addCallback(self.callbackTarfiles, tarball)
        self.deferred.addErrback(self.errbackTarfiles, tarball)

    # XXX: make aggDeferredMap use a non-.tar key, so that we don't have to
    # keep passing 'tarball' around (since we removed it and are really only
    # interested in gtarball now, use gtarball at the least)
    def callbackTarfiles(self, result, tarball):
        """
        Transfer succeeded: fire every registered deferred for each data
        member of the tarball (meta members are skipped), then delete
        the gzipped tarball.
        """
        loggerstoragg.debug("callbackTarfiles")
        gtarball = tarball+".gz"
        tar = tarfile.open(gtarball, "r:gz")
        cbs = []
        try:
            for tarinfo in tar:
                if tarinfo.name[-5:] != '.meta':
                    # pop so a second callback can't fire the same list
                    dlist = aggDeferredMap[tarball].pop(tarinfo.name)
                    loggerstoragg.debug("callingback for %s in %s"
                            " (%d deferreds)"
                            % (tarinfo.name, tarball, len(dlist)))
                    for d in dlist:
                        cbs.append(d)
        except KeyError:
            loggerstoragg.warn("aggDeferredMap has keys: %s"
                    % str(aggDeferredMap.keys()))
            loggerstoragg.warn("aggDeferredMap[%s] has keys: %s" % (tarball,
                    str(aggDeferredMap[tarball].keys())))
        tar.close()
        loggerstoragg.debug("deleting tarball %s" % gtarball)
        os.remove(gtarball)
        # fire only after the tar is closed and removed
        for cb in cbs:
            cb.callback(result)

    def errbackTarfiles(self, failure, tarball):
        """
        Transfer failed: errback every registered deferred for each
        member of the tarball.  The gzipped tarball is deliberately kept
        on disk for debugging.
        """
        loggerstoragg.debug("errbackTarfiles")
        gtarball = tarball+".gz"
        tar = tarfile.open(gtarball, "r:gz")
        cbs = []
        try:
            for tarinfo in tar:
                dlist = aggDeferredMap[tarball].pop(tarinfo.name)
                loggerstoragg.debug("erringback for %s in %s"
                        " (%d deferreds)"
                        % (tarinfo.name, tarball, len(dlist)))
                for d in dlist:
                    cbs.append(d)
        except KeyError:
            loggerstoragg.warn("aggDeferredMap has keys: %s"
                    % str(aggDeferredMap.keys()))
            loggerstoragg.warn("aggDeferredMap[%s] has keys: %s" % (tarball,
                    str(aggDeferredMap[tarball].keys())))
        tar.close()
        loggerstoragg.debug("NOT deleting tarball %s (for debug)" % gtarball)
        for cb in cbs:
            cb.errback(failure)
class SENDRETRIEVE(REQUEST):
def __init__(self, nKu, node, host, port, filekey, metakey=True):
"""
Try to download a file.
"""
host = getCanonicalIP(host)
REQUEST.__init__(self, host, port, node)
loggerrtrv.info("sending RETRIEVE request to %s:%s" % (host, str(port)))
Ku = self.node.config.Ku.exportPublicKey()
url = 'http://'+host+':'+str(port)+'/file/'+filekey+'?'
url += 'nodeID='+str(self.node.config.nodeID)
url += '&port='+str(self.node.config.port)
url += "&Ku_e="+str(Ku['e'])
url += "&Ku_n="+str(Ku['n'])
url += "&metakey="+str(metakey)
#filename = self.node.config.clientdir+'/'+filekey
self.timeoutcount = 0
self.deferred = defer.Deferred()
ConnectionQueue.enqueue((self, self.headers, nKu, host, port, url))
def startRequest(self, headers, nKu, host, port, url):
#print "doing RET: %s" % filename
loggerrtrv.info("startRequest to %s:%s" % (host, str(port)))
d = self._sendRequest(headers, nKu, host, port, url)
d.addBoth(ConnectionQueue.checkWaiting)
d.addCallback(self.deferred.callback)
d.addErrback(self.deferred.errback)
def _sendRequest(self, headers, nKu, host, port, url):
loggerrtrv.info("_sendRequest to %s:%s" % (host, str(port)))
factory = multipartDownloadPageFactory(url, self.node.config.clientdir,
headers=headers, timeout=transfer_to)
deferred = factory.deferred
deferred.addCallback(self._getSendRetrieve, nKu, host, port, factory)
deferred.addErrback(self._errSendRetrieve, nKu, host, port, factory,
url, headers)
return deferred
def _getSendRetrieve(self, response, nKu, host, port, factory):
loggerrtrv.info("_getSendRetrieve to %s:%s" % (host, str(port)))
if eval(factory.status) == http.OK:
# response is None, since it went to file (if a server error
# occured, it may be printed in this file)
# XXX: need to check that file hashes to key! If we don't do this,
# malicious nodes can corrupt entire files without detection!
#result = "received SENDRETRIEVE response"
loggerrtrv.info("_getSendRetrieve OK from %s:%s"
% (host, str(port)))
loggerrtrv.info(response)
updateNode(self.node.client, self.config, host, port, nKu)
return response
else:
loggerrtrv.info("_getSendRetrieve fail from %s:%s"
% (host, str(port)))
raise failure.DefaultException("SENDRETRIEVE FAILED: "
+"server sent status "+factory.status+", '"+response+"'")
def _errSendRetrieve(self, err, nKu, host, port, factory, url, headers):
loggerrtrv.info("_errSendRetrieve from %s:%s" % (host, str(port)))
if err.check('twisted.internet.error.TimeoutError') or \
err.check('twisted.internet.error.ConnectionLost'): #or \
#err.check('twisted.internet.error.ConnectBindError'):
loggerrtrv.info("_errSendRetrieve timeout/connlost from %s:%s"
% (host, str(port)))
self.timeoutcount += 1
if self.timeoutcount < MAXTIMEOUTS:
#print "RETR trying again [#%d]..." % self.timeoutcount
return self._sendRequest(headers, nKu, host, port, url)
else:
#print "RETR timeout exceeded: %d" % self.timeoutcount
pass
elif hasattr(factory, 'status') and \
eval(factory.status) == http.UNAUTHORIZED:
loggerrtrv.info("SENDRETRIEVE unauthorized, sending credentials")
challenge = err.getErrorMessage()[4:]
d = answerChallengeDeferred(challenge, self.node.config.Kr,
self.node.config.groupIDu, nKu.id(), headers)
d.addCallback(self._sendRequest, nKu, host, port, url)
#d.addErrback(self._errSendRetrieve, nKu, host, port, factory,
# url, headers)
return d
#extraheaders = answerChallenge(challenge, self.node.config.Kr,
# self.node.config.groupIDu, nKu.id(), self.headers)
#return self._sendRequest(nKu, host, port, url, extraheaders)
# XXX: these remaining else clauses are really just for debugging...
elif hasattr(factory, 'status'):
if eval(factory.status) == http.NOT_FOUND:
err = NotFoundException(err)
elif eval(factory.status) == http.BAD_REQUEST:
err = BadRequestException(err)
elif err.check('twisted.internet.error.ConnectionRefusedError'):
pass # fall through to return err
else:
print "non-timeout, non-UNAUTH RETR request error: %s" % err
# XXX: updateNode
loggerrtrv.info("SENDRETRIEVE failed")
raise err
class SENDDELETE(REQUEST):
    """
    Delete a stored file (or its metadata) on a peer.  The result or
    failure is delivered via self.deferred.
    """
    def __init__(self, nKu, node, host, port, filekey, metakey):
        """
        Queue a DELETE request for filekey/metakey at host:port.
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        loggerdele.info("sending DELETE request to %s:%s" % (host, str(port)))
        Ku = self.node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/file/'+filekey+'?'
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += "&metakey="+str(metakey)
        self.timeoutcount = 0
        self.authRetry = 0
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, self.headers, nKu, host, port, url))

    def startRequest(self, headers, nKu, host, port, url):
        """Dequeued entry point: issue the request, chain to self.deferred."""
        d = self._sendRequest(headers, nKu, host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, headers, nKu, host, port, url):
        """Issue the HTTP DELETE."""
        factory = getPageFactory(url, method="DELETE", headers=headers,
                timeout=primitive_to)
        deferred = factory.deferred
        deferred.addCallback(self._getSendDelete, nKu, host, port, factory)
        deferred.addErrback(self._errSendDelete, nKu, host, port, factory, url,
                headers)
        return deferred

    def _getSendDelete(self, response, nKu, host, port, factory):
        """Check the HTTP status of a completed DELETE."""
        # NOTE(fix): int() instead of eval() on the peer-supplied status
        if int(factory.status) == http.OK:
            loggerdele.info("received SENDDELETE response")
            updateNode(self.node.client, self.config, host, port, nKu)
            return response
        else:
            # XXX: updateNode
            # NOTE(fix): str(response) -- response may be None here
            raise failure.DefaultException("SENDDELETE FAILED: "
                    +"server sent status "+factory.status+", '"
                    +str(response)+"'")

    def _errSendDelete(self, err, nKu, host, port, factory, url, headers):
        """
        Error handler: retry timeouts/lost connections up to MAXTIMEOUTS,
        answer 401 challenges (bounded by MAXAUTHRETRY), map 404/400 to
        specific exceptions, and propagate everything else.
        """
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                return self._sendRequest(headers, nKu, host, port, url)
            # retries exhausted: fall through to return err below
        elif hasattr(factory, 'status') and \
                int(factory.status) == http.UNAUTHORIZED and \
                self.authRetry < MAXAUTHRETRY:
            # XXX: add this authRetry stuff to all the other op classes
            # (so that we don't DOS ourselves and another node)
            self.authRetry += 1
            loggerdele.info("SENDDELETE unauthorized, sending credentials")
            # strip the leading "401 " to recover the server's challenge
            challenge = err.getErrorMessage()[4:]
            d = answerChallengeDeferred(challenge, self.node.config.Kr,
                    self.node.config.groupIDu, nKu.id(), headers)
            d.addCallback(self._sendRequest, nKu, host, port, url)
            d.addErrback(self._errSendDelete, nKu, host, port, factory,
                    url, headers)
            return d
        elif hasattr(factory, 'status'):
            # XXX: updateNode
            loggerdele.info("SENDDELETE failed")
            if int(factory.status) == http.NOT_FOUND:
                err = NotFoundException(err)
            elif int(factory.status) == http.BAD_REQUEST:
                err = BadRequestException(err)
            raise err
        return err
class SENDVERIFY(REQUEST):
    """
    Ask a peer to prove it still holds a file by hashing a byte range of
    it.  The result or failure is delivered via self.deferred.
    """
    def __init__(self, nKu, node, host, port, filename, offset, length,
            meta=None):
        """
        Try to verify a file.
        If meta is present, it should be a (metakey, filelikeobj) pair.
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        filekey = os.path.basename(filename) # XXX: filekey should be hash
        loggervrfy.info("sending VERIFY request to %s:%s" % (host, str(port)))
        Ku = self.node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/hash/'+filekey+'?'
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += "&offset="+str(offset)
        url += "&length="+str(length)
        if meta:
            url += "&metakey="+str(meta[0])
            url += "&meta="+fencode(meta[1].read())
        self.timeoutcount = 0
        if not isinstance(nKu, FludRSA):
            raise ValueError("must pass in a FludRSA as nKu to SENDVERIFY")
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, self.headers, nKu, host, port, url))

    def startRequest(self, headers, nKu, host, port, url):
        """Dequeued entry point: issue the request, chain to self.deferred."""
        d = self._sendRequest(headers, nKu, host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, headers, nKu, host, port, url):
        """Issue the GET to /hash/<filekey>."""
        loggervrfy.debug("in VERIFY sendReq %s" % port)
        factory = getPageFactory(url, headers=headers, timeout=primitive_to)
        deferred = factory.deferred
        deferred.addCallback(self._getSendVerify, nKu, host, port, factory)
        deferred.addErrback(self._errSendVerify, nKu, host, port, factory, url,
                headers)
        return deferred

    def _getSendVerify(self, response, nKu, host, port, factory):
        """Check the HTTP status of a completed VERIFY."""
        loggervrfy.debug("got vrfy response")
        # NOTE(fix): int() instead of eval() on the peer-supplied status
        if int(factory.status) == http.OK:
            loggervrfy.info("received SENDVERIFY response")
            updateNode(self.node.client, self.config, host, port, nKu)
            return response
        else:
            # XXX: updateNode
            loggervrfy.debug("received non-OK SENDVERIFY response")
            # NOTE(fix): str(response) -- response may be None here
            raise failure.DefaultException("SENDVERIFY FAILED: "
                    +"server sent status "+factory.status+", '"
                    +str(response)+"'")

    def _errSendVerify(self, err, nKu, host, port, factory, url, headers):
        """
        Error handler: retry timeouts/lost connections up to MAXTIMEOUTS,
        answer 401 challenges, map 404/400 to specific exceptions, and
        propagate everything else.
        """
        loggervrfy.debug("got vrfy err")
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                return self._sendRequest(headers, nKu, host, port, url)
        elif hasattr(factory, 'status') and \
                int(factory.status) == http.UNAUTHORIZED:
            loggervrfy.info("SENDVERIFY unauthorized, sending credentials")
            # strip the leading "401 " to recover the server's challenge
            challenge = err.getErrorMessage()[4:]
            d = answerChallengeDeferred(challenge, self.node.config.Kr,
                    self.node.config.groupIDu, nKu.id(), headers)
            d.addCallback(self._sendRequest, nKu, host, port, url)
            d.addErrback(self._errVerify, nKu, host, port, factory,
                    url, headers, challenge)
            return d
        elif hasattr(factory, 'status'):
            # XXX: updateNode
            loggervrfy.info("SENDVERIFY failed: %s" % err.getErrorMessage())
            if int(factory.status) == http.NOT_FOUND:
                err = NotFoundException(err)
            elif int(factory.status) == http.BAD_REQUEST:
                err = BadRequestException(err)
            raise err
        # NOTE(fix): previously this method fell off the end (e.g. when
        # MAXTIMEOUTS was exceeded), returning None and silently turning
        # the failure into a success; propagate it like SENDDELETE does.
        return err

    def _errVerify(self, err, nKu, host, port, factory, url, headers,
            challenge):
        """
        Errback for a failed challenge-answer attempt: log details and
        pass the failure along.
        """
        # we can get in here after storing the same file as another node
        # when that data is stored in tarballs under its ID.  It was
        # expected that this would be caught up in _getSendVerify...
        # figure out why it isn't.
        loggervrfy.debug("factory status=%s" % factory.status)
        loggervrfy.debug("couldn't answer challenge from %s:%d, WHOOPS: %s"
                % (host, port, err.getErrorMessage()))
        loggervrfy.debug("challenge was: '%s'" % challenge)
        return err
def answerChallengeDeferred(challenge, Kr, groupIDu, sID, headers):
    """
    Asynchronous wrapper around answerChallenge(): run it in a worker
    thread and return a Deferred that fires with the augmented headers.
    """
    return threads.deferToThread(
            answerChallenge, challenge, Kr, groupIDu, sID, headers)
def answerChallenge(challenge, Kr, groupIDu, sID, headers=None):
    """
    Decrypt the server's challenge with our private key and build the
    HTTP Basic Authorization header from the response and groupIDu.

    challenge - fencoded, encrypted challenge string from the server.
    Kr        - our private key (used to decrypt the challenge).
    groupIDu  - our group credential, appended after the response.
    sID       - hex ID of the server that issued the challenge; the
                decrypted challenge must be prefixed with it.
    headers   - optional header dict to augment; mutated in place and
                returned.

    Raises ImposterException if the decrypted response is not prefixed
    with sID (see the comment below for the attack this prevents).
    """
    # NOTE(fix): the old signature used a shared mutable default
    # ('headers={}'); the dict is mutated below, so calls relying on the
    # default could see stale Authorization values from earlier calls.
    if headers is None:
        headers = {}
    loggerauth.debug("got challenge: '%s'" % challenge)
    sID = binascii.unhexlify(sID)
    challenge = (fdecode(challenge),)
    response = fencode(Kr.decrypt(challenge))
    # XXX: RSA.decrypt won't restore leading 0's.  This causes some
    # challenges to fail when they shouldn't -- solved for now on the
    # server side by generating non-0 leading challenges.
    loggerauth.debug("decrypted challenge to %s" % response)
    responseID = fdecode(response)[:len(sID)]
    loggerauth.debug(" response id: %s" % fencode(responseID))
    if responseID != sID:
        # fail the op.
        # If we don't do this, we may be allowing the server to build a
        # dictionary useful for attack: node A (server) collects un-IDed
        # challenge/response pairs by issuing challenges to node B
        # (client), then uses those responses to pose as B to some other
        # server C.  Such an attack can happen in real-time, with A
        # relaying challenges from C to B and replaying B's responses to
        # C to gain resources there as an imposter.  The ID prefix
        # prevents this.
        # XXX: trust-- (must go by ip:port, since ID could be innocent)
        raise ImposterException("node %s is issuing invalid challenges --"
                " claims to have id=%s" % (fencode(sID), fencode(responseID)))
    response = fdecode(response)[len(sID):]
    loggerauth.debug(" challenge response: '%s'" % fencode(response))
    response = fencode(response)+":"+groupIDu
    loggerauth.debug("response:groupIDu=%s" % response)
    response = binascii.b2a_base64(response)
    loggerauth.debug("b64(response:groupIDu)=%s" % response)
    response = "Basic %s" % response
    headers['Authorization'] = response
    return headers
| Python |
"""
FludClient.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
flud client ops.
"""
from twisted.web import client
from twisted.internet import error
import os, stat, httplib, sys, logging
from ClientPrimitives import *
from ClientDHTPrimitives import *
import FludCommUtil
logger = logging.getLogger('flud.client')
class FludClient(object):
"""
This class contains methods which create request objects
"""
def __init__(self, node):
self.node = node
self.currentStorOps = {}
"""
Data storage primitives
"""
def redoTO(self, f, node, host, port):
print "in redoTO: %s" % f
#print "in redoTO: %s" % dir(f.getTraceback())
if f.getTraceback().find("error.TimeoutError"):
print "retrying........"
return self.sendGetID(host, port)
else:
return f
def sendGetID(self, host, port):
#return SENDGETID(self.node, host, port).deferred
d = SENDGETID(self.node, host, port).deferred
#d.addErrback(self.redoTO, self.node, host, port)
return d
# XXX: we should cache nKu so that we don't do the GETID for all of these
# ops every single time
def sendStore(self, filename, metadata, host, port, nKu=None):
# XXX: need to keep a map of 'filename' to deferreds, in case we are
# asked to store the same chunk more than once concurrently (happens
# for 0-byte files or from identical copies of the same file, for
# example). both SENDSTORE and AggregateStore will choke on this.
# if we find a store req in said map, just return that deferred instead
# of redoing the op. [note, could mess up node choice... should also do
# this on whole-file level in FileOps]
# XXX: need to remove from currentStorOps on success or failure
key = "%s:%d:%s" % (host, port, filename)
if self.currentStorOps.has_key(key):
logger.debug("returning saved deferred for %s in sendStore"
% filename)
return self.currentStorOps[key]
def sendStoreWithnKu(nKu, host, port, filename, metadata):
return SENDSTORE(nKu, self.node, host, port, filename,
metadata).deferred
def removeKey(r, key):
self.currentStorOps.pop(key)
return r
if not nKu:
# XXX: doesn't do AggregateStore if file is small. Can fix by
# moving this AggStore v. SENDSTORE choice into SENDSTORE
# proper
logger.warn("not doing AggregateStore on small file because"
" of missing nKu")
print "not doing AggregateStore on small file because" \
" of missing nKu"
d = self.sendGetID(host, port)
d.addCallback(sendStoreWithnKu, host, port, filename, metadata)
self.currentStorOps[key] = d
return d
fsize = os.stat(filename)[stat.ST_SIZE];
if fsize < MINSTORSIZE:
logger.debug("doing AggStore")
if metadata:
logger.debug("with metadata")
d = AggregateStore(nKu, self.node, host, port, filename,
metadata).deferred
else:
logger.debug("SENDSTORE")
d = SENDSTORE(nKu, self.node, host, port, filename,
metadata).deferred
self.currentStorOps[key] = d
d.addBoth(removeKey, key)
return d
# XXX: need a version that takes a metakey, too
def sendRetrieve(self, filekey, host, port, nKu=None, metakey=True):
def sendRetrieveWithNKu(nKu, host, port, filekey, metakey=True):
return SENDRETRIEVE(nKu, self.node, host, port, filekey,
metakey).deferred
if not nKu:
d = self.sendGetID(host, port)
d.addCallback(sendRetrieveWithNKu, host, port, filekey, metakey)
return d
else:
return SENDRETRIEVE(nKu, self.node, host, port, filekey,
metakey).deferred
def sendVerify(self, filekey, offset, length, host, port, nKu=None,
meta=None):
def sendVerifyWithNKu(nKu, host, port, filekey, offset, length,
meta=True):
return SENDVERIFY(nKu, self.node, host, port, filekey, offset,
length, meta).deferred
if not nKu:
d = self.sendGetID(host, port)
d.addCallback(sendVerifyWithNKu, host, port, filekey, offset,
length, meta)
return d
else:
s = SENDVERIFY(nKu, self.node, host, port, filekey, offset, length,
meta)
return s.deferred
def sendDelete(self, filekey, metakey, host, port, nKu=None):
def sendDeleteWithNKu(nKu, host, port, filekey, metakey):
return SENDDELETE(nKu, self.node, host, port, filekey,
metakey).deferred
if not nKu:
d = self.sendGetID(host, port)
d.addCallback(sendDeleteWithNKu, host, port, filekey, metakey)
return d
else:
return SENDDELETE(nKu, self.node, host, port, filekey,
metakey).deferred
"""
DHT single primitives (single call to single peer). These should probably
only be called for testing or bootstrapping (sendkFindNode can be used to
'connect' to the flud network via a gateway, for instance). Use the
recursive primitives for doing DHT ops.
"""
def sendkFindNode(self, host, port, key):
return SENDkFINDNODE(self.node, host, port, key).deferred
def sendkStore(self, host, port, key, val):
return SENDkSTORE(self.node, host, port, key, val).deferred
def sendkFindValue(self, host, port, key):
return SENDkFINDVALUE(self.node, host, port, key).deferred
"""
DHT recursive primitives (recursive calls to muliple peers)
"""
def kFindNode(self, key):
return kFindNode(self.node, key).deferred
def kStore(self, key, val):
return kStore(self.node, key, val).deferred
def kFindValue(self, key):
return kFindValue(self.node, key).deferred
| Python |
"""
ServerPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive server storage protocol
"""
import binascii, time, os, stat, httplib, gc, re, sys, logging, sets
import tempfile, tarfile
from StringIO import StringIO
from twisted.web.resource import Resource
from twisted.web import server, resource, client
from twisted.internet import reactor, threads, defer
from twisted.web import http
from twisted.python import failure
from flud.FludCrypto import FludRSA, hashstring, hashfile, generateRandom
import flud.TarfileUtils as TarfileUtils
from flud.fencode import fencode, fdecode
import BlockFile
from FludCommUtil import *
logger = logging.getLogger("flud.server.op")
loggerid = logging.getLogger("flud.server.op.id")
loggerstor = logging.getLogger("flud.server.op.stor")
loggerretr = logging.getLogger("flud.server.op.rtrv")
loggervrfy = logging.getLogger("flud.server.op.vrfy")
loggerdele = logging.getLogger("flud.server.op.dele")
loggerauth = logging.getLogger("flud.server.op.auth")
"""
These classes represent http requests received by this node, and the actions
taken to respond.
"""
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to refactor challengeResponse stuff so that all share this same
# code (put it in REQUEST obj). See if we can do the same for some of
# the other bits.
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: find everywhere we are sending longs and consider sending hex (or our
# own base-64) encoded instead
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should
# also consider Zooko's links in the parent to this post)
# XXX: can man-in-the-middle get us with http basic auth, i.e., a node (Bob)
# receives a store request from node Alice. Bob 'forwards' this request
# to another node, Charlie, posing as Alice. When the challenge comes
# back from Charlie, Bob relays it back to Alice, gets Alice's response,
# and uses that response to answer Charlie's challenge. Bob then pretends
# to store the data, while in reality the data is at Charlie (VERIFYs can
# be performed similarly).
# [This scenario appears legitimate, but it isn't problematic. Bob can
# only store at Charlie if Charlie trusts Bob. And if Bob trust Charlie
# so much that he is willing to stake his trust with Alice on Charlie's
# reliability, there shouldn't be anything wrong with allowing this. If
# Charlie goes down, Bob will go down (partly) with him, as far as trust
# is concerned].
# [should disable/remove CHALLENGE and GROUPID so that this only works
# for forwarding. Leaving CHALLENGE/GROUPID allows imposters. Limiting
# only to forwarding imposters.]
# XXX: Auth problem still exists. Http-auth allows servers to know that clients
# are legitimate, but clients don't know the legitimacy of servers. Proposal:
# all client requests are currently responded to with a challenge that is
# copied in the header and body. Instead, require that the server sign
# the url it was handed by the client, and return this in the body
# (challenge remains in the header). Now, the client can verify that the
# server is who it says it is. This is still somewhat vulnerable to replay
# (if the same url is repeated, which it can be), but this only matters if
# the client sends the exact same message again later to an imposter
# (imposters can only impersonate the server). Alternatively, could send
# a challenge as a param of the request. [I think this is fixed now?]
# XXX: disallow requests originating from self.
challengelength = 40 # XXX: is 40 bytes sufficient?
class ROOT(Resource):
    """
    Base resource for all flud server endpoints.

    Notes on parameters common to most requests:
    Ku_e - the public key RSA exponent (usually 65537L, but can be other
           values)
    Ku_n - the public key RSA modulus.
    port - the port that the requestor runs their fludserver on.

    All REQs will contain a public key ('Ku_e', 'Ku_n') from which nodeID
    can be derived, and a requestID ('reqID').  For the ops that don't
    require authentication (currently ID and dht ops), it may be
    convenient to also require nodeID (in this case, it is a good idea to
    check nodeID against Ku).

    Client Authentication works as follows: Each client request is
    responded to with a 401 response in the header, with a challenge in
    the header message and repeated in the body.  The client reissues the
    response, filling the 'username' field with the challenge-response
    and the 'password' field with groupIDu.  The client must compute the
    response to the challenge each time by examining this value.  The
    groupIDu never changes, so the client can simply send this value back
    with the response.  This implies some server-side state; since the
    server responds with a 401 and expects the client to send a new
    request with an Authorization header that contains the challenge
    response, the server has to keep some state for each client.  This
    state expires after a short time, and implies that clients must make
    requests to individual servers serially.

    A node's groupIDu is the same globally for each peer, so exposing it
    to one adversarial node means exposing it to all.  At first glance,
    this seems bad, but groupIDu is useless by itself -- in order to use
    it, a node must also be able to answer challenges based on Kr.  In
    other words, the security of groupIDu in this setting depends on Kr.
    """
    # XXX: web2 will let us do cool streaming things someday

    def __init__(self, fludserver):
        """
        All children should inherit.  Make sure to call super.__init__
        if you override __init__
        """
        Resource.__init__(self)
        self.fludserver = fludserver
        self.node = fludserver.node
        self.config = fludserver.node.config

    def getChild(self, name, request):
        """
        should override.
        """
        if name == "":
            return self
        return Resource.getChild(self, name, request)

    def render_GET(self, request):
        self.setHeaders(request)
        # NOTE(fix): closing tag was previously misspelled '</hrml>'
        return "<html>Flud</html>"

    def setHeaders(self, request):
        # common response headers for all flud server resources
        request.setHeader('Server','FludServer 0.1')
        request.setHeader('FludProtocol', PROTOCOL_VERSION)
class ID(ROOT):
    """ self identification / kad ping """
    def getChild(self, name, request):
        # any sub-path is served by this same resource
        return self

    def render_GET(self, request):
        """
        Just received a request to expose my identity.  Send public key
        (from which requestor can determine nodeID).
        Response codes: 200- OK (default)
                        204- No Content (returned in case of error or not
                             wanting to divulge ID)
                        400- Bad Request (missing params or bad
                             requesting ID)
        """
        self.setHeaders(request)
        try:
            # all four params must be present or requireParams raises
            required = ('nodeID', 'Ku_e', 'Ku_n', 'port')
            params = requireParams(request, required)
        except Exception, inst:
            msg = "%s in request received by ID" % inst.args[0]
            loggerid.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            loggerid.info("received ID request from %s..."
                    % params['nodeID'][:10])
            loggerid.info("returning ID response")
            #try:
            # rebuild the requestor's public key from its components
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            # the claimed nodeID must match the ID derived from the key
            if reqKu.id() != params['nodeID']:
                request.setResponseCode(http.BAD_REQUEST, "Bad Identity")
                return "requesting node's ID and public key do not match"
            host = getCanonicalIP(request.getClientIP())
            # record/refresh the requestor in our node tables
            updateNode(self.node.client, self.config, host,
                    int(params['port']), reqKu, params['nodeID'])
            return str(self.config.Ku.exportPublicKey())
            #except:
            #    msg = "can't return ID"
            #    loggerid.log(logging.WARN, msg)
            #    request.setResponseCode(http.NO_CONTENT, msg)
            #    return msg
class FILE(ROOT):
    """ data storage file operations: POST, GET, DELETE """

    def getChild(self, name, request):
        # only accept paths of the exact form /<filekey> (two prepath parts)
        if len(request.prepath) != 2:
            return Resource.getChild(self, name, request)
        return self

    def render_POST(self, request):
        """
        A request to store data via http upload.  The file to store is
        indicated by the URL path, e.g.
        POST http://server:port/a35cd1339766ef209657a7b
        Response codes: 200- OK (default)
                        400- Bad Request (missing params)
                        401- Unauthorized (ID hash, CHALLENGE, or
                             GROUPCHALLENGE failed)
        Each file fragment is stored with its storage key as the file name.
        The file fragment can be 'touched' (or use last access time if
        supported) each time it is verified or read, so that we have a way to
        record age (which also allows a purge strategy).  Files are
        reference-listed (reference count with owners) by the BlockFile object.
        """
        loggerstor.debug("file POST, %s", request.prepath)
        filekey = request.prepath[1]
        self.setHeaders(request)
        # StoreFile performs param checking, authentication, and the store
        return StoreFile(self.node, self.config, request, filekey).deferred

    def render_GET(self, request):
        """
        A request to retrieve data.  The file to retrieve is indicated by the
        URL path, e.g. GET http://server:port/a35cd1339766ef209657a7b
        Response codes: 200- OK (default)
                        400- Bad Request (missing params)
                        401- Unauthorized (ID hash, CHALLENGE, or
                             GROUPCHALLENGE failed)
        """
        loggerstor.debug("file GET, %s", request.prepath)
        filekey = request.prepath[1]
        self.setHeaders(request)
        return RetrieveFile(self.node, self.config, request, filekey).deferred

    def render_DELETE(self, request):
        """
        A request to delete data.  The file to delete is indicated by the URL
        path, e.g. DELETE http://server:port/a35cd1339766ef209657a7b
        Response codes: 200- OK (default)
                        400- Bad Request (missing params)
                        401- Unauthorized (ID hash, CHALLENGE, or
                             GROUPCHALLENGE failed, or nodeID doesn't own this
                             block)
        """
        loggerstor.debug("file DELETE, %s", request.prepath)
        filekey = request.prepath[1]
        self.setHeaders(request)
        return DeleteFile(self.node, self.config, request, filekey).deferred
class HASH(ROOT):
    """ verification of data via challenge-response """

    def getChild(self, name, request):
        return self

    def render_GET(self, request):
        """
        Just received a storage VERIFY request.
        Response codes: 200- OK (default)
                        400- Bad Request (missing params)
                        401- Unauthorized (ID hash, CHALLENGE, or
                             GROUPCHALLENGE failed)
        [VERIFY is the most important of the trust-building ops.  Issues:
        1) how often do we verify.
           a) each file
           A) each block of each file
        2) proxying hurts trust (see PROXY below)
        The answer to #1 is, ideally, we check every node who is storing for us
        every quanta.  But doing the accounting for keeping track of who is
        storing for us is heavy, so instead we just want to check all of our
        files and hope that gives sufficient coverage, on average, of the nodes
        we store to.  But we have to put some limits on this, and the limits
        can't be imposed by the sender (since a greedy sender can just modify
        this), so peer nodes have to do some throttling of these types of
        requests.  But such throttling is tricky, as the requestor must
        decrease trust when VERIFY ops fail.  Also, since we will just randomly
        select from our files, such a throttling scheme will reduce our
        accuracy as we store more and more data (we can only verify a smaller
        percentage).  Peers could enforce limits as a ratio of total data
        stored for a node, but then the peers could act maliciously by
        artificially lowering this number.  In summary, if we don't enforce
        limits, misbehaving nodes could flood VERIFY requests resulting in
        effective DOS attack.  If we do enforce limits, we have to watch out
        for trust wars where both nodes end up destroying all trust between
        them.  Possible answer: set an agreed-upon threshold a priori.  This
        could be a hardcoded limit, or (better) negotiated between node pairs.
        If the requestor goes over this limit, he should understand that his
        trust will be decreased by requestee.  If he doesn't understand this,
        his trust *should* be decreased, and if he decreases his own trust in
        us as well, we don't care -- he's misbehaving.]
        """
        loggervrfy.debug("file VERIFY, %s", request.prepath)
        if len(request.prepath) != 2:
            # XXX: add support for sending multiple verify ops in a single
            # request
            request.setResponseCode(http.BAD_REQUEST, "expected filekey")
            return "expected file/[filekey], got %s" % '/'.join(request.prepath)
        filekey = request.prepath[1]
        self.setHeaders(request)
        return VerifyFile(self.node, self.config, request, filekey).deferred
class StoreFile(object):
    """
    Handles an authenticated STORE (HTTP POST upload) of a file block, or of
    a tarball aggregating several small blocks.  The operation's result
    deferred is exposed as self.deferred.
    """

    def __init__(self, node, config, request, filekey):
        self.node = node
        self.config = config
        self.deferred = self.storeFile(request, filekey)

    def storeFile(self, request, filekey):
        # validate required params, then authenticate the requestor before
        # doing any storage work
        try:
            required = ('size', 'Ku_e', 'Ku_n', 'port')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by STORE"
            loggerstor.info(msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            host = getCanonicalIP(request.getClientIP())
            port = int(params['port'])
            loggerstor.info("received STORE request from %s:%s", host, port)
            requestedSize = int(params['size'])
            # reconstruct the requestor's public key from its components
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            nodeID = reqKu.id()
            #if requestedSize > 10:
            #    msg = "unwilling to enter into storage relationship"
            #    request.setResponseCode(http.PAYMENT_REQUIRED, msg)
            #    return msg
            return authenticate(request, reqKu, host, port,
                    self.node.client, self.config,
                    self._storeFile, request, filekey, reqKu, nodeID)

    def _storeFile(self, request, filekey, reqKu, nodeID):
        # Perform the actual store after successful authentication.
        # [XXX: memory management is not happy here.  might want to look at
        #  request.registerProducer().  Otherwise, might have to scrap
        #  using the STORE(ROOT(RESOURCE)) deal in favor of
        #  producer/consumer model for STORE ops
        #  (http://itamarst.org/writings/OSCON03/twisted_internet-108.html).
        #  Another option might include subclassing web.resource.Resource
        #  and making this derive from that... Or might be web.Site that
        #  needs to be subclassed... Or maybe web.site.Request -
        #  web.site.Request.process()?  Request seems doubly-bad: perhaps a
        #  copy is made somewhere, because memory mushrooms to 2x big
        #  upload, then goes back down to around 1x.
        #  [update: This should be fixable in twisted.web2, but I am informed
        #  that in the current version, there is no workaround]]
        # get the data to a tmp file
        loggerstor.debug("writing store data to tmpfile")
        tmpfile = tempfile.mktemp(dir=self.config.storedir)
        tarball = os.path.join(self.config.storedir,reqKu.id()+".tar")
        # rename and/or prepend the data appropriately
        tmpTarMode = None
        if filekey[-4:] == ".tar":
            tmpfile = tmpfile+".tar"
            tmpTarMode = 'r'
            targetTar = tarball
        elif filekey[-7:] == ".tar.gz":
            tmpfile = tmpfile+".tar.gz"
            tmpTarMode = 'r:gz'
            targetTar = tarball+".gz"
        loggerstor.debug("tmpfile is %s" % tmpfile)
        # XXX: if the server supports both .tar and tar.gz, this is wrong; we'd
        # need to check *both* for already existing dudes instead of just
        # choosing one
        if os.path.exists(tarball+'.gz'):
            tarball = (tarball+'.gz', 'r:gz')
        elif os.path.exists(tarball):
            tarball = (tarball, 'r')
        else:
            tarball = None
        loggerstor.debug("tarball is %s" % str(tarball))
        data = request.args.get('filename')[0] # XXX: file in mem! need web2.
        # XXX: bad blocking stuff here
        f = open(tmpfile, 'wb')
        f.write(data)
        f.close()
        ftype = os.popen('file %s' % tmpfile)
        loggerstor.debug("ftype of %s is %s" % (tmpfile, ftype.read()))
        ftype.close()
        if tmpTarMode:
            # client sent a tarball
            loggerstor.debug("about to chksum %s" % tmpfile)
            # verify every member's name is the hash of its content (CAS)
            digests = TarfileUtils.verifyHashes(tmpfile, '.meta')
            loggerstor.debug("chksum returned %s" % digests)
            ftype = os.popen('file %s' % tmpfile)
            loggerstor.debug("ftype of %s is %s" % (tmpfile, ftype.read()))
            ftype.close()
            if not digests:
                msg = "Attempted to use non-CAS storage key(s) for" \
                        " STORE tarball"
                loggerstor.debug(msg)
                os.remove(tmpfile)
                request.setResponseCode(http.CONFLICT, msg)
                return msg
            # XXX: add digests to a db of already stored files (for quick
            # lookup)
            if tarball:
                # merge the uploaded tarball into the existing one,
                # dropping duplicate members first
                tarname, tarnameMode = tarball
                loggerstor.debug("concatenating tarfiles %s and %s"
                        % (tarname, tmpfile))
                f1 = tarfile.open(tarname, tarnameMode)
                f2 = tarfile.open(tmpfile, tmpTarMode)
                f1names = f1.getnames()
                f2names = f2.getnames()
                f1.close()
                f2.close()
                dupes = [f for f in f1names if f in f2names]
                TarfileUtils.delete(tmpfile, dupes)
                ftype = os.popen('file %s' % tarname)
                loggerstor.debug("ftype of %s is %s" % (tarname, ftype.read()))
                ftype.close()
                TarfileUtils.concatenate(tarname, tmpfile)
                ftype = os.popen('file %s' % tarname)
                loggerstor.debug("ftype of %s is %s" % (tarname, ftype.read()))
                ftype.close()
            else:
                # no existing tarball for this requestor; just adopt it
                loggerstor.debug("saving %s as tarfile %s" % (tmpfile,
                        targetTar))
                os.rename(tmpfile, targetTar)
        else:
            # client sent regular file
            h = hashfile(tmpfile)
            if request.args.has_key('meta') and request.args.has_key('metakey'):
                metakey = request.args.get('metakey')[0]
                meta = request.args.get('meta')[0] # XXX: file in mem!
            else:
                metakey = None
                meta = None
            # enforce content-addressable storage: key must equal data hash
            if fencode(long(h, 16)) != filekey:
                msg = "Attempted to use non-CAS storage key for STORE data "
                msg += "(%s != %s)" % (filekey, fencode(long(h, 16)))
                os.remove(tmpfile)
                request.setResponseCode(http.CONFLICT, msg)
                return msg
            fname = os.path.join(self.config.storedir, filekey)
            if os.path.exists(fname):
                # block already stored: just attach this node's metadata
                loggerstor.debug("adding metadata to %s" % fname)
                f = BlockFile.open(fname,'rb+')
                # NOTE(review): hasNode() is passed the hex string here but
                # an int elsewhere (DeleteFile) -- confirm which it expects.
                if not f.hasNode(nodeID):
                    f.addNode(int(nodeID,16), {metakey: meta})
                f.close()
                os.remove(tmpfile)
            else:
                # NOTE(review): relative path -- probably should be joined
                # with self.config.storedir like the other tarball paths.
                if os.path.exists(nodeID+".tar"):
                    # XXX: need to do something with metadata!
                    print "XXX: need to do something with metadata for tar!"
                    # NOTE(review): 'tarname' is unbound in this branch
                    # (only set in the tmpTarMode path above) -- this line
                    # would raise NameError if reached; confirm.
                    tarball = tarfile.open(tarname, 'r')
                    if fname in tarball.getnames():
                        loggerstor.debug("%s already stored in tarball" % fname)
                        # if the file is already in the corresponding tarball,
                        # update its timestamp and return success.
                        loggerstor.debug("%s already stored" % filekey)
                        # XXX: update timestamp for filekey in tarball
                        return "Successful STORE"
                    else:
                        loggerstor.debug("tarball for %s, but %s not in tarball"
                                % (nodeID,fname))
                # NOTE(review): 'tarname' unbound here too; only evaluated
                # when len(data) < 8192 short-circuits to True -- confirm.
                if len(data) < 8192 and fname != tarname: #XXX: magic # (blk sz)
                    # If the file is small, move it into the appropriate
                    # tarball.  Note that this code is unlikely to ever be
                    # executed if the client is an official flud client, as
                    # they do the tarball aggregation thing already, and all
                    # tarballs will be > 8192.  This is, then, really just
                    # defensive coding -- clients aren't required to implement
                    # that tarball aggregation strategy.  And it is really only
                    # useful for filesystems with inefficient small file
                    # storage.
                    loggerstor.debug("moving small file '%s' into tarball"
                            % fname)
                    if not os.path.exists(tarname):
                        tarball = tarfile.open(tarname, 'w')
                    else:
                        tarball = tarfile.open(tarname, 'a')
                    # XXX: more bad blocking stuff
                    tarball.add(tmpfile, os.path.basename(fname))
                    if meta:
                        metafilename = "%s.%s.meta" % (os.path.basename(fname),
                                metakey)
                        loggerstor.debug("adding metadata file to tarball %s"
                                % metafilename)
                        metaio = StringIO(meta)
                        tinfo = tarfile.TarInfo(metafilename)
                        tinfo.size = len(meta)
                        tarball.addfile(tinfo, metaio)
                    tarball.close()
                    os.remove(tmpfile)
                else:
                    # store the file
                    loggerstor.debug("storing %s" % fname)
                    os.rename(tmpfile, fname)
                    BlockFile.convert(fname, (int(nodeID,16), {metakey: meta}))
        loggerstor.debug("successful STORE for %s" % filekey)
        return "Successful STORE"

    def _storeErr(self, error, request, msg):
        # authentication/store failure: report 401 (see XXX re: code choice)
        out = msg+": "+error.getErrorMessage()
        print "%s" % str(error)
        loggerstor.info(out)
        # update trust, routing
        request.setResponseCode(http.UNAUTHORIZED,
                "Unauthorized: %s" % msg) # XXX: wrong code
        return msg

    def render_GET(self, request):
        # STORE is an upload; only POST carries the data
        request.setResponseCode(http.BAD_REQUEST, "Bad Request")
        return "STORE request must be sent using POST"
class RetrieveFile(object):
    """
    Handles an authenticated RETRIEVE (HTTP GET) of a stored block,
    optionally multiplexing the block's metadata into a Multipart/Related
    response.  The operation's result deferred is exposed as self.deferred.
    """

    def __init__(self, node, config, request, filekey):
        self.node = node
        self.config = config
        self.deferred = self.retrieveFile(request, filekey)

    def retrieveFile(self, request, filekey):
        # validate required params, then authenticate before sending data
        try:
            required = ('Ku_e', 'Ku_n', 'port')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by RETRIEVE"
            loggerretr.log(logging.INFO, msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            host = getCanonicalIP(request.getClientIP())
            port = int(params['port'])
            loggerretr.info("received RETRIEVE request for %s from %s:%s...",
                    request.path, host, port)
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            if request.args.has_key('metakey'):
                returnMeta = request.args['metakey']
                # NOTE(review): request.args values are lists, so comparing
                # to the string 'True' can never match; returnMeta is then
                # left as the (truthy) list.  Probably meant
                # request.args['metakey'][0] -- confirm.
                if returnMeta == 'True':
                    returnMeta = True
            else:
                returnMeta = True
            return authenticate(request, reqKu, host, int(params['port']),
                    self.node.client, self.config,
                    self._sendFile, request, filekey, reqKu, returnMeta)

    def _sendFile(self, request, filekey, reqKu, returnMeta):
        # Stream the requested block (and optionally its metadata parts) to
        # the request; falls back to the requestor's aggregation tarball
        # when no standalone block file exists.
        fname = os.path.join(self.config.storedir,filekey)
        loggerretr.debug("reading file data from %s" % fname)
        # XXX: make sure requestor owns the file?
        if returnMeta:
            loggerretr.debug("returnMeta = %s" % returnMeta)
            request.setHeader('Content-type', 'Multipart/Related')
            rand_bound = binascii.hexlify(generateRandom(13))
            request.setHeader('boundary', rand_bound)
        if not os.path.exists(fname):
            # check for tarball for originator
            tarball = os.path.join(self.config.storedir,reqKu.id()+".tar")
            tarballs = []
            if os.path.exists(tarball+'.gz'):
                tarballs.append((tarball+'.gz', 'r:gz'))
            if os.path.exists(tarball):
                tarballs.append((tarball, 'r'))
            loggerretr.debug("tarballs = %s" % tarballs)
            # XXX: does this work? does it close both tarballs if both got
            # opened?
            for tarball, openmode in tarballs:
                tar = tarfile.open(tarball, openmode)
                try:
                    tinfo = tar.getmember(filekey)
                    returnedMeta = False
                    if returnMeta:
                        loggerretr.debug("tar returnMeta %s" % filekey)
                        try:
                            # companion metadata members are named
                            # <filekey>.<metakey>.meta
                            metas = [f for f in tar.getnames()
                                    if f[:len(filekey)] == filekey
                                    and f[-4:] == 'meta']
                            loggerretr.debug("tar returnMetas=%s" % metas)
                            for m in metas:
                                minfo = tar.getmember(m)
                                H = []
                                H.append("--%s" % rand_bound)
                                H.append("Content-Type: "
                                        "Application/octet-stream")
                                H.append("Content-ID: %s" % m)
                                H.append("Content-Length: %d" % minfo.size)
                                H.append("")
                                H = '\r\n'.join(H)
                                request.write(H)
                                request.write('\r\n')
                                tarm = tar.extractfile(minfo)
                                loggerretr.debug("successful metadata"
                                        " RETRIEVE (from %s)" % tarball)
                                # XXX: bad blocking stuff
                                while 1:
                                    buf = tarm.read()
                                    if buf == "":
                                        break
                                    request.write(buf)
                                request.write('\r\n')
                                tarm.close()
                            # multipart header for the file part itself
                            H = []
                            H.append("--%s" % rand_bound)
                            H.append("Content-Type: Application/octet-stream")
                            H.append("Content-ID: %s" % filekey)
                            H.append("Content-Length: %d" % tinfo.size)
                            H.append("")
                            H = '\r\n'.join(H)
                            request.write(H)
                            request.write('\r\n')
                            returnedMeta = True
                        except:
                            # couldn't find any metadata, just return normal
                            # file
                            loggerretr.debug("no metadata found")
                            pass
                    # XXX: bad blocking stuff
                    tarf = tar.extractfile(tinfo)
                    # XXX: update timestamp on tarf in tarball
                    loggerretr.debug("successful RETRIEVE (from %s)"
                            % tarball)
                    # XXX: bad blocking stuff
                    while 1:
                        buf = tarf.read()
                        if buf == "":
                            break
                        request.write(buf)
                    tarf.close()
                    tar.close()
                    if returnedMeta:
                        # closing multipart boundary
                        T = []
                        T.append("")
                        T.append("--%s--" % rand_bound)
                        T.append("")
                        T = '\r\n'.join(T)
                        request.write(T)
                    return ""
                except:
                    # member not in this tarball (or read failed); try next
                    tar.close()
            # NOTE(review): no explicit return after the 404 -- the method
            # falls off the end and returns None; confirm callers tolerate.
            request.setResponseCode(http.NOT_FOUND, "Not found: %s" % filekey)
            request.write("Not found: %s" % filekey)
        else:
            f = BlockFile.open(fname,"rb")
            loggerretr.log(logging.INFO, "successful RETRIEVE for %s" % filekey)
            meta = f.meta(int(reqKu.id(),16))
            if returnMeta and meta:
                loggerretr.debug("returnMeta %s" % filekey)
                loggerretr.debug("returnMetas=%s" % meta)
                for m in meta:
                    H = []
                    H.append("--%s" % rand_bound)
                    H.append("Content-Type: Application/octet-stream")
                    H.append("Content-ID: %s.%s.meta" % (filekey, m))
                    H.append("Content-Length: %d" % len(meta[m]))
                    H.append("")
                    H.append(meta[m])
                    H = '\r\n'.join(H)
                    request.write(H)
                    request.write('\r\n')
            # NOTE(review): rand_bound is only bound when returnMeta is
            # truthy; this unconditional header write would NameError for a
            # false returnMeta (rare given the default above) -- confirm.
            H = []
            H.append("--%s" % rand_bound)
            H.append("Content-Type: Application/octet-stream")
            H.append("Content-ID: %s" % filekey)
            H.append("Content-Length: %d" % f.size())
            H.append("")
            H = '\r\n'.join(H)
            request.write(H)
            request.write('\r\n')
            # XXX: bad blocking stuff
            while 1:
                buf = f.read()
                if buf == "":
                    break
                request.write(buf)
            f.close()
            if returnMeta and meta:
                T = []
                T.append("")
                T.append("--%s--" % rand_bound)
                T.append("")
                request.write('\r\n'.join(T))
            return ""

    def _sendErr(self, error, request, msg):
        # authentication or transfer failure: report 401 and finish
        out = msg+": "+error.getErrorMessage()
        loggerretr.log(logging.INFO, out )
        # update trust, routing
        request.setResponseCode(http.UNAUTHORIZED, "Unauthorized: %s" % msg)
        request.write(msg)
        request.finish()
class VerifyFile(object):
    """
    Handles an authenticated VERIFY: hash a requested [offset, offset+length)
    slice of a stored block and return the hash, proving we still hold the
    data.  May also piggyback a metadata update.  The operation's result
    deferred is exposed as self.deferred.
    """

    def __init__(self, node, config, request, filekey):
        self.node = node
        self.config = config
        self.deferred = self.verifyFile(request, filekey)

    isLeaf = True

    def verifyFile(self, request, filekey):
        """
        A VERIFY contains a file[fragment]id, an offset, and a length.
        When this message is received, the given file[fragment] should be
        accessed and its bytes, from offset to offset+length, should be
        sha256 hashed, and the result returned.
        [in theory, we respond to all VERIFY requests.  In practice, however,
        we should probably do some throttling of responses to prevent DOS
        attacks and probing.  Or, maybe require that the sender encrypt the
        challenge with their private key...]
        [should also enforce some idea of reasonableness on length of bytes
        to verify]
        """
        try:
            required = ('Ku_e', 'Ku_n', 'port', 'offset', 'length')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by VERIFY"
            loggervrfy.log(logging.INFO, msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            loggervrfy.debug("BAD REQUEST")
            return msg
        else:
            host = getCanonicalIP(request.getClientIP())
            port = int(params['port'])
            loggervrfy.log(logging.INFO,
                    "received VERIFY request from %s:%s...", host, port)
            if 'meta' in request.args:
                # optional piggybacked metadata update
                params['metakey'] = request.args['metakey'][0]
                params['meta'] = fdecode(request.args['meta'][0])
                # NOTE(review): logs to loggerretr rather than loggervrfy
                loggerretr.info("VERIFY contained meta field with %d chars"
                        % len(params['meta']))
                meta = (params['metakey'], params['meta'])
            else:
                meta = None
            offset = int(params['offset'])
            length = int(params['length'])
            # refuse path-traversal attempts in the filekey
            paths = [p for p in filekey.split(os.path.sep) if p != '']
            if len(paths) > 1:
                msg = "Bad request:"\
                        " filekey contains illegal path seperator tokens."
                loggerretr.debug(msg)
                request.setResponseCode(http.BAD_REQUEST, msg)
                return msg
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            nodeID = reqKu.id()
            return authenticate(request, reqKu, host, port,
                    self.node.client, self.config,
                    self._sendVerify, request, filekey, offset, length, reqKu,
                    nodeID, meta)

    def _sendVerify(self, request, filekey, offset, length, reqKu, nodeID,
            meta):
        # Hash the requested slice from either the standalone blockfile or
        # the requestor's aggregation tarball; apply metadata updates.
        fname = os.path.join(self.config.storedir,filekey)
        loggervrfy.debug("request for %s" % fname)
        if os.path.exists(fname):
            loggervrfy.debug("looking in regular blockfile for %s" % fname)
            if meta:
                # open read/write so metadata can be added below
                f = BlockFile.open(fname, 'rb+')
            else:
                f = BlockFile.open(fname, 'rb')
        else:
            # check for tarball for originator
            loggervrfy.debug("checking tarball for %s" % fname)
            tarballs = []
            tarballbase = os.path.join(self.config.storedir, reqKu.id())+".tar"
            if os.path.exists(tarballbase+".gz"):
                tarballs.append((tarballbase+".gz", 'r:gz'))
            if os.path.exists(tarballbase):
                tarballs.append((tarballbase, 'r'))
            loggervrfy.debug("tarballs is %s" % tarballs)
            for tarball, openmode in tarballs:
                loggervrfy.debug("looking in tarball %s..." % tarball)
                tar = tarfile.open(tarball, openmode)
                try:
                    tarf = tar.extractfile(filekey)
                    tari = tar.getmember(filekey)
                    # XXX: update timestamp on tarf in tarball
                    fsize = tari.size
                    if offset > fsize or (offset+length) > fsize:
                        # XXX: should limit length
                        loggervrfy.debug("VERIFY response failed (from %s):"
                                " bad offset/length" % tarball)
                        msg = "Bad request: bad offset/length in VERIFY"
                        request.setResponseCode(http.BAD_REQUEST, msg)
                        return msg
                    # XXX: could avoid seek/read if length == 0
                    tarf.seek(offset)
                    # XXX: bad blocking read
                    data = tarf.read(length)
                    tarf.close()
                    if meta:
                        mfname = "%s.%s.meta" % (filekey, meta[0])
                        loggervrfy.debug("looking for %s" % mfname)
                        if mfname in tar.getnames():
                            # make sure that the data is the same, if not,
                            # remove it and re-add it
                            tarmf = tar.extractfile(mfname)
                            # XXX: bad blocking read
                            stored_meta = tarmf.read()
                            tarmf.close()
                            if meta[1] != stored_meta:
                                # stale metadata: delete and re-add (gz
                                # tarballs must be unzipped to append)
                                loggervrfy.debug("updating tarball"
                                        " metadata for %s.%s"
                                        % (filekey, meta[0]))
                                tar.close()
                                TarfileUtils.delete(tarball, mfname)
                                if openmode == 'r:gz':
                                    tarball = TarfileUtils.gunzipTarball(
                                            tarball)
                                tar = tarfile.open(tarball, 'a')
                                metaio = StringIO(meta[1])
                                tinfo = tarfile.TarInfo(mfname)
                                tinfo.size = len(meta[1])
                                tar.addfile(tinfo, metaio)
                                tar.close()
                                if openmode == 'r:gz':
                                    tarball = TarfileUtils.gzipTarball(
                                            tarball)
                            else:
                                loggervrfy.debug("no need to update tarball"
                                        " metadata for %s.%s"
                                        % (filekey, meta[0]))
                        else:
                            # add it
                            loggervrfy.debug("adding tarball metadata"
                                    " for %s.%s" % (filekey, meta[0]))
                            tar.close()
                            if openmode == 'r:gz':
                                tarball = TarfileUtils.gunzipTarball(tarball)
                            tar = tarfile.open(tarball, 'a')
                            metaio = StringIO(meta[1])
                            tinfo = tarfile.TarInfo(mfname)
                            tinfo.size = len(meta[1])
                            tar.addfile(tinfo, metaio)
                            tar.close()
                            if openmode == 'r:gz':
                                tarball = TarfileUtils.gzipTarball(
                                        tarball)
                    tar.close()
                    hash = hashstring(data)
                    loggervrfy.info("successful VERIFY (from %s)" % tarball)
                    return hash
                except:
                    # member not in this tarball; close and try the next
                    tar.close()
            loggervrfy.debug("requested file %s doesn't exist" % fname)
            msg = "Not found: not storing %s" % filekey
            request.setResponseCode(http.NOT_FOUND, msg)
            return msg
        # make sure request is reasonable
        fsize = os.stat(fname)[stat.ST_SIZE]
        if offset > fsize or (offset+length) > fsize:
            # XXX: should limit length
            loggervrfy.debug("VERIFY response failed (bad offset/length)")
            msg = "Bad request: bad offset/length in VERIFY"
            request.setResponseCode(http.BAD_REQUEST, msg)
            return msg
        else:
            # XXX: blocking
            # XXX: could avoid seek/read if length == 0 (noop for meta update)
            f.seek(offset)
            data = f.read(length)
            if meta:
                loggervrfy.debug("adding metadata for %s.%s"
                        % (fname, meta[0]))
                f.addNode(int(nodeID, 16) , {meta[0]: meta[1]})
            # XXX: blocking
            f.close()
            hash = hashstring(data)
            loggervrfy.debug("returning VERIFY")
            return hash

    def _sendErr(self, error, request, msg):
        # authentication failure: report 401 and finish the request
        out = "%s:%s" % (msg, error.getErrorMessage())
        loggervrfy.info(out)
        # update trust, routing
        request.setResponseCode(http.UNAUTHORIZED, "Unauthorized: %s" % msg)
        request.write(msg)
        request.finish()
class DeleteFile(object):
    """
    Handles an authenticated DELETE: removes this requestor's ownership /
    metadata reference from a stored block, deleting the block itself when
    no owners remain.  The operation's result deferred is self.deferred.
    """

    def __init__(self, node, config, request, filekey):
        self.node = node
        self.config = config
        self.deferred = self.deleteFile(request, filekey)

    def deleteFile(self, request, filekey):
        # validate required params, then authenticate before deleting
        try:
            required = ('Ku_e', 'Ku_n', 'port', 'metakey')
            params = requireParams(request, required)
        except Exception, inst:
            msg = inst.args[0] + " in request received by DELETE"
            loggerdele.log(logging.INFO, msg)
            request.setResponseCode(http.BAD_REQUEST, "Bad Request")
            return msg
        else:
            host = getCanonicalIP(request.getClientIP())
            port = int(params['port'])
            loggerdele.debug("received DELETE request for %s from %s:%s"
                    % (request.path, host, port))
            reqKu = {}
            reqKu['e'] = long(params['Ku_e'])
            reqKu['n'] = long(params['Ku_n'])
            reqKu = FludRSA.importPublicKey(reqKu)
            nodeID = reqKu.id()
            metakey = params['metakey']
            return authenticate(request, reqKu, host, port,
                    self.node.client, self.config,
                    self._deleteFile, request, filekey, metakey, reqKu, nodeID)

    def _deleteFile(self, request, filekey, metakey, reqKu, reqID):
        # Remove the block (or this requestor's reference) from either the
        # standalone blockfile or the requestor's aggregation tarball.
        fname = os.path.join(self.config.storedir, filekey)
        loggerdele.debug("reading file data from %s" % fname)
        if not os.path.exists(fname):
            # check for tarball for originator
            tarballs = []
            tarballbase = os.path.join(self.config.storedir, reqKu.id())+".tar"
            if os.path.exists(tarballbase+".gz"):
                tarballs.append((tarballbase+".gz", 'r:gz'))
            if os.path.exists(tarballbase):
                tarballs.append((tarballbase, 'r'))
            for tarball, openmode in tarballs:
                mfilekey = "%s.%s.meta" % (filekey, metakey)
                loggerdele.debug("opening %s, %s for delete..."
                        % (tarball, openmode))
                ftype = os.popen('file %s' % tarball)
                loggerdele.debug("ftype of %s is %s" % (tarball, ftype.read()))
                ftype.close()
                tar = tarfile.open(tarball, openmode)
                mnames = [n for n in tar.getnames()
                        if n[:len(filekey)] == filekey]
                tar.close()
                if len(mnames) > 2:
                    # other metadata entries remain: drop only this metakey
                    deleted = TarfileUtils.delete(tarball, mfilekey)
                else:
                    # last reference: drop the block and its metadata
                    deleted = TarfileUtils.delete(tarball, [filekey, mfilekey])
                if deleted:
                    loggerdele.info("DELETED %s (from %s)" % (deleted, tarball))
                    return ""
            # NOTE(review): no explicit return after the 404 -- the method
            # falls off the end and returns None; confirm callers tolerate.
            request.setResponseCode(http.NOT_FOUND, "Not found: %s" % filekey)
            request.write("Not found: %s" % filekey)
        else:
            f = BlockFile.open(fname,"rb+")
            nID = int(reqID, 16)
            if f.hasNode(nID):
                # remove this node/metakey from owning this file block
                f.delNode(nID, metakey)
                if f.emptyNodes():
                    # if this was the only owning node, delete it
                    f.close()
                    os.remove(fname)
            # NOTE(review): f.close() can be reached a second time after the
            # removal above -- confirm BlockFile tolerates double close.
            f.close()
            loggerdele.debug("returning DELETE response")
            return ""

    def _sendErr(self, error, request, msg):
        # authentication failure: report 401 and finish the request
        out = msg+": "+error.getErrorMessage()
        loggerdele.log(logging.INFO, out )
        # update trust, routing
        request.setResponseCode(http.UNAUTHORIZED, "Unauthorized: %s" % msg)
        request.write(msg)
        request.finish()
class PROXY(ROOT):
    """
    This is a special request which wraps another request.  If received,
    the node is required to forward the request and keep state to re-wrap when
    the response returns.
    This is useful for several reasons:
    1) insert a bit of anonymity into VERIFY ops, so that a malicious node
       can't know to do an early purge data for a missing node.
    2) NAT'd boxes will need to use relays for STORE, VERIFY, and RETRIEVE
       (since all those ops cause the receiver to make a connection back)
    [There must be an incentive for nodes to offer PROXY (trust), otherwise
    it is advantageous to turn it off.]
    [disadvantages of proxy: it hurts trust.  How do we know that a bad (trust
    decreasing) op really was caused by the originator?  Couldn't the failure
    be caused by one of the proxies?]
    [Should consider using GnuNet's "Excess-Based Economic Model" where each
    request contains a priority which 'spends' some of the trust at the
    requestee when resources are scarce.  In this model, the proxying node[s]
    charge a fee on the priority, reducing it by a small amount as they
    forward the request.]
    """
    # XXX: needs to be RESTified when implemented
    isLeaf = True

    def render_GET(self, request):
        # placeholder: proxying is not implemented yet
        self.setHeaders(request)
        result = "NOT YET IMPLEMENTED"
        return result
def authenticate(request, reqKu, host, port, client, config, callable,
        *callargs):
    """
    Challenge/response authentication gate for all primitive operations.

    On a first (credential-less) request, responds 401 with an encrypted
    challenge (see sendChallenge).  On the retried request, the HTTP
    basic-auth 'user' field must carry the decrypted challenge and the
    'password' field the group challenge response; if both check out,
    ``callable(*callargs)`` is invoked and its result returned.
    """
    # 1- make sure that reqKu hashes to reqID
    # 2- send a challenge/groupchallenge to reqID (encrypt with reqKu)
    challengeResponse = request.getUser()
    groupResponse = request.getPassword()
    if not challengeResponse or not groupResponse:
        loggerauth.info("returning challenge for request from %s:%d" \
                % (host, port))
        return sendChallenge(request, reqKu, config.nodeID)
    else:
        if getChallenge(challengeResponse):
            # one-shot: a given challenge can only be answered once
            expireChallenge(challengeResponse)
            if groupResponse == hashstring(
                    str(reqKu.exportPublicKey())
                    +str(config.groupIDr)):
                # both challenges passed: refresh peer state, run the op
                updateNode(client, config, host, port, reqKu, reqKu.id())
                return callable(*callargs)
            else:
                err = "Group Challenge Failed"
                loggerauth.info(err)
                loggerauth.debug("Group Challenge received was %s" \
                        % groupResponse)
                # XXX: update trust, routing
                request.setResponseCode(http.FORBIDDEN, err);
                return err
        else:
            err = "Challenge Failed"
            loggerauth.info(err)
            loggerauth.debug("Challenge received was %s" % challengeResponse)
            # XXX: update trust, routing
            request.setResponseCode(http.FORBIDDEN, err);
            return err
def sendChallenge(request, reqKu, id):
    """
    Issue a 401 response carrying a fresh random challenge encrypted to the
    requestor's public key.  The challenge is prefixed with our raw nodeID
    (so the caller knows who issued it), recorded as outstanding, and
    auto-expired after 15*primitive_to seconds.
    """
    challenge = generateRandom(challengelength)
    while challenge[0] == '\x00':
        # make sure we have at least challengelength bytes: a leading zero
        # byte would be dropped when the challenge round-trips as a number,
        # shortening it
        challenge = generateRandom(challengelength)
    addChallenge(challenge)
    loggerauth.debug("unencrypted challenge is %s"
            % fencode(binascii.unhexlify(id)+challenge))
    echallenge = reqKu.encrypt(binascii.unhexlify(id)+challenge)[0]
    echallenge = fencode(echallenge)
    loggerauth.debug("echallenge = %s" % echallenge)
    # since challenges will result in a new req/resp pair being generated,
    # these could take much longer than the primitive_to.  Expire in
    # 15*primitive_to
    reactor.callLater(primitive_to*15, expireChallenge, challenge, True)
    resp = 'challenge = %s' % echallenge
    loggerauth.debug("resp = %s" % resp)
    request.setResponseCode(http.UNAUTHORIZED, echallenge)
    request.setHeader('Connection', 'close')
    request.setHeader('WWW-Authenticate', 'Basic realm="%s"' % 'default')
    request.setHeader('Content-Length', str(len(resp)))
    request.setHeader('Content-Type', 'text/html')
    request.setHeader('Pragma','claimreserve=5555') # XXX: this doesn't work
    return resp
outstandingChallenges = {}
def addChallenge(challenge):
    """Record a freshly issued challenge as outstanding (awaiting answer)."""
    loggerauth.debug("added challenge %s" % fencode(challenge))
    outstandingChallenges[challenge] = True
def expireChallenge(challenge, expired=False):
    """
    Drop a challenge from the outstanding set.

    The challenge may arrive fencoded (e.g. from a deferred callLater) or
    raw; decoding failures are ignored and the value used as-is.  `expired`
    only affects the log message (timed-out vs. answered/deleted).
    """
    try:
        challenge = fdecode(challenge)
    except:
        pass
    if challenge in outstandingChallenges:
        del outstandingChallenges[challenge]
        if expired:
            # XXX: should put these in an expired challenge list so that we
            # can send a more useful message on failure (must then do
            # expirations on expired list -- maybe better to just make
            # these expirations really liberal).
            loggerauth.debug("expired challenge %s" % fencode(challenge))
        else:
            loggerauth.debug("deleted challenge %s" % fencode(challenge))
def getChallenge(challenge):
    """
    Return the outstanding-challenge record for `challenge` (True when it
    is still pending), or None if unknown.  Accepts fencoded or raw input;
    decoding failures fall back to the raw value.
    """
    try:
        challenge = fdecode(challenge)
    except:
        pass
    return outstandingChallenges.get(challenge)
| Python |
"""
FludCommUtil.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Communications routines used by both client and server code.
"""
from twisted.web import client
from twisted.internet import reactor, defer
from twisted.python import failure
import binascii, httplib, logging, os, stat, random, socket
import inspect
from flud.FludExceptions import FludException
from flud.FludCrypto import FludRSA, generateRandom
from flud.HTTPMultipartDownloader import HTTPMultipartDownloader
"""
Some constants used by the Flud Protocol classes
"""
PROTOCOL_VERSION = '0.2'

# XXX: when things timeout, bad news.  Unintuitive exceptions spewed.  Make this
# small and fix all issues.
primitive_to = 3800 # default timeout for primitives
kprimitive_to = primitive_to/2 # default timeout for kademlia primitives
#kprimitive_to = 10 # default timeout for kademlia primitives
# NOTE(review): comment said "10-hr limit" but 3600 seconds is one hour --
# confirm which was intended.
transfer_to = 3600 # 10-hr limit on file transfers
MAXTIMEOUTS = 5 # number of times to retry after connection timeout failure
CONNECT_TO = 60      # connection timeout base (seconds, presumably) -- confirm
CONNECT_TO_VAR = 5   # random variance applied to CONNECT_TO, presumably

logger = logging.getLogger('flud.comm')
class BadCASKeyException(failure.DefaultException):
    """Failure for content-addressable-storage (CAS) key errors.
    NOTE(review): semantics inferred from the name; confirm at call sites."""
    pass

class NotFoundException(failure.DefaultException):
    """Failure indicating a requested resource was not found."""
    pass

class BadRequestException(failure.DefaultException):
    """Failure indicating a malformed or invalid client request."""
    pass
i = 0
"""
Some utility functions used by both client and server.
"""
def updateNodes(client, config, nodes):
    """Update this node's view of every node in the given sequence.

    Each entry is an (ip, port, nID, exported-public-key) tuple; each is
    handed to updateNode with its key imported as a FludRSA.
    """
    if nodes and not isinstance(nodes, (list, tuple)):
        raise TypeError("updateNodes must be called with node list, tuple,"
                " or kData dict")
    logger.debug("updateNodes(%s)" % nodes)
    for entry in nodes:
        peerhost, peerport, peerID = entry[0], entry[1], entry[2]
        peerKu = FludRSA.importPublicKey(entry[3])
        updateNode(client, config, peerhost, peerport, peerKu, peerID)
updateNodePendingGETID = {}
def updateNode(client, config, host, port, nKu=None, nID=None):
    """
    Updates this node's view of the given node.  This includes updating
    the known-nodes record, trust, and routing table information.

    If no public key (nKu) is given, it is obtained either from the local
    config (when nID is already known) or via a GETID request to the node,
    in which case this function is re-entered from the request's callback
    once the key arrives.

    @param client: protocol client used to issue GETID requests
    @param config: local node configuration (known nodes, routing table)
    @param host: the remote node's hostname/IP
    @param port: the remote node's port
    @param nKu: the remote node's public key (FludRSA), if known
    @param nID: the remote node's ID (long or hex string), if known
    """
    def updateNodeFail(failure, host, port):
        # errback for the GETID request: nothing to update, just log it
        logging.getLogger('flud').log(logging.INFO,
                "couldn't get nodeID from %s:%d: %s" % (host, port, failure))

    def callUpdateNode(nKu, client, config, host, port, nID):
        # callback for the GETID request: re-enter with the retrieved key
        return updateNode(client, config, host, port, nKu, nID)

    # normalize nID to a 64-digit hex string
    if isinstance(nID, long):
        nID = "%064x" % nID
    if nKu is None:
        #print "updateNode, no nKu"
        if nID is None:
            # neither key nor ID known: ask the node for its identity
            d = client.sendGetID(host, port)
            d.addCallback(callUpdateNode, client, config, host, port, nID)
            d.addErrback(updateNodeFail, host, port)
        else:
            #print "updateNode, no nKu but got a nID"
            if config.nodes.has_key(nID):
                # ID already known locally; reuse the stored public key
                return updateNode(client, config, host, port,
                        FludRSA.importPublicKey(config.nodes[nID]['Ku']), nID)
            elif updateNodePendingGETID.has_key(nID):
                # a GETID for this node is already in flight; don't duplicate
                pass
            else:
                #print "updateNode, sending GETID"
                updateNodePendingGETID[nID] = True
                d = client.sendGetID(host, port)
                d.addCallback(callUpdateNode, client, config, host, port, nID)
                d.addErrback(updateNodeFail, host, port)
    elif isinstance(nKu, FludRSA):
        #print "updateNode with nKu"
        if updateNodePendingGETID.has_key(nID):
            del updateNodePendingGETID[nID]
        if nID == None:
            nID = nKu.id()
        elif nID != nKu.id():
            # claimed ID doesn't match the key's actual ID
            raise ValueError("updateNode: given nID doesn't match given nKu."
                    " '%s' != '%s'" % (nID, nKu.id()))
            # XXX: looks like an imposter -- instead of raising, mark host:port
            # pair as bad (trust-- on host:port alone, since we don't know id).
        if config.nodes.has_key(nID) == False:
            config.addNode(nID, host, port, nKu)
        # XXX: trust
        # routing
        node = (host, port, long(nID, 16), nKu.exportPublicKey()['n'])
        replacee = config.routing.updateNode(node)
        #logger.info("knownnodes now: %s" % config.routing.knownNodes())
        #print "knownnodes now: %s" % config.routing.knownNodes()
        if replacee != None:
            # the routing bucket was full: probe the eviction candidate; if
            # it fails to answer, replaceNode swaps in the new node
            logging.getLogger('flud').info(
                    "determining if replacement in ktable is needed")
            s = SENDGETID(replacee[0], replacee[1])
            s.addErrback(replaceNode, config.routing, replacee, node)
    else:
        # nKu given but of the wrong type: log a stack trace and bail
        #print "updateNode nKu=%s, type=%s" % (nKu, type(nKu))
        logging.getLogger('flud').warn(
                "updateNode can't update without a public key or nodeID")
        frame = inspect.currentframe()
        # XXX: try/except here for debugging only
        try:
            stack = inspect.stack()
            for i in stack:
                print "from %s:%d" % (i[1], i[2])
        except:
            print "couldn't get stack trace"
        raise ValueError("updateNode needs an nKu of type FludRSA"
                " (received %s) or an nID of type long or str (received %s)"
                % (type(nKu), type(nID)))
    # XXX: should really make it impossible to call without one of these...
def replaceNode(error, routing, replacee, replacer):
    # Errback for the liveness probe issued in updateNode: the old node did
    # not answer, so evict it from the routing table in favor of the new one.
    routing.replaceNode(replacee, replacer)
    print "replaced node in ktable"
def requireParams(request, paramNames):
    """Extract required parameters from an HTTP request.

    @param request: request object whose C{args} dict maps a parameter name
        to a list of values (twisted.web style)
    @param paramNames: iterable of required parameter names
    @return: dict mapping each required name to its first value
    @raise Exception: if any named parameter is missing or empty
    """
    params = {}
    for name in paramNames:
        try:
            params[name] = request.args[name][0]
        # narrowed from a bare except: unrelated failures (e.g. a request
        # object with no .args at all) should surface, not be reported as
        # a missing parameter
        except (KeyError, IndexError):
            raise Exception("missing parameter '"+name+"'") #XXX: use cust Exc
    return params
def getCanonicalIP(IP):
    """Return the canonical fully-qualified hostname for IP.

    'localhost' and '127.0.0.1' are mapped to the canonical local hostname
    (mostly useful when multiple clients run on the same host); anything
    else is passed through socket.getfqdn.
    # XXX: could use gethostbyname to get IP addy instead.
    """
    if IP in ('127.0.0.1', 'localhost'):
        return socket.getfqdn()
    return socket.getfqdn(IP)
def getPageFactory(url, contextFactory=None, *args, **kwargs):
    """
    Start an HTTP[S] request for url and return the client factory; callers
    chain on factory.deferred for the result.  The connect timeout is
    jittered around CONNECT_TO so that peers don't retry in lockstep.
    """
    def failedConnect(reason, factory):
        # errback: if the factory ever got an HTTP status, the connection
        # itself succeeded; either way pass the failure down the chain
        try:
            i = factory.status
            return reason
        except:
            pass
        #logger.warn("couldn't connect to %s:%d in getPageFactory: %s"
        #		% (factory.host, factory.port, reason))
        #logger.warn("state of factory is %s" % factory)
        #logger.warn("dir() of factory is %s" % dir(factory))
        return reason
    if len(url) >= 16384:
        # fail fast rather than hit twisted's server-side limit
        raise ValueError(
                "Too much data sent: twisted server doesn't appear to"
                " support urls longer than 16384")
    scheme, host, port, path = client._parse(url)
    factory = client.HTTPClientFactory(url, *args, **kwargs)
    factory.deferred.addErrback(failedConnect, factory)
    # jittered connect timeout: CONNECT_TO-CONNECT_TO_VAR .. CONNECT_TO+1
    to = CONNECT_TO+random.randrange(2+CONNECT_TO_VAR)-CONNECT_TO_VAR
    if scheme == 'https':
        from twisted.internet import ssl
        if contextFactory is None:
            contextFactory = ssl.ClientContextFactory()
        reactor.connectSSL(host, port, factory, contextFactory)
    else:
        reactor.connectTCP(host, port, factory, timeout=to)
    return factory
def _dlPageFactory(url, target, factoryClass, contextFactory=None, timeout=None,
        *args, **kwargs):
    # Common implementation for the download*PageFactory helpers: connect to
    # url and hand the response off to factoryClass(url, target, ...).
    # Returns the factory; the result arrives on factory.deferred.
    scheme, host, port, path = client._parse(url)
    if timeout != None:
        # NOTE(review): the per-request timeout parameter is currently
        # ignored; only the connect timeout below is applied.
        # XXX: do something like http://twistedmatrix.com/pipermail/twisted-python/2003-August/005504.html
        pass
    factory = factoryClass(url, target, *args, **kwargs)
    # jittered connect timeout, same scheme as getPageFactory
    to = CONNECT_TO+random.randrange(2+CONNECT_TO_VAR)-CONNECT_TO_VAR
    if scheme == 'https':
        from twisted.internet import ssl
        if contextFactory is None:
            contextFactory = ssl.ClientContextFactory()
        reactor.connectSSL(host, port, factory, contextFactory)
    else:
        reactor.connectTCP(host, port, factory, timeout=to)
    return factory
def downloadPageFactory(url, file, contextFactory=None, timeout=None,
        *args, **kwargs):
    # Download the page at url into the given file, returning the factory
    # (completion signaled on factory.deferred).
    return _dlPageFactory(url, file, client.HTTPDownloader, contextFactory,
            timeout, *args, **kwargs)
def multipartDownloadPageFactory(url, dir, contextFactory=None, timeout=None,
        *args, **kwargs):
    # Download a multipart response at url into the given directory,
    # returning the factory (completion signaled on factory.deferred).
    return _dlPageFactory(url, dir, HTTPMultipartDownloader, contextFactory,
            timeout, *args, **kwargs)
def fileUpload(host, port, selector, files, form=(), headers={}):
    """
    Performs a file upload via http.
    host - webserver hostname
    port - webserver listen port
    selector - the request (relative URL)
    files - list of files to upload.  list contains tuples, with the first
      entry as filename/file-like obj and the second as form element name.
      If the first element is a file-like obj, the element will be used as
      the filename.  If the first element is a filename, the filename's
      basename will be used as the filename on the form.  Type will be
      "application/octet-stream"
    form (optional) - a list of pairs of additional name/value form elements
      (param/values).
    headers (optional) - dict of extra HTTP headers to send.
    Returns the httplib.HTTPConnection so the caller can read the response.
    [hopefully, this method goes away in twisted-web2]
    """
    # XXX: set timeout (based on filesize?)
    port = int(port)
    # random MIME boundary, unguessable enough to not collide with content
    rand_bound = binascii.hexlify(generateRandom(13))
    boundary = "---------------------------"+rand_bound
    CRLF = '\r\n'
    body_content_type = "application/octet-stream"
    content_type = "multipart/form-data; boundary="+boundary
    content_length = 0
    # plain form fields, one multipart section each
    H = []
    for (param, value) in form:
        H.append('--' + boundary)
        H.append('Content-Disposition: form-data; name="%s"' % param)
        H.append('')
        H.append('%s' % value)
    form_data = CRLF.join(H)+CRLF
    content_length = content_length + len(form_data)
    # per-file section headers; file data itself is streamed later
    fuploads = []
    for file, element in files:
        if file == None:
            file = "/dev/null" # XXX: not portable
        if 'read' in dir(file):
            # file-like object: element name doubles as the filename;
            # measure its length by seeking to the end
            fname = element
            file.seek(0,2)
            file_length = file.tell()
            file.seek(0,0)
        else:
            fname = os.path.basename(file)
            file_length = os.stat(file)[stat.ST_SIZE]
        #logger.info("upload file %s len is %d" % (fname, file_length))
        H = [] # stuff that goes above file data
        H.append('--' + boundary)
        H.append('Content-Disposition: form-data; name="%s"; filename="%s"'
                % (element, fname))
        H.append('Content-Type: %s\n' % body_content_type)
        H.append('')
        file_headers = CRLF.join(H)
        content_length = content_length + len(file_headers) + file_length
        fuploads.append((file_headers, file, file_length))
    # closing boundary.  Built once, outside the loop: the original built it
    # from a loop-local list, which raised NameError when 'files' was empty.
    T = []
    T.append('--'+boundary+'--')
    T.append('')
    T.append('')
    trailer = CRLF.join(T)
    content_length = content_length + len(trailer)
    h = httplib.HTTPConnection(host, port) # XXX: blocking
    h.putrequest('POST', selector)
    for pageheader in headers:
        h.putheader(pageheader, headers[pageheader])
    h.putheader('Content-Type', content_type)
    h.putheader('Content-Length', content_length)
    h.endheaders()
    h.send(form_data)
    for fheader, file, flen in fuploads:
        if 'read' not in dir(file):
            # binary mode: the payload is octet-stream; text mode would
            # corrupt it on platforms that translate line endings
            file = open(file, 'rb')
        h.send(fheader)
        h.send(file.read(flen)+CRLF) # XXX: blocking
        file.close()
    h.send(trailer)
    return h
class ImposterException(FludException):
    """Exception for a node suspected of impersonating another identity.
    NOTE(review): not raised in this module; confirm usage at call sites."""
    pass
| Python |
"""
ClientDHTPrimitives.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Primitive client DHT protocol
"""
import time, os, stat, httplib, sys, random, logging
from twisted.web import http, client
from twisted.internet import reactor, threads, defer
from twisted.python import failure
import inspect, pdb
from flud.FludCrypto import FludRSA
import flud.FludkRouting as FludkRouting
from flud.fencode import fencode, fdecode
import flud.FludDefer as FludDefer
import ConnectionQueue
from ClientPrimitives import REQUEST
from FludCommUtil import *
logger = logging.getLogger("flud.client.dht")
# FUTURE: check flud protocol version for backwards compatibility
# XXX: need to make sure we have appropriate timeouts for all comms.
# FUTURE: DOS attacks. For now, assume that network hardware can filter these
# out (by throttling individual IPs) -- i.e., it isn't our problem. If we
# want to defend against this at some point, we need to keep track of who
# is generating requests and then ignore them.
# XXX: might want to consider some self-healing for the kademlia layer, as
# outlined by this thread:
# http://zgp.org/pipermail/p2p-hackers/2003-August/001348.html (should also
# consider Zooko's links in the parent to this post). Basic idea: don't
# always take the k-closest -- take x random and k-x of the k-closest.
# Can alternate each round (k-closest / x + k-x-closest) for a bit more
# diversity (as in "Sybil-resistent DHT routing").
# XXX: right now, calls to updateNode are chained. Might want to think about
# doing some of this more asynchronously, so that the recursive parts
# aren't waiting for remote GETIDs to return before recursing.
"""
The first set of classes (those beginning with 'k') perform [multiple] queries
given a key or key/value pair. They use the second set of classes (those
beginning with 'SEND'), which perform a single query to a given node.
"""
def serviceWaiting(res, key, pending, waiting):
    """Fire all deferreds waiting on a saved query and clear the bookkeeping.

    Add this as a callback on the master deferred of a query, passing the
    query key plus the pending and waiting dicts.  All deferreds queued in
    waiting[key] are fired with the result, and both dicts are purged of
    the key.
    @return: res, unchanged, so the callback chain continues
    """
    if key in waiting:
        for d in waiting[key]:
            #print "offbacking %s" % key
            d.callback(res)
        waiting.pop(key)
    # always drop the pending entry.  The original only removed it when
    # there were waiters, leaking an already-fired deferred in 'pending' so
    # that later lookups for the same key would piggyback on it and hang.
    pending.pop(key, None)
    return res
pendingkFindNodes = {}
waitingkFindNodes = {}
class kFindNode:
    """
    Perform a recursive kademlia node lookup for key.

    Instantiating this class starts the lookup; the result (a dict whose
    'k' entry lists the k closest nodes found) arrives on self.deferred.
    Concurrent lookups for the same key piggyback on the first one via the
    module-level pendingkFindNodes/waitingkFindNodes dicts.
    """
    def __init__(self, node, key):
        # if a lookup for this key is already in flight, just queue a
        # deferred to be fired with its result (see serviceWaiting)
        if pendingkFindNodes.has_key(key):
            d = defer.Deferred()
            if not waitingkFindNodes.has_key(key):
                waitingkFindNodes[key] = []
            waitingkFindNodes[key].append(d)
            logger.debug("piggybacking on previous kfindnode for %s" % key)
            self.deferred = d
            return
        self.node = node
        self.node.DHTtstamp = time.time()
        self.key = key
        self.queried = {}       # id -> (host, port) of nodes that answered
        self.outstanding = []   # (host, port, id) queries currently in flight
        self.pending = []       # (host, port, id) candidates not yet queried
        self.kclosest = []      # best nodes seen so far, XOR-sorted to key
        self.abbrvkey = ("%x" % key)[:8]+"..."
        self.abbrv = "(%s%s)" % (self.abbrvkey, str(self.node.DHTtstamp)[-7:])
        self.debugpath = []     # human-readable trace of the lookup
        self.deferred = self.startQuery(key)

    def startQuery(self, key):
        # query self first
        kclosest = self.node.config.routing.findNode(key)
        #logger.debug("local kclosest: %s" % kclosest)
        localhost = getCanonicalIP('localhost')
        kd = {'id': self.node.config.nodeID, 'k': kclosest}
        d = self.updateLists(kd, key, localhost, self.node.config.port,
                long(self.node.config.nodeID, 16))
        d.addErrback(self.errkfindnode, key, localhost, self.node.config.port)
        # register as the master deferred so later lookups can piggyback
        pendingkFindNodes[key] = d
        d.addCallback(serviceWaiting, key, pendingkFindNodes, waitingkFindNodes)
        return d

    def sendQuery(self, host, port, id, key):
        # issue a single remote findnode and track it as outstanding
        self.outstanding.append((host, port, id))
        #d = self.node.client.sendkFindNode(host, port, key)
        d = SENDkFINDNODE(self.node, host, port, key).deferred
        return d

    def updateLists(self, response, key, host, port, closestyet, x=0):
        # Merge one node's response into the bookkeeping lists, then decide
        # whether to recurse.  x counts the query round.
        logger.info("FN: received kfindnode %s response from %s:%d"
                % (self.abbrv, host, port))
        self.debugpath.append("FN: rec. resp from %s:%d" % (host, port))
        if not isinstance(response, dict):
            # a data value is being returned from findval
            # XXX: moved this bit into findval and call parent for the rest
            if response == None:
                logger.warn("got None from key=%s, %s:%d, x=%d, this usually"
                        " means that the host replied None to a findval query"
                        % (key, host, port, x))
            # if we found the fencoded value data, return it
            return defer.succeed(response)
        logger.debug("updateLists(%s)" % response)
        if len(response['k']) == 1 and response['k'][0][2] == key:
            # if we've found the key, don't keep making queries.
            logger.debug("FN: %s:%d found key %s" % (host, port, key))
            self.debugpath.append("FN: %s:%d found key %s" % (host, port, key))
            if response['k'][0] not in self.kclosest:
                self.kclosest.insert(0,response['k'][0])
                self.kclosest = self.kclosest[:FludkRouting.k]
            return defer.succeed(response)
        #for i in response['k']:
        #	print "  res: %s:%d" % (i[0], i[1])
        id = long(response['id'], 16)
        responder = (host, port, id)
        if responder in self.outstanding:
            self.outstanding.remove(responder)
        self.queried[id] = (host, port)
        knodes = response['k']
        for n in knodes:
            # queue each newly-learned node for querying, and fold it into
            # the running k-closest set
            if not self.queried.has_key(n[2])\
                    and not n in self.pending and not n in self.outstanding:
                self.pending.append((n[0], n[1], n[2]))
            if n not in self.kclosest:
                k = FludkRouting.k
                # XXX: remove self if it is in the list?
                self.kclosest.append(n)
                self.kclosest.sort(
                        lambda a, b: FludkRouting.kCompare(a[2], b[2], key))
                self.kclosest = self.kclosest[:k]
        #for n in self.outstanding:
        #	if n in self.pending:
        #		self.pending.remove(n) # remove anyone we've sent queries to...
        self.pending = list(set(self.pending) - set(self.outstanding))
        for i in self.queried:
            n = (self.queried[i][0], self.queried[i][1], i)
            if n in self.pending:
                self.pending.remove(n) # ...and anyone who has responded.
        self.pending.sort(lambda a, b: FludkRouting.kCompare(a[2], b[2], key))
        #print "queried: %s" % str(self.queried)
        #print "outstanding: %s" % str(self.outstanding)
        #print "pending: %s" % str(self.pending)
        return self.decideToContinue(response, key, x)

    def decideToContinue(self, response, key, x):
        # Hook point: kFindVal overrides this to stop once a value is found.
        ##print "x is %s" % str(x)
        ##for i in self.kclosest:
        ##	print "  kclosest %s" % str(i)
        ##for i in self.queried:
        ##	print "  queried %s" % str(self.queried[i])
        #if len(filter(lambda x: x not in self.queried, self.kclosest)) <= 0:
        #	print "finishing up at round %d" % x
        #	# XXX: never gets in here...
        #	# XXX: remove anything not in self.kclosest from self.pending
        #	self.pending =\
        #			filter(lambda x: x not in self.kclosest, self.pending)
        #	#self.pending = self.pending[:FludkRouting.k]
        #else:
        #	return self.makeQueries(key, x)
        # this is here so that kFindVal can plug-in by overriding
        return self.makeQueries(key, x)

    def makeQueries(self, key, x):
        # fire up to alpha (FludkRouting.a) concurrent queries from pending
        #print "doing round %d" % x
        self.debugpath.append("FN: doing round %d" % x)
        dlist = []
        for n in self.pending[:(FludkRouting.a - len(self.outstanding))]:
            #print "  querying %s:%d" % (n[0], n[1])
            self.debugpath.append("FN: querying %s:%d" % (n[0], n[1]))
            d = self.sendQuery(n[0], n[1], n[2], key)
            d.addCallback(self.updateLists, key, n[0], n[1],
                    self.kclosest[0][2], x+1)
            d.addErrback(self.errkfindnode, key, n[0], n[1],
                    raiseException=False)
            dlist.append(d)
        dl = defer.DeferredList(dlist)
        dl.addCallback(self.roundDone, key, x)
        return dl

    def roundDone(self, responses, key, x):
        # All queries of this round finished; prune unreachable nodes and
        # return the aggregated k-closest result.
        #print "done %d:" % x
        #print "roundDone: %s" % responses
        if len(self.pending) != 0 or len(self.outstanding) != 0:
            # should only get here for nodes that don't accept connections
            # XXX: updatenode -- decrease trust
            for i in self.pending:
                logger.debug("FN: %s couldn't contact node %s (%s:%d)"
                        % (self.abbrv, fencode(i[2]), i[0], i[1]))
                self.debugpath.append(
                        "FN: %s couldn't contact node %s (%s:%d)"
                        % (self.abbrv, fencode(i[2]), i[0], i[1]))
                for n in self.kclosest:
                    if (n[0],n[1],n[2]) == i:
                        self.kclosest.remove(n)
        logger.info("kFindNode %s terminated successfully after %d queries."
                % (self.abbrv, len(self.queried)))
        self.debugpath.append("FN: %s terminated successfully after %d queries."
                % (self.abbrv, len(self.queried)))
        self.kclosest.sort(
                lambda a, b: FludkRouting.kCompare(a[2], b[2], key))
        result = {}
        if FludkRouting.k > len(self.kclosest):
            k = len(self.kclosest)
        else:
            k = FludkRouting.k
        result['k'] = self.kclosest[:k]
        #print "result: %s" % result
        #if len(result['k']) > 1:
        #	# if the results (aggregated from multiple responses) contains the
        #	# exact key, just return the correct answer (successful node
        #	# lookup done).
        #	#print "len(result): %d" % len(result['k'])
        #	#print "result[0][2]: %s %d" % (type(result['k'][0][2]),
        #	#		result['k'][0][2])
        #	#print "         key: %s %d" % (type(key), key)
        #	if result['k'][0][2] == key:
        #		#print "key matched!"
        #		result['k'] = (result['k'][0],)
        return result

    def errkfindnode(self, failure, key, host, port, raiseException=True):
        # log a failed query; optionally propagate the failure (the initial
        # self-query does, per-round remote queries don't)
        logger.info("kFindNode %s request to %s:%d failed -- %s" % (self.abbrv,
                host, port, failure.getErrorMessage()))
        # XXX: updateNode--
        if raiseException:
            return failure
class kStore(kFindNode):
    """
    Perform a kStore operation.

    Looks up the k closest nodes to key (via kFindNode) and sends each of
    them a kSTORE of (key, val).  Completion arrives on self.deferred.
    """
    def __init__(self, node, key, val):
        self.node = node
        self.node.DHTtstamp = time.time()
        self.key = key
        self.val = val
        d = kFindNode(node,key).deferred
        d.addCallback(self.store)
        d.addErrback(self._kStoreErr, None, 0)
        self.deferred = d

    def store(self, knodes):
        # callback with the kFindNode result: send a kSTORE to each node
        knodes = knodes['k']
        if len(knodes) < 1:
            raise RuntimeError("can't complete kStore -- no nodes")
        dlist = []
        for knode in knodes:
            host = knode[0]
            port = knode[1]
            deferred = SENDkSTORE(self.node, host, port, self.key,
                    self.val).deferred
            deferred.addErrback(self._kStoreErr, host, port)
            dlist.append(deferred)
        dl = FludDefer.ErrDeferredList(dlist)
        dl.addCallback(self._kStoreFinished)
        dl.addErrback(self._kStoreErr, None, 0)
        return dl

    def _kStoreFinished(self, response):
        # all individual stores accounted for
        #print "_kStoreFinished: %s" % response
        logger.info("kStore finished")
        return ""

    def _kStoreErr(self, failure, host, port):
        # NOTE(review): host is None / port is 0 when the failure comes from
        # the aggregate list rather than a single node; the %s:%d format
        # still renders ("None:0") in that case.
        logger.info("couldn't store on %s:%d -- %s"
                % (host, port, failure.getErrorMessage()))
        print "_kStoreErr was: %s" % failure
        # XXX: updateNode--
        return failure
class kFindValue(kFindNode):
    """
    Perform a kFindValue.

    Works like kFindNode, except that a contacted node holding the value
    for key returns the value itself, which short-circuits the recursion;
    otherwise the normal node recursion proceeds.
    """
    def __init__(self, node, key):
        self.done = False   # set once some node returns the actual value
        kFindNode.__init__(self, node, key)

    def startQuery(self, key):
        # query self first.  We override kFindNode.startQuery here so that
        # we don't just return the closest nodeID, but the value itself (if
        # present).
        # NOTE(review): unlike kFindNode.startQuery, this does not register
        # in pendingkFindNodes, so findvalue lookups never piggyback --
        # confirm that is intended.
        localhost = getCanonicalIP('localhost')
        d = self.sendQuery(localhost, self.node.config.port,
                long(self.node.config.nodeID, 16), key)
        d.addCallback(self.updateLists, key, localhost, self.node.config.port,
                long(self.node.config.nodeID, 16), 0)
        d.addErrback(self.errkfindnode, key, localhost, self.node.config.port)
        return d

    def sendQuery(self, host, port, id, key):
        # We override sendQuery here in order to call sendkFindValue and handle
        # its response
        self.outstanding.append((host, port, id))
        d = SENDkFINDVALUE(self.node, host, port, key).deferred
        d.addCallback(self._handleFindVal, host, port)
        d.addErrback(self.errkfindnode, key, host, port)
        return d

    def _handleFindVal(self, response, host, port):
        # a non-dict response is the value itself; flag completion so that
        # decideToContinue/roundDone stop the recursion
        if not isinstance(response, dict):
            # stop sending out more queries.
            self.pending = []
            self.done = True
            #print "%s:%d sent value: %s" % (host, port, str(response)[:50])
            #f = {}
            #f['k'] = []
            #f['id'] = "0"
            #f['val'] = response  # pass on returned value
            #return f
        else:
            pass
            #print "%s:%d sent kData: %s" % (host, port, response)
        return response

    def decideToContinue(self, response, key, x):
        # once a value has been seen, pass it through instead of recursing
        if self.done:
            #if not response.has_key('val'):
            #	logger.warn("response has no 'val', response is: %s" % response)
            #return response['val']
            return response
        else:
            return self.makeQueries(key, x)

    def roundDone(self, responses, key, x):
        # NOTE(review): when self.done is False this returns None implicitly,
        # unlike kFindNode.roundDone which returns a result dict -- confirm.
        self.debugpath.append("FV: roundDone %d" % x)
        if self.done:
            result = {}
            # see if everyone's responses agreed...
            for success, resp in responses:
                # only look at successful non-kData (dict) responses.
                if success and resp != None and not isinstance(resp, dict):
                    if result.has_key(resp):
                        result[resp] += 1
                    else:
                        result[resp] = 1
            if len(result) == 0:
                # ... if no one responded, XXX: do something orther than None?
                logger.info("couldn't get any results")
                return None
            elif len(result) == 1:
                # ... if they did, return the result
                return result.keys()[0]
            else:
                # ... otherwise, return the result of the majority
                # (other options include returning all results)
                logger.info("got conflicting results, determining best...")
                quorumResult = None
                bestScore = 0
                for r in result:
                    #logger.debug("result %s scored %d" % (r, result[r]))
                    if result[r] > bestScore:
                        bestScore = result[r]
                        quorumResult = r
                        #logger.debug("result %s is new best" % r)
                logger.info("returning result %s", fdecode(quorumResult))
                return quorumResult
class SENDkFINDNODE(REQUEST):
    """
    Makes one request to a node for its k-closest nodes closest to key
    """
    def __init__(self, node, host, port, key, commandName="nodes"):
        """
        @param commandName: URL path component for the request; the
            SENDkFINDVALUE subclass reuses this class with "meta".
        """
        logger.info("sending %s (findnode) for %s... to %s:%d"
                % (commandName, ("%x" % key)[:10], host, port))
        self.commandName = commandName
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)
        # the request carries our own identity (ID, public key, port) so
        # the peer can update its routing tables
        Ku = self.node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/'+self.commandName+'/'
        url += fencode(key)
        url += '?nodeID='+str(self.node.config.nodeID)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += '&port='+str(self.node.config.port)
        self.timeoutcount = 0
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, node, host, port, key, url))

    def startRequest(self, node, host, port, key, url):
        # invoked by ConnectionQueue when a connection slot frees up;
        # bridges the request's result onto self.deferred
        d = self._sendRequest(node, host, port, key, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, node, host, port, key, url):
        # issue the actual HTTP request
        factory = getPageFactory(url,
                headers=self.headers, timeout=kprimitive_to)
        factory.deferred.addCallback(self._gotResponse, factory,
                node, host, port, key)
        factory.deferred.addErrback(self._errSendk, factory, node,
                host, port, key, url)
        return factory.deferred

    def _gotResponse(self, response, factory, node, host, port, key):
        logger.debug("kfindnode._gotResponse()")
        self._checkStatus(factory.status, response, host, port)
        # XXX(review): eval() of wire data is unsafe on untrusted input;
        # should be replaced with a restricted parser (e.g. fdecode).
        response = eval(response)
        nID = long(response['id'], 16)
        # fold the responder and every node it told us about into our tables
        updateNode(node.client, node.config, host, port, None, nID)
        updateNodes(node.client, node.config, response['k'])
        return response

    def _checkStatus(self, status, response, host, port):
        # raise unless the HTTP status is 200/OK
        logger.debug("kfindnode._checkStatus()")
        if eval(status) != http.OK:
            raise failure.DefaultException(self.commandName+" FAILED from "
                    +host+":"+str(port)+": received status "+status+", '"
                    +response+"'")

    def _errSendk(self, err, factory, node, host, port, key, url):
        # retry transient connection failures up to MAXTIMEOUTS times
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            #print "GETID request error: %s" % err.__class__.__name__
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                #print "trying again [#%d]...." % self.timeoutcount
                return self._sendRequest(node, host, port, key, url)
            else:
                #print "not trying again [#%d]" % self.timeoutcount
                return err
        logger.info("%s to %s failed -- %s"
                % (self.commandName, self.dest, err.getErrorMessage()))
        # XXX: updateNode--
        return err
class SENDkSTORE(REQUEST):
    """
    Sends a single kSTORE to the given host:port, with key=val
    """
    def __init__(self, node, host, port, key, val):
        logger.info("sending kSTORE to %s:%d" % (host, port))
        REQUEST.__init__(self, host, port, node)
        # key and val travel fencoded in the URL path; our own identity is
        # carried in the query string
        Ku = node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/meta/'
        url += fencode(key)+"/"+fencode(val)
        url += '?nodeID='+str(node.config.nodeID)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += '&port='+str(node.config.port)
        # XXX: instead of a single key/val, protocol will take a series of
        #      vals representing the blocks of the coded file and their
        #      locations (by nodeID).  The entire thing will be stored under
        #      the given key.  Also may need things like signature[s] from
        #      storing node[s], etc.
        #print "in SENDkSTORE.__init__, len(val)=%d" % len(str(val))
        #print "in SENDkSTORE.__init__, len(enc(val))=%d" % len(fencode(val))
        #print "in SENDkSTORE.__init__, len(url)=%d" % len(url)
        self.timeoutcount = 0
        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, host, port, url))

    def startRequest(self, host, port, url):
        # invoked by ConnectionQueue when a connection slot frees up
        d = self._sendRequest(host, port, url)
        d.addBoth(ConnectionQueue.checkWaiting)
        d.addCallback(self.deferred.callback)
        d.addErrback(self.deferred.errback)

    def _sendRequest(self, host, port, url):
        # NOTE(review): unlike SENDkFINDNODE._sendRequest, the result
        # handlers here are attached to self.deferred rather than
        # factory.deferred -- confirm the retry path in _storeErr behaves
        # as intended with this wiring.
        factory = getPageFactory(url,\
                headers=self.headers, method='PUT', timeout=kprimitive_to)
        self.deferred.addCallback(self._kStoreFinished, host, port)
        self.deferred.addErrback(self._storeErr, host, port, url)
        return factory.deferred

    def _kStoreFinished(self, response, host, port):
        logger.info("kSTORE to %s:%d finished" % (host, port))
        return response

    def _storeErr(self, err, host, port, url):
        # retry transient connection failures up to MAXTIMEOUTS times
        if err.check('twisted.internet.error.TimeoutError') or \
                err.check('twisted.internet.error.ConnectionLost'):
            #print "GETID request error: %s" % err.__class__.__name__
            self.timeoutcount += 1
            if self.timeoutcount < MAXTIMEOUTS:
                #print "trying again [#%d]...." % self.timeoutcount
                return self._sendRequest(host, port, url)
            else:
                #print "not trying again [#%d]" % self.timeoutcount
                return err
        logger.info("kSTORE to %s failed: %s"
                % (self.dest, err.getErrorMessage()))
        # XXX: updateNode--
        return err
class SENDkFINDVALUE(SENDkFINDNODE):
    """
    Issues a single kFINDVALUE request to host:port for the key.
    If the value is found at host:port, it is returned, otherwise, a
    404 response is received and any errbacks are called.
    """
    def __init__(self, node, host, port, key):
        # same request machinery as findnode, but against the "meta" path
        SENDkFINDNODE.__init__(self, node, host, port, key, "meta")

    def _gotResponse(self, response, factory, node, host, port, key):
        self._checkStatus(factory.status, response, host, port)
        # The following 'if' block is the only thing different from kFINDNODE.
        # If a node returns the value, content-type will be set to x-flud-data
        # and we should grab the data instead of continuing the recursive
        # search.
        if factory.response_headers.has_key('content-type')\
                and factory.response_headers['content-type']\
                    == ['application/x-flud-data']:
            logger.info("received SENDkFINDVALUE data.")
            nID = None
            if factory.response_headers.has_key('nodeid'):
                nID = factory.response_headers['nodeid'][0]
            updateNode(node.client, node.config, host, port, None, nID)
            return response
        # no value at this node: treat the reply like a findnode response
        # XXX(review): eval() of wire data is unsafe on untrusted input.
        response = eval(response)
        nID = long(response['id'], 16)
        updateNode(node.client, node.config, host, port, None, nID)
        logger.info("received SENDkFINDVALUE nodes")
        logger.debug("received SENDkFINDVALUE nodes: %s" % response)
        updateNodes(node.client, node.config, response['k'])
        return response
| Python |
"""
FludkRouting.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Implements kademlia-style kbuckets (the routing table for the DHT layer).
Although this is not a derivative of Khashmir (written by Andrew Loewenstern,
Aaron Swartz, et. al.), we would like to give Khashmir a nod for inspiring
portions of the design. Khashmir is distributed under the MIT License and is a
very nice piece of work. Take a look at http://khashmir.sourceforge.net/ for
more information.
"""
from bisect import *
import logging
#k = 5 # This is the max depth of a kBucket
k = 12 # This is the max depth of a kBucket. k is generally used as
# the replication factor, too (because findNode returns k
# entries), but this is determined by a higher layer.
a = 3 # alpha, the system-wide concurrency parameter
idspace = 256 # using sha-256
logger = logging.getLogger("flud.k")
class NodeCache:
    """
    A bounded cache of DHT nodes with oldest-first eviction.

    Nodes are (ip, port, id) triples keyed by id.  When the cache exceeds
    its size, the oldest-inserted node is evicted and its ID returned.
    NOTE: re-inserting an existing node does not refresh its position, so
    eviction is FIFO rather than strictly LRU.
    """
    def __init__(self, size):
        # size: maximum number of nodes held before eviction kicks in
        self.size = size
        self.cache = {}        # nodeID -> (ip, port, id) triple
        self.cacheOrder = []   # nodeIDs in insertion order, oldest first

    def insertNode(self, node):
        """
        adds a node to the cache. if this displaces a node, the displaced
        node's ID is returned (otherwise None)
        """
        if node[2] not in self.cache:
            self.cache[node[2]] = node
            self.cacheOrder.append(node[2])
            if len(self.cacheOrder) > self.size:
                popped = self.cacheOrder.pop(0)
                self.cache.pop(popped)
                return popped

    def getNode(self, nodeID):
        """
        returns a node from the cache, or None
        """
        if nodeID in self.cache:
            # bug fix: the original indexed the cache with the undefined
            # name 'node', raising NameError on every cache hit
            return self.cache[nodeID]
        return None

    def removeNode(self, node):
        """
        removes a node from the cache
        """
        if node[2] in self.cache:
            self.cache.pop(node[2])
            # bug fix: cacheOrder is a list, so remove by value; the
            # original called pop(node[2]), treating the (huge) node ID as
            # a list index and raising IndexError
            self.cacheOrder.remove(node[2])

    def nodes(self):
        """Return all cached node triples."""
        return [self.cache[i] for i in self.cache]
def kCompare(a, b, target):
    """
    XOR-metric comparator for kademlia distance sorting.

    Compares a and b by their XOR distance to target.
    @param a an integer (or long) value
    @param b an integer (or long) value
    @param target the target ID as an integer (or long) value
    @return -1 if a is closer to target, 1 if b is closer, 0 on a tie

    >>> l = [1, 2, 23, 14, 5, 4, 5, 3, 20]
    >>> l.sort(lambda a, b: kCompare(a, b, 5))
    >>> l
    [5, 5, 4, 1, 3, 2, 14, 20, 23]
    """
    dist_a = target ^ a
    dist_b = target ^ b
    if dist_a == dist_b:
        return 0
    if dist_a < dist_b:
        return -1
    return 1
class kRouting:
"""
Contains the kBuckets for this node. Provides methods for inserting,
updating, and removing nodes. Most importantly, performs kademlia-style
routing by returning the node[s] closest to a particular id.
>>> table = kRouting(('1.2.3.4', 34, 123456), 20, 5)
>>> table.insertNode(('2.2.3.4', 34, 23456))
>>> table.insertNode(('3.2.3.4', 34, 223456))
>>> table.insertNode(('4.2.3.4', 34, 723456))
>>> table.insertNode(('5.2.3.4', 34, 423456))
>>> table.insertNode(('6.2.3.4', 34, 323456))
>>> table.kBuckets
[{'0-80000': [('1.2.3.4', 34, 123456), ('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456)]}, {'80001-100000': [('4.2.3.4', 34, 723456)]}]
>>> table.findNode(23456)
[('2.2.3.4', 34, 23456), ('1.2.3.4', 34, 123456), ('3.2.3.4', 34, 223456), ('6.2.3.4', 34, 323456), ('5.2.3.4', 34, 423456)]
>>> table.findNode(55555)
[('2.2.3.4', 34, 23456), ('1.2.3.4', 34, 123456), ('3.2.3.4', 34, 223456), ('6.2.3.4', 34, 323456), ('5.2.3.4', 34, 423456)]
>>> table.findNode(722222)
[('4.2.3.4', 34, 723456), ('3.2.3.4', 34, 223456), ('1.2.3.4', 34, 123456), ('2.2.3.4', 34, 23456), ('5.2.3.4', 34, 423456)]
>>> table.insertNode(('7.2.3.4', 34, 733456))
>>> table.insertNode(('8.2.3.4', 34, 743456))
>>> table.insertNode(('9.2.3.4', 34, 753456))
>>> table.insertNode(('10.2.3.4', 34, 763456))
>>> table.insertNode(('11.2.3.4', 34, 773456))
('4.2.3.4', 34, 723456)
>>> table.replaceNode(('4.2.3.4', 34, 723456), ('11.2.3.4', 34, 773456))
>>> table.kBuckets
[{'0-80000': [('1.2.3.4', 34, 123456), ('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456)]}, {'80001-100000': [('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]}]
>>> table.removeNode(('1.2.3.4', 34, 123456))
>>> table.kBuckets
[{'0-80000': [('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456)]}, {'80001-100000': [('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]}]
>>> table.knownNodes()
[('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456), ('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]
>>> table.knownExternalNodes()
[('2.2.3.4', 34, 23456), ('3.2.3.4', 34, 223456), ('5.2.3.4', 34, 423456), ('6.2.3.4', 34, 323456), ('7.2.3.4', 34, 733456), ('8.2.3.4', 34, 743456), ('9.2.3.4', 34, 753456), ('10.2.3.4', 34, 763456), ('11.2.3.4', 34, 773456)]
"""
	def __init__(self, node, bits=idspace, depth=k):
		"""
		@param node a (ip, port, id) triple, where id is an int (this is
		needed to know when to split a bucket).
		@param bits size of the id space in bits; the initial bucket spans
		0..2**bits.
		@param depth 'k' from the Kademlia paper: max nodes kept per bucket.
		"""
		self.k = depth
		# cache of recently-seen nodes that didn't fit into a full bucket
		# (300 is presumably its capacity -- see NodeCache)
		self.replacementCache = NodeCache(300)
		# start with one bucket covering the whole id space; insertNode()
		# splits it on demand via _splitBucket()
		self.kBuckets = [kBucket(0, 2**bits, depth),]
		#self.kBuckets = [kBucket(0, 1, depth),]
		#for i in xrange(1,bits):
		#	self.kBuckets.append(kBucket(2**i, 2**(i+1)-1, depth))
		self.insertNode(node)
		self.node = node
def insertNode(self, node):
"""
Inserts a node into the appropriate kBucket. If the node already
exists in the appropriate kBucket, it is moved to the tail of the list.
If the bucket is full, this method returns the oldest node, which the
caller should then ping. If the oldest node is alive, the caller
does nothing. Otherwise, the caller should call replaceNode.
@param node a (ip, port, id) triple, where id is a long.
"""
if len(node) < 3:
raise ValueError("node must be a triple (ip, port, id)")
id = node[2]
bucket = self._findBucket(id)
try:
# XXX: need to transfer key/vals that belong to new node?
bucket.updateNode(node)
self.replacementCache.removeNode(node)
except BucketFullException, e:
if (bucket.begin <= self.node[2] < bucket.end):
# bucket is full /and/ the local node is in this bucket,
# split and try adding it again.
self._splitBucket(bucket)
self.insertNode(node)
logger.debug("split and added %x" % node[2])
return
# XXX: need to also split for some other cases, see sections 2.4
# and 4.2.
else:
# bucket is full but we won't split. Return the oldest node
# so that the caller can determine if it should be expunged.
# If the old node is not reachable, caller should call
# replaceNode()
logger.debug("didn't add %x" % node[2])
return bucket.contents[0]
logger.debug("didn't add %x" % node[2])
return bucket.contents[0]
def removeNode(self, node):
"""
Invalidates a node.
"""
bucket = self._findBucket(node[2])
bucket.delNode(node)
	def replaceNode(self, replacee, replacer):
		"""
		Expunges replacee from its bucket, making room to add replacer
		(typically called after the oldest node returned by insertNode
		failed to answer a ping).
		"""
		# XXX: constraint checks: replacee & replacer belong to the same bucket,
		# bucket is currently full, adding replacer doesn't overfill, etc.
		self.removeNode(replacee)
		self.insertNode(replacer)
def findNode(self, nodeID):
"""
Returns k closest node triples with which the caller may make
additional queries. If nodeID is found, it will be the first result.
@param nodeID an int
"""
nodes = []
bucket = self._findBucket(nodeID)
#n = bucket.findNode(nodeID)
#if n != None:
# nodes.append(n)
nodes += bucket.contents
if len(nodes) < self.k:
nextbucket = self._nextbucket(bucket)
prevbucket = self._prevbucket(bucket)
while len(nodes) < self.k \
and (nextbucket != None or prevbucket != None):
if nextbucket != None:
nodes += nextbucket.contents
if prevbucket != None:
nodes += prevbucket.contents
nextbucket = self._nextbucket(nextbucket)
prevbucket = self._prevbucket(prevbucket)
nodes.sort(lambda a, b, n=nodeID: cmp(n ^ a[2], n ^ b[2]))
return nodes[:self.k]
def getNode(self, nodeID):
"""
Attempts to get the given node, returning a <ip, port, id> triple.
If the node is not found locally, returns None
@param nodeID an int
"""
bucket = self._findBucket(nodeID)
n = bucket.findNode(nodeID)
if not n:
n = self.replacementCache.getNode(nodeID)
if n != None:
return n
return None
	def updateNode(self, node):
		"""
		Call to update a node, i.e., whenever the node has been recently seen
		(delegates to insertNode, which moves the node to the tail of its
		bucket; insertNode's return value is ignored here).
		@param node a (ip, port, id) triple, where id is an int.
		"""
		self.insertNode(node)
def knownExternalNodes(self):
result = []
for i in self.kBuckets:
for j in i.contents:
if j[2] != self.node[2]:
result.append(j)
result += self.replacementCache.nodes()
return result
def knownNodes(self):
result = []
for i in self.kBuckets:
for j in i.contents:
result.append(j)
result += self.replacementCache.nodes()
return result
def _nextbucket(self, bucket):
if bucket == None:
return bucket
i = self.kBuckets.index(bucket)+1
if i >= len(self.kBuckets):
return None
return self.kBuckets[i]
def _prevbucket(self, bucket):
if bucket == None:
return bucket
i = self.kBuckets.index(bucket)-1
if i < 0:
return None
return self.kBuckets[i]
def _findBucket(self, i):
"""
returns the bucket which would contain i.
@param i an int
"""
#print "kBuckets = %s" % str(self.kBuckets)
bl = bisect_left(self.kBuckets, i)
if bl >= len(self.kBuckets):
raise Exception(
"tried to find an ID that is larger than ID space: %s" % i)
return self.kBuckets[bisect_left(self.kBuckets, i)]
	def _splitBucket(self, bucket):
		"""
		This is called for the special case when the bucket is full and this
		node is a member of the bucket. When this occurs, the bucket should
		be split into two new buckets.
		"""
		# split the range in half: 'bucket' keeps the lower half, newbucket
		# takes the upper half (py2 integer division)
		halfpoint = (bucket.end - bucket.begin) / 2
		newbucket = kBucket(bucket.end - halfpoint + 1, bucket.end, self.k)
		# kBucket.__eq__ matches any int inside a bucket's range, so
		# index(bucket.begin) locates this bucket in the sorted list
		self.kBuckets.insert(self.kBuckets.index(bucket.begin) + 1, newbucket)
		bucket.end -= halfpoint
		# migrate nodes that now fall above the shrunken range; iterate a
		# copy since delNode mutates bucket.contents
		for node in bucket.contents[:]:
			if node[2] > bucket.end:
				bucket.delNode(node)
				newbucket.addNode(node)
class kBucket:
	"""
	A kBucket is a list of <ip, port, id> triples, ordered according to time
	last seen (most recent at tail). Every kBucket has a begin and end
	number, indicating the chunk of the id space that it contains.
	>>> b = kBucket(0,100,5)
	>>> b
	{'0-64': []}
	>>> n1 = ('1.2.3.4', 45, 'd234a53546e4c23')
	>>> n2 = ('10.20.30.40', 45, 'abcd234a53546e4')
	>>> n3 = ('10.20.30.4', 5, 'abcd')
	>>> b.addNode(n1)
	>>> b
	{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23')]}
	>>> b.addNode(n2)
	>>> b
	{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23'), ('10.20.30.40', 45, 'abcd234a53546e4')]}
	>>> b.addNode(n1)
	>>> b
	{'0-64': [('10.20.30.40', 45, 'abcd234a53546e4'), ('1.2.3.4', 45, 'd234a53546e4c23')]}
	>>> b.delNode(n3)
	>>> b
	{'0-64': [('10.20.30.40', 45, 'abcd234a53546e4'), ('1.2.3.4', 45, 'd234a53546e4c23')]}
	>>> b.addNode(n2)
	>>> b
	{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23'), ('10.20.30.40', 45, 'abcd234a53546e4')]}
	>>> b.updateNode(n1)
	>>> b
	{'0-64': [('10.20.30.40', 45, 'abcd234a53546e4'), ('1.2.3.4', 45, 'd234a53546e4c23')]}
	>>> b.delNode(n2)
	>>> b
	{'0-64': [('1.2.3.4', 45, 'd234a53546e4c23')]}
	>>> b.addNode(n3)
	>>> f = b.findNode(n3[2])
	>>> f == n3
	True
	>>> c = kBucket(101,200,5)
	>>> d = kBucket(150,250,5) # wouldn't really have overlap in practice
	>>> e = kBucket(251, 2**256,5)
	>>> buckets = (b, c, d, e) # if not added insort, must sort for bisect
	>>> b1 = b
	>>> b1 == b
	True
	>>> b1 != b
	False
	>>> b == 50
	True
	>>> b == 0
	True
	>>> b == 100
	True
	>>> b == -1
	False
	>>> b > -1
	True
	>>> b == 101
	False
	>>> b < 101
	True
	>>> b <= 90
	True
	>>> b <= 100
	True
	>>> b <= 101
	True
	>>> b < d
	True
	>>> b <= c
	True
	>>> b > c
	False
	>>> bisect_left(buckets, 98)
	0
	>>> bisect_left(buckets, 198)
	1
	>>> bisect_left(buckets, 238)
	2
	>>> bisect_left(buckets, 298)
	3
	"""
	def __init__(self, begin, end, depth=k):
		# begin/end: bounds of the id range this bucket covers (see the
		# comparators below); depth: max entries held ('k')
		self.k = depth
		self.begin = begin
		self.end = end
		self.contents = []
	def __repr__(self):
		# begin/end are rendered in hex -- hence '0-64' for kBucket(0,100)
		return "{'%x-%x': %s}" % (self.begin, self.end, self.contents)
		#return "{'"+repr(self.begin)+'-'+repr(self.end)+"': "\
		#		+repr(self.contents)+"}"
		#return "<kBucket "+repr(self.begin)+'-'+repr(self.end)+": "\
		#		+repr(self.contents)+">"
	def addNode(self, node):
		""" adds the given node to this bucket. If the node is already a member
		of this bucket, its position is updated to the end of the list. If the
		bucket is full, raises an exception
		"""
		if node in self.contents:
			# already present: move to tail (most recently seen)
			self.contents.remove(node)
			self.contents.append(node)
		elif len(self.contents) >= self.k:
			raise BucketFullException()
		else:
			# same id but different contact info: drop the stale triple
			ids = [x[2] for x in self.contents]
			if node[2] in ids:
				# remove the matching node's old contact info
				self.contents.pop(ids.index(node[2]))
			self.contents.append(node)
	def updateNode(self, node):
		""" Moves the given node to the tail of the list. If the node isn't
		present in this bucket, this method attempts to add it by calling
		addNode (which may throw a BucketFullException if bucket is full)
		"""
		self.addNode(node)
	def delNode(self, node):
		""" removes the given node, if present, from this bucket """
		try:
			self.contents.remove(node)
		except:
			# absent node: removal is deliberately best-effort
			pass
	def findNode(self, nodeID):
		# linear scan for the triple whose id equals nodeID; None if absent
		for i in self.contents:
			if i[2] == nodeID:
				return i
		return None
	# The following comparators allow us to use list & bisect on the buckets.
	# integers, longs, and buckets all may be compared to a bucket.
	# A bucket compares "equal" to any value within [begin, end], which is
	# what lets bisect_left locate the covering bucket for an id.
	def __eq__(self, i):
		return i >= self.begin and self.end >= i
	def __ne__(self, i):
		return i < self.begin or self.end < i
	def __lt__(self, i):
		return self.end < i
	def __le__(self, i):
		return self.begin <= i
	def __gt__(self, i):
		return self.begin > i
	def __ge__(self, i):
		return self.end >= i
class BucketFullException(Exception):
	"""Raised by kBucket.addNode when the bucket already holds k entries;
	caught by the routing table's insertNode to split or report back."""
	pass
def _test():
	# run the doctests embedded in this module's docstrings
	import doctest
	doctest.testmod()
if __name__ == '__main__':
	_test()
| Python |
import zfec
import zfec.easyfec as easyfec
import zfec.filefec as filefec
from pyutil import fileutil
from pyutil.mathutil import pad_size, log_ceil
import array, os, re, struct, traceback
# share filename template "<prefix>.<shnum>_<m><suffix>"; the %0%dd width
# fields are filled in by encode_to_files so shnum and m share a zero-padded
# width
FORMAT_FORMAT = "%%s.%%0%dd_%%0%dd%%s"
# regex matching filenames produced from FORMAT_FORMAT
RE_FORMAT = "%s.[0-9]+_[0-9]+%s"
def encode_to_files(inf, fsize, dirname, prefix, k, m, suffix=".fec", overwrite=False, verbose=False):
	"""
	Encode inf, writing the shares to specially named, newly created files.
	@param inf: file object supplying the plaintext
	@param fsize: calling read() on inf must yield fsize bytes of data and
		then raise an EOFError
	@param dirname: the name of the directory into which the sharefiles will
		be written
	@param prefix: filename prefix for each share file
	@param k: shares needed to reconstruct; m: total shares produced
	@param overwrite: clobber existing share files instead of failing
	@return list of share filenames written, or None on failure
	"""
	mlen = len(str(m))
	format = FORMAT_FORMAT % (mlen, mlen,)
	padbytes = pad_size(fsize, k)
	fns = []
	fs = []
	try:
		for shnum in range(m):
			hdr = filefec._build_header(m, k, padbytes, shnum)
			fn = os.path.join(dirname, format % (prefix, shnum, m, suffix,))
			if verbose:
				print "Creating share file %r..." % (fn,)
			if overwrite:
				f = open(fn, "wb")
			else:
				# O_EXCL so an existing share file is never silently
				# clobbered; O_BINARY only where the platform defines it
				flags = os.O_WRONLY|os.O_CREAT|os.O_EXCL | (hasattr(os,
					'O_BINARY') and os.O_BINARY)
				fd = os.open(fn, flags)
				f = os.fdopen(fd, "wb")
			f.write(hdr)
			fs.append(f)
			fns.append(fn)
		# one-element list so cb() can rebind the running total (py2
		# closures cannot assign to enclosing-scope names)
		sumlen = [0]
		def cb(blocks, length):
			# called per encoded chunk with one block per share; appends
			# each block to its corresponding share file
			assert len(blocks) == len(fs)
			oldsumlen = sumlen[0]
			sumlen[0] += length
			if verbose:
				if int((float(oldsumlen) / fsize) * 10) \
						!= int((float(sumlen[0]) / fsize) * 10):
					print str(int((float(sumlen[0]) / fsize) * 10) * 10) \
							+ "% ...",
			if sumlen[0] > fsize:
				raise IOError("Wrong file size -- possibly the size of the"
						" file changed during encoding. Original size: %d,"
						" observed size at least: %s" % (fsize, sumlen[0],))
			for i in range(len(blocks)):
				data = blocks[i]
				fs[i].write(data)
				length -= len(data)  # NOTE(review): local only, never read
		filefec.encode_file_stringy_easyfec(inf, cb, k, m, chunksize=4096)
	except EnvironmentError, le:
		print "Cannot complete because of exception: "
		print le
		print "Cleaning up..."
		# clean up: close and remove every partially written share file
		while fs:
			f = fs.pop()
			f.close() ; del f
			fn = fns.pop()
			if verbose:
				print "Cleaning up: trying to remove %r..." % (fn,)
			fileutil.remove_if_possible(fn)
		return None
	if verbose:
		print
		print "Done!"
	return fns
# Note: if you really prefer base-2 and you change this code, then please
# denote 2^20 as "MiB" instead of "MB" in order to avoid ambiguity.
# Thanks.
# http://en.wikipedia.org/wiki/Megabyte
MILLION_BYTES=10**6
def decode_from_files(outf, infiles, verbose=False):
"""
Decode from the first k files in infiles, writing the results to outf.
"""
assert len(infiles) >= 2
infs = []
shnums = []
m = None
k = None
padlen = None
byteswritten = 0
for f in infiles:
(nm, nk, npadlen, shnum,) = filefec._parse_header(f)
if not (m is None or m == nm):
raise CorruptedShareFilesError("Share files were corrupted --"
" share file %r said that m was %s but another share file"
" previously said that m was %s" % (f.name, nm, m,))
m = nm
if not (k is None or k == nk):
raise CorruptedShareFilesError("Share files were corrupted --"
" share file %r said that k was %s but another share file"
" previously said that k was %s" % (f.name, nk, k,))
if k > len(infiles):
raise InsufficientShareFilesError(k, len(infiles))
k = nk
if not (padlen is None or padlen == npadlen):
raise CorruptedShareFilesError("Share files were corrupted --"
" share file %r said that pad length was %s but another"
" share file previously said that pad length was %s"
% (f.name, npadlen, padlen,))
padlen = npadlen
infs.append(f)
shnums.append(shnum)
if len(infs) == k:
break
dec = easyfec.Decoder(k, m)
while True:
chunks = [ inf.read(filefec.CHUNKSIZE) for inf in infs ]
if [ch for ch in chunks if len(ch) != len(chunks[-1])]:
raise CorruptedShareFilesError("Share files were corrupted --"
" all share files are required to be the same length,"
" but they weren't.")
if len(chunks[-1]) == filefec.CHUNKSIZE:
# Then this was a full read, so we're still in the sharefiles.
resultdata = dec.decode(chunks, shnums, padlen=0)
outf.write(resultdata)
byteswritten += len(resultdata)
if verbose:
if ((byteswritten - len(resultdata)) / (10*MILLION_BYTES)) \
!= (byteswritten / (10*MILLION_BYTES)):
print str(byteswritten / MILLION_BYTES) + " MB ...",
else:
# Then this was a short read, so we've reached the end of the
# sharefiles.
resultdata = dec.decode(chunks, shnums, padlen)
outf.write(resultdata)
return True
if verbose:
print
print "Done!"
return True
# fludfilefec -- modified zfec filefec for use with flud, based on:
#
# zfec -- fast forward error correction library with Python interface
#
# Copyright (C) 2007 Allmydata, Inc.
# Author: Zooko Wilcox-O'Hearn
#
# This file is part of zfec.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version, with the added permission that, if you become obligated
# to release a derived work under this licence (as per section 2.b), you may
# delay the fulfillment of this obligation for up to 12 months. See the file
# COPYING for details.
#
# If you would like to inquire about a commercial relationship with Allmydata,
# Inc., please contact partnerships@allmydata.com and visit
# http://allmydata.com/.
| Python |
from twisted.python import failure
from twisted.internet import defer
"""
FludDefer.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
"""
class ErrDeferredList(defer.DeferredList):
	"""
	ErrDeferredList acts just like DeferredList, except that if *any* of the
	Deferreds in the DeferredList errback(), the ErrDeferredList also
	errback()s. This is different from DeferredList(fireOnOneErrback=True) in
	that if you use that method, you only know about the first failure, and you
	won't learn of subsequent failures/success in the list. returnOne indicates
	whether the full result of the DeferredList should be returned, or just the
	first result (or first error)
	"""
	def __init__(self, list, returnOne=False):
		defer.DeferredList.__init__(self, list, consumeErrors=True)
		self.returnOne = returnOne
		self.addCallback(self.wrapResult)
	def wrapResult(self, result):
		"""Errback (by raising) if any entry failed -- with the first
		failure's value when returnOne, else with the whole result list;
		otherwise pass through the first value or the full result."""
		for (succeeded, value) in result:
			if not succeeded:
				if self.returnOne:
					raise failure.DefaultException(value)
				raise failure.DefaultException(result)
		if self.returnOne:
			return result[0][1]
		return result
| Python |
#!/usr/bin/python
"""
FludNode.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
FludNode is the process that runs to talk with other nodes in the flud backup network.
"""
from twisted.internet import reactor, defer
import threading, signal, sys, time, os, random, logging
from flud.FludConfig import FludConfig
from flud.protocol.FludServer import FludServer
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import getCanonicalIP
PINGTIME=60   # seconds; upper bound for the randomized pingRandom interval
SYNCTIME=900  # seconds between config saves (see FludNode.syncConfig)
class FludNode(object):
	"""
	A node in the flud network. A node is both a client and a server. It
	listens on a network accessible port for both DHT and Storage layer
	requests, and it listens on a local port for client interface commands.
	"""
	def __init__(self, port=None):
		# port: server port to listen on; None lets FludConfig choose
		self._initLogger()
		self.config = FludConfig()
		# drop the temporary console handler installed by _initLogger
		# (presumably FludConfig wires up the real handlers -- verify)
		self.logger.removeHandler(self.screenhandler)
		self.config.load(serverport=port)
		self.client = FludClient(self)
		self.DHTtstamp = time.time()+10
	def _initLogger(self):
		# temporary console logger used until the config is loaded
		logger = logging.getLogger('flud')
		self.screenhandler = logging.StreamHandler()
		self.screenhandler.setLevel(logging.INFO)
		logger.addHandler(self.screenhandler)
		self.logger = logger
	def pingRandom(self, tstamp):
		# NOTE(review): the bare return below disables this method
		# entirely; everything after it is currently dead code.
		return
		# XXX: see pg. 4, Section 2.2 (short) or 2.3 (long) of the Kademlia
		# paper -- once an hour, each node should check any buckets that
		# haven't been refreshed and pick a random id within that space
		# to findnode(id) on, for all buckets.
		if tstamp < self.DHTtstamp:
			#r = random.randrange(2**256)
			n = self.config.routing.knownExternalNodes()
			if len(n) > 2:
				n1 = random.choice(n)[2]
				n2 = random.choice(n)[2]
				r = (n1+n2)/2
			else:
				r = random.randrange(2**256)
			def badNode(error):
				# NOTE(review): 'node' is not defined in this scope (should
				# probably be self), and sys.argv may not contain a
				# host/port here -- this errback would itself fail if the
				# dead code above were re-enabled.
				node.logger.warn("Couldn't ping %s:%s" %
						(sys.argv[1], sys.argv[2]))
			d = self.client.kFindNode(r)
			d.addErrback(badNode)
		# reschedule with jitter so nodes don't ping in lockstep
		pingtime = random.randrange(PINGTIME/2, PINGTIME)
		reactor.callLater(pingtime, self.pingRandom, time.time())
	def syncConfig(self):
		# persist the config, then reschedule ourselves every SYNCTIME secs
		self.config.save()
		reactor.callLater(SYNCTIME, self.syncConfig)
	def start(self, twistd=False):
		""" starts the reactor in this thread """
		self.webserver = FludServer(self, self.config.port)
		self.logger.log(logging.INFO, "FludServer starting")
		reactor.callLater(1, self.pingRandom, time.time())
		# stagger the first config save so many nodes don't sync at once
		reactor.callLater(random.randrange(10), self.syncConfig)
		if not twistd:
			reactor.run()
	def run(self):
		""" starts the reactor in its own thread """
		#signal.signal(signal.SIGINT, self.sighandler)
		signal.signal(signal.SIGINT, signal.SIG_DFL)
		signal.signal(signal.SIGTERM, signal.SIG_DFL)
		self.webserver = FludServer(self, self.config.port)
		self.webserver.start()
		# XXX: need to do save out current config every X seconds
		# XXX: need to separate known_nodes from config, and then update this
		# every X seconds. only update config when it changes.
	def stop(self):
		self.logger.log(logging.INFO, "shutting down FludNode")
		self.webserver.stop()
	def join(self):
		# block until the server (started by run()) exits
		self.webserver.join()
	def sighandler(self, sig, frame):
		self.logger.log(logging.INFO, "handling signal %s" % sig)
	def connectViaGateway(self, host, port):
		"""
		Bootstrap into the network via the node at host:port, then refresh
		every routing bucket (except the one containing our own id) by
		looking up a random id inside it.
		"""
		def refresh(knodes):
			def refreshDone(results):
				self.logger.info("bucket refreshes finished: %s" % results)
				print "flud node connected and listening on port %d"\
						% self.config.port
			#print "found knodes %s" % knodes
			dlist = []
			for bucket in self.config.routing.kBuckets:
				#if True:
				if bucket.begin <= self.config.routing.node[2] < bucket.end:
					pass
					#print "passed on bucket %x-%s" % (bucket.begin, bucket.end)
				else:
					refreshID = random.randrange(bucket.begin, bucket.end)
					#print "refreshing bucket %x-%x by finding %x" \
					#		% (bucket.begin, bucket.end, refreshID)
					self.logger.info("refreshing bucket %x-%x by finding %x"
							% (bucket.begin, bucket.end, refreshID))
					deferred = self.client.kFindNode(refreshID)
					dlist.append(deferred)
			dl = defer.DeferredList(dlist)
			dl.addCallback(refreshDone)
			# XXX: do we need to ping newly discovered known nodes? If not,
			# we could be vulnerable to a poisoning attack (at first
			# glance, this attack seems rather impotent...)
			# XXX: need to call refresh about every 60 minutes. Add a
			# reactor.callLater here to do it.
		def badGW(error):
			self.logger.warn(error)
			# NOTE(review): assumes argv[1]/argv[2] hold the gateway
			# host/port -- only true for particular launch scripts
			self.logger.warn("Couldn't connect to gateway at %s:%s" %
					(sys.argv[1], sys.argv[2]))
		self.logger.debug("connectViaGateway %s%d" % (host, port))
		deferred = self.client.sendkFindNode(host, port,
				self.config.routing.node[2])
		deferred.addCallback(refresh)
		deferred.addErrback(badGW)
def getPath():
	"""Return the directory containing this module (hack used to locate
	FludNode.tac)."""
	here = os.path.abspath(__file__)
	return os.path.dirname(here)
| Python |
"""
CheckboxState.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
CheckboxState represents the states which a checkbox in DirCtrl can take
"""
class CheckboxState:
	"""
	Enumerates the selection states a checkbox in DirCtrl can take,
	numbered 0..5.
	"""
	(UNSELECTED, SELECTED, SELECTEDCHILD, SELECTEDPARENT, EXCLUDED,
		EXCLUDEDCHILD) = range(6)

	@staticmethod
	def offset(oldstate, newstate):
		"""Return the signed distance from oldstate to newstate."""
		return newstate - oldstate
| Python |
class FludException(Exception):
	"""Base class for exceptions raised by the flud package."""
	pass
| Python |
#!/usr/bin/python
"""
FludTestGauges.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Provides gauges for visualizing storage for multiple flud nodes running on
the same host. This is really only useful for demos and testing.
"""
import sys, os, signal, stat, random
import wx
import wx.lib.buttons as buttons
from flud.FludConfig import FludConfig
dutotal = 0
def visit(arg, top, files):
	"""
	os.path.walk() visitor: add the lstat size of every entry named in
	'files' (relative to directory 'top') to the module-global accumulator
	'dutotal'.  'arg' is accepted to satisfy the os.path.walk() callback
	signature but is unused -- the old 'arg += dutotal' line only rebound a
	local int, invisible to the caller, and has been removed.
	"""
	global dutotal
	for file in files:
		dutotal += os.lstat("%s" % (os.path.join(top,file)))[stat.ST_SIZE]
def du(dir):
	"""
	Return the total size in bytes of every entry (files and
	subdirectories, via lstat so symlinks aren't followed) beneath dir.
	The result is also left in the module-global 'dutotal', as the old
	os.path.walk()-based version did.  Uses os.walk() (available since
	py2.3) instead of os.path.walk(), which was removed in Python 3.
	"""
	global dutotal
	total = 0
	for top, dirnames, filenames in os.walk(dir):
		# os.path.walk passed os.listdir() output, which includes
		# subdirectory entries -- count both lists to match
		for name in dirnames + filenames:
			total += os.lstat(os.path.join(top, name))[stat.ST_SIZE]
	dutotal = total
	return dutotal
# XXX: too much manual layout. should convert to a managed layout to allow for
# resizing, etc.
# pixel dimensions for the manually laid-out widgets (see XXX above)
SGAUGEWIDTH = 230 # storage gauge
DGAUGEWIDTH = 100 # dht gauge
GAUGEHEIGHT = 20
ROWHEIGHT = 30    # height of one per-node row
SEP = 5           # padding between widgets
LABELWIDTH = 20
POWERWIDTH = 70   # width of the power on/off button
RATIOBARHEIGHT = 70
COLWIDTH = SGAUGEWIDTH+DGAUGEWIDTH+LABELWIDTH+POWERWIDTH
COLGAPFUDGE = 30
class FludTestGauges(wx.Frame):
	"""
	A wx.Frame that shows, for each test node's FLUDHOME directory under
	dirroot, a gauge of its block-store disk usage, a gauge of its dht
	(metadata) usage, and a power on/off button; plus running totals and a
	store/metadata ratio bar at the bottom.  Refreshed by a 1s timer.
	"""
	def __init__(self, parent, title, dirroot, dirs):
		# lay the rows out in as many columns as needed to fit on screen
		screenHeight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)-100
		rowheight = ROWHEIGHT+SEP
		height = len(dirs)*(rowheight)+RATIOBARHEIGHT
		columns = height / screenHeight + 1
		width = COLWIDTH*columns
		if columns > 1:
			height = (len(dirs)/columns)*(rowheight)+RATIOBARHEIGHT
			if (len(dirs) % columns) > 0:
				height += rowheight
		wx.Frame.__init__(self, parent, wx.ID_ANY, title, size=(width,height),
				style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
		# gauge scaling state: bar end doubles whenever a node outgrows it;
		# divisor/bytelabel pick the K/M/G unit for the totals text
		self.storebarend = 1024
		self.smultiplier = 100.0 / self.storebarend
		self.sdivisor = 1
		self.sbytelabel = ""
		self.dhtbarend = 512
		self.dmultiplier = 100.0 / self.dhtbarend
		self.ddivisor = 1
		self.dbytelabel = ""
		self.storeheading = wx.StaticText(self, -1, "block storage",
				(LABELWIDTH, 5))
		# NOTE(review): self.totaldht is rebound further down, so this
		# heading widget's reference is lost after __init__
		self.totaldht = wx.StaticText(self, -1, "metadata",
				(LABELWIDTH+SGAUGEWIDTH+SEP, 5))
		self.gauges = []
		curCol = 0
		curRow = 30
		for i in range(len(dirs)):
			# per-node row: store gauge + label + node id + dht gauge +
			# power button; extra widgets are hung off the gauge object
			self.gauges.append(wx.Gauge(self, -1, 100,
				(curCol*COLWIDTH+LABELWIDTH, curRow),
				(SGAUGEWIDTH, GAUGEHEIGHT)))
			self.gauges[i].SetBezelFace(3)
			self.gauges[i].SetShadowWidth(3)
			self.gauges[i].SetValue(0)
			self.gauges[i].dir = "%s%s" % (dirroot,dirs[i])
			# point FLUDHOME at this node's dir so FludConfig loads it
			os.environ['FLUDHOME'] = self.gauges[i].dir;
			conf = FludConfig()
			conf.load(doLogging = False)
			print "%s" % conf.nodeID
			self.gauges[i].label = wx.StaticText(self, -1, "%2s" % dirs[i],
					(curCol*COLWIDTH, curRow+(rowheight/4)),
					size=(LABELWIDTH, -1))
			self.gauges[i].idlabel = wx.StaticText(self, -1, "%s" % conf.nodeID,
					(curCol*COLWIDTH+LABELWIDTH, curRow+20))
			font = self.gauges[i].idlabel.GetFont()
			font.SetPointSize(6)
			self.gauges[i].idlabel.SetFont(font)
			self.gauges[i].dhtgauge = wx.Gauge(self, -1, 100,
					(curCol*COLWIDTH+LABELWIDTH+SGAUGEWIDTH+SEP,
						curRow),
					(SGAUGEWIDTH/3, GAUGEHEIGHT))
			self.gauges[i].power = wx.Button(self, i, "turn OFF",
					(curCol*COLWIDTH
						+LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3,
						curRow),
					(POWERWIDTH, ROWHEIGHT))
			#self.gauges[i].power = buttons.GenBitmapToggleButton(self, i,
			#		None,
			#		(LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3, curRow),
			#		(POWERWIDTH, ROWHEIGHT))
			#self.gauges[i].button.SetBestSize()
			self.gauges[i].power.SetToolTipString("power on/off")
			self.Bind(wx.EVT_BUTTON, self.onClick, self.gauges[i].power)
			curRow += rowheight
			if curRow > height-RATIOBARHEIGHT:
				# column full: wrap to the top of the next column
				curCol += 1
				curRow = 30
		self.totalstore = wx.StaticText(self, -1, "total: 0",
				(LABELWIDTH, height-40))
		self.totaldht = wx.StaticText(self, -1, "total: 0",
				(LABELWIDTH+SGAUGEWIDTH+SEP, height-40))
		self.ratiogauge = wx.Gauge(self, -1, 100, (LABELWIDTH, height-20),
				(SGAUGEWIDTH+SEP+SGAUGEWIDTH/3, 10))
		self.ratiogauge.SetValue(0)
		self.Bind(wx.EVT_IDLE, self.IdleHandler)
		# refresh all gauges once a second
		self.timer = wx.PyTimer(self.update)
		self.timer.Start(1000)
	def onClick(self, event):
		"""Power button handler: kill the node's twistd process (saving its
		command line and FLUD* environment for later), or respawn it from
		the saved command."""
		# XXX: note that under our current startNnodes.sh scheme, the first
		# node spawned doesn't contact anyone, so if that one is powered off
		# and then powered back on, it will not be part of the node until
		# another node pings it
		# XXX: unix-specific proc management stuff follows
		idx = event.GetId()
		home = self.gauges[idx].dir
		pidfile = os.path.join(home, 'twistd.pid')
		if os.path.exists(pidfile):
			print "shutting down %s" % home
			f = open(pidfile)
			pid = int(f.read())
			f.close()
			# XXX: ps command no worky on windows, and "-ww" may not worker on
			# oldskool unixes
			self.gauges[idx].savedCmd = os.popen(
					"ps f -wwp %d -o args=" % pid).read()
			procline = os.popen("ps e -wwp %d" % pid).read()
			self.gauges[idx].savedEnv = [e for e in procline.split()
					if e[:4] == 'FLUD']
			# XXX: os.kill no worky on windows, need something like:
			#def windowskill(pid):
			#	import win32api
			#	handle = win32api.OpenProcess(1, 0, pid)
			#	return (0 != win32api.TerminateProcess(handle, 0))
			os.kill(pid, signal.SIGTERM)
			self.gauges[idx].power.SetLabel("turn ON")
			self.gauges[idx].Hide()
			self.gauges[idx].dhtgauge.Hide()
		else:
			print "powering up %s" % home
			# XXX: this exec no worky on windows
			fullcmd = "%s %s" % (' '.join(self.gauges[idx].savedEnv),
					self.gauges[idx].savedCmd)
			print fullcmd
			result = os.popen('%s %s' % (' '.join(self.gauges[idx].savedEnv),
				self.gauges[idx].savedCmd)).readlines()
			self.gauges[idx].power.SetLabel("turn OFF")
			self.gauges[idx].Show()
			self.gauges[idx].dhtgauge.Show()
			print result
	def update(self):
		"""Timer callback: re-measure each node's 'store' and 'dht'
		directories and refresh every gauge, the totals text, and the
		store/metadata ratio bar."""
		def sizeclass(num):
			# pick a display divisor and K/M/G suffix for num bytes
			divisor = 1
			bytelabel = ""
			if num > 1024:
				divisor = 1024.0
				bytelabel = 'K'
			if num > 1048576:
				divisor = 1048576.0
				bytelabel = 'M'
			if num > 1073741824:
				divisor = 1073741824.0
				bytelabel = 'G'
			return (divisor, bytelabel)
		storelargest = 0
		dhtlargest = 0
		storetotal = 0
		dhttotal = 0
		for i in self.gauges:
			if os.path.isdir(i.dir):
				i.storebytes = du(os.path.join(i.dir,'store'))
				if i.storebytes > storelargest:
					storelargest = i.storebytes
				storetotal += i.storebytes
				i.dhtbytes = du(os.path.join(i.dir,'dht'))
				if i.dhtbytes > dhtlargest:
					dhtlargest = i.dhtbytes
				dhttotal += i.dhtbytes
			else:
				# node dir vanished: zero it out and grey out its widgets
				i.storebytes = 0
				i.dhtbytes = 0
				i.Disable()
				i.power.Disable()
		# grow the gauge scale (doubling) until the largest node fits
		while storelargest > self.storebarend:
			self.storebarend = self.storebarend * 2
			self.smultiplier = 100.0 / self.storebarend
		self.sdivisor, self.sbytelabel = sizeclass(storetotal)
		while dhtlargest > self.dhtbarend:
			self.dhtbarend = self.dhtbarend * 2
			self.dmultiplier = 100.0 / self.dhtbarend
		self.ddivisor, self.dbytelabel = sizeclass(dhttotal)
		#print "-----"
		for i in self.gauges:
			i.SetValue(i.storebytes*self.smultiplier)
			i.dhtgauge.SetValue(i.dhtbytes*self.dmultiplier)
			#print "%.2f, %.2f" % ((float(i.storebytes)/float(i.dhtbytes)),
			#		(float(i.GetValue())/float(i.dhtgauge.GetValue())))
		self.totalstore.SetLabel("total: %.1f%s"
				% (float(storetotal)/self.sdivisor, self.sbytelabel))
		self.totaldht.SetLabel("total: %.1f%s"
				% (float(dhttotal)/self.ddivisor, self.dbytelabel))
		if (dhttotal+storetotal == 0):
			self.ratiogauge.SetValue(0)
		else:
			self.ratiogauge.SetValue((storetotal*100/(dhttotal+storetotal)))
	def updateGauges(self, update):
		# NOTE(review): self.monitors is never assigned anywhere in this
		# class -- calling this would raise AttributeError; appears to be
		# leftover from an earlier revision.
		for index, value in update:
			self.monitors[index].setValue(value)
	def IdleHandler(self, event):
		# no idle-time work needed; bound so idle events are consumed
		pass
def main():
	"""Parse a common directory prefix and an extension list from argv and
	launch the gauges frame.  Extensions may be single values ('1,2,3'),
	inclusive ranges ('1-10'), or '_' meaning dircommon itself."""
	if len(sys.argv) < 2:
		print "usage: %s dircommon exts" % sys.argv[0]
		print "   where exts will be appended to dircommon"
		print "   e.g., '%s /home/joe/.flud 1,2,3,4,10,15,20'"\
				% sys.argv[0]
		print "   or,   '%s /home/joe/.flud 1-10,15,20'"\
				% sys.argv[0]
		sys.exit()
	root = sys.argv[1]
	exts = []
	dirs = [d.strip() for d in sys.argv[2].split(',')]
	for i in dirs:
		if i == "_":
			exts.append('') # undocumented, means "just dircommon"
		elif i.find('-') >= 0:
			# expand an inclusive range like '1-10'
			start, end = i.split('-')
			for j in range(int(start),int(end)+1):
				exts.append(j)
		else:
			exts.append(int(i))
	app = wx.PySimpleApp()
	t = FludTestGauges(None, 'Flud Test Gauges', root, exts)
	t.Show(1)
	app.MainLoop()
# script entry point: build the wx app and run the gauges UI
if __name__ == '__main__':
	main()
| Python |
import os, stat, sys, tarfile, tempfile
import gzip
from flud.FludCrypto import hashstream
from flud.fencode import fencode
"""
TarfileUtils.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Provides additional tarfile functionality (deletion of a member from a
tarball, and concatenation of tarballs).
"""
def delete(tarball, membernames):
	"""
	Deletes a member file[s] from a tarball (plain or .tar.gz).  Returns
	the names of deleted members if they are removed, False if the file[s]
	aren't members.  If membernames contains all the members in the
	tarball, the entire tarball file is deleted and True is returned.

	The archive is rewritten by copying raw 512-byte blocks and skipping
	the headers+data of the doomed members (the tarfile module cannot
	delete in place).
	"""
	gzipped = tarball[-7:] == ".tar.gz"
	if gzipped:
		f = tarfile.open(tarball, 'r:gz')
	else:
		f = tarfile.open(tarball, 'r')
	if not isinstance(membernames, list):
		membernames = [membernames]
	tarnames = f.getnames()
	# build a filtered list -- the old code removed entries from
	# membernames while iterating it, which silently skips elements
	membernames = [m for m in membernames if m in tarnames]
	if len(membernames) < 1:
		f.close()
		return False
	if len(tarnames) == len(membernames):
		# every member is doomed: just delete the whole archive
		f.close()
		os.remove(tarball)
		return True
	f.close()
	if gzipped:
		tarball = gunzipTarball(tarball)
	f = open(tarball, 'rb+')
	tfile = tempfile.mktemp()
	if gzipped:
		f2 = gzip.GzipFile(tfile, 'wb')
	else:
		f2 = open(tfile, 'wb')
	empty = tarfile.BLOCKSIZE * b'\0'
	removednames = []
	while True:
		header = f.read(tarfile.BLOCKSIZE)
		if header == b"":
			break
		elif header == empty:
			# end-of-archive / padding blocks: copy through
			f2.write(header)
		else:
			# ustar name field is 100 bytes at offset 0 (the old [0:99]
			# slice truncated 100-char names); NUL-terminated
			name = header[0:100].split(b'\0', 1)[0].decode('latin-1')
			# size field: 11 octal digits at offset 124
			size = int(header[124:135], 8)
			blocks = size // tarfile.BLOCKSIZE
			if (size % tarfile.BLOCKSIZE) > 0:
				blocks += 1
			if name in membernames:
				# skip this member's data blocks entirely
				f.seek(blocks * tarfile.BLOCKSIZE, 1)
				removednames.append(name)
			else:
				f2.write(header)
				for _ in range(blocks):
					f2.write(f.read(tarfile.BLOCKSIZE))
	f2.close()
	f.close()
	if gzipped:
		os.remove(tarball)
		tarball = tarball+".gz"
	os.rename(tfile, tarball)
	return removednames
def concatenate(tarfile1, tarfile2):
	"""
	Combines tarfile1 and tarfile2 into tarfile1. tarfile1 is modified in
	the process, and tarfile2 is deleted.  Either file may independently
	be gzipped (.tar.gz); the result keeps tarfile1's compression.

	Works on raw 512-byte tar blocks: finds tarfile1's end-of-archive
	marker (two consecutive zero blocks), truncates there, and streams
	tarfile2's blocks in.
	"""
	gzipped = tarfile1[-7:] == ".tar.gz"
	if gzipped:
		f1 = gzip.GzipFile(tarfile1, 'rb')
		tarfile1 = tarfile1[:-3]
		f1unzip = open(tarfile1, 'wb')
		f1unzip.write(f1.read())
		f1unzip.close()
		f1.close()
		os.remove(tarfile1+".gz")
	f = open(tarfile1, "rb+")
	empty = tarfile.BLOCKSIZE * b'\0'
	emptyblockcount = 0
	# phase 1: scan tarfile1 to its end-of-archive marker
	while True:
		header = f.read(tarfile.BLOCKSIZE)
		if header == b"":
			print("error: end of archive not found")
			return
		elif header == empty:
			emptyblockcount += 1
			if emptyblockcount == 2:
				break
		else:
			emptyblockcount = 0
			# size field is octal; the old eval() only worked because py2
			# treats leading-zero literals as octal, and eval'ing archive
			# data is unsafe besides
			fsize = int(header[124:135], 8)
			# ceil(fsize / BLOCKSIZE); the old round(x + 0.5) formula
			# skipped one block too many when fsize was an exact multiple
			# of BLOCKSIZE, corrupting the scan
			skip = (fsize + tarfile.BLOCKSIZE - 1) // tarfile.BLOCKSIZE
			f.seek(skip*tarfile.BLOCKSIZE, 1)
	# truncate the file to the spot before the end-of-tar marker
	trueend = f.tell() - (tarfile.BLOCKSIZE*2)
	f.seek(trueend)
	f.truncate()
	# phase 2: append the contents of the second tarfile at this spot
	if tarfile2[-7:] == ".tar.gz":
		f2 = gzip.GzipFile(tarfile2, 'rb')
	else:
		f2 = open(tarfile2, "rb")
	emptyblockcount = 0  # reset (the old code carried 2 over from phase 1)
	while True:
		header = f2.read(tarfile.BLOCKSIZE)
		if header == b"":
			# ran out of data without an end marker: restore a valid
			# end-of-archive marker and bail
			print("error: end of archive not found")
			f.seek(trueend)
			f.write(empty*2)
			return
		f.write(header)
		if header == empty:
			emptyblockcount += 1
			if emptyblockcount == 2:
				break
		else:
			emptyblockcount = 0
			fsize = int(header[124:135], 8)
			bsize = (fsize + tarfile.BLOCKSIZE - 1) // tarfile.BLOCKSIZE
			# XXX: break this up if large
			data = f2.read(bsize*tarfile.BLOCKSIZE)
			f.write(data)
	f2.close()
	f.close()
	if gzipped:
		# re-compress the combined archive back to tarfile1 + '.gz'
		f2 = gzip.GzipFile(tarfile1+".gz", 'wb')
		f = open(tarfile1, 'rb')
		f2.write(f.read())
		f2.close()
		f.close()
		os.remove(tarfile1)
	# and delete the second tarfile
	os.remove(tarfile2)
def verifyHashes(tarball, ignoreExt=None):
    """Walk *tarball* header-by-header and check that each member's name is
    the fencoded hash of its contents (members ending in *ignoreExt* are
    skipped).  Returns the list of verified member names, or [] on the
    first mismatch."""
    # return all the names of files in this tarball if hash checksum passes,
    # otherwise return False
    digests = []
    done = False
    if tarball[-7:] == ".tar.gz":
        # NOTE(review): 'r:gz' is a tarfile-style mode string, not a gzip
        # one; gzip.GzipFile only honors the leading 'r', so this works by
        # accident.  Should be 'rb'.
        f = gzip.GzipFile(tarball, 'r:gz')
    else:
        f = open(tarball, 'r')
    empty = tarfile.BLOCKSIZE * '\0'
    while not done:
        bytes = f.read(tarfile.BLOCKSIZE)
        if bytes == "":
            # physical end of file
            done = True
        elif bytes == empty:
            # all-zero padding / end-of-archive marker block: skip
            pass
        else:
            if bytes[0] == '\0' and bytes[124] == '\0':
                print "WARNING: read nulls when expecting file header"
                break
            # ustar header: name field is bytes 0-99, size field (octal)
            # at 124.  NOTE(review): the name field is 100 bytes, so the
            # [0:99] slice drops the last byte of a maximal-length name --
            # TODO confirm against the writer side.
            name = bytes[0:99]
            name = name[:name.find(chr(0))]
            size = int(bytes[124:135], 8)
            blocks = size / tarfile.BLOCKSIZE
            if ignoreExt and name[-len(ignoreExt):] == ignoreExt:
                # gzip doesn't support f.seek(size, 1)
                f.seek(f.tell()+size)
            else:
                # hashstream() consumes exactly *size* bytes from f
                digest = hashstream(f, size)
                digest = fencode(int(digest,16))
                if name == digest:
                    #print "%s == %s" % (name, digest)
                    digests.append(name)
                else:
                    # first bad digest aborts the whole verification
                    #print "%s != %s" % (name, digest)
                    f.close()
                    return []
            # advance past the zero-padding to the next 512-byte boundary
            if (size % tarfile.BLOCKSIZE) > 0:
                blocks += 1
            f.seek((blocks * tarfile.BLOCKSIZE) - size + f.tell())
    f.close()
    return digests
def gzipTarball(tarball):
    """Gzip-compress *tarball* in place.

    Returns the new '<tarball>.gz' path, or None if *tarball* does not end
    in '.tar'.  The uncompressed original is removed on success.
    """
    if tarball[-4:] != '.tar':
        return None
    # use open() rather than the removed file() builtin, and close both
    # handles explicitly (the original leaked the input handle) so the
    # compressed data is fully flushed before the original is unlinked
    src = open(tarball, 'rb')
    try:
        dst = gzip.GzipFile(tarball + ".gz", 'wb')
        try:
            dst.write(src.read())
        finally:
            dst.close()
    finally:
        src.close()
    os.remove(tarball)
    return tarball + ".gz"
def gunzipTarball(tarball):
    """Decompress a gzipped tarball in place.

    Returns the decompressed path (name minus '.gz'), or None if
    *tarball* does not end in '.gz'.  The compressed file is removed on
    success.
    """
    if tarball[-3:] != '.gz':
        return None
    # open() instead of the removed file() builtin; close both handles
    # explicitly (the original leaked the output handle) so all data is
    # flushed before the compressed file is unlinked
    src = gzip.GzipFile(tarball, 'rb')
    try:
        dst = open(tarball[:-3], 'wb')
        try:
            dst.write(src.read())
        finally:
            dst.close()
    finally:
        src.close()
    os.remove(tarball)
    return tarball[:-3]
if __name__ == "__main__":
    # Command-line driver: -d deletes members, -c concatenates two
    # tarballs, -v verifies member-name digests.
    # NOTE(review): sys.argv[1] is read before checking that any argument
    # exists, so running with no arguments raises IndexError instead of
    # printing the usage text.
    if (len(sys.argv) < 4 or sys.argv[1] != "-d") \
            and (len(sys.argv) != 4 or sys.argv[1] != "-c") \
            and sys.argv[1] != "-v":
        print "usage: [-d tarfile tarfilemembers]\n"\
                +" [-c tarfile1 tarfile2]\n"\
                +" [-v tarfile]\n"\
                +" -d deletes tarfilemembers from tarfile,\n"\
                +" -c concatenates tarfile1 and tarfile2 into tarfile1\n"\
                +" -v verifies that the names of files in tarfile are sha256\n"
        sys.exit(-1)
    if sys.argv[1] == "-d":
        # delete() returns the members it actually removed
        deleted = delete(sys.argv[2], sys.argv[3:])
        if deleted == sys.argv[3:]:
            print "%s successfully deleted from %s" % (deleted, sys.argv[2])
        else:
            faileddeletes = [x for x in sys.argv[3:] if x not in deleted]
            print "could not delete %s from %s" % (faileddeletes, sys.argv[2])
    elif sys.argv[1] == "-c":
        # append tarfile2's members onto tarfile1 (tarfile2 is consumed)
        concatenate(sys.argv[2], sys.argv[3])
        print "concatenated %s and %s into %s" % (sys.argv[2], sys.argv[3],
                sys.argv[2])
    elif sys.argv[1] == "-v":
        digests = verifyHashes(sys.argv[2])
        if digests:
            print "verified tarfile member digests for: %s" % digests
        else:
            print "some tarfile members failed digest check"
| Python |
#!/usr/bin/python
"""
FludNode.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
FludNode is the process that runs to talk with other nodes in the flud backup network.
"""
from twisted.internet import reactor, defer
import threading, signal, sys, time, os, random, logging
from flud.FludConfig import FludConfig
from flud.protocol.FludServer import FludServer
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import getCanonicalIP
PINGTIME=60
SYNCTIME=900
class FludNode(object):
    """
    A node in the flud network.  A node is both a client and a server.  It
    listens on a network accessible port for both DHT and Storage layer
    requests, and it listens on a local port for client interface commands.
    """

    def __init__(self, port=None):
        # port: server (DHT/storage) listen port; None lets FludConfig pick.
        self._initLogger()
        self.config = FludConfig()
        # FludConfig.load() installs its own logging; drop the bootstrap
        # screen handler so messages are not duplicated.
        self.logger.removeHandler(self.screenhandler)
        self.config.load(serverport=port)
        self.client = FludClient(self)
        # earliest time at which pingRandom's refresh logic may run
        self.DHTtstamp = time.time()+10

    def _initLogger(self):
        # Bootstrap 'flud' logger, used until FludConfig takes over.
        logger = logging.getLogger('flud')
        self.screenhandler = logging.StreamHandler()
        self.screenhandler.setLevel(logging.INFO)
        logger.addHandler(self.screenhandler)
        self.logger = logger

    def pingRandom(self, tstamp):
        # Currently disabled: the bare 'return' below makes the rest of
        # this method dead code (kept as a sketch of Kademlia bucket
        # refresh).
        return
        # XXX: see pg. 4, Section 2.2 (short) or 2.3 (long) of the Kademlia
        # paper -- once an hour, each node should check any buckets that
        # haven't been refreshed and pick a random id within that space
        # to findnode(id) on, for all buckets.
        if tstamp < self.DHTtstamp:
            #r = random.randrange(2**256)
            n = self.config.routing.knownExternalNodes()
            if len(n) > 2:
                # pick an ID between two known node IDs
                n1 = random.choice(n)[2]
                n2 = random.choice(n)[2]
                r = (n1+n2)/2
            else:
                r = random.randrange(2**256)
            def badNode(error):
                # NOTE(review): references module-global 'node' and
                # sys.argv, not self/host -- looks copy-pasted; verify.
                node.logger.warn("Couldn't ping %s:%s" %
                        (sys.argv[1], sys.argv[2]))
            d = self.client.kFindNode(r)
            d.addErrback(badNode)
        # reschedule at a jittered interval
        pingtime = random.randrange(PINGTIME/2, PINGTIME)
        reactor.callLater(pingtime, self.pingRandom, time.time())

    def syncConfig(self):
        # Periodically persist the node configuration to disk.
        self.config.save()
        reactor.callLater(SYNCTIME, self.syncConfig)

    def start(self, twistd=False):
        """ starts the reactor in this thread """
        self.webserver = FludServer(self, self.config.port)
        self.logger.log(logging.INFO, "FludServer starting")
        reactor.callLater(1, self.pingRandom, time.time())
        reactor.callLater(random.randrange(10), self.syncConfig)
        if not twistd:
            # under twistd, the reactor is run by the daemon framework
            reactor.run()

    def run(self):
        """ starts the reactor in its own thread """
        #signal.signal(signal.SIGINT, self.sighandler)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.webserver = FludServer(self, self.config.port)
        self.webserver.start()
        # XXX: need to do save out current config every X seconds
        # XXX: need to seperate known_nodes from config, and then update this
        # every X seconds. only update config when it changes.

    def stop(self):
        self.logger.log(logging.INFO, "shutting down FludNode")
        self.webserver.stop()

    def join(self):
        # Block until the server thread exits (threaded run() mode only).
        self.webserver.join()

    def sighandler(self, sig, frame):
        # Unused: run() installs SIG_DFL instead of this handler.
        self.logger.log(logging.INFO, "handling signal %s" % sig)

    def connectViaGateway(self, host, port):
        # Bootstrap into the DHT through a known gateway node, then
        # refresh every k-bucket that does not contain our own node ID.
        def refresh(knodes):
            def refreshDone(results):
                self.logger.info("bucket refreshes finished: %s" % results)
                print "flud node connected and listening on port %d"\
                        % self.config.port
            #print "found knodes %s" % knodes
            dlist = []
            for bucket in self.config.routing.kBuckets:
                #if True:
                if bucket.begin <= self.config.routing.node[2] < bucket.end:
                    # our own bucket is kept fresh by normal traffic
                    pass
                    #print "passed on bucket %x-%s" % (bucket.begin, bucket.end)
                else:
                    # look up a random ID inside the bucket's range to
                    # populate it
                    refreshID = random.randrange(bucket.begin, bucket.end)
                    #print "refreshing bucket %x-%x by finding %x" \
                    #        % (bucket.begin, bucket.end, refreshID)
                    self.logger.info("refreshing bucket %x-%x by finding %x"
                            % (bucket.begin, bucket.end, refreshID))
                    deferred = self.client.kFindNode(refreshID)
                    dlist.append(deferred)
            dl = defer.DeferredList(dlist)
            dl.addCallback(refreshDone)
            # XXX: do we need to ping newly discovered known nodes? If not,
            # we could be vulnerable to a poisoning attack (at first
            # glance, this attack seems rather impotent...)
            # XXX: need to call refresh about every 60 minutes. Add a
            # reactor.callLater here to do it.

        def badGW(error):
            # NOTE(review): logs sys.argv rather than host/port arguments.
            self.logger.warn(error)
            self.logger.warn("Couldn't connect to gateway at %s:%s" %
                    (sys.argv[1], sys.argv[2]))

        # NOTE(review): format string "%s%d" has no separator between host
        # and port in the log line.
        self.logger.debug("connectViaGateway %s%d" % (host, port))
        deferred = self.client.sendkFindNode(host, port,
                self.config.routing.node[2])
        deferred.addCallback(refresh)
        deferred.addErrback(badGW)
def getPath():
    """Return the directory containing this module (a hack used to locate
    FludNode.tac)."""
    modulefile = os.path.abspath(__file__)
    return os.path.dirname(modulefile)
| Python |
#!/usr/bin/python
"""
FludScheduler.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
FludScheduler is the process that monitors files for changes, and then tells flud to
back them up.
"""
import sys, os, time, stat
from twisted.internet import reactor
from flud.FludConfig import FludConfig
from flud.protocol.LocalClient import *
from flud.CheckboxState import CheckboxState
CHECKTIME=5
class FludScheduler:
    """
    Periodically re-reads the file-selection config (fludfile.conf),
    sweeps the selected paths for changes, and asks the flud node (via
    *factory*) to store any files that changed.
    """

    def __init__(self, config, factory):
        # config: FludConfig instance.
        # factory: local-client factory used to talk to the flud node.
        self.config = config
        self.factory = factory
        self.fileconfigfile = None       # path to fludfile.conf once located
        self.fileconfigfileMTime = 0     # mtime of fludfile.conf at last read
        self.fileChangeTime = 0          # timestamp of the last fs sweep
        self.fileconfigSelected = set()  # paths selected for backup
        self.fileconfigExcluded = set()  # paths explicitly excluded
        self.getMasterMetadata()

    def getMasterMetadata(self):
        # Ask the node for the master metadata (mapping of stored files).
        d = self.factory.sendLIST()
        d.addCallback(self.gotMasterMetadata)
        d.addErrback(self.errMasterMetadata)
        return d

    def gotMasterMetadata(self, master):
        self.mastermetadata = master

    def errMasterMetadata(self, err):
        # Cannot schedule anything without master metadata; shut down.
        print err
        reactor.stop()

    def readFileConfig(self, mtime=None):
        # (Re)parse fludfile.conf into the selected/excluded path sets.
        # NOTE(review): eval() executes arbitrary code from the config
        # file -- a literal-only parser would be safer.
        print "reading FileConfig"
        file = open(self.fileconfigfile, 'r')
        self.fileconfig = eval(file.read())
        file.close()
        if mtime:
            self.fileconfigfileMTime = mtime
        else:
            self.fileconfigfileMTime = os.stat(
                    self.fileconfigfile)[stat.ST_MTIME]
        # fileconfig maps path -> CheckboxState value
        self.fileconfigSelected = set([f for f in self.fileconfig
                if self.fileconfig[f] == CheckboxState.SELECTED
                or self.fileconfig[f] == CheckboxState.SELECTEDCHILD])
        self.fileconfigExcluded = set([f for f in self.fileconfig
                if self.fileconfig[f] == CheckboxState.EXCLUDED
                or self.fileconfig[f] == CheckboxState.EXCLUDEDCHILD])

    # The file[s]ChangeStat are the worst possible way to detect file changes.
    # Much more efficient to use inotify/dnotify/fam/gamin/etc., as well as
    # more correct (no way to detect cp -a or -p, for example, with stat).
    # But, these are a fallback method when those aren't present, and are fine
    # for testing.
    def fileChangedStat(self, file, fileChangeTime=None):
        # Return True if *file* looks newer than the reference time: the
        # master metadata's recorded time when present, otherwise
        # fileChangeTime (defaulting to self.fileChangeTime).  Paths that
        # are neither a file nor a directory always count as changed.
        if os.path.isfile(file) or os.path.isdir(file):
            mtime = os.stat(file)[stat.ST_MTIME]
            if not fileChangeTime:
                fileChangeTime = self.fileChangeTime
            if file in self.mastermetadata:
                # assumes mastermetadata[file][1] is a timestamp -- TODO
                # confirm against the node's LIST response format
                fileChangeTime = self.mastermetadata[file][1]
        else:
            return True
        print "mtime = %s, ctime = %s (%s)" % (mtime, fileChangeTime, file)
        if mtime > fileChangeTime:
            return True
        return False

    def filesChangedStat(self, files, fileChangeTime=None):
        # Filter *files* down to those reported changed by fileChangedStat.
        result = []
        for f in files:
            if self.fileChangedStat(f, fileChangeTime):
                result.append(f)
        return result

    # Change these to point to something other than the xxxStat() methods
    def fileChanged(self, file, fileChangeTime=None):
        """
        >>> now = time.time()
        >>> f1 = tmpfile.mktemp()
        >>>
        """
        return self.fileChangedStat(file, fileChangeTime)

    def filesChanged(self, files, fileChangeTime=None):
        return self.filesChangedStat(files, fileChangeTime)

    def checkFileConfig(self):
        # check config file to see if it has changed, then reparse it.
        # Returns True if the config was (re)read.
        if not self.fileconfigfile:
            # first time through: locate fludfile.conf under
            # $FLUDHOME, $HOME/.flud, or ./.flud
            print "checking fileconfigfile (initial)"
            if os.environ.has_key('FLUDHOME'):
                fludhome = os.environ['FLUDHOME']
            elif os.environ.has_key('HOME'):
                fludhome = os.environ['HOME']+"/.flud"
            else:
                fludhome = ".flud"
            # XXX: fludfile.conf should be in config
            self.fileconfigfile = os.path.join(fludhome, "fludfile.conf")
            if os.path.isfile(self.fileconfigfile):
                self.readFileConfig()
                return True
            else:
                print "no fileconfigfile to read"
        elif os.path.isfile(self.fileconfigfile):
            if self.fileChanged(self.fileconfigfile, self.fileconfigfileMTime):
                print "fileconfigfile changed"
                mtime = time.time()
                self.readFileConfig(mtime)
                return True
        return False

    def checkFilesystem(self):
        # Sweep all selected paths (recursing into directories), skipping
        # excluded paths and paths already in master metadata; returns the
        # set of changed files and records the sweep time.
        checkedFiles = set()
        changedFiles = set()
        def checkList(list):
            #print "checkList: %s" % list
            #print "checkedFiles: %s" % checkedFiles
            for entry in list:
                # XXX: if entry is in master metadata, and its mtime is not
                # earlier than the time used by fileChanged, skip it (add 'and'
                # clause)
                if entry not in checkedFiles and \
                        entry not in self.fileconfigExcluded and \
                        entry not in self.mastermetadata:
                    print "checkFilesystem for %s" % entry
                    if os.path.isdir(entry):
                        #print "dir %s" % entry
                        dirfiles = [os.path.join(entry, i)
                                for i in os.listdir(entry)]
                        checkedFiles.update([entry,])
                        checkList(dirfiles)
                    elif self.fileChanged(entry):
                        print "%s changed" % entry
                        if os.path.isfile(entry):
                            changedFiles.update([entry,])
                            #print "file %s changed" % entry
                        else:
                            # not a regular file (socket, fifo, ...)
                            print "entry ?? %s ?? changed" % entry
                    checkedFiles.update([entry,])
        checkList(self.fileconfigSelected)
        self.fileChangeTime = time.time()
        return changedFiles

    def storefileFailed(self, err, file):
        print "storing %s failed: %s" % (file, err)
        err.printTraceback()
        #print dir(err)

    def storefileYay(self, r, file):
        print "storing %s success" % file

    def storeFiles(self, changedFiles):
        # Fire a PUTF for each changed file; returns a DeferredList that
        # fires when all stores have completed (or failed).
        #print "storing %s" % changedFiles
        dlist = []
        for f in changedFiles:
            print "storing %s" % f
            deferred = self.factory.sendPUTF(f)
            deferred.addCallback(self.storefileYay, f)
            deferred.addErrback(self.storefileFailed, f)
            dlist.append(deferred)
        dl = defer.DeferredList(dlist)
        return dl
        #return defer.succeed(True)

    def restartCheckTimer(self, v):
        print "restarting timer (%d) to call run()" % CHECKTIME
        reactor.callLater(CHECKTIME, self.run)

    def updateMasterMetadata(self, v):
        return self.getMasterMetadata()

    def run(self):
        # One scheduler cycle: re-read config if stale, sweep the
        # filesystem, store changed files, refresh master metadata, and
        # reschedule via restartCheckTimer.
        print "run"
        self.checkFileConfig()
        changedFiles = self.checkFilesystem()
        print "%s changed" % changedFiles
        d = self.storeFiles(changedFiles)
        d.addBoth(self.updateMasterMetadata)
        d.addBoth(self.restartCheckTimer)
| Python |
#!/usr/bin/python
"""
FludLocalClient.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
FludLocalClient provides a command-line client for interacting with FludNode.
"""
import sys, os, time
from twisted.internet import reactor
from flud.FludConfig import FludConfig
from flud.fencode import fencode, fdecode
from flud.FludCrypto import hashfile
from protocol.LocalClient import *
logger = logging.getLogger('flud')
class CmdClientFactory(LocalClientFactory):
    """
    Interactive command-line client: reads commands from the user in a
    worker thread and dispatches them to the local flud node through the
    twisted reactor, queueing results for display at the next prompt.
    """

    def __init__(self, config):
        LocalClientFactory.__init__(self, config)
        self.quit = False   # set True to leave the prompt loop
        self.msgs = []      # queued (result, description) pairs to display

    def callFactory(self, func, commands, msgs):
        # since we can't call factory methods from the promptUser thread, we
        # use this as a convenience to put those calls back in the event loop
        reactor.callFromThread(self.doFactoryMethod, func, commands, msgs)

    def doFactoryMethod(self, func, commands, msgs):
        # Invoke *func* (returns a Deferred) and queue its outcome.
        d = func()
        d.addCallback(self.queueResult, msgs, '%s succeeded' % commands)
        d.addErrback(self.queueError, msgs, '%s failed' % commands)
        return d

    def promptUser(self):
        # Runs in a worker thread (threads.deferToThread): read one command
        # line, key off the first four characters, and dispatch.
        helpDict = {}
        command = raw_input("%s> " % time.ctime())
        commands = command.split(' ') # XXX: should tokenize on any whitespace
        commandkey = commands[0][:4]

        # core client operations
        helpDict['exit'] = "exit from the client"
        helpDict['help'] = "display this help message"
        helpDict['ping'] = "send a GETID() message: 'ping host port'"
        helpDict['putf'] = "store a file: 'putf canonicalfilepath'"
        helpDict['getf'] = "retrieve a file: 'getf canonicalfilepath'"
        helpDict['geti'] = "retrieve a file by CAS key: 'geti fencodedCASkey'"
        helpDict['fndn'] = "send a FINDNODE() message: 'fndn hexIDstring'"
        helpDict['list'] = "list stored files (read from local metadata)"
        helpDict['putm'] = "store master metadata"
        helpDict['getm'] = "retrieve master metadata"
        helpDict['cred'] = "send encrypted private credentials: cred"\
                " passphrase emailaddress"
        helpDict['node'] = "list known nodes"
        helpDict['buck'] = "print k buckets"
        helpDict['stat'] = "show pending actions"
        helpDict['stor'] = "store a block to a given node:"\
                " 'stor host:port,fname'"
        helpDict['rtrv'] = "retrieve a block from a given node:"\
                " 'rtrv host:port,fname'"
        helpDict['vrfy'] = "verify a block on a given node:"\
                " 'vrfy host:port:offset-length,fname'"
        helpDict['fndv'] = "retrieve a value from the DHT: 'fndv hexkey'"
        helpDict['dlet'] = "delete from the stor: '[XXX]'"

        if commandkey == 'exit' or commandkey == 'quit':
            self.quit = True
        elif commandkey == 'help':
            self.printHelp(helpDict)
        elif commandkey == 'ping':
            # ping a host
            # format: 'ping host port'
            func = lambda: self.sendPING(commands[1], commands[2])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'putf':
            # store a file
            # format: 'putf canonicalfilepath'
            func = lambda: self.sendPUTF(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'getf':
            # retrieve a file
            # format: 'getf canonicalfilepath'
            func = lambda: self.sendGETF(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'geti':
            # retrieve a file by CAS ID
            # format: 'geti fencoded_CAS_ID'
            func = lambda: self.sendGETI(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'fndn':
            # find a node (or the k-closest nodes)
            # format: 'fndn hexIDstring'
            func = lambda: self.sendFNDN(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'list':
            # list stored files
            self.callFactory(self.sendLIST, commands, self.msgs)
        elif commandkey == 'putm':
            # store master metadata
            self.callFactory(self.sendPUTM, commands, self.msgs)
        elif commandkey == 'getm':
            # retrieve master metadata
            self.callFactory(self.sendGETM, commands, self.msgs)
        elif commandkey == 'cred':
            # send encrypted private credentials to an email address
            # format: 'cred passphrase emailaddress'
            # (the slice extracts the passphrase, which may contain spaces,
            # between the command word and the trailing email address)
            func = lambda: self.sendCRED(
                    command[len(commands[0])+1:-len(commands[-1])-1],
                    commands[-1])
            self.callFactory(func, commands, self.msgs)

        # the following are diagnostic operations, debug-only utility
        elif commandkey == 'node':
            # list known nodes
            self.callFactory(self.sendDIAGNODE, commands, self.msgs)
        elif commandkey == 'buck':
            # show k-buckets
            self.callFactory(self.sendDIAGBKTS, commands, self.msgs)
        elif commandkey == 'stat':
            # show pending actions
            print self.pending
        elif commandkey == 'stor':
            # stor a block to a given node. format: 'stor host:port,fname'
            storcommands = commands[1].split(',')
            try:
                # if the second field already parses as a hex CAS id, use
                # it as-is; otherwise treat it as a filename
                fileid = int(storcommands[1], 16)
            except:
                # symlink the file under its fencoded hash so the node
                # stores it under its CAS name
                linkfile = fencode(long(hashfile(storcommands[1]),16))
                if (os.path.islink(linkfile)):
                    os.remove(linkfile)
                os.symlink(storcommands[1], linkfile)
                storcommands[1] = linkfile
                # XXX: delete this file when the command finishes
                commands[1] = "%s,%s" % (storcommands[0], storcommands[1])
            func = lambda: self.sendDIAGSTOR(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'rtrv':
            # retrive a block from a given node. format: 'rtrv host:port,fname'
            func = lambda: self.sendDIAGRTRV(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'vrfy':
            # verify a block on a given node.
            # format: 'vrfy host:port:offset-length,fname'
            logger.debug("vrfy(%s)" % commands[1])
            func = lambda: self.sendDIAGVRFY(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'dlet':
            print "not yet implemented"
        elif commandkey == 'fndv':
            # try to retrieve a value from the DHT
            # format: 'fndv key'
            func = lambda: self.sendDIAGFNDV(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif command != "":
            reactor.callFromThread(self.queueError, None, self.msgs,
                    "illegal command '%s'" % command)

    def queueResult(self, r, l, msg):
        logger.debug("got result %s" % msg)
        l.append((r, msg))

    def queueError(self, r, l, msg):
        # r is a Failure (or None for synthesized errors).
        logger.debug("got error %s" % msg)
        if r:
            l.append((r.getErrorMessage(), msg))
        else:
            l.append((None, msg))

    def printHelp(self, helpDict):
        helpkeys = helpDict.keys()
        helpkeys.sort()
        for i in helpkeys:
            print "%s:\t %s" % (i, helpDict[i])

    def output(self):
        # Print and clear completed pending operations.
        # NOTE(review): pops entries while iterating keys(); safe on
        # Python 2 because keys() returns a list snapshot.
        for c in self.pending:
            for i in self.pending[c].keys():
                if self.pending[c][i] == True:
                    print "%s on %s completed successfully" % (c, i)
                    self.pending[c].pop(i)
                elif self.pending[c][i] == False:
                    print "%s on %s failed" % (c, i)
                    self.pending[c].pop(i)
                else:
                    print "%s on %s pending" % (c, i)

    def promptLoop(self, r):
        # Drain queued messages, then hand the blocking prompt to a
        # worker thread and reschedule ourselves when it finishes.
        self.output()
        while len(self.msgs) > 0:
            # this prints in reverse order, perhaps pop() all into a new list,
            # reverse, then print
            (errmsg, m) = self.msgs.pop()
            if errmsg:
                print "<- %s:\n%s" % (m, errmsg)
            else:
                print "<- %s" % m
        if self.quit:
            reactor.stop()
        else:
            d = threads.deferToThread(self.promptUser)
            d.addCallback(self.promptLoopDelayed)
            d.addErrback(self.err)

    def promptLoopDelayed(self, r):
        # give the reactor loop time to fire any quick cbs/ebs
        reactor.callLater(0.1, self.promptLoop, r)

    def clientConnectionLost(self, connector, reason):
        # Suppress reconnect/complaint handling when quitting on purpose.
        if not self.quit:
            LocalClientFactory.clientConnectionLost(self, connector, reason)

    def cleanup(self, msg):
        self.quit = True
        self.err(msg)

    def err(self, r):
        # Fatal-path exit: flush output and stop the reactor.
        self.output()
        print "bah!: %s" % r
        reactor.stop()
def main():
config = FludConfig()
config.load(doLogging=False)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler('/tmp/fludclient.log')
formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)d'
' %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
factory = CmdClientFactory(config)
if len(sys.argv) == 2:
config.clientport = int(sys.argv[1])
print "connecting to localhost:%d" % config.clientport
reactor.connectTCP('localhost', config.clientport, factory)
factory.promptLoop(None)
reactor.run()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
"""
FludClient.py, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
FludClient provides a GUI Client for interacting with FludNode.
"""
#from twisted.internet import wxreactor
#wxreactor.install()
import sys, os, string, time, glob
import wx
import wx.lib.mixins.listctrl as listmix
import wx.lib.editor.editor
from flud.protocol.LocalClient import *
from flud.FludConfig import FludConfig
from flud.CheckboxState import CheckboxState
FLUSHCHECKTIME = 5*60 # s to wait to flush fludfile.conf
datadir = os.path.dirname(os.path.abspath(__file__))
imgdir = os.path.join(datadir,'images')
mimeMgr = wx.MimeTypesManager()
def getFileIcon(file, il, checkboxes, icondict):
    """Return the image-list index of the checkbox-decorated icon set for
    *file*, generating and caching new bitmaps in *il*/*icondict* (keyed
    by mime-type description) on demand; unknown types fall back to the
    'generic' icons."""
    ft = mimeMgr.GetFileTypeFromExtension(file[file.rfind('.')+1:])
    # XXX: what about from mimetype or magic?
    if ft == None:
        return icondict['generic']
    else:
        desc = ft.GetDescription()
        if icondict.has_key(desc):
            # already generated for this mime type
            return icondict[desc]
        else:
            icon = ft.GetIcon()
            if icon == None or not icon.Ok():
                # no usable icon for this type: alias it to 'generic' so
                # we don't retry on every call
                #print "couldn't find an icon image for %s" % file
                icondict[desc] = icondict['generic']
                return icondict[desc]
            # build one bitmap per checkbox state and append them to the
            # shared image list; record the starting index
            bm = wx.BitmapFromIcon(icon)
            newimages = makeCheckboxBitmaps(bm, checkboxes)
            #il = self.GetImageList()
            pos = il.GetImageCount()
            for i in newimages:
                il.Add(i)
            icondict[desc] = pos
            #print "%s got a %s image" % (file, ft.GetDescription())
            return pos
def getEmptyBitmapAndDC(width, height):
    """Create a cleared wx.Bitmap of the given size together with a
    MemoryDC already selected into it, ready for drawing."""
    bitmap = wx.EmptyBitmap(width, height)
    dc = wx.MemoryDC()
    dc.SelectObject(bitmap)
    dc.Clear()
    return (bitmap, dc)
def makeCheckboxBitmaps(basebitmap, checkboxes):
    """Return one 40x16 bitmap per checkbox overlay: *basebitmap* (scaled
    to 16x16 if necessary) drawn at the left, the checkbox at the right."""
    if basebitmap.GetWidth() != 16 or basebitmap.GetHeight() != 16:
        scaled = basebitmap.ConvertToImage()
        scaled.Rescale(16, 16)
        basebitmap = scaled.ConvertToBitmap()
    combined = []
    for checkbox in checkboxes:
        bitmap, dc = getEmptyBitmapAndDC(40, 16)
        dc.DrawBitmap(basebitmap, 0, 0, False)
        dc.DrawBitmap(checkbox, 20, 2, False)
        combined.append(bitmap)
    return combined
def createDefaultImageList():
    """Build the default wx.ImageList of checkbox-decorated stock icons.

    Returns (imagelist, checkboxes, icondict) where icondict maps a
    category name ('folder', 'computer', ...) to the index of that
    category's first image in the list.

    The original repeated the same add-a-category stanza eight times;
    this version drives it from a table, preserving the exact image
    order and icondict indices.
    """
    def getDefaultCheckboxes():
        # load the six checkbox-state overlay bitmaps from the images dir
        def loadPng(name):
            return wx.BitmapFromImage(wx.Image(os.path.join(imgdir, name)))
        return (loadPng("checkbox-unchecked1.png"),
                loadPng("checkbox-checked1.png"),
                loadPng("checkbox-checkedpartial1.png"),
                loadPng("checkbox-parentchecked1.png"),
                loadPng("checkbox-excluded1.png"),
                loadPng("checkbox-excludedpartial1.png"))

    checkboxes = getDefaultCheckboxes()
    il = wx.ImageList(40, 16)

    # (category name, stock art id) in insertion order; order matters
    # because icondict records each category's starting index.
    categories = [
        ('folder', wx.ART_FOLDER),
        ('computer', wx.ART_HARDDISK),
        ('drives', wx.ART_HARDDISK),
        ('cdrom', wx.ART_CDROM),
        ('floppy', wx.ART_FLOPPY),
        ('removable', wx.ART_REMOVABLE),
        ('generic', wx.ART_NORMAL_FILE),
        ('exec', wx.ART_EXECUTABLE_FILE),
    ]
    icondict = {}
    j = 0
    for name, artid in categories:
        bitmaps = makeCheckboxBitmaps(wx.ArtProvider_GetBitmap(
                artid, wx.ART_CMN_DIALOG, wx.Size(16, 16)), checkboxes)
        icondict[name] = j
        for bm in bitmaps:
            il.Add(bm)
            j = j + 1
    return il, checkboxes, icondict
class DirCheckboxCtrl(wx.TreeCtrl):
def __init__(self, parent, id=-1, dir=None, pos=wx.DefaultPosition,
        size=wx.Size(300,300), #wx.DefaultSize,
        style=(wx.TR_MULTIPLE
            | wx.TR_HAS_BUTTONS
            | wx.TR_TWIST_BUTTONS
            | wx.TR_NO_LINES
            | wx.TR_FULL_ROW_HIGHLIGHT
            | wx.SUNKEN_BORDER),
        validator=wx.DefaultValidator, name=wx.ControlNameStr,
        allowExclude=True):
    # dir: root directory to display (None appears to mean "whole
    #      computer" -- see expandRoot).
    # allowExclude: whether exclude checkbox states are offered.
    self.allowExclude = allowExclude
    wx.TreeCtrl.__init__(self, parent, id, pos, size, style, validator,
            name)
    self.listeners = []      # observers of checkbox-state changes
    self.parent = parent
    #self.il = self.GetImageList()
    #self.checkboxes = self.getDefaultCheckboxes()
    self.initTree(dir)
    # NOTE(review): onClick/onTooltip/onChar handlers are bound here but
    # defined outside this view -- confirm they exist on the class.
    self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.onExpand, self)
    self.Bind(wx.EVT_LEFT_UP, self.onClick, self)
    self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.onTooltip, self)
    self.Bind(wx.EVT_CHAR, self.onChar)
def initTree(self, dir):
    # Build the root item for *dir*, then pre-expand the tree down to the
    # user's home directory so the view opens somewhere useful.
    self.expandRoot(dir)
    # XXX: should expandHome() on first run, then load expanded dir state
    # from saved state on subsequent runs.
    self.expandHome(dir)
def expandRoot(self, dir):
    """Create the tree root for *dir* (or a 'computer' root when dir is
    None), install the default image list, expand the first level, and
    start the periodic state-flush timer.

    Raises ValueError if *dir* is neither None nor an existing directory.
    """
    # Fixes vs. original: (1) the ValueError message was a bare format
    # string with no '% dir' argument; (2) os.path.isdir() was called
    # before the None check, so the dir==None branch below was
    # unreachable (isdir(None) raises TypeError).
    if dir is not None and not os.path.isdir(dir):
        raise ValueError("%s is not a valid directory path" % dir)
    self.defaultImageList, self.checkboxes, self.icondict \
            = createDefaultImageList()
    self.AssignImageList(self.defaultImageList)
    self.il = self.GetImageList()
    if dir == None:
        # no directory given: root the tree at the whole computer
        self.rootID = self.AddRoot(dir, self.icondict['computer'], -1,
                wx.TreeItemData((dir, True, False,
                    CheckboxState.UNSELECTED)))
        # XXX: getTopLevelDirs() and add them as children
    else:
        self.rootID = self.AddRoot(dir, self.icondict['folder'], -1,
                wx.TreeItemData((dir, True, False,
                    CheckboxState.UNSELECTED)))
    self.expandDir(self.rootID)
    self.Expand(self.rootID)
    # timestamps driving checkFlush()'s periodic persistence
    self.stateChangeTime = time.time()
    self.flushTime = time.time()
    reactor.callLater(FLUSHCHECKTIME, self.checkFlush)
def expandHome(self, dir):
    # Walk from the tree root down to $HOME, expanding each directory on
    # the way so the tree opens showing the user's home directory.
    # NOTE(review): raises KeyError if HOME is unset ([] not .get()).
    home = os.environ['HOME']
    if home:
        # drop the leading empty component of an absolute path
        traversal = home.split(os.path.sep)[1:]
        node = self.rootID
        for d in traversal:
            (ipath, isdir, expanded, istate) \
                    = self.GetItemData(node).GetData()
            self.expandDir(node)
            children = self.getChildren(node, False)
            childrennames = [self.GetItemText(x) for x in children]
            if d in childrennames:
                p = childrennames.index(d)
                node = children[p]
                self.expandDir(node)
                self.Expand(node)
            else:
                print "couldn't traverse to HOME dir on %s" % d
                break
def checkFlush(self):
    # Periodic timer callback: persist checkbox state (via the parent's
    # flushFileConfig) if it changed since the last flush, then reschedule.
    print "checking for flush"
    if self.stateChangeTime > self.flushTime:
        self.flushTime = time.time()
        print "flushing"
        self.parent.flushFileConfig()
    reactor.callLater(FLUSHCHECKTIME, self.checkFlush)
def expandDir(self, parentID, hideHidden=False, busycursor=True):
    # Populate *parentID*'s children from the filesystem (directories
    # first, then files, each sorted) unless it is already expanded.
    def isDriveAvailable(path):
        # Windows drive letters: assume a:/b: (floppies) exist, probe
        # the rest; non-drive paths are always "available".
        if len(path) == 2 and path[1] == ':':
            path = path.lower()
            # NOTE(review): 'diExists' is not defined anywhere visible --
            # looks like a typo (os.path.exists?).  This branch would
            # raise NameError if reached on a drive-letter path.
            if path[0] == 'a' or path[0] == 'b' or diExists(path):
                return True
            else:
                return False
        return True
    (path, isDir, expanded, state) = self.GetItemData(parentID).GetData()
    if expanded:
        return
    if not isDriveAvailable(path):
        return
    # NOTE(review): the wx.BusyCursor must be kept in a variable for the
    # cursor to stay busy; this temporary is destroyed immediately.
    if busycursor: wx.BusyCursor()
    try:
        dirlist = os.listdir(path)
    except:
        # unreadable directory: mark as leaf
        self.SetItemHasChildren(parentID, False)
        return
    if len(dirlist) == 0:
        self.SetItemHasChildren(parentID, False)
        return
    dirs = []
    files = []
    for i in dirlist:
        if hideHidden and i[0] == '.':
            # XXX: dotfile format check is *nix specific
            # XXX: if this is a hidden file, don't add it.
            pass
        elif os.path.isdir(os.path.join(path,i)):
            dirs.append(i)
        else:
            files.append(i)
    dirs.sort()
    files.sort()
    for d in dirs:
        # item data tuple is (path, isDir, expanded, checkboxState)
        child = self.AppendItem(parentID, d)
        self.SetPyData(child, (os.path.join(path,d), True, False, 0))
        self.SetItemImage(child, self.icondict['folder'],
                wx.TreeItemIcon_Normal)
        self.SetItemHasChildren(child)
    il = self.GetImageList()
    for f in files:
        child = self.AppendItem(parentID, f) # XXX: unicode?
        self.SetPyData(child, (os.path.join(path,f), False, False, 0))
        idx = getFileIcon(os.path.join(path,f), il, self.checkboxes,
                self.icondict)
        self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
    # mark the parent as expanded so we don't repopulate
    self.SetPyData(parentID, (path, isDir, True, state))
def getStates(self, node=None):
    """Collect a {path: state} mapping for every item whose checkbox is
    explicitly SELECTED or EXCLUDED, walking depth-first from *node*
    (default: the tree root)."""
    if not node:
        node = self.rootID
    collected = {}
    itempath, _isdir, _expanded, itemstate = \
            self.GetItemData(node).GetData()
    if itemstate in [CheckboxState.SELECTED, CheckboxState.EXCLUDED]:
        collected[itempath] = itemstate
    for child in self.getChildren(node, False):
        collected.update(self.getStates(child))
    return collected
def setStates(self, states):
    """Apply a {path: state} mapping (as produced by getStates) to the
    tree, silently skipping paths no longer present in it."""
    for path in states:
        target = self.findNode(path)
        if target:
            self.setItemState(target, states[path])
def findNode(self, path):
    # Translate a filesystem path into the corresponding tree item,
    # expanding directories along the way; returns None if any path
    # component is missing from the tree.
    if path[0] == '/':
        path = path[1:] # XXX: unix only
    traversal = path.split(os.path.sep)
    if traversal[0] == '':
        traversal.remove('')
    node = self.rootID
    while True:
        (ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
        if len(traversal) == 0:
            # consumed every component: node is the match
            return node
        self.expandDir(node)
        children = self.getChildren(node, False)
        childrennames = [self.GetItemText(x) for x in children]
        firstpath = traversal[0]
        if firstpath in childrennames:
            p = childrennames.index(firstpath)
            node = children[p]
            traversal.remove(firstpath)
        else:
            #print " the file %s is no longer present!" % path
            return None
    # unreachable: the loop always returns
    return None
def onExpand(self, event):
    """Lazy-expansion handler: populate the expanding item's children and
    recursively refresh their checkbox rendering."""
    item = event.GetItem()
    self.expandDir(item)
    self.renderChildren(item, True)
def getFullPath(self, node):
    """Reassemble the filesystem path for *node* by walking up to the
    root and joining the item labels.

    Fix: the original called self.tree.GetItemText()/GetItemParent(),
    but this class *is* the TreeCtrl (no self.tree attribute exists --
    every other method calls self.GetItemText() directly), so it raised
    AttributeError whenever invoked.
    """
    path = self.GetItemText(node)
    n = node
    while True:
        n = self.GetItemParent(n)
        if n and n != self.GetRootItem():
            path = os.path.join(self.GetItemText(n), path)
        else:
            break
    return path
def renderParents(self, item):
    # Propagate *item*'s checkbox state upward: ancestors flip between
    # UNSELECTED and SELECTEDPARENT depending on whether any child at
    # their level remains selected.  Stops early at ancestors holding any
    # other (explicit) state, or when an ancestor's state is unchanged.
    if item == self.rootID:
        return
    n = item
    (path, isDir, expanded, state) = self.GetItemData(item).GetData()
    while True:
        n = self.GetItemParent(n)
        (parentpath, parentisDir, parentexpanded,
                parentstate) = self.GetItemData(n).GetData()
        #print "parent %s" % parentpath
        if n and n != self.GetRootItem():
            newstate = parentstate
            if parentstate != CheckboxState.UNSELECTED and \
                    parentstate != CheckboxState.SELECTEDPARENT:
                # we only care about changing UNSELECT or SELECTEDPARENT
                # states
                break
            else:
                if state == CheckboxState.SELECTED or \
                        state == CheckboxState.SELECTEDCHILD or \
                        state == CheckboxState.SELECTEDPARENT:
                    # if the item (child) is selected in any way, parent
                    # should be too.
                    newstate = CheckboxState.SELECTEDPARENT
                elif state == CheckboxState.UNSELECTED or \
                        state == CheckboxState.EXCLUDED:
                    # if the item (child) is unselected or excluded, the
                    # parent should be too, /unless/ there are other
                    # children at the same level who are selected.
                    children = self.getChildren(n, False)
                    newstate = CheckboxState.UNSELECTED
                    for child in children:
                        (cpath, cisdir, cexp,
                                cstate) = self.GetItemData(child).GetData()
                        if cstate == CheckboxState.SELECTED or \
                                cstate == CheckboxState.SELECTEDCHILD or \
                                cstate == CheckboxState.SELECTEDPARENT:
                            # a sibling is still selected: keep parent as-is
                            newstate = parentstate
                if newstate == parentstate:
                    # no change at this level; ancestors won't change either
                    break
                # update stored state and shift the checkbox image by the
                # delta between old and new state
                imageidx = self.GetItemImage(n)
                imageidx += CheckboxState.offset(parentstate, newstate)
                self.SetPyData(n, (parentpath, parentisDir,
                        parentexpanded, newstate))
                self.SetItemImage(n, imageidx)
        else:
            break
def renderChildren(self, parent, recurse=False):
    # Propagate *parent*'s checkbox state down to its (already created)
    # children, updating both the stored state tuples and the checkbox
    # images; optionally recurses the whole subtree.
    (parentpath, parentisDir, parentexpanded,
            parentstate) = self.GetItemData(parent).GetData()
    children = self.getChildren(parent, False)
    for child in children:
        #path = self.getFullPath(child)
        (path, isDir, expanded, state) = self.GetItemData(child).GetData()
        imageidx = self.GetItemImage(child)
        newstate = state
        """
        Here are the state transitions for children based on current states:
        ('-' = no state change, 'x' = should never occur, '!' = should be
        prevented at the parent, '?' = need to consult children)
                             child
                    unsel  sel  selch       selpar  excl  exclch
             unsel  -      !    unsel       x       -     unsel
             sel    selch  -    -           selch   -     selch
        par  selch  selch  -    -           selch   -     selch
             selpar x      x    unsl?selpr  x       x     x
             excl   exlch  !    exlch       !       -     -
             exclch exlch  -    exlch       !       -     -
        """
        #if parentpath == '/data':
        #    print "/data pstate = %d" % parentstate
        #    print " %s = %d" % (path, state)
        if state == CheckboxState.UNSELECTED:
            if parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
            elif parentstate == CheckboxState.EXCLUDED or \
                    parentstate == CheckboxState.EXCLUDEDCHILD:
                newstate = CheckboxState.EXCLUDEDCHILD
        elif state == CheckboxState.SELECTEDCHILD:
            if parentstate == CheckboxState.UNSELECTED:
                newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.SELECTEDPARENT:
                # parent only partially selected: keep the child selected
                # only if its own subtree still holds a selection
                if self.checkChildrenStates(child, [CheckboxState.SELECTED,
                        CheckboxState.SELECTEDPARENT]):
                    # XXX: did we need to pass in selections to checkChldSt
                    newstate = CheckboxState.SELECTEDPARENT
                else:
                    newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.EXCLUDED or \
                    parentstate == CheckboxState.EXCLUDEDCHILD:
                newstate = CheckboxState.EXCLUDEDCHILD
        elif state == CheckboxState.SELECTEDPARENT:
            if parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
        elif state == CheckboxState.EXCLUDEDCHILD:
            if parentstate == CheckboxState.UNSELECTED:
                newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
        # shift the checkbox image by the old->new state delta
        imageidx += CheckboxState.offset(state, newstate)
        self.SetPyData(child, (path, isDir, expanded, newstate))
        self.SetItemImage(child, imageidx)
        if recurse:
            self.renderChildren(child, recurse)
        # XXX: why do we renderParents here? It hits the same
        # 'parent's over and over and over again. If we want to do
        # this, we need to 'collect up' the parents and just call once
        # -- this kills performance.
        #print "renderParents(%s)" % path
        #self.renderParents(child)
def getChildren(self, node, recurse=False):
    """Return the direct children of `node`; all descendants if `recurse`."""
    found = []
    item, cookie = self.GetFirstChild(node)
    while item:
        found.append(item)
        if recurse:
            found += self.getChildren(item, recurse)
        item, cookie = self.GetNextChild(node, cookie)
    return found
def checkChildrenStates(self, node, states, ignorelist=None):
    """Return True if any descendant of `node` is in one of `states`.

    Items in `ignorelist` are skipped for the state test itself, but
    their subtrees are still searched, since their children may not be
    in the ignorelist.  `ignorelist=None` means "ignore nothing".
    """
    # use a None sentinel instead of the old mutable-default `[]`
    if ignorelist is None:
        ignorelist = []
    children = self.getChildren(node)
    for child in children:
        if child not in ignorelist:
            (p, d, e, childstate) = self.GetItemData(child).GetData()
            for state in states:
                if state == childstate:
                    #print "%s has state %d" % (p, state)
                    return True
        if self.checkChildrenStates(child, states, ignorelist):
            # do this even if it is in ignorelist, because it may have
            # children which are not in the ignorelist
            return True
    return False
def getTooltip(self, item):
    """Build the hover text describing `item`'s backup-selection state."""
    text = self.GetItemText(item)
    (path, isDir, expanded, state) = self.GetItemData(item).GetData()
    if state == CheckboxState.SELECTED:
        if isDir:
            return "'%s' is SELECTED for backup\n" \
                    "ALL files within this folder will be backed up\n" \
                    "(except those explicitly marked for exclusion)" % text
        return "'%s' is SELECTED for backup" % text
    if state == CheckboxState.UNSELECTED:
        return "'%s' is NOT selected for backup" % text
    if state == CheckboxState.SELECTEDPARENT:
        return "some files within '%s' are selected for backup" % text
    if state == CheckboxState.SELECTEDCHILD:
        return "'%s' will be backed up\n" \
                "(one of its parent folders is selected)" % text
    if state == CheckboxState.EXCLUDED:
        if isDir:
            return "'%s' is EXCLUDED from backup\n" \
                    "No files within this folder will be backed up" % text
        return "'%s' is EXCLUDED from backup" % text
    if state == CheckboxState.EXCLUDEDCHILD:
        return "'%s' is EXCLUDED from backup\n" \
                "(one of its parent folders is EXCLUDED)" % text
    # unknown state: fall back to the bare item label
    return text
def onTooltip(self, event):
    """Supply tooltip text for the hovered tree item, if there is any."""
    tip = self.getTooltip(event.GetItem())
    if not tip:
        event.StopPropagation()
    else:
        event.SetToolTip(tip)
    #print dir(event)
def onClick(self, event):
    """Toggle an item's checkbox state when its icon is clicked."""
    hit, flags = self.HitTest((event.GetX(), event.GetY()))
    if flags & wx.TREE_HITTEST_ONITEMICON:
        self.changeState(hit, self.GetSelections())
def onChar(self, event):
if event.KeyCode() == ord('F') and event.ShiftDown() \
and event.ControlDown():
self.flushTime = time.time()
print "flushing"
self.parent.flushFileConfig()
event.Skip()
def changeState(self, item, selections=[]):
    """Cycle `item` to its next checkbox state in response to a click.

    The next state depends on the item's current state, its parent's
    state, and sometimes its children's states (see the transition table
    below).  When `selections` holds more than one item (a multi-select
    click), every selected item is moved toward the clicked item's new
    state, falling back to a valid unselected/inherited state where the
    direct move would be invalid (second table below).

    NOTE(review): `selections=[]` is a mutable default; it is only read
    here, so it is harmless, but a None default would be safer style.
    """
    self.stateChangeTime = time.time()
    (path, isDir, expanded, state) = self.GetItemData(item).GetData()
    if item == self.rootID:
        # root has no parent; treat the "parent" as UNSELECTED
        parent = None
        parentstate = CheckboxState.UNSELECTED
    else:
        parent = self.GetItemParent(item)
        (parentpath, parentisDir, parentexpanded,
                parentstate) = self.GetItemData(parent).GetData()
    imageidx = self.GetItemImage(item)
    # determine newstate from existing state, parent state, and state
    # of children
    """
    Here are the state transitions for the item based on current
    states and parent states: ('-' = no state change, 'x' = should
    never occur, '?' = depends on children state)
                            item
            unsel  sel    selch  selpar excl   exclch
    unsel   sel    excl   sel    sel    unsel  excl
    sel     sel    excl?selpar sel x    selch  excl
par selch   x      excl   sel    sel    selch  excl
    selpar  sel    excl   x      sel    unsel  excl
    excl    x      excl   x      exclch exclch excl
    exclch  x      excl   x      exclch exclch excl
    """
    newstate = state
    if state == CheckboxState.UNSELECTED:
        newstate = CheckboxState.SELECTED
    elif state == CheckboxState.SELECTEDCHILD:
        newstate = CheckboxState.SELECTED
    elif state == CheckboxState.SELECTEDPARENT:
        if parentstate == CheckboxState.EXCLUDED or \
                parentstate == CheckboxState.EXCLUDEDCHILD:
            # XXX: this should be impossible to reach...
            newstate = CheckboxState.EXCLUDEDCHILD
        else:
            newstate = CheckboxState.SELECTED
    elif state == CheckboxState.SELECTED:
        # a selected item with explicitly-selected descendants becomes
        # SELECTEDPARENT; otherwise it moves toward EXCLUDED/UNSELECTED
        if self.checkChildrenStates(item, [CheckboxState.SELECTED,
                CheckboxState.SELECTEDPARENT], selections):
            newstate = CheckboxState.SELECTEDPARENT
        elif self.allowExclude:
            newstate = CheckboxState.EXCLUDED
        else:
            if parent in selections or \
                    (parentstate == CheckboxState.UNSELECTED or \
                    parentstate == CheckboxState.SELECTEDPARENT):
                newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
    elif state == CheckboxState.EXCLUDED:
        if parent in selections or \
                (parentstate == CheckboxState.UNSELECTED or \
                parentstate == CheckboxState.SELECTEDPARENT):
            newstate = CheckboxState.UNSELECTED
        elif parentstate == CheckboxState.SELECTED or \
                parentstate == CheckboxState.SELECTEDCHILD:
            newstate = CheckboxState.SELECTEDCHILD
        else:
            newstate = CheckboxState.EXCLUDEDCHILD
    elif state == CheckboxState.EXCLUDEDCHILD:
        newstate = CheckboxState.EXCLUDED
    if len(selections) > 1:
        # if we have multiple selections, the idea is to move all the
        # selections to the newstate defined above, or to valid
        # unselected or inherited states if the move to newstate would
        # be invalid.
        """
        Here are the state transitions for the item based on the
        newstate as determined by the clicked item and the current
        states: ('-' = no state change, '?' = consult children)
                                item
                    unsel  sel    selch  selpar excl   exclch
            unsel   -      unsel  -      -      unsel  -
            sel     sel    -      sel    sel    sel    -
   newstate selch   -      unsel  -      -      unsel  -
            selpar  -      unsel  -      -      unsel  -
            excl    excl   excl?slpr excl excl  -      excl
            exclch  -      unsel  -      -      unsel  -
        """
        for i in selections:
            (mpath, misDir, mexpanded, mstate) = self.GetItemData(
                    i).GetData()
            mnewstate = mstate
            if mstate == CheckboxState.UNSELECTED or \
                    mstate == CheckboxState.SELECTEDCHILD or \
                    mstate == CheckboxState.SELECTEDPARENT:
                if newstate == CheckboxState.SELECTED or \
                        newstate == CheckboxState.EXCLUDED:
                    mnewstate = newstate
            elif mstate == CheckboxState.SELECTED:
                if newstate == CheckboxState.UNSELECTED or \
                        newstate == CheckboxState.SELECTEDCHILD or \
                        newstate == CheckboxState.SELECTEDPARENT or \
                        newstate == CheckboxState.EXCLUDEDCHILD:
                    mnewstate = CheckboxState.UNSELECTED
                elif newstate == CheckboxState.EXCLUDED:
                    # excluding a selected item with selected
                    # descendants demotes it to SELECTEDPARENT instead
                    if self.checkChildrenStates(i,
                            [CheckboxState.SELECTED,
                            CheckboxState.SELECTEDPARENT], selections):
                        mnewstate = CheckboxState.SELECTEDPARENT
                    else:
                        mnewstate = newstate
            elif mstate == CheckboxState.EXCLUDED:
                if newstate == CheckboxState.UNSELECTED or \
                        newstate == CheckboxState.SELECTEDCHILD or \
                        newstate == CheckboxState.SELECTEDPARENT or \
                        newstate == CheckboxState.EXCLUDEDCHILD:
                    mnewstate = CheckboxState.UNSELECTED
                elif newstate == CheckboxState.SELECTED:
                    mnewstate = newstate
            elif mstate == CheckboxState.EXCLUDEDCHILD:
                if newstate == CheckboxState.EXCLUDED:
                    mnewstate = newstate
            self.setItemState(i, mnewstate)
    # the clicked item goes last; pass its pre-read data to avoid a re-fetch
    self.setItemState(item, newstate, (path, isDir, expanded, state,
            imageidx))
def setItemState(self, item, newstate, oldData=None):
    """Move `item` to `newstate`, update its icon, and re-render both the
    subtree below it and the chain of parents above it.

    `oldData`, when supplied, is the previously read
    (path, isDir, expanded, state, imageidx) tuple, saving a re-fetch.
    """
    if not oldData:
        (path, isDir, expanded, state) = self.GetItemData(item).GetData()
        icon = self.GetItemImage(item)
    else:
        (path, isDir, expanded, state, icon) = oldData
    icon += CheckboxState.offset(state, newstate)
    self.SetPyData(item, (path, isDir, expanded, newstate))
    self.SetItemImage(item, icon)
    self.renderChildren(item, True)
    self.renderParents(item)
def getTopLevelDrives(self):
    """Return the filesystem roots to show at the top of the tree.

    On Windows this is the list of logical drive roots ("C:\\", ...);
    on Unix/OSX it is simply ['/'].
    """
    # don't shadow the imported `sys` module with the platform name
    if platform.system() == 'Windows':
        # XXX: need to test this all out
        import win32api
        drives = win32api.GetLogicalDriveStrings()
        # the string is NUL-delimited and NUL-terminated, so a plain
        # split leaves empty entries -- filter them out
        driveletters = [d for d in drives.split('\000') if d]
        for d in driveletters:
            # entries are already root paths like "C:\\"; the old code
            # appended another ":\\", producing invalid paths
            type = win32api.GetDriveType(d)
            # XXX: set the appropriate icon
        return driveletters
    else: # Unix, OSX, etc.
        return ['/']
def addListener(self, callback):
    """Register `callback(item, data)` to be invoked on item-data changes."""
    self.listeners.append(callback)
def SetPyData(self, item, data):
    """Store `data` on `item`, then notify every registered listener."""
    wx.TreeCtrl.SetPyData(self, item, data)
    for notify in self.listeners:
        notify(item, data)
"""
Tests for DirCheckboxCtrl
A number of unit tests must be performed on the DirCheckboxGUI widget when
refactoring. Add to this list so that it becomes comprehensive.
Basic Tests:
1. Click on a top-level UNSELECTED object in the tree [should become SELECTED].
- Click again [should become EXCLUDED].
- Click again [should become UNSELECTED].
2. Click on a non-top-level UNSELECTED object in the tree that has no SELECTED
children [should become SELECTED, its parents should become SELECTEDPARENT and
its children SELECTEDCHILD].
- Click again [should become EXCLUDED, its parents who were SELECTEDPARENT
should become UNSELECTED, and its UNSELECTED children should become
EXCLUDED].
- Click again [should become UNSELECTED, and its children should become
UNSELECTED].
3. Change two children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click one child to become EXCLUDED [parents should stay in SELECTEDPARENT]
- Click the same child to become UNSELECTED [parents should stay in
SELECTEDPARENT]
- Click the other child to become EXCLUDED [parents should become
UNSELECTED]
4. Choose a folder and a child item.
- Click the child to become SEL [parent should be SELPAR]
- Click the parent [parent should become SEL]
- Click the parent again [parent should become SELPAR]
5. Choose a folder and a child item.
- Click the parent to become SEL [child should become SELCHILD]
- Click the child [child should become SEL]
- Click the child again [child should become EXCL]
- Click the child again [child should become SELCHILD]
6. Pick a node with children at least two-deep. Change two of the
at-least-two-deep children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click parent closest to SELECTED children to SELECTED [two childen remain
in SELECTED, all other children become SELECTEDCHILD. Parent[s] of parent
remain SELECTEDPARENT]
- Click one child twice to become SELECTEDCHILD [child should not be able to
be UNSELECTED, parent states should not change]
- Click other child twice to become SELECTEDCHILD [child should not be able
to be UNSELECTED, parent states should not change]
7. Pick a node with children at least two-deep.
- Click deepest parent to SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click same parent again to become EXCLUDED [Parent[s] of parent become
UNSELECTED]
- Click same parent again to become UNSELECTED [Parent[s] of parent remain
UNSELECTED]
8. Pick a node with children at least two-deep.
- Click deepest child to become SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click the topmost parent to become SELECTED [children become
SELECTEDCHILD]
- Click the topmost parent again to become SELECTEDPARENT [middle child
should become SELECTEDPARENT]
Multi-Selection Tests:
1. Multi-select three items at the same level and in the same state. Toggle
between the three main states [SELECTED, EXCLUDED, UNSELECTED]
2. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the SEL item to see that all three items become
EXCL.
3. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the EXCL item to see that all three items become
UNSEL.
4. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the UNSEL item to see that all three items become
SEL.
5. Choose three items that are nested within each other: a parent folder, one
of its children folders, and a file/folder in the child folder. Choose one
other item from the child folder.
- set the top parent to UNSEL
- set the child folder to SEL [parent become SELPAR]
- set the child item to SEL
- set the other item to EXCL
- multi-select all four items
- 5A. click on the top parent (which was in SELPAR) [All four items should
become SEL, all children of any of these items should become SELCHILD].
Toggle twice more [all selected items should toggle to EXCL, then to
UNSEL]
- 5B. reset as above, click on the child folder [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5C. reset as above, click on the child item [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5D. reset as above, click on the other item [All four items should
become UNSEL]. Toggle twice more [all selected items should go to SEL,
then EXCL]
6. Choose a folder, one of its subfolders, a subfolder of that subfolder, an item in the deepest subfolder, and an item in the first subfolder, e.g.:
[] A
[] B
[] C
[] D
[] E
- change item 'D' to SEL [parents 'A', 'B', and 'C' should go to SELPAR]
- change item 'E' to EXCL
- multi-select 'A', 'C', and 'E'
- toggle 'E' to UNSEL [all other selections should stay in current state]
- toggle 'E' to SEL ['A' and 'B' become SEL, their children become SELCHILD]
- toggle 'E' back to EXCL [should get our original multi-select setup back]
- toggle 'C' to SEL [all selections to SEL, children to SELCHILD]
- toggle 'C' to SELPAR ['A' and 'C' to SELPAR, 'E' to UNSEL]
- toggle 'E' twice [should get our original multi-select setup back]
"""
class CheckFileListCtrlMixin:
    """Mixin that binds a left-mouse-up handler at construction time.

    For some reason EVT_LEFT_DOWN/_UP refuse to bind directly inside
    FileListCtrl itself, so the binding is smuggled in via this mixin.
    """
    def __init__(self, toCall):
        self.Bind(wx.EVT_LEFT_UP, toCall)
class FileListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin,
CheckFileListCtrlMixin):
"""
Implements a file list control, with a peerctrl that contains the
filesystem model. Currently, this peerctrl must implement an
addListener(), changeState(), GetItemData(), expandDir(), GetSelections(),
and GetChildren() API similar to that implemented by DirCheckBoxCtrl.
"""
def __init__(self, parent, peerctrl, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.LC_REPORT,
validator=wx.DefaultValidator, name=wx.ListCtrlNameStr):
wx.ListCtrl.__init__(self, parent, id, pos, size, style, validator,
name)
CheckFileListCtrlMixin.__init__(self, self.OnClick)
listmix.ListCtrlAutoWidthMixin.__init__(self)
self.peerctrl = peerctrl
self.peerctrl.addListener(self.itemChanged)
self.itemdict = {} # a dict with filepath as key, containing tuples of
# (index into ListCtrl, reference to peerctrl object)
self.stopsearch = False
self.il, self.checkboxes, self.icondict = createDefaultImageList()
self.AssignImageList(self.il, wx.IMAGE_LIST_SMALL)
self.il = self.GetImageList(wx.IMAGE_LIST_SMALL)
self.InsertColumn(0, "Filename")
self.InsertColumn(1, "Location")
#self.InsertColumn(2, "Last Backup")
#self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
#self.SetColumnWidth(1, -1) #wx.LIST_AUTOSIZE)
self.Bind(wx.EVT_MOTION, self.mouseMotion)
self.searchSourceItems = []
def itemChanged(self, item, data):
(path, isDir, expanded, state) = data
if self.itemdict.has_key(path):
item = self.itemdict[path][0]
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
self.SetItemImage(item, image)
def GetAll(self, excludeStates=[]):
result = []
start = -1
for i in range(self.GetItemCount()):
item = self.GetNextItem(start, wx.LIST_NEXT_ALL)
# XXX: only append if not in excludeStates
result.append(item)
start = item
return result
def GetSelections(self):
result = []
start = -1
for i in range(self.GetSelectedItemCount()):
item = self.GetNextItem(start, wx.LIST_NEXT_ALL,
wx.LIST_STATE_SELECTED)
result.append(item)
start = item
return result
def GetPeerSelections(self, selections):
result = []
for item in selections:
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
if self.itemdict.has_key(path):
result.append(self.itemdict[path][1])
return result
def mouseMotion(self, event):
point = event.GetPosition()
item, flags = self.HitTest(point)
if flags == wx.LIST_HITTEST_ONITEMICON:
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
text = self.peerctrl.getTooltip(self.itemdict[path][1])
tip = wx.ToolTip(text)
self.SetToolTip(tip)
#tipwin = tip.GetWindow()
#tippos = tipwin.GetPosition()
#print "%s vs %s" % (tippos, point)
#tipwin.SetPosition(point)
def OnClick(self, event):
point = event.GetPosition()
item, flags = self.HitTest(point)
if flags == wx.LIST_HITTEST_ONITEMICON:
peerselections = self.GetPeerSelections(self.GetSelections())
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
ditem = self.itemdict[path][1] # raises if not present
self.peerctrl.changeState(ditem, peerselections)
(path, isDir, expanded, state) \
= self.peerctrl.GetItemData(ditem).GetData()
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
self.SetItemImage(item, image)
def searchButtonAction(self, event):
selections = self.peerctrl.GetSelections()
if len(selections) == 0:
return ("Please tell me where to search. Select one or more"
" folders in the left-hand panel (hold down SHIFT or"
" CTRL for multiple selection), then click the 'find!'"
" button again.", None)
else:
self.DeleteAllItems()
self.itemdict = {}
b = wx.BusyCursor()
searchSourceItems = []
for i in selections:
self.addResults(i, event.searchstring)
searchSourceItems.append(i)
self.searchSourceItems = [self.peerctrl.GetItemData(s).GetData()[0]
for s in searchSourceItems]
print "sources: %s" % self.searchSourceItems
return ("Search results will appear as files that match your"
" search are found.", None)
return (None, None)
def addResults(self, ditem, searchstring):
(path, isDir, expanded, state) \
= self.peerctrl.GetItemData(ditem).GetData()
position = self.GetItemCount()
if isDir:
if not expanded:
self.peerctrl.expandDir(ditem, busycursor=False)
children = self.peerctrl.getChildren(ditem)
for c in children:
self.addResults(c, searchstring)
wx.Yield()
if self.stopsearch:
break
else:
terms = [x for x in searchstring.split(' ') if x != '']
for term in terms:
print path
if path.find(term) > 0:
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
dirname, filename = os.path.split(path)
index = self.InsertImageStringItem(position, filename,
image)
self.SetStringItem(index, 1, dirname)
self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.itemdict[path] = (index, ditem)
break
def setGroup(self, state):
items = self.GetAll()
item = items[0]
peerselections = self.GetPeerSelections(items)
path = os.path.join(self.GetItem(item,1).GetText(),
self.GetItemText(item))
ditem = self.itemdict[path][1] # raises if not present
while True:
# cycle until the items state matches the desired state
self.peerctrl.changeState(ditem, peerselections) # can be slow
(path, isDir, expanded, nstate) \
= self.peerctrl.GetItemData(ditem).GetData()
if nstate == state:
break
image = getFileIcon(path, self.il, self.checkboxes,
self.icondict) + state
self.SetItemImage(item, image)
return self.searchSourceItems
class GroupSelectionCheckbox(wx.Panel):
    """Tri-state checkbox (unselected -> selected -> excluded) used to
    apply a backup/exclude rule to a whole group of search results.

    An optional `setGroupState` callback is invoked with the new state
    whenever the user clicks the checkbox.
    """

    _BACKUP_MSG = "always BACKUP any files that match these search criteria"
    _EXCLUDE_MSG = "always EXCLUDE any files that match these search criteria"

    def __init__(self, parent, id=-1, setGroupState=None):
        wx.Panel.__init__(self, parent, id)
        self.setGroupState = setGroupState
        def loadBitmap(fname):
            # checkbox art lives in the shared image directory
            return wx.BitmapFromImage(wx.Image(os.path.join(imgdir, fname)))
        self.ubm = loadBitmap("checkbox-unchecked1.png")
        self.cbm = loadBitmap("checkbox-checked1.png")
        self.ebm = loadBitmap("checkbox-excluded1.png")
        self.checkboxButton = wx.BitmapButton(self, -1, self.ubm,
                style=wx.NO_BORDER)
        self.Bind(wx.EVT_BUTTON, self.onCheckbox, self.checkboxButton)
        self.description = wx.StaticText(self, -1,
                "always BACKUP any files that match these search criteria ")
        self.state = CheckboxState.UNSELECTED
        self.gbSizer = wx.GridBagSizer(1,2)
        self.gbSizer.Add(self.checkboxButton, (0,0), flag=wx.ALIGN_CENTER)
        self.gbSizer.Add(self.description, (0,1), flag=wx.ALIGN_CENTER)
        self.gbSizer.AddGrowableRow(1)
        self.SetSizerAndFit(self.gbSizer)

    def Enable(self, enable=True):
        """Enable (or disable) both the button and its caption."""
        self.checkboxButton.Enable(enable)
        self.description.Enable(enable)

    def Disable(self):
        """Convenience wrapper for Enable(False)."""
        self.Enable(False)

    def clear(self):
        """Reset to the UNSELECTED state and caption."""
        self.checkboxButton.SetBitmapLabel(self.ubm)
        self.state = CheckboxState.UNSELECTED
        self.description.SetLabel(self._BACKUP_MSG)

    def setState(self, state):
        """Force the checkbox into `state`, updating bitmap and caption."""
        self.state = state
        lookup = {
            CheckboxState.UNSELECTED: (self.ubm, self._BACKUP_MSG),
            CheckboxState.SELECTED: (self.cbm, self._BACKUP_MSG),
            CheckboxState.EXCLUDED: (self.ebm, self._EXCLUDE_MSG),
        }
        if state in lookup:
            bitmap, caption = lookup[state]
            self.checkboxButton.SetBitmapLabel(bitmap)
            self.description.SetLabel(caption)

    def onCheckbox(self, event):
        """Advance one step through the three-state cycle and notify."""
        if self.state == CheckboxState.UNSELECTED:
            bitmap, caption, nextstate = (self.cbm, None,
                    CheckboxState.SELECTED)
        elif self.state == CheckboxState.SELECTED:
            bitmap, caption, nextstate = (self.ebm, self._EXCLUDE_MSG,
                    CheckboxState.EXCLUDED)
        elif self.state == CheckboxState.EXCLUDED:
            bitmap, caption, nextstate = (self.ubm, self._BACKUP_MSG,
                    CheckboxState.UNSELECTED)
        else:
            return
        self.checkboxButton.SetBitmapLabel(bitmap)
        if caption is not None:
            self.description.SetLabel(caption)
        self.state = nextstate
        if self.setGroupState:
            self.setGroupState(nextstate)
class SearchPanel(wx.Panel):
def __init__(self, parent, dircheckbox, id=-1, searchButtonAction=None):
wx.Panel.__init__(self, parent, id)
self.dircheckbox = dircheckbox
self.searchButtonAction = searchButtonAction
self.SetAutoLayout(False)
self.rules = {} # should refer to something from fludrules
self.searchField = wx.TextCtrl(self, -1,
"search for files to backup here", size=wx.Size(-1,-1),
style=wx.TE_PROCESS_ENTER)
self.searchField.SetToolTipString('find files within directories'
' selected to the left by entering search terms here')
self.searchField.Bind(wx.EVT_TEXT_ENTER, self.onSearchClick)
self.searchField.Bind(wx.EVT_LEFT_DOWN, self.selectAllText)
self.searchField.Bind(wx.EVT_KILL_FOCUS, self.unfocused)
self.searchButton = wx.Button(self, -1, 'find!', name='searchButton')
self.Bind(wx.EVT_BUTTON, self.onSearchClick, self.searchButton)
self.searchResults = FileListCtrl(self, dircheckbox, -1,
name='searchResults', style=wx.SUNKEN_BORDER | wx.LC_REPORT)
self.searchResults.SetExtraStyle(0)
self.searchResults.SetLabel('found files')
self.groupSelection = GroupSelectionCheckbox(self, -1, self.setGroup)
self.groupSelection.Disable()
self.gbSizer = wx.GridBagSizer(3,2)
self.gbSizer.Add(self.searchField, (0,0), flag=wx.EXPAND)
self.gbSizer.Add(self.searchButton, (0,1))
self.gbSizer.Add(self.searchResults, (1,0), (1,2),
flag=wx.EXPAND|wx.TOP, border=5)
self.gbSizer.Add(self.groupSelection, (2,0) )
self.gbSizer.AddGrowableRow(1)
self.gbSizer.AddGrowableCol(0)
self.SetSizerAndFit(self.gbSizer)
def onSearchClick(self, event):
event.searchstring = self.searchField.GetValue()
if self.searchButton.GetLabel() == 'stop!':
self.searchButton.SetLabel('find!')
self.searchResults.stopsearch = True
return
else:
self.groupSelection.clear()
self.groupSelection.Disable()
self.searchButton.SetLabel('stop!')
self.searchButton.Update()
err, info = self.searchResults.searchButtonAction(event)
selections = self.searchResults.searchSourceItems
# see if we should set the checkbox button from a previous rule
state = None
if len(selections) > 0 and self.rules.has_key(selections[0]):
rule = self.rules[selections[0]]
if self.rules[selections[0]].has_key(event.searchstring):
state = self.rules[selections[0]][event.searchstring]
for i in selections:
if not self.rules.has_key(i) or self.rules[i] != rule:
state = None
break
#for j in self.rules[i]:
if state:
print "should restore checkbox to %s" % state
self.groupSelection.setState(state)
self.searchButton.SetLabel('find!')
self.searchResults.stopsearch = False
if self.searchButtonAction:
self.searchButtonAction(event, errmsg=err, infomsg=info)
self.groupSelection.Enable()
def selectAllText(self, event):
if wx.Window.FindFocus() != self.searchField:
self.searchField.SetSelection(-1,-1)
self.searchField.SetFocus()
else:
self.searchField.SetSelection(0,0)
event.Skip()
def unfocused(self, event):
self.searchField.SetSelection(0,0)
def setGroup(self, state):
b = wx.BusyCursor()
selections = self.searchResults.setGroup(state)
for s in selections:
if not self.rules.has_key(s):
self.rules[s] = {}
if state == CheckboxState.UNSELECTED:
try:
self.rules.pop(s)
except:
pass
else:
self.rules[s][self.searchField.GetValue()] = state
print self.rules
class FilePanel(wx.SplitterWindow):
def __init__(self, parent, searchButtonAction=None):
# Use the WANTS_CHARS style so the panel doesn't eat the Return key.
wx.SplitterWindow.__init__(self, parent, -1,
style=wx.SP_LIVE_UPDATE | wx.CLIP_CHILDREN | wx.WANTS_CHARS)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.SetNeedUpdating(True)
print self.GetSize()
self.tree = DirCheckboxCtrl(self, -1, dir="/")
# XXX: fludrules.init path should be in config
self.fludrules = self.getFludHome()+"/fludrules.init"
if not os.path.isfile(self.fludrules):
# XXX: do the other first time stuff (email encrypted credentials,
# etc.)
parent.SetMessage("Welcome. This appears to be the first"
" time you've run flud. We've automatically selected some"
" files for backup. You can make changes by"
" selecting/deselecting files and directories. When you are"
" done, simply close this window.")
src = open(os.path.join(datadir,'fludrules.init'), 'r')
dst = open(self.fludrules, 'w')
filerules = src.read()
dst.write(filerules)
dst.close()
src.close()
filerules = eval(filerules)
rulestates = {}
for rule in filerules['baserules']:
value = filerules['baserules'][rule]
rule = glob.glob(os.path.expandvars(rule))
for r in rule:
rulestates[r] = value
self.tree.setStates(rulestates)
# XXX: fludfile.conf path should be in config
self.fludfiles = self.getFludHome()+"/fludfile.conf"
print self.fludfiles
if os.path.isfile(self.fludfiles):
file = open(self.fludfiles, 'r')
states = eval(file.read())
self.tree.setStates(states)
file.close()
self.searchPanel = SearchPanel(self, dircheckbox=self.tree,
searchButtonAction=searchButtonAction)
self.SetMinimumPaneSize(50)
self.SplitVertically(self.tree, self.searchPanel) #, 300)
print self.GetSize()
def getFludHome(self):
if os.environ.has_key('FLUDHOME'):
fludhome = os.environ['FLUDHOME']
else:
fludhome = os.environ['HOME']+"/.flud"
if not os.path.isdir(fludhome):
os.mkdir(fludhome, 0700)
return fludhome
def shutdown(self, event):
self.flushFileConfig()
event.Skip()
def flushFileConfig(self):
states = self.tree.getStates()
f = open(self.fludfiles, 'w')
f.write(str(states))
f.close()
for i in states:
print "%s %s" % (i, states[i])
def OnSize(self, event):
w,h = self.GetClientSizeTuple()
if self.tree:
self.tree.SetDimensions(0, 0, w, h)
event.Skip()
class RestoreCheckboxCtrl(DirCheckboxCtrl):
    """Checkbox tree of backed-up files (from the stored master metadata)
    used to pick files for restore.  Exclusion is disabled.
    """
    # XXX: child/parent selection/deselection isn't quite right still, esp wrt
    # root node.  repro:
    #   -/
    #    -d1
    #     -f1
    #    -d2
    #    -d3
    #     -f2
    #     -f3
    # with nothing selected, select d3 and f3, then select root, then deselect
    # d3 and f3
    def __init__(self, parent, id=-1, config=None, pos=wx.DefaultPosition,
            size=wx.DefaultSize,
            style=(wx.TR_MULTIPLE
                | wx.TR_HAS_BUTTONS
                | wx.TR_TWIST_BUTTONS
                | wx.TR_NO_LINES
                | wx.TR_FULL_ROW_HIGHLIGHT
                | wx.SUNKEN_BORDER),
            validator=wx.DefaultValidator, name=wx.ControlNameStr):
        self.config = config
        DirCheckboxCtrl.__init__(self, parent, id, config, pos, size, style,
                validator, name, allowExclude=False)

    def initTree(self, config):
        """Build the tree from stored metadata and open the useful part."""
        self.expandRoot(config)
        self.expandUntilMultiple()

    def expandRoot(self, config):
        """Create the root node with the default icons, then populate."""
        self.defaultImageList, self.checkboxes, self.icondict \
                = createDefaultImageList()
        self.AssignImageList(self.defaultImageList)
        self.il = self.GetImageList()
        self.rootID = self.AddRoot("/", self.icondict['computer'], -1,
                wx.TreeItemData(("", True, False, CheckboxState.UNSELECTED)))
        self.update()

    def expandUntilMultiple(self):
        """Expand down single-child chains until a branch (or leaf)."""
        node = self.rootID
        while True:
            (ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
            children = self.getChildren(node, False)
            if len(children) > 1 or len(children) == 0:
                break  # (dropped a stray trailing semicolon here)
            node = children[0]
            self.Expand(node)

    def update(self):
        """Merge the stored master file list into the tree, creating any
        missing intermediate directory nodes along each path."""
        master = listMeta(self.config)
        for i in master:
            if not isinstance(master[i], dict):
                traversal = i.split(os.path.sep)
                node = self.rootID
                path = "/"
                if traversal[0] == '':
                    traversal.remove('')
                for n in traversal:
                    path = os.path.join(path, n)
                    children = self.getChildrenDict(node)
                    if n == traversal[-1] and not n in children:
                        # leaf: a file entry with a type-specific icon
                        child = self.AppendItem(node, n)
                        self.SetPyData(child, (path, False, False, 0))
                        idx = getFileIcon(i, self.il, self.checkboxes,
                                self.icondict)
                        self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
                    else:
                        # intermediate directory: create if missing
                        if not n in children:
                            child = self.AppendItem(node, n)
                            self.SetPyData(child, (path, True, False, 0))
                            self.SetItemImage(child, self.icondict['folder'],
                                    wx.TreeItemIcon_Normal)
                        else:
                            child = children[n]
                        node = child
        self.Expand(self.rootID)

    def getChildrenDict(self, node):
        """Return {label: treeitem} for the direct children of `node`."""
        result = {}
        child, cookie = self.GetFirstChild(node)
        while child:
            result[self.GetItemText(child)] = child
            child, cookie = self.GetNextChild(node, cookie)
        return result

    def onExpand(self, event):
        """No lazy expansion needed; the whole tree is built in update()."""
        pass

    def getSelected(self, startNode=None):
        """Return all selected *file* items at or below `startNode`,
        descending into any directory that is selected in some way."""
        if not startNode:
            startNode = self.rootID
        children = self.getChildren(startNode)
        selected = []
        for n in children:
            (path, isDir, expanded, state) = self.GetItemData(n).GetData()
            if not isDir \
                    and (state == CheckboxState.SELECTED \
                    or state == CheckboxState.SELECTEDCHILD):
                selected.append(n)
            if isDir and (state == CheckboxState.SELECTED \
                    or state == CheckboxState.SELECTEDPARENT \
                    or state == CheckboxState.SELECTEDCHILD):
                selected += self.getSelected(n)
        return selected
class RestorePanel(wx.Panel):
    """Notebook page holding the restore tree and a 'restore' button.

    `factory` provides sendGETF(path), which returns a deferred that
    fires when the file has been retrieved.
    """
    def __init__(self, parent, config, factory):
        self.config = config
        self.factory = factory
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.tree = RestoreCheckboxCtrl(self, -1, config, #wx.TreeCtrl(self, -1,
                style=(wx.TR_MULTIPLE
                    | wx.TR_HAS_BUTTONS
                    | wx.TR_TWIST_BUTTONS
                    | wx.TR_NO_LINES
                    | wx.TR_FULL_ROW_HIGHLIGHT
                    | wx.SUNKEN_BORDER))
        self.restoreButton = wx.Button(self, -1, 'restore selected files',
                name='restoreButton')
        self.Bind(wx.EVT_BUTTON, self.onRestoreClick, self.restoreButton)
        self.gbSizer = wx.GridBagSizer(2,1)
        self.gbSizer.Add(self.tree, (0,0), flag=wx.EXPAND|wx.ALL, border=0)
        self.gbSizer.Add(self.restoreButton, (1,0), flag=wx.EXPAND|wx.ALL,
                border=0)
        self.gbSizer.AddGrowableRow(0)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)

    def update(self):
        # refresh the tree from the stored metadata
        self.tree.update()

    def OnSize(self, event):
        w,h = self.GetClientSizeTuple()
        event.Skip()

    def onTooltip(self, event):
        # no tooltips on the restore tree
        pass

    def onRestoreClick(self, event):
        """Kick off an async GETF for every selected file."""
        for n in self.tree.getSelected():
            (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
            print "restoring %s" % path
            d = self.factory.sendGETF(path)
            # each deferred carries its tree item so callbacks can mark it
            d.addCallback(self.restored, n)
            d.addErrback(self.restoreFailed, n)
        self.tree.UnselectAll()

    def restored(self, res, n):
        """Success callback: color the item green and clear its state."""
        (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
        print "yay, %s" % path
        self.tree.SetItemTextColour(n, '#005804')
        self.tree.changeState(n)

    def restoreFailed(self, err, n):
        """Errback: color the item red and clear its state."""
        (path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
        print "boo, %s: %s" % (path, err)
        self.tree.SetItemTextColour(n, wx.RED)
        self.tree.changeState(n)
class SchedulePanel(wx.Panel):
    """Placeholder notebook page for configuring the backup schedule
    (not yet implemented)."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
    def OnSize(self, event):
        # size queried but currently unused; let default handling proceed
        w,h = self.GetClientSizeTuple()
        event.Skip()
class FeedbackPanel(wx.Panel):
    """Placeholder notebook page for sending feedback (not yet implemented)."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # NOTE(review): this editor widget is created but never stored or
        # laid out, so it appears to have no effect -- confirm intent
        editor = wx.lib.editor.editor.Editor(parent, -1)
    def OnSize(self, event):
        # size queried but currently unused; let default handling proceed
        w,h = self.GetClientSizeTuple()
        event.Skip()
class FludNotebook(wx.Notebook):
    """Main tabbed UI: backup-file selection, restore, schedule, and
    feedback pages.  Also owns the LocalClientFactory connection to the
    local flud node (shared with RestorePanel)."""
    def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
            size=wx.DefaultSize, style=wx.NB_BOTTOM|wx.NO_BORDER):
        self.parent = parent
        self.config = parent.config
        # connect to the local node before building pages, since
        # RestorePanel needs the factory
        self.factory = LocalClientFactory(self.config)
        print "connecting to localhost:%d" % self.config.clientport
        reactor.connectTCP('localhost', self.config.clientport, self.factory)
        wx.Notebook.__init__(self, parent, id, pos, style=style)
        self.filePanel = FilePanel(self,
                searchButtonAction=parent.searchButtonAction)
        self.AddPage(self.filePanel, "Backup Files")
        self.restorePanel = RestorePanel(self, self.config, self.factory)
        self.AddPage(self.restorePanel, "Restore")
        self.schedulePanel = SchedulePanel(self)
        self.AddPage(self.schedulePanel, "Backup Schedule")
        self.feedbackPanel = FeedbackPanel(self)
        self.AddPage(self.feedbackPanel, "Feedback")
        self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.changedPage)
    def shutdown(self, event):
        # only the file panel requires explicit shutdown handling
        self.filePanel.shutdown(event)
    def changedPage(self, event):
        """Update the header help text to match the newly selected tab."""
        page = event.GetSelection()
        if page == 0:
            self.SetMessage("Select files and directories for backup"
                    " with the filesystem view on the left, or set up criteria"
                    " for finding files for backup with simple searches,"
                    " below right.")
        elif page == 1:
            self.SetMessage("Select files/directories to be restored to"
                    " your computer, then click on 'restore!' Files will turn"
                    " green as they arrive.")
            # refresh the restore tree whenever the tab is shown
            self.restorePanel.update()
        elif page == 2:
            self.SetMessage("Configure how often your computer should backup."
                    "\n (not implemented)")
        elif page == 3:
            self.SetMessage("Send feedback to flud programmers. (not"
                    " implemented)")
    def SetMessage(self, msg):
        # delegate to the parent frame's logo/message panel
        self.parent.SetMessage(msg)
class FludLogoPanel(wx.Panel):
    """Black header panel showing the flud logo on the left and a
    centered, wrapped status/help message on the right."""
    def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
            size=wx.Size(10,10), style=wx.TAB_TRAVERSAL, name="logo panel"):
        wx.Panel.__init__(self, parent, id, pos, size, style, name)
        self.SetAutoLayout(True)
        self.SetBackgroundColour(wx.BLACK)
        self.SetForegroundColour(wx.WHITE)
        logobmp = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "flud-backup-logo-1-150-nodrop.png")))
        pad = 0
        # panel height is pinned to the logo bitmap's height (see below)
        self.logowidth = logobmp.GetWidth()
        self.logoheight = logobmp.GetHeight()
        self.logo = wx.StaticBitmap(self, -1, logobmp)
        self.messagePanel = wx.Panel(self, -1)
        self.messagePanel.SetBackgroundColour(wx.BLACK)
        self.messagePanel.SetForegroundColour(wx.WHITE)
        self.message = wx.StaticText(self.messagePanel, -1,
                "message text area", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE,
                size=(-1, self.logoheight-15))
        self.message.Bind(wx.EVT_SIZE, self.resizeMessage)
        self.bsizer = wx.BoxSizer(wx.VERTICAL)
        self.bsizer.Add(self.message, flag=wx.EXPAND|wx.ALL, border=35)
        self.bsizer.SetSizeHints(self.messagePanel)
        self.messagePanel.SetSizer(self.bsizer)
        # logo fixed in column 0, message area grows in column 1
        self.gbSizer = wx.GridBagSizer(1,2)
        self.gbSizer.Add(self.logo, (0,0))
        self.gbSizer.Add(self.messagePanel, (0,1), flag=wx.EXPAND|wx.ALL)
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(1)
        self.SetSizerAndFit(self.gbSizer)
        self.SetSize(wx.Size(self.logowidth, self.logoheight))
        self.SetSizeHints(self.logowidth, self.logoheight, -1, self.logoheight)
    def SetMessage(self, msg):
        """Set the message text, wrapped to the current label width."""
        (w,h) = self.message.GetSizeTuple()
        #print "msg area size is %d x %d" % (w,h)
        self.message.SetLabel(msg)
        self.message.Wrap(w)
        #print "msg is '%s'" % self.message.GetLabel()
        self.message.Center()
    def resizeMessage(self, evt):
        # this is mainly to deal with StaticText wonkiness (not calling Wrap()
        # automatically, not centering properly automatically). It may be
        # possible to get rid of this with a future wxPython release.
        (w,h) = self.message.GetSizeTuple()
        self.message.Wrap(w)
        # strip the newlines inserted by the previous Wrap() before rewrapping
        m = self.message.GetLabel()
        m = m.replace('\n',' ')
        self.message.SetLabel(m)
        self.message.Wrap(w)
        self.message.Center()
class FludFrame(wx.Frame):
    """Top-level flud client window: logo/message header on top, the
    task notebook below it, and a status bar."""
    # NOTE: default label fixed from the misspelled "flud bakcup client"
    def __init__(self, parent, id=wx.ID_ANY, label="flud backup client",
            size=wx.Size(800,600),
            style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
            config=None):
        wx.Frame.__init__(self, parent, id, label, size=size, style=style)
        self.lognull = wx.LogNull()  # suppress wx log dialog popups
        wx.ToolTip.SetDelay(2000)
        self.clearMessage = False
        self.logoPanel = FludLogoPanel(self)
        self.SetMessage('Welcome.')
        self.config = config
        self.notebook = FludNotebook(self, size=wx.Size(200,200))
        self.operationStatus = wx.StatusBar(name='operationStatus',
                parent=self, style=0)
        self.SetStatusBar(self.operationStatus)
        # header row fixed, notebook row growable
        self.gbSizer = wx.GridBagSizer(2,1)
        self.gbSizer.Add(self.logoPanel,(0,0), flag=wx.EXPAND)
        self.gbSizer.Add(self.notebook, (1,0), flag=wx.EXPAND|wx.ALL, border=1)
        self.gbSizer.AddGrowableRow(1)
        self.gbSizer.AddGrowableCol(0)
        self.SetSizerAndFit(self.gbSizer)
        self.Bind(wx.EVT_CLOSE, self.shutdown)
        self.SetSize(size)
        self.Show(True)
    def SetMessage(self, message):
        """Display 'message' in the logo panel's text area."""
        self.logoPanel.SetMessage(message)
    def shutdown(self, event):
        """Propagate window close to the notebook so panels can clean up."""
        self.notebook.shutdown(event)
    def searchButtonAction(self, event, errmsg=None, infomsg=None):
        """Show search feedback: an error message is displayed and cleared
        on the next plain call; an info message persists."""
        if errmsg:
            self.logoPanel.SetMessage(errmsg)
            self.clearMessage = True
        elif infomsg:
            self.logoPanel.SetMessage(infomsg)
            self.clearMessage = False
        elif self.clearMessage:
            self.logoPanel.SetMessage("")
#if __name__ == '__main__':
# app = wx.PySimpleApp()
#
# config = FludConfig()
# config.load(doLogging=False)
#
# f = FludFrame(None, wx.ID_ANY, 'flud backup client', size=(795,600),
# style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
# config=config)
#
# from twisted.internet import reactor
# reactor.registerWxApp(app)
# reactor.run()
| Python |
#!/usr/bin/python
"""
FludTestGauges.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Provides gauges for visualizing storage for multiple flud nodes running on
the same host. This is really only useful for demos and testing.
"""
import sys, os, signal, stat, random
import wx
import wx.lib.buttons as buttons
from flud.FludConfig import FludConfig
# module-level accumulator used by the os.path.walk() visitor below
dutotal = 0

def visit(arg, top, files):
    """os.path.walk() visitor: add the lstat() size of every entry in
    directory 'top' to the module-global 'dutotal' accumulator.

    'arg' is os.path.walk()'s pass-through argument and is unused here:
    ints are immutable, so the original 'arg += dutotal' could never
    propagate a total back to the caller -- callers must read the
    'dutotal' global instead (see du()).
    """
    global dutotal
    for name in files:
        try:
            dutotal += os.lstat("%s" % (os.path.join(top, name)))[stat.ST_SIZE]
        except OSError:
            # entry vanished between listing and stat; best-effort tally
            pass
def du(dir):
    """Return the total size in bytes of everything under directory 'dir'.

    Sizes come from lstat() of every file and subdirectory entry (symlinks
    are not followed), giving a 'du'-style tally.  A missing or unreadable
    'dir' yields 0.  The module-global 'dutotal' is also updated for
    backward compatibility with the old global-accumulator interface.
    """
    global dutotal
    total = 0
    for dirpath, dirnames, filenames in os.walk(dir):
        for name in dirnames + filenames:
            try:
                total += os.lstat(os.path.join(dirpath, name))[stat.ST_SIZE]
            except OSError:
                # entry vanished mid-walk; skip it
                pass
    dutotal = total
    return total
# XXX: too much manual layout. should convert to a managed layout to allow for
# resizing, etc.
# All dimensions below are in pixels.
SGAUGEWIDTH = 230 # storage gauge
DGAUGEWIDTH = 100 # dht gauge
GAUGEHEIGHT = 20
ROWHEIGHT = 30
SEP = 5
LABELWIDTH = 20
POWERWIDTH = 70
RATIOBARHEIGHT = 70
# width of one column of gauges; layout wraps into more columns as needed
COLWIDTH = SGAUGEWIDTH+DGAUGEWIDTH+LABELWIDTH+POWERWIDTH
COLGAPFUDGE = 30
class FludTestGauges(wx.Frame):
    """Frame showing per-node storage/DHT usage gauges, plus a power
    button per node, for a set of local flud test nodes (one node per
    FLUDHOME directory: dirroot + each entry of dirs)."""
    def __init__(self, parent, title, dirroot, dirs):
        # compute how many columns are needed so all rows fit on screen
        screenHeight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)-100
        rowheight = ROWHEIGHT+SEP
        height = len(dirs)*(rowheight)+RATIOBARHEIGHT
        columns = height / screenHeight + 1
        width = COLWIDTH*columns
        if columns > 1:
            height = (len(dirs)/columns)*(rowheight)+RATIOBARHEIGHT
            if (len(dirs) % columns) > 0:
                height += rowheight
        wx.Frame.__init__(self, parent, wx.ID_ANY, title, size=(width,height),
                style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
        # gauge scaling state: '*barend' is the bar's current full-scale
        # value (doubled as usage grows), '*multiplier' maps bytes to
        # percent, '*divisor'/'*bytelabel' humanize the totals text
        self.storebarend = 1024
        self.smultiplier = 100.0 / self.storebarend
        self.sdivisor = 1
        self.sbytelabel = ""
        self.dhtbarend = 512
        self.dmultiplier = 100.0 / self.dhtbarend
        self.ddivisor = 1
        self.dbytelabel = ""
        self.storeheading = wx.StaticText(self, -1, "block storage",
                (LABELWIDTH, 5))
        self.totaldht = wx.StaticText(self, -1, "metadata",
                (LABELWIDTH+SGAUGEWIDTH+SEP, 5))
        self.gauges = []
        curCol = 0
        curRow = 30
        for i in range(len(dirs)):
            # storage gauge for node i; per-node widgets are hung off the
            # gauge object itself (dir, label, idlabel, dhtgauge, power)
            self.gauges.append(wx.Gauge(self, -1, 100,
                    (curCol*COLWIDTH+LABELWIDTH, curRow),
                    (SGAUGEWIDTH, GAUGEHEIGHT)))
            self.gauges[i].SetBezelFace(3)
            self.gauges[i].SetShadowWidth(3)
            self.gauges[i].SetValue(0)
            self.gauges[i].dir = "%s%s" % (dirroot,dirs[i])
            # point FludConfig at this node's home to read its nodeID
            os.environ['FLUDHOME'] = self.gauges[i].dir;
            conf = FludConfig()
            conf.load(doLogging = False)
            print "%s" % conf.nodeID
            self.gauges[i].label = wx.StaticText(self, -1, "%2s" % dirs[i],
                    (curCol*COLWIDTH, curRow+(rowheight/4)),
                    size=(LABELWIDTH, -1))
            self.gauges[i].idlabel = wx.StaticText(self, -1, "%s" % conf.nodeID,
                    (curCol*COLWIDTH+LABELWIDTH, curRow+20))
            font = self.gauges[i].idlabel.GetFont()
            font.SetPointSize(6)
            self.gauges[i].idlabel.SetFont(font)
            self.gauges[i].dhtgauge = wx.Gauge(self, -1, 100,
                    (curCol*COLWIDTH+LABELWIDTH+SGAUGEWIDTH+SEP,
                        curRow),
                    (SGAUGEWIDTH/3, GAUGEHEIGHT))
            self.gauges[i].power = wx.Button(self, i, "turn OFF",
                    (curCol*COLWIDTH
                        +LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3,
                        curRow),
                    (POWERWIDTH, ROWHEIGHT))
            #self.gauges[i].power = buttons.GenBitmapToggleButton(self, i,
            #        None,
            #        (LABELWIDTH+SGAUGEWIDTH+2*SEP+SGAUGEWIDTH/3, curRow),
            #        (POWERWIDTH, ROWHEIGHT))
            #self.gauges[i].button.SetBestSize()
            self.gauges[i].power.SetToolTipString("power on/off")
            self.Bind(wx.EVT_BUTTON, self.onClick, self.gauges[i].power)
            curRow += rowheight
            # wrap to the next column when this one is full
            if curRow > height-RATIOBARHEIGHT:
                curCol += 1
                curRow = 30
        # totals text and store/dht ratio bar along the bottom
        self.totalstore = wx.StaticText(self, -1, "total: 0",
                (LABELWIDTH, height-40))
        self.totaldht = wx.StaticText(self, -1, "total: 0",
                (LABELWIDTH+SGAUGEWIDTH+SEP, height-40))
        self.ratiogauge = wx.Gauge(self, -1, 100, (LABELWIDTH, height-20),
                (SGAUGEWIDTH+SEP+SGAUGEWIDTH/3, 10))
        self.ratiogauge.SetValue(0)
        self.Bind(wx.EVT_IDLE, self.IdleHandler)
        # re-measure disk usage once a second
        self.timer = wx.PyTimer(self.update)
        self.timer.Start(1000)
    def onClick(self, event):
        """Toggle a node's power: kill its twistd process (saving its
        command line and FLUD* environment) or respawn it from the saved
        command."""
        # XXX: note that under our current startNnodes.sh scheme, the first
        # node spawned doesn't contact anyone, so if that one is powered off
        # and then powered back on, it will not be part of the node until
        # another node pings it
        # XXX: unix-specific proc management stuff follows
        idx = event.GetId()
        home = self.gauges[idx].dir
        pidfile = os.path.join(home, 'twistd.pid')
        if os.path.exists(pidfile):
            print "shutting down %s" % home
            f = open(pidfile)
            pid = int(f.read())
            f.close()
            # XXX: ps command no worky on windows, and "-ww" may not worker on
            # oldskool unixes
            # capture the full command line and FLUD* env for later restart
            self.gauges[idx].savedCmd = os.popen(
                    "ps f -wwp %d -o args=" % pid).read()
            procline = os.popen("ps e -wwp %d" % pid).read()
            self.gauges[idx].savedEnv = [e for e in procline.split()
                    if e[:4] == 'FLUD']
            # XXX: os.kill no worky on windows, need something like:
            #def windowskill(pid):
            #    import win32api
            #    handle = win32api.OpenProcess(1, 0, pid)
            #    return (0 != win32api.TerminateProcess(handle, 0))
            os.kill(pid, signal.SIGTERM)
            self.gauges[idx].power.SetLabel("turn ON")
            self.gauges[idx].Hide()
            self.gauges[idx].dhtgauge.Hide()
        else:
            print "powering up %s" % home
            # XXX: this exec no worky on windows
            fullcmd = "%s %s" % (' '.join(self.gauges[idx].savedEnv),
                    self.gauges[idx].savedCmd)
            print fullcmd
            result = os.popen('%s %s' % (' '.join(self.gauges[idx].savedEnv),
                    self.gauges[idx].savedCmd)).readlines()
            self.gauges[idx].power.SetLabel("turn OFF")
            self.gauges[idx].Show()
            self.gauges[idx].dhtgauge.Show()
            print result
    def update(self):
        """Timer callback: re-measure each node's store/dht disk usage,
        rescale the bars if any node outgrew them, and refresh all gauges,
        totals, and the store/metadata ratio bar."""
        def sizeclass(num):
            # choose a divisor and K/M/G suffix for human-readable totals
            divisor = 1
            bytelabel = ""
            if num > 1024:
                divisor = 1024.0
                bytelabel = 'K'
            if num > 1048576:
                divisor = 1048576.0
                bytelabel = 'M'
            if num > 1073741824:
                divisor = 1073741824.0
                bytelabel = 'G'
            return (divisor, bytelabel)
        storelargest = 0
        dhtlargest = 0
        storetotal = 0
        dhttotal = 0
        for i in self.gauges:
            if os.path.isdir(i.dir):
                i.storebytes = du(os.path.join(i.dir,'store'))
                if i.storebytes > storelargest:
                    storelargest = i.storebytes
                storetotal += i.storebytes
                i.dhtbytes = du(os.path.join(i.dir,'dht'))
                if i.dhtbytes > dhtlargest:
                    dhtlargest = i.dhtbytes
                dhttotal += i.dhtbytes
            else:
                # node home disappeared: zero it out and disable its row
                i.storebytes = 0
                i.dhtbytes = 0
                i.Disable()
                i.power.Disable()
        # double the full-scale value until the largest node fits the bar
        while storelargest > self.storebarend:
            self.storebarend = self.storebarend * 2
        self.smultiplier = 100.0 / self.storebarend
        self.sdivisor, self.sbytelabel = sizeclass(storetotal)
        while dhtlargest > self.dhtbarend:
            self.dhtbarend = self.dhtbarend * 2
        self.dmultiplier = 100.0 / self.dhtbarend
        self.ddivisor, self.dbytelabel = sizeclass(dhttotal)
        #print "-----"
        for i in self.gauges:
            i.SetValue(i.storebytes*self.smultiplier)
            i.dhtgauge.SetValue(i.dhtbytes*self.dmultiplier)
            #print "%.2f, %.2f" % ((float(i.storebytes)/float(i.dhtbytes)),
            #        (float(i.GetValue())/float(i.dhtgauge.GetValue())))
        self.totalstore.SetLabel("total: %.1f%s"
                % (float(storetotal)/self.sdivisor, self.sbytelabel))
        self.totaldht.SetLabel("total: %.1f%s"
                % (float(dhttotal)/self.ddivisor, self.dbytelabel))
        if (dhttotal+storetotal == 0):
            self.ratiogauge.SetValue(0)
        else:
            self.ratiogauge.SetValue((storetotal*100/(dhttotal+storetotal)))
    def updateGauges(self, update):
        # NOTE(review): appears dead -- 'self.monitors' is never assigned
        # anywhere in this class, so calling this would raise AttributeError
        for index, value in update:
            self.monitors[index].setValue(value)
    def IdleHandler(self, event):
        pass
def main():
    """Parse 'dircommon exts' command-line arguments and launch the
    test-gauges UI.

    'exts' is a comma-separated list of suffixes appended to dircommon;
    a range like '1-10' expands to each integer, and the undocumented
    '_' entry means "just dircommon itself".
    """
    # need BOTH dircommon and exts: the original test was '< 2', which let
    # a single-argument invocation fall through and crash with IndexError
    # on sys.argv[2] instead of printing usage
    if len(sys.argv) < 3:
        print("usage: %s dircommon exts" % sys.argv[0])
        print(" where exts will be appended to dircommon")
        print(" e.g., '%s /home/joe/.flud 1,2,3,4,10,15,20'" % sys.argv[0])
        print(" or, '%s /home/joe/.flud 1-10,15,20'" % sys.argv[0])
        sys.exit()
    root = sys.argv[1]
    exts = []
    dirs = [d.strip() for d in sys.argv[2].split(',')]
    for i in dirs:
        if i == "_":
            exts.append('') # undocumented, means "just dircommon"
        elif i.find('-') >= 0:
            # expand an inclusive integer range like '1-10'
            start, end = i.split('-')
            for j in range(int(start), int(end)+1):
                exts.append(j)
        else:
            exts.append(int(i))
    app = wx.PySimpleApp()
    t = FludTestGauges(None, 'Flud Test Gauges', root, exts)
    t.Show(1)
    app.MainLoop()

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
"""
FludNode.tac (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL).
This is the application file used by twistd to daemonize FludNode.
"""
import os
from twisted.application import service, internet
import flud.FludNode
from flud.protocol.FludCommUtil import getCanonicalIP
port = None
gwhost = None
gwport = None
# environment overrides: FLUDPORT sets the listen port; FLUDGWHOST and
# FLUDGWPORT identify an existing node to join the network through
if 'FLUDPORT' in os.environ:
    port = int(os.environ['FLUDPORT'])
if 'FLUDGWHOST' in os.environ:
    gwhost = getCanonicalIP(os.environ['FLUDGWHOST'])
if 'FLUDGWPORT' in os.environ:
    gwport = int(os.environ['FLUDGWPORT'])

node = flud.FludNode.FludNode(port)
if gwhost and gwport:
    node.connectViaGateway(gwhost, gwport)

# 'application' is the name twistd looks for in a .tac file
application = service.Application("flud.FludNode")
# NOTE(review): this rebinding shadows the imported 'service' module;
# harmless as the last statement here, but fragile if code is added below
service = node.start(twistd=True)
#service.setServiceParent(application)
| Python |
import base64
"""
fencode.py (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL), version 3.
Provides efficient urlsafe base64 encoding of python types (int, long, string,
None, dict, tuple, list) -- in the same vein as BitTorrent's bencode or MNet's
mencode.
"""
class Fencoded:
    """Typed wrapper for data that has already been fencoded.

    Wrapping previously fencoded data lets it nest inside other fencoded
    structures without re-encoding bloat (note that this could also be
    used to store strings without b64 bloating, but that forgoes url
    safety).  See the doctests in fencode() for usage examples.
    """
    def __init__(self, data):
        # data: the raw, already-fencoded string
        self.data = data
    def __eq__(self, i):
        if not isinstance(i, Fencoded):
            return False
        return self.data == i.data
    def __ne__(self, i):
        # Python 2 does not derive != from __eq__, so without this two
        # equal Fencoded instances would still compare unequal via !=
        return not self.__eq__(i)
def fencode(d, lenField=False):
    """
    Takes string data or a number and encodes it to an efficient URL-friendly
    format.

    The encoding is a one-character type tag -- 'i'/'I' int, 'o'/'O' long
    (uppercase marks negative), 's' string, 'd' dict, 'l' list, 't' tuple,
    'f' pre-fencoded (Fencoded), 'n' None -- followed by a urlsafe-base64
    payload.  When lenField is True (used when nesting values inside
    container types), a 3-character length field is inserted between the
    tag and the payload.

    >>> n = None
    >>> i = 123455566
    >>> I = 1233433243434343434343434343434343509669586958695869L
    >>> s = "hello there, everyone"
    >>> s2 = "long text ............................................................................... AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
    >>> d = {'a': 'adfasdfasd', 'aaa': 'rrreeeettt', 'f': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'}
    >>> d2 = {'a': 123, 'b': 'xyz'}
    >>> d3 = {'a': 123, 'b': 'xyz', 'c': {'x': 456, 'y': 'abc'}}
    >>> d3 = {'a': 123, 'b': 'xyz', 'c': d}
    >>> d4 = {}
    >>> l = [1,2,3,4,'a','b','cde']
    >>> l2 = [i,I,s,d]
    >>> l3 = []
    >>> l4 = [n]
    >>> t = (1,2,3,4,'a','b','cde')
    >>> t2 = (i,I,s,d)
    >>> t3 = ()
    >>> d5 = {t: s, 'n': n, 'a': i, i: "a", 'd': d3, s2: s2}
    >>> l5 = [1,[],2,[],(),{},3,{t: s, 'n': ()}]
    >>> fdecode(fencode(n)) == n
    True
    >>> fdecode(fencode(i)) == i
    True
    >>> fdecode(fencode(I)) == I
    True
    >>> fdecode(fencode(-i)) == -i
    True
    >>> fdecode(fencode(-I)) == -I
    True
    >>> fdecode(fencode(s)) == s
    True
    >>> fdecode(fencode(d)) == d
    True
    >>> fdecode(fencode(d2)) == d2
    True
    >>> fdecode(fencode(d3)) == d3
    True
    >>> fdecode(fencode(d4)) == d4
    True
    >>> fdecode(fencode(l)) == l
    True
    >>> fdecode(fencode(l2)) == l2
    True
    >>> fdecode(fencode(l3)) == l3
    True
    >>> fdecode(fencode(l4)) == l4
    True
    >>> fdecode(fencode(t)) == t
    True
    >>> fdecode(fencode(t2)) == t2
    True
    >>> fdecode(fencode(t3)) == t3
    True
    >>> fdecode(fencode(d5)) == d5
    True
    >>> fdecode(fencode(l5)) == l5
    True
    >>> f = Fencoded(fencode(s))
    >>> fdecode(fencode(f)) == f
    True
    >>> fdecode(fdecode(fencode(f))) == s
    True
    >>> fdecode(fencode({i: f})) == {i: f}
    True
    >>> fdecode(fdecode(fencode({i: f}))[i]) == s
    True
    >>> fdecode(fdecode(fencode({i: f, I: f}))[i]) == s
    True
    >>> fdecode(fencode(f), recurse=True) == s
    True
    >>> fdecode(fencode(f), recurse=2) == s
    True
    >>> f2 = Fencoded(fencode(f))
    >>> f3 = Fencoded(fencode(f2))
    >>> fdecode(fencode(f3), recurse=True) == s
    True
    >>> fdecode(fencode(f3), recurse=3) == f
    True
    >>> fdecode(fencode({i: f3, I: f2})) == {i: f3, I: f2}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=1) == {i: f3, I: f2}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=2) == {i: f2, I: f}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=3) == {i: f, I: s}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=4) == {i: s, I: s}
    True
    >>> fdecode(fencode({i: f3, I: f2}), recurse=True) == {i: s, I: s}
    True
    """
    def makeLen(i):
        """
        Returns the integer i as a three-character length field: the b64
        payload of fencode(i), which is always 4 chars ('=' padded) for
        0 <= i <= 65535, minus the type tag and the trailing pad char.
        (Note: doctests on nested functions are not run by testmod().)

        >>> makeLen(255)
        'AP8'
        >>> makeLen(65535)
        '__8'
        """
        if i > 65535 or i < 0:
            raise ValueError("illegal length for fencoded data"
                    "(0 < x <= 65535)")
        return fencode(i)[1:-1]
    if isinstance(d, int) or isinstance(d, long):
        # hex-encode the magnitude, pack to whole bytes, then b64; the
        # sign is carried by upper-casing the type tag, not by the payload
        val = "%x" % d
        neg = False  # NOTE: set but otherwise unused; the tag case encodes sign
        c = 'i'
        if isinstance(d, long):
            c = 'o'
        if d < 0:
            neg = True
            val = val[1:]   # drop the leading '-' from the hex string
            c = c.upper()
        if len(val) % 2 != 0:
            val = "0%s" % val   # pad hex string to a whole number of bytes
        val = val.decode('hex')
        if len(val) % 2 != 0:
            # pad to an even byte count so b64 output is a fixed 4n chars
            val = '\x00' + val
        val = base64.urlsafe_b64encode(val)
        if lenField:
            if len(val) > 65535:
                raise ValueError("value to large for encode")
            return c+makeLen(len(val))+val
        else:
            return c+val
    elif isinstance(d, str):
        # String data may contain characters outside the allowed charset.
        # urlsafe b64encoding ensures that data can be used inside http urls
        # (and other plaintext representations).
        val = base64.urlsafe_b64encode(d)
        if lenField:
            if len(val) > 65535:
                raise ValueError("value to large for encode")
            return 's'+makeLen(len(val))+val
        else:
            return 's'+val
    elif isinstance(d, dict):
        # dict payload is an alternating sequence of length-prefixed
        # fencoded key/value pairs
        result = 'd'
        contents = ""
        for i in d:
            contents = contents + fencode(i,True) + fencode(d[i],True)
        if lenField:
            result = result+makeLen(len(contents))+contents
        else:
            result = result+contents
        return result
    elif isinstance(d, list):
        # list payload is a sequence of length-prefixed fencoded items
        result = 'l'
        contents = ''
        for i in d:
            contents = contents + fencode(i,True)
        if lenField:
            result = result+makeLen(len(contents))+contents
        else:
            result = result+contents
        return result
    elif isinstance(d, tuple):
        # same wire format as list, but tagged 't' so fdecode restores a tuple
        result = 't'
        contents = ''
        for i in d:
            contents = contents + fencode(i,True)
        if lenField:
            result = result+makeLen(len(contents))+contents
        else:
            result = result+contents
        return result
    elif isinstance(d, Fencoded):
        # already-encoded data is embedded verbatim under the 'f' tag
        result = 'f'
        contents = d.data
        if lenField:
            result = result+makeLen(len(d.data))+contents
        else:
            result = result+contents
        return result
    elif d == None:
        if lenField:
            return 'n'+makeLen(1)+'0'
        else:
            return 'n0'
    else:
        raise ValueError("invalid value passed to fencode: %s" % type(d))
def fdecode(d, lenField=False, recurse=1):
    """
    Takes previously fencoded data and decodes it into its python type(s).
    'lenField' is used internally, and indicates that the fencoded data has
    length fields (used for composition of tuples, lists, dicts, etc).
    'recurse' indicates that fdecode should recursively fdecode Fencoded
    objects if set to True, or that it should recurse to a depth of 'recurse'
    when encountering Fencoded objects if it is an integer value.
    """
    def getLen(s):
        # decode a 3-char length field by reconstituting it as a full
        # fencoded int ('i' tag + field + '=' b64 pad) and decoding that
        if len(s) != 3 or not isinstance(s, str):
            raise ValueError("fdecode length strings must be 3 bytes long: '%s'"
                    % s)
        return fdecode('i'+s+'=', recurse=recurse)
    def scanval(valstring, lenField=False):
        """
        scans the given valstring and returns a value and the offset where that
        value ended (as a tuple). If valstring contains more than one value,
        only the length of the first is returned. Otherwise, the entire length
        is returned.
        """
        valtype = valstring[0]
        if lenField:
            start = 4
            end = start+getLen(valstring[1:4])
        else:
            start = 1
            end = len(valstring)-1
        #print " scanval calling fdecode on val[%d:%d]=%s" % (0, end, valstring)
        return (fdecode(valstring[0:end], True, recurse=recurse), end)
    if isinstance(d, Fencoded):
        # unwrap and decode the embedded payload directly
        return fdecode(d.data, recurse=recurse)
    if not isinstance(d, str):
        raise ValueError("decode takes string data or Fencoded object only,"
                " got %s" % type(d))
    valtype = d[0]
    if lenField:
        # NOTE(review): 'length' is computed (validating the field) but the
        # value itself is unused; the payload is simply everything after it
        length = getLen(d[1:4])
        val = d[4:]
    else:
        val = d[1:len(d)]
    # dispatch on the one-character type tag written by fencode()
    if valtype == 'i':
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return int(val, 16)
    elif valtype == 'I':
        # uppercase tag means negative int
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return -int(val, 16)
    elif valtype == 'o':
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return long(val, 16)
    elif valtype == 'O':
        # uppercase tag means negative long
        val = base64.urlsafe_b64decode(val)
        val = val.encode('hex')
        return -long(val, 16)
    elif valtype == 's':
        return base64.urlsafe_b64decode(val)
    elif valtype == 'd':
        # payload is alternating length-prefixed key/value encodings
        result = {}
        while len(val) != 0:
            #print "string is: %s (len=%d)" % (val, len(val))
            (key,l1) = scanval(val, True)
            #print "got key '%s' of length %d" % (key,l1)
            (value,l2) = scanval(val[l1:len(val)], True)
            #print "got value '%s' of length %d" % (value,l2)
            result[key] = value
            val = val[l1+l2:]
        return result
    elif valtype == 'l':
        result = []
        if lenField:
            pass  # placeholder: no extra handling needed for nested lists
        while len(val) != 0:
            (v,l) = scanval(val, True)
            result.append(v)
            val = val[l:]
        return result
    elif valtype == 't':
        # same wire format as list; materialized as a tuple at the end
        result = []
        if lenField:
            pass  # placeholder: no extra handling needed for nested tuples
        while len(val) != 0:
            (v,l) = scanval(val, True)
            result.append(v)
            val = val[l:]
        return tuple(result)
    elif valtype == 'f':
        # Fencoded payload: either recurse into it or hand back a wrapper,
        # depending on the requested recursion depth (bool True = unlimited)
        if not isinstance(recurse, bool):
            recurse = recurse-1
        if recurse > 0:
            return fdecode(val, recurse=recurse)
        return Fencoded(val)
    elif valtype == 'n':
        return None
    else:
        raise ValueError("invalid value passed to fdecode"
                " -- cannot fdecode data that wasn't previously fencoded: '%s'"
                % d[:400])
if __name__ == '__main__':
    # run the round-trip doctests embedded in fencode()'s docstring
    import doctest
    doctest.testmod()
| Python |
# XXX: this class goes away
class Reputation:
    """
    Each node maintains a list of reputation
    objects corresponding to reputations of other nodes. Reputations may be
    self-generated (in which case the originator is this node itself), or may
    be relayed (in which case some other node is the originator).
    Self-generated reputations are vastly more reliable than those relayed --
    relayed reputations are second-hand information, and are more likely to
    have false data.

    NOTE(review): score() and scoreRelay() read a global 'myNodeID' that is
    only bound when this module runs as __main__; importing and calling them
    otherwise would raise NameError (class is already flagged for removal).
    """
    def __init__(self, ID, originator):
        """
        Variables designated as '%' have values between 0 and 100.
        """
        self.ID = ID                 # node this reputation describes
        self.originator = originator # node that generated the reputation
        self.confidence = 0    # % originator data stored / nodes the
                               # originator stores to
        self.verifiability = 0 # % originator data verified success/failed
        self.availability = 0  # % originator contact attempts success/fail
        self.bandwidth = 0     # avg bandwidth observed from orig. to ID.
        self.age = 0           # age of reputation in days
    def score(self):
        """
        Returns a score for this reputation based in member variables. The
        reputation must be a local reputation, i.e., the originator must
        be equal to the global myNodeID. Otherwise, call scoreRelay()

        >>> myNodeID = "self"
        >>> rep = Reputation("somenode","self")
        >>> rep.availability = 50
        >>> rep.verifiability = 50
        >>> rep.score()
        33
        >>> rep = Reputation("somenode","someothernode")
        >>> rep.availability = 30
        >>> rep.score()
        -1
        """
        # should find a good adjustment of weights (XXX: machine learning?)
        if self.originator != myNodeID:
            return -1
        return (self.confidence + self.verifiability + self.availability) / 3
        # XXX: should also include age and bandwidth
    def scoreRelay(self):
        """
        Returns a score for this reputation based in member variables. The
        reputation must be a remote reputation, i.e., the originator must
        not be equal to the global myNodeID. Otherwise, call score()

        >>> myNodeID = "self"
        >>> rep = Reputation("somenode","self")
        >>> rep.availability = 50
        >>> rep.verifiability = 50
        >>> rep.scoreRelay()
        -1
        >>> rep = Reputation("somenode","someothernode")
        >>> rep.availability = 30
        >>> rep.scoreRelay()
        10
        """
        if self.originator == myNodeID:
            return -1
        return (self.confidence + self.verifiability + self.availability) / 3
        # XXX: should also include age and bandwidth
    def updateConfidence(self, totalDataStored, totalNodesStoredTo):
        # ratio of data stored to distinct storage targets
        self.confidence = totalDataStored / totalNodesStoredTo;
    def _test(self):
        # run the doctests above (requires module-global myNodeID)
        import doctest
        doctest.testmod()
if __name__ == '__main__':
    # bind the global the score methods depend on, then run the doctests
    myNodeID = "self"
    rep = Reputation("other", "self")
    rep._test()
| Python |
#!/usr/bin/python
"""
FludLocalClient.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
FludLocalClient provides a command-line client for interacting with FludNode.
"""
import sys, os, time
from twisted.internet import reactor
from flud.FludConfig import FludConfig
from flud.fencode import fencode, fdecode
from flud.FludCrypto import hashfile
from protocol.LocalClient import *
logger = logging.getLogger('flud')
class CmdClientFactory(LocalClientFactory):
def __init__(self, config):
LocalClientFactory.__init__(self, config)
self.quit = False
self.msgs = []
def callFactory(self, func, commands, msgs):
# since we can't call factory methods from the promptUser thread, we
# use this as a convenience to put those calls back in the event loop
reactor.callFromThread(self.doFactoryMethod, func, commands, msgs)
def doFactoryMethod(self, func, commands, msgs):
d = func()
d.addCallback(self.queueResult, msgs, '%s succeeded' % commands)
d.addErrback(self.queueError, msgs, '%s failed' % commands)
return d
def promptUser(self):
helpDict = {}
command = raw_input("%s> " % time.ctime())
commands = command.split(' ') # XXX: should tokenize on any whitespace
commandkey = commands[0][:4]
# core client operations
helpDict['exit'] = "exit from the client"
helpDict['help'] = "display this help message"
helpDict['ping'] = "send a GETID() message: 'ping host port'"
helpDict['putf'] = "store a file: 'putf canonicalfilepath'"
helpDict['getf'] = "retrieve a file: 'getf canonicalfilepath'"
helpDict['geti'] = "retrieve a file by CAS key: 'geti fencodedCASkey'"
helpDict['fndn'] = "send a FINDNODE() message: 'fndn hexIDstring'"
helpDict['list'] = "list stored files (read from local metadata)"
helpDict['putm'] = "store master metadata"
helpDict['getm'] = "retrieve master metadata"
helpDict['cred'] = "send encrypted private credentials: cred"\
" passphrase emailaddress"
helpDict['node'] = "list known nodes"
helpDict['buck'] = "print k buckets"
helpDict['stat'] = "show pending actions"
helpDict['stor'] = "store a block to a given node:"\
" 'stor host:port,fname'"
helpDict['rtrv'] = "retrieve a block from a given node:"\
" 'rtrv host:port,fname'"
helpDict['vrfy'] = "verify a block on a given node:"\
" 'vrfy host:port:offset-length,fname'"
helpDict['fndv'] = "retrieve a value from the DHT: 'fndv hexkey'"
helpDict['dlet'] = "delete from the stor: '[XXX]'"
if commandkey == 'exit' or commandkey == 'quit':
self.quit = True
elif commandkey == 'help':
self.printHelp(helpDict)
elif commandkey == 'ping':
# ping a host
# format: 'ping host port'
func = lambda: self.sendPING(commands[1], commands[2])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'putf':
# store a file
# format: 'putf canonicalfilepath'
func = lambda: self.sendPUTF(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'getf':
# retrieve a file
# format: 'getf canonicalfilepath'
func = lambda: self.sendGETF(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'geti':
# retrieve a file by CAS ID
# format: 'geti fencoded_CAS_ID'
func = lambda: self.sendGETI(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'fndn':
# find a node (or the k-closest nodes)
# format: 'fndn hexIDstring'
func = lambda: self.sendFNDN(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'list':
# list stored files
self.callFactory(self.sendLIST, commands, self.msgs)
elif commandkey == 'putm':
# store master metadata
self.callFactory(self.sendPUTM, commands, self.msgs)
elif commandkey == 'getm':
# retrieve master metadata
self.callFactory(self.sendGETM, commands, self.msgs)
elif commandkey == 'cred':
# send encrypted private credentials to an email address
# format: 'cred passphrase emailaddress'
func = lambda: self.sendCRED(
command[len(commands[0])+1:-len(commands[-1])-1],
commands[-1])
self.callFactory(func, commands, self.msgs)
# the following are diagnostic operations, debug-only utility
elif commandkey == 'node':
# list known nodes
self.callFactory(self.sendDIAGNODE, commands, self.msgs)
elif commandkey == 'buck':
# show k-buckets
self.callFactory(self.sendDIAGBKTS, commands, self.msgs)
elif commandkey == 'stat':
# show pending actions
print self.pending
elif commandkey == 'stor':
# stor a block to a given node. format: 'stor host:port,fname'
storcommands = commands[1].split(',')
try:
fileid = int(storcommands[1], 16)
except:
linkfile = fencode(long(hashfile(storcommands[1]),16))
if (os.path.islink(linkfile)):
os.remove(linkfile)
os.symlink(storcommands[1], linkfile)
storcommands[1] = linkfile
# XXX: delete this file when the command finishes
commands[1] = "%s,%s" % (storcommands[0], storcommands[1])
func = lambda: self.sendDIAGSTOR(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'rtrv':
# retrive a block from a given node. format: 'rtrv host:port,fname'
func = lambda: self.sendDIAGRTRV(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'vrfy':
# verify a block on a given node.
# format: 'vrfy host:port:offset-length,fname'
logger.debug("vrfy(%s)" % commands[1])
func = lambda: self.sendDIAGVRFY(commands[1])
self.callFactory(func, commands, self.msgs)
elif commandkey == 'dlet':
print "not yet implemented"
elif commandkey == 'fndv':
# try to retrieve a value from the DHT
# format: 'fndv key'
func = lambda: self.sendDIAGFNDV(commands[1])
self.callFactory(func, commands, self.msgs)
elif command != "":
reactor.callFromThread(self.queueError, None, self.msgs,
"illegal command '%s'" % command)
def queueResult(self, r, l, msg):
logger.debug("got result %s" % msg)
l.append((r, msg))
def queueError(self, r, l, msg):
logger.debug("got error %s" % msg)
if r:
l.append((r.getErrorMessage(), msg))
else:
l.append((None, msg))
def printHelp(self, helpDict):
helpkeys = helpDict.keys()
helpkeys.sort()
for i in helpkeys:
print "%s:\t %s" % (i, helpDict[i])
def output(self):
for c in self.pending:
for i in self.pending[c].keys():
if self.pending[c][i] == True:
print "%s on %s completed successfully" % (c, i)
self.pending[c].pop(i)
elif self.pending[c][i] == False:
print "%s on %s failed" % (c, i)
self.pending[c].pop(i)
else:
print "%s on %s pending" % (c, i)
def promptLoop(self, r):
self.output()
while len(self.msgs) > 0:
# this prints in reverse order, perhaps pop() all into a new list,
# reverse, then print
(errmsg, m) = self.msgs.pop()
if errmsg:
print "<- %s:\n%s" % (m, errmsg)
else:
print "<- %s" % m
if self.quit:
reactor.stop()
else:
d = threads.deferToThread(self.promptUser)
d.addCallback(self.promptLoopDelayed)
d.addErrback(self.err)
def promptLoopDelayed(self, r):
    """Re-enter promptLoop after a short pause so queued callbacks and
    errbacks get a chance to fire first."""
    reactor.callLater(0.1, self.promptLoop, r)
def clientConnectionLost(self, connector, reason):
    """Ignore connection loss during shutdown; otherwise defer to base."""
    if self.quit:
        return
    LocalClientFactory.clientConnectionLost(self, connector, reason)
def cleanup(self, msg):
    """Flag shutdown (so connection loss is ignored) and report msg as a
    fatal error."""
    self.quit = True
    self.err(msg)
def err(self, r):
self.output()
print "bah!: %s" % r
reactor.stop()
def main():
config = FludConfig()
config.load(doLogging=False)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler('/tmp/fludclient.log')
formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)d'
' %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
factory = CmdClientFactory(config)
if len(sys.argv) == 2:
config.clientport = int(sys.argv[1])
print "connecting to localhost:%d" % config.clientport
reactor.connectTCP('localhost', config.clientport, factory)
factory.promptLoop(None)
reactor.run()
if __name__ == '__main__':
main()
| Python |
import urlparse, os, types
from twisted.web import client
from twisted.internet import reactor, defer
from twisted.python import failure
"""
HTTPMultipartDownloader.py (c) 2003-2006 Alen Peacock. This program is
distributed under the terms of the GNU General Public License (the GPL),
version 3.
HTTPMultipartDownloader will download multiple files from a multipart/related response.
Note that it does this by using the Content-ID and Content-Length headers in
each multipart, and will fail if those are not present (this could be
genericized to operate without those fields without too much effort)
This code is modeled after twisted.web.client.HTTPDownloader, which is
copyright 2001-2004 Twisted Matrix Laboratories, MIT licensed.
"""
class HTTPMultipartDownloader(client.HTTPDownloader):
    """Download multiple files carried in one multipart/related response.

    Each part must carry a Content-ID header (joined with ``dir`` to name
    the output file) and a Content-Length header (used to find where the
    part's body ends).  On success ``self.deferred`` fires with the list
    of filenames written; an IOError during writing fires its errback.
    """
    protocol = client.HTTPPageDownloader
    value = None

    def __init__(self, url, dir, method='GET', postdata=None, headers=None,
            agent="Flud client", supportPartial=0):
        # dir: directory into which each part's file is written
        self.requestedPartial = 0
        self.file = None       # file object of the part currently being written
        self.filenames = []    # every output filename, in arrival order
        self.dir = dir
        client.HTTPClientFactory.__init__(self, url, method=method,
                postdata=postdata, headers=headers, agent=agent)
        self.deferred = defer.Deferred()
        self.waiting = 1

    def gotHeaders(self, headers):
        # Validate a partial-content (resume) response, if one was requested.
        if self.requestedPartial:
            contentRange = headers.get("content-range", None)
            if not contentRange:
                # server doesn't support partial requests, oh well
                self.requestedPartial = 0
                return
            # NOTE(review): 'http' is not imported in this module, so this
            # line would raise NameError if ever reached -- verify.
            start, end, realLength = http.parseContentRange(contentRange[0])
            if start != self.requestedPartial:
                # server is acting weirdly
                self.requestedPartial = 0

    def openFile(self, partialContent):
        # Open self.filename (set by getSubHeader) either to resume an
        # existing file or to start a fresh one; record it in filenames.
        if partialContent:
            file = open(self.filename, 'rb+')
            file.seek(0, 2)   # append: continue at end of existing data
        else:
            file = open(self.filename, 'wb')
        self.filenames.append(self.filename)
        return file

    def pageStart(self, partialContent):
        """Called on page download start.
        @param partialContent: tells us if the download is partial download we
        requested.
        """
        if partialContent and not self.requestedPartial:
            raise ValueError, "we shouldn't get partial content response if"\
                    " we didn't want it!"
        self.partialContent = partialContent
        if self.waiting:
            self.waiting = 0
            # start the response in sub-header parsing mode
            self.inSubHeader = True
            self.file = None
            self.boundary = None

    def getSubHeader(self, data):
        # Parse a MIME boundary line plus the sub-part headers that follow,
        # then pass any remaining bytes to pagePart() as part body.
        newboundary = data[:data.find('\r\n')]
        data = data[len(newboundary)+2:]
        if not self.boundary:
            # first part seen: this boundary applies to the whole response
            self.boundary = newboundary
        if self.boundary != newboundary:
            if self.boundary+"--" == newboundary:
                # end of multiparts
                return
            else:
                raise ValueError, "found illegal boundary"
                # XXX: print some of newboundary *safely*
                #raise ValueError, "found illegal boundary: %s, was %s" \
                #        % (newboundary[:80], self.boundary)
        headerEnd = data.find('\r\n\r\n')
        # NOTE(review): if the blank line ending the sub-headers hasn't
        # arrived in this chunk (headerEnd == -1), the chunk is silently
        # dropped rather than buffered -- verify against callers.
        if headerEnd != -1:
            self.inSubHeader = False
            self.subHeaders = {}
            headers = data[:headerEnd].split('\r\n')
            for header in headers:
                k, v = header.split(':',1)
                self.subHeaders[k.lower()] = v.lstrip(' ')
            if not self.subHeaders.has_key('content-id'):
                raise ValueError, "no Content-ID field in multipart,"\
                        " can't continue"
            # XXX: need to check for badness (e.g, "../../) in content-id
            self.filename = os.path.join(self.dir,
                    self.subHeaders['content-id'])
            self.file = self.openFile(self.partialContent)
            if not self.subHeaders.has_key('content-length'):
                raise ValueError, "no Content-Length field in multipart,"\
                        " can't continue"
            self.filesizeRemaining = int(self.subHeaders['content-length'])
            # everything past the '\r\n\r\n' is part body
            self.pagePart(data[headerEnd+4:])

    def pagePart(self, data):
        # Route a received chunk: either we are still inside a sub-header,
        # or we are writing body bytes to the currently open part file.
        if self.inSubHeader:
            self.getSubHeader(data)
        else:
            if not self.file:
                raise ValueError, "file %s not open for output" % self.filename
            try:
                if self.filesizeRemaining > len(data):
                    # chunk lies entirely within the current part
                    self.file.write(data)
                    self.filesizeRemaining -= len(data)
                else:
                    # chunk finishes the current part: write the tail,
                    # close it, and treat the remainder (past the CRLF
                    # that follows the body) as the next part's boundary
                    self.file.write(data[:self.filesizeRemaining])
                    skipto = self.filesizeRemaining
                    self.filesizeRemaining = 0
                    self.file.close()
                    self.file = None
                    self.inSubHeader = True
                    self.getSubHeader(data[skipto+2:])
            except IOError:
                #raise
                self.file = None
                self.deferred.errback(failure.Failure())

    def pageEnd(self):
        # Close any part still open and fire the deferred with the list
        # of files written (errback if the final close fails).
        if self.file:
            try:
                self.file.close()
            except IOError:
                self.deferred.errback(failure.Failure())
                return
        self.deferred.callback(self.filenames)
def doit():
    """Kick off a multipart download of /ret into /tmp/ and return the
    deferred that fires when it completes."""
    downloader = HTTPMultipartDownloader("/ret", "/tmp/")
    reactor.connectTCP('localhost', 1080, downloader)
    return downloader.deferred
def didit(r):
print "didit: %s" % str(r)
reactor.stop()
if __name__ == "__main__":
    # fetch http://localhost:1080/ret, which must be a multipart/related
    # response whose parts carry Content-Length headers
    d = doit()
    d.addBoth(didit)
    reactor.run()
| Python |
#!/usr/bin/python
"""
FludClient.py, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
FludClient provides a GUI Client for interacting with FludNode.
"""
#from twisted.internet import wxreactor
#wxreactor.install()
import sys, os, string, time, glob
import wx
import wx.lib.mixins.listctrl as listmix
import wx.lib.editor.editor
from flud.protocol.LocalClient import *
from flud.FludConfig import FludConfig
from flud.CheckboxState import CheckboxState
FLUSHCHECKTIME = 5*60 # seconds between checks for flushing fludfile.conf

# directory containing this module, and its bundled image resources
datadir = os.path.dirname(os.path.abspath(__file__))
imgdir = os.path.join(datadir,'images')

# shared wx mime-type manager used to look up per-extension file icons
mimeMgr = wx.MimeTypesManager()
def getFileIcon(file, il, checkboxes, icondict):
    """Return the image-list index of the icon set for file.

    Icon sets are cached in icondict by mime-type description; newly
    built checkbox-composited bitmaps are appended to the image list il.
    Falls back to the 'generic' icon when no type or icon is found.
    """
    ext = file[file.rfind('.')+1:]
    ft = mimeMgr.GetFileTypeFromExtension(ext)
    # XXX: what about from mimetype or magic?
    if ft == None:
        return icondict['generic']
    desc = ft.GetDescription()
    if icondict.has_key(desc):
        return icondict[desc]
    icon = ft.GetIcon()
    if icon == None or not icon.Ok():
        # no usable icon image for this type; reuse the generic entry
        icondict[desc] = icondict['generic']
        return icondict[desc]
    bitmaps = makeCheckboxBitmaps(wx.BitmapFromIcon(icon), checkboxes)
    pos = il.GetImageCount()
    for bm in bitmaps:
        il.Add(bm)
    icondict[desc] = pos
    return pos
def getEmptyBitmapAndDC(width, height):
    """Return a cleared (bitmap, memory-DC) pair ready to be drawn on."""
    bitmap = wx.EmptyBitmap(width,height)
    dc = wx.MemoryDC()
    dc.SelectObject(bitmap)
    dc.Clear()
    return (bitmap, dc)
def makeCheckboxBitmaps(basebitmap, checkboxes):
    """Return 40x16 bitmaps pairing basebitmap (rescaled to 16x16 if
    needed) with each checkbox overlay in checkboxes."""
    if basebitmap.GetWidth() != 16 or basebitmap.GetHeight() != 16:
        scaled = basebitmap.ConvertToImage()
        scaled.Rescale(16, 16)
        basebitmap = scaled.ConvertToBitmap()
    combined = []
    for checkbox in checkboxes:
        bm, dc = getEmptyBitmapAndDC(40,16)
        dc.DrawBitmap(basebitmap, 0, 0, False)
        dc.DrawBitmap(checkbox, 20, 2, False)
        combined.append(bm)
    return combined
def createDefaultImageList():
    """Build the default 40x16 image list of icon+checkbox bitmaps.

    Returns (imagelist, checkboxes, icondict), where icondict maps an
    icon name ('folder', 'computer', ...) to the index of that icon's
    first checkbox variant in the image list.  The original version
    repeated the same add-and-count loop once per icon type; this one
    drives it from a single table.
    """
    def getDefaultCheckboxes():
        # load the six checkbox-state overlay images
        ucbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-unchecked1.png")))
        cbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-checked1.png")))
        ccbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-checkedpartial1.png")))
        cpbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-parentchecked1.png")))
        ebm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-excluded1.png")))
        ecbm = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
                "checkbox-excludedpartial1.png")))
        return (ucbm, cbm, ccbm, cpbm, ebm, ecbm)

    checkboxes = getDefaultCheckboxes()
    il = wx.ImageList(40, 16)

    def artBitmaps(artid):
        # checkbox-composited bitmap set for one stock art icon
        return makeCheckboxBitmaps(wx.ArtProvider_GetBitmap(
                artid, wx.ART_CMN_DIALOG, wx.Size(16, 16)), checkboxes)

    # (icondict key, bitmap set), in image-list order
    iconsets = [
            ('folder', artBitmaps(wx.ART_FOLDER)),
            ('computer', artBitmaps(wx.ART_HARDDISK)),
            ('drives', artBitmaps(wx.ART_HARDDISK)),
            ('cdrom', artBitmaps(wx.ART_CDROM)),
            ('floppy', artBitmaps(wx.ART_FLOPPY)),
            ('removable', artBitmaps(wx.ART_REMOVABLE)),
            ('generic', artBitmaps(wx.ART_NORMAL_FILE)),
            ('exec', artBitmaps(wx.ART_EXECUTABLE_FILE))]
    j = 0
    icondict = {}
    for name, bitmaps in iconsets:
        icondict[name] = j
        for bm in bitmaps:
            il.Add(bm)
            j = j+1
    return il, checkboxes, icondict
class DirCheckboxCtrl(wx.TreeCtrl):
    """Filesystem tree control with tri-state backup checkboxes.

    Every tree item carries a PyData tuple (path, isDir, expanded, state),
    where state is a CheckboxState value.  Clicking an item's checkbox
    icon cycles its state; derived states (SELECTEDCHILD, SELECTEDPARENT,
    EXCLUDEDCHILD) are propagated to children and parents according to
    the transition tables embedded in renderChildren and changeState.
    Listeners registered via addListener are notified of every SetPyData.
    """

    def __init__(self, parent, id=-1, dir=None, pos=wx.DefaultPosition,
            size=wx.Size(300,300), #wx.DefaultSize,
            style=(wx.TR_MULTIPLE
                | wx.TR_HAS_BUTTONS
                | wx.TR_TWIST_BUTTONS
                | wx.TR_NO_LINES
                | wx.TR_FULL_ROW_HIGHLIGHT
                | wx.SUNKEN_BORDER),
            validator=wx.DefaultValidator, name=wx.ControlNameStr,
            allowExclude=True):
        # allowExclude: when True, a SELECTED item cycles through EXCLUDED
        # before UNSELECTED; when False, EXCLUDED is skipped (changeState).
        self.allowExclude = allowExclude
        wx.TreeCtrl.__init__(self, parent, id, pos, size, style, validator,
                name)
        self.listeners = []   # callbacks invoked on every SetPyData
        self.parent = parent
        #self.il = self.GetImageList()
        #self.checkboxes = self.getDefaultCheckboxes()
        self.initTree(dir)
        self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.onExpand, self)
        self.Bind(wx.EVT_LEFT_UP, self.onClick, self)
        self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.onTooltip, self)
        self.Bind(wx.EVT_CHAR, self.onChar)

    def initTree(self, dir):
        # Build the tree root for dir and pre-expand down to $HOME.
        self.expandRoot(dir)
        # XXX: should expandHome() on first run, then load expanded dir state
        # from saved state on subsequent runs.
        self.expandHome(dir)

    def expandRoot(self, dir):
        # Create the root item, attach the image list, expand one level,
        # and start the periodic config-flush timer.
        if not os.path.isdir(dir):
            # NOTE(review): the message lacks its "% dir" format argument;
            # also a dir of None raises in isdir() above before the
            # dir == None branch below can ever run -- verify intent.
            raise ValueError("%s is not a valid directory path")
        self.defaultImageList, self.checkboxes, self.icondict \
                = createDefaultImageList()
        self.AssignImageList(self.defaultImageList)
        self.il = self.GetImageList()
        if dir == None:
            self.rootID = self.AddRoot(dir, self.icondict['computer'], -1,
                    wx.TreeItemData((dir, True, False,
                        CheckboxState.UNSELECTED)))
            # XXX: getTopLevelDirs() and add them as children
        else:
            self.rootID = self.AddRoot(dir, self.icondict['folder'], -1,
                    wx.TreeItemData((dir, True, False,
                        CheckboxState.UNSELECTED)))
        self.expandDir(self.rootID)
        self.Expand(self.rootID)
        # timestamps driving the periodic flush in checkFlush()
        self.stateChangeTime = time.time()
        self.flushTime = time.time()
        reactor.callLater(FLUSHCHECKTIME, self.checkFlush)

    def expandHome(self, dir):
        # Walk from the root down to $HOME, expanding each directory so
        # the user's home is visible on startup.
        home = os.environ['HOME']
        if home:
            traversal = home.split(os.path.sep)[1:]
            node = self.rootID
            for d in traversal:
                (ipath, isdir, expanded, istate) \
                        = self.GetItemData(node).GetData()
                self.expandDir(node)
                children = self.getChildren(node, False)
                childrennames = [self.GetItemText(x) for x in children]
                if d in childrennames:
                    p = childrennames.index(d)
                    node = children[p]
                    self.expandDir(node)
                    self.Expand(node)
                else:
                    print "couldn't traverse to HOME dir on %s" % d
                    break

    def checkFlush(self):
        # Periodic task: persist checkbox state via the parent if it has
        # changed since the last flush, then reschedule.
        print "checking for flush"
        if self.stateChangeTime > self.flushTime:
            self.flushTime = time.time()
            print "flushing"
            self.parent.flushFileConfig()
        reactor.callLater(FLUSHCHECKTIME, self.checkFlush)

    def expandDir(self, parentID, hideHidden=False, busycursor=True):
        # Populate parentID with its directory entries (dirs first, then
        # files, each sorted) the first time it is expanded.
        def isDriveAvailable(path):
            # Windows drive letters: don't probe a:/b: (floppy) drives.
            # NOTE(review): 'diExists' is not defined anywhere in this
            # chunk -- looks like a typo for an existence check; this
            # branch would raise NameError on Windows. Verify.
            if len(path) == 2 and path[1] == ':':
                path = path.lower()
                if path[0] == 'a' or path[0] == 'b' or diExists(path):
                    return True
                else:
                    return False
            return True
        (path, isDir, expanded, state) = self.GetItemData(parentID).GetData()
        if expanded:
            return   # already populated
        if not isDriveAvailable(path):
            return
        # NOTE(review): this BusyCursor is created and immediately
        # discarded, so it has no visible effect -- verify intent.
        if busycursor: wx.BusyCursor()
        try:
            dirlist = os.listdir(path)
        except:
            # unreadable directory (permissions etc.): show it as a leaf
            self.SetItemHasChildren(parentID, False)
            return
        if len(dirlist) == 0:
            self.SetItemHasChildren(parentID, False)
            return
        dirs = []
        files = []
        for i in dirlist:
            if hideHidden and i[0] == '.':
                # XXX: dotfile format check is *nix specific
                # XXX: if this is a hidden file, don't add it.
                pass
            elif os.path.isdir(os.path.join(path,i)):
                dirs.append(i)
            else:
                files.append(i)
        dirs.sort()
        files.sort()
        for d in dirs:
            child = self.AppendItem(parentID, d)
            # initial state 0 -- presumably CheckboxState.UNSELECTED; verify
            self.SetPyData(child, (os.path.join(path,d), True, False, 0))
            self.SetItemImage(child, self.icondict['folder'],
                    wx.TreeItemIcon_Normal)
            self.SetItemHasChildren(child)
        il = self.GetImageList()
        for f in files:
            child = self.AppendItem(parentID, f) # XXX: unicode?
            self.SetPyData(child, (os.path.join(path,f), False, False, 0))
            idx = getFileIcon(os.path.join(path,f), il, self.checkboxes,
                    self.icondict)
            self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
        # mark the parent as populated
        self.SetPyData(parentID, (path, isDir, True, state))

    def getStates(self, node=None):
        # Return {path: state} for every node (from node down) that is
        # explicitly SELECTED or EXCLUDED; derived states are omitted.
        if not node:
            node = self.rootID
        states = {}
        (path, isDir, expanded, state) = self.GetItemData(node).GetData()
        if state in [CheckboxState.SELECTED, CheckboxState.EXCLUDED]:
            states[path] = state
        children = self.getChildren(node, False)
        for child in children:
            states.update(self.getStates(child))
        return states

    def setStates(self, states):
        # Apply a previously saved {path: state} mapping to the tree.
        for i in states:
            found = self.findNode(i)
            if found:
                self.setItemState(found, states[i])

    def findNode(self, path):
        # Walk down from the root following path's components, expanding
        # directories as needed; return None if the path no longer exists.
        if path[0] == '/':
            path = path[1:] # XXX: unix only
        traversal = path.split(os.path.sep)
        if traversal[0] == '':
            traversal.remove('')
        node = self.rootID
        while True:
            (ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
            if len(traversal) == 0:
                return node
            self.expandDir(node)
            children = self.getChildren(node, False)
            childrennames = [self.GetItemText(x) for x in children]
            firstpath = traversal[0]
            if firstpath in childrennames:
                p = childrennames.index(firstpath)
                node = children[p]
                traversal.remove(firstpath)
            else:
                #print " the file %s is no longer present!" % path
                return None
        return None

    def onExpand(self, event):
        # Lazily populate and restyle the expanding item's children.
        self.expandDir(event.GetItem())
        self.renderChildren(event.GetItem(), True)

    def getFullPath(self, node):
        # Rebuild node's full path from its ancestors' item text.
        # NOTE(review): references self.tree, which is never assigned on
        # this class -- calling this would raise AttributeError; looks
        # copied from a wrapper class. Verify.
        path = self.tree.GetItemText(node)
        n = node
        while True:
            n = self.tree.GetItemParent(n)
            if n and n != self.GetRootItem():
                path = os.path.join(self.tree.GetItemText(n),path)
            else:
                break
        return path

    def renderParents(self, item):
        # Walk up from item, recomputing each ancestor's derived state
        # (only UNSELECTED <-> SELECTEDPARENT are ever changed here).
        if item == self.rootID:
            return
        n = item
        (path, isDir, expanded, state) = self.GetItemData(item).GetData()
        while True:
            n = self.GetItemParent(n)
            (parentpath, parentisDir, parentexpanded,
                    parentstate) = self.GetItemData(n).GetData()
            #print "parent %s" % parentpath
            if n and n != self.GetRootItem():
                newstate = parentstate
                if parentstate != CheckboxState.UNSELECTED and \
                        parentstate != CheckboxState.SELECTEDPARENT:
                    # we only care about changing UNSELECT or SELECTEDPARENT
                    # states
                    break
                else:
                    if state == CheckboxState.SELECTED or \
                            state == CheckboxState.SELECTEDCHILD or \
                            state == CheckboxState.SELECTEDPARENT:
                        # if the item (child) is selected in any way, parent
                        # should be too.
                        newstate = CheckboxState.SELECTEDPARENT
                    elif state == CheckboxState.UNSELECTED or \
                            state == CheckboxState.EXCLUDED:
                        # if the item (child) is unselected or excluded, the
                        # parent should be too, /unless/ there are other
                        # children at the same level who are selected.
                        children = self.getChildren(n, False)
                        newstate = CheckboxState.UNSELECTED
                        for child in children:
                            (cpath, cisdir, cexp,
                                    cstate) = self.GetItemData(child).GetData()
                            if cstate == CheckboxState.SELECTED or \
                                    cstate == CheckboxState.SELECTEDCHILD or \
                                    cstate == CheckboxState.SELECTEDPARENT:
                                newstate = parentstate
                if newstate == parentstate:
                    # no change at this level; higher ancestors unaffected
                    break
                imageidx = self.GetItemImage(n)
                imageidx += CheckboxState.offset(parentstate, newstate)
                self.SetPyData(n, (parentpath, parentisDir,
                        parentexpanded, newstate))
                self.SetItemImage(n, imageidx)
            else:
                break

    def renderChildren(self, parent, recurse=False):
        # Recompute each child's derived state from the parent's state.
        (parentpath, parentisDir, parentexpanded,
                parentstate) = self.GetItemData(parent).GetData()
        children = self.getChildren(parent, False)
        for child in children:
            #path = self.getFullPath(child)
            (path, isDir, expanded, state) = self.GetItemData(child).GetData()
            imageidx = self.GetItemImage(child)
            newstate = state
            """
            Here are the state transitions for children based on current states:
            ('-' = no state change, 'x' = should never occur, '!' = should be
            prevented at the parent, '?' = need to consult children)
                                child
                     unsel  sel   selch      selpar  excl  exclch
              unsel  -      !     unsel      x       -     unsel
              sel    selch  -     -          selch   -     selch
        par   selch  selch  -     -          selch   -     selch
              selpar x      x     unsl?selpr x       x     x
              excl   exlch  !     exlch      !       -     -
              exclch exlch  -     exlch      !       -     -
            """
            #if parentpath == '/data':
            #    print "/data pstate = %d" % parentstate
            #    print "  %s = %d" % (path, state)
            if state == CheckboxState.UNSELECTED:
                if parentstate == CheckboxState.SELECTED or \
                        parentstate == CheckboxState.SELECTEDCHILD:
                    newstate = CheckboxState.SELECTEDCHILD
                elif parentstate == CheckboxState.EXCLUDED or \
                        parentstate == CheckboxState.EXCLUDEDCHILD:
                    newstate = CheckboxState.EXCLUDEDCHILD
            elif state == CheckboxState.SELECTEDCHILD:
                if parentstate == CheckboxState.UNSELECTED:
                    newstate = CheckboxState.UNSELECTED
                elif parentstate == CheckboxState.SELECTEDPARENT:
                    if self.checkChildrenStates(child, [CheckboxState.SELECTED,
                            CheckboxState.SELECTEDPARENT]):
                        # XXX: did we need to pass in selections to checkChldSt
                        newstate = CheckboxState.SELECTEDPARENT
                    else:
                        newstate = CheckboxState.UNSELECTED
                elif parentstate == CheckboxState.EXCLUDED or \
                        parentstate == CheckboxState.EXCLUDEDCHILD:
                    newstate = CheckboxState.EXCLUDEDCHILD
            elif state == CheckboxState.SELECTEDPARENT:
                if parentstate == CheckboxState.SELECTED or \
                        parentstate == CheckboxState.SELECTEDCHILD:
                    newstate = CheckboxState.SELECTEDCHILD
            elif state == CheckboxState.EXCLUDEDCHILD:
                if parentstate == CheckboxState.UNSELECTED:
                    newstate = CheckboxState.UNSELECTED
                elif parentstate == CheckboxState.SELECTED or \
                        parentstate == CheckboxState.SELECTEDCHILD:
                    newstate = CheckboxState.SELECTEDCHILD
            imageidx += CheckboxState.offset(state, newstate)
            self.SetPyData(child, (path, isDir, expanded, newstate))
            self.SetItemImage(child, imageidx)
            if recurse:
                self.renderChildren(child, recurse)
                # XXX: why do we renderParents here?  It hits the same
                # 'parent's over and over and over again.  If we want to do
                # this, we need to 'collect up' the parents and just call once
                # -- this kills performance.
                #print "renderParents(%s)" % path
                #self.renderParents(child)

    def getChildren(self, node, recurse=False):
        # Return node's children; include all descendants when recurse.
        result = []
        child, cookie = self.GetFirstChild(node)
        while child:
            result.append(child)
            if recurse:
                result.extend(self.getChildren(child, recurse))
            child, cookie = self.GetNextChild(node, cookie)
        return result

    def checkChildrenStates(self, node, states, ignorelist=[]):
        # True if any descendant of node (not in ignorelist) has one of
        # the given states.
        # NOTE: the shared mutable default for ignorelist is safe here
        # because it is never mutated.
        children = self.getChildren(node)
        for child in children:
            if child not in ignorelist:
                (p, d, e, childstate) = self.GetItemData(child).GetData()
                for state in states:
                    if state == childstate:
                        #print "%s has state %d" % (p, state)
                        return True
            if self.checkChildrenStates(child, states, ignorelist):
                # do this even if it is in ignorelist, because it may have
                # children which are not in the ignorelist
                return True
        return False

    def getTooltip(self, item):
        # Build a human-readable description of item's checkbox state.
        text = self.GetItemText(item)
        (path, isDir, expanded, state) = self.GetItemData(item).GetData()
        if state == CheckboxState.SELECTED:
            if isDir:
                text = "'%s' is SELECTED for backup\n" \
                        "ALL files within this folder will be backed up\n" \
                        "(except those explicitly marked for exclusion)" % text
            else:
                text = "'%s' is SELECTED for backup" % text
        elif state == CheckboxState.UNSELECTED:
            text = "'%s' is NOT selected for backup" % text
        elif state == CheckboxState.SELECTEDPARENT:
            text = "some files within '%s' are selected for backup" % text
        elif state == CheckboxState.SELECTEDCHILD:
            text = "'%s' will be backed up\n" \
                    "(one of its parent folders is selected)" % text
        elif state == CheckboxState.EXCLUDED:
            if isDir:
                text = "'%s' is EXCLUDED from backup\n" \
                        "No files within this folder will be backed up" % text
            else:
                text = "'%s' is EXCLUDED from backup" % text
        elif state == CheckboxState.EXCLUDEDCHILD:
            text = "'%s' is EXCLUDED from backup\n" \
                    "(one of its parent folders is EXCLUDED)" % text
        return text

    def onTooltip(self, event):
        # Supply the state-describing tooltip for the hovered item.
        item = event.GetItem()
        text = self.getTooltip(item)
        if text:
            event.SetToolTip(text)
        else:
            event.StopPropagation()
            #print dir(event)

    def onClick(self, event):
        # Only clicks on the item's icon (the checkbox) change state.
        point = (event.GetX(), event.GetY())
        item, flags = self.HitTest(point)
        if flags & wx.TREE_HITTEST_ONITEMICON:
            selections = self.GetSelections()
            self.changeState(item, selections)

    def onChar(self, event):
        # Ctrl+Shift+F forces an immediate config flush.
        if event.KeyCode() == ord('F') and event.ShiftDown() \
                and event.ControlDown():
            self.flushTime = time.time()
            print "flushing"
            self.parent.flushFileConfig()
        event.Skip()

    def changeState(self, item, selections=[]):
        # Cycle item's checkbox state and propagate the change to its
        # children, parents, and (if multi-selected) sibling selections.
        # NOTE: the shared mutable default for selections is safe here
        # because it is never mutated.
        self.stateChangeTime = time.time()
        (path, isDir, expanded, state) = self.GetItemData(item).GetData()
        if item == self.rootID:
            parent = None
            parentstate = CheckboxState.UNSELECTED
        else:
            parent = self.GetItemParent(item)
            (parentpath, parentisDir, parentexpanded,
                    parentstate) = self.GetItemData(parent).GetData()
        imageidx = self.GetItemImage(item)
        # determine newstate from existing state, parent state, and state
        # of children
        """
        Here are the state transitions for the item based on current
        states and parent states: ('-' = no state change, 'x' = should
        never occur, '?' = depends on children state)
                            item
                 unsel  sel         selch  selpar  excl    exclch
          unsel  sel    excl        sel    sel     unsel   excl
          sel    sel    excl?selpar sel    x       selch   excl
    par   selch  x      excl        sel    sel     selch   excl
          selpar sel    excl        x      sel     unsel   excl
          excl   x      excl        x      exclch  exclch  excl
          exclch x      excl        x      exclch  exclch  excl
        """
        newstate = state
        if state == CheckboxState.UNSELECTED:
            newstate = CheckboxState.SELECTED
        elif state == CheckboxState.SELECTEDCHILD:
            newstate = CheckboxState.SELECTED
        elif state == CheckboxState.SELECTEDPARENT:
            if parentstate == CheckboxState.EXCLUDED or \
                    parentstate == CheckboxState.EXCLUDEDCHILD:
                # XXX: this should be impossible to reach...
                newstate = CheckboxState.EXCLUDEDCHILD
            else:
                newstate = CheckboxState.SELECTED
        elif state == CheckboxState.SELECTED:
            if self.checkChildrenStates(item, [CheckboxState.SELECTED,
                    CheckboxState.SELECTEDPARENT], selections):
                newstate = CheckboxState.SELECTEDPARENT
            elif self.allowExclude:
                newstate = CheckboxState.EXCLUDED
            else:
                if parent in selections or \
                        (parentstate == CheckboxState.UNSELECTED or \
                        parentstate == CheckboxState.SELECTEDPARENT):
                    newstate = CheckboxState.UNSELECTED
                elif parentstate == CheckboxState.SELECTED or \
                        parentstate == CheckboxState.SELECTEDCHILD:
                    newstate = CheckboxState.SELECTEDCHILD
        elif state == CheckboxState.EXCLUDED:
            if parent in selections or \
                    (parentstate == CheckboxState.UNSELECTED or \
                    parentstate == CheckboxState.SELECTEDPARENT):
                newstate = CheckboxState.UNSELECTED
            elif parentstate == CheckboxState.SELECTED or \
                    parentstate == CheckboxState.SELECTEDCHILD:
                newstate = CheckboxState.SELECTEDCHILD
            else:
                newstate = CheckboxState.EXCLUDEDCHILD
        elif state == CheckboxState.EXCLUDEDCHILD:
            newstate = CheckboxState.EXCLUDED
        if len(selections) > 1:
            # if we have multiple selections, the idea is to move all the
            # selections to the newstate defined above, or to valid
            # unselected or inherited states if the move to newstate would
            # be invalid.
            """
            Here are the state transitions for the item based on the
            newstate as determined by the clicked item and the current
            states: ('-' = no state change, '?' = consult children)
                                 item
                       unsel  sel        selch  selpar  excl   exclch
                unsel  -      unsel      -      -       unsel  -
                sel    sel    -          sel    sel     sel    -
      newstate  selch  -      unsel      -      -       unsel  -
                selpar -      unsel      -      -       unsel  -
                excl   excl   excl?slpr  excl   excl    -      excl
                exclch -      unsel      -      -       unsel  -
            """
            for i in selections:
                (mpath, misDir, mexpanded, mstate) = self.GetItemData(
                        i).GetData()
                mnewstate = mstate
                if mstate == CheckboxState.UNSELECTED or \
                        mstate == CheckboxState.SELECTEDCHILD or \
                        mstate == CheckboxState.SELECTEDPARENT:
                    if newstate == CheckboxState.SELECTED or \
                            newstate == CheckboxState.EXCLUDED:
                        mnewstate = newstate
                elif mstate == CheckboxState.SELECTED:
                    if newstate == CheckboxState.UNSELECTED or \
                            newstate == CheckboxState.SELECTEDCHILD or \
                            newstate == CheckboxState.SELECTEDPARENT or \
                            newstate == CheckboxState.EXCLUDEDCHILD:
                        mnewstate = CheckboxState.UNSELECTED
                    elif newstate == CheckboxState.EXCLUDED:
                        if self.checkChildrenStates(i,
                                [CheckboxState.SELECTED,
                                CheckboxState.SELECTEDPARENT], selections):
                            mnewstate = CheckboxState.SELECTEDPARENT
                        else:
                            mnewstate = newstate
                elif mstate == CheckboxState.EXCLUDED:
                    if newstate == CheckboxState.UNSELECTED or \
                            newstate == CheckboxState.SELECTEDCHILD or \
                            newstate == CheckboxState.SELECTEDPARENT or \
                            newstate == CheckboxState.EXCLUDEDCHILD:
                        mnewstate = CheckboxState.UNSELECTED
                    elif newstate == CheckboxState.SELECTED:
                        mnewstate = newstate
                elif mstate == CheckboxState.EXCLUDEDCHILD:
                    if newstate == CheckboxState.EXCLUDED:
                        mnewstate = newstate
                self.setItemState(i, mnewstate)
        self.setItemState(item, newstate, (path, isDir, expanded, state,
                imageidx))

    def setItemState(self, item, newstate, oldData=None):
        # Apply newstate to item (using oldData if the caller already
        # fetched it) and re-render its children and parents.
        if oldData:
            (path, isDir, expanded, state, imageidx) = oldData
        else:
            (path, isDir, expanded, state) = self.GetItemData(item).GetData()
            imageidx = self.GetItemImage(item)
        imageidx += CheckboxState.offset(state, newstate)
        self.SetPyData(item, (path, isDir, expanded, newstate))
        self.SetItemImage(item, imageidx)
        self.renderChildren(item, True)
        self.renderParents(item)

    def getTopLevelDrives(self):
        # Return the filesystem roots: drive letters on Windows, '/'
        # elsewhere.
        # NOTE(review): 'platform' is not imported in this chunk's header
        # (only sys/os/string/time/glob are) -- verify it is imported
        # elsewhere in the file.
        sys = platform.system()
        if sys == 'Windows':
            # XXX: need to test this all out
            import win32api, string
            drives = win32api.GetLogicalDriveStrings()
            driveletters = string.splitfields(drives,'\000')
            for d in driveletters:
                type = win32api.GetDriveType("%s:\\" % d)
                # XXX: set the appropriate icon
            return driveletters
        else: # Unix, OSX, etc.
            return ['/']

    def addListener(self, callback):
        # callback(item, data) is invoked on every SetPyData.
        self.listeners.append(callback)

    def SetPyData(self, item, data):
        # Intercept data updates so registered listeners stay in sync.
        wx.TreeCtrl.SetPyData(self, item, data)
        for f in self.listeners:
            f(item, data)
"""
Tests for DirCheckboxCtrl
A number of unit tests must be performed on the DirCheckboxGUI widget when
refactoring. Add to this list so that it becomes comprehensive.
Basic Tests:
1. Click on a top-level UNSELECTED object in the tree [should become SELECTED].
- Click again [should become EXCLUDED].
- Click again [should become UNSELECTED].
2. Click on a non-top-level UNSELECTED object in the tree that has no SELECTED
children [should become SELECTED, it's parents should become SELECTEDPARENT and
its children SELECTEDCHILD].
- Click again [should become EXCLUDED, it's parents who were SELECTEDPARENT
should become UNSELECTED, and it's UNSELECTED children should become
EXCLUDED].
- Click again [should become UNSELECTED, and it's children should become
UNSELECTED].
3. Change two children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click one child to become EXCLUDED [parents should stay in SELECTEDPARENT]
- Click the same child to become UNSELECTED [parents should stay in
SELECTEDPARENT]
- Click the other child to become EXCLUDED [parents should become
UNSELECTED]
4. Choose a folder and a child item.
- Click the child to become SEL [parent should be SELPAR]
- Click the parent [parent should become SEL]
- Click the parent again [parent should become SELPAR]
5. Choose a folder and a child item.
- Click the parent to become SEL [child should become SELCHILD]
- Click the child [child should become SEL]
- Click the child again [child should become EXCL]
- Click the child again [child should become SELCHILD]
6. Pick a node with children at least two-deep. Change two of the
at-least-two-deep children to their SELECTED state [parents should be in
SELECTEDPARENT state].
- Click parent closest to SELECTED children to SELECTED [two childen remain
in SELECTED, all other children become SELECTEDCHILD. Parent[s] of parent
remain SELECTEDPARENT]
- Click one child twice to become SELECTEDCHILD [child should not be able to
be UNSELECTED, parent states should not change]
- Click other child twice to become SELECTEDCHILD [child should not be able
to be UNSELECTED, parent states should not change]
7. Pick a node with children at least two-deep.
- Click deepest parent to SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click same parent again to become EXCLUDED [Parent[s] of parent become
UNSELECTED]
- Click same parent again to become UNSELECTED [Parent[s] of parent remain
UNSELECTED]
8. Pick a node with children at least two-deep.
- Click deepest child to become SELECTED [Parent[s] of parent become
SELECTEDPARENT]
- Click the topmost parent to become SELECTED [children become
SELECTEDCHILD]
- Click the topmost parent again to become SELECTEDPARENT [middle child
should become SELECTEDPARENT]
Multi-Selection Tests:
1. Multi-select three items at the same level and in the same state. Toggle
between the three main states [SELECTED, EXCLUDED, UNSELECTED]
2. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the SEL item to see that all three items become
EXCL.
3. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the EXCL item to see that all three items become
UNSEL.
4. Multi-select three items at the same level, one in each of the three states
(SEL, EXCL, UNSEL). Toggle the UNSEL item to see that all three items become
SEL.
5. Choose three items that are nested within each other: a parent folder, one
of its children folders, and a file/folder in the child folder. Choose one
other item from the child folder.
- set the top parent to UNSEL
- set the child folder to SEL [parent become SELPAR]
- set the child item to SEL
- set the other item to EXCL
- multi-select all four items
- 5A. click on the top parent (which was in SELPAR) [All four items should
become SEL, all children of any of these items should become SELCHILD].
Toggle twice more [all selected items should toggle to EXCL, then to
UNSEL]
- 5B. reset as above, click on the child folder [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5C. reset as above, click on the child item [All four items should
become EXCL]. Toggle twice more [all selected items should go to UNSEL,
then SEL]
- 5D. reset as above, click on the other item [All four items should
become UNSEL]. Toggle twice more [all selected items should go to SEL,
then EXCL]
6. Choose a folder, one of its subfolders, a subfolder of the subfolder, and an item in the deepest subfolder, and an item in the first subfolder, e.g.:
[] A
[] B
[] C
[] D
[] E
- change item 'D' to SEL [parents 'A', 'B', and 'C' should go to SELPAR]
- change item 'E' to EXCL
- multi-select 'A', 'C', and 'E'
- toggle 'E' to UNSEL [all other selections should stay in current state]
- toggle 'E' to SEL ['A' and 'B' become SEL, their children become SELCHILD]
- toggle 'E' back to EXCL [should get our original multi-select setup back]
- toggle 'C' to SEL [all selections to SEL, children to SELCHILD]
- toggle 'C' to SELPAR ['A' and 'C' to SELPAR, 'E' to UNSEL]
 - toggle 'E' twice [should get our original multi-select setup back]
"""
class CheckFileListCtrlMixin:
	"""
	Mixin that binds a left-mouse-up handler on the inheriting control.

	Intended to be mixed into FileListCtrl, which also inherits from
	wx.ListCtrl (supplying the Bind() method used here).
	"""
	# for some insane reason, we can't get EVT_LEFT_DOWN (or _UP) to bind in
	# FileListCtrl itself. But we are sneaky and can do it by lots of clever
	# hax0ry, like by using this silly mixin.
	def __init__(self, toCall):
		# toCall: callable invoked with the wx mouse event on left-button-up
		self.Bind(wx.EVT_LEFT_UP, toCall)
class FileListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin,
		CheckFileListCtrlMixin):
	"""
	Implements a file list control, with a peerctrl that contains the
	filesystem model. Currently, this peerctrl must implement an
	addListener(), changeState(), GetItemData(), expandDir(), GetSelections(),
	and GetChildren() API similar to that implemented by DirCheckBoxCtrl.
	"""
	def __init__(self, parent, peerctrl, id=-1, pos=wx.DefaultPosition,
			size=wx.DefaultSize, style=wx.LC_REPORT,
			validator=wx.DefaultValidator, name=wx.ListCtrlNameStr):
		wx.ListCtrl.__init__(self, parent, id, pos, size, style, validator,
				name)
		# bind left-click via the mixin (see CheckFileListCtrlMixin)
		CheckFileListCtrlMixin.__init__(self, self.OnClick)
		listmix.ListCtrlAutoWidthMixin.__init__(self)
		self.peerctrl = peerctrl
		# listen for state changes in the peer (tree) control so our rows
		# stay in sync with it
		self.peerctrl.addListener(self.itemChanged)
		self.itemdict = {} # a dict with filepath as key, containing tuples of
		                   # (index into ListCtrl, reference to peerctrl object)
		self.stopsearch = False  # set True to abort an in-progress search
		self.il, self.checkboxes, self.icondict = createDefaultImageList()
		self.AssignImageList(self.il, wx.IMAGE_LIST_SMALL)
		self.il = self.GetImageList(wx.IMAGE_LIST_SMALL)
		self.InsertColumn(0, "Filename")
		self.InsertColumn(1, "Location")
		#self.InsertColumn(2, "Last Backup")
		#self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
		#self.SetColumnWidth(1, -1) #wx.LIST_AUTOSIZE)
		self.Bind(wx.EVT_MOTION, self.mouseMotion)
		# paths of the tree items the most recent search was run from
		self.searchSourceItems = []
	def itemChanged(self, item, data):
		"""
		Listener callback from the peer control: if we display the changed
		path, update that row's icon to reflect the new checkbox state.
		"""
		(path, isDir, expanded, state) = data
		if self.itemdict.has_key(path):
			item = self.itemdict[path][0]
			image = getFileIcon(path, self.il, self.checkboxes,
					self.icondict) + state
			self.SetItemImage(item, image)
	def GetAll(self, excludeStates=[]):
		"""
		Returns the list indices of every row in the control.
		"""
		# NOTE(review): excludeStates is currently ignored (see XXX below);
		# the mutable default argument is harmless only while it stays unused
		result = []
		start = -1
		for i in range(self.GetItemCount()):
			item = self.GetNextItem(start, wx.LIST_NEXT_ALL)
			# XXX: only append if not in excludeStates
			result.append(item)
			start = item
		return result
	def GetSelections(self):
		"""
		Returns the list indices of all currently selected rows.
		"""
		result = []
		start = -1
		for i in range(self.GetSelectedItemCount()):
			item = self.GetNextItem(start, wx.LIST_NEXT_ALL,
					wx.LIST_STATE_SELECTED)
			result.append(item)
			start = item
		return result
	def GetPeerSelections(self, selections):
		"""
		Maps row indices to their corresponding peer-control (tree) items,
		silently skipping rows whose path is no longer in itemdict.
		"""
		result = []
		for item in selections:
			# column 1 holds the directory, column 0 the filename
			path = os.path.join(self.GetItem(item,1).GetText(),
					self.GetItemText(item))
			if self.itemdict.has_key(path):
				result.append(self.itemdict[path][1])
		return result
	def mouseMotion(self, event):
		"""
		Shows a tooltip (text supplied by the peer control) when the mouse
		is over a row's icon.
		"""
		point = event.GetPosition()
		item, flags = self.HitTest(point)
		if flags == wx.LIST_HITTEST_ONITEMICON:
			path = os.path.join(self.GetItem(item,1).GetText(),
					self.GetItemText(item))
			text = self.peerctrl.getTooltip(self.itemdict[path][1])
			tip = wx.ToolTip(text)
			self.SetToolTip(tip)
			#tipwin = tip.GetWindow()
			#tippos = tipwin.GetPosition()
			#print "%s vs %s" % (tippos, point)
			#tipwin.SetPosition(point)
	def OnClick(self, event):
		"""
		Handles a left-click on a row's icon: toggles the item's state in
		the peer control (honoring multi-selection) and refreshes the icon.
		"""
		point = event.GetPosition()
		item, flags = self.HitTest(point)
		if flags == wx.LIST_HITTEST_ONITEMICON:
			peerselections = self.GetPeerSelections(self.GetSelections())
			path = os.path.join(self.GetItem(item,1).GetText(),
					self.GetItemText(item))
			ditem = self.itemdict[path][1] # raises if not present
			self.peerctrl.changeState(ditem, peerselections)
			(path, isDir, expanded, state) \
					= self.peerctrl.GetItemData(ditem).GetData()
			image = getFileIcon(path, self.il, self.checkboxes,
					self.icondict) + state
			self.SetItemImage(item, image)
	def searchButtonAction(self, event):
		"""
		Runs a search (event.searchstring) over the folders selected in the
		peer control, populating this list with matches.  Returns an
		(errmsg, infomsg) tuple for display in the banner.
		"""
		selections = self.peerctrl.GetSelections()
		if len(selections) == 0:
			return ("Please tell me where to search. Select one or more"
					" folders in the left-hand panel (hold down SHIFT or"
					" CTRL for multiple selection), then click the 'find!'"
					" button again.", None)
		else:
			self.DeleteAllItems()
			self.itemdict = {}
			b = wx.BusyCursor()  # busy cursor for the lifetime of this call
			searchSourceItems = []
			for i in selections:
				self.addResults(i, event.searchstring)
				searchSourceItems.append(i)
			# remember the paths searched so rules can be attached to them
			self.searchSourceItems = [self.peerctrl.GetItemData(s).GetData()[0]
					for s in searchSourceItems]
			print "sources: %s" % self.searchSourceItems
			return ("Search results will appear as files that match your"
					" search are found.", None)
		return (None, None)  # NOTE(review): unreachable; both branches return
	def addResults(self, ditem, searchstring):
		"""
		Recursively walks ditem (a peer-control item), appending any file
		whose path contains one of the space-separated search terms.
		"""
		(path, isDir, expanded, state) \
				= self.peerctrl.GetItemData(ditem).GetData()
		position = self.GetItemCount()
		if isDir:
			if not expanded:
				self.peerctrl.expandDir(ditem, busycursor=False)
			children = self.peerctrl.getChildren(ditem)
			for c in children:
				self.addResults(c, searchstring)
				wx.Yield()  # keep the UI responsive during long walks
				if self.stopsearch:
					break
		else:
			terms = [x for x in searchstring.split(' ') if x != '']
			for term in terms:
				print path
				# NOTE(review): find() > 0 misses a match at offset 0; since
				# paths begin with the path separator this only matters for
				# terms starting with '/' -- confirm whether that is intended
				if path.find(term) > 0:
					image = getFileIcon(path, self.il, self.checkboxes,
							self.icondict) + state
					dirname, filename = os.path.split(path)
					index = self.InsertImageStringItem(position, filename,
							image)
					self.SetStringItem(index, 1, dirname)
					self.SetColumnWidth(0, wx.LIST_AUTOSIZE)
					self.itemdict[path] = (index, ditem)
					break
	def setGroup(self, state):
		"""
		Applies the given checkbox state to every row (by cycling the first
		row's peer item, with all rows as the selection, until the desired
		state is reached).  Returns the source paths of the last search.
		"""
		items = self.GetAll()
		item = items[0]  # NOTE(review): raises IndexError on an empty list
		peerselections = self.GetPeerSelections(items)
		path = os.path.join(self.GetItem(item,1).GetText(),
				self.GetItemText(item))
		ditem = self.itemdict[path][1] # raises if not present
		while True:
			# cycle until the items state matches the desired state
			self.peerctrl.changeState(ditem, peerselections) # can be slow
			(path, isDir, expanded, nstate) \
					= self.peerctrl.GetItemData(ditem).GetData()
			if nstate == state:
				break
		image = getFileIcon(path, self.il, self.checkboxes,
				self.icondict) + state
		self.SetItemImage(item, image)
		return self.searchSourceItems
class GroupSelectionCheckbox(wx.Panel):
	"""
	A three-state checkbox (unselected / selected / excluded) with an
	explanatory label, used to apply a backup/exclude rule to a whole
	group of search results at once.
	"""

	_BACKUP_TEXT = "always BACKUP any files that match these search criteria"
	_EXCLUDE_TEXT = "always EXCLUDE any files that match these search criteria"

	def __init__(self, parent, id=-1, setGroupState=None):
		wx.Panel.__init__(self, parent, id)
		# optional callback, invoked with the new CheckboxState on clicks
		self.setGroupState = setGroupState
		def _bitmap(fname):
			# load one checkbox image from the application image directory
			return wx.BitmapFromImage(wx.Image(os.path.join(imgdir, fname)))
		self.ubm = _bitmap("checkbox-unchecked1.png")
		self.cbm = _bitmap("checkbox-checked1.png")
		self.ebm = _bitmap("checkbox-excluded1.png")
		self.checkboxButton = wx.BitmapButton(self, -1, self.ubm,
				style=wx.NO_BORDER)
		self.Bind(wx.EVT_BUTTON, self.onCheckbox, self.checkboxButton)
		# initial label carries a trailing space (layout padding)
		self.description = wx.StaticText(self, -1, self._BACKUP_TEXT + " ")
		self.state = CheckboxState.UNSELECTED
		self.gbSizer = wx.GridBagSizer(1,2)
		self.gbSizer.Add(self.checkboxButton, (0,0), flag=wx.ALIGN_CENTER)
		self.gbSizer.Add(self.description, (0,1), flag=wx.ALIGN_CENTER)
		self.gbSizer.AddGrowableRow(1)
		self.SetSizerAndFit(self.gbSizer)

	def Enable(self, enable=True):
		"""Enable or disable both the checkbox button and its label."""
		self.checkboxButton.Enable(enable)
		self.description.Enable(enable)

	def Disable(self):
		"""Convenience wrapper for Enable(False)."""
		self.Enable(False)

	def clear(self):
		"""Reset to the unselected state with the default label."""
		self.checkboxButton.SetBitmapLabel(self.ubm)
		self.state = CheckboxState.UNSELECTED
		self.description.SetLabel(self._BACKUP_TEXT)

	def setState(self, state):
		"""Programmatically set the state (does not fire the callback)."""
		self.state = state
		appearance = {
			CheckboxState.UNSELECTED: (self.ubm, self._BACKUP_TEXT),
			CheckboxState.SELECTED: (self.cbm, self._BACKUP_TEXT),
			CheckboxState.EXCLUDED: (self.ebm, self._EXCLUDE_TEXT),
		}.get(state)
		if appearance:
			bitmap, label = appearance
			self.checkboxButton.SetBitmapLabel(bitmap)
			self.description.SetLabel(label)

	def onCheckbox(self, event):
		"""Cycle UNSELECTED -> SELECTED -> EXCLUDED -> UNSELECTED on click."""
		if self.state == CheckboxState.UNSELECTED:
			# label already reads "BACKUP", so only the bitmap changes
			self.checkboxButton.SetBitmapLabel(self.cbm)
			newstate = CheckboxState.SELECTED
		elif self.state == CheckboxState.SELECTED:
			self.checkboxButton.SetBitmapLabel(self.ebm)
			self.description.SetLabel(self._EXCLUDE_TEXT)
			newstate = CheckboxState.EXCLUDED
		elif self.state == CheckboxState.EXCLUDED:
			self.checkboxButton.SetBitmapLabel(self.ubm)
			self.description.SetLabel(self._BACKUP_TEXT)
			newstate = CheckboxState.UNSELECTED
		else:
			return
		self.state = newstate
		if self.setGroupState:
			self.setGroupState(newstate)
class SearchPanel(wx.Panel):
	"""
	Panel combining a search field/button, a FileListCtrl of results, and
	a group-selection checkbox that applies a backup/exclude rule to all
	results at once.
	"""
	def __init__(self, parent, dircheckbox, id=-1, searchButtonAction=None):
		wx.Panel.__init__(self, parent, id)
		self.dircheckbox = dircheckbox
		# optional callback invoked after each search with err/info messages
		self.searchButtonAction = searchButtonAction
		self.SetAutoLayout(False)
		self.rules = {} # should refer to something from fludrules
		self.searchField = wx.TextCtrl(self, -1,
				"search for files to backup here", size=wx.Size(-1,-1),
				style=wx.TE_PROCESS_ENTER)
		self.searchField.SetToolTipString('find files within directories'
				' selected to the left by entering search terms here')
		self.searchField.Bind(wx.EVT_TEXT_ENTER, self.onSearchClick)
		self.searchField.Bind(wx.EVT_LEFT_DOWN, self.selectAllText)
		self.searchField.Bind(wx.EVT_KILL_FOCUS, self.unfocused)
		self.searchButton = wx.Button(self, -1, 'find!', name='searchButton')
		self.Bind(wx.EVT_BUTTON, self.onSearchClick, self.searchButton)
		self.searchResults = FileListCtrl(self, dircheckbox, -1,
				name='searchResults', style=wx.SUNKEN_BORDER | wx.LC_REPORT)
		self.searchResults.SetExtraStyle(0)
		self.searchResults.SetLabel('found files')
		self.groupSelection = GroupSelectionCheckbox(self, -1, self.setGroup)
		self.groupSelection.Disable()  # enabled once a search has run
		self.gbSizer = wx.GridBagSizer(3,2)
		self.gbSizer.Add(self.searchField, (0,0), flag=wx.EXPAND)
		self.gbSizer.Add(self.searchButton, (0,1))
		self.gbSizer.Add(self.searchResults, (1,0), (1,2),
				flag=wx.EXPAND|wx.TOP, border=5)
		self.gbSizer.Add(self.groupSelection, (2,0) )
		self.gbSizer.AddGrowableRow(1)
		self.gbSizer.AddGrowableCol(0)
		self.SetSizerAndFit(self.gbSizer)
	def onSearchClick(self, event):
		"""
		Starts a search (the button shows 'stop!' while running), or stops
		one already in progress.  Afterwards, restores any previously saved
		rule state for the searched folders to the group checkbox.
		"""
		event.searchstring = self.searchField.GetValue()
		if self.searchButton.GetLabel() == 'stop!':
			# a search is running; request that it stop
			self.searchButton.SetLabel('find!')
			self.searchResults.stopsearch = True
			return
		else:
			self.groupSelection.clear()
			self.groupSelection.Disable()
			self.searchButton.SetLabel('stop!')
			self.searchButton.Update()
			err, info = self.searchResults.searchButtonAction(event)
			selections = self.searchResults.searchSourceItems
			# see if we should set the checkbox button from a previous rule
			state = None
			if len(selections) > 0 and self.rules.has_key(selections[0]):
				rule = self.rules[selections[0]]
				if self.rules[selections[0]].has_key(event.searchstring):
					state = self.rules[selections[0]][event.searchstring]
				# only restore when every searched folder has the same rule
				for i in selections:
					if not self.rules.has_key(i) or self.rules[i] != rule:
						state = None
						break
					#for j in self.rules[i]:
			if state:
				print "should restore checkbox to %s" % state
				self.groupSelection.setState(state)
			self.searchButton.SetLabel('find!')
			self.searchResults.stopsearch = False
			if self.searchButtonAction:
				self.searchButtonAction(event, errmsg=err, infomsg=info)
			self.groupSelection.Enable()
	def selectAllText(self, event):
		"""Selects all text on the first click into the search field."""
		if wx.Window.FindFocus() != self.searchField:
			self.searchField.SetSelection(-1,-1)
			self.searchField.SetFocus()
		else:
			self.searchField.SetSelection(0,0)
		event.Skip()
	def unfocused(self, event):
		"""Clears the selection when the search field loses focus."""
		self.searchField.SetSelection(0,0)
	def setGroup(self, state):
		"""
		Callback from the group checkbox: applies state to all results and
		records (or removes, for UNSELECTED) the rule for each searched
		folder, keyed by folder path then search string.
		"""
		b = wx.BusyCursor()  # busy cursor for the lifetime of this call
		selections = self.searchResults.setGroup(state)
		for s in selections:
			if not self.rules.has_key(s):
				self.rules[s] = {}
			if state == CheckboxState.UNSELECTED:
				# unselecting removes the whole rule for this folder
				try:
					self.rules.pop(s)
				except:
					pass
			else:
				self.rules[s][self.searchField.GetValue()] = state
		print self.rules
class FilePanel(wx.SplitterWindow):
	"""
	The 'Backup Files' tab: a directory-checkbox tree on the left and a
	SearchPanel on the right.  Loads initial selection rules from
	fludrules.init on first run, plus any saved states from fludfile.conf.
	"""
	def __init__(self, parent, searchButtonAction=None):
		# Use the WANTS_CHARS style so the panel doesn't eat the Return key.
		wx.SplitterWindow.__init__(self, parent, -1,
				style=wx.SP_LIVE_UPDATE | wx.CLIP_CHILDREN | wx.WANTS_CHARS)
		self.Bind(wx.EVT_SIZE, self.OnSize)
		self.SetNeedUpdating(True)
		print self.GetSize()
		self.tree = DirCheckboxCtrl(self, -1, dir="/")
		# XXX: fludrules.init path should be in config
		self.fludrules = self.getFludHome()+"/fludrules.init"
		if not os.path.isfile(self.fludrules):
			# XXX: do the other first time stuff (email encrypted credentials,
			# etc.)
			parent.SetMessage("Welcome. This appears to be the first"
					" time you've run flud. We've automatically selected some"
					" files for backup. You can make changes by"
					" selecting/deselecting files and directories. When you are"
					" done, simply close this window.")
			# copy the packaged default rules into the user's flud home
			src = open(os.path.join(datadir,'fludrules.init'), 'r')
			dst = open(self.fludrules, 'w')
			filerules = src.read()
			dst.write(filerules)
			dst.close()
			src.close()
			# NOTE(review): eval() executes whatever is in the rules file;
			# acceptable only because it is a local, trusted file
			filerules = eval(filerules)
			rulestates = {}
			for rule in filerules['baserules']:
				value = filerules['baserules'][rule]
				# expand env vars and glob patterns into concrete paths
				rule = glob.glob(os.path.expandvars(rule))
				for r in rule:
					rulestates[r] = value
			self.tree.setStates(rulestates)
		# XXX: fludfile.conf path should be in config
		self.fludfiles = self.getFludHome()+"/fludfile.conf"
		print self.fludfiles
		if os.path.isfile(self.fludfiles):
			# restore previously saved selection states (also via eval of a
			# local, trusted file; note 'file' shadows the builtin here)
			file = open(self.fludfiles, 'r')
			states = eval(file.read())
			self.tree.setStates(states)
			file.close()
		self.searchPanel = SearchPanel(self, dircheckbox=self.tree,
				searchButtonAction=searchButtonAction)
		self.SetMinimumPaneSize(50)
		self.SplitVertically(self.tree, self.searchPanel) #, 300)
		print self.GetSize()
	def getFludHome(self):
		"""
		Returns the flud home directory (FLUDHOME, or HOME/.flud),
		creating it if necessary.
		"""
		if os.environ.has_key('FLUDHOME'):
			fludhome = os.environ['FLUDHOME']
		else:
			fludhome = os.environ['HOME']+"/.flud"
		if not os.path.isdir(fludhome):
			os.mkdir(fludhome, 0700)
		return fludhome
	def shutdown(self, event):
		"""Persists the tree's selection states, then continues shutdown."""
		self.flushFileConfig()
		event.Skip()
	def flushFileConfig(self):
		"""Writes the tree's current selection states to fludfile.conf."""
		states = self.tree.getStates()
		f = open(self.fludfiles, 'w')
		f.write(str(states))
		f.close()
		for i in states:
			print "%s %s" % (i, states[i])
	def OnSize(self, event):
		# keep the tree sized to the client area
		w,h = self.GetClientSizeTuple()
		if self.tree:
			self.tree.SetDimensions(0, 0, w, h)
		event.Skip()
class RestoreCheckboxCtrl(DirCheckboxCtrl):
	"""
	Tree control for the Restore tab.  Unlike its parent class, the tree
	is populated from the node's stored master metadata (listMeta) rather
	than the local filesystem, and the 'exclude' state is disallowed.
	"""
	# XXX: child/parent selection/deselection isn't quite right still, esp wrt
	# root node. repro:
	# -/
	#  -d1
	#   -f1
	#  -d2
	#   -d3
	#    -f2
	#   -f3
	# with nothing selected, select d3 and f3, then select root, then deselect
	# d3 and f3
	def __init__(self, parent, id=-1, config=None, pos=wx.DefaultPosition,
			size=wx.DefaultSize,
			style=(wx.TR_MULTIPLE
				| wx.TR_HAS_BUTTONS
				| wx.TR_TWIST_BUTTONS
				| wx.TR_NO_LINES
				| wx.TR_FULL_ROW_HIGHLIGHT
				| wx.SUNKEN_BORDER),
			validator=wx.DefaultValidator, name=wx.ControlNameStr):
		self.config = config
		# allowExclude=False: restore has only selected/unselected states
		DirCheckboxCtrl.__init__(self, parent, id, config, pos, size, style,
				validator, name, allowExclude=False)
	def initTree(self, config):
		"""Builds the root and expands down to the first branching point."""
		self.expandRoot(config)
		self.expandUntilMultiple()
	def expandRoot(self, config):
		"""Creates the image list and the root ('/') tree item."""
		self.defaultImageList, self.checkboxes, self.icondict \
				= createDefaultImageList()
		self.AssignImageList(self.defaultImageList)
		self.il = self.GetImageList()
		self.rootID = self.AddRoot("/", self.icondict['computer'], -1,
				wx.TreeItemData(("", True, False, CheckboxState.UNSELECTED)))
		self.update()
	def expandUntilMultiple(self):
		"""
		Expands single-child directories until reaching a node with zero
		or multiple children (the first interesting branching point).
		"""
		node = self.rootID
		while True:
			(ipath, isdir, expanded, istate) = self.GetItemData(node).GetData()
			children = self.getChildren(node, False)
			if len(children) > 1 or len(children) == 0:
				break;
			node = children[0]
			self.Expand(node)
	def update(self):
		"""
		Syncs the tree with the stored master metadata: adds a tree path
		for each backed-up file not already present.
		"""
		master = listMeta(self.config)
		for i in master:
			# non-dict entries are file paths; dict entries are other
			# metadata records and are skipped here
			if not isinstance(master[i], dict):
				traversal = i.split(os.path.sep)
				node = self.rootID
				path = "/"
				if traversal[0] == '':
					traversal.remove('')
				for n in traversal:
					path = os.path.join(path, n)
					children = self.getChildrenDict(node)
					if n == traversal[-1] and not n in children:
						# leaf component: add as a file item
						child = self.AppendItem(node, n)
						self.SetPyData(child, (path, False, False, 0))
						idx = getFileIcon(i, self.il, self.checkboxes,
								self.icondict)
						self.SetItemImage(child, idx, wx.TreeItemIcon_Normal)
					else:
						if not n in children:
							# intermediate directory not yet in the tree
							child = self.AppendItem(node, n)
							self.SetPyData(child, (path, True, False, 0))
							self.SetItemImage(child, self.icondict['folder'],
									wx.TreeItemIcon_Normal)
						else:
							child = children[n]
					node = child
		self.Expand(self.rootID)
	def getChildrenDict(self, node):
		"""Returns {item text: tree item} for node's immediate children."""
		result = {}
		child, cookie = self.GetFirstChild(node)
		while child:
			result[self.GetItemText(child)] = child
			child, cookie = self.GetNextChild(node, cookie)
		return result
	def onExpand(self, event):
		# expansion is a no-op here: the whole tree is built by update()
		pass
	def getSelected(self, startNode=None):
		"""
		Recursively collects selected (or selected-child) file items under
		startNode (default: root).
		"""
		if not startNode:
			startNode = self.rootID
		children = self.getChildren(startNode)
		selected = []
		for n in children:
			(path, isDir, expanded, state) = self.GetItemData(n).GetData()
			if not isDir \
					and (state == CheckboxState.SELECTED \
						or state == CheckboxState.SELECTEDCHILD):
				selected.append(n)
			if isDir and (state == CheckboxState.SELECTED \
					or state == CheckboxState.SELECTEDPARENT \
					or state == CheckboxState.SELECTEDCHILD):
				selected += self.getSelected(n)
		return selected
class RestorePanel(wx.Panel):
	"""
	The 'Restore' tab: a tree of backed-up files plus a restore button.
	Issues GETF requests through the client factory for each selection.
	"""
	def __init__(self, parent, config, factory):
		self.config = config
		self.factory = factory
		wx.Panel.__init__(self, parent, -1)
		self.Bind(wx.EVT_SIZE, self.OnSize)
		self.tree = RestoreCheckboxCtrl(self, -1, config, #wx.TreeCtrl(self, -1,
				style=(wx.TR_MULTIPLE
					| wx.TR_HAS_BUTTONS
					| wx.TR_TWIST_BUTTONS
					| wx.TR_NO_LINES
					| wx.TR_FULL_ROW_HIGHLIGHT
					| wx.SUNKEN_BORDER))
		self.restoreButton = wx.Button(self, -1, 'restore selected files',
				name='restoreButton')
		self.Bind(wx.EVT_BUTTON, self.onRestoreClick, self.restoreButton)
		self.gbSizer = wx.GridBagSizer(2,1)
		self.gbSizer.Add(self.tree, (0,0), flag=wx.EXPAND|wx.ALL, border=0)
		self.gbSizer.Add(self.restoreButton, (1,0), flag=wx.EXPAND|wx.ALL,
				border=0)
		self.gbSizer.AddGrowableRow(0)
		self.gbSizer.AddGrowableCol(0)
		self.SetSizerAndFit(self.gbSizer)
	def update(self):
		"""Refreshes the tree from stored metadata."""
		self.tree.update()
	def OnSize(self, event):
		w,h = self.GetClientSizeTuple()
		event.Skip()
	def onTooltip(self, event):
		# tooltips are not implemented for the restore tree
		pass
	def onRestoreClick(self, event):
		"""
		Sends a GETF request for each selected file; result callbacks
		color the item green (success) or red (failure).
		"""
		for n in self.tree.getSelected():
			(path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
			print "restoring %s" % path
			d = self.factory.sendGETF(path)
			d.addCallback(self.restored, n)
			d.addErrback(self.restoreFailed, n)
		self.tree.UnselectAll()
	def restored(self, res, n):
		"""Success callback: marks item n green and toggles its state."""
		(path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
		print "yay, %s" % path
		self.tree.SetItemTextColour(n, '#005804')
		self.tree.changeState(n)
	def restoreFailed(self, err, n):
		"""Errback: marks item n red and toggles its state."""
		(path, isDir, expanded, state) = self.tree.GetItemData(n).GetData()
		print "boo, %s: %s" % (path, err)
		self.tree.SetItemTextColour(n, wx.RED)
		self.tree.changeState(n)
class SchedulePanel(wx.Panel):
	"""
	Placeholder notebook tab for configuring the backup schedule
	(functionality not yet implemented).
	"""
	def __init__(self, parent):
		wx.Panel.__init__(self, parent, -1)
		self.Bind(wx.EVT_SIZE, self.OnSize)
	def OnSize(self, event):
		# nothing to lay out yet; just let the event propagate.
		# (removed an unused GetClientSizeTuple() unpack that did nothing)
		event.Skip()
class FeedbackPanel(wx.Panel):
	"""
	Placeholder notebook tab for sending feedback to the flud developers
	(not yet implemented).
	"""
	def __init__(self, parent):
		wx.Panel.__init__(self, parent, -1)
		self.Bind(wx.EVT_SIZE, self.OnSize)
		# NOTE(review): this editor widget is created with 'parent' (not
		# self) as its parent, and the reference is discarded -- presumably
		# leftover experimentation; confirm before relying on it.
		editor = wx.lib.editor.editor.Editor(parent, -1)
	def OnSize(self, event):
		w,h = self.GetClientSizeTuple()
		event.Skip()
class FludNotebook(wx.Notebook):
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.NB_BOTTOM|wx.NO_BORDER):
self.parent = parent
self.config = parent.config
self.factory = LocalClientFactory(self.config)
print "connecting to localhost:%d" % self.config.clientport
reactor.connectTCP('localhost', self.config.clientport, self.factory)
wx.Notebook.__init__(self, parent, id, pos, style=style)
self.filePanel = FilePanel(self,
searchButtonAction=parent.searchButtonAction)
self.AddPage(self.filePanel, "Backup Files")
self.restorePanel = RestorePanel(self, self.config, self.factory)
self.AddPage(self.restorePanel, "Restore")
self.schedulePanel = SchedulePanel(self)
self.AddPage(self.schedulePanel, "Backup Schedule")
self.feedbackPanel = FeedbackPanel(self)
self.AddPage(self.feedbackPanel, "Feedback")
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.changedPage)
def shutdown(self, event):
self.filePanel.shutdown(event)
def changedPage(self, event):
page = event.GetSelection()
if page == 0:
self.SetMessage("Select files and directories for backup"
" with the filesystem view on the left, or set up criteria"
" for finding files for backup with simple searches,"
" below right.")
elif page == 1:
self.SetMessage("Select files/directories to be restored to"
" your computer, then click on 'restore!' Files will turn"
" green as they arrive.")
self.restorePanel.update()
elif page == 2:
self.SetMessage("Configure how often your computer should backup."
"\n (not implemented)")
elif page == 3:
self.SetMessage("Send feedback to flud programmers. (not"
" implemented)")
def SetMessage(self, msg):
self.parent.SetMessage(msg)
class FludLogoPanel(wx.Panel):
	"""
	Banner panel at the top of the frame: the flud logo on the left and a
	centered, word-wrapped message area on the right.
	"""
	def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
			size=wx.Size(10,10), style=wx.TAB_TRAVERSAL, name="logo panel"):
		wx.Panel.__init__(self, parent, id, pos, size, style, name)
		self.SetAutoLayout(True)
		self.SetBackgroundColour(wx.BLACK)
		self.SetForegroundColour(wx.WHITE)
		logobmp = wx.BitmapFromImage(wx.Image(os.path.join(imgdir,
				"flud-backup-logo-1-150-nodrop.png")))
		pad = 0
		self.logowidth = logobmp.GetWidth()
		self.logoheight = logobmp.GetHeight()
		self.logo = wx.StaticBitmap(self, -1, logobmp)
		self.messagePanel = wx.Panel(self, -1)
		self.messagePanel.SetBackgroundColour(wx.BLACK)
		self.messagePanel.SetForegroundColour(wx.WHITE)
		self.message = wx.StaticText(self.messagePanel, -1,
				"message text area", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE,
				size=(-1, self.logoheight-15))
		self.message.Bind(wx.EVT_SIZE, self.resizeMessage)
		self.bsizer = wx.BoxSizer(wx.VERTICAL)
		self.bsizer.Add(self.message, flag=wx.EXPAND|wx.ALL, border=35)
		self.bsizer.SetSizeHints(self.messagePanel)
		self.messagePanel.SetSizer(self.bsizer)
		self.gbSizer = wx.GridBagSizer(1,2)
		self.gbSizer.Add(self.logo, (0,0))
		self.gbSizer.Add(self.messagePanel, (0,1), flag=wx.EXPAND|wx.ALL)
		self.gbSizer.AddGrowableRow(1)
		self.gbSizer.AddGrowableCol(1)
		self.SetSizerAndFit(self.gbSizer)
		self.SetSize(wx.Size(self.logowidth, self.logoheight))
		# lock the panel height to the logo's height
		self.SetSizeHints(self.logowidth, self.logoheight, -1, self.logoheight)
	def SetMessage(self, msg):
		"""Displays msg in the message area, wrapped to the current width."""
		(w,h) = self.message.GetSizeTuple()
		#print "msg area size is %d x %d" % (w,h)
		self.message.SetLabel(msg)
		self.message.Wrap(w)
		#print "msg is '%s'" % self.message.GetLabel()
		self.message.Center()
	def resizeMessage(self, evt):
		# this is mainly to deal with StaticText wonkiness (not calling Wrap()
		# automatically, not centering properly automatically). It may be
		# possible to get rid of this with a future wxPython release.
		(w,h) = self.message.GetSizeTuple()
		self.message.Wrap(w)
		m = self.message.GetLabel()
		m = m.replace('\n',' ')
		self.message.SetLabel(m)
		self.message.Wrap(w)
		self.message.Center()
class FludFrame(wx.Frame):
	"""
	Top-level application frame: logo/message banner on top, the tabbed
	notebook below, and a status bar.
	"""
	def __init__(self, parent, id=wx.ID_ANY, label="flud backup client",
			size=wx.Size(800,600),
			style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
			config=None):
		# bug fix: default window title was misspelled 'flud bakcup client'
		wx.Frame.__init__(self, parent, id, label, size=size, style=style)
		self.lognull = wx.LogNull()  # suppress wx popup log dialogs
		wx.ToolTip.SetDelay(2000)
		# whether the next searchButtonAction call should clear the banner
		self.clearMessage = False
		self.logoPanel = FludLogoPanel(self)
		self.SetMessage('Welcome.')
		self.config = config
		self.notebook = FludNotebook(self, size=wx.Size(200,200))
		self.operationStatus = wx.StatusBar(name='operationStatus',
				parent=self, style=0)
		self.SetStatusBar(self.operationStatus)
		self.gbSizer = wx.GridBagSizer(2,1)
		self.gbSizer.Add(self.logoPanel,(0,0), flag=wx.EXPAND)
		self.gbSizer.Add(self.notebook, (1,0), flag=wx.EXPAND|wx.ALL, border=1)
		self.gbSizer.AddGrowableRow(1)
		self.gbSizer.AddGrowableCol(0)
		self.SetSizerAndFit(self.gbSizer)
		self.Bind(wx.EVT_CLOSE, self.shutdown)
		self.SetSize(size)
		self.Show(True)
	def SetMessage(self, message):
		"""Shows message in the logo panel's banner text area."""
		self.logoPanel.SetMessage(message)
	def shutdown(self, event):
		"""Handles window close: lets the notebook persist its state."""
		self.notebook.shutdown(event)
	def searchButtonAction(self, event, errmsg=None, infomsg=None):
		"""
		Callback invoked after a search; shows an error or info message in
		the banner, or clears a previous transient error message.
		"""
		if errmsg:
			self.logoPanel.SetMessage(errmsg)
			self.clearMessage = True
		elif infomsg:
			self.logoPanel.SetMessage(infomsg)
			self.clearMessage = False
		elif self.clearMessage:
			self.logoPanel.SetMessage("")
#if __name__ == '__main__':
# app = wx.PySimpleApp()
#
# config = FludConfig()
# config.load(doLogging=False)
#
# f = FludFrame(None, wx.ID_ANY, 'flud backup client', size=(795,600),
# style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE,
# config=config)
#
# from twisted.internet import reactor
# reactor.registerWxApp(app)
# reactor.run()
| Python |
"""
FludConfig.py, (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), version 3.
Manages the configuration file for flud backup.
"""
import os, sys, socket, re, logging, time
import ConfigParser
import flud.FludCrypto as FludCrypto
from flud.FludCrypto import FludRSA
from flud.FludkRouting import kRouting
from flud.fencode import fencode, fdecode
# module-level logger for the flud package
logger = logging.getLogger('flud')
# offset added to the server listen port to derive the client-command port
CLIENTPORTOFFSET = 500
# NOTE(review): the string below precedes the TrustDeltas class, so it is a
# no-op expression statement, not TrustDeltas' docstring
""" default mapping of trust deltas """
class TrustDeltas:
	"""
	Default trust-score adjustment constants used when tracking peer
	reputations.
	"""
	INITIAL_SCORE = 1    # score assigned to a newly encountered node
	POSITIVE_CAP = 500   # upper bound on any node's trust score
	NEGATIVE_CAP = -500  # lower bound on any node's trust score
	MAX_INC_PERDAY = 100 # XXX: currently unused
	MAX_DEC_PERDAY = -250
	# note: the rest of these are classes so that we can pass them around kind
	# of like enums (identify what was passed by type, instead of by value)
	class PUT_SUCCEED:
		value = 2
	class GET_SUCCEED:
		value = 4
	class VRFY_SUCCEED:
		value = 4
	class FNDN_FAIL:
		value = -1
	class PUT_FAIL:
		value = -2
	class GET_FAIL:
		value = -10
	class VRFY_FAIL:
		value = -10
class FludDebugLogFilter(logging.Filter):
	"""
	Keeps all logging levels defined by loggers, but ups level to DEBUG for
	loggers whose namespaces match patterns given by wildcards.
	"""
	# XXX: doesn't really interact with all logging levels by all loggers, only
	# with the one defined by the root logger. If children have stricter
	# loglevels set, this filter won't ever get called on them.
	def __init__(self, wildcardStrings):
		# wildcardStrings: a dotted wildcard pattern (or list of them),
		# e.g. "flud.protocol.*" or "flud.Flud*"
		self.setWildcards(wildcardStrings)
		root = logging.getLogger("")
		if hasattr(root, 'fludDebugLogLevel'):
			# another instance already stashed the original root level
			self.effectiveLevel = root.fludDebugLogLevel
		else:
			self.effectiveLevel = root.getEffectiveLevel()
			# bug fix: stash the original level on the *root* logger (it was
			# previously set on self, so the hasattr() check above never
			# fired and a second instance would read NOTSET as the level)
			root.fludDebugLogLevel = root.getEffectiveLevel()
			root.setLevel(logging.NOTSET)
	def setWildcards(self, wildcardStrings):
		"""Compiles the given wildcard pattern(s) into regexes."""
		self.wildcards = []
		if not isinstance(wildcardStrings, list):
			wildcardStrings = [wildcardStrings]
		for s in wildcardStrings:
			self.setWildcard(s)
	def setWildcard(self, wildcardString):
		"""Compiles one dotted wildcard pattern and appends its regex."""
		fields = wildcardString.split('.')
		for i, s in enumerate(fields):
			if "*" == s:
				# a bare '*' component matches any (dotted) remainder
				fields[i] = r'[\w.]*'
			else:
				try:
					# bug fix: was s.index(s, '*'), which always raised
					# TypeError and left embedded wildcards unexpanded
					if s.index('*') > 0:
						fields[i] = s.replace('*', r'[\w]*')
				except:
					pass
		regex = "^%s$" % r'\.'.join(fields)
		self.wildcards.append(re.compile(regex))
	def filter(self, record):
		# pass records at or above the original root level...
		if record.levelno >= self.effectiveLevel:
			return 1
		# ...and any record whose logger name matches a wildcard pattern
		for w in self.wildcards:
			m = w.match(record.name)
			if m:
				return 1
		return 0
# XXX: refactor out the try/except stuff that could be done with has_key()
class FludConfig:
"""
Handles configuration for Flud nodes. Most persistent settings live in
this class.
Configuration is kept in the directory specified by FLUDHOME if this value
is set in the environment, otherwise in HOME/.flud/. If no existing
configuration exists, this object will create a configuration with sane
default values.
"""
	def __init__(self):
		"""
		Sets placeholder identity/state fields and opens (creating if
		necessary) the flud.conf file under the flud home directory.
		"""
		self.Kr = 0            # private RSA key (populated by load())
		self.Ku = 0            # public RSA key (populated by load())
		self.nodeID = 0
		self.groupIDr = 0      # private group ID
		self.groupIDu = 0      # public group ID
		self.port = -1         # server listen port (populated by load())
		self.reputations = {}  # peer reputations (see _getReputations)
		self.nodes = {}        # known nodes (see _getKnownNodes)
		self.throttled = {} # XXX: should persist this to config file
		try:
			self.fludhome = os.environ['FLUDHOME']
		except:
			try:
				home = os.environ['HOME']
				self.fludhome = home+"/.flud"
			except:
				# NOTE(review): self.fludhome is left unset on this path, so
				# the isdir() check below will raise AttributeError
				logger.warn("cannot determine FLUDHOME.")
				logger.warn("Please set HOME or FLUDHOME environment variable")
		if not os.path.isdir(self.fludhome):
			os.mkdir(self.fludhome, 0700)
		self.fludconfig = self.fludhome+"/flud.conf"
		self.configParser = ConfigParser.ConfigParser()
		if not os.path.isfile(self.fludconfig):
			# create an empty config file on first run
			conffile = file(self.fludconfig, "w")
		else:
			conffile = file(self.fludconfig, "r")
			self.configParser.readfp(conffile)
		conffile.close()
		logger.info('fludhome = %s' % self.fludhome)
		logger.info('fludconfig = %s' % self.fludconfig)
	def load(self, serverport=None, doLogging=True):
		"""
		Loads (and, for missing values, generates and saves) the full node
		configuration: logging, identity keys, ports, routing table, and
		the various storage directories.

		If serverport is given, it overrides any value that may be in the
		configuration file.
		"""
		self.logfile, self.loglevel = self._getLoggingConf()
		if doLogging:
			# start each run with a fresh logfile
			if os.path.isfile(self.logfile):
				os.remove(self.logfile)
			handler = logging.FileHandler(self.logfile)
			formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)d'
					' %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
			handler.setFormatter(formatter)
			logger.addHandler(handler)
			logging.getLogger("").setLevel(self.loglevel)
			#logger.setLevel(self.loglevel)
			#logger.setLevel(logging.WARNING) # XXX: overrides user prefs
			#logger.setLevel(logging.DEBUG) # XXX: overrides user prefs
			if os.environ.has_key("LOGFILTER"):
				# LOGFILTER holds space-separated wildcard logger names
				self.filter = FludDebugLogFilter(
						os.environ["LOGFILTER"].split(' '))
				handler.addFilter(self.filter)
				# XXX: add a LocalPrimitive that can be called dynamically to
				# invoke filter.setWildcards()
		self.Kr, self.Ku, self.nodeID, self.groupIDr, self.groupIDu \
				= self._getID()
		logger.debug('Kr = %s' % self.Kr.exportPrivateKey())
		logger.debug('Ku = %s' % self.Ku.exportPublicKey())
		logger.debug('nodeID = %s' % self.nodeID)
		logger.debug('groupIDr = %s' % self.groupIDr)
		logger.debug('groupIDu = %s' % self.groupIDu)
		self.port, self.clientport = self._getServerConf()
		if serverport != None:
			# an explicitly requested port overrides the config file
			self.port = serverport
			self.clientport = serverport + CLIENTPORTOFFSET
			self.configParser.set("server","port",self.port)
			self.configParser.set("server","clientport",self.clientport)
		logger.debug('port = %s' % self.port)
		logger.debug('clientport = %s' % self.clientport)
		logger.debug('trustdeltas = %s'
				% [v for v in dir(TrustDeltas) if v[0] != '_'])
		# seed the kademlia routing table with our own node tuple
		self.routing = kRouting((socket.getfqdn(), self.port,
				long(self.nodeID, 16), self.Ku.exportPublicKey()['n']))
		self.storedir, self.generosity, self.minoffer = self._getStoreConf()
		if not os.path.isdir(self.storedir):
			os.mkdir(self.storedir)
			os.chmod(self.storedir, 0700)
		logger.debug('storedir = %s' % self.storedir)
		self.kstoredir = self._getkStoreConf()
		if not os.path.isdir(self.kstoredir):
			os.mkdir(self.kstoredir)
			os.chmod(self.kstoredir, 0700)
		logger.debug('kstoredir = %s' % self.kstoredir)
		self.clientdir = self._getClientConf()
		if not os.path.isdir(self.clientdir):
			os.mkdir(self.clientdir)
			os.chmod(self.clientdir, 0700)
		logger.debug('clientdir = %s' % self.clientdir)
		self.metadir, self.metamaster = self._getMetaConf()
		if not os.path.isdir(self.metadir):
			os.mkdir(self.metadir)
			os.chmod(self.metadir, 0700)
		logger.debug('metadir = %s' % self.metadir)
		self.reputations = self._getReputations()
		logger.debug("reputations = %s" % str(self.reputations))
		self.nodes = self._getKnownNodes()
		logger.debug("known nodes = %s" % str(self.nodes))
		self.save()
		# the config holds the private key; keep it owner-readable only
		os.chmod(self.fludconfig, 0600)
		self.loadMasterMeta()
def save(self):
"""
saves configuration
"""
conffile = file(self.fludconfig, "w")
self.configParser.write(conffile)
conffile.close()
def _getLoggingConf(self):
"""
Returns logging configuration: logfile and loglevel
"""
if not self.configParser.has_section("logging"):
self.configParser.add_section("logging")
try:
logfile = int(self.configParser.get("logging","logfile"))
except:
logger.debug("no logfile specified, using default")
logfile = self.fludhome+'/flud.log'
self.configParser.set("logging", "logfile", logfile)
try:
loglevel = int(self.configParser.get("logging","loglevel"))
#loglevel = logging.WARNING # XXX: remove me
except:
logger.debug("no loglevel specified, using default")
loglevel = logging.WARNING
self.configParser.set("logging", "loglevel", loglevel)
return logfile, loglevel
	def _getID(self):
		"""
		Returns a tuple: private key, public key, nodeID, private group ID,
		and public group ID from config.  If these values don't exist in
		the conf file, they are generated and added.
		"""
		# get the keys and IDs from the config file.
		# If these values don't exist, generate a pub/priv key pair, nodeID,
		# and groupIDs.
		if not self.configParser.has_section("identification"):
			self.configParser.add_section("identification")
		try:
			# NOTE(review): eval() of config values assumes flud.conf is
			# trusted; load() chmods it 0600
			privkey = FludRSA.importPrivateKey(
					eval(self.configParser.get("identification","Kr")))
		except:
			pubkey, privkey = FludCrypto.generateKeys()
		else:
			try:
				pubkey = FludRSA.importPublicKey(
						eval(self.configParser.get("identification","Ku")))
			except:
				# derive the public key from the imported private one
				pubkey = privkey.publickey()
		try:
			nodeID = self.configParser.get("identification","nodeID")
		except:
			#nodeID = FludCrypto.hashstring(str(pubkey.exportPublicKey()))
			nodeID = pubkey.id()
		try:
			# private group ID is truncated to 64 chars
			privgroupID = self.configParser.get("identification",
					"groupIDr")[:64]
		except:
			privgroupID = 'fludtest' # default groupID hardcoded
		try:
			pubgroupID = self.configParser.get("identification","groupIDu")
		except:
			pubgroupID = FludCrypto.hashstring(str(pubkey.exportPublicKey())
					+privgroupID)
		# write the settings back out to config object
		self.configParser.set("identification","Kr",privkey.exportPrivateKey())
		self.configParser.set("identification","Ku",pubkey.exportPublicKey())
		self.configParser.set("identification","nodeID",nodeID)
		self.configParser.set("identification","groupIDr",privgroupID)
		self.configParser.set("identification","groupIDu",pubgroupID)
		# return the values
		return privkey, pubkey, nodeID, privgroupID, pubgroupID
def _getServerConf(self):
"""
Returns server configuration: port number
"""
if not self.configParser.has_section("server"):
self.configParser.add_section("server")
try:
port = int(self.configParser.get("server","port"))
except:
logger.debug("no port specified, using default")
port = 8080 # XXX: default should be defined elsewhere.
# Should prefer 80. If in use, use 8080+
try:
clientport = int(self.configParser.get("server","clientport"))
except:
logger.debug("no clientport specified, using default")
clientport = port+CLIENTPORTOFFSET
self.configParser.set("server","port",port)
self.configParser.set("server","clientport",clientport)
return port, clientport
def _getDirConf(self, configParser, section, default):
"""
Returns directory configuration
"""
if not configParser.has_section(section):
configParser.add_section(section)
try:
dir = int(self.configParser.get(section,"dir"))
except:
logger.debug("no %s directory specified, using default", section)
dir = self.fludhome+'/'+default
if not os.path.isdir(dir):
os.makedirs(dir)
self.configParser.set(section,"dir",dir)
return dir
 def _getClientConf(self):
  """
  Returns client configuration: download directory

  Side effect: a 'trustdeltas' dict in the config patches the
  TrustDeltas attributes, and the full (possibly patched) set is
  written back to the config object.
  """
  try:
   # NOTE(review): eval() on config-file contents executes arbitrary
   # code; the config file must be trusted.
   trustdeltas = eval(self.configParser.get("client","trustdeltas"))
   for i in trustdeltas:
    if not hasattr(TrustDeltas, i):
     logger.error("setting non-useful TrustDelta field %s", i)
    # applied unconditionally -- unknown names get added as well
    setattr(TrustDeltas, i, trustdeltas[i])
  except:
   logger.debug("no trustdeltas specified, using default")
  if not self.configParser.has_section("client"):
   self.configParser.add_section("client")
  # persist every public TrustDeltas attribute back to the config
  self.configParser.set("client", "trustdeltas",
    dict((v, eval("TrustDeltas.%s" % v)) for v in dir(TrustDeltas)
    if v[0] != '_'))
  return self._getDirConf(self.configParser, "client", "dl")
def _getStoreConf(self):
"""
Returns data store configuration
"""
storedir = self._getDirConf(self.configParser, "store", "store")
try:
generosity = self.configParser.get("store", "generosity")
except:
logger.debug("no generosity specified, using default")
generosity = 1.5
try:
minoffer = self.configParser.get("store", "minoffer")
except:
logger.debug("no minoffer specified, using default")
minoffer = 1024
return storedir, generosity, minoffer
def _getkStoreConf(self):
"""
Returns dht data store configuration
"""
return self._getDirConf(self.configParser, "kstore", "dht")
def _getMetaConf(self):
"""
Returns metadata configuration: metadata directory
"""
metadir = self._getDirConf(self.configParser, "metadata", "meta")
try:
master = self.configParser.get("meta","master")
except:
logger.debug("no meta master file specified, using default")
master = "master"
if not os.path.isfile(metadir+'/'+master):
f = open(metadir+'/'+master, 'w')
f.close()
return (metadir, master)
def _getReputations(self):
"""
Returns dict of reputations known to this node
"""
# XXX: should probably just throw these in with 'nodes' (for efficiency)
return self._getDict(self.configParser, "reputations")
 def _getKnownNodes(self):
  """
  Returns dict of nodes known to this node

  Node persistence is currently disabled: this always returns an
  empty dict, and the code after the return is intentionally dead
  (kept for when persistence is re-enabled).
  """
  return {}
  # XXX: don't read known nodes for now
  result = self._getDict(self.configParser, "nodes")
  for i in result:
   print str(i)
   self.routing.insertNode(
     (result[i]['host'], result[i]['port'], long(i, 16),
     result[i]['nKu']))
  return result
def _getDict(self, configParser, section):
"""
creates a dictionary from the list of pairs given by
ConfigParser.items(section). Requires that the right-hand side of
the config file's "=" operator be a valid python type, as eval()
will be invoked on it
"""
if not configParser.has_section(section):
configParser.add_section(section)
try:
items = configParser.items(section)
result = {}
for item in items:
#print item
try:
result[str(item[0])] = eval(item[1])
configParser.set(section, item[0], item[1])
except:
logger.warn("item '%s' in section '%s'"
" of the config file has an unreadable format"
% str(item[0]), str(section))
except:
logger.warn("Couldn't read %s from config file:" % section)
return result
 def addNode(self, nodeID, host, port, Ku, mygroup=None):
  """
  Convenience method for adding a node to the known.
  If a node with nodeID already exists, nothing changes.
  This method /does not/ save the new configuration to file,

  @param nodeID: hex-string node id (self.nodes is keyed by the hex
    string; self.reputations is keyed by its long value)
  @param Ku: the node's public key object (FludRSA-like; must support
    exportPublicKey())
  @param mygroup: defaults to this node's own public group ID
  """
  if mygroup == None:
   mygroup = self.groupIDu
  if not self.nodes.has_key(nodeID):
   self.nodes[nodeID] = {'host': host, 'port': port,
     'Ku': Ku.exportPublicKey(), 'mygroup': mygroup}
   #logger.log(logging.DEBUG, "nodes: " % str(self.nodes))
   # XXX: disabled nodes saving
   #for k in self.nodes:
   # self.configParser.set('nodes', k, self.nodes[k])
   # NOTE(review): insertNode presumably returns a displaced node
   # (non-None) when its bucket is full -- confirm against the
   # routing-table implementation.
   n = self.routing.insertNode((host, int(port), long(nodeID, 16),
     Ku.exportPublicKey()['n']))
   if n != None:
    logger.warn("need to ping %s for LRU in routing table!"
      % str(n))
    # XXX: instead of pinging, put it in a replacement cache table
    # and when one of the nodes needs replaced (future query)
    # replace it with one of these. Sec 4.1
    self.routing.replacementCache.insertNode(
      (host, int(port), long(nodeID, 16),
      Ku.exportPublicKey()['n']))
   # new nodes start at the neutral initial reputation score
   self.reputations[long(nodeID,16)] = TrustDeltas.INITIAL_SCORE
   # XXX: no management of reputations size: need to manage as a cache
 def modifyReputation(self, nodeID, reason):
  """
  change reputation of nodeID by reason.value

  @param nodeID: long node id, or a hex string (normalized to long)
  @param reason: one of the TrustDeltas constants; reason.value is
    the signed score adjustment
  """
  logger.info("modify %s %s" % (nodeID, reason.value))
  if isinstance(nodeID, str):
   # normalize hex-string IDs to the long form used as dict key
   nodeID = long(nodeID,16)
  if not self.reputations.has_key(nodeID):
   self.reputations[nodeID] = TrustDeltas.INITIAL_SCORE
   # XXX: no management of reputations size: need to manage as a cache
  self.reputations[nodeID] += reason.value
  logger.debug("reputation for %d now %d", nodeID,
    self.reputations[nodeID])
  curtime = int(time.time())
  if reason.value < 0:
   # penalties also place the node on the throttle list
   self.throttleNode(nodeID, reason, curtime)
  elif nodeID in self.throttled and self.throttled[nodeID] < curtime:
   # good behavior after the throttle expired clears the entry
   self.throttled.pop(nodeID)
 def throttleNode(self, nodeID, reason, curtime=None):
  """
  puts a node in the throttle list.

  The entry maps nodeID -> absolute expiry time in epoch seconds.
  """
  if not curtime:
   curtime = int(time.time())
  # NOTE(review): reason.value is negative for penalties (see
  # modifyReputation), so the sign of the added term depends on the
  # sign of TrustDeltas.MAX_DEC_PERDAY -- confirm this produces an
  # expiry in the future rather than the past.
  pause = curtime \
    + (reason.value * 24 * 60 * 60) / TrustDeltas.MAX_DEC_PERDAY
  self.throttled[nodeID] = pause
def getPreferredNodes(self, num=None, exclude=None, throttle=False):
"""
Get nodes ordered by reputation. If num is passed in, return the first
'num' nodes, otherwise all. If exclude list is passed in, try to
return nodes not on this list (but do return some excluded if nodes are
exhausted, i.e., there aren't num nodes available). If throttle
(default), do not return any nodes which are currently throttled.
"""
# XXX: O(n) each time this is called. Better performance if we
# maintain sorted list when modified (modifyReputation, addNode), at a
# bit higher mem expense.
items = self.reputations.items()
numitems = len(items)
logger.debug("%d items in reps" % numitems)
if throttle:
now = int(time.time())
for t in self.throttled:
if self.throttled[t] < now:
self.throttled.pop(t)
if exclude:
items = [(v,k) for (k,v) in items if k not in throttle and
k not in exclude]
if num and len(items) < num and numitems >= num:
exitems = [(v,k) for (k,v) in items if k not in throttle
and k in exclude]
items += exitems[num-len(item):]
logger.debug("%d items now in reps" % len(items))
else:
items = [(v,k) for (k,v) in items if k not in throttle]
else:
# XXX: refactor; 'if exclude else' is same as above, but without
# the 'if k not in throttle' bits
if exclude:
items = [(v,k) for (k,v) in items if k not in exclude]
if num and len(items) < num and numitems >= num:
exitems = [(v,k) for (k,v) in items if k in exclude]
items += exitems[num-len(item):]
logger.debug("%d items now in reps" % len(items))
else:
items = [(v,k) for (k,v) in items]
items.sort()
items.reverse()
items = [(k,v) for (v,k) in items]
# need to call routing.getNode() to get node triple and return those
if num:
logger.debug("returning %d of the %d items" % (num, len(items)))
return [self.routing.getNode(f) for (f,v) in items[:num]]
else:
logger.debug("returning all %d of the items" % len(items))
return [self.routing.getNode(f) for (f,v) in items]
# XXX: note that this master metadata all-in-mem scheme doesn't really work
# long term; these methods should eventually go to a local db or db-like
# something
def updateMasterMeta(self, fname, val):
"""
update fname with val (sK)
"""
self.master[fname] = val
def getFromMasterMeta(self, fname):
"""
get val (sK) for fname
"""
try:
return self.master[fname]
except:
return None
def deleteFromMasterMeta(self, fname):
"""
remove fname
"""
try:
self.master.pop(fname)
except:
pass
def loadMasterMeta(self):
"""
loads fname->sK mappings from file
"""
fmaster = open(os.path.join(self.metadir, self.metamaster), 'r')
master = fmaster.read()
fmaster.close()
if master == "":
master = {}
else:
master = fdecode(master)
self.master = master
def syncMasterMeta(self):
"""
sync in-mem fname->sK mappings to disk
"""
master = fencode(self.master)
fmaster = open(os.path.join(self.metadir, self.metamaster), 'w')
fmaster.write(master)
fmaster.close()
def _test(self):
import doctest
doctest.testmod()
# Running this module directly executes its doctests.
if __name__ == '__main__':
 fludConfig = FludConfig()
 fludConfig._test()
| Python |
#!/usr/bin/python
"""
FludNode.tac (c) 2003-2006 Alen Peacock. This program is distributed under the
terms of the GNU General Public License (the GPL).
This is the application file used by twistd to daemonize FludNode.
"""
import os
from twisted.application import service, internet
import flud.FludNode
from flud.protocol.FludCommUtil import getCanonicalIP
# Optional environment overrides: FLUDPORT sets the listen port;
# FLUDGWHOST/FLUDGWPORT name an existing gateway node to bootstrap from
# (both must be set for the gateway connection to be attempted).
port = None
gwhost = None
gwport = None
if 'FLUDPORT' in os.environ:
 port = int(os.environ['FLUDPORT'])
if 'FLUDGWHOST' in os.environ:
 gwhost = getCanonicalIP(os.environ['FLUDGWHOST'])
if 'FLUDGWPORT' in os.environ:
 gwport = int(os.environ['FLUDGWPORT'])
node = flud.FludNode.FludNode(port)
if gwhost and gwport:
 node.connectViaGateway(gwhost, gwport)
# 'application' is the module-level name twistd looks for in a .tac file
application = service.Application("flud.FludNode")
# NOTE(review): this rebinding shadows the twisted.application 'service'
# module imported above; with setServiceParent commented out, 'application'
# carries no child services -- presumably node.start() wires everything up
# itself. Verify before re-enabling the line below.
service = node.start(twistd=True)
#service.setServiceParent(application)
| Python |
#!/usr/bin/python
"""
FludScheduler.py (c) 2003-2006 Alen Peacock. This program is distributed under
the terms of the GNU General Public License (the GPL), verison 3.
FludScheduler is the process monitors files for changes, and then tells flud to
back them up.
"""
import sys, os, time, stat

from twisted.internet import reactor, defer

from flud.FludConfig import FludConfig
from flud.protocol.LocalClient import *
from flud.CheckboxState import CheckboxState
CHECKTIME=5
class FludScheduler:
 """
 Polls the files selected in fludfile.conf for changes and asks the
 running flud node (via the LocalClient 'factory') to back up whatever
 changed. Polling is rescheduled with reactor.callLater(CHECKTIME, run).
 """
 def __init__(self, config, factory):
  # config: FludConfig instance; factory: LocalClient factory used to
  # send LIST/PUTF commands to the node
  self.config = config
  self.factory = factory
  self.fileconfigfile = None      # path to fludfile.conf, found lazily
  self.fileconfigfileMTime = 0    # mtime of fludfile.conf at last read
  self.fileChangeTime = 0         # time of the last filesystem sweep
  self.fileconfigSelected = set() # paths marked for backup
  self.fileconfigExcluded = set() # paths explicitly excluded
  # prime self.mastermetadata with the node's stored-file index
  self.getMasterMetadata()
 def getMasterMetadata(self):
  # LIST returns the node's master fname -> metadata index
  d = self.factory.sendLIST()
  d.addCallback(self.gotMasterMetadata)
  d.addErrback(self.errMasterMetadata)
  return d
 def gotMasterMetadata(self, master):
  self.mastermetadata = master
 def errMasterMetadata(self, err):
  # without the master list we can't decide what needs backup: bail
  print err
  reactor.stop()
 def readFileConfig(self, mtime=None):
  """
  (Re)parse fludfile.conf into the selected/excluded path sets.

  NOTE(review): eval() of the config file's contents executes
  arbitrary code; the file must be trusted.
  """
  print "reading FileConfig"
  file = open(self.fileconfigfile, 'r')
  self.fileconfig = eval(file.read())
  file.close()
  if mtime:
   self.fileconfigfileMTime = mtime
  else:
   self.fileconfigfileMTime = os.stat(
     self.fileconfigfile)[stat.ST_MTIME]
  self.fileconfigSelected = set([f for f in self.fileconfig
    if self.fileconfig[f] == CheckboxState.SELECTED
    or self.fileconfig[f] == CheckboxState.SELECTEDCHILD])
  self.fileconfigExcluded = set([f for f in self.fileconfig
    if self.fileconfig[f] == CheckboxState.EXCLUDED
    or self.fileconfig[f] == CheckboxState.EXCLUDEDCHILD])
 # The file[s]ChangeStat are the worst possible way to detect file changes.
 # Much more efficient to use inotify/dnotify/fam/gamin/etc., as well as
 # more correct (no way to detect cp -a or -p, for example, with stat).
 # But, these are a fallback method when those aren't present, and are fine
 # for testing.
 def fileChangedStat(self, file, fileChangeTime=None):
  # True when 'file' looks newer than the reference time; files found
  # in master metadata use their stored time instead
  if os.path.isfile(file) or os.path.isdir(file):
   mtime = os.stat(file)[stat.ST_MTIME]
   if not fileChangeTime:
    fileChangeTime = self.fileChangeTime
   if file in self.mastermetadata:
    fileChangeTime = self.mastermetadata[file][1]
  else:
   # vanished (or special) entries are treated as changed
   return True
  print "mtime = %s, ctime = %s (%s)" % (mtime, fileChangeTime, file)
  if mtime > fileChangeTime:
   return True
  return False
 def filesChangedStat(self, files, fileChangeTime=None):
  # filter 'files' down to those fileChangedStat considers changed
  result = []
  for f in files:
   if self.fileChangedStat(f, fileChangeTime):
    result.append(f)
  return result
 # Change these to point to something other than the xxxStat() methods
 def fileChanged(self, file, fileChangeTime=None):
  """
  >>> now = time.time()
  >>> f1 = tmpfile.mktemp()
  >>>
  """
  return self.fileChangedStat(file, fileChangeTime)
 def filesChanged(self, files, fileChangeTime=None):
  return self.filesChangedStat(files, fileChangeTime)
 def checkFileConfig(self):
  # check config file to see if it has changed, then reparse it;
  # returns True only when the config was (re)read
  if not self.fileconfigfile:
   # first time through: locate fludfile.conf under FLUDHOME,
   # ~/.flud, or ./.flud, in that order
   print "checking fileconfigfile (initial)"
   if os.environ.has_key('FLUDHOME'):
    fludhome = os.environ['FLUDHOME']
   elif os.environ.has_key('HOME'):
    fludhome = os.environ['HOME']+"/.flud"
   else:
    fludhome = ".flud"
   # XXX: fludfile.conf should be in config
   self.fileconfigfile = os.path.join(fludhome, "fludfile.conf")
   if os.path.isfile(self.fileconfigfile):
    self.readFileConfig()
    return True
   else:
    print "no fileconfigfile to read"
  elif os.path.isfile(self.fileconfigfile):
   if self.fileChanged(self.fileconfigfile, self.fileconfigfileMTime):
    print "fileconfigfile changed"
    mtime = time.time()
    self.readFileConfig(mtime)
    return True
  return False
 def checkFilesystem(self):
  """
  Recursively sweep the selected paths and return the set of changed
  files (directories are descended into, not stored themselves).
  """
  checkedFiles = set()
  changedFiles = set()
  def checkList(list):
   #print "checkList: %s" % list
   #print "checkedFiles: %s" % checkedFiles
   for entry in list:
    # XXX: if entry is in master metadata, and its mtime is not
    # earlier than the time used by fileChanged, skip it (add 'and'
    # clause)
    if entry not in checkedFiles and \
      entry not in self.fileconfigExcluded and \
      entry not in self.mastermetadata:
     print "checkFilesystem for %s" % entry
     if os.path.isdir(entry):
      #print "dir %s" % entry
      dirfiles = [os.path.join(entry, i)
        for i in os.listdir(entry)]
      checkedFiles.update([entry,])
      checkList(dirfiles)
     elif self.fileChanged(entry):
      print "%s changed" % entry
      if os.path.isfile(entry):
       changedFiles.update([entry,])
       #print "file %s changed" % entry
      else:
       print "entry ?? %s ?? changed" % entry
     checkedFiles.update([entry,])
  checkList(self.fileconfigSelected)
  self.fileChangeTime = time.time()
  return changedFiles
 def storefileFailed(self, err, file):
  print "storing %s failed: %s" % (file, err)
  err.printTraceback()
  #print dir(err)
 def storefileYay(self, r, file):
  print "storing %s success" % file
 def storeFiles(self, changedFiles):
  # fan out a PUTF per changed file; the returned DeferredList fires
  # once every store has either succeeded or failed
  #print "storing %s" % changedFiles
  dlist = []
  for f in changedFiles:
   print "storing %s" % f
   deferred = self.factory.sendPUTF(f)
   deferred.addCallback(self.storefileYay, f)
   deferred.addErrback(self.storefileFailed, f)
   dlist.append(deferred)
  # NOTE(review): 'defer' must be in scope here; this module imports
  # only reactor from twisted.internet directly -- presumably the
  # LocalClient star-import supplies it. Verify.
  dl = defer.DeferredList(dlist)
  return dl
  #return defer.succeed(True)
 def restartCheckTimer(self, v):
  print "restarting timer (%d) to call run()" % CHECKTIME
  reactor.callLater(CHECKTIME, self.run)
 def updateMasterMetadata(self, v):
  # refresh the master index after stores complete
  return self.getMasterMetadata()
 def run(self):
  """
  One poll cycle: reread the config, sweep the filesystem, store any
  changes, refresh master metadata, and reschedule.
  """
  print "run"
  self.checkFileConfig()
  changedFiles = self.checkFilesystem()
  print "%s changed" % changedFiles
  d = self.storeFiles(changedFiles)
  d.addBoth(self.updateMasterMetadata)
  d.addBoth(self.restartCheckTimer)
| Python |
"""
FludFileOperations.py (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
Implements file storage and retrieval operations using flud primitives.
"""
import os, stat, sys, logging, binascii, random, time
from zlib import crc32
from StringIO import StringIO
from twisted.internet import defer, threads
from Crypto.Cipher import AES
from flud.FludCrypto import FludRSA, hashstring, hashfile
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
from flud.FludConfig import TrustDeltas
import fludfilefec
logger = logging.getLogger('flud.fileops')
# erasure coding constants
# (presumably any code_k of the code_m coded blocks suffice to rebuild a
# file -- confirm against fludfilefec)
code_k = 20 # data blocks
code_n = 20 # parity blocks
code_m = code_k+code_n # total blocks
# temp filenaming defaults
appendEncrypt = ".crypt" # suffix for the locally-encrypted temp copy
appendFsMeta = ".nmeta" # suffix for coded filesystem-metadata blocks
# XXX: could remove trailing '=' from all stored sha256s (dht keys, storage
# keys, etc) and add them back implicitly
class Ctx(object):
 """
 Object to encapsulate operation context for logger.

 Holds a context tag plus the last (format, args) pair given to msg();
 repr() renders "<ctx>: <formatted message>".
 """
 def __init__(self, ctx):
  self.ctx = ctx
  self.formatstr = ""
  self.args = ()
 def msg(self, formatstr, *args):
  """Record a message; returns self so calls can be passed inline."""
  self.formatstr = formatstr
  self.args = args
  # interpolate eagerly so a malformed format string fails here, at
  # the call site, rather than later inside __repr__
  formatstr % args
  return self
 def __repr__(self):
  return "%s: %s" % (self.ctx, self.formatstr % self.args)
def pathsplit(fname):
 """
 Return the list of ancestor paths of fname, shallowest first, ending
 with (the normalized form of) fname itself.

 e.g. pathsplit("a/b/c") -> ["", "a", "a/b", "a/b/c"]
 """
 parts = []
 cur = fname
 while True:
  par, chld = os.path.split(cur)
  if chld == "":
   # reached the root (or empty head); it anchors the list
   parts.append(par)
   break
  # re-join so redundant separators are normalized away, exactly as
  # the recursive formulation produced
  parts.append(os.path.join(par, chld))
  cur = par
 parts.reverse()
 return parts
def filemetadata(fname):
 """
 Collect the stat fields persisted as filesystem metadata for fname:
 path, mode, owner/group ids, and access/modify/change times.
 """
 st = os.stat(fname)
 return {
  'path': fname,
  'mode': st[stat.ST_MODE],
  'uid': st[stat.ST_UID],
  'gid': st[stat.ST_GID],
  'atim': st[stat.ST_ATIME],
  'mtim': st[stat.ST_MTIME],
  'ctim': st[stat.ST_CTIME],
 }
class StoreFile:
"""
Implements the meta operations of storing, retrieving, and verifying files.
In addition to using the data primitives laid out in the protocol/
directory, manages the creation of file metadata for local and remote
storage.
Stores a file to flud by:
1. Create storage and encryption keys for file: Hashes the file once
to create an encryption key, eK=H(file), and then again to create the
storage key for the file metadata sK=H(eK)=H(H(file)).
2. Create local filesystem file metadata: Encrypts the storage key
asymetrically with public key as eeK=e_Ku(eK), and creates local copy
of fs metadata with eeK and other file metadata (ownership, name,
timestamps). Encrypts this metadata with Ku. (flud file metadata
consists of eeK and Ku(fs metadata)).
3. Create data-specific file metadata: Symmetrically encrypt the file
with e_file=eK(file). Code e_file into k+m blocks. Perform
H(block) on each k+m block.
4. Query DHT for sK. If it exists, grab the metadata record (call it
'storedMetadata') for comparison to one we are generated. Compare
data-specific metadata to storedMetadata (if it exists). If it doesn't
already exist, this step can be ignored. If it exists and the data
doesn't match, either the hash function is broken or a malicious node
has poisoned the DHT -- return failure (client can attempt restore
under a new key, or challenge the existing stored metadata).
5. Store m+n blocks. If stored metadata exists, can do VERIFYs
instead, and only store when VERIFY fails. For each failed VERIFY,
must update the data-specific part of the metadata record. Since we
don't want to be susceptible to replacement attack, the update is
treated as an 'append' to the specific block metadata by the store
target. (The store target can monitor its data-specific metadata
records and for any that grow beyond a certain size, can shrink all
block records to a size of 1 by performing RETRIEVE ops on the
block, verfiying hash, and removing any failures. If this fails for
all block entries, can prune all but the /first and last/ entries --
keep these to prevent damage by malicious nodes. This strategy is
beneficial to the node that performs it, because it prevents storage of
arbitrary data in the DHT layer, which is in turn good for the system
as a whole)
6. Store file metadata (both node-specific and data-specific) to the
DHT. Keep a local copy as well.
7. Update the master file record index for this node, and store it to
the DHT layer.
"""
# XXX: instead of doing all debugging with '"%s ...", self.mkey, ...', make
# some props (debug(), warn(), etc) that add the mkey to whatever msg is
# passed
# XXX: should follow this currentOps model for the other FludFileOps
currentOps = {}
 def __init__(self, node, filename):
  self.node = node
  self.filename = filename
  # mkey: per-file metadata key -- crc32 of the local file path
  self.mkey = crc32(self.filename)
  self.ctx = Ctx(self.mkey).msg
  self.config = node.config
  self.Ku = node.config.Ku
  self.routing = self.config.routing
  self.metadir = self.config.metadir
  self.parentcodedir = self.config.clientdir # XXX: clientdir?
  # ask for code_m + X nodes (we prefer a pool slightly larger than
  # code_m). XXX: X=10 is magic
  self.nodeChoices = self.config.getPreferredNodes(code_m+10)
  # nodes a block was (attempted to be) stored to this op
  self.usedNodes = {}
  # the store starts immediately; callers chain on self.deferred
  self.deferred = self._storeFile()
 def _storeFile(self):
  """
  Drive the store: derive keys, build local fs metadata, encrypt and
  erasure-code the file, then either piggyback on an identical
  in-flight store (CAS) or query the DHT and store/verify blocks.
  Returns a deferred firing when the chain completes.
  """
  if not os.path.isfile(self.filename):
   return defer.fail(ValueError("%s is not a file" % self.filename))
  # 1: create encryption key (eK) and storage key (sK). Query DHT using
  # sK
  self.eK = hashfile(self.filename)
  logger.debug(self.ctx("_storefile %s (%s)", self.filename, self.eK))
  self.sK = long(hashstring(self.eK), 16)
  # eeK: eK encrypted under our public key; eKey: AES key from eK
  self.eeK = self.Ku.encrypt(binascii.unhexlify(self.eK))
  self.eKey = AES.new(binascii.unhexlify(self.eK))
  #logger.debug(self.ctx("file %s eK:%s, storage key:%d"
  # % (self.filename, self.eK, self.sK)))
  # 2: create filesystem metadata locally.
  sbody = filemetadata(self.filename)
  sbody = fencode(sbody)
  # chunked public-key encryption of the fencoded stat metadata
  cryptSize = (self.Ku.size()+1) / 8
  self.eNodeFileMetadata = ""
  for i in range(0, len(sbody), cryptSize):
   self.eNodeFileMetadata += self.Ku.encrypt(sbody[i:i+cryptSize])[0]
  fsMetadata = fencode({'eeK' : fencode(self.eeK[0]),
   'meta' : fencode(self.eNodeFileMetadata)})
  # erasure code the metadata
  # XXX: bad blocking stuff, move into thread
  self.flatname = fencode(generateRandom(16))
  self.mfilename = os.path.join(self.metadir, self.flatname+appendFsMeta)
  self.encodedir = os.path.join(self.parentcodedir, self.flatname)
  try:
   os.mkdir(self.encodedir)
  except:
   return defer.fail(failure.DefaultException(
    "%s already requested" % self.filename))
  # XXX: mfiles should be held in mem, as StringIOs (when coder supports
  # this)
  self.mfiles = fludfilefec.encode_to_files(StringIO(fsMetadata),
   len(fsMetadata), self.encodedir, self.mfilename,
   code_k, code_m)
  # XXX: piggybacking doesn't work with new metadata scheme, must fix it
  # to append metadata, or if already in progress, redo via verify ops
  # if already storing identical file by CAS, piggyback on it
  if self.currentOps.has_key(self.eK):
   logger.debug(self.ctx("reusing callback on %s", self.eK))
   (d, counter) = self.currentOps[self.eK]
   self.currentOps[self.eK] = (d, counter+1)
   # setting sfile, encodedir, and efilename to empty vals is kinda
   # hokey -- could split _storeMetadata into two funcs instead (the
   # cleanup part and the store part; see _storeMetadata)
   self.sfiles = []
   self.encodedir = None
   self.efilename = None
   d.addCallback(self._piggybackStoreMetadata)
   return d
  # 3: encrypt and encode the file locally.
  self.efilename = os.path.join(self.metadir,self.flatname+appendEncrypt)
  e = open(self.efilename, "w+")
  fd = os.open(self.filename, os.O_RDONLY)
  fstat = os.fstat(fd)
  fsize = fstat[stat.ST_SIZE]
  # XXX: bad blocking stuff, move into thread
  # create a pad at front of file to make it an even multiple of 16
  # (first pad byte records the pad length for removal on retrieve)
  fpad = int(16 - fsize%16);
  #logger.debug(self.ctx("fsize=%d, padding with %d bytes"
  # % (fsize, fpad)))
  paddata = chr(fpad)+(fpad-1)*'\x00'
  buf = paddata + os.read(fd,16-len(paddata))
  e.write(self.eKey.encrypt(buf));
  # now write the rest of the file
  while 1:
   # XXX: can we do this in larger than 16-byte chunks?
   buf = os.read(fd,16)
   if buf == "":
    break
   e.write(self.eKey.encrypt(buf));
  #e.close()
  elen = e.tell()
  e.seek(0,0)
  os.close(fd)
  # erasure code the file
  # XXX: bad blocking stuff, move into thread
  self.sfiles = fludfilefec.encode_to_files(e, elen, self.encodedir, 'c',
   code_k, code_m)
  #logger.debug(self.ctx("coded to: %s" % str(self.sfiles)))
  # take hashes and rename coded blocks (each block file is renamed to
  # the fencoded hash of its own contents)
  self.segHashesLocal = []
  for i in range(len(self.sfiles)):
   sfile = self.sfiles[i]
   h = long(hashfile(sfile),16)
   logger.debug(self.ctx("file block %s hashes to %s", i, fencode(h)))
   destfile = os.path.join(self.encodedir,fencode(h))
   if os.path.exists(destfile):
    logger.warn(self.ctx("%s exists (%s)", destfile, fencode(h)))
   self.segHashesLocal.append(h)
   #logger.debug(self.ctx("moved %s to %s" % (sfile, destfile)))
   os.rename(sfile, destfile)
   self.sfiles[i] = destfile
   mfile = self.mfiles[i]
   os.rename(mfile, destfile+".m")
   self.mfiles[i] = destfile+".m"
  # 4a: query DHT for metadata.
  d = self.node.client.kFindValue(self.sK)
  d.addCallback(self._checkForExistingFileMetadata)
  d.addErrback(self._storeFileErr, "DHT query for metadata failed")
  self.currentOps[self.eK] = (d, 1)
  return d
# 4b: compare hashlists (locally encrypted vs. DHT -- if available).
# for lhash, dhash in zip(segHashesLocal, segHashesDHT):
 def _checkForExistingFileMetadata(self, storedMetadata):
  """
  Branch on the DHT lookup result: store all blocks when no metadata
  record exists yet, otherwise verify the previously stored blocks.
  """
  # NOTE(review): a dict result presumably means kFindValue returned
  # nodes rather than a value -- confirm against kFindValue's contract
  if storedMetadata == None or isinstance(storedMetadata, dict):
   logger.info(self.ctx(
    "metadata doesn't yet exist, storing all data"))
   d = self._storeBlocks(storedMetadata)
   #d = self._storeBlocksSKIP(storedMetadata)
   return d
  else:
   storedMetadata = fdecode(storedMetadata)
   logger.info(self.ctx("metadata exists, verifying all data"))
   if not self._compareMetadata(storedMetadata, self.sfiles):
    raise ValueError("stored and local metadata do not match")
   else:
    logger.info(self.ctx("stored and local metadata match."))
   # XXX: need to check for diversity. It could be that data stored
   # previously to a smaller network (<k+m nodes) and that we should
   # try to increase diversity and re-store the data.
   # XXX: also need to make sure we still trust all the nodes in the
   # metadata list. If not, we should move those blocks elsewhere.
   d = self._verifyAndStoreBlocks(storedMetadata)
   return d
 def _storeBlocksSKIP(self, storedMetadata):
  # for testing -- skip stores so we can get to storeMeta
  # (fabricates block locations from random known nodes instead of
  # issuing real STORE ops)
  dlist = []
  self.blockMetadata = {'k': code_k, 'n': code_n}
  for i in range(len(self.segHashesLocal)):
   hash = self.segHashesLocal[i]
   sfile = self.sfiles[i]
   node = random.choice(self.routing.knownExternalNodes())
   host = node[0]
   port = node[1]
   nID = node[2]
   nKu = FludRSA.importPublicKey(node[3])
   # record a fake location keyed by (block index, block hash)
   self.blockMetadata[(i, hash)] = long(nKu.id(), 16)
  return self._storeMetadata(None)
# 5a -- store all blocks
def _storeBlocks(self, storedMetadata):
dlist = []
self.blockMetadata = {'k': code_k, 'n': code_n}
for i in range(len(self.segHashesLocal)):
hash = self.segHashesLocal[i]
sfile = self.sfiles[i]
deferred = self._storeBlock(i, hash, sfile, self.mfiles[i])
dlist.append(deferred)
logger.debug(self.ctx("_storeBlocksAll"))
dl = defer.DeferredList(dlist)
dl.addCallback(self._storeMetadata)
return dl
 def _storeBlock(self, i, hash, sfile, mfile, retry=2):
  """
  Pick a random node from the preferred pool and send one block (and
  its coded metadata) via STORE. When the pool is exhausted, requery
  for preferred nodes excluding those already used; fail outright if
  none remain.
  """
  if not self.nodeChoices:
   #self.nodeChoices = self.routing.knownExternalNodes()
   # XXX: instead of asking for code_k, ask for code_k - still needed
   self.nodeChoices = self.config.getPreferredNodes(code_k,
    self.usedNodes.keys())
   logger.warn(self.ctx("asked for more nodes, %d nodes found",
    len(self.nodeChoices)))
  if not self.nodeChoices:
   return defer.fail(failure.DefaultException(
    "cannot store blocks to 0 nodes"))
  # choose randomly and remove so each node gets at most one block
  # from this batch
  node = random.choice(self.nodeChoices)
  self.nodeChoices.remove(node)
  host = node[0]
  port = node[1]
  nID = node[2]
  nKu = FludRSA.importPublicKey(node[3])
  location = long(nKu.id(), 16)
  logger.info(self.ctx("STOREing under %s on %s:%d", fencode(hash),
   host, port))
  logger.debug(self.ctx("mfile is %s", mfile))
  deferred = self.node.client.sendStore(sfile, (self.mkey, mfile), host,
   port, nKu)
  deferred.addCallback(self._blockStored, nID, i, hash, location)
  deferred.addErrback(self._retryStoreBlock, i, hash, location,
   sfile, mfile, nID, host, port, retry)
  return deferred
 def _retryStoreBlock(self, error, i, hash, location, sfile, mfile,
   nID, host, port, retry=None):
  """
  Errback for a failed STORE: retry on another node while retries
  remain; otherwise give up, penalizing the target's reputation.
  """
  retry = retry - 1
  if retry > 0:
   logger.warn(self.ctx("STORE to %s (%s:%d) failed, trying again",
    nID, host, port))
   d = self._storeBlock(i, hash, sfile, mfile, retry)
   d.addCallback(self._blockStored, nID, i, hash, location)
   # This will fail the entire operation. This is correct
   # behavior because we've tried on at least N nodes and couldn't
   # get the block to store -- the caller will have to try the entire
   # op again. If this proves to be a problem, up the default retry
   # value in _storeBlock().
   d.addErrback(self._storeFileErr, "couldn't store block %s"
    % fencode(hash), lambda: self.config.modifyReputation(nID,
    TrustDeltas.PUT_FAIL))
   return d
  else:
   self.usedNodes[nID] = True
   logger.warn(self.ctx("STORE to %s (%s:%d) failed, giving up",
    nID, host, port))
   d = defer.Deferred()
   d.addErrback(self._storeFileErr, "couldn't store block %s"
    % fencode(hash), lambda: self.config.modifyReputation(nID,
    TrustDeltas.PUT_FAIL))
   # NOTE(review): errback() with no argument needs an active
   # exception context; called outside any except block this may
   # raise instead of delivering a Failure -- confirm against the
   # twisted.internet.defer documentation.
   d.errback()
   return d
def _blockStored(self, result, nID, i, blockhash, location):
self.usedNodes[nID] = True
logger.debug(self.ctx("_blcokStored %s", fencode(blockhash)))
self.config.modifyReputation(location, TrustDeltas.PUT_SUCCEED)
self.blockMetadata[(i, blockhash)] = location
return fencode(blockhash)
 def _compareMetadata(self, storedFiles, fileNames):
  # compares the block names returned from DHT to those in fileNames.
  # @param storedFiles: dict of (index, hash-long) keys to their
  # locations, plus 'k'/'n' coding params (popped during the
  # comparison and restored before returning), usually obtained
  # from storedMetadata
  # @param fileNames: local filenames. Only the os.path.basename part
  # will be used for comparison
  # @return true if they match up perfectly, false otherwise
  logger.debug(self.ctx('# remote block names: %d', len(storedFiles)))
  logger.debug(self.ctx('# local blocks: %d', len(fileNames)))
  result = True
  k = storedFiles.pop('k')
  n = storedFiles.pop('n')
  # forward check: every remote block must exist locally
  for (i, f) in storedFiles:
   fname = os.path.join(self.encodedir,fencode(f))
   if not fname in fileNames:
    logger.warn(self.ctx("%s not in sfiles", fencode(i)))
    result = False
  # reverse check: every local block must be in the remote record
  for i, fname in enumerate(fileNames):
   hname = os.path.basename(fname)
   if not storedFiles.has_key((i, fdecode(hname))):
    logger.warn(self.ctx("%s not in storedMetadata", hname))
    result = False
  if result == False:
   for i in storedFiles:
    logger.debug(self.ctx("storedBlock = %s", fencode(i)))
   for i in fileNames:
    logger.debug(self.ctx("localBlock = %s", os.path.basename(i)))
  storedFiles['k'] = k
  storedFiles['n'] = n
  return result
 def _piggybackStoreMetadata(self, piggybackMeta):
  # piggybackMeta is a (nodeID, {blockID: storingNodeID, }) tuple from
  # the identical in-flight store; rebuild self.sfiles in block-index
  # order and fall through to verify-only mode (noopVerify=True).
  logger.debug(self.ctx("got piggyBackMeta data"))
  meta = piggybackMeta[1]
  sortedKeys = {}
  k = meta['k']
  n = meta['n']
  # keys other than 'k'/'n' are (index, blockhash) tuples
  for i in [x for x in meta if x != 'k' and x != 'n']:
   sortedKeys[i[0]] = i
  for i in xrange(k+n):
   self.sfiles.append(fencode(sortedKeys[i][1]))
  return self._verifyAndStoreBlocks(meta, True)
# 5b -- findnode on all stored blocks.
def _verifyAndStoreBlocks(self, storedMetadata, noopVerify=False):
    """Look up the storing node for each block, then VERIFY it there.

    storedMetadata -- {(i, blockID): storingNodeID-or-list, 'k':k, 'n':n}
    noopVerify -- if True, downstream verifies are zero-length (liveness
        checks only).
    Returns a DeferredList that fires after every block's verify/store
    chain completes, then updates the local master record.
    """
    self.blockMetadata = storedMetadata
    dlist = []
    for i, sfile in enumerate(self.sfiles):
        # XXX: metadata should be StringIO to begin with
        f = open(self.mfiles[i])
        mfile = StringIO(f.read())
        f.close()
        seg = os.path.basename(sfile)
        segl = fdecode(seg)
        nID = self.blockMetadata[(i, segl)]
        if isinstance(nID, list):
            logger.info(self.ctx(
                "multiple location choices, choosing one randomly."))
            nID = random.choice(nID)
            # XXX: for now, this just picks one of the alternatives at
            # random. If the chosen one fails, should try each of the
            # others until it works
        logger.info(self.ctx("looking up %s...", ('%x' % nID)[:8]))
        deferred = self.node.client.kFindNode(nID)
        deferred.addCallback(self._verifyBlock, i, sfile, mfile,
                seg, segl, nID, noopVerify)
        # fix: the reputation penalty must only happen if the errback
        # fires.  The original invoked modifyReputation() eagerly here
        # (penalizing the node even on success) and passed its return
        # value into _storeFileErr's raiseException parameter slot.
        # Pass a functor instead; nID is bound as a default argument to
        # avoid the late-binding-closure-in-a-loop pitfall.  False keeps
        # the original (non-raising) raiseException behavior.
        deferred.addErrback(self._storeFileErr,
                "couldn't find node %s... for VERIFY" % ('%x' % nID)[:8],
                False,
                lambda nID=nID: self.config.modifyReputation(
                        nID, TrustDeltas.VRFY_FAIL))
        dlist.append(deferred)
    dl = defer.DeferredList(dlist)
    #dl.addCallback(self._storeMetadata)
    # XXX XXX XXX: don't _updateMaster unless we succeed!!
    dl.addCallback(self._updateMaster, storedMetadata)
    return dl
# 5c -- verify all blocks, store any that fail verify.
def _verifyBlock(self, kdata, i, sfile, mfile, seg, segl, nID, noopVerify):
    """Callback for kFindNode: send a VERIFY challenge for one block.

    Reads a small random slice of the local block, hashes it, and asks
    the storing node for the same hash; mismatches (or errors) trigger a
    re-STORE via the _checkVerify/_checkVerifyErr callbacks.
    """
    # XXX: looks like we occasionally get in here on timed out connections.
    # Should go to _storeFileErr instead, eh?
    if isinstance(kdata, str):
        # fix: logging.Logger has no err() method; use error().
        # NOTE(review): a str kdata probably means kFindNode failed and
        # returned something unexpected -- confirm whether this should
        # raise instead of just logging.
        logger.error(self.ctx("str kdata=%s", kdata))
    node = kdata['k'][0]
    host = node[0]
    port = node[1]
    id = node[2]
    if id != nID:
        logger.debug(self.ctx("couldn't find node %s", ('%x' % nID)))
        raise ValueError("couldn't find node %s" % ('%x' % nID))
    nKu = FludRSA.importPublicKey(node[3])
    logger.info(self.ctx("verifying %s on %s:%d", seg, host, port))
    if noopVerify:
        # zero-length challenge: just checks the node is alive/responsive
        offset = length = 0
        verhash = long(hashstring(''), 16)
        self.sfiles = []
    else:
        fd = os.open(sfile, os.O_RDONLY)
        try:
            fsize = os.fstat(fd)[stat.ST_SIZE]
            if fsize > 20: # XXX: 20?
                length = 20 # XXX: 20?
                offset = random.randrange(fsize-length)
            else:
                length = fsize
                offset = 0
            os.lseek(fd, offset, 0)
            data = os.read(fd, length)
        finally:
            # fix: close the fd even if fstat/read raises (was leaked)
            os.close(fd)
        verhash = long(hashstring(data), 16)
    deferred = self.node.client.sendVerify(seg, offset, length,
            host, port, nKu, (self.mkey, mfile))
    deferred.addCallback(self._checkVerify, nKu, host, port, i, segl,
            sfile, mfile, verhash)
    deferred.addErrback(self._checkVerifyErr, nID, i, segl, sfile, mfile,
            verhash)
    return deferred
def _checkVerify(self, result, nKu, host, port, i, seg, sfile, mfile, hash):
    """Compare the remote VERIFY response hash against our local hash.

    On a match, the block is intact remotely and its encoded name is
    returned; on a mismatch, the block is re-stored.
    """
    # guard clause: matching hashes mean the remote copy is good
    if hash == long(result, 16):
        return fencode(seg)
    logger.info(self.ctx(
        "VERIFY hash didn't match for %s, performing STORE",
        fencode(seg)))
    return self._storeBlock(i, seg, sfile, mfile)
def _checkVerifyErr(self, failure, nID, i, seg, sfile, mfile, hash):
    """Errback for sendVerify: penalize the node and re-store the block.

    failure -- the twisted Failure from the VERIFY attempt.
    nID -- the storing node's ID (its reputation is decremented).
    Returns the deferred from the replacement STORE.
    """
    # fix: the config API is modifyReputation (singular), as used by
    # every other call site in this file; modifyReputations raised
    # AttributeError.
    self.config.modifyReputation(nID, TrustDeltas.VRFY_FAIL)
    logger.debug(self.ctx("Couldn't VERIFY: %s", failure.getErrorMessage()))
    logger.info(self.ctx("Couldn't VERIFY %s, performing STORE",
            fencode(seg)))
    d = self._storeBlock(i, seg, sfile, mfile)
    return d
# 6 - store the metadata.
def _storeMetadata(self, dlistresults):
    """Store the accumulated block metadata record in the DHT.

    dlistresults -- DeferredList results from the per-block store/verify
        chain; each entry is a (success, result) pair.
    Returns False if any block op produced no result, otherwise the
    deferred for the kStore of the metadata record keyed by self.sK.
    """
    # cleanup part of storeMetadata:
    logger.debug(self.ctx("dlist=%s", str(dlistresults)))
    # XXX: for any "False" in dlistresults, need to invoke _storeBlocks
    # again on corresponding entries in sfiles.
    for i in dlistresults:
        if i[1] is None:  # idiomatic identity test (was `== None`)
            logger.info(self.ctx("failed store/verify"))
            return False
    # storeMetadata part of storeMetadata
    # XXX: should sign metadata to prevent forged entries.
    logger.debug(self.ctx("storing metadata at %s", fencode(self.sK)))
    logger.debug(self.ctx("len(segMetadata) = %d", len(self.blockMetadata)))
    d = self.node.client.kStore(self.sK, self.blockMetadata)
    d.addCallback(self._updateMaster, self.blockMetadata)
    d.addErrback(self._storeFileErr, "couldn't store file metadata to DHT")
    return d
# 7 - update local master file record (store it to the network later).
def _updateMaster(self, res, meta):
    """Record the completed store in the local master metadata index.

    Removes all temporary artifacts (coded blocks, per-block metadata
    files, the encode directory and the encrypted file), records the
    file's storage key with a timestamp, ensures parent directories have
    master-metadata entries, caches `meta` locally, and decrements the
    in-flight op counter for this eK.

    res -- result of the preceding deferred (unused here).
    meta -- block metadata to cache locally under the storage key.
    Returns (storage key, meta) for downstream callbacks.
    """
    # clean up locally coded files and encrypted file
    for sfile in self.sfiles:
        os.remove(sfile)
    for mfile in self.mfiles:
        os.remove(mfile)
    if self.encodedir: os.rmdir(self.encodedir)
    if self.efilename: os.remove(self.efilename)
    key = fencode(self.sK)
    logger.info(self.ctx("updating local master metadata with %s", key))
    # store the filekey locally
    # update entry for file
    self.config.updateMasterMeta(self.filename, (self.sK, int(time.time())))
    # update entry for parent dirs
    paths = pathsplit(self.filename)
    for i in paths:
        if not self.config.getFromMasterMeta(i):
            self.config.updateMasterMeta(i, filemetadata(i))
    # XXX: not too efficient to write this out for every file. consider
    # local caching and periodic syncing instead
    self.config.syncMasterMeta()
    # cache the metadata locally (optional)
    fname = os.path.join(self.metadir,key)
    m = open(fname, 'wb')
    m.write(fencode(meta))
    m.close()
    # clean up fs metadata file
    #os.remove(self.mfilename)
    #return fencode(self.sK)
    # refcount bookkeeping: several concurrent ops may share this eK;
    # drop the count and forget the entry when it reaches zero (mirrors
    # the error path in _storeFileErr).
    (d, counter) = self.currentOps[self.eK]
    counter = counter - 1
    if counter == 0:
        logger.debug(self.ctx("counter 0 for currentOps %s", self.eK))
        self.currentOps.pop(self.eK)
    else:
        logger.debug(self.ctx("setting counter = %d for %s", counter,
                self.eK))
        self.currentOps[self.eK] = (d, counter)
    return (key, meta)
def _storeFileErr(self, failure, message, raiseException=True,
        functor=None):
    """Shared errback for the store pipeline.

    failure -- the twisted Failure being handled.
    message -- human-readable description of what failed (logged).
    raiseException -- when truthy, re-raise after bookkeeping so the
        error continues down the errback chain.
    functor -- optional zero-argument callable run first (e.g. a
        deferred reputation penalty).
    """
    if functor:
        functor()
    if self.eK not in self.currentOps:
        logger.warn(self.ctx("no %s in currentOps for StoreFile", self.eK))
        # NOTE(review): `raise failure` raises the Failure object itself
        # rather than the wrapped exception; failure.raiseException() is
        # the usual twisted idiom -- confirm before changing.
        raise failure
    # decrement the in-flight refcount for this eK, mirroring the
    # success path in _updateMaster
    (d, counter) = self.currentOps[self.eK]
    counter = counter - 1
    if counter == 0:
        logger.debug(self.ctx("err counter 0 for currentOps %s", self.eK))
        self.currentOps.pop(self.eK)
    else:
        logger.debug(self.ctx("err setting counter = %d for %s", counter,
                self.eK))
        self.currentOps[self.eK] = (d, counter)
    logger.error(self.ctx("%s: %s", message, failure.getErrorMessage()))
    logger.debug(self.ctx("%s", failure.getTraceback()))
    if raiseException:
        raise failure
class RetrieveFile:
"""
Uses the given storage key to retrieve a file. The storage key is used
to query the DHT layer for the file metadata record. The file record
contains the locations of the file blocks. These are downloaded
until the complete file can be regenerated and saved locally.
"""
def __init__(self, node, key, mkey=True):
# 1: Query DHT for sK
# 2: Retrieve entries for sK, decoding until efile can be regenerated
# 3: Retrieve eK from sK by eK=Kp(eKe). Use eK to decrypt file. Strip
# off leading pad.
# 4: Save file as filepath=Kp(efilepath).
self.node = node
self.mkey = mkey
try:
self.sK = fdecode(key)
except Exception, inst:
self.deferred = defer.fail(inst)
return
self.ctx = Ctx(crc32(str(self.sK))).msg
self.config = node.config
self.Ku = node.config.Ku
self.Kr = node.config.Kr
self.routing = self.config.routing.knownNodes()
self.metadir = self.config.metadir
self.parentcodedir = self.config.clientdir
self.numBlocksRetrieved = 0
self.blocks = {}
self.fsmetas = {}
self.deferred = self._retrieveFile()
def _retrieveFile(self):
# 1: Query DHT for sK
logger.debug(self.ctx("querying DHT for %s", self.sK))
d = self.node.client.kFindValue(self.sK)
d.addCallback(self._retrieveFileBlocks)
d.addErrback(self._findFileErr, "file retrieve failed")
return d
def _findFileErr(self, failure, message, raiseException=True):
logger.error(self.ctx("%s: %s" % (message, failure.getErrorMessage())))
logger.debug(self.ctx("%s" % (failure.getTraceback(),)))
if raiseException:
return failure
def _retrieveFileBlocks(self, meta):
# 2: Retrieve entries for sK, decoding until efile can be regenerated
if meta == None:
raise LookupError("couldn't recover metadata for %s" % self.sK)
self.meta = fdecode(meta)
# XXX: need to check for diversity. It could be that data stored
# previously to a smaller network (<k+m nodes) and that we should
# try to increase diversity and re-store the data.
# XXX: also need to make sure we still trust all the nodes in the
# metadata list. If not, we should move those blocks elsewhere.
if self.meta == None:
raise LookupError("couldn't recover metadata for %s" % self.sK)
k = self.meta.pop('k')
n = self.meta.pop('n')
if k != code_k or n != code_n:
# XXX:
raise ValueError("unsupported coding scheme %d/%d" % (k, n))
logger.info(self.ctx("got metadata %s" % self.meta))
self.decoded = False
return self._getSomeBlocks(code_k)
def _orderNodes(self, meta):
def score(k, node):
logger.debug(self.ctx("scoring node %s" % node))
v = (k, self.config.reputations[node] \
if node in self.config.reputations \
else TrustDeltas.INITIAL_SCORE)
if node in self.config.throttled \
and self.config.throttled[node] > curtime:
v = (k, TrustDeltas.NEGATIVE_CAP)
return v
keys = meta.keys()
r = []
logger.info(self.ctx("_orderNodes"))
curtime = int(time.time())
for k in keys:
logger.debug(self.ctx("thinkin bout key %s" % str(k)))
node = meta[k]
if isinstance(node, list):
# score it according to the highest scored node
v = (k, TrustDeltas.NEGATIVE_CAP)
for n in node:
newv = score(k, n)
if newv[1] > v[1]:
v = newv
r.append(v)
else:
r.append(score(k, node))
logger.debug(self.ctx("done scoring, now sorting"))
r.sort(lambda x,y:cmp(x[1],y[1]))
return keys
def _getSomeBlocks(self, reqs=code_k):
tries = 0
if reqs > len(self.meta):
reqs = len(self.meta)
dlist = []
logger.debug(self.ctx("about to order nodes"))
keys = self._orderNodes(self.meta)
logger.debug(self.ctx("keys are now: %s" % str(keys)))
for i in range(reqs):
#c = random.choice(self.meta.keys())
choice = keys.pop(0)
logger.info(self.ctx("choice is %s" % str(choice)))
block = fencode(choice[1])
id = self.meta[choice]
if isinstance(id, list):
logger.info(self.ctx(
"multiple location choices, choosing one randomly."))
id = random.choice(id)
# If the chosen one fails, try each of the others until
# it works or exhausts the list (thus the keys.insert bit)
self.meta[choice].remove(id)
if len(self.meta[choice]) == 0:
#self.meta.pop(choice)
pass
else:
keys.insert(0,choice)
else:
#self.meta.pop(choice)
pass
logger.info(self.ctx("retrieving %s from %s" % (block, id)))
# look up nodes by id, then do a retrieve.
deferred = self.node.client.kFindNode(id)
deferred.addCallback(self._retrieveBlock, block, id)
deferred.addErrback(self._findNodeErr,
"couldn't find node %s for block %s" % (fencode(id), block),
id)
dlist.append(deferred)
self.meta.pop(choice)
tries = tries + 1
if tries >= reqs:
break;
dl = defer.DeferredList(dlist)
dl.addCallback(self._retrievedAll)
return dl
def _findNodeErr(self, failure, msg, id):
logger.info(self.ctx("%s: %s" % (message, failure.getErrorMessage())))
self.config.modifyReputation(id, TrustDeltas.FNDN_FAIL)
def _retrieveBlock(self, kdata, block, id):
#print type(kdata)
#print kdata
#if len(kdata['k']) > 1:
if kdata['k'][0][2] != id:
print "%s != %s" (kdata['k'][0], id)
raise ValueError("couldn't find node %s" % fencode(id))
#raise ValueError("this shouldn't really be a ValueError..."
# " should be a GotMoreKnodesThanIBargainedForError: k=%s"
# % kdata['k'])
#else:
# print kdata['k']
node = kdata['k'][0]
host = node[0]
port = node[1]
id = node[2]
nKu = FludRSA.importPublicKey(node[3])
if not self.decoded:
d = self.node.client.sendRetrieve(block, host, port, nKu, self.mkey)
d.addCallback(self._retrievedBlock, id, block, self.mkey)
d.addErrback(self._retrieveBlockErr, id,
"couldn't get block %s from %s" % (block, fencode(id)),
host, port, id)
return d
def _retrievedBlock(self, msg, nID, block, mkey):
logger.debug(self.ctx("retrieved block=%s, msg=%s" % (block, msg)))
self.config.modifyReputation(nID, TrustDeltas.GET_SUCCEED)
blockname = [f for f in msg if f[-len(block):] == block][0]
expectedmeta = "%s.%s.meta" % (block, mkey)
metanames = [f for f in msg if f[-len(expectedmeta):] == expectedmeta]
if not metanames:
raise failure.DefaultException("expected metadata was missing")
self.blocks[block] = blockname
self.fsmetas[block] = metanames[0]
self.numBlocksRetrieved += 1
return True
def _retrieveBlockErr(self, failure, nID, message, host, port, id):
logger.info(self.ctx("%s: %s" % (message, failure.getErrorMessage())))
self.config.modifyReputation(nID, TrustDeltas.GET_FAIL)
# don't propogate the error -- one block doesn't cause the file
# retrieve to fail.
def _retrievedAll(self, success):
logger.info(self.ctx("tried retreiving %d blocks %s"
% (len(success), success)))
if self.numBlocksRetrieved >= code_k:
# XXX: need to make this try again with other blocks if decode
# fails
return self._decodeData()
elif len(self.meta) > 0:
tries = 3 # XXX: magic number. Should derive from k & m
logger.info(self.ctx("requesting %d more blocks" % tries))
return self._getSomeBlocks(tries)
else:
logger.info(self.ctx("couldn't decode file after retreiving"
" all %d available blocks" % self.numBlocksRetrieved))
#return False
raise RuntimeError("couldn't decode file after retreiving all"
" %d available blocks" % self.numBlocksRetrieved)
def _decodeData(self):
logger.debug(self.ctx("_decodeData"))
self.fname = os.path.join(self.parentcodedir,fencode(self.sK))+".rec1"
d = threads.deferToThread(self.decodeData, self.fname,
self.blocks.values(), self.config.clientdir)
d.addCallback(self._decodeFsMetadata, self.blocks)
d.addErrback(self._decodeError)
return d
def _decodeFsMetadata(self, decoded, blockname):
logger.debug(self.ctx("_decodeFsMetadata"))
self.mfname = os.path.join(self.parentcodedir,fencode(self.sK))+".m"
d = threads.deferToThread(self.decodeData, self.mfname,
self.fsmetas.values(), self.config.clientdir)
d.addCallback(self._decodeDone, decoded)
d.addErrback(self._decodeError)
return d
def decodeData(self, outfname, datafnames, datadir=None):
logger.info(self.ctx("decoding %s to %s" % (datafnames, outfname)))
outf = open(outfname, 'wb')
data = []
for fname in datafnames:
if datadir:
fname = os.path.join(datadir, fname)
data.append(open(fname, 'rb'))
result = fludfilefec.decode_from_files(outf, data)
outf.close()
for f in data:
f.close()
if result:
for fname in datafnames:
if datadir:
fname = os.path.join(datadir, fname)
os.remove(os.path.join(datadir, fname))
return result
def _decodeError(self, err):
logger.warn(self.ctx("could not decode: %s", str(err)))
logger.debug(self.ctx("%s", (err.getTraceback(),)))
return err
def _decodeDone(self, decoded, metadecoded):
if not self.decoded and decoded and metadecoded:
logger.info(self.ctx("successfully decoded (retrieved %d blocks --"
" all but %d blocks tried)" % (self.numBlocksRetrieved,
len(self.meta))))
return self._decryptMeta()
else:
logger.info(self.ctx("decoded=%s, mdecoded=%s" % (decoded,
metadecoded)))
raise RuntimeError("decode error after retreiving "
" %d blocks: decoded=%s, mdecoded=%"
% (self.numBlocksRetrieved, decoded, metadecoded))
def _decryptMeta(self):
# XXX: decrypt the metadatafile with Kr to get all the nmeta stuff (eeK
# etc.)
#mfile = open(os.path.join(self.parentcodedir, fencode(self.sK)+".m"))
mfile = open(self.mfname, 'r')
meta = mfile.read()
mfile.close()
logger.info(self.ctx("meta is %s" % str(meta)))
self.nmeta = fdecode(meta)
os.remove(self.mfname)
return self._decryptFile()
def _decryptFile(self):
# 3: Retrieve eK from sK by eK=Kr(eeK). Use eK to decrypt file. Strip
# off leading pad.
skey = fencode(self.sK)
f1 = open(os.path.join(self.parentcodedir,skey+".rec1"), "r")
f2 = open(os.path.join(self.parentcodedir,skey+".rec2"), "w")
#logger.info(self.ctx("decoding nmeta eeK for %s" % dir(self)))
eeK = fdecode(self.nmeta['eeK'])
# d_eK business is to ensure that eK is zero-padded to 32 bytes
d_eK = self.Kr.decrypt(eeK)
d_eK = '\x00'*(32%len(d_eK))+d_eK # XXX: magic 32, should be keyspace/8
eK = binascii.hexlify(d_eK)
eKey = AES.new(binascii.unhexlify(eK))
# XXX: bad blocking stuff, move into thread
while 1:
buf = f1.read(16)
if buf == "":
break;
f2.write(eKey.decrypt(buf))
f1.close()
f2.close()
os.remove(os.path.join(self.parentcodedir,skey+".rec1"))
f2 = open(os.path.join(self.parentcodedir,skey+".rec2"), "r")
f3 = open(os.path.join(self.parentcodedir,skey+".rec3"), "w")
padlen = f2.read(1)
#print "%s" % repr(padlen)
padlen = ord(padlen)
padlen -= 1
#print "throwing away %d pad bytes" % padlen
pad = f2.read(padlen) # throw away pad.
while 1:
buf = f2.read(16)
if buf == "":
break;
f3.write(buf)
f2.close()
f3.close()
os.remove(os.path.join(self.parentcodedir,skey+".rec2"))
# 4: Move file to its correct path, imbue it with properties from
# metadata.
# XXX: should we make sure we can read metadata before downloading all
# the file data?
#print "decoding nmeta meta"
efmeta = fdecode(self.nmeta['meta'])
cryptSize = (self.Kr.size()+1) / 8
fmeta = ""
for i in range(0, len(efmeta), cryptSize):
fmeta += self.Kr.decrypt(efmeta[i:i+cryptSize])
fmeta = fdecode(fmeta)
result = [fmeta['path']]
if os.path.exists(fmeta['path']):
# file is already there -- compare it. If different, save as
# path.recovered and keep a list of these (to let the user know
# that they'll need to resolve later). Or don't keep a list and
# just 'do the right thing' (use the latest version by timestamp,
# or always use the backup, or always use the local copy, or
# define some other behavior for doing the right thing).
logger.info(self.ctx("hash rec=%s" % hashfile(fmeta['path'])))
logger.info(self.ctx("hash org=%s" % eK))
if hashfile(fmeta['path']) != eK:
# XXX: do something better than log it -- see above comment
logger.info(self.ctx(
'different version of file %s already present'
% fmeta['path']))
# XXX: should generate '.recovered' extension more carefully,
# so as not to overwrite coincidentally named files.
fmeta['path'] = fmeta['path']+".recovered"
result.insert(0,fmeta['path'])
os.rename(os.path.join(self.parentcodedir,skey+".rec3"),
fmeta['path'])
else:
logger.info(self.ctx('same version of file %s already present'
% fmeta['path']))
# no need to copy:
os.remove(os.path.join(self.parentcodedir,skey+".rec3"))
else:
# recover parent directories if not present
paths = pathsplit(fmeta['path'])
for i in paths:
if not os.path.exists(i) and i != fmeta['path']:
os.mkdir(i) # best effort dir creation, even if missing
# directory metadata
# XXX: should be using an accessor method on config for
# master
if i in self.config.master:
dirmeta = self.config.getFromMasterMeta(i)
os.chmod(i,dirmeta['mode'])
os.chown(i,dirmeta['uid'],dirmeta['gid']) # XXX: windows
# XXX: atim, mtim, ctim
# XXX: should try to make sure we can write to dir, change
# perms if necessary.
# recover file by renaming to its path
os.rename(os.path.join(self.parentcodedir,skey+".rec3"),
fmeta['path'])
# XXX: chown not supported on Windows
os.chown(fmeta['path'], fmeta['uid'], fmeta['gid'])
os.utime(fmeta['path'], (fmeta['atim'], fmeta['mtim']))
os.chmod(fmeta['path'], fmeta['mode'])
logger.info(self.ctx("successfully restored file metadata"))
return tuple(result)
class RetrieveFilename:
    """
    Retrieves a File given its local name. Only works if the local master
    index contains an entry for this filename.
    """

    def __init__(self, node, filename):
        self.node = node
        self.filename = filename
        self.metadir = self.node.config.metadir
        self.config = self.node.config
        self.deferred = self._recoverFile()

    def _recoverFile(self):
        """Look up filename in the master metadata and retrieve it.

        Directory entries (dicts) trigger retrieval of every file under
        the directory; file entries are (filekey, backuptime) tuples.
        Returns a deferred (DeferredList for directories), or a failed
        deferred when no usable record exists.
        """
        fmeta = self.config.getFromMasterMeta(self.filename)
        if fmeta:
            if isinstance(fmeta, dict):
                logger.debug("%s is a directory in master metadata",
                        self.filename)
                # RetrieveFile will restore parent dirs, so we don't need to
                dlist = []
                dirname = self.filename+os.path.sep
                # XXX: this should be calling a config.getAllFromMasterMeta()
                for i in [x for x in self.config.master.keys()
                        if dirname == x[:len(dirname)]]:
                    # fix: getFromMasterMeta is a method; the original
                    # subscripted it (TypeError at runtime).
                    entry = self.config.getFromMasterMeta(i)
                    if isinstance(entry, dict):
                        # nested directory entry: its files also match the
                        # prefix and are retrieved individually
                        continue
                    # file entries are (filekey, backuptime), as in the
                    # single-file branch below
                    (filekey, backuptime) = entry
                    metakey = crc32(i)
                    logger.debug("calling RetrieveFile %s" % filekey)
                    d = RetrieveFile(self.node, fencode(filekey),
                            metakey).deferred
                    dlist.append(d)
                dl = defer.DeferredList(dlist)
                return dl
            else:
                logger.debug("%s is file in master metadata", self.filename)
                (filekey, backuptime) = self.config.getFromMasterMeta(
                        self.filename)
                metakey = crc32(self.filename)
                if filekey != None and filekey != "":
                    logger.debug("calling RetrieveFile %s" % filekey)
                    d = RetrieveFile(self.node, fencode(filekey),
                            metakey).deferred
                    return d
                return defer.fail(LookupError("bad filekey %s for %s"
                        % (filekey, self.filename)))
        return defer.fail(LookupError("no record of %s" % self.filename))
class VerifyFile:
    """Placeholder for a dedicated file-verification operation (no-op)."""
    # XXX: remove me? I don't do anything that StoreFile can't do, plus if
    #      I fail you'd still need to call StoreFile right after...
    #      Or, do we keep me around and rip out all the verify stuff from
    #      StoreFile and put it in here?
    #      -- What we really want to do is look at trading partners for all
    #      our files, and then check each one every day with some random bit
    #      of data from some random file.  But we also want eventual coverage
    #      of all stored files.  It seems like a good first approach would be
    #      to scan over our local copies of all DHT records and make a list
    #      of storing nodes, which will be of size N.  Then, do k VERIFY ops
    #      to each node in N, using a random subset of the files we have
    #      stored.  The VerifyFile object's purpose may be orthogonal to
    #      that, or completely unnecessary, as the described scheme can be
    #      accomplished with plain VERIFY ops.
    def verifyFile(self, filepath):
        """
        Chooses some random blocks from filepath to verify against the store.
        The algorithm is as follows: sK = H(H(file at filepath)). Look up sK
        in the local master index. If the record isn't there, return this
        fact. If the record is there, retrieve its metadata. Verify k
        blocks as follows:
        With probability n/(m+n), code the file locally (to verify coded
        blocks with a fair probability, i.e., if m=40 and n=20, 33% of the
        time we will do the coding).
        Choose k blocks from the resulting blocks and using the file
        metadata record, do a VERIFY operation using a random offset and random
        length (if we chose not to do the coding in the previous step, the k
        blocks must come entirely from the non-coded portion). As we wait
        for the VERIFYs to return, hash these blocks locally. As each VERIFY
        returns, compare it with our local hash just computed. Return a list
        of hosts/nodeids for which the VERIFY failed.
        """
        pass # unimplemented -- see class-level XXX about removing this
class RetrieveMasterIndex:
    """Recover this node's master metadata index from the DHT.

    The node's ID maps (via kFindValue) to a CAS key naming the most
    recently stored master index file; that file is then recovered with
    RetrieveFile.
    """

    def __init__(self, node):
        self.node = node
        # 1. CAS = kfindval(nodeID) (CAS for last FLUDHOME/meta/master)
        nodeID = long(self.node.config.nodeID, 16)
        logger.info("looking for key %x" % nodeID)
        d = self.node.client.kFindValue(nodeID)
        d.addCallback(self._foundCAS)
        d.addErrback(self._retrieveMasterIndexErr,
                "couldn't find master metadata")
        self.deferred = d

    def _foundCAS(self, CAS):
        # 2. oldmaster = kfindval(CAS)
        # a dict here means kFindValue returned closer nodes, not a value
        if isinstance(CAS, dict):
            return defer.fail(ValueError("couldn't find CAS key"))
        retrieval = RetrieveFile(self.node, fdecode(CAS)).deferred
        retrieval.addCallback(self._foundMaster)
        retrieval.addErrback(self._retrieveMasterIndexErr,
                "couldn't find Master Index")
        return retrieval

    def _foundMaster(self, result):
        # Two filenames back means a differing local copy already existed;
        # prefer the version recovered from the distributed store.
        if len(result) != 2:
            return result
        recovered, original = result
        os.rename(recovered, original)
        return (original,)

    def _retrieveMasterIndexErr(self, err, msg):
        logger.warn(msg)
        return err
class UpdateMasterIndex:
    """Push the local master metadata index out to the network.

    Retrieves the previously stored master index (deletion of the old
    copy is still a TODO), stores the current FLUDHOME/meta/master file
    via StoreFile, and finally points the node's own DHT record at the
    new index's CAS key.
    """
    def __init__(self, node):
        self.node = node
        self.metamaster = os.path.join(self.node.config.metadir,
                self.node.config.metamaster)
        # 0.1. oldmaster = RetrieveMasterIndex()
        self.deferred = RetrieveMasterIndex(node).deferred
        self.deferred.addCallback(self._removeOldMasterIndex)
        # errback falls straight through to storing (e.g. first backup,
        # when no previous master index exists yet)
        self.deferred.addErrback(self._storeMasterIndex)
    def _removeOldMasterIndex(self, res):
        # 0.2. for i in oldmaster: delete(i)
        print "removing old master not yet implemented"
        return self._storeMasterIndex(res)
    def _storeMasterIndex(self, res_or_err):
        # 1. store FLUDHOME/meta/master
        # (res_or_err may be a Failure when reached via the errback path)
        print "going to store %s" % self.metamaster
        d = StoreFile(self.node, self.metamaster).deferred
        d.addCallback(self._updateCAS)
        d.addErrback(self._updateMasterIndexErr, "couldn't store master index")
        return d
    def _updateCAS(self, stored):
        # 2. kstore(nodeID, CAS(FLUDHOME/meta/master))
        # stored is the (key, meta) pair returned by StoreFile's
        # _updateMaster callback
        key, meta = stored
        logger.info("storing %s at %x" % (key,
                long(self.node.config.nodeID,16)))
        d = self.node.client.kStore(long(self.node.config.nodeID,16),
                key) # XXX: key should be fdecode()ed
        return d
    def _updateMasterIndexErr(self, err, msg):
        logger.warn(msg)
        return err
if __name__ == "__main__":
    # Manual test driver: brings up a local FludNode and exercises the
    # store/retrieve primitives against it (or, when host/port args are
    # given, after first bootstrapping off a remote node).  Each do* stage
    # chains to the next via the nextStage callback parameter.
    from FludNode import FludNode
    def successTest(res, fname, whatfor, nextStage=None):
        # terminal stage: just log completion (nextStage is ignored here)
        logger.info("finished %s" % whatfor)
    def errTest(failure):
        logger.info("boom: %s" % failure.getErrorMessage())
        raise failure
    def fileKey(fname):
        # compute the storage key sK = H(H(file contents))
        EK = hashfile(fname)
        return fencode(long(hashstring(EK), 16))
    def clearMeta(fname):
        # delete any metadata that might exist for this file.
        try:
            SK = fileKey(fname)
            os.remove(os.path.join(n.config.kstoredir,SK))
            logger.info("test removed %s" % os.path.join(n.config.kstoredir,SK))
        except:
            pass
    def doStore(fname, msg=None, nextStage=successTest):
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        # do a store
        logger.info("nextStage is %s" % nextStage)
        d = StoreFile(n,fname).deferred
        d.addCallback(nextStage, fname, 'store op', doCorruptSegAndStore)
        d.addErrback(errTest)
    def doDelSegAndStore((key, meta), fname, msg=None, nextStage=successTest):
        # only works if stores are local
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        # delete a block and do a store
        c = random.choice(meta.keys())
        logger.info("removing %s" % fencode(c))
        os.remove(os.path.join(n.config.storedir,fencode(c)))
        logger.info("test removed %s" % os.path.join(n.config.storedir,
                fencode(c)))
        d = StoreFile(n,fname).deferred
        d.addCallback(nextStage, fname, 'lost block op')
        d.addErrback(errTest)
    def doCorruptSegAndStore((key, meta), fname, msg=None,
            nextStage=successTest):
        # only works if stores are local
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        # corrupt a block and do a store
        c = random.choice(meta.keys())
        logger.info("corrupting %s" % fencode(c))
        f = open(os.path.join(n.config.storedir,fencode(c)), 'r')
        data = f.read()
        f.close()
        f = open(os.path.join(n.config.storedir,fencode(c)), 'w')
        f.write('blah'+data)
        f.close()
        d = StoreFile(n,fname).deferred
        d.addCallback(nextStage, fname, 'corrupted block op')
        d.addErrback(errTest)
    def doRetrieve(key, msg=None, nextStage=successTest):
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        d = RetrieveFile(n, key).deferred
        d.addCallback(nextStage, key, 'retrieve op')
        d.addErrback(errTest)
    def doRetrieveName(filename, msg=None, nextStage=successTest):
        if msg != None:
            logger.info("finished %s ------" % msg)
            logger.info("---------------------------------------------------\n")
        d = RetrieveFilename(n, filename).deferred
        d.addCallback(nextStage, filename, 'retrieve filename op')
        d.addErrback(errTest)
    def runTests(dummy):
        # test against self -- all stores and queries go to self.
        fname = "/tmp/nrpy.pdf"
        #clearMeta(fname)
        #doStore(fname, None, doDelSegAndStore) # do all stages of testing
        doStore(fname) # only do one op (for manual testing)
        #doRetrieve(fileKey(fname))
        #doRetrieveName(fname)
    n = FludNode()
    n.run()
    if len(sys.argv) == 3:
        # bootstrap off the given host:port before running the tests
        deferred = n.client.sendkFindNode(sys.argv[1], int(sys.argv[2]), 1)
        deferred.addCallback(runTests)
        deferred.addErrback(errTest)
    else:
        runTests(None)
    n.join()
| Python |
"""
Okay, so this isn't a real python module, yet. Wanted to get down a few ideas
on versioning. First, the background.
Traditional backup systems that provide versioning support allow the user to
retrieve the current version of a file, or any of N previous versions that were
stored during previous backup operations. Since it was rather trivial to
simply keep old versions on the central backup server, this wasn't much of an
engineering problem (at worst, disk fills up quickly).
With a collaborative backup system, such a scheme is less practical. If fully
enabled, it can consume many times the storage space of a simple
single-snapshot system. If you want to enforce fairness, you must require
that the number of resources you consume are proportional to those that you
provide. Encoding already dictates that this ratio is imbalanced towards
providing more resources than consuming. But "server-side" versioning, even
when using a clever delta-compression technique, really tips the scales.
There is good news, however. We can use a single-snapshot system to provide
versioning by requiring all versioning to occur locally. That is, the
consumer's own hard drive can be used to maintain multiple versions of files,
and then the whole thing can be backed up as a single-snapshot to the flud
network. Think of a local CVS repository (with many versions contained
therein) that is set to be backed up; the backup system doesn't have to worry
about versioning -- it just backs up the current data. The local CVS repo
is in charge of worrying about versions.  To the user, it's all the same.
The advantages of this scheme are mainly:
1) simplicity
2) storage consumption minimization, opt-in
3) decoupling of versioning layer from backup layer
Of the three, #1 is really the most appealing. We just back up the current
view. This also greatly simplifies the verification mechanism -- the verifier
will always have the complete file from which to do challenge/response queries
to the verifiee. We don't have to worry about keeping deltas or partial
checksums or anything like that in order to do verification; we just pick a
block of bytes at random from within the file, and make sure that the storer
can return us the hash of those bytes. #1 also means that we don't have to do
anything complicated to figure out what the delta of a delta-compressed version
should be (i.e., we don't need to request the old version from the storage
system, compare it with our version, then send out a delta) -- in fact, with
this scheme we wipe out delta compression altogether, at least from the
viewpoint of the storage mechanism (some other local mechanism is welcome to
use delta compression to store versions locally, but this mechanism won't need
to download lots of data in order to do so, because it will all be local).
#2 is nice. It means that if the user isn't interested in versioning, they
don't have to do it. This will be the default, in fact. This means that we
eliminate a lot of overhead that we would have had if every user was storing
versions, even if they didn't need them. It also means that there is an
automatic cost for enabling versions, not only for the collaborative system,
but for the user's local storage resources. Not to imply that we want to
punish the user for enabling versions, but there's no free lunch (and besides,
adding local disk is cheap).
[as an aside, here, versioning does become necessary quite quickly for things
such as email clients that store all mail in a particular folder as one
large file, or other applications that use databases in single files -- we
don't want the user to have to send the whole file (which can become quite
large) every time they get a new email or change the db slightly. The good
news is that we can still provide this in its own layer].
Decoupling (#3) is always a good idea, especially when it can be done cleanly.
The user (or flud client developer) is free to implement whatever local
versioning scheme they want. They can make copies of files and store them in
other directories, they could use a version control system such as CVS, they
could do their own delta compression and store the deltas in a special
directory. They could store as many or as few versions as they want. They can
take version snapshots often or seldom, and this is independent of how often
they perform backup. And such schemes can be switched out, upgraded, or
removed on the fly without anyone really noticing.
So, what is this module all about then? Well, for now, nothing but this
documentation. If the user wants versioning, they'll have to do it themselves,
(keeping in mind that the system *will* store those local versions, and they
can rest easy knowing that they can be retrieved). At some future point, we
will implement some fancy versioning layer with local delta compression in this
module to 'complete the package,' but it is currently a low priority endeavor.
Its priority will rise as we get close to providing a 'for your grandma'
solution.
"""
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode, fdecode
"""
Test code for primitive DHT operations.
"""
# Seconds to keep the node running before shutdown (see cleanup()).
stay_alive = 1
# Fixture file and its CAS-style key (basename only).
filename = "/tmp/tempstoredata"
filekey = os.path.basename(filename)
# Fixed 256-bit DHT key used by every kStore/kFindValue test below.
key = 87328673569979667228965797330646992089697345905484734072690869757741450870337L
# format of block metadata is
# {(i, datakey): storingNodeID, ..., 'n': n, 'm': m}
# where i<n+m, datakey is a sha256 of the data stored, and storingNodeID is
# either a nodeID or a list of nodeIDs.
# NOTE(review): the comment above mentions keys 'n' and 'm', but the dict
# below actually carries 'k' and 'n' -- the comment may be stale; confirm
# against the metadata consumers.
testval = {(0, 802484L): 465705L, (1, 780638L): 465705L, (2, 169688L): 465705L,
        (3, 267175L): 465705L, (4, 648636L): 465705L, (5, 838315L): 465705L,
        (6, 477619L): 465705L, (7, 329906L): 465705L, (8, 610565L): 465705L,
        (9, 217811L): 465705L, (10, 374124L): 465705L, (11, 357214L): 465705L,
        (12, 147307L): 465705L, (13, 427751L): 465705L, (14, 927853L): 465705L,
        (15, 760369L): 465705L, (16, 707029L): 465705L, (17, 479234L): 465705L,
        (18, 190455L): 465705L, (19, 647489L): 465705L, (20, 620470L): 465705L,
        (21, 777532L): 465705L, (22, 622383L): 465705L, (23, 573283L): 465705L,
        (24, 613082L): 465705L, (25, 433593L): 465705L, (26, 584543L): 465705L,
        (27, 337485L): 465705L, (28, 911014L): 465705L, (29, 594065L): 465705L,
        (30, 375876L): 465705L, (31, 726818L): 465705L, (32, 835759L): 465705L,
        (33, 814060L): 465705L, (34, 237176L): 465705L, (35, 538268L): 465705L,
        (36, 272650L): 465705L, (37, 314058L): 465705L, (38, 257714L): 465705L,
        (39, 439931L): 465705L, 'k': 20, 'n': 20}
# Console logger used for test progress reporting.
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
        ' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
def cleanup(_, node):
    """Schedule node shutdown after the keep-alive delay.

    The first argument is the ignored result/failure handed in by addBoth().
    """
    delay = stay_alive
    logger.info("waiting %ds to shutdown..." % delay)
    reactor.callLater(delay, node.stop)
def testerror(failure, message, node):
    """
    error handler for test errbacks

    Logs the failure and flags the run as failed; does not re-raise, so the
    deferred chain continues with a None result after this point.
    """
    logger.warn("testerror message: %s" % message)
    logger.warn("testerror: '%s'" % str(failure))
    logger.warn("At least 1 test FAILED")
def endtests(res, nKu, node, host, port):
    """ executes after all tests """
    # The retrieved value may arrive fencoded; decode if possible, otherwise
    # assume it is already a plain dict.
    try:
        res = fdecode(res)
    except ValueError:
        pass
    if res != testval:
        return testerror(None, "retrieved value does not match stored value:"
                " '%s' != '%s'" % (res, testval), node)
    logger.info("testkFindVal PASSED")
    logger.debug("testkFindVal result: %s" % str(res))
    logger.info("all tests PASSED")
    return res
def testkFindVal(res, nKu, node, host, port):
    """Run a recursive kFindValue for `key`; chains to endtests on success."""
    logger.info("testSendkFindVal PASSED")
    logger.debug("testSendkFindVal result: %s" % str(res))
    logger.info("attempting testkFindValue")
    deferred = node.client.kFindValue(key)
    deferred.addCallback(endtests, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testkFindValue", node)
    return deferred
def testSendkFindVal(res, nKu, node, host, port):
    """Send a single-hop kFindValue to host:port; chains to testkFindVal."""
    logger.info("testkStore PASSED")
    logger.debug("testkStore result: %s" % str(res))
    logger.info("attempting testSendkFindValue")
    deferred = node.client.sendkFindValue(host, port, key)
    deferred.addCallback(testkFindVal, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testSendkFindValue", node)
    return deferred
def testkStore(res, nKu, node, host, port):
    """Store testval under `key` via recursive kStore; chains onward."""
    logger.info("testSendkStore PASSED")
    logger.debug("testSendkStore result: %s" % str(res))
    logger.info("attempting testkStore")
    deferred = node.client.kStore(key, testval)
    deferred.addCallback(testSendkFindVal, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testkStore", node)
    return deferred
def testSendkStore(res, nKu, node, host, port):
    """Store testval under `key` with a single-hop sendkStore.

    Runs after testkFindNode; chains to testkStore on success.
    """
    logger.info("testkFindNode PASSED")
    logger.debug("testkFindNode result: %s" % str(res))
    logger.info("attempting testSendkStore")
    deferred = node.client.sendkStore(host, port, key, testval)
    deferred.addCallback(testkStore, nKu, node, host, port)
    # bug fix: the failure message previously said "failed at testkStore",
    # a copy-paste error that misattributed failures of this stage.
    deferred.addErrback(testerror, "failed at testSendkStore", node)
    return deferred
def testkFindNode(res, nKu, node, host, port):
    """ executes after testSendkFindNode """
    logger.info("testSendkFindNode PASSED")
    logger.debug("testSendkFindNode result: %s" % str(res))
    logger.info("attempting kFindNode")
    deferred = node.client.kFindNode(key)
    deferred.addCallback(testSendkSt, nKu, node, host, port) if False else None  # (placeholder removed)
    deferred.addCallback(testSendkStore, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at kFindNode", node)
    return deferred
def testSendkFindNode(nKu, node, host, port):
    """ executes after testGetID """
    # nKu is the remote node's public key, produced by sendGetID.
    logger.info("testkGetID PASSED")
    logger.info("attempting sendkFindNode")
    deferred = node.client.sendkFindNode(host, port, key)
    deferred.addCallback(testkFindNode, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at sendkFindNode", node)
    return deferred
def testGetID(node, host, port):
    """ Tests sendGetID(), and invokes testSendkFindNode on success """
    deferred = node.client.sendGetID(host, port)
    deferred.addCallback(testSendkFindNode, node, host, port)
    deferred.addErrback(testerror, "failed at testGetID", node)
    return deferred
def runTests(host, port=None, listenport=None):
    """Start a local FludNode and run the DHT op test chain against host:port.

    If port is None, the target port defaults to this node's configured port.
    Blocks in node.join() until the reactor stops (see cleanup()).
    """
    host = getCanonicalIP(host)
    node = FludNode(port=listenport)
    if port == None:
        port = node.config.port
    logger.info("testing against %s:%s, localport=%s" % (host,
            port, listenport))
    node.run()
    d = testGetID(node, host, port)
    d.addBoth(cleanup, node)
    #testkFindVal("blah", node.config.Ku, node, host, port)
    node.join()
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1]))
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2]))
elif len(sys.argv) == 4:
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket, shutil, tempfile
from binascii import crc32
from StringIO import StringIO
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.FludCrypto import generateRandom, hashfile
from flud.protocol.FludCommUtil import *
from flud.fencode import fencode
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
smallfilekey = ""
smallfilename = ""
smallfilenamebad = ""
largefilekey = ""
largefilename = ""
largefilenamebad = ""
metadata = 'aaaa'
def testerror(failure, message, node):
    """
    error handler for test errbacks

    Prints diagnostics and re-raises the failure so it keeps propagating
    down the errback chain.
    """
    print "testerror message: %s" % message
    print "testerror: %s" % str(failure)
    print "At least 1 test FAILED"
    raise failure
def testUnexpectedSuccess(res, message, node):
print "unexpected success message: %s" % message
print "At least 1 test succeeded when it should have failed"
raise "bad"
def testDELETEBadKeyFailed(failure, msg, node, nKu, host, port):
    """Errback: only NotFoundException is the expected outcome; anything
    else is re-raised as a genuine test failure.  Last test in the chain."""
    if failure.check('flud.protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        # the end
    else:
        print "\nDELETEBadKey expected NotFoundException," \
                " but got a different failure:"
        raise failure
def testDELETEBadKey(nKu, node, host, port):
    """Attempt a DELETE with an illegal (path-qualified) key; must fail."""
    print "starting testDELETEBadKey"
    path = os.path.join("somedir", largefilekey)
    deferred = node.client.sendDelete(path, crc32(path), host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "DELETE with bad key succeeded",
            node)
    deferred.addErrback(testDELETEBadKeyFailed,
            "DELETE with bad key failed as expected", node, nKu, host, port)
    return deferred
def testVERIFYBadKeyFailed(failure, msg, node, nKu, host, port):
    """Errback: NotFoundException is expected; chains to testDELETEBadKey."""
    if failure.check('flud.protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        return testDELETEBadKey(nKu, node, host, port)
    else:
        print "\nVERIFYBadKey expected NotFoundException," \
                " but got a different failure:"
        raise failure
def testVERIFYBadKey(nKu, node, host, port):
    """VERIFY using a key that was never stored (the '...bad' copy); must
    fail with NotFoundException."""
    print "starting testVERIFYBadKey"
    fsize = os.stat(smallfilename)[stat.ST_SIZE]
    # offset/length chosen to be valid for the real file, so only the key
    # is wrong.
    offset = fsize-20
    deferred = node.client.sendVerify(smallfilenamebad, offset, 5, host,
            port, nKu)
    deferred.addCallback(testUnexpectedSuccess,
            "verified file with bad key succeeded", node)
    deferred.addErrback(testVERIFYBadKeyFailed,
            "VERIFY of bad filekey failed as expected", node, nKu, host, port)
    return deferred
def testVERIFYBadLengthFailed(failure, msg, node, nKu, host, port):
    """Errback: BadRequestException is expected; chains to testVERIFYBadKey."""
    if failure.check('flud.protocol.FludCommUtil.BadRequestException'):
        print "%s" % msg
        return testVERIFYBadKey(nKu, node, host, port)
    else:
        print "\nVERIFYBadLength expected BadRequestException," \
                " but got a different failure:"
        raise failure
def testVERIFYBadLength(nKu, node, host, port):
print "starting testVERIFYBadOffset"
fsize = os.stat(smallfilename)[stat.ST_SIZE]
offset = fsize-10
deferred = node.client.sendVerify(smallfilekey, offset, 20, host, port, nKu)
deferred.addCallback(testUnexpectedSuccess, "verified file with bad length",
node)
deferred.addErrback(testVERIFYBadLengthFailed,
"VERIFY of bad length failed as expected", node, nKu, host, port)
return deferred
def testVERIFYBadOffsetFailed(failure, msg, node, nKu, host, port):
    """Errback: BadRequestException is expected; chains to testVERIFYBadLength."""
    if failure.check('flud.protocol.FludCommUtil.BadRequestException'):
        print "%s" % msg
        return testVERIFYBadLength(nKu, node, host, port)
    else:
        print "\nVERIFYBadOffset expected BadRequestException," \
                " but got a different failure:"
        raise failure
def testVERIFYBadOffset(nKu, node, host, port):
    """VERIFY with an offset beyond EOF; must be rejected."""
    print "starting testVERIFYBadOffset"
    fsize = os.stat(smallfilename)[stat.ST_SIZE]
    # offset is two bytes past the end of the file.
    offset = fsize+2
    deferred = node.client.sendVerify(smallfilekey, offset, 20, host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "verified file with bad offset",
            node)
    deferred.addErrback(testVERIFYBadOffsetFailed,
            "VERIFY of bad offset failed as expected", node, nKu, host, port)
    return deferred
def testVERIFYNotFoundFailed(failure, msg, node, nKu, host, port):
    """Errback: NotFoundException is expected; chains to testVERIFYBadOffset."""
    if failure.check('flud.protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        return testVERIFYBadOffset(nKu, node, host, port)
    else:
        print "\nVERIFYNotFound expected NotFoundException," \
                " but got a different failure:"
        raise failure
def testVERIFYNotFound(nKu, node, host, port):
    """VERIFY a key that was never stored (largefilekey); must fail."""
    print "starting testVERIFYNotFound"
    deferred = node.client.sendVerify(largefilekey, 10, 10, host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "verified non-existent file",
            node)
    deferred.addErrback(testVERIFYNotFoundFailed,
            "VERIFY of non-existent file failed as expected", node, nKu,
            host, port)
    return deferred
def testRETRIEVEIllegalPathFailed(failure, msg, node, nKu, host, port):
    """Errback: NotFoundException is expected; chains to testVERIFYNotFound."""
    if failure.check('flud.protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        return testVERIFYNotFound(nKu, node, host, port)
    else:
        print "\nRETRIEVEIllegalPath expected NotFoundException," \
                " but got a different failure:"
        raise failure
def testRETRIEVEIllegalPath(nKu, node, host, port):
    """RETRIEVE using a path-qualified key (directory traversal style);
    must fail."""
    print "starting testRETRIEVEIllegalPath"
    deferred = node.client.sendRetrieve(os.path.join("somedir",smallfilekey),
            host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess,
            "retrieved file with illegal path", node)
    deferred.addErrback(testRETRIEVEIllegalPathFailed,
            "RETRIEVE using illegal path failed as expected", node, nKu,
            host, port)
    return deferred
def testRETRIEVENotFoundFailed(failure, msg, node, nKu, host, port):
    """Errback: NotFoundException is expected; chains to testRETRIEVEIllegalPath."""
    if failure.check('flud.protocol.FludCommUtil.NotFoundException'):
        print "%s" % msg
        return testRETRIEVEIllegalPath(nKu, node, host, port)
    else:
        print "\nRETRIEVENotFound expected NotFoundException," \
                " but got a different failure:"
        raise failure
def testRETRIEVENotFound(nKu, node, host, port):
    """RETRIEVE a key that was never stored; must fail with NotFound."""
    print "starting testRETRIEVENotFound"
    deferred = node.client.sendRetrieve(largefilekey, host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess,
            "retrieved file that shouldn't exist", node)
    deferred.addErrback(testRETRIEVENotFoundFailed,
            "RETRIEVE of non-existent file failed as expected", node, nKu,
            host, port)
    return deferred
def testSTORELargeFailed(failure, msg, node, nKu, host, port):
    """Errback: BadCASKeyException is expected; chains to testRETRIEVENotFound."""
    if failure.check('flud.protocol.FludCommUtil.BadCASKeyException'):
        print "%s" % msg
        return testRETRIEVENotFound(nKu, node, host, port)
    else:
        print "\nSTORELarge expected BadCASKeyException," \
                " but got a different failure:"
        raise failure
def testSTOREBadKeyLarge(nKu, node, host, port):
    """STORE a large file under a non-content-addressed key; must be
    rejected with BadCASKeyException."""
    print "starting testSTOREBadKeyLarge"
    deferred = node.client.sendStore(largefilenamebad,
            (crc32(largefilenamebad), StringIO(metadata)), host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "large file, bad key succeeded",
            node)
    deferred.addErrback(testSTORELargeFailed,
            "large STORE with bad key failed as expected", node, nKu,
            host, port)
    return deferred
def testSTORESmallFailed(failure, msg, node, nKu, host, port):
    """Errback: BadCASKeyException is expected; chains to testSTOREBadKeyLarge."""
    if failure.check('flud.protocol.FludCommUtil.BadCASKeyException'):
        print "%s" % msg
        return testSTOREBadKeyLarge(nKu, node, host, port)
    else:
        print "\nSTORESmall expected BadCASKeyException," \
                " but got a different failure:"
        raise failure
def testSTOREBadKeySmall(nKu, node, host, port):
    """STORE a small file under a non-content-addressed key; must be
    rejected with BadCASKeyException."""
    print "starting testSTOREBadKeySmall"
    deferred = node.client.sendStore(smallfilenamebad,
            (crc32(smallfilenamebad), StringIO(metadata)), host, port, nKu)
    deferred.addCallback(testUnexpectedSuccess, "small file, bad key succeeded",
            node)
    deferred.addErrback(testSTORESmallFailed,
            "small STORE with bad key failed as expected", node, nKu,
            host, port)
    return deferred
def testSTORESuccess(res, nKu, node, host, port):
    """Callback after the one expected-successful STORE; starts the
    expected-failure chain."""
    print "testSTORE succeeded: %s" % res
    return testSTOREBadKeySmall(nKu, node, host, port)
def testSTORE(nKu, node, host, port):
    # store a file successfully for later failure tests (VERIFY, etc)
    print "starting testSTORE"
    deferred = node.client.sendStore(smallfilename,
            (crc32(smallfilename), StringIO(metadata)), host, port, nKu)
    deferred.addCallback(testSTORESuccess, nKu, node, host, port)
    deferred.addErrback(testerror, "failed at testSTORE", node)
    return deferred
# XXX: need to test bogus headers for all commands (BAD_REQUEST)
# XXX: need to test failures for authentication
def testID(node, host, port):
    """ Tests sendGetID(), and invokes testSTORE on success """
    print "starting testID"
    deferred = node.client.sendGetID(host, port)
    # sendGetID's result (the remote public key) becomes testSTORE's nKu arg.
    deferred.addCallback(testSTORE, node, host, port)
    #deferred.addCallback(testSTOREBadKeySmall, node, host, port)
    deferred.addErrback(testerror, "failed at testID", node)
    return deferred
def cleanup(err, node):
    """Remove all test fixture files and schedule node shutdown.

    err is the result/failure passed in by addBoth(); only used for logging.
    """
    if err:
        print "cleaning up: %s" % err
    else:
        print "cleaning up"
    os.remove(smallfilename)
    os.remove(smallfilenamebad)
    os.remove(largefilename)
    os.remove(largefilenamebad)
    reactor.callLater(1, node.stop)
def generateTestData():
    """Create the small and large fixture files and their bad-key copies,
    populating the module-level name globals."""
    def generateFiles(minsize):
        # NOTE(review): tempfile.mktemp() is race-prone; mkstemp() would be
        # safer, but this is test-only code.
        fname = tempfile.mktemp()
        f = open(fname, 'w')
        # minsize zero bytes plus 1-256 random bytes so every file is unique.
        f.write('\0'*minsize)
        f.write(generateRandom(random.randrange(256)+1))
        f.close()
        # The CAS key is the fencoded sha hash of the content; the file is
        # renamed to its own key.
        filekey = hashfile(fname)
        filekey = fencode(int(filekey, 16))
        filename = os.path.join("/tmp",filekey)
        os.rename(fname,filename)
        # The "bad" copy has identical content but a corrupted key prefix.
        filenamebad = os.path.join("/tmp/","bad"+filekey[3:])
        shutil.copy(filename, filenamebad)
        return (filekey, filename, filenamebad)
    global smallfilekey
    global smallfilename
    global smallfilenamebad
    (smallfilekey, smallfilename, smallfilenamebad) = generateFiles(1024)
    global largefilekey
    global largefilename
    global largefilenamebad
    (largefilekey, largefilename, largefilenamebad) = generateFiles(512000)
def runTests(host, port=None, listenport=None):
    """Generate fixtures, start a node, and run the primitive-op test chain
    against host:port.  Blocks until the reactor stops."""
    generateTestData()
    node = FludNode(port=listenport)
    if port == None:
        port = node.config.port
    node.run()
    d = testID(node, host, port)
    d.addBoth(cleanup, node)
    node.join()
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
"""
FludFileOpTest.py, (c) 2003-2006 Alen Peacock. This program is distributed
under the terms of the GNU General Public License (the GPL), version 3.
System tests for FludFileOperations
"""
import sys, os, time, logging, tempfile, shutil
import random, socket
from twisted.internet import reactor, defer
from twisted.python import failure
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.abspath(__file__)))))
from flud.FludConfig import FludConfig
from flud.FludNode import FludNode
from flud.fencode import fencode, fdecode
from flud.FludCrypto import generateRandom
from flud.FludFileOperations import *
import flud.FludDefer as FludDefer
from flud.protocol.LocalClient import listMeta
logger = logging.getLogger('flud')
def testError(failure, message, node):
    """Errback: report a test failure and propagate the failure object."""
    print "testError message: %s" % message
    print "testError: %s" % str(failure)
    print "At least 1 test FAILED"
    return failure
def verifySuccess(r, desc):
    """Callback: report that the stage labelled `desc` succeeded."""
    print "%s succeeded" % desc
def checkRetrieveFile(res, node, fname):
    """Callback after a RetrieveFilename completes."""
    print "retrieve of %s succeeded" % fname
    return res # <- *VITAL* for concurrent dup ops to succeed.
def testRetrieveFile(node, fname):
    """Kick off a RetrieveFilename op for fname; returns its deferred."""
    d = RetrieveFilename(node, fname).deferred
    d.addCallback(checkRetrieveFile, node, fname)
    d.addErrback(testError, fname, node)
    return d
def retrieveSequential(r, node, filenamelist, desc):
def loop(r, node, filenamelist, desc):
if filenamelist:
fname = filenamelist.pop()
print "testing retrieve (%s) %s" % (desc, fname)
d = testRetrieveFile(node, fname)
d.addCallback(loop, node, filenamelist, desc)
d.addErrback(testError)
return d
else:
print "retrieve sequential (%s) done" % desc
print "test retrieveSequential %s" % desc
return loop(None, node, filenamelist, desc)
def storeSuccess(r, desc):
    """Callback: report that the store stage labelled `desc` succeeded."""
    print "%s succeeded" % desc
def storeConcurrent(r, node, files, desc):
#print "r was %s" % r
print "test storeConcurrent %s" % desc
dlist = []
for file in files:
d = testStoreFile(node, file)
dlist.append(d)
dl = FludDefer.ErrDeferredList(dlist)
dl.addCallback(storeSuccess, desc)
dl.addErrback(testError)
return dl
def checkStoreFile(res, node, fname):
    """Verify fname now appears in the node's master metadata listing."""
    # relies on twisted's `defer` and `failure` being imported at module
    # scope
    master = listMeta(node.config)
    if fname not in master:
        return defer.fail(failure.DefaultException("file not stored"))
    else:
        print "store of %s appeared successful" % fname
        return res # <- *VITAL* for concurrent dup ops to succeed.
def testStoreFile(node, fname):
    """Kick off a StoreFile op for fname; returns its deferred."""
    d = StoreFile(node, fname).deferred
    d.addCallback(checkStoreFile, node, fname)
    d.addErrback(testError, fname, node)
    return d
def doTests(node, smallfnames, largefnames, dupsmall, duplarge):
    """Run the full store-then-retrieve sequence: concurrent stores of
    small/large/duplicate sets, then sequential retrieves of each set."""
    # one priming store so the node is warmed up before the concurrent runs
    d = testStoreFile(node, smallfnames[0])
    d.addCallback(storeConcurrent, node, smallfnames, "small")
    d.addCallback(storeConcurrent, node, largefnames, "large")
    d.addCallback(storeConcurrent, node, dupsmall, "small duplicates")
    d.addCallback(storeConcurrent, node, duplarge, "large duplicates")
    #d = storeConcurrent(None, node, dupsmall, "small duplicates")
    #d = storeConcurrent(None, node, duplarge, "large duplicates")
    d.addCallback(retrieveSequential, node, smallfnames, "small")
    d.addCallback(retrieveSequential, node, largefnames, "large")
    d.addCallback(retrieveSequential, node, dupsmall, "small duplicates")
    d.addCallback(retrieveSequential, node, duplarge, "large duplicates")
    return d
def cleanup(_, node, filenamelist):
    """Best-effort removal of fixture files, then schedule node shutdown."""
    for f in filenamelist:
        try:
            print "deleting %s" % f
            os.remove(f)
        except:
            # best-effort: file may already be gone
            print "couldn't remove %s" % f
    reactor.callLater(1, node.stop)
def generateTestFile(minSize):
    """Create a random file of at least roughly minSize bytes; returns its
    path.

    NOTE(review): tempfile.mktemp() is race-prone (mkstemp is the safe
    form), and `random` must be imported at module scope.
    """
    fname = tempfile.mktemp()
    f = open(fname, 'w')
    # write the same random chunk 51-100 times
    data = generateRandom(minSize/50)
    for i in range(0, 51+random.randrange(50)):
        f.write(data)
    f.close()
    # NOTE(review): fname from mktemp() is already absolute, so this join
    # is a no-op; the rename keeps the same path.
    filename = os.path.join("/tmp",fname)
    os.rename(fname,filename)
    return filename
def runTests(host, port, listenport=None):
    """Generate fixtures (including .dup copies for duplicate-store tests),
    join the network via host:port, and run doTests.  Blocks until the
    reactor stops."""
    f1 = generateTestFile(5120)
    f2 = generateTestFile(5120)
    # f3/f6 are byte-identical duplicates under different names
    f3 = f2+".dup"
    shutil.copy(f2, f3)
    f4 = generateTestFile(513000)
    f5 = generateTestFile(513000)
    f6 = f5+".dup"
    shutil.copy(f5, f6)
    node = FludNode(port=listenport)
    if port == None:
        port = node.config.port
    node.run()
    node.connectViaGateway(host, port)
    d = doTests(node, [f1, f2], [f4, f5], [f2, f3], [f5, f6])
    d.addBoth(cleanup, node, [f1, f2, f3, f4, f5, f6])
    node.join()
if __name__ == '__main__':
    # NOTE(review): requires `socket` at module scope; eval() on argv is
    # unsafe -- int() would do for these numeric ports.
    localhost = socket.getfqdn()
    if len(sys.argv) == 3:
        runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
    elif len(sys.argv) == 4:
        # talk to [1] on port [2], listen on port [3]
        runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
    else:
        print "must run this test against a flud network (no single node op)"
        print "usage: %s [<othernodehost othernodeport> |"\
                " <othernodehost othernodeport listenport>]" % sys.argv[0]
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket, tempfile
from twisted.python import failure
from StringIO import StringIO
from zlib import crc32
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
import flud.FludCrypto as FludCrypto
from flud.fencode import fencode, fdecode
from flud.protocol.FludCommUtil import *
from flud.FludDefer import ErrDeferredList
"""
Test code for primitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# metadatablock: (block#,n,k,blockdata)
metadatablock = fencode((1,20,40,'adfdsfdffffffddddddddddddddd'))
# Offset added to a file's crc32 metakey to fabricate a second, distinct
# metadata key for the same stored data.
fake_mkey_offset = 111111
def testerror(failure, message, node):
    """
    error handler for test errbacks

    Prints diagnostics and returns the failure so it keeps propagating.
    """
    print "testerror message: %s" % message
    print "testerror: %s" % str(failure)
    print "At least 1 test FAILED"
    return failure
def allGood(_, nKu):
    """Final callback: report overall success and pass the key through."""
    print "all tests PASSED"
    return nKu
def checkDELETE(res, nKu, fname, fkey, mkey, node, host, port, totalDelete):
    """ checks to ensure the file was deleted """
    # totalDelete = True if this delete op should remove all meta (and data)
    if totalDelete:
        # try to retrieve with any metakey, should fail
        print "expecting failed retrieve, any metakey"
        return testRETRIEVE(res, nKu, fname, fkey, True, node, host, port,
                lambda args=(res, nKu): allGood(*args), False)
    else:
        # try to retrieve with any metakey, should succeed
        print "expecting successful retrieve, any metakey"
        # the continuation deletes the remaining (offset) metakey, which
        # should then remove the data entirely
        return testRETRIEVE(res, nKu, fname, fkey, True, node, host, port,
                lambda args=(res, nKu, fname, fkey, mkey+fake_mkey_offset,
                node, host, port, True): testDELETE(*args))
def testDELETE(res, nKu, fname, fkey, mkey, node, host, port, totalDelete):
    """ Tests sendDelete, and invokes checkDELETE on success """
    print "starting testDELETE %s.%s" % (fname, mkey)
    #return checkDELETE(None, nKu, fname, fkey, mkey, node, host, port, False)
    deferred = node.client.sendDelete(fkey, mkey, host, port, nKu)
    deferred.addCallback(checkDELETE, nKu, fname, fkey, mkey, node, host, port,
            totalDelete)
    deferred.addErrback(testerror, "failed at testDELETE", node)
    return deferred
def checkVERIFY(res, nKu, fname, fkey, mkey, node, host, port, hash, newmeta):
    """ executes after testVERIFY """
    # res is the server's hash of the challenged byte range; compare as ints
    if long(hash, 16) != long(res, 16):
        raise failure.DefaultException("verify didn't match: %s != %s"
                % (hash, res))
    print "checkVERIFY (%s) %s success" % (newmeta, fname)
    if newmeta:
        # new metadata was stored during verify; move on to deletion
        return testDELETE(res, nKu, fname, fkey, mkey, node, host, port, False)
    else:
        # re-verify, this time storing new metadata
        return testVERIFY(nKu, fname, fkey, mkey, node, host, port, True)
def testVERIFY(nKu, fname, fkey, mkey, node, host, port, newmeta):
""" Test sendVerify """
# newmeta, if True, will generate new metadata to be stored during verify
if newmeta:
thismkey = mkey+fake_mkey_offset
else:
thismkey = mkey
print "starting testVERIFY (%s) %s.%s" % (newmeta, fname, thismkey)
fd = os.open(fname, os.O_RDONLY)
fsize = os.fstat(fd)[stat.ST_SIZE]
length = 20
offset = random.randrange(fsize-length)
os.lseek(fd, offset, 0)
data = os.read(fd, length)
os.close(fd)
hash = FludCrypto.hashstring(data)
deferred = node.client.sendVerify(fkey, offset, length, host, port, nKu,
(thismkey, StringIO(metadatablock)))
deferred.addCallback(checkVERIFY, nKu, fname, fkey, mkey, node, host,
port, hash, newmeta)
deferred.addErrback(testerror, "failed at testVERIFY (%s)" % newmeta, node)
return deferred
def failedRETRIEVE(res, nextCallable):
    """Errback for an expected-to-fail RETRIEVE.

    The failure in `res` is deliberately ignored; control simply advances
    to the next stage of the test chain.
    """
    next_step = nextCallable
    return next_step()
def checkRETRIEVE(res, nKu, fname, fkey, mkey, node, host, port, nextCallable):
    """ Compares the file that was stored with the one that was retrieved """
    # res is the list of paths the retrieve produced; the data file is the
    # one whose name ends with the CAS key.
    f1 = open(fname)
    filename = [f for f in res if f[-len(fkey):] == fkey][0]
    f2 = open(filename)
    try:
        if (f1.read() != f2.read()):
            raise failure.DefaultException(
                    "upload/download (%s, %s) files don't match" % (fname,
                    os.path.join(node.config.clientdir, fkey)))
    finally:
        # bug fix: f1/f2 now close on every path (they previously leaked
        # when the first list comprehension or read raised unexpectedly)
        f1.close()
        f2.close()
    # mkey == True means "any metadata is acceptable"; otherwise check that
    # the specific metadata block we stored came back intact.
    if mkey != True:
        expectedmeta = "%s.%s.meta" % (fkey, mkey)
        metanames = [f for f in res if f[-len(expectedmeta):] == expectedmeta]
        if not metanames:
            raise failure.DefaultException("expected metadata was missing")
        f3 = open(metanames[0])
        try:
            md = f3.read()
        finally:
            # bug fix: f3 was never closed
            f3.close()
        if md != metadatablock:
            raise failure.DefaultException("upload/download metadata doesn't"
                    " match (%s != %s)" % (md, metadatablock))
    return nextCallable()
def testRETRIEVE(res, nKu, fname, fkey, mkey, node, host, port, nextCallable,
        expectSuccess=True):
    """ Tests sendRetrieve, and invokes checkRETRIEVE on success """
    # When expectSuccess is False, a failure is the desired outcome and
    # failedRETRIEVE advances the chain instead of reporting an error.
    print "starting testRETRIEVE %s.%s" % (fname, mkey)
    deferred = node.client.sendRetrieve(fkey, host, port, nKu, mkey)
    deferred.addCallback(checkRETRIEVE, nKu, fname, fkey, mkey, node, host,
            port, nextCallable)
    if expectSuccess:
        deferred.addErrback(testerror, "failed at testRETRIEVE", node)
    else:
        deferred.addErrback(failedRETRIEVE, nextCallable)
    return deferred
def testSTORE2(nKu, fname, fkey, node, host, port):
    """Store the same file again under a second (offset) metakey, then
    retrieve and verify it."""
    mkey = crc32(fname)
    # a distinct metakey for the same data, offset from the original
    mkey2 = mkey+(2*fake_mkey_offset)
    print "starting testSTORE %s.%s" % (fname, mkey2)
    deferred = node.client.sendStore(fname, (mkey2, StringIO(metadatablock)),
            host, port, nKu)
    deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey2, node, host,
            port, lambda args=(nKu, fname, fkey, mkey, node, host, port,
            False): testVERIFY(*args))
    deferred.addErrback(testerror, "failed at testSTORE", node)
    return deferred
def testSTORE(nKu, fname, fkey, node, host, port):
    """ Tests sendStore, and invokes testRETRIEVE on success """
    # the metakey is the crc32 of the filename
    mkey = crc32(fname)
    print "starting testSTORE %s.%s" % (fname, mkey)
    deferred = node.client.sendStore(fname, (mkey, StringIO(metadatablock)),
            host, port, nKu)
    deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey, node, host, port,
            lambda args=(nKu, fname, fkey, node, host, port): testSTORE2(*args))
    deferred.addErrback(testerror, "failed at testSTORE", node)
    return deferred
def testID(node, host, port):
    """ Tests sendGetID(); subsequent stages are chained on by runTests() """
    print "starting testID"
    deferred = node.client.sendGetID(host, port)
    deferred.addErrback(testerror, "failed at testID", node)
    return deferred
def testAggSTORE(nKu, aggFiles, node, host, port):
    """Store several small files concurrently (aggregation case), then
    retrieve and verify each; gathers results in an ErrDeferredList."""
    print "starting testAggSTORE"
    dlist = []
    for fname, fkey in aggFiles:
        mkey = crc32(fname)
        print "testAggSTORE %s (%s)" % (fname, mkey)
        deferred = node.client.sendStore(fname, (mkey, StringIO(metadatablock)),
                host, port, nKu)
        # loop variables are captured via default-argument binding, so each
        # lambda sees its own fname/fkey/mkey
        deferred.addCallback(testRETRIEVE, nKu, fname, fkey, mkey, node, host,
                port, lambda args=(nKu, fname, fkey, mkey, node, host,
                port, False): testVERIFY(*args))
        deferred.addErrback(testerror, "failed at testAggSTORE", node)
        dlist.append(deferred)
    dl = ErrDeferredList(dlist)
    dl.addCallback(allGood, nKu)
    dl.addErrback(testerror, "failed at testAggSTORE", node)
    return dl
def cleanup(_, node, filenamelist):
    """Best-effort removal of fixture files, then schedule node shutdown."""
    for f in filenamelist:
        try:
            os.remove(f)
        except:
            # best-effort: file may already be gone
            print "couldn't remove %s" % f
    reactor.callLater(1, node.stop)
def generateTestData(minSize):
    """Create a random file of at least roughly minSize bytes, renamed to
    its own CAS key; returns (filename, filekey).

    NOTE(review): tempfile.mktemp() is race-prone; mkstemp() is the safe
    form, but this is test-only code.
    """
    fname = tempfile.mktemp()
    f = open(fname, 'w')
    # write the same random chunk 51-100 times
    data = FludCrypto.generateRandom(minSize/50)
    for i in range(0, 51+random.randrange(50)):
        f.write(data)
    f.close()
    filekey = FludCrypto.hashfile(fname)
    filekey = fencode(int(filekey, 16))
    filename = os.path.join("/tmp",filekey)
    os.rename(fname,filename)
    return (filename, filekey)
def runTests(host, port=None, listenport=None):
    """Generate large/small/aggregate fixtures, start a node, and run the
    chained STORE/RETRIEVE/VERIFY/DELETE tests against host:port."""
    (largeFilename, largeFilekey) = generateTestData(512000)
    (smallFilename, smallFilekey) = generateTestData(5120)
    aggFiles = []
    for i in range(4):
        aggFiles.append(generateTestData(4096))
    node = FludNode(port=listenport)
    if port == None:
        port = node.config.port
    node.run()
    d = testID(node, host, port)
    # testID's result (remote public key) feeds each stage as nKu
    d.addCallback(testSTORE, largeFilename, largeFilekey, node, host, port)
    d.addCallback(testSTORE, smallFilename, smallFilekey, node, host, port)
    d.addCallback(testAggSTORE, aggFiles, node, host, port)
    d.addBoth(cleanup, node, [i[0] for i in aggFiles] + [largeFilename,
            smallFilename])
    node.join()
def main():
    """Dispatch on argv: [host] [port] [listenport], defaulting to self-test.

    NOTE(review): eval() on argv executes arbitrary code; int() would be
    safer for these numeric port arguments.
    """
    localhost = socket.getfqdn()
    if len(sys.argv) == 1:
        runTests(localhost) # test by talking to self
    elif len(sys.argv) == 2:
        runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
    elif len(sys.argv) == 3:
        runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
    elif len(sys.argv) == 4:
        # talk to [1] on port [2], listen on port [3]
        runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
from twisted.internet import defer
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import *
from flud.FludDefer import ErrDeferredList
"""
Test code for kprimitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# XXX: check return from ops to see if they passed (e.g., if STORE fails, we
# are notified [currently] by the html page that is returned).
# XXX: should make a random file each time this is run...
# Number of concurrent DHT ops per stage, and how often to log progress.
CONCURRENT=50
CONCREPORT=10
node = None
# format of block metadata is
# {(i, datakey): storingNodeID, ..., 'n': n, 'm': m}
# where i<n+m, datakey is a sha256 of the data stored, and storingNodeID is
# either a nodeID or a list of nodeIDs
# NOTE(review): the comment above mentions keys 'n' and 'm', but the dict
# below actually carries 'k' and 'n' -- confirm against consumers.
testval = {(0, 802484L): 465705L, (1, 780638L): 465705L, (2, 169688L): 465705L,
        (3, 267175L): 465705L, (4, 648636L): 465705L, (5, 838315L): 465705L,
        (6, 477619L): 465705L, (7, 329906L): 465705L, (8, 610565L): 465705L,
        (9, 217811L): 465705L, (10, 374124L): 465705L, (11, 357214L): 465705L,
        (12, 147307L): 465705L, (13, 427751L): 465705L, (14, 927853L): 465705L,
        (15, 760369L): 465705L, (16, 707029L): 465705L, (17, 479234L): 465705L,
        (18, 190455L): 465705L, (19, 647489L): 465705L, (20, 620470L): 465705L,
        (21, 777532L): 465705L, (22, 622383L): 465705L, (23, 573283L): 465705L,
        (24, 613082L): 465705L, (25, 433593L): 465705L, (26, 584543L): 465705L,
        (27, 337485L): 465705L, (28, 911014L): 465705L, (29, 594065L): 465705L,
        (30, 375876L): 465705L, (31, 726818L): 465705L, (32, 835759L): 465705L,
        (33, 814060L): 465705L, (34, 237176L): 465705L, (35, 538268L): 465705L,
        (36, 272650L): 465705L, (37, 314058L): 465705L, (38, 257714L): 465705L,
        (39, 439931L): 465705L, 'k': 20, 'n': 20}
# Console logger for test progress output.
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
        ' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
logger.setLevel(logging.DEBUG)
def suitesuccess(results):
    """Final callback: the whole suite passed; pass results through."""
    logger.info("all tests in suite passed")
    #print results
    return results
def suiteerror(failure):
    """Final errback: log and propagate the suite-level failure."""
    logger.info("suite did not complete")
    logger.info("DEBUG: %s" % failure)
    return failure
def stagesuccess(result, message):
    """Callback: log that the named stage succeeded; pass result through."""
    logger.info("stage %s succeeded" % message)
    return result
def stageerror(failure, message):
    """Errback: log that the named stage failed; propagate the failure."""
    logger.info("stage %s failed" % message)
    #logger.info("DEBUG: %s" % failure)
    return failure
def itersuccess(res, i, message):
    """Per-iteration callback: log every CONCREPORT-th success only."""
    if i % CONCREPORT == 0:
        logger.info("itersuccess: %s" % message)
    return res
def itererror(failure, message):
    """Errback for one of the concurrent requests.

    Logs the stage message, dumps the failure's traceback, and re-raises by
    returning the failure so the enclosing DeferredList sees the error.
    """
    logging.getLogger('test').info("itererror message: %s" % message)
    failure.printTraceback()
    return failure
def endtests(res, nKu, node, host, port):
    """Final stage: verify the last kFindValue result matches `testval`.

    Fix: the original *returned* a DefaultException instance.  Twisted only
    routes raised exceptions (or Failure objects) to errbacks, so a value
    mismatch flowed down the callback chain as a normal result and the
    suite reported success.  Raise instead so suiteerror fires.
    """
    try:
        res = fdecode(res)
    except ValueError:
        # res may already be a decoded python object; use it as-is
        pass
    if res != testval:
        raise failure.DefaultException("retrieved value does not match"
                " stored value: '%s' != '%s'" % (res, testval))
    logger.log(logging.INFO,"testkFindVal PASSED: %s\n" % str(res))
    logger.log(logging.INFO,"all tests PASSED")
    return res
def testkFindVal(res, nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent local kFindValue lookups for random keys.

    `res` is the previous stage's result (logged only).  All deferreds are
    gathered in an ErrDeferredList; on success the chain ends in endtests.
    """
    logger.log(logging.INFO,"testSendkFindVal PASSED: %s\n" % str(res))
    logger.log(logging.INFO,"attempting testkFindValue")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.kFindValue(key)
        deferred.addErrback(itererror, "kFindValue")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kFindValue")
    d.addErrback(stageerror, 'kFindValue')
    d.addCallback(endtests, nKu, node, host, port)
    return d
def testSendkFindVal(res, nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent sendkFindValue requests directly at host:port.

    `res` is the previous stage's result (logged only); chains into
    testkFindVal on success.
    """
    logger.log(logging.INFO, "testkStore PASSED: %s\n" % str(res))
    logger.log(logging.INFO, "attempting testSendkFindValue")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.sendkFindValue(host, port, key)
        deferred.addErrback(itererror, "sendkFindValue")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkFindValue")
    d.addErrback(stageerror, 'sendkFindValue')
    d.addCallback(testkFindVal, nKu, node, host, port, num)
    return d
def testkStore(res, nKu, node, host, port, num=CONCURRENT):
    """Store `testval` under `num` random keys via the local kStore op.

    `res` is the previous stage's result (logged only); chains into
    testSendkFindVal on success.
    """
    logger.log(logging.INFO, "testSendkStore PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testkStore")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.kStore(key, testval)
        deferred.addErrback(itererror, "kStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kStore")
    d.addErrback(stageerror, 'kStore')
    d.addCallback(testSendkFindVal, nKu, node, host, port, num)
    return d
def testSendkStore(res, nKu, node, host, port, num=CONCURRENT):
    """Store `testval` under `num` random keys directly at host:port.

    `res` is the previous stage's result (logged only); chains into
    testkStore on success.
    """
    logger.log(logging.INFO, "testkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testSendkStore")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.sendkStore(host, port, key, testval)
        deferred.addErrback(itererror, "sendkStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkStore")
    d.addErrback(stageerror, 'sendkStore')
    d.addCallback(testkStore, nKu, node, host, port, num)
    return d
def testkFindNode(res, nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent local kFindNode lookups for random keys.

    `res` is the previous stage's result (logged only); chains into
    testSendkStore on success.
    """
    logger.log(logging.INFO, "testSendkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting kFindNode")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.kFindNode(key)
        deferred.addErrback(itererror, "kFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kFindNode")
    d.addErrback(stageerror, 'kFindNode')
    d.addCallback(testSendkStore, nKu, node, host, port, num)
    return d
def testSendkFindNode(nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent sendkFindNode requests directly at host:port.

    First stage after sendGetID, so its first argument is the remote node's
    key (nKu) rather than a previous stage's result.  Chains into
    testkFindNode on success.
    """
    logger.log(logging.INFO, "testkGetID PASSED")
    logger.log(logging.INFO, "attempting sendkFindNode")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.sendkFindNode(host, port, key)
        deferred.addErrback(itererror, "sendkFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkFindNode")
    d.addErrback(stageerror, 'sendkFindNode')
    d.addCallback(testkFindNode, nKu, node, host, port, num)
    return d
def testGetID(node, host, port, num=CONCURRENT):
    """ Tests sendGetID(), and invokes testSendkFindNode on success """
    deferred = node.client.sendGetID(host, port)
    # sendGetID's result (the remote node's key) becomes testSendkFindNode's
    # first argument, kicking off the whole staged suite
    deferred.addCallback(testSendkFindNode, node, host, port, num)
    deferred.addErrback(stageerror, "testGetID")
    return deferred
def runTests(host, port=None, listenport=None):
    """Start a FludNode, run the staged k-op suite against host:port, and
    block until the node shuts down.

    Fixes: the local `num` was computed but CONCURRENT was passed to
    testGetID instead (changing `num` had no effect); also `is None` instead
    of `== None` for the sentinel test.
    """
    num = CONCURRENT
    #num = 5
    node = FludNode(port=listenport)
    if port is None:
        # default to the target node's own configured port
        port = node.config.port
    node.run()
    d = testGetID(node, host, port, num)
    d.addCallback(suitesuccess)
    d.addErrback(suiteerror)
    d.addBoth(cleanup, node)
    node.join()
    #node.start() # doesn't work, because reactor may not have started
    #              # listening by time requests start flying
def cleanup(_, node):
    """Schedule node shutdown shortly after the suite completes.

    Runs as the final addBoth handler, so `_` may be a result or a failure.
    """
    logger.info("shutting down in 1 seconds...")
    time.sleep(1)
    # NOTE(review): `reactor` is not imported explicitly in this file;
    # presumably it arrives via `from flud.protocol.FludCommUtil import *`
    # -- confirm.
    reactor.callLater(1, node.stop)
    logger.info("done cleaning up")
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
print "Warning: testing against self may result in timeout failures"
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
#!/usr/bin/python
import time, os, stat, random, sys, logging, socket
from twisted.python import failure
from twisted.internet import defer
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
from flud.protocol.FludCommUtil import *
from flud.fencode import *
from flud.FludDefer import ErrDeferredList
"""
Test code for kprimitive operations. These ops include all of the descendents
of ROOT and REQUEST in FludProtocol.
"""
# XXX: check return from ops to see if they passed (e.g., if STORE fails, we
# are notified [currently] by the html page that is returned).
# XXX: should make a random file each time this is run...
CONCURRENT=50
CONCREPORT=10
node = None
# format of block metadata is
# {(i, datakey): storingNodeID, ..., 'n': n, 'm': m}
# where i<n+m, datakey is a sha256 of the data stored, and storingNodeID is
# either a nodeID or a list of nodeIDs
testval = {(0, 802484L): 465705L, (1, 780638L): 465705L, (2, 169688L): 465705L,
(3, 267175L): 465705L, (4, 648636L): 465705L, (5, 838315L): 465705L,
(6, 477619L): 465705L, (7, 329906L): 465705L, (8, 610565L): 465705L,
(9, 217811L): 465705L, (10, 374124L): 465705L, (11, 357214L): 465705L,
(12, 147307L): 465705L, (13, 427751L): 465705L, (14, 927853L): 465705L,
(15, 760369L): 465705L, (16, 707029L): 465705L, (17, 479234L): 465705L,
(18, 190455L): 465705L, (19, 647489L): 465705L, (20, 620470L): 465705L,
(21, 777532L): 465705L, (22, 622383L): 465705L, (23, 573283L): 465705L,
(24, 613082L): 465705L, (25, 433593L): 465705L, (26, 584543L): 465705L,
(27, 337485L): 465705L, (28, 911014L): 465705L, (29, 594065L): 465705L,
(30, 375876L): 465705L, (31, 726818L): 465705L, (32, 835759L): 465705L,
(33, 814060L): 465705L, (34, 237176L): 465705L, (35, 538268L): 465705L,
(36, 272650L): 465705L, (37, 314058L): 465705L, (38, 257714L): 465705L,
(39, 439931L): 465705L, 'k': 20, 'n': 20}
logger = logging.getLogger('test')
screenhandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s:'
' %(message)s', datefmt='%H:%M:%S')
screenhandler.setFormatter(formatter)
logger.addHandler(screenhandler)
logger.setLevel(logging.DEBUG)
def suitesuccess(results):
    """Final callback for the whole suite: log success, pass results through."""
    logging.getLogger('test').info("all tests in suite passed")
    return results
def suiteerror(failure):
    """Final errback: report that the suite aborted, then propagate the failure."""
    log = logging.getLogger('test')
    log.info("suite did not complete")
    log.info("DEBUG: %s" % failure)
    return failure
def stagesuccess(result, message):
    """Per-stage callback: log which stage passed and forward the result."""
    logging.getLogger('test').info("stage %s succeeded" % message)
    return result
def stageerror(failure, message):
    """Per-stage errback: log which stage failed and propagate the failure."""
    logging.getLogger('test').info("stage %s failed" % message)
    return failure
def itersuccess(res, i, message):
    """Callback for one of the concurrent requests.

    Logs progress only for every CONCREPORT-th request to keep the output
    readable, then passes the result through unchanged.
    """
    if i % CONCREPORT:
        return res
    logging.getLogger('test').info("itersuccess: %s" % message)
    return res
def itererror(failure, message):
    """Errback for one of the concurrent requests.

    Logs the stage message, dumps the failure's traceback, and propagates
    the failure so the enclosing DeferredList sees the error.
    """
    logging.getLogger('test').info("itererror message: %s" % message)
    failure.printTraceback()
    return failure
def endtests(res, nKu, node, host, port):
    """Final stage: verify the last kFindValue result matches `testval`.

    Fix: the original *returned* a DefaultException instance.  Twisted only
    routes raised exceptions (or Failure objects) to errbacks, so a value
    mismatch flowed down the callback chain as a normal result and the
    suite reported success.  Raise instead so suiteerror fires.
    """
    try:
        res = fdecode(res)
    except ValueError:
        # res may already be a decoded python object; use it as-is
        pass
    if res != testval:
        raise failure.DefaultException("retrieved value does not match"
                " stored value: '%s' != '%s'" % (res, testval))
    logger.log(logging.INFO,"testkFindVal PASSED: %s\n" % str(res))
    logger.log(logging.INFO,"all tests PASSED")
    return res
def testkFindVal(res, nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent local kFindValue lookups for random keys.

    `res` is the previous stage's result (logged only).  All deferreds are
    gathered in an ErrDeferredList; on success the chain ends in endtests.
    """
    logger.log(logging.INFO,"testSendkFindVal PASSED: %s\n" % str(res))
    logger.log(logging.INFO,"attempting testkFindValue")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.kFindValue(key)
        deferred.addErrback(itererror, "kFindValue")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kFindValue")
    d.addErrback(stageerror, 'kFindValue')
    d.addCallback(endtests, nKu, node, host, port)
    return d
def testSendkFindVal(res, nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent sendkFindValue requests directly at host:port.

    `res` is the previous stage's result (logged only); chains into
    testkFindVal on success.
    """
    logger.log(logging.INFO, "testkStore PASSED: %s\n" % str(res))
    logger.log(logging.INFO, "attempting testSendkFindValue")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.sendkFindValue(host, port, key)
        deferred.addErrback(itererror, "sendkFindValue")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkFindValue")
    d.addErrback(stageerror, 'sendkFindValue')
    d.addCallback(testkFindVal, nKu, node, host, port, num)
    return d
def testkStore(res, nKu, node, host, port, num=CONCURRENT):
    """Store `testval` under `num` random keys via the local kStore op.

    `res` is the previous stage's result (logged only); chains into
    testSendkFindVal on success.
    """
    logger.log(logging.INFO, "testSendkStore PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testkStore")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.kStore(key, testval)
        deferred.addErrback(itererror, "kStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kStore")
    d.addErrback(stageerror, 'kStore')
    d.addCallback(testSendkFindVal, nKu, node, host, port, num)
    return d
def testSendkStore(res, nKu, node, host, port, num=CONCURRENT):
    """Store `testval` under `num` random keys directly at host:port.

    `res` is the previous stage's result (logged only); chains into
    testkStore on success.
    """
    logger.log(logging.INFO, "testkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting testSendkStore")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.sendkStore(host, port, key, testval)
        deferred.addErrback(itererror, "sendkStore")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkStore")
    d.addErrback(stageerror, 'sendkStore')
    d.addCallback(testkStore, nKu, node, host, port, num)
    return d
def testkFindNode(res, nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent local kFindNode lookups for random keys.

    `res` is the previous stage's result (logged only); chains into
    testSendkStore on success.
    """
    logger.log(logging.INFO, "testSendkFindNode PASSED: %s" % str(res))
    logger.log(logging.INFO, "attempting kFindNode")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.kFindNode(key)
        deferred.addErrback(itererror, "kFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "kFindNode")
    d.addErrback(stageerror, 'kFindNode')
    d.addCallback(testSendkStore, nKu, node, host, port, num)
    return d
def testSendkFindNode(nKu, node, host, port, num=CONCURRENT):
    """Fire `num` concurrent sendkFindNode requests directly at host:port.

    First stage after sendGetID, so its first argument is the remote node's
    key (nKu) rather than a previous stage's result.  Chains into
    testkFindNode on success.
    """
    logger.log(logging.INFO, "testkGetID PASSED")
    logger.log(logging.INFO, "attempting sendkFindNode")
    dlist = []
    for i in range(num):
        # random 256-bit key in the DHT keyspace
        key = random.randrange(2**256)
        deferred = node.client.sendkFindNode(host, port, key)
        deferred.addErrback(itererror, "sendkFindNode")
        dlist.append(deferred)
    d = ErrDeferredList(dlist, returnOne=True)
    d.addCallback(stagesuccess, "sendkFindNode")
    d.addErrback(stageerror, 'sendkFindNode')
    d.addCallback(testkFindNode, nKu, node, host, port, num)
    return d
def testGetID(node, host, port, num=CONCURRENT):
    """ Tests sendGetID(), and invokes testSendkFindNode on success """
    deferred = node.client.sendGetID(host, port)
    # sendGetID's result (the remote node's key) becomes testSendkFindNode's
    # first argument, kicking off the whole staged suite
    deferred.addCallback(testSendkFindNode, node, host, port, num)
    deferred.addErrback(stageerror, "testGetID")
    return deferred
def runTests(host, port=None, listenport=None):
    """Start a FludNode, run the staged k-op test suite against host:port,
    and block until the node shuts down.

    Fixes: the local `num` was computed but CONCURRENT was passed to
    testGetID instead (changing `num` had no effect); also `is None` instead
    of `== None` for the sentinel test.
    """
    num = CONCURRENT
    #num = 5
    node = FludNode(port=listenport)
    if port is None:
        # default to the target node's own configured port
        port = node.config.port
    node.run()
    d = testGetID(node, host, port, num)
    d.addCallback(suitesuccess)
    d.addErrback(suiteerror)
    d.addBoth(cleanup, node)
    node.join()
    #node.start() # doesn't work, because reactor may not have started
    #              # listening by time requests start flying
def cleanup(_, node):
    """Schedule node shutdown shortly after the suite completes.

    Runs as the final addBoth handler, so `_` may be a result or a failure.
    """
    logger.info("shutting down in 1 seconds...")
    time.sleep(1)
    # NOTE(review): `reactor` is not imported explicitly in this file;
    # presumably it arrives via `from flud.protocol.FludCommUtil import *`
    # -- confirm.
    reactor.callLater(1, node.stop)
    logger.info("done cleaning up")
"""
Main currently invokes test code
"""
if __name__ == '__main__':
localhost = socket.getfqdn()
if len(sys.argv) == 1:
print "Warning: testing against self may result in timeout failures"
runTests(localhost) # test by talking to self
elif len(sys.argv) == 2:
runTests(localhost, eval(sys.argv[1])) # talk to self on port [1]
elif len(sys.argv) == 3:
runTests(sys.argv[1], eval(sys.argv[2])) # talk to [1] on port [2]
elif len(sys.argv) == 4:
# talk to [1] on port [2], listen on port [3]
runTests(sys.argv[1], eval(sys.argv[2]), eval(sys.argv[3]))
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.