# (extraction artifact: dataset table header, not part of the original source)
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time
from fbutil import *
import simplejson
import freebase.rison as rison
from freebase.api import attrdict
def cmd_help(fb, command=None):
"""get help on commands
%prog help [cmd]
"""
if command is not None:
cmd = fb.commands[command]
print ' %s: %s' % (cmd.name, cmd.shortdoc)
print cmd.doc
return
print """
the interface to this tool is loosely modeled on the
"svn" command-line tool. many commands work slightly
differently from their unix and svn equivalents -
read the doc for the command first.
use "%s help <subcommand>" for help on a particular
subcommand.
""" % fb.progpath
fb.oparser.print_help()
print 'available subcommands:'
cmds = sorted(fb.commands.keys())
for fk in cmds:
cmd = fb.commands[fk]
print ' %s: %s' % (cmd.name, cmd.shortdoc)
def cmd_wikihelp(fb):
    """get help on commands in mediawiki markup format
    %prog wikihelp
    """
    print """usage: %s subcommand [ args ... ]
the interface to this tool is loosely modeled on the
"svn" command-line tool. many commands work slightly
differently from their unix and svn equivalents -
read the doc for the command first.
""" % fb.progpath
    print 'subcommands:'
    # emit one mediawiki ==== section ==== per subcommand, in name order
    for fk in sorted(fb.commands.keys()):
        cmd = fb.commands[fk]
        # mediawiki markup cares about the difference between
        # a blank line and a blank line with a bunch of
        # spaces at the start of it. ewww.
        doc = re.sub(r'\r?\n {0,8}', '\n ', cmd.doc)
        print '==== %s %s: %s ====' % (fb.progpath, cmd.name, cmd.shortdoc)
        print doc
        print
# this command is disabled because it relies on some svn glue that
# was not sufficiently well thought out.
# (the old_ prefix keeps import_commands() from registering it,
# which only picks up callables named cmd_*.)
def old_cmd_pwid(fb):
    """show the "current working namespace id"
    %prog pwid
    by default, relative ids can't be resolved. however, if you
    run the fb tool from a directory that has the svn property
    'freebase:id' set, relative ids will be resolved relative
    to that id. the idea is that you can create an svn tree
    that parallels the freebase namespace tree.
    for example:
    $ %prog pwid
    $ svn propset freebase:id "/freebase" .
    $ %prog pwid
    /freebase
    """
    # fb.cwid would be populated from the svn property when the
    # (disabled) svn integration is active
    if fb.cwid is not None:
        print fb.cwid
    else:
        print ''
def cmd_ls(fb, path=None):
"""list the keys in a namespace
%prog ls [id]
"""
path = fb.absid(path)
q = {'id': path,
'/type/namespace/keys': [{'value': None,
'namespace': {
'id': None,
'type': []
},
'optional':True
}]
}
r = fb.mss.mqlread(q)
if r is None:
raise CmdException('query for id %r failed' % path)
#sys.stdout.write(' '.join([mk.value for mk in r['/type/namespace/keys']]))
for mk in r['/type/namespace/keys']:
print mk.value
if 0:
suffix = ''
if ('/type/namespace' in mk.namespace.type
or '/type/domain' in mk.namespace.type):
suffix = '/'
print mk.value, mk.namespace.id+suffix
def cmd_mkdir(fb, path):
    """create a new freebase namespace
    %prog mkdir id
    create a new instance of /type/namespace at the given
    point in id space. if id already exists, it should be
    a namespace.
    """
    path = fb.absid(path)
    parent, leaf = dirsplit(path)
    # create the namespace object (idempotently) and key it into
    # its parent namespace in a single write
    fb.mss.mqlwrite({
        'create': 'unless_exists',
        'key': {
            'connect': 'insert',
            'namespace': parent,
            'value': leaf
        },
        'name': path,
        'type': '/type/namespace'
    })
def cmd_ln(fb, src, dst):
    """create a namespace key
    %prog ln srcid dstid
    create a new namespace link at dstid to the object
    currently at srcid.
    """
    src = fb.absid(src)
    dst = fb.absid(dst)
    parent, leaf = dirsplit(dst)
    # attach a new key (namespace=parent, value=leaf) to the source object
    fb.mss.mqlwrite({
        'id': src,
        'key': {
            'connect': 'insert',
            'namespace': parent,
            'value': leaf
        }
    })
def cmd_rm(fb, path):
    """unlink a namespace key
    %prog rm id
    remove the /type/key that connects the given id to its
    parent. id must be a path for this to make any sense.
    note that is like unix 'unlink' rather than 'rm'.
    it won't complain if the 'subdirectory' contains data,
    since that data will still be accessible to other queries.
    it's not like 'rm -rf' either, because it doesn't
    disturb anything other than the one directory entry.
    """
    path = fb.absid(path)
    parent, leaf = dirsplit(path)
    # disconnect only the single key linking this id to its parent
    fb.mss.mqlwrite({
        'id': path,
        'key': {
            'connect': 'delete',
            'namespace': parent,
            'value': leaf
        }
    })
def cmd_mv(fb, src, dst):
    """rename srcid to dstid.
    %prog mv srcid dstid
    equivalent to:
    $ fb ln <srcid> <dstid>
    $ fb rm <srcid>
    """
    # link first, then unlink: if the ln fails we have not yet
    # removed the original key
    cmd_ln(fb, src, dst)
    cmd_rm(fb, src)
def cmd_cat(fb, id, include_headers=False):
    """download a document from freebase to stdout
    %prog cat id
    equivalent to "%prog get id -".
    """
    # thin wrapper: delegate to cmd_get with stdout as the destination
    return cmd_get(fb, id, localfile='-', include_headers=include_headers)
def cmd_get(fb, id, localfile=None, include_headers=False):
    """download a file from freebase
    %prog get id [localfile]
    download the document or image with the given id from freebase
    into localfile. localfile '-' means stdout. localfile
    defaults to a file in the current directory with the same name
    as the last key in the path, possibly followed by a metadata
    extension like .html or .txt.
    """
    id = fb.absid(id)
    # unsafe split is fine here: we only want the last key as a filename
    dir,file = dirsplit_unsafe(id)
    def read_content(id, content_only=False):
        # resolve id to its /type/content metadata, following a
        # /common/document's content link one level when needed.
        # returns None for source_uri-backed documents (no content object),
        # raises CmdException for ids that are neither.
        c = attrdict(id=id)
        cq = { 'id': id,
               'type': [],
               '/common/document/content': None,
               '/common/document/source_uri': None,
               '/type/content/media_type': { 'name':None,
                                             'optional': True },
               #'/type/content/text_encoding': { 'name':None },
               '/type/content/blob_id':None,
             }
        cd = fb.mss.mqlread(cq)
        if '/type/content' in cd.type:
            c.media_type = cd['/type/content/media_type'].name
            #c.text_encoding = cd['/type/content/text_encoding'].name
            c.sha256 = cd['/type/content/blob_id']
            return c
        if content_only:
            raise CmdException('%s is not a content id' % id)
        cid = cd['/common/document/content']
        if cid is not None:
            return read_content(cid, content_only=True)
        # in this case we don't have a content object
        if cd['/common/document/source_uri'] is not None:
            return None
        raise CmdException('%s is not a content or document id' % id)
    content = read_content(id)
    log.debug('COBJ %r' % content)
    if content is not None:
        fileext = media_type_to_extension.get(content.media_type, None)
    else:
        fileext = None
    if localfile == '-':
        ofp = sys.stdout
    else:
        if localfile is None:
            implicit_outfile = True
            localfile = file
        elif re.match(r'[/\\]$', localfile):
            # NOTE(review): re.match anchors at the start of the string, so
            # this branch only fires for a bare '/' or '\' -- re.search
            # ("ends with a path separator") was presumably intended; confirm.
            implicit_outfile = True
            localfile = localfile + file
        else:
            implicit_outfile = False
        localfile = os.path.abspath(localfile)
        # add file extension based on content-type:
        # should be an option to disable this
        if implicit_outfile and fileext is not None:
            localfile += '.' + fileext
        # if we didn't explicitly name the output file,
        # don't destroy an existing file
        localfile_base = localfile
        count = 0
        while implicit_outfile and os.path.exists(localfile):
            count += 1
            localfile = '%s.%d' % (localfile_base, count)
        ofp = open(localfile, 'wb')
    body = fb.mss.trans(id)
    if include_headers:
        # XXX show content-type, what else?
        pass
    ofp.write(body)
    if localfile != '-':
        print ('%s saved (%d bytes)' % (localfile, len(body)))
        ofp.close()
def cmd_put(fb, localfile, id=None, content_type=None):
    """upload a document to freebase -- EXPERIMENTAL
    %prog put localfile [id] [content-type]
    upload the document or image in localfile to given freebase
    id. if localfile is '-' the data will be read from stdin.
    if id is missing or empty, a new document will be created.
    later the id might default to something computed from localfile
    and any svn attributes it has.
    output: a single line, the id of the document.
    """
    # NOTE(review): the '-' (stdin) case promised by the docstring is not
    # implemented -- open(localfile) below would try to open a file named '-'.
    if content_type is None:
        # infer the media type from the filename extension; no-extension
        # names pass through unchanged and fail the lookup below
        ext = re.sub('^.*\.([^/.]+)$', r'\1', localfile)
        media_type = extension_to_media_type.get(ext, None)
        if media_type is None:
            raise CmdException('could not infer a media type from extension %r: please specify it'
                               % ext)
        if media_type.startswith('text/'):
            # this is a bad assumption. should sniff it?
            text_encoding = 'utf-8'
            content_type = '%s;charset=%s' % (media_type, text_encoding)
        else:
            content_type = media_type
    new_id = None
    if id is not None:
        # if the target id doesn't already name a document, remember it
        # so the freshly created document can be keyed under it afterwards
        idinfo = fb.mss.mqlread({ 'id': id, 'type': '/common/document' })
        if idinfo is None:
            new_id = id
            id = None
    body = open(localfile, 'rb').read()
    r = fb.mss.upload(body, content_type, document_id=id)
    if new_id is None:
        print r.document
    else:
        cmd_ln(fb, r.document, new_id)
        print new_id
def cmd_dump(fb, id):
    """show all properties of a freebase object
    %prog dump object_id
    """
    id = fb.absid(id)
    # NOTE(review): under python 2 implicit relative imports this resolves
    # to the package-local inspect module (which provides inspect_object),
    # not the stdlib inspect -- confirm before enabling absolute imports.
    import inspect
    r = inspect.inspect_object(fb.mss, id)
    if r is None:
        raise CmdException('no match for id %r' % id)
    # r maps property id -> list of value dicts; emit one table row per value
    for k in sorted(r.keys()):
        vs = r[k]
        for v in vs:
            id = v.get('id', '')
            name = '%r' % (v.get('name') or v.get('value'))
            if name == 'None': name = ''
            type = v.get('type', '')
            # the "extra" column is type-specific detail
            if type == '/type/text':
                extra = v.get('lang', '')
            elif type == '/type/key':
                extra = v.get('namespace', '')
            else:
                extra = ''
            fb.trow(k, id, name, type, extra)
def cmd_pget(fb, id, propid):
    """get a property of a freebase object -- EXPERIMENTAL
    %prog pget object_id property_id
    get the property named by property_id from the object.
    XXX output quoting is not well specified.
    property_id must be a fully qualified id for now.
    prints one line for each match.
    if propid ends in '*' this does a wildcard for a particular type.
    """
    id = fb.absid(id)
    # '/some/type/prop' -> ('/some/type', 'prop');
    # a wildcard propid looks like '/some/type/*'
    proptype, propkey = dirsplit(propid)
    if propkey != '*':
        # look up the prop
        q = { 'id': id,
              propid: [{}],
            }
        r = fb.mss.mqlread(q)
        for v in r[propid]:
            if 'value' in v:
                print v.value
            else:
                print v.id
    else:
        # wildcard: pull every property, optionally constrained to the type
        q = { 'id': id,
              '*': [{}],
            }
        if isinstance(proptype, basestring):
            q['type'] = proptype
        r = fb.mss.mqlread(q)
        for k in sorted(r.keys()):
            v = r[k];
            if 'value' in v:
                print '%s %s' % (k, v.value)
            else:
                print '%s %s' % (k, v.id)
def cmd_pdel(fb, id, propid, oldval):
    """delete a property of a freebase object -- EXPERIMENTAL
    %prog pdel object_id property_id oldvalue
    set the property named by property_id on the object.
    value is an id or a json value. XXX this is ambiguous.
    property_id must be a fully qualified id for now.
    for now you need to provide a "oldval" argument,
    later this tool will query and perhaps prompt if the
    deletion is ambiguous.
    prints a single line, either 'deleted' or 'missing'
    """
    # cmd_pset with val=None means delete; oldval names the link to remove
    return cmd_pset(fb, id, propid, None, oldval)
def cmd_touch(fb):
    """bypass any cached query results the service may have. use sparingly.
    """
    # one mqlflush round-trip invalidates the service-side result cache
    fb.mss.mqlflush()
def cmd_pset(fb, id, propkey, val, oldval=None, extra=None):
"""set a property of a freebase object -- EXPERIMENTAL
%prog pset object_id property_id value
set the property named by property_id on the object.
value is an id or a json value. XXX this is ambiguous.
property_id must be a fully qualified id for now.
if the property should be a unique property, this will
write with 'connect:update'. if the property may have
multiple, it is written with 'connect:insert'.
prints a single line, either 'inserted' or 'present'
"""
id = fb.absid(id)
propid = fb.absprop(propkey)
# look up the prop
pq = { 'id': propid,
'type': '/type/property',
'name': None,
'unique': None,
'expected_type': {
'id': None,
'name': None,
'default_property': None,
'optional': True,
},
}
prop = fb.mss.mqlread(pq)
if prop is None:
raise CmdException('can\'t resolve property key %r - use an absolute id' % propid);
if propid.startswith('/type/object/') or propid.startswith('/type/value/'):
propkey = re.sub('/type/[^/]+/', '', propid);
else:
propkey = propid
wq = { 'id': id,
propkey: {
}
}
if val is None:
val = oldval
wq[propkey]['connect'] = 'delete'
elif prop.unique:
wq[propkey]['connect'] = 'update'
else:
wq[propkey]['connect'] = 'insert'
if prop.expected_type is None:
wq[propkey]['id'] = val
elif prop.expected_type.id not in value_types:
wq[propkey]['id'] = val
else:
wq[propkey]['value'] = val
if prop.expected_type.id == '/type/text':
if extra is not None:
lang = extra
else:
lang = '/lang/en'
wq[propkey]['lang'] = lang
if prop.expected_type.id == '/type/key':
if extra is not None:
wq[propkey]['namespace'] = extra
else:
raise CmdException('must specify a namespace to pset /type/key')
r = fb.mss.mqlwrite(wq)
print r[propkey]['connect']
def cmd_login(fb, username=None, password=None):
    """login to the freebase service
    %prog login [username [password]]
    cookies are maintained in a file so
    they are available to the next invocation.
    prompts for username and password if not given
    """
    import getpass
    if username is None:
        sys.stdout.write('freebase.com username: ')
        username = sys.stdin.readline()
        if not username:
            # readline() returns '' only at EOF
            # BUGFIX: message previously read 'usernmae'
            raise CmdException('username required for login')
        # strip the trailing newline left by readline()
        username = username.rstrip('\n')
    if password is None:
        # getpass suppresses echo while the password is typed
        password = getpass.getpass('freebase.com password: ')
    fb.mss.username = username
    fb.mss.password = password
    fb.mss.login()
def cmd_logout(fb):
    """logout from the freebase service
    %prog logout
    deletes the login cookies
    """
    # drop any ':port' suffix from the service host to get the cookie domain
    domain = fb.service_host.split(':')[0]
    fb.cookiejar.clear(domain=domain)
def cmd_find(fb, qstr):
    """print all ids matching a given constraint.
    if the query string starts with "{" it is treated as json.
    otherwise it is treated as o-rison.
    %prog find
    """
    if qstr.startswith('{'):
        q = simplejson.loads(qstr)
    else:
        # bare o-rison: wrap in parens to form a rison object literal
        q = rison.loads('(' + qstr + ')')
    # make sure each result carries an id to print
    if 'id' not in q:
        q['id'] = None
    # mqlreaditer pages through all matches, not just the first batch
    results = fb.mss.mqlreaditer(q)
    for r in results:
        print r.id
def cmd_q(fb, qstr):
    """run a freebase query.
    if the query string starts with "{" it is treated as json.
    otherwise it is treated as o-rison.
    dump the result as json.
    %prog q
    """
    if qstr.startswith('{'):
        q = simplejson.loads(qstr)
    else:
        # bare o-rison: wrap in parens to form a rison object literal
        q = rison.loads('(' + qstr + ')')
    # results could be streamed with a little more work
    results = fb.mss.mqlreaditer(q)
    print simplejson.dumps(list(results), indent=2)
def cmd_open(fb, id):
    """open a web browser on the given id. works on OSX only for now.
    %prog open /some/id
    """
    # hand the view url to the OSX 'open' utility
    url = 'http://www.freebase.com/view%s' % id
    os.system("open '%s'" % url)
def cmd_log(fb, id):
    """log changes pertaining to a given id.
    INCOMPLETE
    %prog log /some/id
    """
    # json-style aliases so the queries below can be pasted to/from MQL
    null = None
    true = True
    false = False
    # clauses shared by both link queries; newest links first
    baseq = {
        'type': '/type/link',
        'source': null,
        'master_property': null,
        'attribution': null,
        'timestamp': null,
        'operation': null,
        'valid': null,
        'sort': '-timestamp'
    };
    # queries[0]: links carrying literal values; queries[1]: object links
    queries = [
        {
            'target_value': { '*': null },
            'target': { 'id': null, 'name': null, 'optional': true },
        },
        {
            'target': { 'id': null, 'name': null },
        }]
    # merge the shared clauses in and listify each query for mqlreadmulti
    for i,q in list(enumerate(queries)):
        q.update(baseq)
        queries[i] = [q]
    # NOTE(review): neither query is constrained by the given id argument
    # yet -- presumably part of why this command is marked INCOMPLETE.
    valuesfrom,linksfrom = fb.mss.mqlreadmulti(queries)
    for link in linksfrom:
        # fb.trow(link.master_property.id, ...)
        print simplejson.dumps(link, indent=2)
    for link in valuesfrom:
        # fb.trow(link.master_property.id, ...)
        print simplejson.dumps(link, indent=2)
# ---- file boundary (extraction artifact) ----
#!/usr/bin/env python
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time, stat
from optparse import OptionParser
import getpass
import cookielib
import logging
import simplejson
from fbutil import FbException, CmdException, log, default_propkeys
console = logging.StreamHandler()
log.addHandler(console)
from freebase.api import HTTPMetawebSession, MetawebError, attrdict
# per-user directory holding the persistent cookie jar;
# stays None when $HOME is not set (cookies are then not persisted).
# (dict.has_key is deprecated; the 'in' operator works on py2 and py3.)
_cookiedir = None
if 'HOME' in os.environ:
    _cookiedir = os.path.join(os.environ['HOME'], '.pyfreebase')
class Command(object):
    """wraps one cmd_* function together with the short and long help
    text extracted from its docstring."""
    def __init__(self, module, name, func):
        self.module = module
        self.name = name          # subcommand name (cmd_ prefix stripped)
        self.func = func
        PROG = re.compile(r'\%prog')
        NEWLINE = re.compile(r'(?:\r?\n)+')
        # fill in self.doc
        if isinstance(func.__doc__, basestring):
            # substitute the tool name for %prog, then split the first
            # docstring line (shortdoc) from the rest (doc)
            doc = PROG.sub('fcl', func.__doc__ + '\n')
            self.shortdoc, self.doc = NEWLINE.split(doc, 1)
        else:
            self.shortdoc = '(missing documentation)'
            self.doc = '(missing documentation)'
class FbCommandHandler(object):
    """driver for the fcl command-line tool: owns the metaweb session,
    the cookie jar, the option parser and the table of subcommands."""
    def __init__(self):
        self.service_host = 'www.freebase.com'
        self.cookiejar = None
        # "current working id" prefix for resolving relative ids;
        # empty because the svn-based cwid support is disabled
        self.cwid = ''
        self.progpath = 'fcl'
        # subcommand name -> Command, filled in by import_commands()
        self.commands = {}
        self.cookiefile = None
        if _cookiedir is not None:
            self.cookiefile = os.path.join(_cookiedir, 'cookiejar')
    def init(self):
        """load persisted cookies (if any) and open the metaweb session."""
        if self.cookiefile is not None:
            self.cookiejar = cookielib.LWPCookieJar(self.cookiefile)
            if os.path.exists(self.cookiefile):
                try:
                    # ignore_discard keeps session cookies across runs
                    self.cookiejar.load(ignore_discard=True)
                except cookielib.LoadError:
                    log.warn('error loading cookies')
        #print 'start cookies %r' % self.cookiejar
        self.mss = HTTPMetawebSession(self.service_host,
                                      cookiejar=self.cookiejar)
    def absid(self, path):
        """resolve path to an absolute freebase id, or raise CmdException
        for relative ids (no cwid support at present)."""
        if path is None:
            path = ''
        if path.startswith('/'):
            return path
        if not isinstance(self.cwid, basestring) or not self.cwid.startswith('/'):
            # svn cwid support is disabled because it relies on some svn glue that
            # was not sufficiently well thought out.
            # raise CmdException("can't resolve relative id %r without cwid - see 'fcl help pwid'" % (path))
            raise CmdException("no support for relative id %r" % (path))
        if path == '' or path == '.':
            return self.cwid
        return self.cwid + '/' + path
    def absprop(self, propkey):
        """resolve a property key to an absolute property id."""
        if propkey.startswith('/'):
            return propkey
        # check schemas of /type/object and /type/value,
        # as well as other reserved names
        if propkey in default_propkeys:
            return default_propkeys[propkey]
        return self.absid(propkey)
    def thead(self, *args):
        # table header row: repr-quoted, tab separated
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)
    def trow(self, *args):
        # table data row: args must already be strings
        print '\t'.join(args)
        return
        # dead code below the return: a disabled repr-quoted variant
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)
    def save(self):
        """persist cookies, creating the private cookie dir if needed."""
        #print 'end cookies %r' % self.cookiejar
        if _cookiedir and self.cookiefile.startswith(_cookiedir):
            # create private cookiedir if needed
            if not os.path.exists(_cookiedir):
                os.mkdir(_cookiedir, 0700)
                os.chmod(_cookiedir, stat.S_IRWXU)
        if self.cookiejar is None:
            return
        self.cookiejar.save(ignore_discard=True)
        # save the cwd and other state too
    def import_commands(self, modname):
        """
        import new fb commands from a file
        """
        namespace = {}
        pyimport = 'from %s import *' % modname
        exec pyimport in namespace
        mod = sys.modules.get(modname)
        # every callable named cmd_* becomes a subcommand, keyed by
        # its name with the cmd_ prefix stripped
        commands = [Command(mod, k[4:], getattr(mod, k))
                    for k in getattr(mod, '__all__', dir(mod))
                    if (k.startswith('cmd_')
                        and callable(getattr(mod, k)))]
        for cmd in commands:
            log.info('importing %r' % ((cmd.name, cmd.func),))
            self.commands[cmd.name] = cmd
        log.info('imported %r from %r' % (modname, mod.__file__))
    def dispatch(self, cmd, args):
        """run subcommand cmd with positional args, reporting known
        error types on stderr instead of dumping tracebacks."""
        if cmd in self.commands:
            try:
                self.commands[cmd].func(self, *args)
            except KeyboardInterrupt, e:
                sys.stderr.write('%s\n' % (str(e),))
            except FbException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except CmdException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except MetawebError, e:
                sys.stderr.write('%s\n' % (str(e),))
        else:
            self.oparser.error('unknown subcommand %r, try "%s help"' % (cmd, self.progpath))
        # always persist cookies, even after a failed command
        self.save()
    def cmdline_main(self):
        """parse global options, configure logging and the session,
        then dispatch the subcommand."""
        op = OptionParser(usage='%prog [options] command [args...] ')
        self.oparser = op
        # stop option parsing at the first positional argument so
        # subcommand arguments pass through untouched
        op.disable_interspersed_args()
        op.add_option('-d', '--debug', dest='debug',
                      default=False, action='store_true',
                      help='turn on debugging output')
        op.add_option('-v', '--verbose', dest='verbose',
                      default=False, action='store_true',
                      help='verbose output')
        op.add_option('-V', '--very-verbose', dest='very_verbose',
                      default=False, action='store_true',
                      help='lots of debug output')
        op.add_option('-s', '--service', dest='service_host',
                      metavar='HOST',
                      default=self.service_host,
                      help='Freebase HTTP service address:port')
        op.add_option('-S', '--sandbox', dest='use_sandbox',
                      default=False, action='store_true',
                      help='shortcut for --service=sandbox.freebase.com')
        op.add_option('-c', '--cookiejar', dest='cookiefile',
                      metavar='FILE',
                      default=self.cookiefile,
                      help='Cookie storage file (will be created if missing)')
        options,args = op.parse_args()
        if len(args) < 1:
            op.error('required subcommand missing')
        loglevel = logging.WARNING
        if options.verbose:
            loglevel = logging.INFO
        if options.very_verbose:
            loglevel = logging.DEBUG
        console.setLevel(loglevel)
        log.setLevel(loglevel)
        # -S wins over -s
        if options.use_sandbox:
            self.service_host = 'sandbox.freebase.com'
        else:
            self.service_host = options.service_host
        self.cookiefile = options.cookiefile
        #self.progpath = sys.argv[0]
        self.init()
        self.mss.log.setLevel(loglevel)
        self.mss.log.addHandler(console)
        self.import_commands('freebase.fcl.commands')
        self.import_commands('freebase.fcl.mktype')
        cmd = args.pop(0)
        self.dispatch(cmd, args)
# entry point for script
def main():
    # turn off crlf output on windows so we work properly
    # with unix tools; msvcrt only exists on windows.
    try:
        import msvcrt
    except ImportError:
        pass
    else:
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    handler = FbCommandHandler()
    handler.cmdline_main()
if __name__ == '__main__':
    main()
# ---- file boundary (extraction artifact) ----
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import re
import logging
log = logging.getLogger()
class FbException(Exception):
    """base error raised by the fcl utility routines."""
    pass
class CmdException(Exception):
    """error in a user-invoked subcommand; reported without a traceback."""
    pass
# file extension -> list of media types; the first entry in each list
# is the preferred media type for that extension.
media_types = {
    'html': ['text/html'],
    'txt': ['text/plain'],
    'xml': ['text/xml',
            'application/xml'],
    'atom': ['application/atom+xml'],
    'js': ['text/javascript',
           'application/javascript',
           'application/x-javascript'],
    'json': ['application/json'],
    'jpg': ['image/jpeg',
            'image/pjpeg'],
    'gif': ['image/gif'],
    'png': ['image/png'],
}
# forward map: extension -> preferred media type
extension_to_media_type = dict((ext, types[0])
                               for ext, types in media_types.items())
# reverse map: every known media type -> its extension
media_type_to_extension = {}
for _ext, _types in media_types.items():
    for _mt in _types:
        media_type_to_extension[_mt] = _ext
# splits one trailing path component off a slash-separated id
DIRSPLIT = re.compile(r'^(.+)/([^/]+)$')
def dirsplit_unsafe(id):
    """split id into (parent, leaf); parent is None when id has no
    splittable '/' (e.g. a bare key or a single leading-slash key)."""
    m = DIRSPLIT.match(id)
    if m is None:
        return (None, id)
    return m.groups()
def dirsplit(id):
    """like dirsplit_unsafe, but rejects /guid pseudo-paths, which are
    not real namespace keypaths."""
    parent, leaf = dirsplit_unsafe(id)
    if parent == '/guid':
        raise FbException('%r is not a freebase keypath' % (id,))
    return (parent, leaf)
# expected_type ids whose properties carry a literal value rather than
# a link to another object
value_types = [
    '/type/text',
    '/type/key',
    '/type/rawstring',
    '/type/float',
    '/type/int',
    '/type/boolean',
    '/type/uri',
    '/type/datetime',
    '/type/id',
    '/type/enumeration',
]
# short property keys that resolve directly to /type/object or
# /type/value properties, bypassing namespace lookup
default_propkeys = {
    'value': '/type/value/value',
    'id': '/type/object/id',
    'guid': '/type/object/guid',
    'type': '/type/object/type',
    'name': '/type/object/name',
    'key': '/type/object/key',
    'timestamp': '/type/object/timestamp',
    'permission': '/type/object/permission',
    'creator': '/type/object/creator',
    'attribution': '/type/object/attribution',
}
# ---- file boundary (extraction artifact) ----
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
#
#
# wrap all the nastiness needed for a general mql inspect query
#
#
import os, sys, re
# json-style aliases so the query below can be pasted to/from MQL
null = None
true = True
false = False
# one big reflection query: every outgoing link, every incoming link
# and every literal value on an object, plus a few properties
# (/type/object/*, /type/namespace/keys) that /type/reflect
# does not enumerate.
inspect_query = {
    'name': null,
    'type': [],
    # outgoing object-valued links
    '/type/reflect/any_master': [{
        'optional':true,
        'id': null,
        'name': null,
        'link': {
            'master_property': {
                'id': null,
                'schema': null
            }
        }
    }],
    # incoming links (this object is the target)
    '/type/reflect/any_reverse': [{
        'optional':true,
        'id': null,
        'name': null,
        'link': {
            'master_property': {
                'id':null,
                'schema': null,
                'expected_type': null,
                'reverse_property': {
                    'id': null,
                    'schema': null,
                    'optional': true
                }
            }
        }
    }],
    # literal values (text values are re-queried below to get lang)
    '/type/reflect/any_value': [{
        'optional':true,
        'value': null,
        'link': {
            'master_property': {
                'id':null,
                'schema': null,
                'expected_type': null
            },
        }
    }],
    # /type/text values only, with their language
    't:/type/reflect/any_value': [{
        'optional':true,
        'type': '/type/text',
        'value': null,
        'lang': null,
        'link': {
            'master_property': {
                'id':null,
                'schema': null
            },
        }
    }],
    # properties not covered by /type/reflect
    '/type/object/creator': [{
        'optional':true,
        'id':null,
        'name':null
    }],
    '/type/object/timestamp': [{
        'optional':true,
        'value': null,
    }],
    '/type/object/key': [{
        'optional':true,
        'value': null,
        'namespace': null
    }],
    '/type/namespace/keys': [{
        'optional':true,
        'value': null,
        'namespace': null
    }]
}
def transform_result(result):
    """flatten an inspect_query result into {property_id: [value dicts]}.
    NOTE(review): proptypes is seeded with empty lists but never filled or
    returned (see the commented-out return) -- apparently an abandoned
    group-by-schema feature."""
    proptypes = {}
    props = {}
    # copy a property from a /type/reflect clause
    def pushtype(propdesc, prop):
        tid = propdesc['schema']
        propid = propdesc['id']
        if isinstance(prop, dict):
            # shallow-copy so the 'link' bookkeeping can be dropped
            prop = dict(prop)
            if 'link' in prop:
                prop.pop('link')
        if tid not in proptypes:
            proptypes[tid] = {}
        if propid not in proptypes[tid]:
            proptypes[tid][propid] = []
        if propid not in props:
            props[propid] = []
        props[propid].append(prop)
    # copy a property that isn't enumerated by /type/reflect
    def pushprop(propid):
        ps = result[propid]
        if ps is None:
            return
        # hack to infer the schema from id, not always reliable!
        schema = re.sub(r'/[^/]+$', '', propid)
        keyprop = dict(id=propid, schema=schema)
        for p in ps:
            pushtype(keyprop, p)
    # outgoing object links
    ps = result['/type/reflect/any_master'] or []
    for p in ps:
        propdesc = p.link.master_property
        pushtype(propdesc, p)
    # non-text non-key values
    ps = result['/type/reflect/any_value'] or []
    for p in ps:
        propdesc = p.link.master_property
        # /type/text values are queried specially
        # so that we can get the lang, so ignore
        # them here.
        if propdesc.expected_type == '/type/text':
            continue
        pushtype(propdesc, p)
    # text values
    ps = result['t:/type/reflect/any_value'] or []
    for p in ps:
        propdesc = p.link.master_property
        pushtype(propdesc, p)
    pushprop('/type/object/creator')
    pushprop('/type/object/timestamp')
    pushprop('/type/object/key')
    pushprop('/type/namespace/keys')
    # now the reverse properties
    ps = result['/type/reflect/any_reverse'] or []
    for prop in ps:
        propdesc = prop.link.master_property.reverse_property
        # synthetic property descriptor for the reverse of
        # a property with no reverse descriptor.
        # note the bogus id starting with '-'.
        if propdesc is None:
            # schema = prop.link.master_property.expected_type
            # if schema is None:
            #     schema = 'other'
            schema = 'other'
            propdesc = dict(id='-' + prop.link.master_property.id,
                            schema=schema)
        pushtype(propdesc, prop)
    #return proptypes
    return props
def inspect_object(mss, id):
    """run the reflection query for id over the given session and
    return {property_id: [values]}, or None when id does not match."""
    # copy the template query so the shared dict is never mutated
    q = dict(inspect_query, id=id)
    r = mss.mqlread(q)
    if r is None:
        return None
    return transform_result(r)
# ---- file boundary (extraction artifact) ----
#!/usr/bin/env python
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time, stat
from optparse import OptionParser
import getpass
import cookielib
import logging
import simplejson
from fbutil import FbException, CmdException, log, default_propkeys
# route log output to stderr via a handler we can retune from -v/-V
console = logging.StreamHandler()
log.addHandler(console)
from freebase.api import HTTPMetawebSession, MetawebError, attrdict
# per-user state directory (cookies); absent e.g. on windows without HOME
_cookiedir = None
# NOTE: dict.has_key is python 2 only
if os.environ.has_key('HOME'):
    _cookiedir = os.path.join(os.environ['HOME'], '.pyfreebase')
class Command(object):
    """A single fcl subcommand: wraps a cmd_* function together with a
    short one-line summary and the full help text parsed out of its
    docstring."""
    def __init__(self, module, name, func):
        # module: the python module the command was imported from
        # name:   the subcommand name (the 'cmd_' prefix already stripped)
        # func:   the callable implementing the command
        self.module = module
        self.name = name
        self.func = func

        PROG = re.compile(r'\%prog')
        NEWLINE = re.compile(r'(?:\r?\n)+')

        # fill in self.doc
        # NOTE: basestring is python 2 only
        if isinstance(func.__doc__, basestring):
            # substitute the optparse-style %prog placeholder, then
            # split the first line (short summary) from the body
            doc = PROG.sub('fcl', func.__doc__ + '\n')
            self.shortdoc, self.doc = NEWLINE.split(doc, 1)
        else:
            self.shortdoc = '(missing documentation)'
            self.doc = '(missing documentation)'
class FbCommandHandler(object):
    """Command-line driver for fcl.

    Owns the metaweb session, the persistent cookie jar, the registry
    of subcommands (name -> Command) and the global option parsing.
    """
    def __init__(self):
        self.service_host = 'www.freebase.com'
        self.cookiejar = None
        # current working id used to resolve relative ids (see absid)
        self.cwid = ''
        self.progpath = 'fcl'
        # name -> Command, filled in by import_commands()
        self.commands = {}

        self.cookiefile = None
        if _cookiedir is not None:
            self.cookiefile = os.path.join(_cookiedir, 'cookiejar')

    def init(self):
        """Create the cookie jar and the HTTP session.  Called after
        option parsing has had a chance to override the defaults."""
        if self.cookiefile is not None:
            self.cookiejar = cookielib.LWPCookieJar(self.cookiefile)
            if os.path.exists(self.cookiefile):
                try:
                    self.cookiejar.load(ignore_discard=True)
                except cookielib.LoadError:
                    # a corrupt jar is not fatal - start with no cookies
                    log.warn('error loading cookies')
        #print 'start cookies %r' % self.cookiejar
        self.mss = HTTPMetawebSession(self.service_host,
                                      cookiejar=self.cookiejar)

    def absid(self, path):
        """Resolve *path* to an absolute id.

        Absolute ids (leading '/') pass through; '' and '.' resolve to
        the current working id; anything else is joined onto it.
        Raises CmdException when no usable cwid is set.
        """
        if path is None:
            path = ''
        if path.startswith('/'):
            return path
        if not isinstance(self.cwid, basestring) or not self.cwid.startswith('/'):
            # svn cwid support is disabled because it relies on some svn glue that
            #  was not sufficiently well thought out.
            #raise CmdException("can't resolve relative id %r without cwid - see 'fcl help pwid'" % (path))
            raise CmdException("no support for relative id %r" % (path))
        if path == '' or path == '.':
            return self.cwid
        return self.cwid + '/' + path

    def absprop(self, propkey):
        """Resolve a property key: absolute ids pass through, reserved
        short names come from default_propkeys, everything else falls
        back to absid()."""
        if propkey.startswith('/'):
            return propkey
        # check schemas of /type/object and /type/value,
        #  as well as other reserved names
        if propkey in default_propkeys:
            return default_propkeys[propkey]
        return self.absid(propkey)

    def thead(self, *args):
        # print a tab-separated header row, repr-quoting each cell
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)

    def trow(self, *args):
        # print a tab-separated data row (cells must already be strings)
        print '\t'.join(args)
        return
        # NOTE(review): everything below the return is unreachable dead
        # code - it looks like an older repr-quoting variant kept around
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)

    def save(self):
        """Persist cookies to disk, creating the private state
        directory on first use."""
        #print 'end cookies %r' % self.cookiejar
        if _cookiedir and self.cookiefile.startswith(_cookiedir):
            # create private cookiedir if needed
            if not os.path.exists(_cookiedir):
                # NOTE: 0700 is the python 2 octal literal (0o700)
                os.mkdir(_cookiedir, 0700)
                os.chmod(_cookiedir, stat.S_IRWXU)
        if self.cookiejar is None:
            return
        self.cookiejar.save(ignore_discard=True)
        # save the cwd and other state too

    def import_commands(self, modname):
        """
        import new fb commands from a file
        """
        namespace = {}
        pyimport = 'from %s import *' % modname
        # NOTE: python 2 exec-statement syntax
        exec pyimport in namespace
        mod = sys.modules.get(modname)
        # register every callable named cmd_* (honoring __all__ if set),
        # keyed by the name with the 'cmd_' prefix stripped
        commands = [Command(mod, k[4:], getattr(mod, k))
                    for k in getattr(mod, '__all__', dir(mod))
                    if (k.startswith('cmd_')
                        and callable(getattr(mod, k)))]
        for cmd in commands:
            log.info('importing %r' % ((cmd.name, cmd.func),))
            self.commands[cmd.name] = cmd
        log.info('imported %r from %r' % (modname, mod.__file__))

    def dispatch(self, cmd, args):
        """Look up *cmd* in the registry and invoke it, converting the
        known exception types into one-line stderr messages.  Always
        flushes cookies afterwards, even on failure."""
        if cmd in self.commands:
            try:
                self.commands[cmd].func(self, *args)
            except KeyboardInterrupt, e:
                sys.stderr.write('%s\n' % (str(e),))
            except FbException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except CmdException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except MetawebError, e:
                sys.stderr.write('%s\n' % (str(e),))
        else:
            self.oparser.error('unknown subcommand %r, try "%s help"' % (cmd, self.progpath))
        self.save()

    def cmdline_main(self):
        """Parse the global options, import the built-in command
        modules and dispatch to the requested subcommand."""
        op = OptionParser(usage='%prog [options] command [args...] ')
        self.oparser = op
        # global options must come before the subcommand name
        op.disable_interspersed_args()
        # NOTE(review): --debug is parsed but options.debug is never
        # read below - presumably a leftover; confirm before removing
        op.add_option('-d', '--debug', dest='debug',
                      default=False, action='store_true',
                      help='turn on debugging output')
        op.add_option('-v', '--verbose', dest='verbose',
                      default=False, action='store_true',
                      help='verbose output')
        op.add_option('-V', '--very-verbose', dest='very_verbose',
                      default=False, action='store_true',
                      help='lots of debug output')
        op.add_option('-s', '--service', dest='service_host',
                      metavar='HOST',
                      default=self.service_host,
                      help='Freebase HTTP service address:port')
        op.add_option('-S', '--sandbox', dest='use_sandbox',
                      default=False, action='store_true',
                      help='shortcut for --service=sandbox.freebase.com')
        op.add_option('-c', '--cookiejar', dest='cookiefile',
                      metavar='FILE',
                      default=self.cookiefile,
                      help='Cookie storage file (will be created if missing)')
        options,args = op.parse_args()
        if len(args) < 1:
            op.error('required subcommand missing')
        # -v / -V tighten the log level progressively
        loglevel = logging.WARNING
        if options.verbose:
            loglevel = logging.INFO
        if options.very_verbose:
            loglevel = logging.DEBUG
        console.setLevel(loglevel)
        log.setLevel(loglevel)
        # -S overrides -s
        if options.use_sandbox:
            self.service_host = 'sandbox.freebase.com'
        else:
            self.service_host = options.service_host
        self.cookiefile = options.cookiefile
        #self.progpath = sys.argv[0]
        self.init()
        self.mss.log.setLevel(loglevel)
        self.mss.log.addHandler(console)
        # built-in command modules
        self.import_commands('freebase.fcl.commands')
        self.import_commands('freebase.fcl.mktype')
        cmd = args.pop(0)
        self.dispatch(cmd, args)
# entry point for script
def main():
    """Console-script entry point: build an FbCommandHandler and hand
    control to its option parser."""
    try:
        # turn off crlf output on windows so we work properly
        # with unix tools.
        import msvcrt
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        # not on windows - nothing to do
        pass
    fb = FbCommandHandler()
    fb.cmdline_main()
if __name__ == '__main__':
main()
| Python |
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time
from fbutil import *
def cmd_mkobj(fb, id, typeid='/common/topic', name=''):
"""create a new object with a given type -- EXPERIMENTAL
%prog mkobj new_id typeid name
create a new object with type typeid at the given
namespace location.
if present, name gives the display name of the new object.
"""
id = fb.absid(id)
nsid, key = dirsplit(id)
typeid = fb.absid(typeid)
if name == '':
name = key
wq = { 'create': 'unless_exists',
'id': None,
'name': name,
'type': typeid,
'key':{
'namespace': nsid,
'value': key
},
}
# TODO add included types
r = fb.mss.mqlwrite(wq)
print r.id,r.create
def cmd_mktype(fb, id, name=''):
"""create a new type -- EXPERIMENTAL
%prog mktype new_id name
create a new object with type Type at the given
namespace location.
this doesn't create any type hints.
if present, name gives the display name of the new property
"""
id = fb.absid(id)
nsid, key = dirsplit(id)
if name == '':
name = key
wq = { 'create': 'unless_exists',
'id': None,
'name': name,
'type': '/type/type',
'key':{
'namespace': nsid,
'value': key
},
}
r = fb.mss.mqlwrite(wq)
print r.id,r.create
def mkprop(fb, typeid, key, name='', vtype=None, master_property=None):
    """helper to create a new property
    """
    prop_name = name
    if prop_name == '':
        # default the display name to the key
        prop_name = key
    write_query = {
        'create': 'unless_exists',
        'id': None,
        'type': '/type/property',
        'name': prop_name,
        'schema': typeid,
        'key': {
            'namespace': typeid,
            'value': key
        }
    }
    # optional facets of the new property
    if vtype is not None:
        write_query['expected_type'] = vtype
    if master_property is not None:
        write_query['master_property'] = master_property
    return fb.mss.mqlwrite(write_query)
def cmd_mkprop(fb, id, name='', vtype=None, revkey=None, revname=''):
    """create a new property -- EXPERIMENTAL
    %prog mkprop new_id [name] [expected_type] [reverse_property] [reverse_name]
    create a new object with type Property at the given
    location. creates both the "schema" and "key" links
    for the property, but doesn't create any freebase property
    hints.
    if present, name gives the display name of the new property
    """
    id = fb.absid(id)
    if vtype is not None:
        vtype = fb.absid(vtype)
    # split /some/type/key into the enclosing type id and the new key
    typeid, key = dirsplit(id)
    r = mkprop(fb, typeid, key, name, vtype)
    # write the reverse property if specified
    print r.id, r.create
    if revkey is None:
        return
    # a reverse property needs a schema (the expected type) to live on.
    # NOTE(review): plain assert - a missing expected_type surfaces as
    # AssertionError rather than a friendly CmdException
    assert vtype is not None
    # the reverse property lives on vtype, expects typeid, and names
    # the property we just created as its master
    rr = mkprop(fb, vtype, revkey, revname, typeid, id)
    print rr.id, rr.create
def cmd_publish_type(fb, typeid):
"""try to publish a freebase type for the client
%prog publish_type typeid
set /freebase/type_profile/published to the /freebase/type_status
instance named 'Published'
should also try to set the domain to some namespace that
has type:/type/domain
"""
id = fb.absid(typeid)
w = {
'id': id,
'/freebase/type_profile/published': {
'connect': 'insert',
'type': '/freebase/type_status',
'name': 'Published'
}
}
r = fb.mss.mqlwrite(w)
print r['/freebase/type_profile/published']['connect']
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import sys
from freebase.api.session import HTTPMetawebSession
# module-level session against the sandbox service; its public methods
# are re-exported below as module-level functions
_base = HTTPMetawebSession("sandbox.freebase.com")
__all__ = ["HTTPMetawebSession"]
# we want to add base's functions to __init__.py
# so that we can say freebase.func() and really
# just call base.func()
# a little trick to refer to ourselves
self = sys.modules[__name__]
for funcname in dir(_base):
    # we only want the 'real' functions
    if not funcname.startswith("_"):
        func = getattr(_base, funcname)
        # let's make sure we're getting functions
        # instead of constants or whatever
        if callable(func):
            # we're setting these functions
            # so that they can be called like
            # freebase.funcname -> base.func()
            setattr(self, funcname, func)
            # make sure we import the base's
            # functions if we import freebase
            __all__.append(funcname)
# we don't want any self-referencing
# business going. Plus, this is cleaner.
del self
# we want dir(freebase) to be clean
# NOTE(review): 'func' is only bound when at least one public callable
# was found - this del would raise NameError otherwise; confirm that
# HTTPMetawebSession always exposes public callables
del funcname, func
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
# URI Templating in Python
#
# see http://bitworking.org/projects/URI-Templates/
# and http://bitworking.org/news/URI_Templates
#
# note that this implementation may go away soon in
# favor of joe gregorio's own code:
# http://code.google.com/p/uri-templates/
#
#
# this implementation can also parse URIs, as long as the
# template is sufficiently specific. to allow this to work
# the '/' character is forbidden in keys when parsing.
# later it should be possible to loosen this restriction.
#
#
# example:
# from whatever.uritemplate import expand_uri_template
# expand_uri_template('http://{host}/{file}',
# dict(host='example.org',
# file='fred'))
#
# TODO:
# allow parsing to be aware of http://www. and trailing /
# nail down quoting issues
#
import os, sys, re
import urllib
__all__ = ['expand_uri_template', 'URITemplate']
def expand_uri_template(template, args):
    """Expand a URI template using the given args dictionary.
    """
    compiled = URITemplate(template)
    return compiled.run(args)
def _uri_encode_var(v):
    # percent-encode a variable value for substitution into a URI; the
    # generous safe-list leaves most URI punctuation unescaped.
    # NOTE: urllib.quote is python 2 (urllib.parse.quote in python 3)
    return urllib.quote(v, safe="-_.~!$&'()*+,;=:/?[]#@")
class URITemplate(object):
    """A URI with simple {name}-style variable substitution.

    Compiling a template also builds a regular expression, so a
    concrete URI can be parsed back into its variable values as long
    as no variable value contains a '/'.
    """
    VARREF = re.compile(r'\{([0-9a-zA-Z_]+)\}')

    def __init__(self, s):
        """Compile a URITemplate from a string."""
        self.template = s
        self.params = []
        # VARREF.split alternates literal text (even slots) with
        # variable names (odd slots)
        pattern = ['^']
        for slot, piece in enumerate(self.VARREF.split(s)):
            if slot % 2:
                # a variable reference: remember its name; it matches
                # any run of characters not containing '/'
                self.params.append(piece)
                pattern.append('([^/]*)')
            else:
                # literal text: neutralize regexp metacharacters
                pattern.append(re.escape(piece))
        pattern.append('$')
        self._parser = re.compile(''.join(pattern))

    def __repr__(self):
        return '<URITemplate %r>' % self.template

    def run(self, args):
        """Expand the template using the given args."""
        def substitute(match):
            # missing keys expand to the empty string
            return _uri_encode_var(args.get(match.group(1), ''))
        return self.VARREF.sub(substitute, self.template)

    def parse(self, uri):
        """Try to parse a URI, extracting variable values."""
        match = self._parser.match(uri)
        if match is None:
            return None
        return dict(zip(self.params, match.groups()))
if __name__ == '__main__':
    #
    # testcases are imported from the URI::Template module on CPAN
    #
    # NOTE: urllib2 is python 2 only; this block fetches the suite
    # over the network at run time
    import urllib2, simplejson
    fp = urllib2.urlopen('http://search.cpan.org/src/BRICAS/URI-Template-0.09/t/data/spec.json')
    testsuite = simplejson.loads(fp.read())
    # simplejson yields unicode keys; the templates use byte strings
    vars = dict([(k.encode('utf-8'),v) for k,v in testsuite['variables'].items()])
    nsucceed = 0
    nfail = 0
    for test in testsuite['tests']:
        ut = URITemplate(test['template'])
        uri = ut.run(vars)
        if uri != test['expected']:
            print 'FAILED %r expected %r' % (uri, test['expected'])
            print '  vars: %r' % (vars,)
            nfail += 1
        else:
            nsucceed += 1
    print 'tests: %d succeeded, %d failed' % (nsucceed, nfail)
| Python |
import sys
from freebase.api.session import HTTPMetawebSession
import sandbox
__all__ = ["HTTPMetawebSession", "sandbox"]
# module-level session against the production service; its public
# methods are re-exported below as module-level functions
_base = HTTPMetawebSession("freebase.com")
# we want to add base's functions to __init__.py
# so that we can say freebase.func() and really
# just call base.func()
# a little trick to refer to __init__
# self isn't defined because __init__ is in
# a world in and of itself
self = sys.modules[__name__]
for funcname in dir(_base):
    # we only want the 'real' functions
    if not funcname.startswith("_"):
        func = getattr(_base, funcname)
        # let's make sure we're getting functions
        # instead of constants or whatever
        if callable(func):
            # we're setting these functions
            # so that they can be called like
            # freebase.funcname -> base.func()
            setattr(self, funcname, func)
            # make sure we import the base's
            # functions if we import freebase
            __all__.append(funcname)
# we don't want any self-referencing
# business going. Plus, this is cleaner.
del self
# we want dir(freebase) to be clean
# NOTE(review): 'func' is only bound when at least one public callable
# was found - this del would raise NameError otherwise
del funcname, func
| Python |
#
# rison for python (parser only so far)
# see http://mjtemplate.org/examples/rison.html for more info
#
######################################################################
#
# the rison parser is based on javascript openlaszlo-json:
# Author: Oliver Steele
# Copyright: Copyright 2006 Oliver Steele. All rights reserved.
# Homepage: http:#osteele.com/sources/openlaszlo/json
# License: MIT License.
# Version: 1.0
#
# hacked by nix for use in uris
# ported to python by nix
#
# TODO
#
# switch to unicode
# fall through to simplejson if first char is not in '!(' -
# this allows code to use just one parser
#
import os, sys, re
#import simplejson
simplejson = None
class ParserException(Exception):
    """Raised when a string cannot be parsed as rison."""
    pass
class Parser(object):
    """Recursive-descent parser for rison, a compact uri-safe
    serialization of JSON-like values.

    Usage::

        Parser().parse("(a:0,b:1)")   # -> {'a': 0, 'b': 1}

    All syntax errors raise ParserException.
    """
    # characters skipped between tokens; rison itself permits none
    WHITESPACE = ''
    #WHITESPACE = " \t\n\r\f"

    # we divide the uri-safe glyphs into three sets
    #  <rison> and <reserved> classes are illegal in ids.
    #   <rison> - used by rison (possibly later)
    #   <reserved> - not common in strings, reserved
    #not_idchar  = "'!=:(),*@$;&"
    idchar_punctuation = '_-./~'
    # every ascii char that may NOT appear in an id: anything neither
    # alphanumeric nor one of the punctuation chars above.  (the
    # punctuation set is inlined in the condition because a class-level
    # comprehension cannot see sibling class attributes under python 3
    # scoping rules; the value is identical.)
    not_idchar = ''.join([c for c in (chr(i) for i in range(127))
                          if not (c.isalnum()
                                  or c in '_-./~')])
    # additionally, we need to distinguish ids and numbers by first char
    not_idstart = "-0123456789"
    # regexp string matching a valid id
    idrx = ('[^' + not_idstart + not_idchar +
            '][^' + not_idchar + ']*')
    # regexp to check for valid rison ids
    id_ok_re = re.compile('^' + idrx + '$', re.M)
    # regexp to find the end of an id when parsing
    next_id_re = re.compile(idrx, re.M)

    def parse_json(self, str):
        """Parse either rison or json: anything that does not start
        with '!' or '(' is handed to simplejson.

        NOTE(review): the module sets ``simplejson = None`` above, so
        the json branch currently raises; confirm intent before use.
        """
        if len(str) > 0 and str[0] not in '!(':
            return simplejson.loads(str)
        return self.parse(str)

    def parse(self, str):
        """Parse a complete rison string; raise ParserException on any
        syntax error or trailing garbage."""
        self.string = str
        self.index = 0
        value = self.readValue()
        if self.next():
            # input remained after a complete value was read
            raise ParserException("unable to parse rison string %r" % (str,))
        return value

    def readValue(self):
        """Read one value at the current position: a '!' literal,
        object, quoted string, number or bare id."""
        c = self.next()
        if c is None:
            # end of input where a value was required.  (the original
            # fell through to "c in '-0123456789'" below, which raises
            # TypeError for None instead of the intended error.)
            raise ParserException("empty expression")
        if c == '!':
            return self.parse_bang()
        if c == '(':
            return self.parse_open_paren()
        if c == "'":
            return self.parse_single_quote()
        if c in '-0123456789':
            return self.parse_number()

        # fell through table, parse as an id
        s = self.string
        i = self.index-1

        m = self.next_id_re.match(s, i)
        if m:
            id = m.group(0)
            self.index = i + len(id)
            return id  # a string

        raise ParserException("invalid character: '" + c + "'")

    def parse_array(self):
        """Parse the elements of '!(' ... ')' after the '!(' prefix
        has been consumed."""
        ar = []
        while 1:
            c = self.next()
            if c == ')':
                return ar
            if c is None:
                raise ParserException("unmatched '!('")
            if len(ar):
                # subsequent elements must be ','-separated
                if c != ',':
                    raise ParserException("missing ','")
            elif c == ',':
                raise ParserException("extra ','")
            else:
                # first element: the char belongs to the value itself
                self.index -= 1
            ar.append(self.readValue())

    def parse_bang(self):
        """Parse a '!' escape: !t, !f, !n, or !( starting an array."""
        s = self.string
        if self.index >= len(s):
            # BUGFIX: the original indexed past the end of the string
            # here, raising IndexError instead of the intended error
            raise ParserException('"!" at end of input')
        c = s[self.index]
        self.index += 1
        if c not in self.bangs:
            raise ParserException('unknown literal: "!' + c + '"')
        x = self.bangs[c]
        if callable(x):
            # '!(' dispatches to parse_array
            return x(self)
        return x

    def parse_open_paren(self):
        """Parse the key:value pairs of '(' ... ')' after the opening
        paren has been consumed."""
        count = 0
        o = {}
        while 1:
            c = self.next()
            if c == ')':
                return o
            if count:
                # subsequent pairs must be ','-separated
                if c != ',':
                    raise ParserException("missing ','")
            elif c == ',':
                raise ParserException("extra ','")
            else:
                # first pair: the char belongs to the key itself
                self.index -= 1
            k = self.readValue()
            if self.next() != ':':
                raise ParserException("missing ':'")
            v = self.readValue()
            o[k] = v
            count += 1

    def parse_single_quote(self):
        """Parse a single-quoted string; '!' escapes a following
        "'" or '!'."""
        s = self.string
        i = self.index
        start = i
        segments = []
        while 1:
            if i >= len(s):
                raise ParserException('unmatched "\'"')
            c = s[i]
            i += 1
            if c == "'":
                break
            if c == '!':
                # flush the literal run before the escape
                if start < i-1:
                    segments.append(s[start:i-1])
                if i >= len(s):
                    # BUGFIX: a trailing '!' used to raise IndexError
                    raise ParserException('unmatched "\'"')
                c = s[i]
                i += 1
                if c in "!'":
                    segments.append(c)
                else:
                    raise ParserException('invalid string escape: "!'+c+'"')
                start = i
        # flush the final literal run (excluding the closing quote)
        if start < i-1:
            segments.append(s[start:i-1])
        self.index = i
        return ''.join(segments)

    # Also any number start (digit or '-')
    def parse_number(self):
        """Parse an int or float; the first char ('-' or digit) has
        already been consumed by readValue."""
        s = self.string
        i = self.index
        start = i-1
        state = 'int'
        # a leading '-' is allowed once per int/exp part
        permittedSigns = '-'
        transitions = {
            'int+.': 'frac',
            'int+e': 'exp',
            'frac+e': 'exp'
        }
        while 1:
            if i >= len(s):
                i += 1
                break
            c = s[i]
            i += 1
            if '0' <= c and c <= '9':
                continue
            if permittedSigns.find(c) >= 0:
                permittedSigns = ''
                continue
            # '.' or 'e' advances the state machine; anything else
            # terminates the number
            state = transitions.get(state + '+' + c.lower(), None)
            if state is None:
                break
            if state == 'exp':
                permittedSigns = '-'
        self.index = i - 1
        s = s[start:self.index]
        if s == '-':
            raise ParserException("invalid number")
        if re.search('[.e]', s):
            return float(s)
        return int(s)

    # return the next non-whitespace character, or None at end of input
    def next(self):
        s = self.string
        i = self.index
        while 1:
            if i == len(s):
                return None
            c = s[i]
            i += 1
            if c not in self.WHITESPACE:
                break
        self.index = i
        return c

    # dispatch table for '!' escapes: constants, or a callable that
    # continues parsing (arrays)
    bangs = {
        't': True,
        'f': False,
        'n': None,
        '(': parse_array
    }
def loads(s):
    """Parse a rison string into a python value (module-level
    convenience wrapper, mirroring simplejson.loads)."""
    return Parser().parse(s)
if __name__ == '__main__':
    # quick smoke test: parse a batch of representative rison strings
    # and show the resulting python values
    p = Parser()
    rison_examples = [
        "(a:0,b:1)",
        "(a:0,b:foo,c:'23skidoo')",
        "!t",
        "!f",
        "!n",
        "''",
        "0",
        "1.5",
        "-3",
        "1e30",
        "1e-30",
        "G.",
        "a",
        "'0a'",
        "'abc def'",
        "()",
        "(a:0)",
        "(id:!n,type:/common/document)",
        "!()",
        "!(!t,!f,!n,'')",
        "'-h'",
        "a-z",
        "'wow!!'",
        "domain.com",
        "'user@domain.com'",
        "'US $10'",
        "'can!'t'",
    ];
    for s in rison_examples:
        print
        print '*'*70
        print
        print s
        print '%r' % (p.parse(s),)
| Python |
"""Parse (absolute and relative) URLs.
See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding,
UC Irvine, June 1995.
"""
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
    """Clear the parse cache."""
    # rebinds (rather than mutates) the module-level cache dict
    global _parse_cache
    _parse_cache = {}
class BaseResult(tuple):
    """Base class for the parsed result objects.

    This provides the attributes shared by the two derived result
    objects as read-only properties.  The derived classes are
    responsible for checking the right number of arguments were
    supplied to the constructor.
    """

    __slots__ = ()

    # Attributes that access the basic components of the URL:

    @property
    def scheme(self):
        return self[0]

    @property
    def netloc(self):
        return self[1]

    @property
    def path(self):
        return self[2]

    @property
    def query(self):
        # negative index so 5- and 6-tuples both work
        return self[-2]

    @property
    def fragment(self):
        return self[-1]

    # Additional attributes that provide access to parsed-out portions
    # of the netloc:

    @property
    def username(self):
        # the part of userinfo before the first ':', or None when the
        # netloc carries no userinfo at all
        netloc = self.netloc
        if "@" not in netloc:
            return None
        userinfo = netloc.split("@", 1)[0]
        return userinfo.split(":", 1)[0]

    @property
    def password(self):
        # the part of userinfo after the first ':', or None
        netloc = self.netloc
        if "@" not in netloc:
            return None
        userinfo = netloc.split("@", 1)[0]
        if ":" not in userinfo:
            return None
        return userinfo.split(":", 1)[1]

    @property
    def hostname(self):
        # host part, lowercased; an empty host maps to None
        host = self.netloc.split("@", 1)[-1]
        host = host.split(":", 1)[0]
        return host.lower() or None

    @property
    def port(self):
        # numeric port, or None when absent.  note: a non-numeric or
        # empty port raises ValueError, matching the original
        rest = self.netloc.split("@", 1)[-1]
        if ":" not in rest:
            return None
        return int(rest.split(":", 1)[1], 10)
class SplitResult(BaseResult):
    """5-tuple result of urlsplit():
    (scheme, netloc, path, query, fragment)."""

    __slots__ = ()

    def __new__(cls, scheme, netloc, path, query, fragment):
        return BaseResult.__new__(
            cls, (scheme, netloc, path, query, fragment))

    def geturl(self):
        # reassemble an equivalent URL from the components
        return urlunsplit(self)
class ParseResult(BaseResult):
    """6-tuple result of urlparse():
    (scheme, netloc, path, params, query, fragment)."""

    __slots__ = ()

    def __new__(cls, scheme, netloc, path, params, query, fragment):
        return BaseResult.__new__(
            cls, (scheme, netloc, path, params, query, fragment))

    @property
    def params(self):
        # the ';params' of the last path segment (6-tuple only)
        return self[3]

    def geturl(self):
        # reassemble an equivalent URL from the components
        return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    tuple = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = tuple
    # only split off ;params for schemes that actually use them
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    allow_fragments = bool(allow_fragments)
    # cache on everything that affects the result, including argument
    # types (str and unicode parse to results of different types)
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        # a valid scheme name contains only scheme_chars; an invalid
        # char means the ':' was not a scheme delimiter at all
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i+1:]
    # generic path: consult the per-scheme capability lists
    if scheme in uses_netloc and url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
    if allow_fragments and scheme in uses_fragment and '#' in url:
        url, fragment = url.split('#', 1)
    if scheme in uses_query and '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v
def urlunparse((scheme, netloc, url, params, query, fragment)):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    # NOTE: python 2 tuple-parameter syntax - takes a single 6-tuple
    if params:
        url = "%s;%s" % (url, params)
    return urlunsplit((scheme, netloc, url, query, fragment))
def urlunsplit((scheme, netloc, url, query, fragment)):
    """Reassemble the 5-tuple produced by urlsplit() into a URL."""
    # NOTE: python 2 tuple-parameter syntax - takes a single 5-tuple
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        # a '//' authority section forces the path to be rooted
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        # different scheme (or one that can't be relative): url wins
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        # absolute path: keep only base's authority
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not (path or params or query):
        # url was (at most) a fragment: keep everything else from base
        return urlunparse((scheme, netloc, bpath,
                           bparams, bquery, fragment))
    # relative path: merge onto base's directory and resolve '.'/'..'
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    # repeatedly collapse the first resolvable 'segment/..' pair
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    # normalize a trailing '..'
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))
def urldefrag(url):
    """Removes any existing fragment from URL.
    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        # rebuild the url without its fragment
        defrag = urlunparse((s, n, p, a, q, ''))
        return defrag, frag
    else:
        return url, ''
test_input = """
http://a/b/c/d
g:h = <URL:g:h>
http:g = <URL:http://a/b/c/g>
http: = <URL:http://a/b/c/d>
g = <URL:http://a/b/c/g>
./g = <URL:http://a/b/c/g>
g/ = <URL:http://a/b/c/g/>
/g = <URL:http://a/g>
//g = <URL:http://g>
?y = <URL:http://a/b/c/d?y>
g?y = <URL:http://a/b/c/g?y>
g?y/./x = <URL:http://a/b/c/g?y/./x>
. = <URL:http://a/b/c/>
./ = <URL:http://a/b/c/>
.. = <URL:http://a/b/>
../ = <URL:http://a/b/>
../g = <URL:http://a/b/g>
../.. = <URL:http://a/>
../../g = <URL:http://a/g>
../../../g = <URL:http://a/../g>
./../g = <URL:http://a/b/g>
./g/. = <URL:http://a/b/c/g/>
/./g = <URL:http://a/./g>
g/./h = <URL:http://a/b/c/g/h>
g/../h = <URL:http://a/b/c/h>
http:g = <URL:http://a/b/c/g>
http: = <URL:http://a/b/c/d>
http:?y = <URL:http://a/b/c/d?y>
http:g?y = <URL:http://a/b/c/g?y>
http:g?y/./x = <URL:http://a/b/c/g?y/./x>
"""
def test():
    """Run the RFC 1808 resolution examples (from test_input, or from
    a file named on the command line) and report mismatches."""
    import sys
    base = ''
    if sys.argv[1:]:
        fn = sys.argv[1]
        if fn == '-':
            fp = sys.stdin
        else:
            fp = open(fn)
    else:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        fp = StringIO(test_input)
    while 1:
        line = fp.readline()
        if not line: break
        words = line.split()
        if not words:
            continue
        url = words[0]
        parts = urlparse(url)
        print '%-10s : %s' % (url, parts)
        # the first url in the input becomes the base for the rest
        abs = urljoin(base, url)
        if not base:
            base = abs
        wrapped = '<URL:%s>' % abs
        print '%-10s = %s' % (url, wrapped)
        # lines of the form "url = <URL:expected>" are checked
        if len(words) == 3 and words[1] == '=':
            if wrapped != words[2]:
                print 'EXPECTED', words[2], '!!!!!!!!!!'
if __name__ == '__main__':
test()
| Python |
"""HTTP cookie handling for web clients.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
distributed with the Python standard library, but are available from
http://wwwsearch.sf.net/):
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
"""
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
import re, urlparse, copy, time
import urllib_stub as urllib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from calendar import timegm
debug = False # set to True to enable debugging via the logging module
logger = None
def _debug(*args):
if not debug:
return
global logger
if not logger:
import logging
logger = logging.getLogger("cookielib")
return logger.debug(*args)
DEFAULT_HTTP_PORT = '80'
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
def _warn_unhandled_exception():
    """Emit a warning carrying the current traceback.

    Called from catch-all except: clauses in this module so unexpected
    parse failures are surfaced to the user instead of vanishing.
    """
    import warnings, traceback, StringIO
    buf = StringIO.StringIO()
    traceback.print_exc(None, buf)
    warnings.warn("cookielib bug!\n%s" % buf.getvalue(), stacklevel=2)
# Date/time conversion
# -----------------------------------------------------------------------------
EPOCH_YEAR = 1970
def _timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
# Abbreviated day and month names as they appear in HTTP/Netscape dates.
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
# Lower-cased month names, for case-insensitive lookups in the date parsers.
# (List comprehension replaces the statement loop; under Python 2 the
# comprehension still binds `month` at module level, so nothing changes.)
MONTHS_LOWER = [month.lower() for month in MONTHS]
def time2isoz(t=None):
    """Format *t* (seconds since the epoch; default: now) as an ISO-style
    UTC timestamp "YYYY-MM-DD hh:mm:ssZ", e.g. "1994-11-24 08:49:37Z"."""
    if t is None:
        t = time.time()
    return "%04d-%02d-%02d %02d:%02d:%02dZ" % time.gmtime(t)[:6]
def time2netscape(t=None):
    """Format *t* (seconds since the epoch; default: now) in Netscape
    cookie style, e.g. "Wed 09-Feb-1994 22:23:32 GMT"."""
    if t is None:
        t = time.time()
    year, mon, mday, hour, minute, sec, wday = time.gmtime(t)[:7]
    return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
        DAYS[wday], mday, MONTHS[mon - 1], year, hour, minute, sec)
# Timezone names that mean "zero offset from UTC".
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
# Numeric offsets like "-0800", "+1:30", "05".
TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
    """Return the offset of timezone string *tz* from UTC, in seconds.

    Accepts the UTC aliases in UTC_ZONES and numeric offsets matched by
    TIMEZONE_RE; returns None for anything unrecognised.
    """
    if tz in UTC_ZONES:
        return 0
    m = TIMEZONE_RE.search(tz)
    if not m:
        return None
    offset = 3600 * int(m.group(2))
    if m.group(3):
        offset += 60 * int(m.group(3))
    if m.group(1) == '-':
        offset = -offset
    return offset
def _str2time(day, mon, yr, hr, min, sec, tz):
    """Convert loosely-parsed date components (strings, or None for the
    clock fields) to seconds since the epoch, or None on failure.

    *mon* may be a month name (looked up in MONTHS_LOWER) or a number;
    a year below 1000 is resolved to the century that puts it closest to
    the current date; *tz* is a timezone string, with None meaning UTC.
    """
    # translate month name to number
    # month numbers start with 1 (January)
    try:
        mon = MONTHS_LOWER.index(mon.lower())+1
    except ValueError:
        # maybe it's already a number
        try:
            imon = int(mon)
        except ValueError:
            return None
        if 1 <= imon <= 12:
            mon = imon
        else:
            return None
    # make sure clock elements are defined
    if hr is None: hr = 0
    if min is None: min = 0
    if sec is None: sec = 0
    yr = int(yr)
    day = int(day)
    hr = int(hr)
    min = int(min)
    sec = int(sec)
    if yr < 1000:
        # find "obvious" year: pick the century that keeps the 2-digit
        # year within +/-50 years of today
        cur_yr = time.localtime(time.time())[0]
        m = cur_yr % 100
        tmp = yr
        yr = yr + cur_yr - m
        m = m - tmp
        if abs(m) > 50:
            if m > 0: yr = yr + 100
            else: yr = yr - 100
    # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
    t = _timegm((yr, mon, day, hr, min, sec, tz))
    if t is not None:
        # adjust time using timezone string, to get absolute time since epoch
        if tz is None:
            tz = "UTC"
        tz = tz.upper()
        offset = offset_from_tz_string(tz)
        if offset is None:
            return None
        t = t - offset
    return t
# Strict RFC 1123 date, e.g. "Wed, 09 Feb 1994 22:23:32 GMT".
# Fix: the second fragment was a non-raw string containing \d escapes,
# which triggers invalid-escape-sequence warnings on newer Pythons (and
# will eventually be a syntax error); it is now a raw string.  The
# compiled pattern is unchanged.
STRICT_DATE_RE = re.compile(
    r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
    r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
# Leading weekday name (full or abbreviated), optionally with a comma.
WEEKDAY_RE = re.compile(
    r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
# Loose date parse for all the formats listed in http2time()'s docstring.
LOOSE_HTTP_DATE_RE = re.compile(
    r"""^
    (\d\d?)            # day
       (?:\s+|[-\/])
    (\w+)              # month
        (?:\s+|[-\/])
    (\d+)              # year
    (?:
          (?:\s+|:)    # separator before clock
       (\d\d?):(\d\d)  # hour:min
       (?::(\d\d))?    # optional seconds
    )?                 # optional clock
       \s*
    ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
       \s*
    (?:\(\w+\))?       # ASCII representation of timezone in parens.
       \s*$""", re.X)
def http2time(text):
    """Returns time in seconds since epoch of time represented by a string.
    Return value is an integer.
    None is returned if the format of str is unrecognized, the time is outside
    the representable range, or the timezone string is not recognized.  If the
    string contains no timezone, UTC is assumed.
    The timezone in the string may be numerical (like "-0800" or "+0100") or a
    string timezone (like "UTC", "GMT", "BST" or "EST").  Currently, only the
    timezone strings equivalent to UTC (zero offset) are known to the function.
    The function loosely parses the following formats:
    Wed, 09 Feb 1994 22:23:32 GMT       -- HTTP format
    Tuesday, 08-Feb-94 14:15:29 GMT     -- old rfc850 HTTP format
    Tuesday, 08-Feb-1994 14:15:29 GMT   -- broken rfc850 HTTP format
    09 Feb 1994 22:23:32 GMT            -- HTTP format (no weekday)
    08-Feb-94 14:15:29 GMT              -- rfc850 format (no weekday)
    08-Feb-1994 14:15:29 GMT            -- broken rfc850 format (no weekday)
    The parser ignores leading and trailing whitespace.  The time may be
    absent.
    If the year is given with only 2 digits, the function will select the
    century that makes the year closest to the current date.
    """
    # fast exit for strictly conforming string
    m = STRICT_DATE_RE.search(text)
    if m:
        g = m.groups()
        mon = MONTHS_LOWER.index(g[1].lower()) + 1
        tt = (int(g[2]), mon, int(g[0]),
              int(g[3]), int(g[4]), float(g[5]))
        # NOTE(review): float(g[5]) means this fast path can return a
        # float despite the docstring's "integer" claim -- confirm
        # callers tolerate that.
        return _timegm(tt)
    # No, we need some messy parsing...
    # clean up
    text = text.lstrip()
    text = WEEKDAY_RE.sub("", text, 1)  # Useless weekday
    # tz is time zone specifier string
    day, mon, yr, hr, min, sec, tz = [None]*7
    # loose regexp parse
    m = LOOSE_HTTP_DATE_RE.search(text)
    if m is not None:
        day, mon, yr, hr, min, sec, tz = m.groups()
    else:
        return None  # bad format
    return _str2time(day, mon, yr, hr, min, sec, tz)
# Loose ISO 8601 date/time, used by iso2time() below.
# Fix: the pattern was a non-raw triple-quoted string full of \d escapes,
# which triggers invalid-escape-sequence warnings on newer Pythons; it is
# now a raw string.  The compiled pattern is unchanged.
ISO_DATE_RE = re.compile(
    r"""^
    (\d{4})            # year
       [-\/]?
    (\d\d?)            # numerical month
       [-\/]?
    (\d\d?)            # day
   (?:
         (?:\s+|[-:Tt])  # separator before clock
      (\d\d?):?(\d\d)    # hour:min
      (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
   )?                    # optional clock
      \s*
   ([-+]?\d\d?:?(:?\d\d)?
    |Z|z)?               # timezone  (Z is "zero meridian", i.e. GMT)
      \s*$""", re.X)
def iso2time(text):
    """
    As for http2time, but parses the ISO 8601 formats:
    1994-02-03 14:15:29 -0100    -- ISO 8601 format
    1994-02-03 14:15:29          -- zone is optional
    1994-02-03                   -- only date
    1994-02-03T14:15:29          -- Use T as separator
    19940203T141529Z             -- ISO 8601 compact format
    19940203                     -- only date
    """
    text = text.lstrip()
    m = ISO_DATE_RE.search(text)
    if m is None:
        return None  # unrecognised format
    # The last regexp group is a fragment of the timezone that the
    # original parser deliberately ignored; drop it here too.
    yr, mon, day, hr, min, sec, tz = m.groups()[:7]
    return _str2time(day, mon, yr, hr, min, sec, tz)
# Header parsing
# -----------------------------------------------------------------------------
def unmatched(match):
    """Return the part of match.string lying outside the span of *match*."""
    begin, stop = match.span(0)
    subject = match.string
    return subject[:begin] + subject[stop:]
# Scanner regexps for split_header_words(): each matches at the front of
# the remaining header text.
HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
HEADER_ESCAPE_RE = re.compile(r"\\(.)")
def split_header_words(header_values):
    r"""Parse header values into a list of lists containing key,value pairs.
    The function knows how to deal with ",", ";" and "=" as well as quoted
    values after "=".  A list of space separated tokens are parsed as if they
    were separated by ";".
    If the header_values passed as argument contains multiple values, then they
    are treated as if they were a single value separated by comma ",".
    This means that this function is useful for parsing header fields that
    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
    the requirement for tokens).
      headers           = #header
      header            = (token | parameter) *( [";"] (token | parameter))
      token             = 1*<any CHAR except CTLs or separators>
      separators        = "(" | ")" | "<" | ">" | "@"
                        | "," | ";" | ":" | "\" | <">
                        | "/" | "[" | "]" | "?" | "="
                        | "{" | "}" | SP | HT
      quoted-string     = ( <"> *(qdtext | quoted-pair ) <"> )
      qdtext            = <any TEXT except <">>
      quoted-pair       = "\" CHAR
      parameter         = attribute "=" value
      attribute         = token
      value             = token | quoted-string
    Each header is represented by a list of key/value pairs.  The value for a
    simple token (not part of a parameter) is None.  Syntactically incorrect
    headers will not necessarily be parsed as you would want.
    This is easier to describe with some examples:
    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
    >>> split_header_words(['text/html; charset="iso-8859-1"'])
    [[('text/html', None), ('charset', 'iso-8859-1')]]
    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
    [[('Basic', None), ('realm', '"foobar"')]]
    """
    assert not isinstance(header_values, basestring)
    result = []
    for text in header_values:
        orig_text = text
        pairs = []
        # repeatedly bite tokens/values off the front of `text`
        while text:
            m = HEADER_TOKEN_RE.search(text)
            if m:
                text = unmatched(m)
                name = m.group(1)
                m = HEADER_QUOTED_VALUE_RE.search(text)
                if m:  # quoted value
                    text = unmatched(m)
                    value = m.group(1)
                    # undo backslash-escaping inside the quoted string
                    value = HEADER_ESCAPE_RE.sub(r"\1", value)
                else:
                    m = HEADER_VALUE_RE.search(text)
                    if m:  # unquoted value
                        text = unmatched(m)
                        value = m.group(1)
                        value = value.rstrip()
                    else:
                        # no value, a lone token
                        value = None
                pairs.append((name, value))
            elif text.lstrip().startswith(","):
                # concatenated headers, as per RFC 2616 section 4.2
                text = text.lstrip()[1:]
                if pairs: result.append(pairs)
                pairs = []
            else:
                # skip junk
                non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
                assert nr_junk_chars > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (orig_text, text, pairs))
                text = non_junk
        if pairs: result.append(pairs)
    return result
# Characters that must be backslash-escaped inside a quoted header value.
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
def join_header_words(lists):
    """Do the inverse (almost) of the conversion done by split_header_words.
    Takes a list of lists of (key, value) pairs and produces a single header
    value.  Attribute values are quoted if needed.
    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
    'text/plain; charset="iso-8859/1"'
    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
    'text/plain, charset="iso-8859/1"'
    """
    headers = []
    for pairs in lists:
        parts = []
        for key, val in pairs:
            if val is None:
                # a bare token, no "=value" part
                parts.append(key)
                continue
            if not re.search(r"^\w+$", val):
                # value needs quoting; escape embedded " and \ first
                val = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", val)
                val = '"%s"' % val
            parts.append("%s=%s" % (key, val))
        if parts:
            headers.append("; ".join(parts))
    return ", ".join(headers)
def parse_ns_headers(ns_headers):
    """Ad-hoc parser for Netscape protocol cookie-attributes.
    The old Netscape cookie format for Set-Cookie can for instance contain
    an unquoted "," in the expires field, so we have to use this ad-hoc
    parser instead of split_header_words.
    XXX This may not make the best possible effort to parse all the crap
    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
    parser is probably better, so could do worse than following that if
    this ever gives any trouble.
    Currently, this is also used for parsing RFC 2109 cookies.
    """
    known_attrs = ("expires", "domain", "path", "secure",
                   # RFC 2109 attrs (may turn up in Netscape cookies, too)
                   "port", "max-age")
    result = []
    for ns_header in ns_headers:
        pairs = []
        version_set = False
        for ii, param in enumerate(re.split(r";\s*", ns_header)):
            param = param.rstrip()
            if param == "": continue
            if "=" not in param:
                k, v = param, None
            else:
                k, v = re.split(r"\s*=\s*", param, 1)
                k = k.lstrip()
            if ii != 0:
                lc = k.lower()
                if lc in known_attrs:
                    k = lc
                if k == "version":
                    # This is an RFC 2109 cookie.
                    version_set = True
                # Fix: guard against a bare "expires" attribute with no
                # value -- v is None then, and v.startswith() used to
                # raise AttributeError.
                if k == "expires" and v is not None:
                    # convert expires date to seconds since epoch
                    if v.startswith('"'): v = v[1:]
                    if v.endswith('"'): v = v[:-1]
                    v = http2time(v)  # None if invalid
            pairs.append((k, v))
        if pairs:
            if not version_set:
                pairs.append(("version", "0"))
            result.append(pairs)
    return result
# Ends in a dot followed by digits: crude "looks like an IPv4 address" test.
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
    """Return True if text is a host domain name.

    Rejects the empty string, anything matching IPV4_RE, and names with a
    leading or trailing dot.  (See the original XXX: the exact RFC basis
    for "HDN" is unclear, and IPv6 is not considered.)
    """
    if not text:
        return False
    if IPV4_RE.search(text):
        return False
    return not (text.startswith(".") or text.endswith("."))
def domain_match(A, B):
    """Return True if domain A domain-matches domain B, according to RFC 2965.
    A and B may be host domain names or IP addresses.
    RFC 2965, section 1:
    Host names can be specified either as an IP address or a HDN string.
    Sometimes we compare one host name with another.  (Such comparisons SHALL
    be case-insensitive.)  Host A's name domain-matches host B's if
         * their host name strings string-compare equal; or
         * A is a HDN string and has the form NB, where N is a non-empty
           name string, B has the form .B', and B' is a HDN string.  (So,
           x.y.com domain-matches .Y.com but not Y.com.)
    Note that domain-match is not a commutative operation: a.b.c.com
    domain-matches .c.com, but not the reverse.
    """
    # Note that, if A or B are IP addresses, the only relevant part of the
    # definition of the domain-match algorithm is the direct string-compare.
    A = A.lower()
    B = B.lower()
    if A == B:
        return True
    if not is_HDN(A):
        # A is an IP address (or junk): only the exact match above counts
        return False
    i = A.rfind(B)
    if i == -1 or i == 0:
        # A does not have form NB, or N is the empty string
        return False
    if not B.startswith("."):
        return False
    if not is_HDN(B[1:]):
        return False
    return True
def liberal_is_HDN(text):
    """Loose host-domain-name test used for user accept/block lists:
    anything that doesn't look like an IPv4 address counts."""
    return not IPV4_RE.search(text)
def user_domain_match(A, B):
    """Case-insensitive domain match for user accept/block lists.

    A and B may be host domain names or IP addresses.  IP addresses match
    only by equality; host names match exactly, or by suffix when B
    starts with a dot.
    """
    A = A.lower()
    B = B.lower()
    if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
        # at least one side looks like an IP address: require equality
        return A == B
    if B.startswith("."):
        return A.endswith(B)
    return A == B
# Trailing ":port" suffix of a host string.
cut_port_re = re.compile(r":\d+$")
def request_host(request):
    """Return the request-host, as defined by RFC 2965.

    Variation from RFC: the returned value is lowercased (and any :port
    suffix removed), for convenient comparison.
    """
    host = urlparse.urlparse(request.get_full_url())[1]
    if not host:
        # no netloc in the URL: fall back to the Host header
        host = request.get_header("Host", "")
    host = cut_port_re.sub("", host, 1)
    return host.lower()
def eff_request_host(request):
    """Return a tuple (request-host, effective request-host name).

    As defined by RFC 2965, except both are lowercased; a dotless non-IP
    request-host gets ".local" appended to form the effective name.
    """
    req_host = request_host(request)
    erhn = req_host
    if "." not in req_host and not IPV4_RE.search(req_host):
        erhn = req_host + ".local"
    return req_host, erhn
def request_path(request):
    """Return the escaped request-URI path (with params, query and
    fragment), as defined by RFC 2965."""
    url = request.get_full_url()
    path, parameters, query, frag = urlparse.urlparse(url)[2:]
    if parameters:
        path = "%s;%s" % (path, parameters)
    path = escape_path(path)
    req_path = urlparse.urlunparse(("", "", path, "", query, frag))
    if not req_path.startswith("/"):
        # fix bad RFC 2396 absoluteURI
        req_path = "/" + req_path
    return req_path
def request_port(request):
    """Return the port of the request's host as a string.

    Defaults to DEFAULT_HTTP_PORT when no port is present; returns None
    (after logging) when the port is present but non-numeric.
    """
    host = request.get_host()
    colon = host.find(':')
    if colon < 0:
        return DEFAULT_HTTP_PORT
    port = host[colon + 1:]
    try:
        int(port)
    except ValueError:
        _debug("nonnumeric port: '%s'", port)
        return None
    return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
# A single %xx escape sequence.
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
    """re.sub callback: rewrite a %xx escape with upper-case hex digits."""
    return "%" + match.group(1).upper()
def escape_path(path):
    """Escape any invalid characters in HTTP URL, and uppercase all escapes."""
    # There's no knowing what character encoding was used to create URLs
    # containing %-escapes, but since we have to pick one to escape invalid
    # path characters, we pick UTF-8, as recommended in the HTML 4.0
    # specification:
    # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
    # And here, kind of: draft-fielding-uri-rfc2396bis-03
    # (And in draft IRI specification: draft-duerst-iri-05)
    # (And here, for new URI schemes: RFC 2718)
    if isinstance(path, unicode):
        path = path.encode("utf-8")
    # Fix: this previously called the misspelled name "urlib", which does
    # not exist, so every call raised NameError.  The module is imported
    # at the top of the file as "import urllib_stub as urllib".
    path = urllib.quote(path, HTTP_PATH_SAFE)
    path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
    return path
def reach(h):
    """Return reach of host h, as defined by RFC 2965, section 1.
    The reach R of a host name H is defined as follows:
       *  If
          -  H is the host domain name of a host; and,
          -  H has the form A.B; and
          -  A has no embedded (that is, interior) dots; and
          -  B has at least one embedded dot, or B is the string "local".
          then the reach of H is .B.
       *  Otherwise, the reach of H is H.
    >>> reach("www.acme.com")
    '.acme.com'
    >>> reach("acme.com")
    'acme.com'
    >>> reach("acme.local")
    '.local'
    """
    dot = h.find(".")
    if dot >= 0:
        suffix = h[dot + 1:]  # the "B" part of A.B
        if is_HDN(h) and (suffix.find(".") >= 0 or suffix == "local"):
            return "." + suffix
    return h
def is_third_party(request):
    """
    RFC 2965, section 3.3.6:
        An unverifiable transaction is to a third-party host if its request-
        host U does not domain-match the reach R of the request-host O in the
        origin transaction.
    """
    req_host = request_host(request)
    origin_reach = reach(request.get_origin_req_host())
    return not domain_match(req_host, origin_reach)
class Cookie:
    """HTTP Cookie.
    This class represents both Netscape and RFC 2965 cookies.
    This is deliberately a very simple class.  It just holds attributes.  It's
    possible to construct Cookie instances that don't comply with the cookie
    standards.  CookieJar.make_cookies is the factory function for Cookie
    objects -- it deals with cookie parsing, supplying defaults, and
    normalising to the representation used in this class.  CookiePolicy is
    responsible for checking them to see whether they should be accepted from
    and returned to the server.
    Note that the port may be present in the headers, but unspecified ("Port"
    rather than "Port=80", for example); if this is the case, port is None.
    """
    def __init__(self, version, name, value,
                 port, port_specified,
                 domain, domain_specified, domain_initial_dot,
                 path, path_specified,
                 secure,
                 expires,
                 discard,
                 comment,
                 comment_url,
                 rest,
                 rfc2109=False,
                 ):
        # version and expires are coerced to int when supplied
        if version is not None: version = int(version)
        if expires is not None: expires = int(expires)
        if port is None and port_specified is True:
            raise ValueError("if port is None, port_specified must be false")
        self.version = version
        self.name = name
        self.value = value
        self.port = port
        self.port_specified = port_specified
        # normalise case, as per RFC 2965 section 3.3.3
        self.domain = domain.lower()
        self.domain_specified = domain_specified
        # Sigh.  We need to know whether the domain given in the
        # cookie-attribute had an initial dot, in order to follow RFC 2965
        # (as clarified in draft errata).  Needed for the returned $Domain
        # value.
        self.domain_initial_dot = domain_initial_dot
        self.path = path
        self.path_specified = path_specified
        self.secure = secure
        self.expires = expires
        self.discard = discard
        self.comment = comment
        self.comment_url = comment_url
        self.rfc2109 = rfc2109
        # shallow copy so the caller's dict of extra attributes is not shared
        self._rest = copy.copy(rest)
    def has_nonstandard_attr(self, name):
        """Return True if this cookie carries the extra attribute *name*."""
        return name in self._rest
    def get_nonstandard_attr(self, name, default=None):
        """Return the extra attribute *name*, or *default* if absent."""
        return self._rest.get(name, default)
    def set_nonstandard_attr(self, name, value):
        """Store an extra (non-standard) cookie attribute."""
        self._rest[name] = value
    def is_expired(self, now=None):
        """Return True if the cookie has expired as of *now* (default:
        the current time)."""
        if now is None: now = time.time()
        if (self.expires is not None) and (self.expires <= now):
            return True
        return False
    def __str__(self):
        # e.g. "<Cookie name=value for domain:port/path>"
        if self.port is None: p = ""
        else: p = ":"+self.port
        limit = self.domain + p + self.path
        if self.value is not None:
            namevalue = "%s=%s" % (self.name, self.value)
        else:
            namevalue = self.name
        return "<Cookie %s for %s>" % (namevalue, limit)
    def __repr__(self):
        args = []
        for name in ("version", "name", "value",
                     "port", "port_specified",
                     "domain", "domain_specified", "domain_initial_dot",
                     "path", "path_specified",
                     "secure", "expires", "discard", "comment", "comment_url",
                     ):
            attr = getattr(self, name)
            args.append("%s=%s" % (name, repr(attr)))
        args.append("rest=%s" % repr(self._rest))
        args.append("rfc2109=%s" % repr(self.rfc2109))
        return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
    """Defines which cookies get accepted from and returned to server.
    May also modify cookies, though this is probably a bad idea.
    The subclass DefaultCookiePolicy defines the standard rules for Netscape
    and RFC 2965 cookies -- override that if you want a customised policy.
    """
    def set_ok(self, cookie, request):
        """Return true if (and only if) cookie should be accepted from server.
        Currently, pre-expired cookies never get this far -- the CookieJar
        class deletes such cookies itself.
        """
        raise NotImplementedError()
    def return_ok(self, cookie, request):
        """Return true if (and only if) cookie should be returned to server."""
        raise NotImplementedError()
    def domain_return_ok(self, domain, request):
        """Return false if cookies should not be returned, given cookie domain.
        Base-class default: accept every domain.
        """
        return True
    def path_return_ok(self, path, request):
        """Return false if cookies should not be returned, given cookie path.
        Base-class default: accept every path.
        """
        return True
class DefaultCookiePolicy(CookiePolicy):
    """Implements the standard rules for accepting and returning cookies."""
    # Bit flags controlling how strictly Netscape cookie domains are checked;
    # combine into strict_ns_domain.
    DomainStrictNoDots = 1
    DomainStrictNonDomain = 2
    DomainRFC2965Match = 4
    DomainLiberal = 0
    DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
    def __init__(self,
                 blocked_domains=None, allowed_domains=None,
                 netscape=True, rfc2965=False,
                 rfc2109_as_netscape=None,
                 hide_cookie2=False,
                 strict_domain=False,
                 strict_rfc2965_unverifiable=True,
                 strict_ns_unverifiable=False,
                 strict_ns_domain=DomainLiberal,
                 strict_ns_set_initial_dollar=False,
                 strict_ns_set_path=False,
                 ):
        """Constructor arguments should be passed as keyword arguments only."""
        self.netscape = netscape
        self.rfc2965 = rfc2965
        self.rfc2109_as_netscape = rfc2109_as_netscape
        self.hide_cookie2 = hide_cookie2
        self.strict_domain = strict_domain
        self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
        self.strict_ns_unverifiable = strict_ns_unverifiable
        self.strict_ns_domain = strict_ns_domain
        self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
        self.strict_ns_set_path = strict_ns_set_path
        # user block/allow lists are stored as tuples (allow may be None,
        # meaning "allow everything not blocked")
        if blocked_domains is not None:
            self._blocked_domains = tuple(blocked_domains)
        else:
            self._blocked_domains = ()
        if allowed_domains is not None:
            allowed_domains = tuple(allowed_domains)
        self._allowed_domains = allowed_domains
    def blocked_domains(self):
        """Return the sequence of blocked domains (as a tuple)."""
        return self._blocked_domains
    def set_blocked_domains(self, blocked_domains):
        """Set the sequence of blocked domains."""
        self._blocked_domains = tuple(blocked_domains)
    def is_blocked(self, domain):
        """Return True if *domain* matches an entry in the block list."""
        for blocked_domain in self._blocked_domains:
            if user_domain_match(domain, blocked_domain):
                return True
        return False
    def allowed_domains(self):
        """Return None, or the sequence of allowed domains (as a tuple)."""
        return self._allowed_domains
    def set_allowed_domains(self, allowed_domains):
        """Set the sequence of allowed domains, or None."""
        if allowed_domains is not None:
            allowed_domains = tuple(allowed_domains)
        self._allowed_domains = allowed_domains
    def is_not_allowed(self, domain):
        """Return True if an allow-list exists and *domain* is not on it."""
        if self._allowed_domains is None:
            return False
        for allowed_domain in self._allowed_domains:
            if user_domain_match(domain, allowed_domain):
                return False
        return True
    def set_ok(self, cookie, request):
        """
        If you override .set_ok(), be sure to call this method.  If it returns
        false, so should your subclass (assuming your subclass wants to be more
        strict about which cookies to accept).
        """
        _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
        assert cookie.name is not None
        # dispatch to the set_ok_* checks below; all must pass
        for n in "version", "verifiability", "name", "path", "domain", "port":
            fn_name = "set_ok_"+n
            fn = getattr(self, fn_name)
            if not fn(cookie, request):
                return False
        return True
    def set_ok_version(self, cookie, request):
        """Reject version-less cookies and cookies of a protocol family
        (RFC 2965 / Netscape) that is switched off."""
        if cookie.version is None:
            # Version is always set to 0 by parse_ns_headers if it's a Netscape
            # cookie, so this must be an invalid RFC 2965 cookie.
            _debug("   Set-Cookie2 without version attribute (%s=%s)",
                   cookie.name, cookie.value)
            return False
        if cookie.version > 0 and not self.rfc2965:
            _debug("   RFC 2965 cookies are switched off")
            return False
        elif cookie.version == 0 and not self.netscape:
            _debug("   Netscape cookies are switched off")
            return False
        return True
    def set_ok_verifiability(self, cookie, request):
        """Reject third-party cookies set during unverifiable transactions,
        according to the strictness settings."""
        if request.is_unverifiable() and is_third_party(request):
            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
                _debug("   third-party RFC 2965 cookie during "
                       "unverifiable transaction")
                return False
            elif cookie.version == 0 and self.strict_ns_unverifiable:
                _debug("   third-party Netscape cookie during "
                       "unverifiable transaction")
                return False
        return True
    def set_ok_name(self, cookie, request):
        """Optionally reject V0 cookie names that start with '$'."""
        # Try and stop servers setting V0 cookies designed to hack other
        # servers that know both V0 and V1 protocols.
        if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
            cookie.name.startswith("$")):
            _debug("   illegal name (starts with '$'): '%s'", cookie.name)
            return False
        return True
    def set_ok_path(self, cookie, request):
        """Require the cookie path to be a prefix of the request path
        (always for RFC 2965 cookies; for Netscape only when strict)."""
        if cookie.path_specified:
            req_path = request_path(request)
            if ((cookie.version > 0 or
                 (cookie.version == 0 and self.strict_ns_set_path)) and
                not req_path.startswith(cookie.path)):
                _debug("   path attribute %s is not a prefix of request "
                       "path %s", cookie.path, req_path)
                return False
        return True
    def set_ok_domain(self, cookie, request):
        """Apply the block/allow lists and the RFC 2965 / Netscape domain
        rules to the cookie's domain attribute."""
        if self.is_blocked(cookie.domain):
            _debug("   domain %s is in user block-list", cookie.domain)
            return False
        if self.is_not_allowed(cookie.domain):
            _debug("   domain %s is not in user allow-list", cookie.domain)
            return False
        if cookie.domain_specified:
            req_host, erhn = eff_request_host(request)
            domain = cookie.domain
            if self.strict_domain and (domain.count(".") >= 2):
                # XXX This should probably be compared with the Konqueror
                # (kcookiejar.cpp) and Mozilla implementations, but it's a
                # losing battle.
                i = domain.rfind(".")
                j = domain.rfind(".", 0, i)
                if j == 0:  # domain like .foo.bar
                    tld = domain[i+1:]
                    sld = domain[j+1:i]
                    if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
                       "gov", "mil", "int", "aero", "biz", "cat", "coop",
                       "info", "jobs", "mobi", "museum", "name", "pro",
                       "travel", "eu") and len(tld) == 2:
                        # domain like .co.uk
                        _debug("   country-code second level domain %s", domain)
                        return False
            if domain.startswith("."):
                undotted_domain = domain[1:]
            else:
                undotted_domain = domain
            embedded_dots = (undotted_domain.find(".") >= 0)
            if not embedded_dots and domain != ".local":
                _debug("   non-local domain %s contains no embedded dot",
                       domain)
                return False
            if cookie.version == 0:
                if (not erhn.endswith(domain) and
                    (not erhn.startswith(".") and
                     not ("."+erhn).endswith(domain))):
                    _debug("   effective request-host %s (even with added "
                           "initial dot) does not end end with %s",
                           erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainRFC2965Match)):
                if not domain_match(erhn, domain):
                    _debug("   effective request-host %s does not domain-match "
                           "%s", erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainStrictNoDots)):
                host_prefix = req_host[:-len(domain)]
                if (host_prefix.find(".") >= 0 and
                    not IPV4_RE.search(req_host)):
                    _debug("   host prefix %s for domain %s contains a dot",
                           host_prefix, domain)
                    return False
        return True
    def set_ok_port(self, cookie, request):
        """If the cookie specified a port list, require the request port
        to appear in it (and every listed port to be numeric)."""
        if cookie.port_specified:
            req_port = request_port(request)
            if req_port is None:
                req_port = "80"
            else:
                req_port = str(req_port)
            for p in cookie.port.split(","):
                try:
                    int(p)
                except ValueError:
                    _debug("   bad port %s (not numeric)", p)
                    return False
                if p == req_port:
                    break
            else:
                _debug("   request port (%s) not found in %s",
                       req_port, cookie.port)
                return False
        return True
    def return_ok(self, cookie, request):
        """
        If you override .return_ok(), be sure to call this method.  If it
        returns false, so should your subclass (assuming your subclass wants to
        be more strict about which cookies to return).
        """
        # Path has already been checked by .path_return_ok(), and domain
        # blocking done by .domain_return_ok().
        _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
        for n in "version", "verifiability", "secure", "expires", "port", "domain":
            fn_name = "return_ok_"+n
            fn = getattr(self, fn_name)
            if not fn(cookie, request):
                return False
        return True
    def return_ok_version(self, cookie, request):
        """Don't return cookies of a protocol family that is switched off."""
        if cookie.version > 0 and not self.rfc2965:
            _debug("   RFC 2965 cookies are switched off")
            return False
        elif cookie.version == 0 and not self.netscape:
            _debug("   Netscape cookies are switched off")
            return False
        return True
    def return_ok_verifiability(self, cookie, request):
        """Don't return third-party cookies during unverifiable
        transactions, according to the strictness settings."""
        if request.is_unverifiable() and is_third_party(request):
            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
                _debug("   third-party RFC 2965 cookie during unverifiable "
                       "transaction")
                return False
            elif cookie.version == 0 and self.strict_ns_unverifiable:
                _debug("   third-party Netscape cookie during unverifiable "
                       "transaction")
                return False
        return True
    def return_ok_secure(self, cookie, request):
        """Only return secure cookies over https."""
        if cookie.secure and request.get_type() != "https":
            _debug("   secure cookie with non-secure request")
            return False
        return True
    def return_ok_expires(self, cookie, request):
        """Don't return expired cookies."""
        # NOTE(review): self._now is not set anywhere in this class --
        # presumably assigned by CookieJar before the policy checks run;
        # confirm against the CookieJar implementation.
        if cookie.is_expired(self._now):
            _debug("   cookie expired")
            return False
        return True
    def return_ok_port(self, cookie, request):
        """If the cookie carries a port list, require the request port
        to appear in it."""
        if cookie.port:
            req_port = request_port(request)
            if req_port is None:
                req_port = "80"
            for p in cookie.port.split(","):
                if p == req_port:
                    break
            else:
                _debug("   request port %s does not match cookie port %s",
                       req_port, cookie.port)
                return False
        return True
    def return_ok_domain(self, cookie, request):
        """Apply the RFC 2965 / Netscape domain-match rules for returning
        a cookie to the requesting host."""
        req_host, erhn = eff_request_host(request)
        domain = cookie.domain
        # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
        if (cookie.version == 0 and
            (self.strict_ns_domain & self.DomainStrictNonDomain) and
            not cookie.domain_specified and domain != erhn):
            _debug("   cookie with unspecified domain does not string-compare "
                   "equal to request domain")
            return False
        if cookie.version > 0 and not domain_match(erhn, domain):
            _debug("   effective request-host name %s does not domain-match "
                   "RFC 2965 cookie domain %s", erhn, domain)
            return False
        if cookie.version == 0 and not ("."+erhn).endswith(domain):
            _debug("   request-host %s does not match Netscape cookie domain "
                   "%s", req_host, domain)
            return False
        return True
    def domain_return_ok(self, domain, request):
        """Liberal per-domain pre-check.  This is here as an optimization to
        avoid having to load lots of MSIE cookie files unless necessary."""
        req_host, erhn = eff_request_host(request)
        if not req_host.startswith("."):
            req_host = "."+req_host
        if not erhn.startswith("."):
            erhn = "."+erhn
        if not (req_host.endswith(domain) or erhn.endswith(domain)):
            #_debug("   request domain %s does not match cookie domain %s",
            #       req_host, domain)
            return False
        if self.is_blocked(domain):
            _debug("   domain %s is in user block-list", domain)
            return False
        if self.is_not_allowed(domain):
            _debug("   domain %s is not in user allow-list", domain)
            return False
        return True
    def path_return_ok(self, path, request):
        """Require the cookie path to be a prefix of the request path."""
        _debug("- checking cookie path=%s", path)
        req_path = request_path(request)
        if not req_path.startswith(path):
            _debug("  %s does not path-match %s", req_path, path)
            return False
        return True
def vals_sorted_by_key(adict):
    """Return a list of the values of *adict*, ordered by sorted key.

    The original implementation relied on Python-2-only behaviour
    (list-returning dict.keys() with in-place .sort(), eager map());
    this form is behaviourally identical on Python 2 and also works on
    Python 3.
    """
    return [adict[k] for k in sorted(adict)]
def deepvalues(mapping):
    """Iterates over nested mapping, depth-first, in sorted order by key."""
    # Anything exposing an .items attribute is treated as a nested mapping
    # and recursed into; everything else is yielded as a leaf value.
    for obj in vals_sorted_by_key(mapping):
        if hasattr(obj, "items"):
            for subobj in deepvalues(obj):
                yield subobj
        else:
            yield obj
# Used as second parameter to dict.get() method, to distinguish absent
# dict key from one with a None value.
class Absent: pass  # sentinel: callers compare with "is Absent", never instantiate
class CookieJar:
    """Collection of HTTP cookies.
    You may not need to know about this class: try
    urllib2.build_opener(HTTPCookieProcessor).open(url).
    """
    # value characters that force quoting in an outgoing Cookie header
    non_word_re = re.compile(r"\W")
    # characters that must be backslash-escaped inside a quoted value
    quote_re = re.compile(r"([\"\\])")
    strict_domain_re = re.compile(r"\.?[^.]*")
    domain_re = re.compile(r"[^.]*")
    dots_re = re.compile(r"^\.+")
    # first line of an LWP-format cookie file (used by file-based subclasses)
    magic_re = r"^\#LWP-Cookies-(\d+\.\d+)"
    def __init__(self, policy=None):
        if policy is None:
            policy = DefaultCookiePolicy()
        self._policy = policy
        # guards _cookies: nested dict of domain -> path -> name -> Cookie
        self._cookies_lock = _threading.RLock()
        self._cookies = {}
    def set_policy(self, policy):
        # Swap in a new CookiePolicy for subsequent accept/return decisions.
        self._policy = policy
    def _cookies_for_domain(self, domain, request):
        # Collect the cookies stored under `domain` that policy allows to be
        # returned for this request.
        cookies = []
        if not self._policy.domain_return_ok(domain, request):
            return []
        _debug("Checking %s for cookies to return", domain)
        cookies_by_path = self._cookies[domain]
        for path in cookies_by_path.keys():
            if not self._policy.path_return_ok(path, request):
                continue
            cookies_by_name = cookies_by_path[path]
            for cookie in cookies_by_name.values():
                if not self._policy.return_ok(cookie, request):
                    _debug("   not returning cookie")
                    continue
                _debug(" it's a match")
                cookies.append(cookie)
        return cookies
    def _cookies_for_request(self, request):
        """Return a list of cookies to be returned to server."""
        cookies = []
        for domain in self._cookies.keys():
            cookies.extend(self._cookies_for_domain(domain, request))
        return cookies
    def _cookie_attrs(self, cookies):
        """Return a list of cookie-attributes to be returned to server.
        like ['foo="bar"; $Path="/"', ...]
        The $Version attribute is also added when appropriate (currently only
        once per request).
        """
        # add cookies in order of most specific (ie. longest) path first
        def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
        cookies.sort(decreasing_size)
        version_set = False
        attrs = []
        for cookie in cookies:
            # set version of Cookie header
            # XXX
            # What should it be if multiple matching Set-Cookie headers have
            #  different versions themselves?
            # Answer: there is no answer; was supposed to be settled by
            #  RFC 2965 errata, but that may never appear...
            version = cookie.version
            if not version_set:
                version_set = True
                if version > 0:
                    attrs.append("$Version=%s" % version)
            # quote cookie value if necessary
            # (not for Netscape protocol, which already has any quotes
            #  intact, due to the poorly-specified Netscape Cookie: syntax)
            if ((cookie.value is not None) and
                self.non_word_re.search(cookie.value) and version > 0):
                value = self.quote_re.sub(r"\\\1", cookie.value)
            else:
                value = cookie.value
            # add cookie-attributes to be returned in Cookie header
            if cookie.value is None:
                attrs.append(cookie.name)
            else:
                attrs.append("%s=%s" % (cookie.name, value))
            if version > 0:
                if cookie.path_specified:
                    attrs.append('$Path="%s"' % cookie.path)
                if cookie.domain.startswith("."):
                    domain = cookie.domain
                    if (not cookie.domain_initial_dot and
                        domain.startswith(".")):
                        # strip the dot we added when the cookie was stored
                        domain = domain[1:]
                    attrs.append('$Domain="%s"' % domain)
                if cookie.port is not None:
                    p = "$Port"
                    if cookie.port_specified:
                        p = p + ('="%s"' % cookie.port)
                    attrs.append(p)
        return attrs
    def add_cookie_header(self, request):
        """Add correct Cookie: header to request (urllib2.Request object).
        The Cookie2 header is also added unless policy.hide_cookie2 is true.
        """
        _debug("add_cookie_header")
        self._cookies_lock.acquire()
        # snapshot "now" once so all policy checks agree on the time
        self._policy._now = self._now = int(time.time())
        cookies = self._cookies_for_request(request)
        attrs = self._cookie_attrs(cookies)
        if attrs:
            if not request.has_header("Cookie"):
                request.add_unredirected_header(
                    "Cookie", "; ".join(attrs))
        # if necessary, advertise that we know RFC 2965
        if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
            not request.has_header("Cookie2")):
            for cookie in cookies:
                if cookie.version != 1:
                    request.add_unredirected_header("Cookie2", '$Version="1"')
                    break
        # NOTE(review): lock released before clear_expired_cookies(), which
        # re-enters self; the RLock would have allowed holding it throughout
        self._cookies_lock.release()
        self.clear_expired_cookies()
    def _normalized_cookie_tuples(self, attrs_set):
        """Return list of tuples containing normalised cookie information.
        attrs_set is the list of lists of key,value pairs extracted from
        the Set-Cookie or Set-Cookie2 headers.
        Tuples are name, value, standard, rest, where name and value are the
        cookie name and value, standard is a dictionary containing the standard
        cookie-attributes (discard, secure, version, expires or max-age,
        domain, path and port) and rest is a dictionary containing the rest of
        the cookie-attributes.
        """
        cookie_tuples = []
        boolean_attrs = "discard", "secure"
        value_attrs = ("version",
                       "expires", "max-age",
                       "domain", "path", "port",
                       "comment", "commenturl")
        for cookie_attrs in attrs_set:
            # first pair is always the cookie's own name=value
            name, value = cookie_attrs[0]
            # Build dictionary of standard cookie-attributes (standard) and
            # dictionary of other cookie-attributes (rest).
            # Note: expiry time is normalised to seconds since epoch.  V0
            # cookies should have the Expires cookie-attribute, and V1 cookies
            # should have Max-Age, but since V1 includes RFC 2109 cookies (and
            # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
            # accept either (but prefer Max-Age).
            max_age_set = False
            bad_cookie = False
            standard = {}
            rest = {}
            for k, v in cookie_attrs[1:]:
                lc = k.lower()
                # don't lose case distinction for unknown fields
                if lc in value_attrs or lc in boolean_attrs:
                    k = lc
                if k in boolean_attrs and v is None:
                    # boolean cookie-attribute is present, but has no value
                    # (like "discard", rather than "port=80")
                    v = True
                if k in standard:
                    # only first value is significant
                    continue
                if k == "domain":
                    if v is None:
                        _debug("   missing value for domain attribute")
                        bad_cookie = True
                        break
                    # RFC 2965 section 3.3.3
                    v = v.lower()
                if k == "expires":
                    if max_age_set:
                        # Prefer max-age to expires (like Mozilla)
                        continue
                    if v is None:
                        _debug("   missing or invalid value for expires "
                              "attribute: treating as session cookie")
                        continue
                if k == "max-age":
                    max_age_set = True
                    try:
                        v = int(v)
                    except ValueError:
                        _debug("   missing or invalid (non-numeric) value for "
                              "max-age attribute")
                        bad_cookie = True
                        break
                    # convert RFC 2965 Max-Age to seconds since epoch
                    # XXX Strictly you're supposed to follow RFC 2616
                    #   age-calculation rules.  Remember that zero Max-Age is a
                    #   is a request to discard (old and new) cookie, though.
                    k = "expires"
                    v = self._now + v
                if (k in value_attrs) or (k in boolean_attrs):
                    if (v is None and
                        k not in ("port", "comment", "commenturl")):
                        _debug("   missing value for %s attribute" % k)
                        bad_cookie = True
                        break
                    standard[k] = v
                else:
                    rest[k] = v
            if bad_cookie:
                continue
            cookie_tuples.append((name, value, standard, rest))
        return cookie_tuples
    def _cookie_from_cookie_tuple(self, tup, request):
        # standard is dict of standard cookie-attributes, rest is dict of the
        # rest of them
        name, value, standard, rest = tup
        domain = standard.get("domain", Absent)
        path = standard.get("path", Absent)
        port = standard.get("port", Absent)
        expires = standard.get("expires", Absent)
        # set the easy defaults
        version = standard.get("version", None)
        if version is not None: version = int(version)
        secure = standard.get("secure", False)
        # (discard is also set if expires is Absent)
        discard = standard.get("discard", False)
        comment = standard.get("comment", None)
        comment_url = standard.get("commenturl", None)
        # set default path
        if path is not Absent and path != "":
            path_specified = True
            path = escape_path(path)
        else:
            path_specified = False
            # default path: directory component of the request path
            path = request_path(request)
            i = path.rfind("/")
            if i != -1:
                if version == 0:
                    # Netscape spec parts company from reality here
                    path = path[:i]
                else:
                    path = path[:i+1]
            if len(path) == 0: path = "/"
        # set default domain
        domain_specified = domain is not Absent
        # but first we have to remember whether it starts with a dot
        domain_initial_dot = False
        if domain_specified:
            domain_initial_dot = bool(domain.startswith("."))
        if domain is Absent:
            req_host, erhn = eff_request_host(request)
            domain = erhn
        elif not domain.startswith("."):
            # normalise stored domain to always carry a leading dot
            domain = "."+domain
        # set default port
        port_specified = False
        if port is not Absent:
            if port is None:
                # Port attr present, but has no value: default to request port.
                # Cookie should then only be sent back on that port.
                port = request_port(request)
            else:
                port_specified = True
                port = re.sub(r"\s+", "", port)
        else:
            # No port attr present.  Cookie can be sent back on any port.
            port = None
        # set default expires and discard
        if expires is Absent:
            expires = None
            discard = True
        elif expires <= self._now:
            # Expiry date in past is request to delete cookie.  This can't be
            # in DefaultCookiePolicy, because can't delete cookies there.
            try:
                self.clear(domain, path, name)
            except KeyError:
                pass
            _debug("Expiring cookie, domain='%s', path='%s', name='%s'",
                   domain, path, name)
            return None
        return Cookie(version,
                      name, value,
                      port, port_specified,
                      domain, domain_specified, domain_initial_dot,
                      path, path_specified,
                      secure,
                      expires,
                      discard,
                      comment,
                      comment_url,
                      rest)
    def _cookies_from_attrs_set(self, attrs_set, request):
        # Turn parsed header attribute lists into Cookie objects, dropping
        # tuples rejected (None) by _cookie_from_cookie_tuple.
        cookie_tuples = self._normalized_cookie_tuples(attrs_set)
        cookies = []
        for tup in cookie_tuples:
            cookie = self._cookie_from_cookie_tuple(tup, request)
            if cookie: cookies.append(cookie)
        return cookies
    def _process_rfc2109_cookies(self, cookies):
        # Downgrade RFC 2109 cookies (version 1 from Set-Cookie) to Netscape
        # cookies when the policy asks for it (or doesn't know RFC 2965).
        rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
        if rfc2109_as_ns is None:
            rfc2109_as_ns = not self._policy.rfc2965
        for cookie in cookies:
            if cookie.version == 1:
                cookie.rfc2109 = True
                if rfc2109_as_ns:
                    # treat 2109 cookies as Netscape cookies rather than
                    # as RFC2965 cookies
                    cookie.version = 0
    def make_cookies(self, response, request):
        """Return sequence of Cookie objects extracted from response object."""
        # get cookie-attributes for RFC 2965 and Netscape protocols
        headers = response.info()
        rfc2965_hdrs = headers.getheaders("Set-Cookie2")
        ns_hdrs = headers.getheaders("Set-Cookie")
        rfc2965 = self._policy.rfc2965
        netscape = self._policy.netscape
        if ((not rfc2965_hdrs and not ns_hdrs) or
            (not ns_hdrs and not rfc2965) or
            (not rfc2965_hdrs and not netscape) or
            (not netscape and not rfc2965)):
            return []  # no relevant cookie headers: quick exit
        try:
            cookies = self._cookies_from_attrs_set(
                split_header_words(rfc2965_hdrs), request)
        except Exception:
            # best-effort: a malformed Set-Cookie2 shouldn't kill the request
            _warn_unhandled_exception()
            cookies = []
        if ns_hdrs and netscape:
            try:
                # RFC 2109 and Netscape cookies
                ns_cookies = self._cookies_from_attrs_set(
                    parse_ns_headers(ns_hdrs), request)
            except Exception:
                _warn_unhandled_exception()
                ns_cookies = []
            self._process_rfc2109_cookies(ns_cookies)
            # Look for Netscape cookies (from Set-Cookie headers) that match
            # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
            # For each match, keep the RFC 2965 cookie and ignore the Netscape
            # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
            # bundled in with the Netscape cookies for this purpose, which is
            # reasonable behaviour.
            if rfc2965:
                lookup = {}
                for cookie in cookies:
                    lookup[(cookie.domain, cookie.path, cookie.name)] = None
                def no_matching_rfc2965(ns_cookie, lookup=lookup):
                    key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
                    return key not in lookup
                ns_cookies = filter(no_matching_rfc2965, ns_cookies)
            if ns_cookies:
                cookies.extend(ns_cookies)
        return cookies
    def set_cookie_if_ok(self, cookie, request):
        """Set a cookie if policy says it's OK to do so."""
        self._cookies_lock.acquire()
        self._policy._now = self._now = int(time.time())
        if self._policy.set_ok(cookie, request):
            self.set_cookie(cookie)
        # NOTE(review): release is not in a finally block; an exception from
        # set_ok/set_cookie would leak the lock
        self._cookies_lock.release()
    def set_cookie(self, cookie):
        """Set a cookie, without checking whether or not it should be set."""
        c = self._cookies
        self._cookies_lock.acquire()
        try:
            # create the domain -> path -> name nesting on demand
            if cookie.domain not in c: c[cookie.domain] = {}
            c2 = c[cookie.domain]
            if cookie.path not in c2: c2[cookie.path] = {}
            c3 = c2[cookie.path]
            c3[cookie.name] = cookie
        finally:
            self._cookies_lock.release()
    def extract_cookies(self, response, request):
        """Extract cookies from response, where allowable given the request."""
        _debug("extract_cookies: %s", response.info())
        self._cookies_lock.acquire()
        self._policy._now = self._now = int(time.time())
        for cookie in self.make_cookies(response, request):
            if self._policy.set_ok(cookie, request):
                _debug(" setting cookie: %s", cookie)
                self.set_cookie(cookie)
        self._cookies_lock.release()
    def clear(self, domain=None, path=None, name=None):
        """Clear some cookies.
        Invoking this method without arguments will clear all cookies.  If
        given a single argument, only cookies belonging to that domain will be
        removed.  If given two arguments, cookies belonging to the specified
        path within that domain are removed.  If given three arguments, then
        the cookie with the specified name, path and domain is removed.
        Raises KeyError if no matching cookie exists.
        """
        if name is not None:
            if (domain is None) or (path is None):
                raise ValueError(
                    "domain and path must be given to remove a cookie by name")
            del self._cookies[domain][path][name]
        elif path is not None:
            if domain is None:
                raise ValueError(
                    "domain must be given to remove cookies by path")
            del self._cookies[domain][path]
        elif domain is not None:
            del self._cookies[domain]
        else:
            self._cookies = {}
    def clear_session_cookies(self):
        """Discard all session cookies.
        Note that the .save() method won't save session cookies anyway, unless
        you ask otherwise by passing a true ignore_discard argument.
        """
        self._cookies_lock.acquire()
        for cookie in self:
            if cookie.discard:
                self.clear(cookie.domain, cookie.path, cookie.name)
        self._cookies_lock.release()
    def clear_expired_cookies(self):
        """Discard all expired cookies.
        You probably don't need to call this method: expired cookies are never
        sent back to the server (provided you're using DefaultCookiePolicy),
        this method is called by CookieJar itself every so often, and the
        .save() method won't save expired cookies anyway (unless you ask
        otherwise by passing a true ignore_expires argument).
        """
        self._cookies_lock.acquire()
        now = time.time()
        for cookie in self:
            if cookie.is_expired(now):
                self.clear(cookie.domain, cookie.path, cookie.name)
        self._cookies_lock.release()
    def __iter__(self):
        # Depth-first iteration over every stored Cookie object.
        return deepvalues(self._cookies)
    def __len__(self):
        """Return number of contained cookies."""
        i = 0
        for cookie in self: i = i + 1
        return i
    def __repr__(self):
        r = []
        for cookie in self: r.append(repr(cookie))
        return "<%s[%s]>" % (self.__class__, ", ".join(r))
    def __str__(self):
        r = []
        for cookie in self: r.append(str(cookie))
        return "<%s[%s]>" % (self.__class__, ", ".join(r))
# derives from IOError for backwards-compatibility with Python 2.4.0
class LoadError(IOError): pass  # raised when a cookie file is malformed
class FileCookieJar(CookieJar):
    """CookieJar that can be loaded from and saved to a file."""
    def __init__(self, filename=None, delayload=False, policy=None):
        """
        Cookies are NOT loaded from the named file until either the .load() or
        .revert() method is called.
        """
        CookieJar.__init__(self, policy)
        if filename is not None:
            try:
                # duck-typed "string-like" check: must support + with str
                filename+""
            except TypeError:
                # was a bare except:, which also swallowed KeyboardInterrupt
                # and SystemExit; a failed concatenation raises TypeError
                raise ValueError("filename must be string-like")
        self.filename = filename
        self.delayload = bool(delayload)
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Save cookies to a file.  Subclasses must implement this."""
        raise NotImplementedError()
    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file.

        Raises ValueError when no filename is given here or at construction.
        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        f = open(filename)
        try:
            self._really_load(f, filename, ignore_discard, ignore_expires)
        finally:
            f.close()
    def revert(self, filename=None,
               ignore_discard=False, ignore_expires=False):
        """Clear all cookies and reload cookies from a saved file.
        Raises LoadError (or IOError) if reversion is not successful; the
        object's state will not be altered if this happens.
        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        self._cookies_lock.acquire()
        try:
            old_state = copy.deepcopy(self._cookies)
            self._cookies = {}
            try:
                self.load(filename, ignore_discard, ignore_expires)
            except (LoadError, IOError):
                # restore the previous state on a failed reload
                self._cookies = old_state
                raise
        finally:
            # Always release the lock; the original released it outside any
            # finally, so a raised exception left the RLock held forever.
            self._cookies_lock.release()
from _LWPCookieJar import LWPCookieJar, lwp_cookie_str
from _MozillaCookieJar import MozillaCookieJar
| Python |
# Characters that quote() never percent-encodes, regardless of the
# caller-supplied "safe" set (the RFC 2396 unreserved characters).
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-')
# Cache of char -> encoded-char maps, keyed by (safe, always_safe).
_safemaps = {}
def quote(s, safe = '/'):
    """quote('abc def') -> 'abc%20def'
    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted.
    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters.
    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                  "$" | ","
    Each of these characters is reserved in some component of a URL,
    but not necessarily in all of them.
    By default, the quote function is intended for quoting the path
    section of a URL.  Thus, it will not encode '/'.  This character
    is reserved, but in typical usage the quote function is being
    called on a path where the existing slash characters are used as
    reserved characters.
    """
    cachekey = (safe, always_safe)
    if cachekey in _safemaps:
        safe_map = _safemaps[cachekey]
    else:
        # build (and memoize) the char -> replacement table for this safe set
        allchars = safe + always_safe
        safe_map = {}
        for code in range(256):
            ch = chr(code)
            if ch in allchars:
                safe_map[ch] = ch
            else:
                safe_map[ch] = '%%%02X' % code
        _safemaps[cachekey] = safe_map
    return ''.join(map(safe_map.__getitem__, s))
# Maps every two-digit hex string, in both lower and upper case
# ('2f' and '2F'), to the corresponding character chr(0x2f).
_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
_hextochr.update(('%02X' % i, chr(i)) for i in range(256))
def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    # Split on '%'; every piece after the first should begin with two
    # hex digits.  Pieces that don't decode keep their literal '%'.
    parts = s.split('%')
    res = [parts[0]]
    for piece in parts[1:]:
        try:
            res.append(_hextochr[piece[:2]] + piece[2:])
        except KeyError:
            res.append('%' + piece)
        except UnicodeDecodeError:
            res.append(unichr(int(piece[:2], 16)) + piece[2:])
    return "".join(res)
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        # compile lazily and cache at module level
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')
    m = _hostprog.match(url)
    if m is None:
        return None, url
    return m.group(1, 2)
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        # compile lazily and cache at module level
        import re
        _typeprog = re.compile('^([^/:]+):')
    m = _typeprog.match(url)
    if m is None:
        return None, url
    scheme = m.group(1)
    return scheme.lower(), url[len(scheme) + 1:]
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Extra install requirements for JSON support.
json = []
# If jsonlib2 is already installed we need nothing else; otherwise
# Python >= 2.6 ships the json module, and older versions need simplejson.
try:
    import jsonlib2
except ImportError:
    major, minor, micro, releaselevel, serial = sys.version_info
    if major <= 2 and minor < 6:
        json.append("simplejson")
setup(
name='freebase',
version='1.01',
author='Nick Thompson',
author_email='nix@metaweb.com',
maintainer_email='developers@freebase.com',
license='BSD',
url='http://code.google.com/p/freebase-python/',
description='Python client library for the freebase.com service',
long_description="""A Python library providing a convenient
wrapper around the freebase.com service api, as well as some
utility functions helpful in writing clients of the api.""",
packages=['freebase', 'freebase.api', 'freebase.fcl'],
entry_points = {
'console_scripts': [
'fcl = freebase.fcl.fcl:main',
'fb_save_base = freebase.schema_cmd:fb_save_base',
'fb_save_type = freebase.schema_cmd:fb_save_type',
'fb_restore = freebase.schema_cmd:fb_restore'
]
},
test_suite = "test.runtests.main",
install_requires=[] + json,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB TECHNOLOGIES BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import unittest
import sys, logging
import freebase
import random
import getlogindetails
from freebase.api import HTTPMetawebSession, MetawebError
# Placeholder credentials; replaced below via getlogindetails when left
# at their defaults.
USERNAME = 'username'
PASSWORD = 'password'
# All tests run against the sandbox service, never production.
API_HOST = 'http://sandbox-freebase.com'
TEST_QUERY = {'id': 'null', 'name': 'Sting'}
# Module-level session logged in once and shared by the tests below.
s = HTTPMetawebSession(API_HOST)
if USERNAME == "username" and PASSWORD == "password":
    USERNAME, PASSWORD = getlogindetails.main()
s.login(USERNAME, PASSWORD)
class TestFreebase(unittest.TestCase):
def test_freebase_dot_login_logout(self):
freebase.login(username=USERNAME, password=PASSWORD)
self.assertNotEqual(freebase.user_info(), None)
self.assertEqual(freebase.loggedin(), True)
freebase.logout()
self.assertRaises(MetawebError, freebase.user_info)
self.assertEqual(freebase.loggedin(), False)
def test_login_logout(self):
mss = HTTPMetawebSession(API_HOST, username=USERNAME,
password=PASSWORD)
mss.login()
user_info = mss.user_info()
self.assertNotEqual(None, user_info)
self.assertEqual(user_info.code, "/api/status/ok")
self.assertEqual(mss.loggedin(), True)
mss.logout()
self.assertRaises(MetawebError, mss.user_info)
self.assertEqual(mss.loggedin(), False)
def test_freebase_dot_read(self):
query = {'type':'/music/artist','guid':[{}],'name':'Sting', 'album':[{}]}
result = freebase.mqlread(query)
self.assertNotEqual(None, result)
self.assert_(result.has_key('guid'))
self.assert_(result.has_key('type'))
self.assert_(result.has_key('name'))
self.assert_(result.has_key('album'))
self.assertEqual(type([]), type(result['album']))
self.assert_(len(result['album']) > 0)
self.assertEqual( 'Sting', result['name'])
self.assertEqual('#9202a8c04000641f8000000000092a01', result['guid'][0]['value'])
def test_freebase_dot_write(self):
read_query = {'type':'/music/artist','name':'Yanni\'s Cousin Tom', 'id':{}}
freebase.sandbox.login(username=USERNAME, password=PASSWORD)
result = freebase.sandbox.mqlread(read_query)
self.assertEqual(None, result)
write_query = {'create':'unless_exists', 'type':'/music/artist','name':'Yanni'}
write_result = freebase.sandbox.mqlwrite(write_query)
self.assertNotEqual(None, write_result)
self.assert_(write_result.has_key('create'))
self.assert_(write_result.has_key('type'))
self.assert_(write_result.has_key('name'))
self.assertEqual('existed', write_result['create'])
self.assertEqual('Yanni', write_result['name'])
self.assertEqual('/music/artist', write_result['type'])
def test_read(self):
query = {'type':'/music/artist','guid':[{}],'name':'Sting', 'album':[{}]}
mss = HTTPMetawebSession(API_HOST)
result = mss.mqlread(query)
self.assertNotEqual(None, result)
self.assert_(result.has_key('guid'))
self.assert_(result.has_key('type'))
self.assert_(result.has_key('name'))
self.assert_(result.has_key('album'))
self.assertEqual(type([]), type(result['album']))
self.assert_(len(result['album']) > 0)
self.assertEqual( 'Sting', result['name'])
self.assertEqual('#9202a8c04000641f8000000000092a01', result['guid'][0]['value'])
def test_mqlreaditer(self):
filmq = [{'id': None,
'initial_release_date>=': '2009',
'name': None,
'type': '/film/film'
}]
r0 = freebase.mqlreaditer(filmq)
r1 = freebase.mqlreaditer(filmq[0]) # The difference between [{}] and []. mqlreaditer should be able to handle both
self.assertNotEqual(r0, None)
self.assertEqual([a for a in r0], [b for b in r1])
# and let's test it for mqlread, just in case
# actually, for mqlread, it must be [{}], because there are lots of elements
m0 = freebase.mqlread(filmq)
m1 = lambda : freebase.mqlread(filmq[0])
self.assertRaises(MetawebError, m1)
self.assertNotEqual(m0, None)
def test_ridiculously_long_write(self):
q = [{
"id":None,
"id|=":["/guid/9202a8c04000641f80000000000" + str(a) for a in range(10000,10320)]
}]
self.assert_(len(str(q)), 1024)
self.assertNotEqual(len(freebase.mqlread(q)), 0)
def test_write(self):
read_query = {'type':'/music/artist','name':'Yanni\'s Cousin Tom', 'id':{}}
mss = HTTPMetawebSession(API_HOST, username=USERNAME, password=PASSWORD)
result = mss.mqlread(read_query)
self.assertEqual(None, result)
write_query = {'create':'unless_exists', 'type':'/music/artist','name':'Yanni'}
mss.login()
write_result = mss.mqlwrite(write_query)
self.assertNotEqual(None, write_result)
self.assert_(write_result.has_key('create'))
self.assert_(write_result.has_key('type'))
self.assert_(write_result.has_key('name'))
self.assertEqual('existed', write_result['create'])
self.assertEqual('Yanni', write_result['name'])
self.assertEqual('/music/artist', write_result['type'])
def test_trans_blurb(self):
kurt = "/en/kurt_vonnegut"
blurb = freebase.blurb(kurt)
self.assert_(blurb.startswith("Kurt Vonnegut"))
self.assertNotEqual(len(blurb), 0)
blurb14 = freebase.blurb(kurt, maxlength=14)
blurb57 = freebase.blurb(kurt, maxlength=57)
self.assertNotEqual(len(blurb14), len(blurb57))
blurbpar = freebase.blurb(kurt, break_paragraphs=True, maxlength=20000)
blurbnopar = freebase.blurb(kurt, break_paragraphs=False, maxlength=20000)
# self.assertNotEqual(blurbpar, blurbnopar) this doesn't work unless I get a good example
# of an article with paragraphs.
def test_trans_raw(self):
kurt = "/en/kurt_vonnegut"
self.assertRaises(MetawebError, lambda: freebase.raw(kurt))
r = freebase.mqlread({"id":kurt, "/common/topic/article":[{"id":None, "optional":True, "limit":1}]})
raw = freebase.raw(r["/common/topic/article"][0].id)
self.assertNotEqual(len(raw), 0)
# trans should also work
trans = freebase.trans(r["/common/topic/article"][0].id)
self.assertEqual(trans, raw)
def test_unsafe(self):
kurt = "/en/kurt_vonnegut"
self.assertRaises(MetawebError, lambda: freebase.unsafe(kurt))
r = freebase.mqlread({"id":kurt, "/common/topic/article":[{"id":None, "optional":True, "limit":1}]})
unsafe = freebase.unsafe(r["/common/topic/article"][0].id)
self.assertNotEqual(len(unsafe), 0)
# we need an example of getting unsafe data
# ...
def test_trans_image_thumb(self):
kurt = "/en/kurt_vonnegut"
r = freebase.mqlread({"id":kurt, "/common/topic/image":[{"id":None, "optional":True, "limit":1}]})
imageid = r["/common/topic/image"][0].id
rawimage = freebase.raw(imageid)
thumbedimage = freebase.image_thumb(imageid, maxheight=99)
self.assertNotEqual(rawimage, thumbedimage)
def test_upload(self):
my_text = "Kurt Vonnegut was an author! " + str(random.random())
freebase.sandbox.login(USERNAME, PASSWORD)
response = freebase.sandbox.upload(my_text, "text/plain")
self.assertEqual(freebase.sandbox.raw(response.id), my_text)
# since it's text/plain, blurb should also be equal
self.assertEqual(freebase.sandbox.blurb(response.id), my_text)
def is_kurt_there(self, results):
for result in results:
if result.name == "Kurt Vonnegut":
return True
return False
def test_search(self):
r0 = freebase.search("Kurt V")
self.assertEqual(self.is_kurt_there(r0), True)
r1 = freebase.search("Kurt V", type=["/location/citytown"])
self.assertEqual(self.is_kurt_there(r1), False)
r2 = freebase.search("Kurt V", type=["/location/citytown", "/music/artist"])
self.assertEqual(self.is_kurt_there(r2), False)
self.assertNotEqual(len(r0), len(r1))
self.assertNotEqual(len(r0), len(r2))
self.assertNotEqual(len(r1), len(r2))
def test_touch(self):
# this one's hard to test... let's just make sure it works.
freebase.touch()
def test_geosearch(self):
self.assertRaises(Exception, freebase.geosearch)
r0 = freebase.geosearch(location="/en/california")
self.assertNotEqual(len(r0), 0)
json = freebase.geosearch(location="/en/san_francisco", format="json")
kml = freebase.geosearch(location="/en/san_francisco", format="kml")
self.assertNotEqual(json, kml)
def test_uri_submit(self):
# test a pdf
r = freebase.sandbox.uri_submit("http://www.jcbl.or.jp/game/nec/necfest07/nec2007_data/HayashiMiyake.pdf", content_type="application/pdf")
self.assertEqual(r['/type/content/media_type'], 'application/pdf')
# test an image
r = freebase.sandbox.uri_submit("http://datamob.org/media/detail_freebase.png")
self.assertEqual(r['/type/content/media_type'], 'image/png')
def test_version(self):
    """Smoke-test freebase.version(); just check it returns something non-empty."""
    r = freebase.version()
    self.assertNotEqual(len(r), 0)
def test_status(self):
    """The status service should answer with a non-empty payload reporting 200 OK."""
    r = freebase.status()
    self.assertNotEqual(len(r), 0)
    self.assertEqual(r["status"], u"200 OK")
def test_private_domains(self):
    """Create a private domain, verify its metadata, then delete it and re-check."""
    freebase.sandbox.login(username=USERNAME, password=PASSWORD)
    # A random suffix keeps the domain key unique across test runs.
    created = freebase.sandbox.create_private_domain("superfly" + str(int(random.random() * 1e10)), "Superfly!")
    query = {"id" : created["domain_id"], "*" : None}
    before = freebase.sandbox.mqlread(query)
    self.assertEqual(before["type"], ["/type/domain"])
    self.assertNotEqual(len(before["key"]), 0)
    self.assertEqual(before["attribution"], before["creator"])
    freebase.sandbox.delete_private_domain(before["key"][0])
    # After deletion the object keeps its id but loses key, type, and name.
    after = freebase.sandbox.mqlread(query)
    self.assertEqual(len(after["key"]), 0)
    self.assertEqual(len(after["type"]), 0)
    self.assertEqual(after["name"], None)
    self.assertEqual(after["creator"], before["attribution"])
# Run the whole test case when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import os
import os.path
import getpass
import freebase
from freebase.api.session import MetawebError
# Path (relative to the repository root) where cached credentials are stored.
passwordfile = "test/.password.txt"
def main(create_password_file=False):
    """Return a (username, password) pair for running the tests.

    If no cached password file exists, prompt the user interactively and
    verify the credentials against freebase; when create_password_file is
    true, cache them for later runs.  If the cache exists, read and verify
    it, re-raising MetawebError when the cached credentials no longer work.
    """
    USERNAME, PASSWORD = "", ""
    if not os.path.isfile(passwordfile):
        print("In order to run the tests, we need to use a valid freebase username and password")
        USERNAME = raw_input("Please enter your username: ")
        try:
            PASSWORD = getpass.getpass("Please enter your password: ")
        except getpass.GetPassWarning:
            # No safe (non-echoing) tty available; fall back to a plain prompt.
            PASSWORD = raw_input("Please enter your password: ")
        # Verify the credentials right away so typos fail fast.
        freebase.login(USERNAME, PASSWORD)
        print("Thanks!")
        if create_password_file:
            writepassword(passwordfile, USERNAME, PASSWORD)
    else:
        # 'with' guarantees the file is closed even if parsing fails.
        with open(passwordfile, "r") as pf:
            USERNAME, PASSWORD = pf.read().split("\n")
        try:
            freebase.login(USERNAME, PASSWORD)
        except MetawebError:
            print("The username/password in your .password.txt file are incorrect")
            # Bare 'raise' re-raises the original error with its traceback
            # intact; 'raise me' would have discarded it.
            raise
    return USERNAME, PASSWORD
def writepassword(passwordfile, username, password):
    """Cache username and password in passwordfile.

    The format -- username, a newline, then the password with no trailing
    newline -- is exactly what main() expects when it reads the file back.
    """
    # 'with' closes the file even if the write fails part-way through.
    with open(passwordfile, "w") as fh:
        fh.write(username + "\n" + password)
import unittest
import os
import os.path
import freebase
import getlogindetails
def main():
    """Build and return the combined test suite, managing the password cache."""
    created = False
    passwordfile = "test/.password.txt"
    # Make sure login details exist, remembering whether we created the cache.
    if not os.path.isfile(passwordfile):
        created = True
        USERNAME, PASSWORD = getlogindetails.main(create_password_file=True)
    USERNAME, PASSWORD = getlogindetails.main()
    # Import the test modules only once credentials are in place.
    import test_freebase
    import test_schema_manipulation
    import test_hardcore_schema_manipulation
    loader = unittest.TestLoader()
    freebase_suite = loader.loadTestsFromTestCase(test_freebase.TestFreebase)
    schema_suite = loader.loadTestsFromTestCase(test_schema_manipulation.TestSchemaManipulation)
    hardcore_suite = loader.loadTestsFromTestCase(test_hardcore_schema_manipulation.TestHardcoreSchemaManipulation)
    # This is very strange. If you run the freebase tests first, two tests in
    # the schema-manipulation suite fail because of caching issues -- the
    # changes do show up on freebase itself.  Running the schema suite last
    # works around it; every test passes when run individually.
    anotherrun = unittest.TestSuite([freebase_suite, hardcore_suite, schema_suite])
    # Remove the password cache if this run created it.
    if created: os.remove(passwordfile)
    return anotherrun
# Build the suite (and clean up the password cache) when run as a script.
if __name__ == '__main__':
    main()
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import sys
# Prefer setuptools (needed for entry_points and test_suite); fall back to
# plain distutils so the package can still install where setuptools is absent.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Extra install requirements for the JSON backend.  If jsonlib2 is already
# installed we need nothing further; otherwise interpreters older than 2.6
# (which lack the stdlib json module) must pull in simplejson.
json = []
try:
    import jsonlib2
except ImportError:
    version = sys.version_info
    if version[0] <= 2 and version[1] < 6:
        json.append("simplejson")
setup(
name='freebase',
version='1.0.7',
author='Nick Thompson',
author_email='nix@metaweb.com',
maintainer_email='developers@freebase.com',
license='BSD',
url='http://code.google.com/p/freebase-python/',
description='Python client library for the freebase.com service',
long_description="""A Python library providing a convenient
wrapper around the freebase.com service api, as well as some
utility functions helpful in writing clients of the api.""",
packages=['freebase', 'freebase.api', 'freebase.fcl'],
entry_points = {
'console_scripts': [
'fcl = freebase.fcl.fcl:main',
'fb_save_base = freebase.schema_cmd:fb_save_base',
'fb_save_type = freebase.schema_cmd:fb_save_type',
'fb_restore = freebase.schema_cmd:fb_restore'
]
},
test_suite = "test.runtests.main",
install_requires=[] + json,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python |
#========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB TECHNOLOGIES BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
# This is the full "metaweb.py" module from the Metaweb API documentation
#
# In the documentation, each function is presented as a separate
# example. This is the whole file.
#
# If you find any errors or have suggestions for improving this module,
# send them to the Freebase developers mailing list: developers@freebase.com
# You can subscribe to the mailing list at http://lists.freebase.com/
#
import httplib
import urllib # URL encoding
import urllib2 # Higher-level URL content fetching
import simplejson # JSON serialization and parsing
import cookielib # Cookie handling
import os
#
# When experimenting, use the sandbox-freebase.com service.
# Every Monday, sandbox-freebase.com is erased and it is updated
# with a fresh copy of data from www.freebase.com. This makes
# it an ideal place to experiment.
#
host = 'sandbox-freebase.com'            # The Metaweb host
readservice = '/api/service/mqlread'     # Path to mqlread service
loginservice = '/api/account/login'      # Path to login service
writeservice = '/api/service/mqlwrite'   # Path to mqlwrite service
uploadservice = '/api/service/upload'    # Path to upload service
searchservice = '/api/service/search'    # Path to search service
credentials = None  # default credential from login()
escape = False      # default escape, set to 'html' for HTML escaping
permission = None   # default permission used when creating new objects
debug = False       # default debug setting
# Keep cookies in the user's home directory.  os.path.expanduser('~') works
# on every platform, unlike os.environ['HOME'] which raises KeyError on
# Windows (where HOME is normally unset).
cookiefile = os.path.join(os.path.expanduser("~"), ".metaweb.cookies.txt")
cookiejar = cookielib.LWPCookieJar()
if os.path.isfile(cookiefile):
    cookiejar.load(cookiefile)
# Install a urllib2 opener that sends and stores cookies from our jar.
urllib2.install_opener(
    urllib2.build_opener(
        urllib2.HTTPCookieProcessor(cookiejar)))
# If anything goes wrong when talking to a Metaweb service, we raise MQLError.
class MQLError(Exception):
    """Raised whenever a Metaweb service reports a failure."""

    def __init__(self, value):
        # Keep the raw error payload available to callers via .value.
        self.value = value

    def __str__(self):
        # Show the payload's repr so both strings and dicts read clearly.
        return repr(self.value)
# Submit the MQL query q and return the result as a Python object.
# If authentication credentials are supplied, use them in a cookie.
# Raises MQLError if the query was invalid. Raises urllib2.HTTPError if
# mqlread returns an HTTP status code other than 200 (which should not happen).
def read(q, credentials=credentials, escape=escape):
    """Submit the MQL query q and return the parsed result.

    Credentials, if supplied, are sent as a cookie (mqlread authentication
    is a temporary restriction).  Raises MQLError when the query is invalid
    and urllib2.HTTPError on a non-200 HTTP status (which should not happen).
    """
    # Wrap the query in its envelope, adding the escape directive unless the
    # default HTML escaping is wanted.
    envelope = {'query': q}
    if escape != 'html':
        envelope['escape'] = escape if escape else False
    # Encode the JSON envelope as the 'query' form parameter.
    encoded = urllib.urlencode({'query': simplejson.dumps(envelope)})
    # Build a request for the mqlread service; giving it a body below makes
    # urllib2 issue a POST automatically.
    req = urllib2.Request('http://%s%s' % (host, readservice))
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    if credentials:
        req.add_header('Cookie', credentials)
    req.add_data(encoded)
    # Open the URL and parse its JSON content.
    f = urllib2.urlopen(req)
    inner = simplejson.load(f)
    # Transport-level problems already surfaced above as urllib2.HTTPError;
    # query-level problems surface here as an error status in the envelope.
    if not inner['code'].startswith('/api/status/ok'):
        if debug:
            print(q)
            print(inner)
            print(f.info()['X-Metaweb-Cost'])
            print(f.info()['X-Metaweb-TID'])
        error = inner['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    # On success, unwrap and return the result.
    return inner['result']
# Submit the MQL query q and return the result as a Python object
# This function behaves like read() above, but uses cursors so that
# it works even for very large result sets. See also the cursor class below.
def readall(q, credentials=credentials, escape=escape):
    """Like read(), but pages through very large result sets with a cursor.

    Returns the concatenated list of results from every page.  See also the
    cursor class below for a lazy equivalent.
    """
    # This is the start of the mqlread URL.
    # We just need to append the envelope to it
    url = 'http://%s%s' % (host, readservice)
    # The query and most of the envelope are constant. We just need to append
    # the encoded cursor value and some closing braces to this prefix string
    jsonq = simplejson.dumps(q)
    # Add escape if needed
    # NOTE(review): a truthy non-'html' escape value is spliced into the JSON
    # verbatim -- assumes it is already a valid JSON token; confirm callers.
    if escape != 'html':
        jsonq += ',"escape":' + ('false' if not escape else escape)
    cursor = 'true' # This is the initial value of the cursor
    results = [] # We accumulate results in this array
    # Loop until mqlread tells us there are no more results
    while cursor:
        # append the cursor and the closing braces to the envelope
        envelope = urllib.urlencode({'query': '{"query":' + jsonq + ',"cursor":' + cursor + '}'})
        # Begin an HTTP request for the URL
        req = urllib2.Request(url)
        # The body of the POST request is encoded URL parameters
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        # Send our authentication credentials, if any, as a cookie.
        # The need for mqlread authentication is a temporary restriction.
        if credentials:
            req.add_header('Cookie', credentials)
        # Use the encoded envelope as the value of the q parameter in the body
        # of the request. Specifying a body automatically makes this a POST.
        req.add_data(envelope)
        # Read and parse the URL contents
        f = urllib2.urlopen(req) # Open URL
        inner = simplejson.load(f) # Parse JSON response
        # Raise a MQLError if there were errors
        if not inner['code'].startswith('/api/status/ok'):
            if debug: print q
            if debug: print inner
            if debug: print f.info()['X-Metaweb-Cost']
            if debug: print f.info()['X-Metaweb-TID']
            error = inner['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        # Append this batch of results to the main array of results.
        results.extend(inner['result'])
        # Finally, get the new value of the cursor for the next iteration
        cursor = inner['cursor']
        if cursor: # If it is not false, put it
            cursor = '"' + cursor + '"' # in quotes as a JSON string
    # Now that we're done with the loop, return the results array
    return results
# Submit multiple MQL queries and return the result as a Python array.
# If authentication credentials are supplied, use them in a cookie.
# Raises MQLError if the query was invalid. Raises urllib2.HTTPError if
# mqlread returns an HTTP status code other than 200 (which should not happen).
def readmulti(queries, credentials=credentials, escape=escape):
    """Submit several MQL queries in one request; return a list of results.

    Raises MQLError if the batch as a whole or any individual query failed,
    and urllib2.HTTPError for transport-level failures.
    """
    encoded = ""
    for i in range(0, len(queries)):
        # Put the query in an envelope
        envelope = {'query':queries[i]}
        # Add escape if needed
        if escape != 'html':
            envelope['escape'] = False if not escape else escape
        if i > 0:
            encoded += ","
        # Each query is keyed "q0", "q1", ... in the outer envelope.
        encoded += '"q%d":%s' % (i, simplejson.dumps(envelope))
    # URL encode the outer envelope
    encoded = urllib.urlencode({'queries': "{" + encoded + "}"})
    # Build the URL and create a Request object for it
    url = 'http://%s%s' % (host, readservice)
    req = urllib2.Request(url)
    # The body of the POST request is encoded URL parameters
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    # Send our authentication credentials, if any, as a cookie.
    # The need for mqlread authentication is a temporary restriction.
    if credentials: req.add_header('Cookie', credentials)
    # Use the encoded envelope as the value of the q parameter in the body
    # of the request. Specifying a body automatically makes this a POST.
    req.add_data(encoded)
    # Now open the URL and parse its JSON content
    f = urllib2.urlopen(req) # Open the URL
    inner = simplejson.load(f) # Parse JSON response to an object
    # If anything was wrong with the invocation, mqlread will return an HTTP
    # error, and the code above will raise urllib2.HTTPError.
    # If anything was wrong with the batch, we won't get an HTTP error, but
    # will get an error status code in the response envelope. In this case
    # we raise our own MQLError exception.
    if not inner['code'].startswith('/api/status/ok'):
        if debug: print queries
        if debug: print inner
        if debug: print f.info()['X-Metaweb-Cost']
        if debug: print f.info()['X-Metaweb-TID']
        error = inner['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    # extract the results, checking each per-query envelope for errors
    results = []
    for i in range(0, len(queries)):
        result = inner["q%d" % i]
        if not result['code'].startswith('/api/status/ok'):
            if debug: print queries[i]
            if debug: print result
            if debug: print f.info()['X-Metaweb-Cost']
            if debug: print f.info()['X-Metaweb-TID']
            error = result['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        results.append(result['result'])
    # If there was no error, then just return the result from the envelope
    return results
# Submit the specified username and password to the Metaweb login service.
# Return opaque authentication credentials on success.
# Raise MQLError on failure.
def login(username, password):
    """Log in to the Metaweb login service; return authentication credentials.

    The return value is a ';'-joined string of name=value cookie pairs,
    suitable for the credentials argument of the other functions here.
    Raises MQLError if the login is rejected or the HTTP status is not 200.
    """
    # Use low-level httplib instead of urllib2 so we can read the
    # Set-Cookie response header and manage cookies explicitly.
    conn = httplib.HTTPConnection(host)
    conn.request('POST',        # POST the request
                 loginservice,  # The URL path /api/account/login
                 # The body of the request: encoded username/password
                 urllib.urlencode({'username':username, 'password':password}),
                 # This header specifies how the body of the post is encoded.
                 {'Content-type': 'application/x-www-form-urlencoded'})
    # Get the response from the server
    response = conn.getresponse()
    if response.status == 200: # We get HTTP 200 OK even if login fails
        # Parse response body and raise a MQLError if login failed
        body = simplejson.loads(response.read())
        if not body['code'].startswith('/api/status/ok'):
            # BUG FIX: the debug output here used to reference 'inner' and
            # 'f', neither of which exists in this function (NameError when
            # debug was enabled); print the parsed response body instead.
            if debug:
                print(body)
            error = body['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        # Otherwise return cookies to serve as authentication credentials.
        # Each set-cookie specification is "name=value; metadata..." and the
        # specifications are comma-separated; we keep just the name=value
        # pairs and rejoin them with semicolons.
        cookies = response.getheader('set-cookie').split(',')
        return ';'.join([c[0:c.index(';')] for c in cookies])
    else: # This should never happen
        raise MQLError('HTTP Error: %d %s' % (response.status,response.reason))
# Submit the MQL write q and return the result as a Python object.
# Authentication credentials are required, obtained from login()
# Raises MQLError if the query was invalid. Raises urllib2.HTTPError if
# mqlwrite returns an HTTP status code other than 200
def write(query, credentials=credentials, escape=escape, permission=permission):
    """Submit the MQL write query and return the result.

    Requires credentials obtained from login().  Raises MQLError if the
    query was invalid and urllib2.HTTPError on a non-200 HTTP status.  On
    success the shared cookiejar is saved to disk.
    """
    # We're requesting this URL
    req = urllib2.Request('http://%s%s' % (host, writeservice))
    # Send our authentication credentials as a cookie
    if credentials:
        req.add_header('Cookie', credentials)
    # This custom header is required and guards against XSS attacks
    req.add_header('X-Metaweb-Request', 'True')
    # The body of the POST request is encoded URL parameters
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    # Wrap the query object in a query envelope
    envelope = {'qname': {'query': query}}
    # Add escape if needed
    if escape != 'html':
        envelope['qname']['escape'] = (False if not escape else escape)
    # Add permissions if needed
    if permission:
        envelope['qname']['use_permission_of'] = permission
    # JSON encode the envelope
    encoded = simplejson.dumps(envelope)
    # Use the encoded envelope as the value of the q parameter in the body
    # of the request. Specifying a body automatically makes this a POST.
    req.add_data(urllib.urlencode({'queries':encoded}))
    # Now do the POST
    f = urllib2.urlopen(req)
    response = simplejson.load(f) # Parse HTTP response as JSON
    inner = response['qname'] # Open outer envelope; get inner envelope
    # If anything was wrong with the invocation, mqlwrite will return an HTTP
    # error, and the code above will raise urllib2.HTTPError.
    # If anything was wrong with the query, we will get an error status code
    # in the response envelope, and we raise our own MQLError exception.
    if not inner['code'].startswith('/api/status/ok'):
        if debug: print query
        if debug: print inner
        if debug: print f.info()['X-Metaweb-Cost']
        if debug: print f.info()['X-Metaweb-TID']
        error = inner['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    # save cookie
    cookiejar.save(cookiefile)
    # If there was no error, then just return the result from the envelope
    return inner['result']
# Upload the specified content (and give it the specified type).
# Return the guid of the /type/content object that represents it.
# The returned guid can be used to retrieve the content with /api/trans/raw.
def upload(content, type, credentials=credentials):
    """Upload content with the given MIME type; return its content id.

    The returned id names the /type/content object that represents the
    upload and can be passed to /api/trans/raw to retrieve it.  Raises
    MQLError on a service error.
    """
    # This is the URL we POST content to
    url = 'http://%s%s' % (host, uploadservice)
    # Build the HTTP request
    req = urllib2.Request(url, content)          # URL and content to POST
    req.add_header('Content-Type', type)         # Content type header
    if credentials:
        req.add_header('Cookie', credentials)    # Authentication header
    req.add_header('X-Metaweb-Request', 'True')  # Guard against XSS attacks
    f = urllib2.urlopen(req)                     # POST the request
    response = simplejson.load(f)                # Parse the response
    if not response['code'].startswith('/api/status/ok'):
        # BUG FIX: the debug output used to print 'inner', which is not
        # defined in this function; print the parsed response instead.
        if debug:
            print(response)
            print(f.info()['X-Metaweb-Cost'])
            print(f.info()['X-Metaweb-TID'])
        error = response['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    return response['result']['id'] # Extract and return content id
# Search for topics
def search(query, type=None, start=0, limit=0):
    """Search for topics matching query; return the result list.

    type restricts the search to the given type id(s); start and limit
    page through results when positive.  Raises MQLError on a service error.
    """
    args = {"query": query}
    if type:
        args["type"] = type
    if start > 0:
        args["start"] = start
    if limit > 0:
        args["limit"] = limit
    url = 'http://%s%s?%s' % (host, searchservice, urllib.urlencode(args))
    f = urllib2.urlopen(url)
    response = simplejson.load(f) # Parse the response
    if not response['code'].startswith('/api/status/ok'):
        # BUG FIX: the debug output used to print 'inner', which is not
        # defined in this function; print the parsed response instead.
        if debug:
            print(query)
            print(response)
            print(f.info()['X-Metaweb-Cost'])
            print(f.info()['X-Metaweb-TID'])
        error = response['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    return response['result']
# Cursor for iterating over large data sets
# For example:
# query = {"name": None, "type":"/type/media_type"}
# for row in metaweb.cursor([query]):
# print row
class cursor:
    """Iterator over a large mqlread result set, fetching pages lazily.

    Example:
        query = {"name": None, "type": "/type/media_type"}
        for row in metaweb.cursor([query]):
            print row
    """
    def __init__(self, query, credentials=credentials, escape=escape):
        self.query = query
        self.credentials = credentials
        self.index = 0        # position within the current page
        self.results = []     # the current page of results
        self.cursor = 'true'  # JSON-encoded cursor token; falsy when exhausted
        self.url = 'http://%s%s' % (host, readservice)
        self.jsonq = simplejson.dumps(self.query)
        if escape != 'html':
            self.jsonq += ',"escape":' + ('false' if not escape else escape)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol (next, not __next__).
        # return the next value from the current page, if any remain
        if self.index < len(self.results):
            result = self.results[self.index]
            self.index = self.index + 1
            return result
        # check if there is more
        if not self.cursor:
            raise StopIteration
        # append the cursor and the closing braces to the envelope
        envelope = urllib.urlencode({'query': '{"query":' + self.jsonq + ',"cursor":' + self.cursor + '}'})
        # Begin an HTTP request for the URL
        req = urllib2.Request(self.url)
        # The body of the POST request is encoded URL parameters
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        # Send our authentication credentials, if any, as a cookie.
        # The need for mqlread authentication is a temporary restriction.
        if self.credentials: req.add_header('Cookie', self.credentials)
        # Use the encoded envelope as the value of the q parameter in the body
        # of the request. Specifying a body automatically makes this a POST.
        req.add_data(envelope)
        # Read and parse the URL contents
        f = urllib2.urlopen(req) # Open URL
        inner = simplejson.load(f) # Parse JSON response
        # Raise a MQLError if there were errors
        if not inner['code'].startswith('/api/status/ok'):
            if debug: print self.query
            if debug: print inner
            if debug: print f.info()['X-Metaweb-Cost']
            if debug: print f.info()['X-Metaweb-TID']
            error = inner['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        # Remember the next cursor
        self.cursor = inner['cursor']
        if self.cursor: # If it is not false, put it
            self.cursor = '"' + self.cursor + '"' # in quotes as a JSON string
        # Append this batch of results to the main array of results.
        self.results = inner['result']
        if len(self.results) == 0:
            raise StopIteration
        # Return the first result of the freshly fetched page
        self.index = 1
        return self.results[0]
| Python |
# Optional HTTP client backends: each guard enables one of the client classes
# below when its dependencies are available.  Catch ImportError only -- the
# old bare 'except:' clauses silently swallowed every error (including real
# bugs raised while importing cookie_handlers).
try:
    from google.appengine.api import urlfetch
    from cookie_handlers import CookiefulUrlfetch
except ImportError:
    pass
try:
    import httplib2
    from cookie_handlers import CookiefulHttp
except ImportError:
    pass
try:
    import urllib2
    import socket
except ImportError:
    pass
import logging
import re
class Urllib2Client(object):
def __init__(self, cookiejar, rse):
cookiespy = urllib2.HTTPCookieProcessor(cookiejar)
self.opener = urllib2.build_opener(cookiespy)
self._raise_service_error = rse
self.log = logging.getLogger("freebase")
def __call__(self, url, method, body, headers):
req = urllib2.Request(url, body, headers)
try:
resp = self.opener.open(req)
except socket.error, e:
self.log.error('SOCKET FAILURE: %s', e.fp.read())
raise MetawebError, 'failed contacting %s: %s' % (url, str(e))
except urllib2.HTTPError, e:
self.log.error('HTTP ERROR: %s', e)
self._raise_service_error(url, e.code, e.info().type, e.fp.read())
for header in resp.info().headers:
self.log.debug('HTTP HEADER %s', header)
name, value = re.split("[:\n\r]", header, 1)
if name.lower() == 'x-metaweb-tid':
self.tid = value.strip()
return (resp, resp.read())
class Httplib2Client(object):
    """HTTP client callable built on httplib2, with cookie support."""

    def __init__(self, cookiejar, rse):
        self.cookiejar = cookiejar
        self._raise_service_error = rse  # callback: (url, status, ctype, body)
        # BUG FIX: self.log was never initialized, so the socket.error
        # handler below crashed with AttributeError instead of logging.
        self.log = logging.getLogger("freebase")
        self.httpclient = CookiefulHttp(cookiejar=self.cookiejar)

    def __call__(self, url, method, body, headers):
        """Perform the request; return (response, content)."""
        try:
            resp, content = self.httpclient.request(url, method=method,
                                                    body=body, headers=headers)
            if (resp.status != 200):
                self._raise_service_error(url, resp.status, resp['content-type'], content)
        except socket.error as e:
            # BUG FIX: socket.error has no 'fp' attribute; log the error itself.
            self.log.error('SOCKET FAILURE: %s', e)
            raise MetawebError('failed contacting %s: %s' % (url, str(e)))
        except httplib2.HttpLib2ErrorWithResponse as e:
            # NOTE(review): 'resp'/'content' are unbound here if request()
            # itself raised rather than returning -- confirm httplib2 only
            # raises this after producing a response.
            self._raise_service_error(url, resp.status, resp['content-type'], content)
        except httplib2.HttpLib2Error as e:
            raise MetawebError(u'HTTP error: %s' % (e,))
        #tid = resp.get('x-metaweb-tid', None)
        return (resp, content)
class UrlfetchClient(object):
    # HTTP client callable built on Google App Engine's urlfetch, with
    # cookie support supplied by CookiefulUrlfetch.
    def __init__(self, cookiejar, rse):
        self.cookiejar = cookiejar
        # Callback used to turn non-200 responses into service errors.
        self._raise_service_error = rse
        self.httpclient = CookiefulUrlfetch(cookiejar=self.cookiejar)
    def __call__(self, url, method, body, headers):
        # Perform the request; return (response, content).
        resp = self.httpclient.request(url, payload=body, method=method, headers=headers)
        if resp.status_code != 200:
            # NOTE(review): the error path reads resp.body while the success
            # path reads resp.content -- confirm the urlfetch response object
            # actually exposes both attributes.
            self._raise_service_error(url, resp.status_code, resp.headers['content-type'], resp.body)
        return (resp, resp.content)
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import re
import string
import urllib
def quotekey(ustr):
    """
    quote a unicode string to turn it into a valid namespace key
    """
    valid_always = string.ascii_letters + string.digits
    valid_interior_only = valid_always + '_-'
    # Normalize the input to a unicode string.
    if isinstance(ustr, str):
        s = unicode(ustr, 'utf-8')
    elif isinstance(ustr, unicode):
        s = ustr
    else:
        raise ValueError('quotekey() expects utf-8 string or unicode')
    # Characters outside the allowed alphabet become $XXXX hex escapes.
    def quoted(ch, allowed):
        return ch if ch in allowed else '$%04X' % ord(ch)
    # The first and last characters have a stricter alphabet than the
    # interior ones ('_' and '-' are only legal in the interior).
    out = [quoted(s[0], valid_always)]
    out.extend(quoted(ch, valid_interior_only) for ch in s[1:-1])
    if len(s) > 1:
        out.append(quoted(s[-1], valid_always))
    return str(''.join(out))
def unquotekey(key, encoding=None):
    """
    unquote a namespace key and turn it into a unicode string

    Inverse of quotekey(): '$XXXX' escapes become the code point U+XXXX.
    Returns unicode, or a byte string in `encoding` if one is supplied.
    Raises ValueError for characters quotekey() could not have produced.
    """
    valid_always = string.ascii_letters + string.digits
    output = []
    i = 0
    while i < len(key):
        if key[i] in valid_always:
            output.append(key[i])
            i += 1
        elif key[i] in '_-' and i != 0 and i != len(key) - 1:
            # BUG FIX: '_' and '-' are only legal in the interior of a key,
            # but the old check compared i against len(key), which can never
            # be true inside this loop, so a trailing '_' or '-' was wrongly
            # accepted.  Compare against the last index instead.
            output.append(key[i])
            i += 1
        elif key[i] == '$' and i+4 < len(key):
            # may raise ValueError if there are invalid characters
            output.append(unichr(int(key[i+1:i+5],16)))
            i += 5
        else:
            raise ValueError("unquote key saw invalid character '%s' at position %d" % (key[i], i))
    ustr = u''.join(output)
    if encoding is None:
        return ustr
    return ustr.encode(encoding)
# should this also include "'()" into safe?
def urlencode_pathseg(data):
    '''
    urlencode for placement between slashes in an url.
    '''
    # urllib.quote needs a byte string; encode unicode input as utf-8 first.
    if isinstance(data, unicode):
        data = data.encode('utf_8')
    # The safe set keeps characters that are legal inside a path segment.
    return urllib.quote(data, "~:@$!*,;=&+")
def id_to_urlid(id):
    """
    convert a mql id to an id suitable for embedding in a url path.

    Handles guid ('~...'), hash ('#...'), and slash-path ('/a/b') id forms.
    Raises ValueError for any other form; malformed ids trip the asserts.
    """
    segs = id.split('/')
    assert isinstance(id, str) and id != '', 'bad id "%s"' % id
    if id[0] == '~':
        assert len(segs) == 1
        # assume valid, should check
        return id
    if id[0] == '#':
        assert len(segs) == 1
        # assume valid, should check
        # '#' starts a fragment in a url, so percent-encode it.
        return '%23' + id[1:]
    if id[0] != '/':
        raise ValueError, 'unknown id format %s' % id
    # ok, we have a slash-path
    # requote components as keys and rejoin.
    # urlids do not have leading slashes!!!
    return '/'.join(urlencode_pathseg(unquotekey(seg)) for seg in segs[1:])
| Python |
# ==================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
"""
declarations for external metaweb api.
from metaweb.api import HTTPMetawebSession
mss = HTTPMetawebSession('sandbox-freebase.com')
print mss.mqlread([dict(name=None, type='/type/type')])
"""
# public API of this module
__all__ = ['MetawebError', 'MetawebSession', 'HTTPMetawebSession', 'attrdict']
__version__ = '1.0.4'

import os, sys, re
import cookielib

# compact separators for json.dumps: no whitespace after ',' or ':',
# keeping the query strings sent to the service as small as possible
SEPARATORS = (",", ":")
# json libraries rundown
# jsonlib2 is the fastest, but it's written in C, thus not as
# accessible.  json is included in python2.6.  simplejson
# is the same as json, shipped separately for older pythons.
# Fall through the candidates in order of preference.
try:
    import jsonlib2 as json
except ImportError:
    try:
        import json
    except ImportError:
        try:
            import simplejson as json
        except ImportError:
            try:
                # appengine provides simplejson at django.utils.simplejson
                from django.utils import simplejson as json
            except ImportError:
                # bug fix: message used a double negative ("unable to
                # import neither ... or"); also raise the specific
                # ImportError (a subclass of Exception, so callers
                # catching Exception still work)
                raise ImportError("unable to import any of json, simplejson, jsonlib2, or django.utils.simplejson")
# use the stdlib update_wrapper when available; otherwise fall back to
# a vendored copy (kept verbatim so it matches the stdlib behavior).
try:
    # python 2.5 and higher
    from functools import update_wrapper
except ImportError:
    # back-copied verbatim from python 2.6
    WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
    WRAPPER_UPDATES = ('__dict__',)
    def update_wrapper(wrapper,
                       wrapped,
                       assigned = WRAPPER_ASSIGNMENTS,
                       updated = WRAPPER_UPDATES):
        """Update a wrapper function to look like the wrapped function

           wrapper is the function to be updated
           wrapped is the original function
           assigned is a tuple naming the attributes assigned directly
           from the wrapped function to the wrapper function (defaults to
           functools.WRAPPER_ASSIGNMENTS)
           updated is a tuple naming the attributes of the wrapper that
           are updated with the corresponding attribute from the wrapped
           function (defaults to functools.WRAPPER_UPDATES)
        """
        for attr in assigned:
            setattr(wrapper, attr, getattr(wrapped, attr))
        for attr in updated:
            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
        # Return the wrapper so this can be used as a decorator via partial()
        return wrapper
# urllib.quote, with a stub fallback for restricted environments
# that lack the full urllib module
try:
    from urllib import quote as urlquote
except ImportError:
    from urlib_stub import quote as urlquote
import pprint
import socket
import logging
# ids of the metaweb types whose values are literals (primitive
# values) rather than object references
LITERAL_TYPE_IDS = set([
    "/type/int",
    "/type/float",
    "/type/boolean",
    "/type/rawstring",
    "/type/uri",
    "/type/text",
    "/type/datetime",
    "/type/bytestring",
    "/type/id",
    "/type/key",
    "/type/value",
    "/type/enumeration"
    ])
class Delayed(object):
    """
    Lazy wrapper for callables passed to log statements.

    The wrapped call is deferred until the object is rendered as a
    string, so when the logger discards the record (level too low)
    the potentially expensive call never happens.

    A few examples:

    json.dumps is never called because the logger never
    tries to format the result
    >>> logging.debug(Delayed(json.dumps, q))

    This time json.dumps() is actually called:
    >>> logging.warn(Delayed(json.dumps, q))
    """
    def __init__(self, f, *args, **kwds):
        # remember the call; do not perform it yet
        self.f = f
        self.args = args
        self.kwds = kwds

    def __str__(self):
        # perform the deferred call and render its result
        result = self.f(*self.args, **self.kwds)
        return str(result)
def logformat(result):
    """
    Render a dict/list result as indented json for logging.

    For dict results the enclosing braces (and the trailing
    newline) are trimmed off.
    """
    text = json.dumps(result, indent=2)
    if text.startswith('{'):
        text = text[1:-2]
    return text
from httpclients import Httplib2Client, Urllib2Client, UrlfetchClient

# Pick the http client implementation for this environment, in order
# of preference: appengine urlfetch, httplib2, then plain urllib2.
# Check for urlfetch first so that urlfetch is used when running the appengine SDK
try:
    import google.appengine.api.urlfetch
    from cookie_handlers import CookiefulUrlfetch
    http_client = UrlfetchClient
except ImportError:
    try:
        import httplib2
        from cookie_handlers import CookiefulHttp
        http_client = Httplib2Client
    except ImportError:
        # last resort: stdlib urllib2, without httplib2's features
        import urllib2
        httplib2 = None
        CookiefulHttp = None
        http_client = Urllib2Client
def urlencode_weak(s):
    """Percent-encode s for use in a url, leaving ',', '/', ':'
    and '$' unescaped."""
    return urlquote(s, safe=',/:$')
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/361668
class attrdict(dict):
    """A dict whose items can also be accessed as member variables.

    >>> d = attrdict(a=1, b=2)
    >>> d['c'] = 3
    >>> print d.a, d.b, d.c
    1 2 3
    >>> d.b = 10
    >>> print d['b']
    10

    # but be careful, it's easy to hide methods
    >>> print d.get('c')
    3
    >>> d['get'] = 4
    >>> print d.get('a')
    Traceback (most recent call last):
    TypeError: 'int' object is not callable
    """
    def __init__(self, *args, **kwargs):
        # adds the *args and **kwargs to self (which is a dict),
        # then aliases the attribute namespace to the dict itself so
        # item and attribute access share the same storage
        dict.__init__(self, *args, **kwargs)
        self.__dict__ = self
def maybe_dumps(s):
    """
    If the given value is a json structure (dict or list), encode it
    as a json string.  Otherwise leave it as is.
    """
    if isinstance(s, (dict, list)):
        return json.dumps(s)
    return s

def json_params(f):
    """
    Decorator that turns all arguments into string or
    string-compatible objects by json-encoding all dicts and lists,
    and leaving other types alone.
    """
    def call_f(*args, **kwds):
        new_args = (maybe_dumps(s) for s in args)
        # .items() instead of the py2-only .iteritems(): identical
        # behavior, forward-compatible with python 3
        new_kwds = dict((k, maybe_dumps(v)) for k, v in kwds.items())
        return f(*new_args, **new_kwds)
    return update_wrapper(call_f, f)
# TODO expose the common parts of the result envelope
class MetawebError(Exception):
    """
    An error report from the metaweb service.

    Raised for failed http requests, unparseable json responses, and
    mql envelopes whose code is not /api/status/ok.
    """
    pass
# TODO right now this is a completely unnecessary superclass.
#   is there enough common behavior between session types
#   to justify it?
class MetawebSession(object):
    """
    MetawebSession is the base class for metaweb sessions, subclassed
    for different connection types.  Only http is available externally.

    This is more of an interface than a class.
    """

    # interface definition here...
# from httplib2
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
class HTTPMetawebSession(MetawebSession):
"""
a MetawebSession is a request/response queue.
this version uses the HTTP api, and is synchronous.
"""
# share cookies across sessions, so that different sessions can
# see each other's writes immediately.
_default_cookiejar = cookielib.CookieJar()
def __init__(self, service_url, username=None, password=None, prev_session=None, cookiejar=None, cookiefile=None, application_name=None):
"""
create a new MetawebSession for interacting with the Metaweb.
a new session will inherit state from prev_session if present,
"""
super(HTTPMetawebSession, self).__init__()
self.log = logging.getLogger("freebase")
self.application_name = application_name
assert not service_url.endswith('/')
if not '/' in service_url: # plain host:port
service_url = 'http://' + service_url
self.service_url = service_url
self.username = username
self.password = password
self.tid = None
if prev_session:
self.service_url = prev.service_url
if cookiefile is not None:
cookiejar = self.open_cookie_file(cookiefile)
if cookiejar is not None:
self.cookiejar = cookiejar
elif prev_session:
self.cookiejar = prev_session.cookiejar
else:
self.cookiejar = self._default_cookiejar
self._http_request = http_client(self.cookiejar, self._raise_service_error)
def open_cookie_file(self, cookiefile=None):
if cookiefile is None or cookiefile == '':
if os.environ.has_key('HOME'):
cookiefile = os.path.join(os.environ['HOME'], '.pyfreebase/cookiejar')
else:
raise MetawebError("no cookiefile specified and no $HOME/.pyfreebase directory" % cookiefile)
cookiejar = cookielib.LWPCookieJar(cookiefile)
if os.path.exists(cookiefile):
cookiejar.load(ignore_discard=True)
return cookiejar
def _httpreq(self, service_path, method='GET', body=None, form=None,
headers=None):
"""
make an http request to the service.
form arguments are encoded in the url, even for POST, if a non-form
content-type is given for the body.
returns a pair (resp, body)
resp is the response object and may be different depending
on whether urllib2 or httplib2 is in use?
"""
if method == 'GET':
assert body is None
if method != "GET" and method != "POST":
assert 0, 'unknown method %s' % method
url = self.service_url + service_path
if headers is None:
headers = {}
else:
headers = _normalize_headers(headers)
# this is a lousy way to parse Content-Type, where is the library?
ct = headers.get('content-type', None)
if ct is not None:
ct = ct.split(';')[0]
if body is not None:
# if body is provided, content-type had better be too
assert ct is not None
if form is not None:
qstr = '&'.join(['%s=%s' % (urlencode_weak(unicode(k).encode('utf-8')),
urlencode_weak(unicode(v).encode('utf-8')))
for k,v in form.iteritems()])
if method == 'POST':
# put the args on the url if we're putting something else
# in the body. this is used to add args to raw uploads.
if body is not None:
url += '?' + qstr
else:
if ct is None:
ct = 'application/x-www-form-urlencoded'
headers['content-type'] = ct + '; charset=utf-8'
if ct == 'multipart/form-encoded':
# TODO handle this case
raise NotImplementedError
elif ct == 'application/x-www-form-urlencoded':
body = qstr
else:
# for all methods other than POST, use the url
url += '?' + qstr
# assure the service that this isn't a CSRF form submission
headers['x-metaweb-request'] = 'Python'
if 'user-agent' not in headers:
user_agent = ["python", "freebase.api-%s" % __version__]
if self.application_name:
user_agent.append(self.application_name)
headers['user-agent'] = ' '.join(user_agent)
####### DEBUG MESSAGE - should check log level before generating
loglevel = self.log.getEffectiveLevel()
if loglevel <= 20: # logging.INFO = 20
if form is None:
formstr = ''
else:
formstr = '\nFORM:\n ' + '\n '.join(['%s=%s' % (k,v)
for k,v in form.items()])
if headers is None:
headerstr = ''
else:
headerstr = '\nHEADERS:\n ' + '\n '.join([('%s: %s' % (k,v))
for k,v in headers.items()])
self.log.info('%s %s%s%s', method, url, formstr, headerstr)
# just in case you decide to make SUPER ridiculous GET queries:
if len(url) > 1000 and method == "GET":
method = "POST"
url, body = url.split("?", 1)
ct = 'application/x-www-form-urlencoded'
headers['content-type'] = ct + '; charset=utf-8'
return self._http_request(url, method, body, headers)
def _raise_service_error(self, url, status, ctype, body):
is_jsbody = (ctype.endswith('javascript')
or ctype.endswith('json'))
if str(status) == '400' and is_jsbody:
r = self._loadjson(body)
msg = r.messages[0]
raise MetawebError(u'%s %s %r' % (msg.get('code',''), msg.message, msg.info))
raise MetawebError, 'request failed: %s: %s\n%s' % (url, status, body)
def _httpreq_json(self, *args, **kws):
resp, body = self._httpreq(*args, **kws)
return self._loadjson(body)
def _loadjson(self, json_input):
# TODO really this should be accomplished by hooking
# simplejson to create attrdicts instead of dicts.
def struct2attrdict(st):
"""
copy a json structure, turning all dicts into attrdicts.
copying descends instances of dict and list, including subclasses.
"""
if isinstance(st, dict):
return attrdict([(k,struct2attrdict(v)) for k,v in st.items()])
if isinstance(st, list):
return [struct2attrdict(li) for li in st]
return st
if json_input == '':
self.log.error('the empty string is not valid json')
raise MetawebError('the empty string is not valid json')
try:
r = json.loads(json_input)
except ValueError, e:
self.log.error('error parsing json string %r' % json_input)
raise MetawebError, 'error parsing JSON string: %s' % e
return struct2attrdict(r)
def _check_mqlerror(self, r):
if r.code != '/api/status/ok':
for msg in r.messages:
self.log.error('mql error: %s %s %r' % (msg.code, msg.message, msg.get('query', None)))
raise MetawebError, 'query failed: %s\n%s\n%s' % (r.messages[0].code, r.messages[0].message, json.dumps(r.messages[0].get('query', None), indent=2))
def _mqlresult(self, r):
self._check_mqlerror(r)
self.log.info('result: %s', Delayed(logformat, r))
return r.result
def login(self, username=None, password=None, rememberme=False):
"""sign in to the service. For a more complete description,
see http://www.freebase.com/view/en/api_account_login"""
service = '/api/account/login'
username = username or self.username
password = password or self.password
assert username is not None
assert password is not None
self.log.debug('LOGIN USERNAME: %s', username)
rememberme = rememberme and "true" or "false"
form_params = {"username": username,
"password": password }
if rememberme:
form_params["rememberme"] = "true"
r = self._httpreq_json(service, 'POST',
form=form_params)
if r.code != '/api/status/ok':
raise MetawebError(u'%s %r' % (r.get('code',''), r.messages))
self.log.debug('LOGIN RESP: %r', r)
self.log.debug('LOGIN COOKIES: %s', self.cookiejar)
def logout(self):
"""logout of the service. For a more complete description,
see http://www.freebase.com/view/en/api_account_logout"""
service = '/api/account/logout'
self.log.debug("LOGOUT")
r = self._httpreq_json(service, 'GET')
if r.code != '/api/status/ok':
raise MetawebError(u'%s %r' % (r.get('code',''), r.messages)) #this should never happen
@json_params
def user_info(self, mql_output=None):
""" get user_info. For a more complete description,
see http://www.freebase.com/view/guid/9202a8c04000641f800000000c36a842"""
service = "/api/service/user_info"
r = self._httpreq_json(service, 'POST', form=dict(mql_output=mql_output))
return r
def loggedin(self):
"""check to see whether a user is logged in or not. For a
more complete description, see http://www.freebase.com/view/en/api_account_loggedin"""
service = "/api/account/loggedin"
try:
r = self._httpreq_json(service, 'GET')
if r.code == "/api/status/ok":
return True
except MetawebError, me:
return False
def create_private_domain(self, domain_key, display_name):
""" create a private domain. For a more complete description,
see http://www.freebase.com/edit/topic/en/api_service_create_private_domain"""
service = "/api/service/create_private_domain"
form = dict(domain_key=domain_key, display_name=display_name)
r = self._httpreq_json(service, 'POST', form=form)
return r
def delete_private_domain(self, domain_key):
""" create a private domain. For a more complete description,
see http://www.freebase.com/edit/topic/en/api_service_delete_private_domain"""
service = "/api/service/delete_private_domain"
form = dict(domain_key=domain_key)
return self._httpreq_json(service, 'POST', form=form)
def mqlreaditer(self, sq, asof=None):
"""read a structure query."""
cursor = True
service = '/api/service/mqlread'
if isinstance(sq, (tuple, list)):
if len(sq) > 1:
raise MetawebError("You cannot ask mqlreaditer a query in the form: [{}, {}, ...], just [{}] or {}")
sq = sq[0]
while 1:
subq = dict(query=[sq], cursor=cursor, escape=False)
if asof:
subq['as_of_time'] = asof
qstr = json.dumps(subq, separators=SEPARATORS)
r = self._httpreq_json(service, 'POST', form=dict(query=qstr))
for item in self._mqlresult(r):
yield item
if r['cursor']:
cursor = r['cursor']
self.log.info('CONTINUING with %s', cursor)
else:
return
def mqlread(self, sq, asof=None):
"""read a structure query. For a more complete description,
see http://www.freebase.com/view/en/api_service_mqlread"""
subq = dict(query=sq, escape=False)
if asof:
subq['as_of_time'] = asof
if isinstance(sq, list):
subq['cursor'] = True
service = '/api/service/mqlread'
self.log.info('%s: %s',
service,
Delayed(logformat, sq))
qstr = json.dumps(subq, separators=SEPARATORS)
r = self._httpreq_json(service, 'POST', form=dict(query=qstr))
return self._mqlresult(r)
def mqlreadmulti(self, queries, asof=None):
"""read a structure query"""
keys = [('q%d' % i) for i,v in enumerate(queries)];
envelope = {}
for i,sq in enumerate(queries):
subq = dict(query=sq, escape=False)
if asof:
subq['as_of_time'] = asof
# XXX put this back once mqlreadmulti is working in general
#if isinstance(sq, list):
# subq['cursor'] = True
envelope[keys[i]] = subq
service = '/api/service/mqlread'
self.log.info('%s: %s',
service,
Delayed(logformat, envelope))
qstr = json.dumps(envelope, separators=SEPARATORS)
rs = self._httpreq_json(service, 'POST', form=dict(queries=qstr))
self.log.info('%s result: %s',
service,
Delayed(json.dumps, rs, indent=2))
return [self._mqlresult(rs[key]) for key in keys]
def trans(self, guid):
"""translate blob from id. Identical to `raw`. For more
information, see http://www.freebase.com/view/en/api_trans_raw"""
return self.raw(guid)
def raw(self, id):
"""translate blob from id. For a more complete description,
see http://www.freebase.com/view/en/api_trans_raw"""
url = '/api/trans/raw' + urlquote(id)
self.log.info(url)
resp, body = self._httpreq(url)
self.log.info('raw is %d bytes' % len(body))
return body
def blurb(self, id, break_paragraphs=False, maxlength=200):
"""translate only the text in blob from id. For a more
complete description, see http://www.freebase.com/view/en/api_trans_blurb"""
url = '/api/trans/blurb' + urlquote(id)
self.log.info(url)
resp, body = self._httpreq(url, form=dict(break_paragraphs=break_paragraphs, maxlength=maxlength))
self.log.info('blurb is %d bytes' % len(body))
return body
def unsafe(self, id):
""" unsafe raw... not really documented, but identical to raw,
except it will be exactly what you uploaded. """
url = '/api/trans/unsafe' + urlquote(id)
self.log.info(url)
resp, body = self._httpreq(url, headers={'x-metaweb-request' : 'Python'})
self.log.info('unsafe is %d bytes' % len(body))
return body
def image_thumb(self, id, maxwidth=None, maxheight=None, mode="fit", onfail=None):
""" given the id of an image, this will return a URL of a thumbnail of the image.
The full details of how the image is cropped and finessed is detailed at
http://www.freebase.com/view/en/api_trans_image_thumb """
service = "/api/trans/image_thumb"
assert mode in ["fit", "fill", "fillcrop", "fillcropmid"]
form = dict(mode=mode)
if maxwidth is not None:
form["maxwidth"] = maxwidth
if maxheight is not None:
form["maxheight"] = maxheight
if onfail is not None:
form["onfail"] = onfail
resp, body = self._httpreq(service + urlquote(id), form=form)
self.log.info('image is %d bytes' % len(body))
return body
def mqlwrite(self, sq, use_permission_of=None, attribution_id=None):
"""do a mql write. For a more complete description,
see http://www.freebase.com/view/en/api_service_mqlwrite"""
query = dict(query=sq, escape=False)
if use_permission_of:
query['use_permission_of'] = use_permission_of
if attribution_id:
query['attribution'] = attribution_id
qstr = json.dumps(query, separators=SEPARATORS)
self.log.debug('MQLWRITE: %s', qstr)
service = '/api/service/mqlwrite'
self.log.info('%s: %s',
service,
Delayed(logformat,sq))
r = self._httpreq_json(service, 'POST',
form=dict(query=qstr))
self.log.debug('MQLWRITE RESP: %r', r)
return self._mqlresult(r)
def mqlcheck(self, sq):
""" See if a write is valid, and see what would happen, but do not
actually do the write """
query = dict(query=sq, escape=False)
qstr = json.dumps(query, separators=SEPARATORS)
self.log.debug('MQLCHECK: %s', qstr)
service = '/api/service/mqlcheck'
self.log.info('%s: %s',
service,
Delayed(logformat, sq))
r = self._httpreq_json(service, 'POST',
form=dict(query=qstr))
self.log.debug('MQLCHECK RESP: %r', r)
return self._mqlresult(r)
def mqlflush(self):
"""ask the service not to hand us old data"""
self.log.debug('MQLFLUSH')
service = '/api/service/touch'
r = self._httpreq_json(service)
self._check_mqlerror(r)
return True
def touch(self):
""" make sure you are accessing the most recent data. For a more
complete description, see http://www.freebase.com/view/en/api_service_touch"""
return self.mqlflush()
def upload(self, body, content_type, document_id=False, permission_of=False):
"""upload to the metaweb. For a more complete description,
see http://www.freebase.com/view/en/api_service_upload"""
service = '/api/service/upload'
self.log.info('POST %s: %s (%d bytes)',
service, content_type, len(body))
headers = {}
if content_type is not None:
headers['content-type'] = content_type
form = None
if document_id is not False:
if document_id is None:
form = { 'document': '' }
else:
form = { 'document': document_id }
if permission_of is not False:
if form:
form['permission_of'] = permission_of
else:
form = { 'permission_of' : permission_of }
# note the use of both body and form.
# form parameters get encoded into the URL in this case
r = self._httpreq_json(service, 'POST',
headers=headers, body=body, form=form)
return self._mqlresult(r)
def uri_submit(self, URI, document=None, content_type=None):
""" submit a URI to freebase. For a more complete description,
see http://www.freebase.com/edit/topic/en/api_service_uri_submit """
service = "/api/service/uri_submit"
form = dict(uri=URI)
if document is not None:
form["document"] = document
if content_type is not None:
form["content_type"] = content_type
r = self._httpreq_json(service, 'POST', form=form)
return self._mqlresult(r)
@json_params
def search(self, query, format=None, prefixed=None, limit=20, start=0,
type=None, type_strict="any", domain=None, domain_strict=None,
escape="html", timeout=None, mql_filter=None, mql_output=None):
""" search freebase.com. For a more complete description,
see http://www.freebase.com/view/en/api_service_search"""
service = "/api/service/search"
form = dict(query=query)
if format:
form["format"] = format
if prefixed:
form["prefixed"] = prefixed
if limit:
form["limit"] = limit
if start:
form["start"] = start
if type:
form["type"] = type
if type_strict:
form["type_strict"] = type_strict
if domain:
form["domain"] = domain
if domain_strict:
form["domain_strict"] = domain_strict
if escape:
form["escape"] = escape
if timeout:
form["timeout"] = timeout
if mql_filter:
form["mql_filter"] = mql_filter
if mql_output:
form["mql_output"] = mql_output
r = self._httpreq_json(service, 'POST', form=form)
return self._mqlresult(r)
@json_params
def geosearch(self, location=None, location_type=None,
mql_input=None, limit=20, start=0, type=None,
geometry_type=None, intersect=None, mql_filter=None,
within=None, inside=None, order_by=None, count=None,
format="json", mql_output=None):
""" perform a geosearch. For a more complete description,
see http://www.freebase.com/api/service/geosearch?help """
service = "/api/service/geosearch"
if location is None and location_type is None and mql_input is None:
raise Exception("You have to give it something to work with")
form = dict()
if location:
form["location"] = location
if location_type:
form["location_type"] = location_type
if mql_input:
form["mql_input"] = mql_input
if limit:
form["limit"] = limit
if start:
form["start"] = start
if type:
form["type"] = type
if geometry_type:
form["geometry_type"] = geometry_type
if intersect:
form["intersect"] = intersect
if mql_filter:
form["mql_filter"] = mql_filter
if within:
form["within"] = within
if inside:
form["inside"] = inside
if order_by:
form["order_by"] = order_by
if count:
form["count"] = count
if format:
form["format"] = format
if mql_output:
form["mql_output"] = mql_output
if format == "json":
r = self._httpreq_json(service, 'POST', form=form)
else:
r = self._httpreq(service, 'POST', form=form)
return r
def version(self):
""" get versions for various parts of freebase. For a more
complete description, see http://www.freebase.com/view/en/api_version"""
service = "/api/version"
r = self._httpreq_json(service)
return r
def status(self):
""" get the status for various parts of freebase. For a more
complete description, see http://www.freebase.com/view/en/api_status """
service = "/api/status"
r = self._httpreq_json(service)
return r
### DEPRECATED IN API
def reconcile(self, name, etype=['/common/topic']):
"""DEPRECATED: reconcile name to guid. For a more complete description,
see http://www.freebase.com/view/en/dataserver_reconciliation
If interested in a non-deprecated version,
check out http://data.labs.freebase.com/recon/"""
service = '/dataserver/reconciliation'
r = self._httpreq_json(service, 'GET', form={'name':name, 'types':','.join(etype)})
# TODO non-conforming service, fix later
#self._mqlresult(r)
return r
if __name__ == '__main__':
    # smoke test: log everything to stderr, connect to the sandbox
    # and run a trivial mql query
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)

    mss = HTTPMetawebSession('sandbox-freebase.com')

    mss.log.setLevel(logging.DEBUG)
    mss.log.addHandler(console)

    print mss.mqlread([dict(name=None, type='/type/type')])
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
#
# httplib2cookie.py allows you to use python's standard
# CookieJar class with httplib2.
#
#
import re

# appengine environment: urlfetch exists and httplib2 may not, so
# give CookiefulHttp a harmless base class
try:
    from google.appengine.api import urlfetch
    Http = object
except ImportError:
    pass

try:
    from httplib2 import Http
except ImportError:
    pass

# restricted environments may only provide a stub urllib
try:
    import urllib
except ImportError:
    import urllib_stub as urllib
import cookielib
class DummyRequest(object):
    """Minimal stand-in for a urllib2.Request, for use with httplib2.

    Implements only the surface that cookielib.CookieJar needs when
    adding and extracting cookies.
    """
    def __init__(self, url, headers=None):
        self.url = url
        self.headers = headers
        self.origin_req_host = cookielib.request_host(self)
        self.type, rest = urllib.splittype(url)
        self.host, rest = urllib.splithost(rest)
        if self.host:
            self.host = urllib.unquote(self.host)

    def get_full_url(self):
        return self.url

    def get_type(self):
        return self.type

    def get_host(self):
        return self.host

    def get_origin_req_host(self):
        # TODO to match urllib2 this should be different for redirects
        return self.origin_req_host

    def get_header(self, key, default=None):
        # header names are stored lower-cased
        return self.headers.get(key.lower(), default)

    def has_header(self, key):
        return key in self.headers

    def add_unredirected_header(self, key, val):
        # TODO this header should not be sent on redirect
        self.headers[key.lower()] = val

    def is_unverifiable(self):
        # TODO to match urllib2, this should be set to True when the
        # request is the result of a redirect
        return False
class DummyHttplib2Response(object):
    """Wraps an httplib2 response in the urllib2 response interface;
    implements only what's necessary for cookielib.CookieJar to work
    """
    def __init__(self, response):
        # response: the httplib2 response object
        self.response = response

    def info(self):
        return DummyHttplib2Message(self.response)
class DummyUrlfetchResponse(object):
    """Wraps an appengine urlfetch response in the urllib2 response
    interface; implements only what's necessary for
    cookielib.CookieJar to work
    """
    def __init__(self, response):
        # response: the urlfetch response object
        self.response = response

    def info(self):
        return DummyUrlfetchMessage(self.response)
class DummyHttplib2Message(object):
    """Simulated mimetools.Message object for httplib2
    implements only what's necessary for cookielib.CookieJar to work
    """
    # httplib2 joins multiple values for the same header
    # using ','.  but the netscape cookie format uses ','
    # as part of the expires= date format.  so we have
    # to split carefully here - header.split(',') won't do it:
    # a comma followed by whitespace and a digit (as in
    # "expires=Wed, 09-Jun-2021") stays inside one value.
    # compiled once at class scope instead of on every call
    HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')

    def __init__(self, response):
        # response: httplib2 response, a dict-like mapping of
        # lower-cased header names to joined header values
        self.response = response

    def getheaders(self, k):
        """Return the list of values for header k (case-insensitive),
        or [] if the header is absent."""
        k = k.lower()
        # (the original also computed an unused 'v' lookup here)
        if k not in self.response:
            return []
        return [h[0] for h in self.HEADERVAL.findall(self.response[k])]
class DummyUrlfetchMessage(object):
    """Simulated mimetools.Message object for appengine urlfetch
    implements only what's necessary for cookielib.CookieJar to work
    """
    # httplib2 joins multiple values for the same header
    # using ','.  but the netscape cookie format uses ','
    # as part of the expires= date format.  so we have
    # to split carefully here - header.split(',') won't do it:
    # a comma followed by whitespace and a digit (as in
    # "expires=Wed, 09-Jun-2021") stays inside one value.
    # compiled once at class scope instead of on every call
    HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')

    def __init__(self, response):
        # response: urlfetch response; its .headers is a mapping of
        # lower-cased header names to joined header values
        self.response = response

    def getheaders(self, k):
        """Return the list of values for header k (case-insensitive),
        or [] if the header is absent."""
        k = k.lower()
        # (the original also computed an unused 'v' lookup here)
        if k not in self.response.headers:
            return []
        return [h[0] for h in self.HEADERVAL.findall(self.response.headers[k])]
class CookiefulHttp(Http):
    """Subclass of httplib2.Http that keeps cookie state.

    constructor takes an optional cookiejar=cookielib.CookieJar

    currently this does not handle redirects completely correctly:
    if the server redirects to a different host the original
    cookies will still be sent to that host.
    """
    def __init__(self, cookiejar=None, **kws):
        # note that httplib2.Http is not a new-style-class, so no super()
        Http.__init__(self, **kws)
        if cookiejar is None:
            cookiejar = cookielib.CookieJar()
        self.cookiejar = cookiejar

    def request(self, uri, **kws):
        # wrap the request/response pair in the urllib2-ish dummies so
        # the cookiejar can stamp outgoing and harvest incoming cookies
        headers = kws.pop('headers', None)
        req = DummyRequest(uri, headers)
        self.cookiejar.add_cookie_header(req)
        (resp, content) = Http.request(self, uri, headers=req.headers, **kws)
        self.cookiejar.extract_cookies(DummyHttplib2Response(resp), req)
        return (resp, content)
class CookiefulUrlfetch(object):
    """Wrapper around appengine urlfetch that keeps cookie state.

    constructor takes an optional cookiejar=cookielib.CookieJar
    """
    # TODO refactor CookefulHttp so that CookiefulUrlfetch can be a subclass of it
    def __init__(self, cookiejar=None, **kws):
        # bug fix: the original assigned the fresh CookieJar to a
        # misspelled local ('cookejar') and then stored the possibly-None
        # argument, so a default-constructed instance crashed on its
        # first request.  attribute renamed to 'cookiejar' to match
        # CookiefulHttp.
        if cookiejar is None:
            cookiejar = cookielib.CookieJar()
        self.cookiejar = cookiejar

    def request(self, uri, **kws):
        # stamp outgoing cookies, fetch, then harvest incoming cookies
        headers = kws.pop('headers', None)
        req = DummyRequest(uri, headers)
        self.cookiejar.add_cookie_header(req)
        headers = req.headers
        r = urlfetch.fetch(uri, headers=headers, **kws)
        self.cookiejar.extract_cookies(DummyUrlfetchResponse(r), req)
        return r
| Python |
from session import HTTPMetawebSession, MetawebError, attrdict, LITERAL_TYPE_IDS
from mqlkey import quotekey, unquotekey | Python |
from copy import deepcopy
import sys
from freebase.api.session import HTTPMetawebSession, MetawebError
from freebase.api.session import LITERAL_TYPE_IDS
"""
NOTE
----
graph is used freely in this file. Some information:
- It refers to an internal representation of a group of types.
- It resembles a mqlread result, but it is not a mqlread result
- It also has some helper variables like __requires and __related.
- All helper variables start with __ since that's not valid MQL
- It is produced by _get_graph
- It can be converted into valid json (json.dumps(graph, indent=2))
Its structure is as follows:
"type_id" : {
"name" : "My Type",
"id" : "/base_id/type_id"
...
"__requires" : ["/base_id/type_id2"]
"properties" : [
{
"id" : "/base_id/type_id/my_prop"
...
},
{
...
}
]
},
"type_id2" : {...}
...
"""
class DelegationError(Exception):
    """Raised when setting an expected_type on a delegated property whose
    master property's expected_type is a primitive."""
class CVTError(Exception):
    """Raised when follow_types is False but a cvt is present; a cvt
    requires fetching all the relevant types, so set follow_types to True."""
def key_exists(s, k):
    """Return True if an object with id k exists on the service.

    s -- a session with an mqlread method; mqlread returns None when
         nothing matches the query.
    """
    q = {
        "id" : k,
        "guid" : None
    }
    # idiom fix: was 'not None == s.mqlread(q)'; identity comparison
    # against None is the idiomatic (and equivalent) form here
    return s.mqlread(q) is not None
### SCHEMA MANIPULATION ###
# Object helpers
def create_object(s, name="", path=None, key=None, namespace=None,
                  included_types=None, create="unless_exists",
                  extra=None, use_permission_of=None, attribution=None):
    """Create an object with a name, a path (or a key + namespace) and
    included types.

    ``create`` controls the MQL create directive ("unless_exists",
    "unconditional", ...); ``extra`` is merged into the write query;
    ``use_permission_of`` / ``attribution`` are forwarded to mqlwrite.
    Raises ValueError if both path and key/namespace are given, or if only
    one of key/namespace is given.
    """
    # Accept a bare string for included_types. isinstance() also covers
    # str subclasses, unlike the original `type(...) is str` check.
    if isinstance(included_types, str):
        included_types = [included_types]
    if path and (key or namespace):
        raise ValueError("You can't specify both the path and a key and namespace.")
    if path:
        key, namespace = get_key_namespace(path)
    if (key and not namespace) or (not key and namespace):
        raise ValueError("You must either specify both a key and a namespace, or neither.")
    if included_types:
        # expand to the transitive included types of each requested type
        its = set(included_types)
        q = [{
            "id|=" : included_types,
            "/freebase/type_hints/included_types" : [{"id" : None}]
        }]
        for res in s.mqlread(q):
            its.update([x["id"] for x in res["/freebase/type_hints/included_types"]])
    wq = {
        "id" : None,
        "name" : name,
        "create" : create
    }
    # conditionally add key creation
    if key:
        wq.update({"key" : {
            "namespace" : namespace,
            "value" : key,
        }})
    if included_types:
        wq.update(type = [{ "id" : it, "connect" : "insert" } for it in its])
    if extra:
        wq.update(extra)
    return s.mqlwrite(wq, use_permission_of=use_permission_of, attribution_id=attribution)
def connect_object(s, id, newpath, extra=None, use_permission_of=None, attribution=None):
    """Attach an additional key to the object ``id`` so that it is also
    reachable at ``newpath``.

    Example:
        connect_object(s, "/guid/002", "/en/the_beatles")
    """
    new_key, new_ns = get_key_namespace(newpath)
    write_query = {
        "id": id,
        "key": {
            "namespace": new_ns,
            "value": new_key,
            "connect": "insert"
        }
    }
    if extra:
        write_query.update(extra)
    return s.mqlwrite(write_query, use_permission_of=use_permission_of,
                      attribution_id=attribution)
def disconnect_object(s, id, extra=None, use_permission_of=None, attribution=None):
    """Disconnect an object by deleting the key that names it.

    Example:
        disconnect_object(s, "/en/the_beatles")
    removes the `the_beatles` key from /en.
    """
    old_key, old_ns = get_key_namespace(id)
    write_query = {
        "id": id,
        "key": {
            "namespace": old_ns,
            "value": old_key,
            "connect": "delete"
        }
    }
    if extra:
        write_query.update(extra)
    return s.mqlwrite(write_query, use_permission_of=use_permission_of,
                      attribution_id=attribution)
def move_object(s, oldpath, newpath, use_permission_of=None, attribution=None):
    """Re-key an object: add the key at ``newpath``, then drop the key at
    ``oldpath``. Returns the pair of mqlwrite results."""
    added = connect_object(s, oldpath, newpath,
                           use_permission_of=use_permission_of,
                           attribution=attribution)
    removed = disconnect_object(s, oldpath,
                                use_permission_of=use_permission_of,
                                attribution=attribution)
    return added, removed
def get_key_namespace(path):
    """Split an id path into (key, namespace).

    get_key_namespace("/common")      -> ("common", "/")
    get_key_namespace("/food/drinks") -> ("drinks", "/food")
    """
    namespace, key = path.rsplit("/", 1)
    if not namespace:
        # a top-level id like "/common" splits to ("", "common"):
        # its namespace is the root namespace "/"
        namespace = "/"
    return (key, namespace)
def add_type_to_object(s, id, type_id):
    """
    Connect type ``type_id`` -- plus all of its included types -- to the
    object ``id``.
    """
    q = {
        "id" : type_id,
        "/freebase/type_hints/included_types" : [{"id" : None, "optional" : True}]
    }
    # List comprehension instead of map(): under Python 3 the original
    # `map(...)` returned a lazy object that cannot be concatenated with
    # a list when building the write query below.
    included_types = [x["id"] for x in s.mqlread(q)["/freebase/type_hints/included_types"]]
    wq = {
        "id" : id,
        "type" : [{
            "id" : it,
            "connect" : "insert"
        } for it in included_types + [type_id]]
    }
    return s.mqlwrite(wq)
def copy_property(s, id, newid, **extra):
    """Create a new property at ``newid`` carrying over all the information
    of the existing property ``id``.

    Extra keyword arguments override fields of the copied raw payload.
    """
    newname, newschema = get_key_namespace(newid)
    info = get_property_info(s, id)
    info["__raw"].update(extra)
    # Bug fix: the original tested `info.has_key(unique)` with unique=None,
    # i.e. looked up the key None, so uniqueness was never carried over.
    # (`in` also replaces the Python-2-only has_key().)
    unique = None
    if "unique" in info:
        unique = info["unique"]
    disambig = None
    if "/freebase/property_hints/disambiguator" in info:
        disambig = info["/freebase/property_hints/disambiguator"]
    create_property(s, info["name"], newname, newschema, info["expected_type"],
                    unique=unique, disambig=disambig,
                    tip=info["/freebase/documented_object/tip"], extra=info["__raw"])
def move_property(s, id, newid, **extra):
    """Copy property ``id`` to ``newid``, then detach the original from
    its schema and namespace."""
    copy_property(s, id, newid, **extra)
    old_schema = "/".join(id.split("/")[:-1])
    unlink = {"type" : "/type/property",
              "schema" : {"connect" : "delete", "id" : old_schema}}
    disconnect_object(s, id, extra=unlink)
def get_property_info(s, prop_id):
    """Return a plain dict describing property ``prop_id``.

    The fields create_property() asks for (name, schema, key, tip, ...)
    sit at the root of the result, while everything else is collected
    under ``info["__raw"]`` so it can be passed along as the ``extra``
    write payload. Only used by copy_property, but could be used by
    whoever.
    """
    q = deepcopy(PROPERTY_QUERY)
    q.update(id=prop_id)
    q.update(schema={"id" : None, "name" : None})
    res = s.mqlread(q)
    info = {}
    info["name"] = res["name"]["value"]
    if res["schema"]:
        info["schema"] = res["schema"]["id"]
    else: info["schema"] = None
    if res["key"]:
        info["key"] = [(x["value"], x["namespace"]) for x in res["key"]]
    else: info["key"] = None
    if res["/freebase/documented_object/tip"]:
        info["/freebase/documented_object/tip"] = res["/freebase/documented_object/tip"]["value"]
    else: info["/freebase/documented_object/tip"] = None
    ignore = ("optional", "type", "key", "/freebase/documented_object/tip")
    # Iterate the dict directly and use `in` -- the original used
    # iterkeys()/has_key(), which do not exist under Python 3.
    for prop in PROPERTY_QUERY:
        if prop not in ignore:
            if prop not in info:
                info[prop] = None
    info.update(_generate_extra_properties(res, ignore))
    # delete the properties that are going to be asked for in create_property
    del res["name"]
    del res["schema"]
    del res["key"]
    del res["expected_type"]
    del res["unique"]
    del res["/freebase/property_hints/disambiguator"]
    del res["/freebase/documented_object/tip"]
    # delete other useless things...
    del res["id"]
    # materialize the key list first so we never delete while iterating
    # (items() replaces the Python-2-only iteritems())
    for empty_key in [k for k, v in res.items() if v is None]:
        del res[empty_key]
    info["__raw"] = res
    return info
# Create Type
def create_type(s, name, key, ns, cvt=False, tip=None, included=None, extra=None):
    """
    Create a new type under domain ``ns``, wiring up its key, its domain
    link and (optionally) its included types, mediator flag and tip.

    A no-op if ``ns/key`` already exists.
    """
    # TODO: CREATE SYNTHETIC VIEW
    if key_exists(s, ns + "/" + key):
        return
    # assert isinstance(name, basestring) # name could be mqlish
    assert isinstance(key, basestring)
    assert isinstance(ns, basestring)
    assert tip is None or isinstance(tip, basestring)
    assert included is None or isinstance(included, (basestring, list, tuple))
    assert extra is None or isinstance(extra, dict)
    write_query = {
        "create" : "unconditional",
        "type" : "/type/type",
        "/type/type/domain" : { "connect" : "insert", "id" : ns },
        "name" : {"connect" : "insert", "value" : name, "lang" : "/lang/en" },
        "key" : {
            "connect" : "insert",
            "value" : key,
            "namespace" : ns
        }
    }
    if included:
        if isinstance(included, basestring):
            included = [included]
        # pull in the transitive included types of everything requested
        its_q = [{
            "id|=" : included,
            "/freebase/type_hints/included_types" : [{"id" : None}]
        }]
        r = s.mqlread(its_q)
        included_types = set(included)
        if r:
            for row in r:
                included_types.update(hint["id"] for hint in row["/freebase/type_hints/included_types"])
        write_query['/freebase/type_hints/included_types'] = [
            {"connect" : "insert", "id" : t} for t in included_types]
    # TODO: enum
    if cvt:
        write_query['/freebase/type_hints/mediator'] = { "connect" : "update", "value" : True }
    if tip:
        write_query['/freebase/documented_object/tip'] = { "connect" : "update", "value" : tip, "lang" : "/lang/en" }
    if extra:
        write_query.update(extra)
    return s.mqlwrite(write_query, use_permission_of=ns)
# Create Property
def create_property(s, name, key, schema, expected, unique=False, disambig=False, tip=None, extra=None):
    """
    Create property ``key`` on type ``schema`` with expected type
    ``expected``, optionally marking it unique / disambiguating and
    giving it a tip. A no-op if ``schema/key`` already exists.
    """
    if key_exists(s, schema + "/" + key):
        return
    # validate parameters
    # assert isinstance(name, basestring) # could be mql
    assert isinstance(key, basestring)
    assert isinstance(schema, basestring)
    assert isinstance(expected, basestring)
    assert tip is None or isinstance(tip, basestring)
    assert extra is None or isinstance(extra, dict)
    write_query = {
        "create" : "unconditional",
        "type" : "/type/property",
        "name" : name,
        "key" : {
            "connect" : "insert",
            "value" : key,
            "namespace" : { "id" : schema },
        },
        "schema" : { "connect" : "insert", "id" : schema },
        "expected_type" : { "connect" : "insert", "id" : expected }
    }
    if unique:
        write_query['unique'] = { "connect" : "update", "value" : unique }
    if tip:
        write_query['/freebase/documented_object/tip'] = {
            "connect" : "update", "value" : tip, "lang" : "/lang/en" }
    if disambig:
        write_query['/freebase/property_hints/disambiguator'] = {
            "connect" : "update", "value" : True }
    if extra:
        write_query.update(extra)
    return s.mqlwrite(write_query, use_permission_of=schema)
def delegate_property(s, p, schema, name=None, key=None, expected=None, tip=None, extra=None):
    """
    Create on ``schema`` a property that delegates to master property ``p``.

    Unspecified name/key/tip default to the master's values. If the
    master's expected_type is a literal (primitive) type, the delegate
    must use the same expected_type; otherwise a different one may be
    given.
    """
    assert isinstance(p, basestring)
    assert isinstance(schema, basestring)
    assert key is None or isinstance(key, basestring)
    assert expected is None or isinstance(expected, basestring)
    assert tip is None or isinstance(tip, basestring)
    master = s.mqlread({
        "id" : p,
        "type" : "/type/property",
        "name" : None,
        "unique" : None,
        "expected_type" : {"id" : None},
        "key" : None,
        "/freebase/documented_object/tip" : None,
        "/freebase/property_hints/disambiguator" : None
    })
    master_ect = master["expected_type"]["id"]
    if master_ect in LITERAL_TYPE_IDS:
        # a literal-valued master forces the delegate to the same type
        if expected and expected != master_ect:
            raise DelegationError("You can't set the expected_type if the expected_type of the delegated (master) is a primitive")
        expected = master_ect
    elif expected is None:
        # a non-literal master only provides the default expected_type
        expected = master_ect
    if not tip and master["/freebase/documented_object/tip"]:
        tip = master["/freebase/documented_object/tip"]
    if name is None:
        name = master["name"]
    if key is None:
        key = master["key"]
    link = { "/type/property/delegated" : p}
    if extra: link.update(extra)
    return create_property(s, name, key, schema, expected, master['unique'],
                           master["/freebase/property_hints/disambiguator"],
                           tip,
                           link)
def reciprocate_property(s, name, key, master, unique=False, disambig=False, tip=None, extra=None):
    """Create the reciprocal (reverse) of the ``master`` property.

    Example: for /visual_art/art_period_movement/associated_artworks
    (schema /visual_art/art_period_movement, expected type
    /visual_art/artwork) the reciprocal is
    /visual_art/artwork/period_or_movement: its schema is the master's
    expected type and its expected type is the master's schema -- i.e.
    the two are swapped.

    Raises MetawebError if the master already has a reverse property.
    """
    # assert isinstance(name, basestring) # name could be mqlish
    assert isinstance(key, basestring)
    assert isinstance(master, basestring)
    assert tip is None or isinstance(tip, basestring)
    assert extra is None or isinstance(extra, dict)
    # get master information
    q = {
        "id" : master,
        "/type/property/expected_type" : None,
        "/type/property/schema" : None,
        "/type/property/reverse_property" : None }
    r = s.mqlread(q)
    ect = r["/type/property/expected_type"]
    schema = r["/type/property/schema"]
    # check to see if a master existed
    if r["/type/property/reverse_property"]:
        # Bug fix: the original passed the args tuple as a second argument
        # to MetawebError instead of %-formatting the message.
        raise MetawebError("You can't reciprocate property %s who already "
                           "has a reverse property %s"
                           % (master, r["/type/property/reverse_property"]))
    # avoid shadowing the `master` parameter (the original reused the name)
    link = {"master_property" : master}
    if extra: link.update(extra)
    # NOTE: swapping ect and schema; see docstring above
    return create_property(s, name, key, ect, schema, unique, disambig, tip,
                           extra = link)
# dump / restore types
def dump_base(s, base_id):
    """Dump every type in the base/domain ``base_id`` into a `graph`
    dict (see the module NOTE for the graph layout)."""
    domain = s.mqlread({"id" : base_id,
                        "/type/domain/types": [{"id" : None}]})
    type_ids = [entry["id"] for entry in domain["/type/domain/types"]]
    return _get_graph(s, type_ids, True)
def dump_type(s, type_id, follow_types=True):
    """Dump a single type (like dump_base). When ``follow_types`` is true
    the types it depends on are dumped as well; otherwise they are only
    referenced."""
    return _get_graph(s, [type_id], follow_types)
def restore(s, graph, new_location, ignore_types=None):
    """Upload a dumped `graph` (from dump_base/dump_type) under the
    domain id ``new_location``, creating that domain first if needed.

    Types and then properties are created in dependency order so that
    included types, masters and delegates exist before anything that
    refers to them.
    NOTE(review): ``ignore_types`` is never read -- dead parameter.
    """
    follow_types = graph.get("__follow_types", True)
    # We assume the new_location is empty. if it isn't, we bail.
    # well, ...
    # create type dependencies
    type_requires_graph = {}
    # create prop dependencies
    prop_requires_graph = {}
    prop_to_type_map = {}
    # NOTE(review): iteritems() is Python-2 only.
    for type_id, type_information in graph.iteritems():
        if not type_id.startswith("__"): # not a real type, but rather a helper
            # type dependency generation
            type_requires_graph[type_id] = type_information["__requires"]
            # prop dependency generation
            for prop in type_information["properties"]:
                prop_requires_graph[prop["id"]] = prop["__requires"]
                prop_to_type_map[prop["id"]] = type_id
    # topologically order creation so dependencies come first
    types_to_create = _generate_dependency_creation_order(type_requires_graph)
    props_to_create = _generate_dependency_creation_order(prop_requires_graph)
    # make sure we're starting fresh - sometimes your mwLastWriteTime
    # isn't fresh if create_private_domain fails
    s.touch()
    origin_id, new_location_id = s.mqlreadmulti([{"id" : types_to_create[0],
                                                  "type" : "/type/type",
                                                  "domain" : {"id" : None}},
                                                 {"id" : new_location,
                                                  "a:id" : None}])
    if new_location_id is None:
        # create the domain if it doesnt' exist already
        username = s.user_info()["username"]
        user_id = "/user/%s" % username
        # only /user/<name>/<key> domains can be auto-created here
        if not new_location.startswith("%s/" % user_id):
            sys.stderr.write("%s does not exist: If creating a domain outside of %s, you must create it yourself\n" % (new_location, user_id))
            sys.exit(1)
        location_key = new_location[len(user_id)+1:]
        tail_key = new_location.rsplit("/", 1)[-1]
        if tail_key != location_key:
            sys.stderr.write("%s does not exist: can only create domains as direct children of %s\n" % (new_location, user_id))
            sys.exit(1)
        s.create_private_domain(location_key, location_key)
    origin_id = origin_id["domain"]["id"]
    # NOTE(review): if the domain was just created above, new_location_id
    # is still None here and this subscript will fail -- confirm.
    new_location_id = new_location_id["a:id"]
    only_include = types_to_create + props_to_create
    for type_id in types_to_create:
        # let's find the type's key
        key = None
        for group in graph[type_id]["key"]:
            if group["namespace"] == origin_id:
                key = group["value"]
                break
        if key is None: # this shouldn't happen
            key = graph[type_id]["id"].split("/")[-1] # this can be wrong... different key than typeid
        tip = None
        if graph[type_id]["/freebase/documented_object/tip"]:
            tip = graph[type_id]["/freebase/documented_object/tip"]["value"]
        ignore = TYPE_INGORE_PROPERTIES
        extra = _generate_extra_properties(graph[type_id], ignore)
        name = graph[type_id]["name"]["value"]
        included = [_convert_name_to_new(included_type["id"], origin_id, new_location_id, only_include)
                    for included_type in graph[type_id]["/freebase/type_hints/included_types"]]
        cvt = graph[type_id]["/freebase/type_hints/mediator"]
        create_type(s, name, key, new_location_id, included=included, cvt=cvt, tip=tip, extra=extra)
    for prop_id in props_to_create:
        type_id = prop_to_type_map[prop_id]
        all_properties_for_type = graph[type_id]["properties"]
        for prop in all_properties_for_type:
            if prop["id"] == prop_id: # good, we are dealing with our specific property
                new_schema = _convert_name_to_new(type_id, origin_id, new_location_id, only_include)
                name = prop["name"]
                expected = None
                if prop["expected_type"]:
                    expected = _convert_name_to_new(prop["expected_type"], origin_id, new_location_id, only_include)
                # NOTE(review): `key` is not reset to None before this
                # loop, so if no namespace matches, the value left over
                # from a previous iteration (or the type loop above) is
                # silently reused -- confirm a miss is impossible.
                for group in prop["key"]:
                    if group["namespace"] == type_id:
                        key = group["value"]
                        break
                tip = None
                if prop["/freebase/documented_object/tip"]:
                    tip = prop["/freebase/documented_object/tip"]["value"]
                disambig = prop["/freebase/property_hints/disambiguator"]
                unique = prop["unique"]
                ignore = PROPERTY_IGNORE_PROPERTIES
                extra = _generate_extra_properties(prop, ignore)
                if prop['master_property']:
                    # reciprocal property: its master must also live in the
                    # new location, otherwise the dump was made with
                    # follow_types=False and cannot be restored
                    converted_master_property = _convert_name_to_new(prop["master_property"], origin_id, new_location_id, only_include)
                    if converted_master_property == prop["master_property"]:
                        raise CVTError("You can't set follow_types to False if there's a cvt. A cvt requires you get all the relevant types. Set follow_types to true.\n" + \
                                       "The offending property was %s, whose master was %s." % (prop["id"], prop["master_property"]))
                    reciprocate_property(s, name, key, converted_master_property,
                                         unique, disambig=disambig, tip=tip, extra=extra)
                elif prop['delegated']:
                    delegate_property(s, _convert_name_to_new(prop['delegated'], origin_id, new_location_id, only_include), new_schema,
                                      expected=expected, tip=tip, extra=extra)
                else:
                    create_property(s, name, key, new_schema, expected, unique,
                                    disambig=disambig, tip=tip, extra=extra)
def _get_graph(s, initial_types, follow_types):
    """Build the dependency `graph` dict for ``initial_types`` (see the
    module NOTE for the layout).

    When ``follow_types`` is true, every related type is pulled in
    transitively; otherwise only the initial types are dumped, and the
    presence of any CVT (master_property) raises CVTError.
    """
    assert isinstance(initial_types, (list, tuple))
    graph = {}
    pending = set(initial_types)
    seen = set()
    while pending:
        type_id = pending.pop()
        graph[type_id] = _get_needed(s, type_id)
        if follow_types:
            related = graph[type_id]["__related"]
            for other in related:
                if other not in seen:
                    pending.add(other)
            seen.update(related)
        else:
            # we have to check that there are no cvts attached to us, or else
            # ugly things happen (we can't include the cvt because the cvt won't link to us.)
            for prop in graph[type_id]["properties"]:
                if prop["master_property"]:
                    raise CVTError("You can't set follow_types to False if there's a cvt. A cvt requires you get all the relevant types. Set follow_types to true.\n" + \
                                   "The offending property was %s, whose master was %s." % (prop["id"], prop["master_property"]))
    graph["__follow_types"] = follow_types
    return graph
def _convert_name_to_new(old_name, operating_base, new_base, only_include=None):
if old_name in only_include and old_name.startswith(operating_base):
return new_base + old_name.replace(operating_base, "", 1)
else:
return old_name
def _generate_dependency_creation_order(requires_graph):
# This is a naive topographical sort to determine
# in what order to create types or properties so
# that the other type/properties they rely on
# are already created
# This function is called with the type dependencies
# and then the property dependencies.
# we sort the dependency_list because its a good idea
# to create the guys with zero dependencies before the
# guys with one.. it's just a simple optimization to
# the topographical sort
dependency_list = [(len(requires), name) for (name, requires) in requires_graph.iteritems()]
dependency_list.sort()
creation_order_list = []
while len(dependency_list) > 0:
number_of_requirements, id = dependency_list.pop(0)
if number_of_requirements == 0:
creation_order_list.append(id)
continue
else:
are_the_types_dependencies_already_resolved = True
for requirement in requires_graph[id]:
if requirement not in creation_order_list:
are_the_types_dependencies_already_resolved = False
continue
if are_the_types_dependencies_already_resolved:
creation_order_list.append(id)
else:
dependency_list.append((number_of_requirements, id))
return creation_order_list
def _generate_extra_properties(dictionary_of_values, ignore):
extra = {}
for k, v in dictionary_of_values.iteritems():
if k not in ignore and not k.startswith("__"):
if v:
if isinstance(v, basestring):
extra.update({k:v})
elif isinstance(v, bool):
extra.update({k:v})
elif v.has_key("id"):
extra.update({k:v["id"]})
elif v.has_key("value"):
extra.update({k:v["value"]})
else:
raise ValueError("There is a problem with getting the property value.")
else:
if isinstance(v, bool): # well, if its False...
extra.update({k:v})
return extra
def _get_needed(s, type_id):
    """Fetch everything the graph needs to know about ``type_id`` and
    annotate the result with __related / __requires (plus a per-property
    __requires list).

    Only ids under the same domain as ``type_id`` count as related or
    required, so we do not drag in all of commons just because something
    is a /people/person.
    """
    q = deepcopy(TYPE_QUERY)
    q.update(id=type_id)
    r = s.mqlread(q)
    properties = r["properties"]
    # let's identify who the parent is in order to only include
    # other types in that domain. We don't want to go around including
    # all of commons because someone's a /people/person
    parents = [r["domain"]["id"]]
    # List comprehension instead of map(): under Python 3 the original
    # map object was consumed by set() below, so `requires` was then
    # computed from an exhausted (empty) iterator.
    included_types = [hint["id"] for hint in r["/freebase/type_hints/included_types"]]
    related_types = set(included_types)
    for prop in properties:
        if prop["expected_type"]:
            related_types.add(prop["expected_type"])
    # we have two different types of relationships: required and related.
    # related can be used to generate subgraphs of types
    # required is used to generate the dependency graph of types
    related = _return_relevant(related_types, parents)
    requires = _return_relevant(included_types, parents)
    # per-property dependencies: masters and delegates must exist first
    for prop in properties:
        dependent_on = set()
        if prop["master_property"]:
            dependent_on.add(prop["master_property"])
        if prop["delegated"]:
            dependent_on.add(prop["delegated"])
        prop["__requires"] = _return_relevant(dependent_on, parents)
    # return all the information along with our special __* properties
    info = r
    info.update(__related=related, __requires=requires)
    return info
def _return_relevant(start_list, parents):
final = []
for item in start_list:
indomain = False
for parent in parents:
if item.startswith(parent):
indomain = True
continue
if indomain:
final.append(item)
return final
# Template MQL query describing a /type/property: every field needed to
# recreate the property elsewhere (used by get_property_info and, via
# TYPE_QUERY below, by _get_needed).
PROPERTY_QUERY = {
    "optional" : True,
    "type" : "/type/property",
    "delegated" : None,
    "enumeration" : None,
    "expected_type" : None,
    "id" : None,
    "key" : [{
        "namespace" : None,
        "value" : None
    }],
    "master_property" : None,
    "name" : {"value" : None, "lang" : "/lang/en", "optional":True},
    "unique" : None,
    "unit" : None,
    "/freebase/documented_object/tip" : {"value" : None, "limit":1, "optional" : True},
    "/freebase/property_hints/disambiguator" : None,
    "/freebase/property_hints/display_none" : None,
    "/freebase/property_hints/display_orientation" : None,
    "/freebase/property_hints/enumeration" : None,
    "/freebase/property_hints/dont_display_in_weblinks" : None,
    "/freebase/property_hints/inverse_description" : None,
    }
# Template MQL query describing a /type/type, including all of its
# properties (one PROPERTY_QUERY each, attached just below).
TYPE_QUERY = {
    "type" : "/type/type",
    "domain" : {},
    "key" : [{"namespace" : None, "value" : None}],
    "name" : {"value" : None, "lang" : "/lang/en", "optional":True},
    "/freebase/type_hints/included_types" : [{"id" : None, "optional" : True}],
    "/freebase/type_hints/mediator" : None,
    "/freebase/type_hints/enumeration" : None,
    "/freebase/type_hints/minor" : None,
    "/freebase/documented_object/tip" : {"value" : None, "limit":1, "optional":True},
    }
TYPE_QUERY.update(properties=[deepcopy(PROPERTY_QUERY)])
# Fields that restore() handles explicitly and therefore excludes from
# the generic "extra" pass-through (see _generate_extra_properties).
# NOTE(review): "INGORE" is a typo for IGNORE, but the name is referenced
# elsewhere in this file, so it is kept for compatibility.
TYPE_INGORE_PROPERTIES = ("name", "domain", "key", "type", "id", "properties", "/freebase/type_hints/enumeration",
                          "/freebase/type_hints/included_types", "/freebase/type_hints/mediator", "/freebase/documented_object/tip")
PROPERTY_IGNORE_PROPERTIES = ("name", "expected_type", "key", "id", "master_property", "delegated", "unique", "type", "schema",
                              "/freebase/property_hints/disambiguator", "enumeration", "/freebase/property_hints/enumeration",
                              "/freebase/documented_object/tip")